% text
% stringlengths 56
% 7.94M
% |
% ---|
\begin{document}
\maketitle
\begin{abstract}
Roughly speaking, by using the semi-stable minimal
model program, we prove that the moduli part of an lc-trivial fibration coincides
with that of a klt-trivial fibration induced by adjunction after taking a suitable generically finite cover.
As an application, we obtain that the moduli part of an lc-trivial fibration
is b-nef and abundant by Ambro's result on klt-trivial fibrations.
{\noindent \textsc{R\'esum\'e}.} Grosso modo, en utilisant le programme des mod\`eles minimaux semi-stables, nous montrons que la partie modulaire d'une fibration lc-triviale co\"{\i}ncide
avec celle d'une fibration klt-triviale induite par adjonction apr\`es changement de base par un morphisme g\'en\'eriquement fini.
Comme application, en utilisant le r\'esultat d'Ambro sur les fibrations klt-triviales, on obtient que la partie modulaire d'une fibration lc-triviale est b-nef et abondante.
\end{abstract}
\tableofcontents
\section{Introduction}
In this paper, we prove the following theorem.
More precisely, we reduce Theorem \ref{main} to
Ambro's result (see \cite[Theorem 3.3]{ambro2}) by using
the semi-stable minimal model program (see, for example, \cite{fujino3}).
For a related result, see \cite[Theorem 1.4]{floris}.
\begin{thm}[{cf.~\cite[Theorem 3.3]{ambro2}}]\label{main}
Let $f:X\to Y$ be a projective surjective
morphism between normal projective varieties with connected fibers.
Assume that $(X, B)$ is log canonical and $K_X+B\sim _{\mathbb Q, Y}0$. Then
the moduli $\mathbb Q$-b-divisor $\mathbf M$ is b-nef and abundant.
\end{thm}
Let us recall the definition of {\em{b-nef and abundant}} $\mathbb Q$-b-divisors.
\begin{defn}[{\cite[Definition 3.2]{ambro2}}]
A $\mathbb Q$-b-divisor $\mathbf M$ of a normal complete algebraic variety
$Y$ is called {\em{b-nef and abundant}}
if there exists a proper birational morphism $Y'\to Y$ from a normal variety $Y'$,
endowed with a proper surjective morphism
$h:Y'\to Z$ onto a normal variety $Z$ with connected fibers, such that:
\begin{itemize}
\item[(1)] $\mathbf M_{Y'}\sim _\mathbb Q h^*H$, for some nef and big $\mathbb Q$-divisor $H$ of $Z$;
\item[(2)] $\mathbf M=\overline {\mathbf M_{Y'}}$.
\end{itemize}
\end{defn}
Let us quickly explain the idea of the proof of Theorem \ref{main}.
We assume that the pair $(X, B)$ in Theorem \ref{main} is dlt for simplicity.
Let $W$ be a log canonical center of $(X, B)$ which is dominant onto $Y$ and is minimal over the generic point of $Y$.
We set $K_W+B_W=(K_X+B)|_W$ by adjunction.
Then we have $K_W+B_W\sim _{\mathbb Q, Y}0$.
Let $h:W\to Y'$ be the Stein factorization of $f|_W:W\to Y$.
Note that $(W, B_W)$ is klt over the generic point of $Y'$.
We prove that the moduli part $\mathbf M$ of $f:(X, B)\to Y$ coincides with
the moduli part $\mathbf M^{\min}$ of $h:(W, B_W)\to Y'$ after taking a
suitable generically finite base change by using the semi-stable minimal model program.
We do not need the {\em{mixed}} period map nor
the infinitesimal {\em{mixed}} Torelli theorem (see \cite[Section 2]{ambro2} and \cite{ssu}) for the proof of
Theorem \ref{main}.
We just reduce the problem on lc-trivial fibrations to Ambro's result on klt-trivial
fibrations, which follows from the theory of period maps.
Our proof of Theorem \ref{main} partially answers the questions in \cite[8.3.8 (Open problems)]{kollar}.
It is conjectured that $\mathbf M$ is b-semi-ample (see,
for example, \cite[0.~Introduction]{ambro1}, \cite[Conjecture 7.13.3]{ps},
\cite{floris}, \cite{birkar-chen}, and \cite[Section 3]{fujino10}).
The b-semi-ampleness of the moduli part has been proved only for some special cases
(see, for example, \cite{kawamata}, \cite{fujino1}, and \cite[Section 8]{ps}).
See also Remark \ref{41} below.
% \end{ack}  (stray \end{ack}: no matching \begin{ack} in this source)
We will work over $\mathbb C$, the complex number field, throughout this paper.
We will make use of the standard notation as in \cite{fujino-funda}.
\section{Preliminaries}
Throughout this paper, we do not use $\mathbb R$-divisors. We only use $\mathbb Q$-divisors.
\begin{say}[Pairs]
A pair $(X, B)$ consists of a normal variety $X$ over
$\mathbb C$ and a $\mathbb Q$-divisor
$B$ on $X$ such that $K_X+B$ is $\mathbb Q$-Cartier.
A pair $(X, B)$ is called {\em{subklt}} (resp.~{\em{sublc}}) if for any projective birational morphism
$g:Z\to X$ from a normal variety $Z$, every coefficient of $B_Z$ is $<1$ (resp.~$\leq 1$) where
$K_Z+B_Z:=g^*(K_X+B)$. A pair $(X, B)$ is called {\em{klt}}
(resp.~{\em{lc}}) if $(X, B)$ is subklt (resp.~sublc) and $B$ is effective. Let $(X, B)$ be an lc pair.
If there is a
log resolution $g:Z\to X$ of $(X, B)$ such that
$\mathrm{Exc}(g)$ is a divisor and that
the coefficients of the $g$-exceptional part of $B_Z$
are $<1$, then
the pair $(X, B)$ is called
{\em{divisorial log terminal}} ({\em{dlt}}, for short).
Let $(X, B)$ be a sublc pair and let $W$ be a closed subset of $X$. Then $W$ is called a {\em{log canonical
center}} of $(X, B)$ if
there are a projective birational morphism $g:Z\to X$ from a normal
variety $Z$ and a prime divisor $E$ on $Z$ such that $\mult _EB_Z=1$ and
that $g(E)=W$. Moreover we say that $W$ is {\em{minimal}} if it is minimal with
respect to inclusion.
\end{say}
In this paper, we use the notion of {\em{b-divisors}} introduced by Shokurov.
For details, we refer to \cite[2.3.2]{corti} and \cite[Section 3]{fujino4}.
\begin{say}
[Canonical b-divisors] Let $X$ be a normal variety and let $\omega$ be a top rational differential
form of $X$.
Then $(\omega)$ defines a b-divisor $\mathbf K$.
We call $\mathbf K$ the {\em{canonical b-divisor}} of $X$.
\end{say}
\begin{say}[$\mathbf A(X, B)$ and $\mathbf A^*(X, B)$]
The {\em{discrepancy b-divisor}} $\mathbf A=\mathbf A(X, B)$ of a pair $(X, B)$ is the $\mathbb Q$-b-divisor
of $X$ with the trace $\mathbf A_Y$ defined by the
formula
$$
K_Y=f^*(K_X+B)+\mathbf A_Y,
$$
where $f:Y\to X$ is a proper birational morphism of normal varieties.
Similarly, we define $\mathbf A^*=\mathbf A^*(X, B)$ by
$$
\mathbf A_Y^*=\sum _{a_i>-1}a_i A_i
$$
for
$$
K_Y=f^*(K_X+B)+\sum a_i A_i,
$$
where $f:Y\to X$ is a proper birational morphism of normal varieties.
Note that $\mathbf A(X, B)=\mathbf A^*(X, B)$ when $(X, B)$ is subklt.
By the definition, we have
$\mathcal O_X(\lceil \mathbf A^*(X, B)\rceil)\simeq \mathcal O_X$
if $(X, B)$ is lc (see \cite[Lemma 3.19]{fujino4}). We also
have $\mathcal O_X(\lceil \mathbf A(X, B)\rceil)
\simeq \mathcal O_X$ when $(X, B)$ is klt.
\end{say}
\begin{say}[b-nef and b-semi-ample $\mathbb Q$-b-divisors]
Let $X$ be a normal variety and let $X\to S$ be a proper surjective morphism onto a variety $S$.
A $\mathbb Q$-b-divisor $\mathbf D$ of $X$ is {\em{b-nef over $S$}}
(resp.~{\em{b-semi-ample over $S$}}) if there exists a proper birational morphism $X'\to X$ from a normal
variety $X'$ such that $\mathbf D=\overline {\mathbf D_{X'}}$ and $\mathbf D_{X'}$ is nef
(resp.~semi-ample) relative to the induced morphism $X'\to S$.
\end{say}
\begin{say}
Let $D=\sum _i d_iD_i$ be a $\mathbb Q$-divisor on a normal variety, where
$D_i$ is a prime divisor for every $i$, $D_i\ne D_j$ for $i\ne j$, and
$d_i\in \mathbb Q$ for every $i$. Then we
set
$$
D^{\geq 0}=\sum _{d_i\geq 0}d_iD_i \quad \text{and} \quad D^{\leq 0}=\sum _{d_i\leq 0}d_iD_i.
$$
\end{say}
\section{A quick review of lc-trivial fibrations}
In this section, we quickly recall some basic definitions and results on
{\em{klt-trivial fibrations}} and {\em{lc-trivial fibrations}} (see also \cite[Section 3]{fujino10}).
\begin{defn}[Klt-trivial fibrations]\label{def-klt}
A {\em{klt-trivial fibration}} $f:(X, B)\to Y$ consists of a proper
surjective morphism $f:X\to Y$ between normal varieties with connected fibers and
a pair $(X, B)$ satisfying the following properties:
\begin{itemize}
\item[(1)] $(X, B)$ is subklt over the generic point of $Y$;
\item[(2)] $\rank f_*\mathcal O_X(\lceil \mathbf A(X, B)\rceil)=1$;
\item[(3)] There exists a $\mathbb Q$-Cartier $\mathbb Q$-divisor
$D$ on $Y$ such that
$$
K_X+B\sim _{\mathbb Q}f^*D.
$$
\end{itemize}
\end{defn}
Note that Definition \ref{def-klt} is nothing but \cite[Definition 2.1]{ambro1},
where a klt-trivial fibration is called an lc-trivial fibration. So, our
definition of lc-trivial fibrations in Definition \ref{def-lc} is different from the
original one in \cite[Definition 2.1]{ambro1}.
\begin{defn}[Lc-trivial fibrations]\label{def-lc}
An {\em{lc-trivial fibration}} $f:(X, B)\to Y$ consists of a proper
surjective morphism $f:X\to Y$ between normal varieties with connected fibers and
a pair $(X, B)$ satisfying the following properties:
\begin{itemize}
\item[(1)] $(X, B)$ is sublc over the generic point of $Y$;
\item[(2)] $\rank f_*\mathcal O_X(\lceil \mathbf A^*(X, B)\rceil)=1$;
\item[(3)] There exists a $\mathbb Q$-Cartier $\mathbb Q$-divisor
$D$ on $Y$ such that
$$
K_X+B\sim _{\mathbb Q}f^*D.
$$
\end{itemize}
\end{defn}
In Section \ref{sec4}, we sometimes take various base changes and
construct the induced lc-trivial fibrations and klt-trivial fibrations. For the
details, see \cite[Section 2]{ambro1}.
\begin{say}[Induced lc-trivial fibrations by base changes]\label{33}
Let $f:(X, B)\to Y$ be a klt-trivial (resp.~an lc-trivial) fibration and let $\sigma:Y'\to Y$ be a generically finite
morphism. Then we have an induced klt-trivial (resp.~lc-trivial) fibration $f':(X', B_{X'})\to Y'$, where
$B_{X'}$ is defined by $\mu^*(K_X+B)=K_{X'}+B_{X'}$:
$$
\xymatrix{
(X', B_{X'}) \ar[r]^{\mu} \ar[d]_{f'} & (X, B)\ar[d]^{f} \\
Y' \ar[r]_{\sigma} & Y.
}
$$
Note that $X'$ is the normalization of
the main component of $X\times _{Y}Y'$.
We sometimes replace $X'$ with $X''$ where $X''$ is a normal variety
such that there is a proper birational morphism
$\varphi:X''\to X'$.
In this case, we set
$K_{X''}+B_{X''}=\varphi^*(K_{X'}+B_{X'})$.
\end{say}
Let us explain the definitions of the {\em{discriminant}} and {\em{moduli}} $\mathbb Q$-b-divisors.
\begin{say}
[Discriminant $\mathbb Q$-b-divisors and moduli $\mathbb Q$-b-divisors]
Let $f:(X, B)\to Y$ be an lc-trivial fibration as in Definition \ref{def-lc}.
Let $P$ be a prime divisor on $Y$.
By shrinking $Y$ around the generic point of $P$,
we assume that $P$ is Cartier. We set
$$
b_P=\max \left\{t \in \mathbb Q\, \left|\,
\begin{array}{l} {\text{$(X, B+tf^*P)$ is sublc over}}\\
{\text{the generic point of $P$}}
\end{array}\right. \right\}
$$
and
set $$
B_Y=\sum _P (1-b_P)P,
$$
where $P$ runs over prime divisors on $Y$. Then it is easy to see that
$B_Y$ is a well-defined $\mathbb Q$-divisor on $Y$ and is called the {\em{discriminant
$\mathbb Q$-divisor}} of $f:(X, B)\to Y$. We set
$$
M_Y=D-K_Y-B_Y
$$
and call $M_Y$ the {\em{moduli $\mathbb Q$-divisor}} of $f:(X, B)\to Y$.
Let $\sigma:Y'\to Y$ be a proper birational morphism
from a normal variety $Y'$ and let $f':(X', B_{X'})\to Y'$ be the induced lc-trivial fibration
by $\sigma:Y'\to Y$ (see \ref{33}). We can define $B_{Y'}$, $K_{Y'}$ and $M_{Y'}$ such that
$\sigma^*D=K_{Y'}+B_{Y'}+M_{Y'}$,
$\sigma_*B_{Y'}=B_Y$, $\sigma _*K_{Y'}=K_Y$ and $\sigma_*M_{Y'}=M_Y$. Hence
there exist a unique $\mathbb Q$-b-divisor $\mathbf B$ such that
$\mathbf B_{Y'}=B_{Y'}$ for every $\sigma:Y'\to Y$ and a unique
$\mathbb Q$-b-divisor $\mathbf M$ such that $\mathbf M_{Y'}=M_{Y'}$ for
every $\sigma:Y'\to Y$.
Note that $\mathbf B$ is called the {\em{discriminant $\mathbb Q$-b-divisor}} and
that $\mathbf M$ is called the {\em{moduli $\mathbb Q$-b-divisor}} associated to $f:(X, B)\to Y$.
We sometimes simply say that $\mathbf M$ is the {\em{moduli part}} of $f:(X, B)\to Y$.
\end{say}
For the basic properties of
the discriminant and moduli $\mathbb Q$-b-divisors, see \cite[Section 2]{ambro1}.
Let us recall the main theorem of \cite{ambro1}. Note that
a klt-trivial fibration in the sense of Definition \ref{def-klt} is called
an lc-trivial fibration in \cite{ambro1}.
\begin{thm}[{see \cite[Theorem 2.7]{ambro1}}]\label{thm-klt-tri}
Let $f:(X, B)\to Y$ be a klt-trivial fibration and let $\pi:Y\to S$ be a proper morphism.
Let $\mathbf B$ and $\mathbf M$ be the induced discriminant and moduli $\mathbb Q$-b-divisors of $f$.
Then,
\begin{itemize}
\item[(1)] $\mathbf K+\mathbf B$ is $\mathbb Q$-b-Cartier,
that is, there exists a proper birational morphism $Y'\to Y$ from a normal
variety $Y'$ such that $\mathbf {K}+\mathbf {B}=\overline{K_{Y'}+\mathbf{B}_{Y'}}$,
\item[(2)] $\mathbf M$ is b-nef over $S$.
\end{itemize}
\end{thm}
Theorem \ref{thm-klt-tri} has some important applications, see, for example,
\cite[Proof of Theorem 1.1]{fujino-kawa} and
\cite[Proof of Theorem 1.1]{fujino4}.
By modifying the arguments in \cite[Section 5]{ambro1} suitably with the
aid of \cite[Theorems 3.1, 3.4, and 3.9]{fujino2} (see also \cite{fujino-fujisawa}),
we can generalize Theorem \ref{thm-klt-tri}
as follows.
\begin{thm}\label{thm-lc-tri}
Let $f:(X, B)\to Y$ be an lc-trivial fibration and let $\pi:Y\to S$ be a proper morphism.
Let $\mathbf B$ and $\mathbf M$ be the induced discriminant and moduli $\mathbb Q$-b-divisors of $f$.
Then,
\begin{itemize}
\item[(1)] $\mathbf K+\mathbf B$ is $\mathbb Q$-b-Cartier,
\item[(2)] $\mathbf M$ is b-nef over $S$.
\end{itemize}
\end{thm}
Theorem \ref{thm-klt-tri} is proved by using the theory of variations of
Hodge structure. On the
other hand, Theorem \ref{thm-lc-tri} follows from the theory of
variations of {\em{mixed}} Hodge structure.
We do not adopt the formulation in \cite[Section 4]{fujino-pre} (see also \cite[8.5]{kollar})
because the argument in \cite{ambro1} suits
our purposes better.
For the reader's convenience, we include the main ingredient of the proof of Theorem \ref{thm-lc-tri}, which
easily follows from \cite[Theorems 3.1, 3.4, and 3.9]{fujino2} (see also \cite{fujino-fujisawa}).
\begin{thm}[{cf.~\cite[Theorem 4.4]{ambro1}}]\label{thm-mhs}
Let $f:X\to Y$ be a projective morphism between algebraic varieties.
Let $\Sigma_X$ {\em{(}}resp.~$\Sigma_Y${\em{)}} be
a simple normal crossing divisor on $X$ {\em{(}}resp.~$Y${\em{)}} such that
$f$ is smooth over $Y\setminus \Sigma_Y$, $\Sigma_X$ is relatively normal crossing
over $Y\setminus \Sigma_Y$, and $f^{-1}(\Sigma_Y)\subset \Sigma _X$.
Assume that $f$ is semi-stable in codimension one.
Let $D$ be a simple normal crossing divisor on $X$ such that
$\Supp D\subset \Sigma_X$ and
that every irreducible component of $D$ is dominant onto $Y$. Then the following properties hold.
\begin{itemize}
\item[(1)] $R^pf_*\omega_{X/Y}(D)$ is a locally free sheaf on $Y$ for every $p$.
\item[(2)] $R^pf_*\omega_{X/Y}(D)$ is semi-positive for every $p$.
\item[(3)] Let $\rho :Y'\to Y$ be a projective morphism from a smooth variety $Y'$ such that
$\Sigma_{Y'}=\rho^{-1}(\Sigma_Y)$ is a simple normal crossing
divisor on $Y'$.
Let $\pi:X'\to X\times _YY'$ be a resolution of the main component of $X\times _YY'$ such that
$\pi$ is an isomorphism over $Y'\setminus \Sigma_{Y'}$. Then
we obtain the following commutative diagram{\em{:}}
$$
\xymatrix{
X' \ar[r] \ar[d]_{f'} & X\ar[d]^{f} \\
Y' \ar[r]_{\rho} & Y.
}
$$
Assume that $f'$ is projective, $D'$ is a simple normal crossing divisor on $X'$ such that
$D'$ coincides with
$D\times _YY'$ over $Y'\setminus \Sigma_{Y'}$, and every stratum of $D'$ is dominant onto
$Y'$.
Then there exists a natural isomorphism
$$
\rho^*(R^pf_*\omega_{X/Y}(D))\simeq R^pf'_*\omega_{X'/Y'}(D')
$$
which extends the base change isomorphism over $Y\setminus \Sigma_Y$ for
every $p$.
\end{itemize}
\end{thm}
\begin{rem}
For the proof of Theorem \ref{thm-lc-tri},
Theorem \ref{thm-mhs} for $p=0$ is sufficient.
Note that all the local monodromies on
$R^q(f_{0})_*\mathbb C_{X_0\setminus D_0}$ around $\Sigma_Y$ are unipotent
for every $q$ because $f$ is semi-stable in codimension one, where
$X_0=f^{-1}(Y\setminus \Sigma_Y)$, $D_0=D|_{X_0}$, and $f_0=f|_{X_0\setminus D_0}$.
More precisely, let $C_0^{[d]}$ be
the disjoint union of all the
codimension $d$ log canonical centers of $(X_0, D_0)$.
If $d=0$, then we put $C_0^{[0]}=X_0$.
In this case, we have the following weight spectral sequence
$$
_W\!E_1^{-d, q+d}=R^{q-d}(f|_{C_0^{[d]}})_*
\mathbb C_{C_0^{[d]}}\Longrightarrow R^q(f_0)_*\mathbb C_{X_0\setminus D_0}
$$
which degenerates at $E_2$ (see, for example, \cite[Corollaire (3.2.13)]{deligne}).
Since $f$ is semi-stable in codimension one, all the local monodromies
on $R^{q-d}(f|_{C_0^{[d]}})_*\mathbb C_{C_0^{[d]}}$ around $\Sigma_Y$ are
unipotent for every $q$ and $d$
(see, for example, \cite[VII.~The Monodromy theorem]{katz}).
By the above spectral sequence, we obtain that
all the local monodromies on $R^q(f_0)_*\mathbb C_{X_0\setminus D_0}$ around
$\Sigma_Y$ are unipotent.
\end{rem}
We add a remark on the proof of Theorem \ref{thm-lc-tri}.
In Remark \ref{rem39}, we explain how to modify the arguments in the proof of \cite[Lemma 5.2]{ambro1}
in order to treat lc-trivial fibrations.
It will help the reader to understand the main difference between klt-trivial fibrations and lc-trivial fibrations and the
reason why we need Theorem \ref{thm-mhs}.
\begin{rem}\label{rem39}
We use the notation in \cite[Lemma 5.2]{ambro1}. We only assume that
$(X, B)$ is sublc over the generic point of $Y$ in \cite[Lemma 5.2]{ambro1}.
We write
$$
B=\sum _{i\in I}d_iB_i
$$
where $B_i$ is a prime divisor for every $i$ and $B_i\ne B_j$ for $i\ne j$.
We set
$$
J=\left\{i\in I \, \left|\, {\text{$B_i$ is dominant onto $Y$
and $d_i=1$}} \right. \right\}
$$
and set
$$
D=\sum _{i\in J}B_i.
$$
In Ambro's original setting in \cite[Lemma 5.2]{ambro1}, we have
$D=0$ because
$(X, B)$ is subklt over the generic point of $Y$.
In the proof of \cite[Lemma 5.2 (4)]{ambro1},
we have to replace
$$
\widetilde f_*\omega_{\widetilde {X}/Y}=
\bigoplus _{i=0}^{b-1}f_*\mathcal O_X(\lceil (1-i)K_{X/Y}-iB+if^*B_Y+if^*M_Y\rceil)\cdot \psi^i
$$
with
$$
\widetilde f_*\omega_{\widetilde {X}/Y}(\pi^*D)=
\bigoplus _{i=0}^{b-1}f_*\mathcal O_X(\lceil (1-i)K_{X/Y}-iB+D+if^*B_Y+if^*M_Y\rceil)\cdot \psi^i
$$
in order to treat lc-trivial fibrations.
We leave the details as exercises for the reader.
\end{rem}
The following theorem is a part of \cite[Theorem 3.3]{ambro2}.
\begin{thm}[{see \cite[Theorem 3.3]{ambro2}}]\label{thm-moduli}
Let $f:(X, B)\to Y$ be a klt-trivial fibration such that
$Y$ is complete,
the geometric generic fiber $X_{\overline \eta}=X\times _Y \Spec \overline {\mathbb C(\eta)}$ is a projective variety,
and
$B_{\overline \eta}=B|_{X_{\overline {\eta}}}$ is effective, where
$\eta$ is the generic point of $Y$.
Then the moduli $\mathbb Q$-b-divisor $\mathbf M$ is b-nef and abundant.
\end{thm}
\section{Proof of Theorem \ref{main}}\label{sec4}
Let us give a proof of Theorem \ref{main}.
\begin{proof}[Proof of Theorem \ref{main}]
By taking a dlt blow-up, we may assume that
the pair $(X, B)$ is $\mathbb Q$-factorial and dlt (see, for example, \cite[Section 4]{fujino3}).
If $(X, B)$ is klt over the generic point of $Y$, then Theorem \ref{main} follows from \cite[Theorem 3.3]{ambro2}
(see Theorem \ref{thm-moduli}).
Therefore, we may also assume that $(X, B)$ is not klt over the generic point of $Y$.
Let $\sigma_1:Y_1\to Y$ be a suitable
projective birational morphism such that $\mathbf M=\overline {\mathbf M_{Y_1}}$
and $\mathbf M_{Y_1}$ is nef by Theorem \ref{thm-lc-tri}.
Let $W$ be an arbitrary log canonical center of $(X, B)$ which is dominant onto $Y$ and
is minimal over the
generic point of $Y$. We set
$$K_W+B_W=(K_X+B)|_W$$
by adjunction (see, for example, \cite[3.9]{fujino-what}). By the construction, we have $K_W+B_W
\sim _{\mathbb Q, Y}0$.
We consider the Stein factorization of $f|_W:W\to Y$ and denote it
by $h:W\to Y'$. Then $K_W+B_W\sim _{\mathbb Q, Y'}0$.
We see that $h:(W, B_W)\to Y'$ is a klt-trivial fibration since the general fibers of $f|_{W}$ are
klt pairs.
Let $Y_2$ be a suitable resolution of $Y'$ which factors through $\sigma_1:Y_1\to Y$.
By taking the base change by $\sigma_2:Y_2\to Y_1$,
we obtain $\mathbf M_{Y_2}=\sigma_2^*\mathbf M_{Y_1}$
(see \cite[Proposition 5.5]{ambro1}).
Note that the proof of \cite[Proposition 5.5]{ambro1} works for
lc-trivial fibrations by some suitable modifications.
By the construction, on the induced lc-trivial fibration $f_2:(X_2, B_{X_2})\to Y_2$,
where $X_2$ is the normalization of the main component of
$X\times _YY_2$,
there is a log canonical
center $W_2$ of $(X_2, B_{X_2})$ such that $f_2|_{W_2^\nu}: (W_2^\nu, B_{W_2^\nu})\to Y_2$ is a
klt-trivial fibration,
which is birationally equivalent to $h:(W, B_W)\to Y'$.
Note that $\nu: W_2^\nu\to W_2$
is the normalization, $K_{W_2^\nu}+B_{W_2^\nu}=\nu^*(K_{X_2}+B_{X_2})|_{W_2}$,
and $f_2|_{W_2^\nu}=f_2|_{W_2}\circ \nu$.
It is easy to see that
$$
K_{Y_2}+\mathbf M_{Y_2}+\mathbf B_{Y_2}\sim _{\mathbb Q}
K_{Y_2}+\mathbf M^{\min}_{Y_2}+\mathbf B^{\min}_{Y_2}
$$
where $\mathbf M^{\min}$ and $\mathbf B^{\min}$ are the induced moduli
and discriminant $\mathbb Q$-b-divisors
of $f_2|_{W^\nu_2}: (W^\nu_2, B_{W^\nu_2})\to Y_2$ such that
$$
K_{W^\nu_2}+B_{W^\nu_2}\sim _{\mathbb Q}(f_2|_{W^\nu_2})
^*(K_{Y_2}+\mathbf M_{Y_2}^{\min}+\mathbf B_{Y_2}^{\min}).
$$
By replacing $Y_2$ birationally, we may further assume that
$\mathbf M^{\min}=\overline {\mathbf M_{Y_2}^{\min}}$ by Theorem \ref{thm-klt-tri}.
By Theorem \ref{thm-moduli}, we see that $\mathbf M_{Y_2}^{\min}$ is nef and abundant.
Let $\sigma_3:Y_3\to Y_2$ be a suitable
generically finite morphism such that
the induced lc-trivial fibration $f_3:(X_3, B_{X_3})\to Y_3$ has a semi-stable resolution in codimension one
(see, for example, \cite{kkms}, \cite[(9.1) Theorem]{ssu}, and \cite[Theorem 4.3]{ambro1}).
Note that $X_3$ is the normalization of the main component of $X\times _YY_3$.
Here we draw the following big diagram for the reader's convenience.
$$
\xymatrix{
(V, B_V) \ar[dr]^{\textrm{log-res.}} & & & & & &\\
& (X_3,B_3) \ar[rrr] \ar[dd]^{f_3}
& & & (X_2,B_2)\ar[rr]\ar[dd]^{f_2}
& & (X, B)\ar[dd]^{f}
\\(W_3,B_{W_3})
\ar[dr]_{g_3}
\ar[ur]
& & W_2^\nu
\ar[drr]_{{f_2}|_{W_2^\nu}}
\ar[r]_\nu^{\textrm {norm.}}
& W_2
\ar@{^{(}->}[ur]
\ar[dr]^{{f_2}|_{W_2}}
& & W
\ar@{^{(}->}[ur]
\ar@{->>}[d]^h\ar@{->>}[dr]
&\\
&Y_3
\ar@/_1pc/[rrr]_{\textrm {semistab.}}
&&&
Y_2\ar[r]^{\textrm {desing.}}\ar[dr]_{\sigma_2}
&
Y'\ar[r]^{\textrm {Stein}}
&
Y
\\
& & & & & Y_1\ar[ur]_{\sigma_1} &
}
$$
Note that $g_3:(W_3, B_{W_3})\to
Y_3$ is the induced klt-trivial
fibration from $f_2|_{W_2^\nu}: W_2^\nu\to Y_2$ by $\sigma_3:Y_3\to
Y_2$.
On $Y_3$, we will see the following claim by using the semi-stable minimal model
program.
\begin{claim} The following equality
$$
\mathbf B_{Y_3}=\mathbf B_{Y_3}^{\min}
$$
holds.
\end{claim}
\begin{proof}[Proof of Claim]
By taking general hyperplane cuts, we may assume that $Y_3$ is a curve.
We write
$$
\mathbf B_{Y_3}=\sum _P(1-b_P)P \quad {\text{and}}\quad \mathbf B_{Y_3}^{\min}=\sum _P(1-b_P^{\min})P.
$$
Let $\varphi: (V, B_V)\to (X_3, B_{X_3})$ be a resolution of $(X_3, B_{X_3})$ with the following properties:
\begin{itemize}
\item $K_V+B_V=\varphi^*(K_{X_3}+B_{X_3})$;
\item $\pi^*Q$ is a reduced simple normal crossing divisor on $V$ for every $Q\in Y_3$, where
$\pi: V\to X_3\to Y_3$;
\item $\Supp \pi^*Q\cup \Supp B_V$ is a simple normal crossing divisor on $V$ for every $Q\in Y_3$;
\item $\pi$ is projective.
\end{itemize}
Let $\Sigma$ be a reduced divisor on $Y_3$ such that $\pi$ is smooth over $Y_3\setminus \Sigma$ and
that $\Supp
B_V$ is relatively normal crossing over $Y_3\setminus \Sigma$.
We consider the set of prime divisors $\{E_i\}$ where $E_i$ is a prime divisor on $V$ such that
$\pi(E_i)\in \Sigma$ and
$$\mult _{E_i} (B_V+\sum _{P\in\Sigma}b_P\pi^*P)^{\geq 0}<1. $$
We run the minimal model programs with ample scaling with respect to
$$
K_V+(B_V+\sum _{P\in \Sigma}b_P\pi^*P)^{\geq 0}+\varepsilon \sum_i E_i
$$
over $X_3$ and $Y_3$ for some small positive rational number $\varepsilon$.
Note that
$$
(V, (B_V+\sum _P b_P\pi^*P)^{\geq 0}+\varepsilon \sum _i E_i)
$$
is a $\mathbb Q$-factorial dlt pair because $0<\varepsilon \ll 1$.
We set $$E=-(B_V+\sum _P b_P\pi^*P)^{\leq0}+\varepsilon \sum_i E_i. $$
Then it holds that
$$
K_V+(B_V+\sum _Pb_P\pi^*P)^{\geq 0}+\varepsilon \sum_i E_i \sim _{\mathbb Q, Y_3}E\geq 0.
$$
First we run a minimal model program with ample scaling with respect to
$$
K_V+(B_V+\sum _Pb_P\pi^*P)^{\geq 0}+\varepsilon \sum_i E_i \sim _{\mathbb Q, X_3}E\geq 0
$$
over $X_3$. Note that every irreducible component of
$E$ which is dominant onto $Y_3$ is exceptional
over $X_3$ by the construction.
Thus, if $E$ is dominant onto $Y_3$, then it is not
contained in the relative movable cone over $X_3$. Therefore,
after finitely many steps, we may assume that every irreducible
component of $E$ is contained in a fiber over $Y_3$ (see,
for example, \cite[Theorem 2.2]{fujino3}). Next we run a minimal model
program with ample scaling with respect to
$$
K_V+(B_V+\sum _Pb_P\pi^*P)^{\geq 0}+\varepsilon \sum_i E_i \sim _{\mathbb Q, Y_3}E\geq 0
$$
over $Y_3$.
Then the minimal model program terminates at $V'$ (see, for example, \cite[Theorem 2.2]{fujino3}).
Note that all the components of $E+\sum _i E_i$ are contracted by
the above minimal model programs.
Thus, we have
$$
K_{V'}+(B_{V'}+\sum _P b_P{\pi'}^*P)^{\geq 0}\sim _{\mathbb Q, Y_3}0,
$$
where $\pi':V'\to Y_3$ and $B_{V'}$ is the pushforward of $B_V$ on $V'$.
Note that $B_{V'}+\sum _Pb_P\pi'^*P$ is effective
since $\Supp(E+\sum _i E_i)$ is contracted by the above minimal model programs.
Of course, we see that
$$
(V', (B_{V'}+\sum _P b_P\pi'^*P)^{\geq 0})=(V', B_{V'}+\sum _P b_P\pi'^*P)
$$
is a $\mathbb Q$-factorial dlt pair.
By the construction, the induced proper birational map
$$
(V, B_V+\sum _Pb_P\pi^*P)\dashrightarrow (V', B_{V'}+\sum _P b_P \pi'^*P)
$$
over $Y_3$ is $B$-birational (see \cite[Definition 1.5]{fujino-abun}), that is,
we have a common resolution
\begin{equation*}
\xymatrix{ & Z\ar[dl]_{a} \ar[dr]^{b}\\
V \ar@{-->}[rr] & & V'}
\end{equation*}
over $Y_3$ such that
$$
a^*(K_V+B_V+\sum _{P\in \Sigma}b_P\pi^*P)=b^*(K_{V'}+B_{V'}+\sum _{P\in \Sigma}b_P\pi'^*P).
$$
Let $S$ be any log canonical center of $(V', B_{V'}+\sum _Pb_P{\pi'}^*P)$ which is
dominant onto $Y_3$ and is minimal over the generic point of $Y_3$.
Then $(S, B_{S})$, where
$$
K_{S}+B_{S}=(K_{V'}+B_{V'}+\sum _Pb_P{\pi'}^*P)|_{S},
$$
is not klt but lc over every $P\in \Sigma$ since it holds that
\begin{align}\label{shiki1}
B_{V'}+\sum _{P\in \Sigma}b_P\pi'^*P\geq \sum _{P\in \Sigma}\pi'^*P. \tag{$\spadesuit$}
\end{align}
Note that \eqref{shiki1} follows from the fact that all the components of $\sum _i E_i$ are contracted
in the minimal model
programs.
Let $g_3:(W_3, B_{W_3})\to Y_3$ be the induced klt-trivial fibration from $(W^\nu_2,
B_{W^\nu_2})\to Y_2$ by $\sigma_3:Y_3\to Y_2$.
By \cite[Claims $(A_n)$ and $(B_n)$ in the proof of Lemma 4.9]{fujino-abun}, there is a log canonical center
$S_0$ of $(V', B_{V'}+\sum _Pb_P\pi'^*P)$ which is dominant onto $Y_3$ and is minimal
over the generic point of $Y_3$ such that
there is a $B$-birational map
$$
(W_3, B_{W_3}+\sum _{P\in\Sigma}b_Pg_3^*P)\dashrightarrow (S_0, B_{S_0})
$$
over $Y_3$, where
$$
K_{S_0}+B_{S_0}=(K_{V'}+B_{V'}+\sum _{P\in \Sigma}b_P\pi'^*P)|_{S_0}.
$$
This means that there is a common resolution
\begin{equation*}
\xymatrix{ & T\ar[dl]_{\alpha} \ar[dr]^{\beta}\\
W_3 \ar@{-->}[rr] & & S_0}
\end{equation*}
over $Y_3$ such that
$$
\alpha^*(K_{W_3}+B_{W_3}+\sum _Pb_P g_3^*P)=\beta^*(K_{S_0}+B_{S_0}).
$$
This implies that $b_P=b_P^{\min}$ for every
$P\in \Sigma$. Therefore, we have $\mathbf B_{Y_3}=\mathbf B_{Y_3}^{\min}$.
\end{proof}
Then we obtain
$$\mathbf M_{Y_3}\sim _{\mathbb Q}\mathbf M_{Y_3}^{\min}=\sigma_3^*\mathbf M_{Y_2}^{\min}
$$
because
$$
K_{Y_3}+\mathbf M_{Y_3}+\mathbf B_{Y_3}\sim _{\mathbb Q} K_{Y_3}
+\mathbf M_{Y_3}^{\min} +\mathbf B_{Y_3}^{\min}.
$$
Thus, $\mathbf M_{Y_3}$ is nef and abundant.
Since
$$
\mathbf M_{Y_3}=\sigma_3^*\mathbf M_{Y_2}=\sigma_3^*\sigma_2^*\mathbf M_{Y_1},
$$
$\mathbf M$ is b-nef and abundant.
Moreover, by replacing $Y_3$ with a suitable
generically finite cover, we have that
$\mathbf M_{Y_3}$ and $\mathbf M_{Y_3}^{\min}$ are both Cartier
(see \cite[Lemma 5.2 (5), Proposition 5.4, and Proposition 5.5]{ambro1})
and $\mathbf M_{Y_3}\sim \mathbf M_{Y_3}^{\min}$.
\end{proof}
We close this paper with a remark on the b-semi-ampleness of $\mathbf M$.
For some related topics, see \cite[Section 3]{fujino10}.
\begin{rem}[b-semi-ampleness]\label{41}
Let $f:X\to Y$ be a projective surjective morphism between normal projective varieties with
connected fibers. Assume that
$(X, B)$ is log canonical and $K_X+B\sim _{\mathbb Q, Y}0$. Without loss of
generality, we may assume that $(X, B)$ is dlt by taking a dlt blow-up. We set
$$
d_f(X, B)=\left\{\dim W-\dim Y \left|
\begin{array}{l} {\text{$W$ is a log canonical center of}}\\
{\text{$(X, B)$ which is dominant onto $Y$}}
\end{array}\right. \right\}.
$$
If $d_f(X, B)\in \{0, 1\}$, then the b-semi-ampleness of the moduli part $\mathbf M$
follows from \cite{kawamata} and \cite{ps} by the
proof of Theorem \ref{main}. Moreover, it is obvious that $\mathbf M\sim _\mathbb Q 0$ when $d_f(X, B)=0$.
\end{rem}
\end{document}
\begin{document}
\title[Convolution Operators with Singular Measures of Fractional Type]{Convolution Operators with Singular Measures of Fractional Type on the Heisenberg Group}
\author{Tom\'as Godoy and Pablo Rocha}
\address{Universidad Nacional de C\'ordoba, FaMAF-UNC, C\'ordoba, 5000 C\'ordoba, Argentina}
\email{[email protected], \ [email protected]}
\thanks{\textbf{2010
Math. Subject Classification}: 43A80, 42A38}
\thanks{\textbf{Key
words and phrases}: Singular measures, group Fourier transform, Heisenberg group, convolution operators}
\thanks{Partially supported by Conicet and Secyt-UNC}
\maketitle
\begin{abstract}
We consider the Heisenberg group $\mathbb{H}^{n}=\mathbb{C}^{n} \times \mathbb{R}$.
Let $\mu_{\gamma}$ be the fractional Borel measure on $\mathbb{H}^{n}$ defined by
$$ \mu_{\gamma}(E) = \int_{\mathbb{C}^{n}}\chi_{E}\left(w,\varphi(w)\right) \prod_{j=1}^{n} \eta_j \left( |w_j|^{2} \right) | w_j |^{-\frac{\gamma}{n}}dw,$$
where $0 < \gamma < 2n$, $\varphi(w) = \sum\limits_{j=1}^{n} a_{j} \left\vert w_{j}\right\vert^{2}$, $w=(w_{1},...,w_{n}) \in \mathbb{C}^{n}$,
$a_{j} \in \mathbb{R}$, and $\eta_j \in C_{c}^{\infty}(\mathbb{R})$. In this paper we study the set of pairs $(p,q)$ such that the right convolution operator with $\mu_{\gamma}$ is bounded from $L^{p}(\mathbb{H}^{n})$ into $L^{q}(\mathbb{H}^{n})$.
\end{abstract}
\section{Introduction}
Let $\mathbb{H}^{n} = \mathbb{C}^{n} \times \mathbb{R}$ be the Heisenberg group with group law $\left( z,t\right) \cdot \left(w,s\right)
=\left( z+w,t+s+\left\langle z,w\right\rangle \right) $ where $\langle z,w \rangle = \frac{1}{2} \mathrm{Im}(\sum \limits_{j=1}^{n}z_{j} \cdot \overline{w_{j}})$. For $x=(x_{1},...,x_{2n})\in \mathbb{R}^{2n}$, we write $x=(x^{\prime },x^{\prime \prime })$ with
$x^{\prime }\in \mathbb{R}^{n}$, $x^{\prime \prime }\in \mathbb{R}^{n}$. So, $\mathbb{R}^{2n}$ can be identified with $\mathbb{C}^{n}$ via the map
$\Psi (x^{\prime },x^{\prime \prime })=x^{\prime }+ix^{\prime \prime }$. In this setting the form $\langle z,w \rangle $ agrees with the standard symplectic form on $\mathbb{R}^{2n}$. Thus $\mathbb{H}^{n}$ can be viewed as $\mathbb{R}^{2n} \times \mathbb{R}$
endowed with the group law
$$
\left( x,t\right) \cdot \left( y,s\right) =\left( x+y,t+s+\frac{1}{2} B(x,y) \right)
$$
where the symplectic form $B$ is given by $B(x,y)=\sum \limits_{j=1}^{n}\left( y_{n+j}x_{j}-y_{j}x_{n+j}\right) $, with $x=(x_{1},...,x_{2n})$ and $y=(y_{1},...,y_{2n})$, with neutral element $(0,0)$, and with inverse $\left( x,t\right) ^{-1}=\left(-x,-t\right) $.
Let $\varphi :\mathbb{R}^{2n}\rightarrow \mathbb{R}$ be a measurable function, and let $\mu_{\gamma} $ be the fractional Borel measure on $\mathbb{H}^{n}=\mathbb{R}^{2n}\times \mathbb{R}$ supported on the graph of $\varphi$, given by
\begin{equation}
\left\langle \mu_{\gamma} ,f\right\rangle
=\int\limits_{\mathbb{R}^{2n}}f\left( w,\varphi \left( w\right)
\right) \prod_{j=1}^{n} \eta_j \left( |w_j|^{2} \right) \left\vert w_j \right\vert ^{-\frac{\gamma}{n} }dw
\label{mu2}
\end{equation}
with $0< \gamma < 2n$, and where the $\eta _{j}$'s are functions in $C_{c}^{\infty }(\mathbb{R})$ such that $0\leq \eta _{j}\leq 1$, $\eta _{j}(t)\equiv 1$ if $t\in \lbrack -1,1]$ and $supp(\eta _{j})\subset (-2,2)$.
Let $T_{\mu_{\gamma} }$ be the right convolution operator by $\mu_{\gamma} $, defined by
\begin{equation}
T_{\mu_{\gamma} }f\left( x,t\right) =\left( f\ast \mu_{\gamma} \right) \left(x,t\right) =\int_{\mathbb{R}^{2n}}f\left( \left( x,t\right) \cdot
\left( w,\varphi \left( w\right) \right) ^{-1}\right) \prod_{j=1}^{n} \eta_j \left( |w_j|^{2} \right) \left\vert w_j \right\vert ^{-\frac{\gamma}{n}} dw. \label{tmu}
\end{equation}
We are interested in studying the type set
$$
E_{\mu_{\gamma} }=\left\{ \left( \frac{1}{p},\frac{1}{q}\right) \in \left[0,1\right] \times \left[ 0,1\right] :\left\Vert T_{\mu_{\gamma}}\right\Vert _{L^{p}-L^{q}}<\infty \right\}
$$
where the $L^{p}$-spaces are taken with respect to the Lebesgue measure on $\mathbb{R}^{2n+1}$. We say that the measure $\mu_{\gamma}$ defined in (\ref{mu2}) is $L^{p}$-\textit{improving} if $E_{\mu_{\gamma}}$ does not reduce to the diagonal $1/p=1/q$.
This problem is well known if in (\ref{tmu}) we consider $\gamma =0$ and replace the Heisenberg group
convolution with the ordinary convolution in $\mathbb{R}^{2n+1}$. If the
graph of $\varphi$ has non-zero Gaussian curvature at each point, a theorem
of Littman (see \cite{littman}) implies that $E_{\nu }$ is the closed
triangle with vertices $(0,0)$, $(1,1)$, and $\left( \frac{2n+1}{2n+2},\frac{
1}{2n+2}\right) $ (see \cite{oberlin}). A very interesting survey of results
concerning the type set for convolution operators with singular measures can
be found in \cite{ricci}.\\
Returning to our setting $\mathbb{H}^{n}$, in \cite{secco} and \cite{secco2} S. Secco obtains $L^{p}$-\textit{improving} properties of measures
supported on curves in $\mathbb{H}^{1}$, under certain assumptions. In \cite{ricci2} F. Ricci and E. Stein showed that the type set of the
measure given by (\ref{mu2}), for the case $\varphi(w)=0$, $\gamma =0$ and $n=1$, is the triangle with vertices
$(0,0),$ $(1,1),$ and $\left( \frac{3}{4},\frac{1}{4}\right)$. In \cite{G-R}, the authors adapt the work of Ricci and Stein to the case of quadratic hypersurfaces in $\mathbb{R}^{2n+1}$; there we also give some examples of surfaces with degenerate curvature at the origin.
We observe that if $\left( \frac{1}{p},\frac{1}{q}\right) \in E_{\mu_{\gamma}}$ then
\begin{equation}
p\leq q, \,\,\,\,\,\,\,\,\,\,\,\, \frac{1}{q}\geq \frac{2n+1}{p}-2n, \,\,\,\,\,\,\,\,\,\,\,\, \frac{1}{q}\geq\frac{1}{(2n+1)p}. \label{restricciones}
\end{equation}
Indeed, the first inequality follows from Lemma 3 in \cite{G-R}, replacing the sets $A_{\delta}$ and $F_{\delta, x}$ in the proof of Lemma 4 in \cite{G-R} by the sets
$$
A'_{\delta }=\left\{ (x,t)\in \mathbb{R}^{2n}\times
\mathbb{R}:x\in \widetilde{D}\wedge \left\vert t-\varphi (x)\right\vert \leq
\frac{\delta }{4}\right\}
$$
and
$$
F'_{\delta ,x}=\left\{ y\in \widetilde{D}:\left\Vert x-y\right\Vert _{\mathbb{R}
^{2n}}\leq \frac{\delta }{4n(1+\left\Vert \nabla \varphi \mid
_{supp(\eta )}\right\Vert _{\infty })}\right\}
$$ where $\widetilde{D}$ is a closed disk in $\mathbb{R}^{2n}$ contained in the unit disk centered at the origin and such that the origin does not belong to $\widetilde{D}$. We observe that the argument utilized in the proof of Lemma 4 in \cite{G-R} works in this setting, so we obtain the other two inequalities.
Since $0 < \gamma < 2n$ it is clear that $\| T_{\mu_{\gamma}} f \|_{p} \leq c \|f\|_{p}$ for every Borel function $f \in L^{p}(\mathbb{H}^{n})$ and all $1 \leq p \leq \infty$, so $(\frac{1}{p}, \frac{1}{p}) \in E_{\mu_{\gamma}}$.
In Lemma 4, section 2 below, we obtain the following necessary condition for the pair $(\frac{1}{p}, \frac{1}{q})$ to be in $E_{\mu_{\gamma}}$:
$$\frac{1}{q}\geq \frac{1}{p}-\frac{2n-\gamma }{2n+2}.$$
Let $D$ be the point of intersection, in the $(\frac{1}{p}, \frac{1}{q})$ plane, between the lines $\frac{1}{q}=\frac{2n+1}{p}
-2n$, $\frac{1}{q}=\frac{1}{p}-\frac{2n-\gamma }{2n+2}$ and let $D^{\prime }$ be its symmetric with respect to the non principal diagonal. So
\[
D=\left( \frac{4n^{2}+2n+\gamma }{2n(2n+2)},\frac{2n+(2n+1)\gamma }{2n(2n+2)}
\right) =\left( \frac{1}{p_{D}},\frac{1}{q_{D}}\right) \text{ and\
}D^{\prime }=\left( 1-\frac{1}{q_{D}},1-\frac{1}{p_{D}}\right).
\]
Thus $E_{\mu_{\gamma}}$ is contained in the closed trapezoid with vertices $(0,0)$, $(1,1)$, $D$ and $D^{\prime }$.
Finally, let $C_{\gamma}$ be the point of intersection of the lines
$\frac{1}{q}=1- \frac{1}{p}$ and $\frac{1}{q}= \frac{1}{p}- \frac{2n-\gamma}{2n+2}$, thus $C_{\gamma}= \left( \frac{4n+2-\gamma}{2(2n+2)},
\frac{2+\gamma}{2(2n+2)} \right)$.
In section 3 we prove the following results:
\begin{theorem} If $\mu_{\gamma}$ is the fractional Borel measure defined in (\ref{mu2}), supported on the graph of the function $\varphi
(w)= \sum_{j=1}^{n} a_j \left\vert w_j \right\vert ^{2}$, with $n \in \mathbb{N}$, $a_j \in \mathbb{R}$ and $w_j \in \mathbb{R}^{2}$, then the interior of the type set $E_{\mu_{\gamma}}$ coincides with the interior of the trapezoid
with vertices $(0,0)$, $(1,1)$, $D$ and $D^{\prime }$. Moreover the semi-open segments $\left[(1,1);(p_{D}^{-1},q_{D}^{-1}) \right)$ and
$\left[(0,0);(1-q_{D}^{-1},1-p_{D}^{-1}) \right)$ are contained in $E_{\mu_{\gamma}}$.
\end{theorem}
\begin{theorem} Let $\mu_{\gamma}$ be a fractional Borel measure as in Theorem 1. Then $C_{\gamma} \in E_{\mu_{\gamma}}$.
\end{theorem}
Let $\widetilde{\mu}_{\gamma}$ be the Borel measure given by
\begin{equation}
\left\langle \widetilde{\mu}_{\gamma} ,f\right\rangle
=\int\limits_{\mathbb{R}^{2n}}f\left( w, |w|^{2m}
\right) \eta \left( |w|^{2} \right) | w |^{\gamma }dw,
\label{mu3}
\end{equation}
where $m \in \mathbb{N}_{\geq 2}$, $\gamma = \frac{2(m-1)}{(n+1)m}$, and $\eta$ is a function in $C_{c}^{\infty }(\mathbb{R})$ such that $0\leq \eta \leq 1$, $\eta(t)\equiv 1$ if $t\in \lbrack -1,1]$ and $supp(\eta)\subset (-2,2)$. \\
In a similar way we characterize the type set of the Borel measure $\widetilde{\mu}_{\gamma}$ supported on the graph of the function $\varphi(w) = |w|^{2m}$. In fact we prove
\begin{theorem} Let $\widetilde{\mu}_{\gamma}$ be the Borel measure defined in (\ref{mu3}) with $\gamma = \frac{2(m-1)}{(n+1)m}$, where $n \in \mathbb{N}$
and $m \in \mathbb{N}_{\geq 2}$. Then the type set $E_{\widetilde{\mu}_{\gamma}}$ is the closed triangle with vertices
\[
A=\left( 0,0\right) ,\qquad B=\left( 1,1\right) ,\qquad C=\left( \frac{2n+1}{
2n+2},\frac{1}{2n+2}\right).
\]
\end{theorem}
This result improves the one obtained in Theorem 2 in \cite{G-R}.
\qquad
Throughout this work, $c$ will denote a positive constant not necessarily the same at each occurrence.
\section{Auxiliary results}
\begin{lemma} Let $\mu_{\gamma}$ be the fractional Borel measure defined by (\ref{mu2}), where $\varphi(w) = \sum_{j=1}^{n} a_j |w_j|^{2}$ and $0 < \gamma < 2n$. If $\left(\frac{1}{p},\frac{1}{q}\right) \in E_{\mu_{\gamma}}$, then $\frac{1}{q}\geq \frac{1}{p}-\frac{2n-\gamma}{2n+2}$.
\end{lemma}
\begin{proof} For $0<\delta \leq 1$, we define $Q_{\delta}=D_{\delta}\times \left[-(4M+n)\delta^{2},(4M+n)\delta^{2}\right]$, where
$D_{\delta}=\left\{x \in \mathbb{R}^{2n}: \left\Vert x \right\Vert \leq \delta \right\}$ and $M=\max \left\{\left\vert \varphi(y)\right\vert:y \in D_{1} \right\}.$ We put
$$A_{\delta} = \left\{(x,t) \in D_{\frac{\delta}{2}}\times \mathbb{R}: \left\vert t- \varphi(x) \right\vert \leq 2M\delta^{2}\right\}.$$
Let $f_{\delta} = \chi_{Q_{\delta}}$. We will prove first that $\left\vert (f_{\delta} \ast \mu_{\gamma})(x,t) \right\vert\geq c \delta^{2n-\gamma}$ for all $(x,t) \in A_{\delta}$, where $c$ is a constant independent of $\delta$. \\
If $(x,t) \in A_{\delta}$, we have that
\begin{equation}
(x,t)\cdot (y,\varphi(y))^{-1} \in Q_{\delta} \text{\textit{ for all }} y \in D_{\frac{\delta}{2}}; \label{qd}
\end{equation}
indeed, $(x,t)\cdot (y,\varphi(y))^{-1}=\left(x-y,t-\varphi(y)-\frac{1}{2} B(x,y) \right)$,
from the homogeneity of $\varphi$ and since
$\frac{1}{2}\left\vert B(x,y)\right\vert \leq n\left\Vert x\right\Vert _{ \mathbb{R}^{2n}}\left\Vert x-y\right\Vert _{\mathbb{R}^{2n}}$, (\ref{qd}) follows. So
\[
\left\vert(f_{\delta} \ast \mu_{\gamma})(x,t) \right\vert = \int_{\mathbb{R}^{2n}} f_{\delta}\left((x,t)\cdot(y,\varphi(y))^{-1} \right) \prod_{j=1}^{n} \eta_j(|y_j|^{2}) \left\vert y_j \right\vert^{-\frac{\gamma}{n}} dy
\]
\[
\geq \int_{D_{\frac{\delta}{2}}} \left\vert y \right\vert^{-\gamma} \prod_{j=1}^{n} \eta_j(|y_j|^{2}) dy
=\int_{D_{\frac{\delta}{2}}}\left\vert y \right\vert^{-\gamma} dy=c \delta^{2n-\gamma}
\]
for all $(x,t) \in A_{\delta}$ and all $0 < \delta < 1/2$. Thus
\[
\left\Vert f_{\delta} \ast \mu_{\gamma} \right\Vert _{q}\geq \left(\int_{A_{\delta}} \left\vert f_{\delta} \ast \mu_{\gamma} \right\vert^{q} \right)^{\frac{1}{q}}\geq c\delta ^{2n-\gamma}\left\vert A_{\delta} \right\vert ^{\frac{1}{q}} = c \delta^{2n-\gamma+\frac{1}{q}(2n+2)}.
\]
On the other hand
$\left(\frac{1}{p},\frac{1}{q}\right) \in E_{\mu_{\gamma}}$ implies
\[
\left\Vert f_{\delta} \ast \mu_{\gamma} \right\Vert _{q}\leq c \left\Vert f_{\delta} \right\Vert_{p}= c \delta^{\frac{1}{p}(2n+2)},
\]
therefore
$\delta^{2n-\gamma+\frac{1}{q}(2n+2)}\leq c
\delta^{\frac{1}{p}(2n+2)}$ for all $0 < \delta < 1$ small enough, then
$$\frac{1}{q}\geq \frac{1}{p} - \frac{2n-\gamma}{2n+2}.$$
\end{proof}
The following two lemmas deal with certain identities involving the Laguerre polynomials.
We recall the definition of these polynomials: the Laguerre polynomials $L^{\alpha}_{n}(x)$ are defined by the formula
\[
L^{\alpha}_{n}(x)= e^{x} \frac{x^{-\alpha}}{n!} \frac{d^{n}}{dx^{n}}(e^{-x} x^{n + \alpha}), \,\,\,\,\,\,\,\, n=0, 1, 2, ...
\]
for an arbitrary real number $\alpha > -1$.
\begin{lemma} If $Re(\beta)>-1$, then
$$\int\limits_{0}^{\infty }\sigma ^{\beta }L_{k}^{n-1}\left( \sigma
\right) e^{-\sigma \left( \frac{1}{2}+i\xi \right) }d\sigma
=\frac{1}{k!} \left[\frac{d^{k}}{dr^{k}} \left(
\frac{\Gamma (\beta +1)}{(1-r)^{n} \left(
\frac{1}{2}+\frac{r}{1-r}+i\xi \right)^{\beta +1}}\right) \right]_{r=0},$$
for $n \in \mathbb{N}$ and $k \in \mathbb{N} \cup \{ 0 \}$.
\end{lemma}
\begin{proof} Let $0<\epsilon <1$ be fixed. From the generating function identity (4.17.3) in \cite{Lebedev} p. 77, we have
\begin{equation}
\sum\limits_{j\geq 0}
\sigma ^{\beta }L_{j}^{n-1}\left( \sigma \right) e^{-\sigma \left( \frac{1}{2}+i\xi \right) } r^{j}=\frac{1}{(1-r)^{n}}
\sigma ^{\beta }e^{-\sigma \left( \frac{1}{2}+\frac{r}{1-r}+i\xi
\right)}, \,\,\,\,\,\,\,\,\,\, |r|<1. \label{ident3}
\end{equation}
Since $\left\vert L_{j}^{n-1}\left( \sigma \right) e^{-\sigma
\frac{1}{2}} \right\vert \leq \frac{(j+n-1)!}{j!(n-1)!}$ for all
$\sigma > 0$ (see proposition 4.2 in \cite{thangavelu}), the series in
(\ref{ident3}) is uniformly convergent on the interval
$\left[\epsilon, \frac{1}{\epsilon} \right]$. Integrating on this interval we obtain
$$
\sum\limits_{j\geq 0}\left( \int\limits_{\epsilon }^{\frac{1}{\epsilon }
}\sigma ^{\beta }L_{j}^{n-1}\left( \sigma \right) e^{-\sigma \left( \frac{1}{
2}+i\xi \right) }d\sigma \right) r^{j}=\frac{1}{(1-r)^{n}}
\int\limits_{\epsilon }^{\frac{1}{\epsilon }}\sigma ^{\beta
}e^{-\sigma \left( \frac{1}{2}+\frac{r}{1-r}+i\xi \right) }d\sigma,
$$
so
\begin{equation}
\int\limits_{\epsilon }^{\frac{1}{\epsilon }}\sigma ^{\beta
}L_{k}^{n-1}\left( \sigma \right) e^{-\sigma \left(
\frac{1}{2}+i\xi \right)
}d\sigma =\frac{1}{k!}\left[ \frac{d^{k}}{dr^{k}}\left( \frac{1}{
(1-r)^{n}}\int\limits_{\epsilon }^{\frac{1}{\epsilon }}\sigma
^{\beta }e^{-\sigma \left( \frac{1}{2}+\frac{r}{1-r}+i\xi \right)
}d\sigma \right)\right]_{r=0}. \label{ident4}
\end{equation}
Now let us compute $\left[ \frac{d^{k}}{dr^{k}}
\left( \int\limits_{\epsilon }^{\frac{1}{\epsilon
}}\sigma^{\beta} e^{-\sigma \left( \frac{1}{2}+\frac{r}{1-r}+i\xi
\right) }d\sigma \right) \right]_{r=0}$. We first compute the derivatives of the function $u\rightarrow \int\limits_{\epsilon
}^{\frac{1}{\epsilon }}\sigma ^{\beta }e^{-\sigma u }d\sigma$, where $Re(u) > 0$. We define
$\alpha_{\epsilon}(\sigma)= \sigma u$, $\sigma \in \left[\epsilon,
\frac{1}{\epsilon} \right]$, so
$$\int\limits_{\epsilon }^{\frac{1}{\epsilon
}}\sigma ^{\beta }e^{-\sigma u }d\sigma = u^{-(\beta+1)}
\int\limits_{\alpha_{\epsilon }} z^{\beta }e^{-z }dz$$
so, applying Cauchy's Theorem, we have
\begin{equation}
\int\limits_{\epsilon }^{\frac{1}{\epsilon
}}\sigma ^{\beta }e^{-\sigma u }d\sigma = u^{-(\beta+1)} \left[\int\limits_{\epsilon }^{\frac{1}{\epsilon
}} x^{\beta }e^{-x }dx + I_{1}(u,\epsilon) - I_{2}(u,\epsilon)
\right] \label{ident5}
\end{equation}
where
$$I_{1}(u,\epsilon)=\int\limits_{\left[\frac{1}{\epsilon},\frac{1}{\epsilon}u \right]} z^{\beta }e^{-z }dz$$
and
$$I_{2}(u,\epsilon)=\int\limits_{\left[\epsilon,\epsilon u \right]} z^{\beta }e^{-z}dz$$
are line integrals on $\mathbb{C}$. Now we will prove that for each $u_{0} \in
\mathbb{C}$ with $Re(u_{0})>0$ the following identity holds
\begin{equation}
\lim_{\epsilon \rightarrow 0} \left[ \frac{d^{k}}{du^{k}} I_{j}(u, \epsilon) \right]_{u=u_{0}} = 0
\label{ij}
\end{equation}
for $j=1,2$ and all $k \geq 0$. It is easy to check that
$$I_{2}(u,\epsilon)=\epsilon ^{\beta+1} \int\limits_{\left[1, u \right]} z^{\beta }e^{-\epsilon z}dz.$$
Since $Re(\beta)>-1$ we have that $\lim_{\epsilon
\rightarrow 0} I_{2}(u_{0}, \epsilon) = 0$. From the analyticity of the function $z \rightarrow z^{\beta }e^{-\epsilon z}$ on the region
$\{ z: Re(z)>0 \}$ it follows for $k \geq 1$
$$\left[ \frac{d^{k}}{du^{k}} I_{2}(u, \epsilon) \right]_{u=u_{0}}=\epsilon
^{\beta+1} \left[ \frac{d^{k-1}}{du^{k-1}} u^{\beta} e^{-\epsilon u} \right]_{u=u_{0}},$$ then $\lim_{\epsilon
\rightarrow 0} \left[ \frac{d^{k}}{du^{k}} I_{2}(u, \epsilon) \right]_{u=u_{0}} = 0$ for all $k\geq 0$. \\
Analogously, taking into account the rapid decay of the function $z\rightarrow e^{-z}$ on the region $\{z : Re(z)>0 \}$, we obtain
that $\lim_{\epsilon \rightarrow 0} \left[ \frac{d^{k}}{du^{k}} I_{1}(u, \epsilon) \right]_{u=u_{0}} =0$ for all $k\geq 0$,
so (\ref{ij}) follows. Differentiating in (\ref{ident5}), from Leibniz's formula and (\ref{ij}) it follows that
\begin{equation}
\lim_{\epsilon \rightarrow 0} \left[ \frac{d^{k}}{du^{k}} \int\limits_{\epsilon
}^{\frac{1}{\epsilon }}\sigma ^{\beta }e^{-\sigma u }d\sigma
\right]_{u=u_{0}} =\Gamma(\beta+1)\left[ \frac{d^{k}}{du^{k}} u^{-(\beta+1)}\right]_{u=u_{0}}. \label{iu}
\end{equation}
Finally, from (\ref{iu}), applying the chain rule to the function
$r\rightarrow \int\limits_{\epsilon }^{\frac{1}{\epsilon
}}\sigma ^{\beta }e^{-\sigma u(r) }d\sigma$ where
$u(r)=\frac{1}{2}+\frac{r}{1-r}+i \xi$ and Leibniz's formula, and letting $\epsilon \rightarrow 0$ in (\ref{ident4}), we obtain
$$\int\limits_{0}^{\infty }\sigma ^{\beta }L_{k}^{n-1}\left(
\sigma \right) e^{-\sigma \left( \frac{1}{2}+i\xi \right)
}d\sigma =\lim_{{\epsilon \rightarrow 0}} \int\limits_{\epsilon }^{
\frac{1}{\epsilon }}\sigma ^{\beta }L_{k}^{n-1}\left( \sigma
\right) e^{-\sigma \left( \frac{1}{2}+i\xi \right) }d\sigma
$$ $$=\frac{1}{k!} \left[\frac{d^{k}}{dr^{k}} \left(
\frac{\Gamma (\beta +1)}{(1-r)^{n} \left(
\frac{1}{2}+\frac{r}{1-r}+i\xi \right)^{\beta +1}}\right) \right]_{r=0}.$$
\end{proof}
\begin{lemma} If $Re(\beta) > -1$ and $w(\xi)= -\frac{\frac{1}{2}-i \xi}{\frac{1}{2}+i \xi}$ $(\xi \in \mathbb{R})$, then
\[
\int\limits_{0}^{\infty }\sigma ^{\beta }L_{k}^{n-1}\left(
\sigma \right) e^{-\sigma \left( \frac{1}{2}+i\xi \right) }d\sigma
=\frac{\Gamma(\beta+1)}{\left(\frac{1}{2}+i\xi \right)^{\beta+1}}
\sum_{j+l=k}
\frac{\Gamma(n-1-\beta+j)\Gamma(\beta+1+l)}{\Gamma(n-1-\beta)\Gamma(\beta+1)}
\frac{w(\xi)^{l}}{j!l!},
\]
for $n \in \mathbb{N}$ and $k \in \mathbb{N} \cup \{ 0 \}$.
\end{lemma}
\begin{proof}
We start by finding the power series centered at $r=0$ of the following function
$$Q(r)=\frac{1}{(1-r)^{n} \left( \frac{1}{2}+ \frac{r}{1-r}+i \xi \right)^{\beta+1}}, \,\,\,\,\,\,\,\,\,\, |r| < 1.$$
We observe that
$$Q(r)=\frac{1}{(1-r)^{n-\beta-1}\left(\frac{1}{2}+ i\xi+ r\left( \frac{1}{2}-i \xi \right) \right)^{\beta+1} },$$
setting $w=-\frac{\frac{1}{2}-i \xi}{\frac{1}{2}+i \xi}$, we obtain
$$Q(r)=\frac{1}{ \left(\frac{1}{2}+i \xi \right)^{\beta+1} (1-r)^{n-1-\beta}(1-rw)^{\beta+1}}.$$
A simple computation gives
$$(1-r)^{-n+\beta+1}=1+\sum_{j\geq 1}
(n-1-\beta)(n-1-\beta+1)...(n-1-\beta+j-1) \frac{r^{j}}{j!}$$
\begin{equation}
=\sum_{j\geq 0} \frac{\Gamma(n-1-\beta+j)}{\Gamma(n-1-\beta)}
\frac{r^{j}}{j!}. \label{serie2}
\end{equation}
Analogously we have
\[
(1-rw)^{-\beta-1}=\sum_{j\geq 0}
\frac{\Gamma(\beta+1+j)}{\Gamma(\beta+1)} \frac{(rw)^{j}}{j!}.
\]
Thus
$$Q(r)=\frac{1}{\left(\frac{1}{2}+i\xi \right)^{\beta+1}} \left(
\sum_{j+l \geq 0}
\frac{\Gamma(n-1-\beta+j)\Gamma(\beta+1+l)}{\Gamma(n-1-\beta)\Gamma(\beta+1)}
\frac{r^{j+l}w^{l}}{j!l!} \right).$$
Finally, from Lemma 5 it follows
$$\int\limits_{0}^{\infty }\sigma ^{\beta }L_{k}^{n-1}\left(
\sigma \right) e^{-\sigma \left( \frac{1}{2}+i\xi \right) }d\sigma
=\frac{\Gamma(\beta+1)}{\left(\frac{1}{2}+i\xi \right)^{\beta+1}}
\sum_{j+l=k}
\frac{\Gamma(n-1-\beta+j)\Gamma(\beta+1+l)}{\Gamma(n-1-\beta)\Gamma(\beta+1)}
\frac{w^{l}}{j!l!}.$$
\end{proof}
\begin{lemma} If $Re(\beta) > -1$, then
\[
\sum_{j+l=k} \frac{\Gamma(n-1-Re(\beta)+j) \Gamma(Re(\beta)+1+l)}{\Gamma(n-1-Re(\beta))\Gamma(Re(\beta)+1)} \frac{1}{j!l!}=\frac{(n+k-1)!}{(n-1)!k!},
\]
for $n \in \mathbb{N}$ and $k \in \mathbb{N} \cup \{ 0 \}$.
\end{lemma}
\begin{proof} From (\ref{serie2}) we obtain
$$\sum_{j+l=k}
\frac{\Gamma(n-1-Re(\beta)+j)
\Gamma(Re(\beta)+1+l)}{\Gamma(n-1-Re(\beta))\Gamma(Re(\beta)+1)}
\frac{1}{j!l!}$$ $$= \frac{1}{k!} \left[ \frac{d^{k}}{dr^{k}}
(1-r)^{-n+Re(\beta)+1}
(1-r)^{-Re(\beta)-1}\right]_{r=0}=\frac{1}{k!} \left[ \frac{d^{k}}{dr^{k}}
(1-r)^{-n} \right]_{r=0}.$$
Since
$$(1-r)^{-n} =\sum_{j\geq 0} \frac{\Gamma(n+j)}{\Gamma(n)}
\frac{r^{j}}{j!}=\sum_{j\geq 0} \frac{(n+j-1)!}{(n-1)!j!} r^{j},$$
we have $$\sum_{j+l=k} \frac{\Gamma(n-1-Re(\beta)+j)
\Gamma(Re(\beta)+1+l)}{\Gamma(n-1-Re(\beta))\Gamma(Re(\beta)+1)}
\frac{1}{j!l!}=\frac{(n+k-1)!}{(n-1)!k!}.$$
\end{proof}
\section{The main results}
To prove Theorem 1 we will decompose the operator $T_{\mu_{\gamma}}$ in the following way: we consider a family $\left\{T_{\mu_{k}} \right\}_{k \in \mathbb{N}}$ of operators such that
$T_{\mu_{\gamma}} = \displaystyle{\sum_{k \in \mathbb{N}}} T_{\mu_{k}}$, $\left\Vert T_{\mu_{k}} \right\Vert_{1,1} \sim 2^{-k(2n-\gamma)}$ and $\left\Vert T_{\mu_{k}} \right\Vert_{p,q} \sim 2^{k\gamma}\left\Vert T_{\mu_{0}} \right\Vert_{p,q}$ where $T_{\mu_{0}}$ is the operator defined by (\ref{tmu}), taking there $\gamma =0$ and $\varphi(w)=\sum_{j=1}^{n} a_j \left\vert w_j\right\vert^{2}$.
Then Theorem 1 will follow from Theorem 1 in \cite{G-R}, the Riesz-Thorin convexity Theorem and Lemma 4.
\\
${}$
\\
\textit{Proof of Theorem 1.} For each $k \in \mathbb{N}$ we define
\[
A_{k}= \left\{ y=(y_1, ..., y_n) \in (\mathbb{R}^{2})^{n} : 2^{-k} < \left\vert y_j \right\vert \leq 2^{-k+1}, j=1, 2,..., n \right\}
\]
Let $\mu_{k}$ be the fractional Borel measure given by
\[
\mu_{k}(E)= \int_{A_{k}} \chi_{E} \left(y, \varphi(y) \right) \prod_{j=1}^{n} \eta_j \left( |y_j|^{2} \right) | y_j |^{-\frac{\gamma}{n}} dy
\]
and let $T_{\mu_{k}}$ be its corresponding convolution operator, i.e: $T_{\mu_{k}}f=f \ast \mu_{k}$.
Now, it is clear that $\mu_{\gamma}=\sum_{k}\mu_{k}$ and $\left\Vert T_{\mu_{\gamma}} \right\Vert_{p,q} \leq \sum_{k} \left\Vert T_{\mu_{k}} \right\Vert_{p,q}$.
For $f\geq 0$ we have that
$$
\int f(y,s) d\mu_{k}(y,s) \leq 2^{k\gamma}\int_{\mathbb{R}^{2n}} f\left(y, \varphi(y) \right) \prod_{j=1}^{n} \eta_j \left( |y_j|^{2} \right) dy.$$
Thus $\left\Vert T_{\mu_{k}} \right\Vert_{p,q} \leq c 2^{k\gamma}\left\Vert T_{\mu_{0}} \right\Vert_{p,q}$, from Theorem 1 in \cite{G-R} it follows that
\[
\left\Vert T_{\mu_{k}} \right\Vert_{\frac{2n+2}{2n+1},2n+2} \leq c 2^{k\gamma}.
\]
It is easy to check that $\left\Vert T_{\mu_{k}}
\right\Vert_{1,1}\leq \left\vert \mu_{k} (\mathbb{R}^{2n+1})
\right\vert \sim \int_{A_{k}} \left\vert y \right\vert^{-\gamma} dy= c 2^{-k(2n-\gamma)}.$ \\
For $0< \theta <1$, we define
$$\left(\frac{1}{p_{\theta}}, \frac{1}{q_{\theta}} \right) = \left(\frac{2n+1}{2n+2}, \frac{1}{2n+2} \right) (1-\theta) + (1,1)\theta,$$
by the Riesz convexity Theorem we have
$$
\left\Vert T_{\mu_{k}} \right\Vert_{p_{\theta},q_{\theta}} \leq c
2^{k\gamma(1-\theta)-k(2n-\gamma) \theta}$$
choosing $\theta$ such that $k\gamma(1-\theta)-k(2n-\gamma) \theta = 0$ we get that $\displaystyle{\sup_{k \in \mathbb{N}}} \left\Vert T_{\mu_{k}}
\right\Vert_{p_{\theta},q_{\theta}} \leq c < \infty$. A simple computation gives $\theta=\frac{2n-\gamma}{2n}$, then $\left( \frac{1}{p_{\theta}}, \frac{1}{q_{\theta}} \right) = \left( \frac{1}{p_{D}}, \frac{1}{q_{D}} \right)$, so $\left\Vert T_{\mu_{k}} \right\Vert_{p_{D},q_{D}} \leq c$,
where $c$ does not depend on $k$. Interpolating once again, but now between the points $\left(\frac{1}{p_{D}}, \frac{1}{q_{D}} \right)$ and $(1,1)$ we obtain, for each $0< \tau <1$ fixed
$$
\left\Vert T_{\mu_{k}} \right\Vert_{p_{\tau},q_{\tau}} \leq c 2^{-k(2n-\gamma) \tau},$$
since $\left\Vert T_{\mu_{\gamma}} \right\Vert_{p,q} \leq \sum_{k} \left\Vert T_{\mu_{k}} \right\Vert_{p,q}$ and $0< \gamma < 2n$, it follows that
$$
\left\Vert T_{\mu_{\gamma}} \right\Vert_{p_{\tau},q_{\tau}} \leq c\sum_{k \in \mathbb{N}} 2^{-k(2n-\gamma) \tau} <\infty,$$ by duality we also have
$$\left\Vert T_{\mu_{\gamma}} \right\Vert_{\frac{q_{\tau}}{q_{\tau}-1},\frac{p_{\tau}}{p_{\tau}-1}}\leq c_{\tau} <\infty.$$
Finally, the theorem follows from the Riesz convexity Theorem, the restrictions that appear in (\ref{restricciones}) and Lemma 4.
$\square$
\qquad
To prove Theorem 2, we will consider an auxiliary operator $T_N$, with $N \in \mathbb{N}$ fixed, which will be embedded in an analytic family $T_{N,z}$ of operators on the strip $-\frac{2n-\gamma }{2+\gamma }\leq Re(z)\leq 1$ such that
\begin{equation}
\left\{
\begin{array}{c}
\left\Vert T_{N,z}\left( f\right) \right\Vert _{L^{\infty }\left( \mathbb{H}
^{n}\right) }\leq A_{z}\left\Vert f\right\Vert _{L^{1}\left( \mathbb{H}
^{n}\right) }\qquad Re(z)=1 \\
\left\Vert T_{N,z}\left( f\right) \right\Vert _{L^{2}\left( \mathbb{H}
^{n}\right) }\leq A_{z}\left\Vert f\right\Vert _{L^{2}\left( \mathbb{H}
^{n}\right) }\qquad Re(z)=-\frac{2n-\gamma }{
2+\gamma } \label{desig2}
\end{array}
\right.
\end{equation}
where $A_{z}$ will depend admissibly on the variable $z$ and it will not depend on $N$. We denote $T_N = T_{N,0}$. By Stein's theorem on complex interpolation, it will follow that the operator $T_{N}$ will be bounded from $L^{p_{\gamma}}(\mathbb{H}^{n})$ into $L^{p'_{\gamma}}(\mathbb{H}^{n})$, where
$\left(\frac{1}{p_{\gamma}}, \frac{1}{p'_{\gamma}} \right)=C_{\gamma}$, uniformly in $N$. If we see that $T_{N}f(x,t) \rightarrow c T_{\mu_{\gamma}}f(x,t)$ a.e.$(x,t)$ as $N\rightarrow \infty$, then Theorem 2 will follow from Fatou's Lemma.
To prove the second inequality in (\ref{desig2}) we will see that such a
family will admit the expression
\[
T_{N,z}(f)(x,t)=\left( f\ast K_{N,z}\right) (x,t),
\]
where $K_{N,z}\in L^{1}(\mathbb{H}^{n})$, moreover it is a \textit{polyradial}
function (i.e. the values of $K_{N,z}$ depend on $\left\vert
w_{1}\right\vert ,$...$,\left\vert w_{n}\right\vert $ and $t$). Now our
operator $T_{N,z}$ can be realized as a multiplication of operators via the
group Fourier transform, i.e.
\[
\widehat{T_{N,z}(f)}(\lambda )=\widehat{f}(\lambda )\widehat{K_{N,z}}
(\lambda )
\]
where, for each $\lambda \neq 0$, $\widehat{K_{N,z}}(\lambda )$ is an
operator on the Hilbert space $L^{2}(\mathbb{R}^{n})$ given by
\[
\widehat{K_{N,z}}(\lambda )g(\xi )=\int\limits_{\mathbb{H}
^{n}}K_{N,z}(\varsigma ,t)\pi _{\lambda }(\varsigma ,t)g(\xi )d\varsigma dt.
\]
It then follows from Plancherel's theorem for the group Fourier transform
that
\[
\left\Vert T_{N,z}f\right\Vert _{L^{2}(\mathbb{H}^{n})}\leq A_{z}\left\Vert
f\right\Vert _{L^{2}(\mathbb{H}^{n})}
\]
if and only if
\begin{equation}
\left\Vert \widehat{K_{N,z}}(\lambda )\right\Vert _{op}\leq A_{z}
\label{L21}
\end{equation}
uniformly over\textit{\ }$N$\textit{\ }and\textit{\ }$\lambda \neq 0.$ Since
$K_{N,z}$ is a polyradial integrable function, by a well known result
of Geller (see Lemma 1.3, p. 213 in \cite{geller}), the operators $\widehat{
K_{N,z}}(\lambda ):L^{2}(\mathbb{R}^{n})\rightarrow L^{2}(\mathbb{R}^{n})$
are, for each $\lambda \neq 0$, diagonal with respect to a Hermite basis for
$L^{2}(\mathbb{R}^{n})$. That is,
\[
\widehat{K_{N,z}}(\lambda )=C_{n}\left( \delta _{\gamma ,\alpha }\nu
_{N,z}(\alpha ,\lambda )\right) _{\gamma ,\alpha \in \mathbb{N}_{0}^{n}}
\]
where $C_{n}=(2\pi )^{n}$, $\alpha =(\alpha _{1},...,\alpha _{n})$, $\delta _{\gamma ,\alpha }=1$ if $\gamma = \alpha$ and $\delta _{\gamma ,\alpha }=0$ if $\gamma \neq \alpha$, and the
diagonal entries $\nu _{N,z}(\alpha _{1},...,\alpha _{n},\lambda )$ can be
expressed explicitly in terms of the Laguerre transform. We have in fact
\[
\nu_{N,z}(\alpha _{1},...,\alpha _{n},\lambda )=\int\limits_{0}^{\infty
}\,...\,\int\limits_{0}^{\infty }\,K_{N,z}^{\lambda
}(r_{1},...,r_{n})\prod_{j=1}^{n}\left( r_{j}L_{\alpha _{j}}^{0}(\frac{1}{2}
\left\vert \lambda \right\vert r_{j}^{2})e^{-\frac{1}{4}\left\vert \lambda
\right\vert r_{j}^{2}}\right) \,dr_{1}...dr_{n}
\]
where $L_{k}^{0}(s)$ are the Laguerre polynomials, i.e. $L_{k}^{0}(s)=
\sum_{i=0}^{k}\left( \frac{k!}{(k-i)!i!}\right) \frac{(-s)^{i}}{i!}$ and $
K_{N,z}^{\lambda }(\varsigma )=\int\limits_{\mathbb{R}}K_{N,z}(\varsigma
,t)e^{i\lambda t}dt.$ Now (\ref{L21}) is equivalent to
\[
\left\Vert T_{N,z}f\right\Vert _{L^{2}(\mathbb{H}^{n})}\leq A_{z}\left\Vert
f\right\Vert _{L^{2}(\mathbb{H}^{n})}
\]
if and only if
\begin{equation}
\left\vert \nu_{N,z}(\alpha _{1},...,\alpha _{n},\lambda )\right\vert \leq
A_{z} \label{L22}
\end{equation}
uniformly over\textit{\ }$N$, $\alpha _{j}$\textit{\ }and\textit{\ }$\lambda
\neq 0.$ If $Re(z)=-\frac{2n-\gamma }{2+\gamma }$, in the proof of Theorem 2 we find that (\ref{L22}) holds with $A_{z}$
independent of $N$, $\lambda \neq 0$ and $\alpha _{j}$, and then we obtain
the boundedness on $L^{2}(\mathbb{H}^{n})$ that is stated in (\ref{desig2}).
We consider the family $\{ I_{z} \}_{z \in \mathbb{C}}$ of distributions on $\mathbb{R}$ that arises by analytic continuation of the family
$\{ I_{z} \}$ of functions, initially given when $Re(z)>0$ and $s\in \mathbb{R} \setminus\{ 0 \}$ by
\begin{equation}
I_{z}(s)=\frac{2^{-\frac{z}{2}}}{\Gamma \left( \frac{z}{2}\right) }
\left\vert s\right\vert ^{z-1}. \label{iz}
\end{equation}
In particular, we have $
\widehat{I_{z}}=I_{1-z}$, also $I_{0}=c\delta $ where $\widehat{\cdot }$
denotes the Fourier transform on $\mathbb{R}$ and $\delta $ is the Dirac
distribution at the origin on $\mathbb{R}$.
Let $H\in S(\mathbb{R)}$ such that $supp(\widehat{H})\subseteq
\left( -1,1\right) $ and $\int \widehat{H}(t)dt=1$. Now we put $\phi
_{N}(t)=H(\frac{t}{N})$ thus $\widehat{\phi _{N}}(\xi )=N\widehat{H}(N\xi )$
and $\widehat{\phi _{N}}\rightarrow \delta $ in the sense of the
distribution, as $N\rightarrow \infty $.
For $z\mathbb{\in C}$ and $N\in \mathbb{N}$, we also define $J_{N,z}$ as the
distribution on $\mathbb{H}^{n}$ given by the tensor products
\begin{equation}
J_{N,z}=\delta \otimes ...\otimes \delta \otimes \left( I_{z}\ast _{\mathbb{R
}}\widehat{\phi _{N}}\right) \label{jz}
\end{equation}
where $\ast _{\mathbb{R}}$ denotes the usual convolution on $\mathbb{R}$ and
$I_{z}$ is the fractional integration kernel given by (\ref{iz}). We observe that
\begin{equation}
J_{N,0}=\delta \otimes
...\otimes \delta \otimes c\widehat{\phi _{N}}\rightarrow \delta \otimes ...\otimes \delta \otimes c\delta \label{jz2}
\end{equation}
in the sense of the distribution as $N \rightarrow \infty $.
\qquad
\textit{Proof of Theorem 2.} Let $\left\{ T_{N,z}\right\} $ be the family of operators on the strip $-\frac{2n-\gamma }{
2+\gamma }\leq Re(z)\leq 1$, given by
\[
T_{N,z}f=f\ast \mu_{\gamma, z}\ast J_{N,z},
\]
where $J_{N,z}$ is given by (\ref{jz}) and $\mu _{\gamma, z}$ by
\begin{equation}
\mu_{\gamma, z}(E)=\int\limits_{\mathbb{R}^{2n}}\chi _{E}\left(
w,\varphi(w)\right) \prod_{j=1}^{n} \eta_j \left( |w_j|^{2} \right)
\left\vert w_j \right\vert^{(z-1) \frac{\gamma}{n}}dw. \label{muz}
\end{equation}
Now (\ref{jz2}) implies that $T_{N, 0}f(x,t)\rightarrow c T_{\mu_{\gamma}}f(x,t)$ a.e.$(x,t)$ as $N\rightarrow \infty $.
\qquad
For $Re(z)=1$ we have
$$\mu _{\gamma, z}\ast J_{N,z}(x,t)= \left( I_{z}\ast _{\mathbb{R}}\widehat{\phi
_{N}}\right) \left( t-\varphi(x)\right) \prod_{j=1}^{n}\eta_{j} \left( |x_j|^{2} \right) |x_j|^{iIm(z)\frac{\gamma}{n} },$$
so $\left\Vert \mu_{\gamma, z}\ast J_{N,z}\right\Vert _{\infty }\leq c\left\vert \Gamma \left( \frac{z}{2}\right) \right\vert ^{-1}$.
Then, for $Re(z)=1$, we obtain
$$
\left\Vert T_{N,z}f\right\Vert _{\infty }\leq \left\Vert f\ast \mu_{\gamma, z}\ast J_{N,z}\right\Vert _{\infty }\leq \left\Vert f\right\Vert
_{1}\left\Vert \mu_{\gamma, z}\ast J_{N,z}\right\Vert _{\infty }\leq c\left\vert \Gamma \left( \frac{z}{2}\right) \right\vert ^{-1}\left\Vert f\right\Vert _{1}
$$
where $c$ is a positive constant independent of $N$ and $z$.
\qquad
We put $K_{N, z} = \mu _{\gamma, z}\ast J_{N,z}$, for $Re(z)=-\frac{2n-\gamma }{2+\gamma }$ we have that $K_{N,z} \in L^{1}(\mathbb{H}^{n})$. Indeed
\[
K_{N,z}(x,t)=\left( I_{z}\ast_{\mathbb{R}}\widehat{\phi _{N}}\right) \left( t-\varphi( x) \right) \prod_{j=1}^{n}\eta_{j} \left( |x_j|^{2} \right) |x_j|^{(z-1) \frac{\gamma}{n}},
\]
since $0<\gamma <2n$ it follows that $2+Re((z-1)\frac{\gamma}{n})=2-\frac{2n+2}{2+\gamma} \frac{\gamma}{n} > 0$ and so $\prod_{j=1}^{n}\eta_{j} \left( |x_j|^{2} \right) |x_j|^{(z-1) \frac{\gamma}{n}} \in L^{1}(\mathbb{R}^{2n})$; in the proof of Lemma 5 in \cite{G-R} it is shown that $\left( I_{z}\ast_{\mathbb{R}}\widehat{\phi _{N}}\right) \in L^{1}(\mathbb{R})$. These two facts imply that $K_{N,z} \in L^{1}(\mathbb{H}^{n})$.
In addition $K_{N,z}$ is a polyradial function. Thus the operator $\widehat{K_{N,z}}(\lambda )$ is diagonal with respect to a Hermite basis for $L^{2}(\mathbb{R}^{n})$, and its diagonal entries $\nu_{N,z}(\alpha,\lambda )$, with $\alpha =(\alpha_1, ..., \alpha_n) \in \mathbb{N}_{0}^{n}$, are given by
\[
\nu_{N, z}(\alpha ,\lambda )= \int\limits_{0}^{\infty
}\,...\,\int\limits_{0}^{\infty }\,K_{N,z}^{\lambda
}(r_{1},...,r_{n})\prod_{j=1}^{n}\left( r_{j}L_{\alpha _{j}}^{0}(| \lambda| r_{j}^{2}/2)e^{-\frac{1}{4}\left\vert \lambda
\right\vert r_{j}^{2}}\right) \,dr_{1}...dr_{n}
\]
\begin{equation}
= I_{1-z}(-\lambda
)\phi _{N}(\lambda ) \prod_{j=1}^{n} \int\limits_{0}^{\infty }\eta_{j}(r_j^{2})L_{\alpha_j}^{0}\left(
| \lambda |r_{j}^{2}/2\right)
e^{-\frac{1}{4}| \lambda | r_{j}^{2}}e^{i\lambda a_j
r_{j}^{2}}r_j^{1+(z-1)\frac{\gamma}{n}} dr_j. \label{diag2}
\end{equation}
Thus, it is enough to study the integral
\[
\int\limits_{0}^{\infty }\eta_{1}(r^{2})L_{\alpha_1}^{0}\left(|\lambda|r^{2}/2\right) e^{-\frac{1}{4} | \lambda|r^{2}}
e^{i\lambda a_1 r^{2}} r^{1+(z-1)\frac{\gamma}{n}} dr,
\]
where $a_1 \in \mathbb{R}$ and $\eta_1 \in C_{c}^{\infty}(\mathbb{R})$. We make the change of variable $\sigma = |\lambda|r^{2}/2$ in such an integral to obtain
\[
\int\limits_{0}^{\infty }\eta_{1}(r^{2})L_{\alpha_1}^{0}\left(|\lambda|r^{2}/2\right) e^{-\frac{1}{4} | \lambda|r^{2}}
e^{i\lambda a_1 r^{2}} r^{1+(z-1)\frac{\gamma}{n}} dr
\]
\[
= 2^{-\frac{(n+1) \gamma}{(2+\gamma)n}}| \lambda |^{-\left( 1+\frac{(z-1)\gamma}{2n}\right)} \int\limits_{0}^{\infty }\eta_{1}\left( \frac{2\sigma }{
| \lambda |}\right) L_{\alpha_1}^{0}\left( \sigma \right) e^{-
\frac{\sigma }{2}}e^{i2sgn(\lambda ) a_1 \sigma }\sigma
^{\frac{(z-1)\gamma}{2n}}d\sigma
\]
\[
= 2^{-\frac{(n+1) \gamma}{(2+\gamma)n}} | \lambda |^{-\left( 1+\frac{(z-1)\gamma}{2n} \right) }\left( F_{\alpha_1 ,\beta}G_{\lambda }\right) \widehat{\left. {}\right. }
(-2sgn(\lambda ) a_1)
\]
\[
= 2^{-\frac{(n+1) \gamma}{(2+\gamma)n}} \left\vert \lambda \right\vert ^{-\left( 1+\frac{(z-1)\gamma}{2n}\right) }(\widehat{F_{\alpha_1 ,\beta}}\ast \widehat{G_{\lambda }}
)(-2sgn(\lambda ) a_1)
\]
where $$F_{\alpha_1, \beta}(\sigma ):=\chi _{(0,\infty )}(\sigma
)L_{\alpha_1}^{0}\left( \sigma \right) e^{-\frac{\sigma }{2}}\sigma
^{\beta },$$ with $\beta =\frac{ (z-1) \gamma }{2n}$, and
$$G_{\lambda }(\sigma ):=\eta _{1}\left( \frac{
2\sigma }{\left\vert \lambda \right\vert }\right).$$
Now
\begin{equation}
\left\vert ( \widehat{F_{\alpha_1, \beta}}\ast \widehat{G_{\lambda
}})(-2sgn(\lambda ) a_1)\right\vert \leq \left\Vert
\widehat{F_{\alpha_1 ,\beta}}\ast \widehat{G_{\lambda }}\right\Vert
_{\infty }\leq \left\Vert \widehat{F_{\alpha_1 ,\beta}}\right\Vert
_{\infty }\left\Vert \widehat{ G_{\lambda }}\right\Vert
_{1}=\left\Vert \widehat{F_{\alpha_1 ,\beta}}\right\Vert _{\infty
}\left\Vert \widehat{\eta _{1}}\right\Vert _{1}. \label{fb}
\end{equation}
So it is enough to estimate $\left\Vert
\widehat{F_{\alpha_1 ,\beta}}\right\Vert _{\infty }$. Since
$$
\widehat{F_{\alpha_1 ,\beta}}(\xi )=\int\limits_{0}^{\infty }\sigma
^{\beta }L_{\alpha_1}^{0}\left( \sigma \right) e^{-\sigma \left(
\frac{1}{2}+i\xi \right) }d\sigma,
$$
from Lemma 6, with $n=1$, $k= \alpha_1$ and $\beta =\frac{ (z-1) \gamma }{2n}$ we obtain
$$\widehat{F_{\alpha_1 ,\beta}}(\xi ) = \frac{\Gamma(\beta+1)}{\left(\frac{1}{2}+i\xi \right)^{\beta+1}}
\sum_{j+l= \alpha_1} \frac{\Gamma(-\beta+j)\Gamma(\beta+1+l)}{\Gamma(-\beta)\Gamma(\beta+1)} \frac{w^{l}}{j!l!},$$
taking the modulus in this expression, and since $\left\vert w \right\vert = 1$, it follows that
$$\left\vert \widehat{F_{\alpha_1 ,\beta}}(\xi ) \right\vert \leq
\frac{\Gamma(-Re(\beta)) \Gamma(Re(\beta)+1)}{\left\vert
\left(\frac{1}{2}+i\xi \right)^{\beta+1}\right\vert \left\vert
\Gamma(-\beta) \right\vert} \sum_{j+l=\alpha_1}
\frac{\Gamma(-Re(\beta)+j)
\Gamma(Re(\beta)+1+l)}{\Gamma(-Re(\beta))\Gamma(Re(\beta)+1)}
\frac{1}{j!l!}.$$
From Lemma 7, with $n=1$ and $k = \alpha_1$ we have
$$\sum_{j+l=\alpha_1}
\frac{\Gamma(-Re(\beta)+j)
\Gamma(Re(\beta)+1+l)}{\Gamma(-Re(\beta))\Gamma(Re(\beta)+1)}
\frac{1}{j!l!}=1.$$
So
$$\left\vert \widehat{F_{\alpha_1 ,\beta}}(\xi ) \right\vert \leq \frac{\Gamma(-Re(\beta))
\Gamma(Re(\beta)+1)}{\left\vert \left(\frac{1}{2}+i\xi
\right)^{\beta+1}\right\vert \left\vert \Gamma(-\beta)
\right\vert} \leq \frac{\Gamma
\left(\frac{(n+1) \gamma}{(2+\gamma)n} \right) \Gamma
\left(\frac{2n-\gamma}{(2+\gamma)n}\right)}{(1/2)^{\frac{2n-\gamma}{(2+ \gamma)n}} e^{-\frac{Im(z) \pi \gamma}{4n}} \left\vert
\Gamma\left(\frac{(1-z)\gamma}{2n} \right) \right\vert}.$$
Finally, for $Re(z)=-\frac{2n-\gamma}{2+\gamma}$, we obtain
$$| \nu_{z,N}(k,\lambda) |\leq c_{n, \gamma} |I_{1-z}(-\lambda) \phi_{N}(\lambda)| | \lambda |^{-\left( 1+\frac{(z-1)\gamma}{2n}\right)n} \prod_{j=1}^{n} \| \widehat{F_{\alpha_j, \beta}} \|_{\infty} \| \widehat{\eta_{j}} \|_{1}$$
$$
\leq \frac{c_{n, \gamma} \,\, e^{\frac {Im(z) \pi
\gamma}{4}} \left\Vert H \right\Vert_{\infty} \left[ \Gamma
\left(\frac{(n+1) \gamma}{(2+\gamma)n} \right) \right]^{n} \left[\Gamma
\left(\frac{2n-\gamma}{(2+\gamma)n}\right) \right]^{n} \prod_{j=1}^{n} \left\Vert
\widehat{\eta_{j}} \right\Vert_{1}} {\left\vert \Gamma
\left(\frac{1-z}{2}\right) \right\vert \left\vert
\Gamma\left(\frac{(1-z)\gamma}{2n} \right) \right\vert^{n}}.$$
By (\ref{L22}) it follows, for $Re(z)=-\frac{2n-\gamma}{2+\gamma}$, that
\[
\left\Vert T_{N,z}f\right\Vert _{L^{2}(\mathbb{H}^{n})}\leq c_{n, \gamma} \,\, \frac{e^{\frac {Im(z) \pi
\gamma}{4}}}{\left\vert \Gamma \left( \frac{1-z}{2}\right) \right\vert \left\vert
\Gamma\left(\frac{(1-z)\gamma}{2n} \right) \right\vert^{n}}
\left\Vert f\right\Vert _{L^{2}(\mathbb{H}^{n})}
\]
It is easy to see, with the aid of the Stirling formula (see \cite{stein4},
p. 326), that the family $\left\{ T_{N,z}\right\} $ satisfies, on the strip $
-\frac{2n-\gamma}{2+\gamma} \leq Re(z) \leq 1$, the hypothesis of the complex interpolation theorem
(see \cite{stein2} p. 205) and so $T_{N,0}$ is bounded from $L^{p_{\gamma}}(\mathbb{H}^{n})$ into $L^{p_{\gamma}'}(\mathbb{H}^{n})$ uniformly on $N$, where
$\left( \frac{1}{p_{\gamma}}, \frac{1}{p_{\gamma}'} \right) = C_{\gamma}$,
then letting $N$ tend to infinity, we obtain that the operator $T_{\mu_{\gamma}}$ is
bounded from $L^{p_{\gamma}}(\mathbb{H}^{n})$ into $L^{p_{\gamma}'}(\mathbb{H}^{n})$.
$\square$
\qquad
We re-establish Theorem 1 and Theorem 2 in the following
\begin{theorem} Let $\mu_{\gamma}$ be a fractional Borel measure as in Theorem 1. Then the interior of $E_{\mu_{\gamma}}$ is the open trapezoidal region with vertices $(0,0)$, $(1,1)$, $D$ and $D'$. Moreover $C_{\gamma}$ and the closed segments joining $D'$ with $(0,0)$ and $D$ with $(1,1)$ except maybe $D$ and $D'$ are contained in $E_{\mu_{\gamma}}$.
\end{theorem}
\qquad
\textit{Proof of Theorem 3.} We consider, for each $N \in \mathbb{N}$ fixed, the analytic family $\{ U_{N, z} \}$ of operators on the strip $-n \leq Re(z) \leq 1$, defined by $U_{N, z}f = f \ast \widetilde{\mu}_{(1-z)\gamma} \ast J_{N,z}$ where $\widetilde{\mu}_{(1-z)\gamma}$ i given by (\ref{mu3}), $J_{N, z}$ by (\ref{jz}) and $U_{N, 0}f(x,t) \rightarrow U_{\widetilde{\mu}_{\gamma}}f(x,t) := (f \ast \widetilde{\mu}_{\gamma})(x,t)$ a.e.$(x,t)$ as $N \rightarrow \infty$. Proceeding as in the proof of Theorem 2 it follows, for $Re(z) = 1$, that $\| U_{N, z} \|_{1, \infty} \leq c \left| \Gamma (z/2) \right|^{-1}$. Also it is clear that, for $Re(z) = -n$, the kernel
$\widetilde{\mu}_{(1-z)\gamma} \ast J_{N,z} \in L^{1}(\mathbb{H}^{n})$ and it is a radial function. Now, our operator $\widehat{(\widetilde{\mu}_{(1-z)\gamma} \ast J_{N,z})}(\lambda)$ is diagonal, with diagonal entries $\nu_{z,N}(k, \lambda)$ given by
\[
\nu_{z,N}(k,\lambda )=\frac{k!}{(k+n-1)!} \int\limits_{0}^{\infty} (\widetilde{\mu}_{(1-z)\gamma} \ast J_{N,z})(s, \widehat{-\lambda}) L_{k}^{n-1}(|\lambda|s^{2}/2) e^{-|\lambda|s^{2}/4} s^{2n-1} ds
\]
\[
=\frac{k!}{(k+n-1)!}I_{1-z}(-\lambda
)\phi _{N}(\lambda )\int\limits_{0}^{\infty }\eta(s^{2}) L_{k}^{n-1}(|\lambda|s^{2}/2) e^{-|\lambda|s^{2}/4} e^{i\lambda
s^{2m}}s^{2n-1+(1-z)\gamma }ds.
\]
Now we study this integral, the change of variable $\sigma = |\lambda| s^{2}/2$ gives
\[
\int\limits_{0}^{\infty }\eta(s^{2}) L_{k}^{n-1}(|\lambda|s^{2}/2) e^{-|\lambda|s^{2}/4} e^{i\lambda
s^{2m}}s^{2n-1+(1-z)\gamma }ds
\]
\[
=2^{n-1+ \frac{(1-z)\gamma}{2}} |\lambda|^{-\left(n + \frac{(1-z)\gamma}{2}\right)} \int\limits_{0}^{\infty }\eta\left( \frac{2\sigma}{ |\lambda|} \right) L^{n-1}_{k}(\sigma) e^{-\frac{\sigma}{2}} e^{i (2\sigma)^{m} |\lambda|^{1-m} sgn(\lambda)}
\sigma^{n-1+ \frac{(1-z)\gamma}{2}} d\sigma
\]
\[
=2^{n-1+ \frac{(1-z)\gamma}{2}} |\lambda|^{-\left(n + \frac{(1-z)\gamma}{2}\right)}\left(
\widehat{F_{k,\beta}}\ast \left( \widehat{G_{\lambda }}\ast
\widehat{R_{\lambda }}\right) \right) (0),
\]
where
$$F_{k,\beta}(\sigma ):=\chi _{(0,\infty )}(\sigma
)L_{k}^{n-1}\left( \sigma \right) e^{-\frac{\sigma }{2}}\sigma
^{\beta },$$ with $\beta =n-1+\frac{ (1-z)\gamma}{2}$,
$$G_{\lambda }(\sigma ):=\eta\left( 2\sigma / |\lambda|\right)$$ and
$$R_{\lambda }(\sigma )=\chi
_{(0,\left\vert \lambda \right\vert )}(\sigma
)e^{i2^{m}sgn(\lambda )\left\vert \lambda \right\vert ^{1-m}\sigma
^{m}}.$$
Now
\begin{equation}
\left\Vert \widehat{F_{k,\beta}}\ast \left( \widehat{G_{\lambda }}\ast \widehat{
R_{\lambda }}\right) \right\Vert_{\infty } \leq \left\Vert \widehat{F_{k,\beta}}
\right\Vert _{1}\left\Vert \widehat{G_{\lambda }}\right\Vert
_{1}\left\Vert \widehat{R_{\lambda }}\right\Vert _{\infty
}\label{fgr}
\end{equation}
So it is enough to estimate the right side of this inequality. From Lemma 6 and Lemma 7 we obtain
$$\left\vert \widehat{F_{k,\beta}}(\xi ) \right\vert \leq \frac{\Gamma(n-1-Re(\beta))
\Gamma(Re(\beta)+1)}{\left\vert \left(\frac{1}{2}+i\xi
\right)^{\beta+1}\right\vert \left\vert \Gamma(n-1-\beta)
\right\vert} \frac{(n+k-1)!}{(n-1)!k!}.$$
Since $Re(z)=-n$,
$\gamma = \frac{2(m-1)}{(n+1)m}$ and $\beta =n-1+\frac{ (1-z)\gamma}{2}$ we have $Re(\beta) = n- \frac{1}{m},$ thus it follows that
\begin{equation}
\left\Vert \widehat{F_{k,\beta}} \right\Vert_{1} \leq \frac{c \,
e^{\frac{\vert Im(z) \vert \gamma \pi}{4}}} {\left\vert \Gamma
\left(\frac{z-1}{2}\gamma \right) \right\vert} \,
\frac{(n+k-1)!}{(n-1)!k!} \, \int\limits_{0}^{\infty}
\frac{1}{ \left(\frac{1}{4}+\xi^{2} \right)^{\frac{1}{2}
\left(n+1-\frac{1}{m} \right)}} d\xi, \label{estif}
\end{equation}
the last integral is finite for all $n \geq 1$ and all $m \geq 2$.
It is clear that $\left\Vert \widehat{G_{\lambda }}\right\Vert_{1}=\left\Vert
\widehat{\eta} \right\Vert_{1}$. Now, we estimate $\left\Vert \widehat{R_{\lambda }}\right\Vert _{\infty
}$. Taking into account Proposition 2, p. 332, in \cite{stein3} we note that
\begin{equation}
\left\vert \widehat{R_{\lambda }}(\xi )\right\vert =\left\vert
\int\limits_{0}^{\left\vert \lambda \right\vert }e^{i(2^{m}sgn(\lambda
)\left\vert \lambda \right\vert ^{1-m}\sigma ^{m}-\xi \sigma )}d\sigma
\right\vert \leq
\frac{C_{m}}{\left\vert \lambda \right\vert ^{\frac{1-m}{m}}}
\label{estir}
\end{equation}
where the constant $C_m$ does not depend on $\lambda$. Then, for $Re(z)=-n$,
from (\ref{fgr}), (\ref{estif}) and (\ref{estir}) we obtain
$$|\nu_{z,N}(k, \lambda)| \leq c \, \frac{k!}{(k+n-1)!} \, |I_{1-z}(-\lambda) \phi_{N}(\lambda)| |\lambda|^{-\left(n + \frac{(1+n)\gamma}{2} \right)}\left\Vert \widehat{F_{k,\beta}}\ast \left( \widehat{G_{\lambda }}\ast \widehat{
R_{\lambda }}\right) \right\Vert_{\infty }$$
$$\leq c_{n,m} \Vert H
\Vert_{\infty} \left\Vert \widehat{\eta} \right\Vert_{1}
\frac{ e^{\frac{\vert Im(z) \vert \gamma \pi}{4}}} {\left\vert \Gamma
\left(\frac{1-z}{2} \right) \right\vert \left\vert \Gamma
\left(\frac{z-1}{2}\gamma \right) \right\vert} \int\limits_{0}^{\infty}
\frac{1}{ \left(\frac{1}{4}+\xi^{2} \right)^{\frac{1}{2}
\left(n+1-\frac{1}{m} \right)}} d\xi.
$$
By (\ref{L22}) it follows, for $Re(z)=-n$, that
\[
\| U_{N,z} f \|_{L^{2}(\mathbb{H}^{n})} \leq c_{n,m} \, \frac{ e^{\frac{\vert Im(z) \vert \gamma \pi}{4}}} {\left\vert \Gamma
\left(\frac{1-z}{2} \right) \right\vert \left\vert \Gamma
\left(\frac{z-1}{2}\gamma \right) \right\vert} \|f\|_{L^{2}(\mathbb{H}^{n})}.
\]
It is clear that the family $\{ U_{N,z} \}$ satisfies, on the strip $-n \leq Re(z) \leq 1$, the hypothesis of the complex interpolation theorem. Thus $U_{N,0}$ is bounded from $L^{\frac{2n+2}{2n+1}}(\mathbb{H}^{n})$ into $L^{2n+2}(\mathbb{H}^{n})$ uniformly in $N$, and letting $N$ tend to infinity we conclude that the operator $U_{\widetilde{\mu}_{\gamma}}$ is bounded from $L^{\frac{2n+2}{2n+1}}(\mathbb{H}^{n})$ into $L^{2n+2}(\mathbb{H}^{n})$ for $n\in \mathbb{N}$. Finally, the theorem follows from the restrictions that appear in (\ref{restricciones}).
$\square$
\end{document} |
\begin{document}
\title{INC: A Scalable Incremental Weighted Sampler}
\author{
\IEEEauthorblockN{Suwei Yang}
\IEEEauthorblockA{
\textit{National University of Singapore}\\
Singapore
}
\and
\IEEEauthorblockN{Victor C. Liang}
\IEEEauthorblockA{\textit{GrabTaxi Holdings Pte. Ltd.} \\
Singapore
}
\and
\IEEEauthorblockN{Kuldeep S. Meel}
\IEEEauthorblockA{
\textit{National University of Singapore}\\
Singapore
}
}
\maketitle
\begin{abstract}
The fundamental problem of weighted sampling involves sampling satisfying assignments of Boolean formulas, which specify the sampling sets, according to distributions defined by pre-specified weight functions. The tight integration of sampling routines in various applications has highlighted the need for samplers to be incremental, i.e., samplers are expected to handle updates to weight functions.
The primary contribution of this work is an efficient knowledge compilation-based weighted sampler, {\inc}\footnote{code available at \url{https://github.com/grab/inc-weighted-sampler/}}, designed for incremental sampling. {\inc} builds on top of the recently proposed knowledge compilation language, OBDD[$\wedge$], and is accompanied by rigorous theoretical guarantees. Our extensive experiments demonstrate that {\inc} is faster than the state-of-the-art approach for the majority of the evaluation benchmarks. In particular, we observed a median of $1.69\times$ runtime improvement over the prior state-of-the-art approach.
\end{abstract}
\begin{IEEEkeywords}
knowledge compilation, sampling, weighted sampling
\end{IEEEkeywords}
\section{Introduction} \label{sec:introduction}
Given a Boolean formula $F$ and weight function $W$, weighted sampling involves sampling from the set of satisfying assignments of $F$ according to the distribution defined by $W$. Weighted sampling is a fundamental problem in many fields such as computer science, mathematics and physics, with numerous applications. In particular, constrained-random simulation forms the bedrock of modern hardware and software verification efforts~\cite{KK07}.
Sampling techniques are fundamental building blocks, and there has been sustained interest in the development of sampling tools and techniques. Recent years witnessed the introduction of numerous sampling tools and techniques, from approximate sampling techniques to uniform samplers SPUR and KUS, and weighted sampler WAPS~\cite{JS1996,SSL15,AHT18,SGRM18,GSRM19}. Sampling tools and techniques have seen continuous adoption in many applications and settings~\cite{NRJKVMS07,GPMXWOCB14,KW19,BLM20,PLVSMMVKG20,BCMS21}. The scalability of a sampler is a consideration that directly affects its adoption rate. Therefore, improving scalability continues to be a key objective for the community focused on developing samplers.
The tight integration of sampling routines in various applications has highlighted the need for samplers to handle incremental weight updates over multiple sampling rounds, also known as incremental weighted sampling. Existing efforts on improving scalability typically focus on single round weighted sampling, and might have overlooked the incremental setting. In particular, existing approaches involving incremental weighted sampling typically employ off-the-shelf weighted samplers, which could lead to less than ideal incremental sampling performance.
The primary contribution of this work is an efficient scalable weighted sampler {\inc} that is designed from the ground up to address scalability issues in incremental weighted sampling settings. The core architecture of {\inc} is based on knowledge compilation (KC) paradigm, which seeks to succinctly represent all satisfying assignments of a Boolean formula with a directed acyclic graph (DAG)~\cite{DM02}. In the design of {\inc}, we make two core decisions that are responsible for outperforming the current state-of-the-art weighted sampler. Firstly, we build {\inc} on top of {\prob} (Probabilistic OBDD[$\land$]~\cite{LLY17}) which is substantially smaller than the KC diagram used in the prior state-of-the-art approaches. Secondly, {\inc} is designed to perform \textit{annotation}, which refers to the computation of joint probabilities, in log-space to avoid the slower alternative of using arbitrary precision math computations.
Given a Boolean formula $F$ and weight function $W$, {\inc} compiles and stores the compiled {\prob} in the first round of sampling. The weight updates for subsequent incremental sampling rounds are processed without recompilation, amortizing the compilation cost.
Furthermore, for each sampling round, {\inc} simultaneously performs \textit{annotation} and sampling in a single bottom-up pass of the {\prob}, achieving speedup over existing approaches. We observed that {\inc} is significantly faster than the existing state-of-the-art in the incremental sampling routine. In our empirical evaluations, {\inc} achieved a median of $1.69\times$ runtime improvement over the state-of-the-art weighted sampler, WAPS~\cite{GSRM19}. Additional performance breakdown analysis supports our design choices in the development of {\inc}. In particular, {\prob} is on median $4.64\times$ smaller than the KC diagram used by the competing approach, and log-space \textit{annotation} computations are on median $1.12\times$ faster than arbitrary precision computations. Furthermore, {\inc} demonstrated significantly better handling of incremental sampling rounds, with incremental sampling rounds to be on median $5.9\%$ of the initial round, compared to $67.6\%$ for WAPS.
The rest of the paper is organized as follows. We first introduce the relevant background knowledge and related works in Section~\ref{sec:background}. We then introduce {\prob} and its properties in Section~\ref{sec:prob}. In Section~\ref{sec:inc}, we introduce our weighted sampler {\inc}, detail important implementation decisions, and provide theoretical analysis of {\inc}. We then describe the extensive empirical evaluations and discuss the results in Section~\ref{sec:experiments}. Finally, we conclude in Section~\ref{sec:future-works}.
\section{Background and Related Work} \label{sec:background}
\paragraph*{\textbf{Knowledge Compilation}}
Knowledge compilation (KC) involves representing logical formulas as directed acyclic graphs (DAG), which are commonly referred to as knowledge compilation diagrams~\cite{DM02}. The goal of knowledge compilation is to allow for tractable computation of certain queries such as model counting and weighted sampling. There are many well-studied forms of knowledge compilation diagrams such as d-DNNF, SDD, BDD, ZDD, OBDD, AOBDD, and the likes~\cite{L1959,B1986,M1993,A01,A02,MDM08,A11}. In this work, we build our weighted sampler upon a variant of OBDD known as OBDD[$\land$]~\cite{LLY17}.
\paragraph*{\textbf{OBDD[$\land$]}}
Lee~\cite{L1959} introduced Binary Decision Diagram (BDD) as a way to represent Shannon expansion~\cite{B1854}. \cite{B1986} introduced fixed variable orderings to BDDs (known as OBDD)~\cite{B1986} for canonical representation and compression of BDDs via shared sub-graphs. Lai et al.~\cite{LLY17} introduced conjunction nodes to OBDDs (known as OBDD[$\land$])~\cite{LLY17} to further reduce the size of the resultant DAG to represent a given Boolean formula. In this work, we parameterize an OBDD[$\land$] to form a {\prob} that is used for weighted sampling.
\paragraph*{\textbf{Sampling}}
A Boolean variable $x$ can be assigned either \textit{true} or \textit{false}, and its literal refers to either $x$ or its negation. A Boolean formula is in conjunctive normal form (CNF) if it is a conjunction of clauses, with each clause being a disjunction of literals. A Boolean formula $F$ is satisfiable if there exists an assignment $\tau$ of its variables such that $F$ evaluates to \textit{true}. The model count of Boolean formula $F$ refers to the number of distinct satisfying assignments of $F$.
Weighted sampling is concerned with sampling elements from a distribution according to non-negative weights provided by a user-defined weight function $W$. In the context of this work, weighted sampling refers to the process of sampling from the space of satisfying assignments of a Boolean formula $F$. The weight function $W$ assigns a non-negative weight to each literal $l$ of $F$. The weight of an assignment $\tau$ is defined as the product of the weight of its literals.
\paragraph*{\textbf{WAPS}}
KUS~\cite{SGRM18} utilizes knowledge compilation techniques, specifically Deterministic Decomposable Negation Normal Form (d-DNNF)~\cite{A02}, to perform uniform sampling in 2 passes of the d-DNNF. \textit{Annotation} is performed in the first pass, followed by sampling. WAPS~\cite{GSRM19} improves upon KUS by enabling weighted sampling via parameterization of the d-DNNF. WAPS performs sampling in a similar manner to KUS, the main difference being that the \textit{annotation} step in WAPS takes into account the provided weight function. In contrast, we introduce {\inc} which performs weighted sampling in a single pass by leveraging the DAG structure of {\prob}.
Knowledge compilation-based samplers typically perform incremental sampling as follows. The sampling space is first expressed as satisfying assignments of a Boolean formula, which is then compiled into the respective knowledge compilation form. In the following step, samples are drawn according to the given weight function $W$. Subsequently, the weights are updated depending on application logic and weighted sampling is performed again. The process is repeated until an application-specific stopping criterion is met. An example of such an application would be the Baital framework~\cite{BLM20}, developed to use incremental weighted sampling to generate test cases for configurable systems.
\section{{\prob}: Probabilistic OBDD[$\land$]} \label{sec:prob}
{\prob} is a DAG composed of four types of nodes - \textit{conjunction}, \textit{decision}, \textit{true} and \textit{false} nodes. The internal nodes of a {\prob} consist of conjunction and decision nodes whereas the leaf nodes of the {\prob} consist of true and false nodes. A {\prob} is recursively made up of sub-{\prob}s that represent sub-formulas of Boolean formula $F$. We use $\varset{n}$ to refer to the set of variables of $F$ represented by a {\prob} with $n$ as the root node. $\subdiagram{n}$ refers to the sub-{\prob} starting at node $n$ and $\parent{n}$ refers to the immediate parent of node $n$ in {\prob}.
\subsection{{\prob} Structure}
\paragraph*{\textbf{Conjunction node ($\land$-node)}}
A $\land$-node $n_c$ represents conjunctions in the assignment space. There are no limits to the number of child nodes that $n_c$ can have. However, the set of variables ($\varset{\cdot}$) of each child node of $n_c$ must be disjoint. An example of a $\land$-node would be $n2$ in Figure~\ref{fig:smooth-prob-example}. Notice that $\varset{n4} = \{z\}$ and $\varset{n5} = \{y\}$ are disjoint.
\paragraph*{\textbf{Decision node}}
A decision node $n_d$ represents decisions on the associated Boolean variable $\var{n_d}$ in Boolean formula $F$ that the {\prob} represents. A decision node can have exactly two children - \textit{lo-child} ($\lo{n_d}$) and \textit{hi-child} ($\hi{n_d}$). $\lo{n_d}$ represents the assignment space when $\var{n_d}$ is set to \textit{false} and $\hi{n_d}$ represents otherwise. $\theta_{n_{d_{hi}}}$ and $\theta_{n_{d_{lo}}}$ refer to the parameters associated with the edge connecting decision node $n_d$ with $\hi{n_d}$ and $\lo{n_d}$ respectively in a {\prob}. Node $n1$ in Figure~\ref{fig:smooth-prob-example} is a decision node with $\var{n1} = x$, $\hi{n1} = n3$ and $\lo{n1} = n2$.
\paragraph*{\textbf{True and False nodes}}
True ($\top$) and false ($\bot$) nodes are leaf nodes in a {\prob}. Let $\tau$ be an assignment of all variables of Boolean formula $F$ and let {\prob} $\psi$ represent $F$. $\tau$ corresponds to a traversal of $\psi$ from the root node to leaf nodes. The traversal follows $\tau$ at every decision node and visits all child nodes of every conjunction node encountered along the way. $\tau$ is a satisfying assignment if all parts of the traversal eventually lead to the true node. $\tau$ is not a satisfying assignment if any part of the traversal leads to the false node. With reference to Figure~\ref{fig:smooth-prob-example}, let $\tau_1 = \{x, y, \neg z\}$ and $\tau_2 = \{x, y, z\}$. For $\tau_1$, the traversal would visit $n1, n3, n6, n7, n9$, and $\tau_1$ is a satisfying assignment since the traversal always leads to $\top$ node ($n9$). As a counter-example, $\tau_2$ is not a satisfying assignment with its corresponding traversal visiting $n1, n3, n6, n7, n8, n9$. $\tau_2$ traversal visits $\bot$ node ($n8$) because variable $z \mapsto \textit{true}$ in $\tau_2$ and $\hi{n6}$ is node $n8$.
\begin{figure}
\caption{A smooth {\prob}}
\label{fig:smooth-prob-example}
\end{figure}
\subsection{{\prob} Parameters} \label{subsec:prob-param}
In the {\prob} structure, each decision node $n_d$ has two parameters $\theta_{\lo{n_d}}$ and $\theta_{\hi{n_d}}$, associated with the two branches of $n_d$, which sum up to $1$. $\theta_{\lo{n_d}}$ is the normalized weight of the literal $\neg \var{n_d}$ and similarly, $\theta_{\hi{n_d}}$ is that of the literal $\var{n_d}$. One can view $\theta_{\lo{n_d}}$ to be the probability of picking $\neg \var{n_d}$ and $\theta_{\hi{n_d}}$ to be that of picking $\var{n_d}$ by the \textit{determinism} property introduced later. Let $x_i$ be $\var{n_d}$. Given a weight function $W$:
\begin{align*}
& \theta_{\lo{n_d}} = \frac{W(\neg x_i )}{W(\neg x_i ) + W(x_i)} & \theta_{\hi{n_d}} = \frac{W(x_i)}{W(\neg x_i ) + W(x_i)}
\end{align*}
\subsection{{\prob} Properties}
The {\prob} structure has important properties such as \textit{determinism} and \textit{decomposability}. In addition to the \textit{determinism} and \textit{decomposability} properties, we ensure that {\prob}s used in this work have the \textit{smoothness} property through a smoothing process (Algorithm~\ref{alg:smooth}).
\begin{property}[Determinism]
For every decision node $n_d$, the set of satisfying assignments represented by $\hi{n_d}$ and $\lo{n_d}$ are logically disjoint.
\end{property}
\begin{property}[Decomposability]
For every conjunction node $n_c$, $\varset{c_i} \cap \varset{c_j} = \emptyset$ for all $c_i$ and $c_j$ where $c_i, c_j \in \child{n_c}$ and $c_i \not= c_j$.
\end{property}
\begin{property}[Smoothness]
For every decision node $n_d$, $\varset{\hi{n_d}} = \varset{\lo{n_d}}$.
\end{property}
\subsection{Joint Probability Calculation with {\prob}}
In Section~\ref{subsec:prob-param}, we mention that one can view the branch parameters as the probability of choosing between the positive and negative literal of a decision node. Notice that because of the \textit{decomposability} and \textit{determinism} properties of {\prob}, it is straightforward to calculate the joint probabilities at every given node. At each conjunction node $n_c$, since the variable sets of the child nodes of $n_c$ are disjoint by \textit{decomposability}, the joint probability of $n_c$ is simply the product of joint probabilities of each child node. At each decision node $n_d$, there are only two possible outcomes on $\var{n_d}$ - positive literal $\var{n_d}$ or negative literal $\neg \var{n_d}$. By \textit{determinism} property, the joint probability is the sum of the two possible scenarios. Formally, the calculations for joint probabilities $P'$ at each node in {\prob} are as follows:
\begin{align}
P'\text{ of $\land$-node }n_c &= \prod_{c \in \child{n_c}} P'(c) \label{eq:eq1}\tag{EQ1}
\\
P'\text{ of decision-node }n_d &= \theta_{\lo{n_d}} \times P'(\lo{n_d}) \label{eq:eq2}\tag{EQ2}
\\
& + \theta_{\hi{n_d}} \times P'(\hi{n_d}) \notag
\end{align}
For true node $n$, $P'(n)=1$ because it represents satisfying assignments when reached. In contrast $P'(n)=0$ when $n$ is a \textit{false} node as it represents non-satisfying assignments. In Proposition~\ref{prop:sampling-correctness}, we show that weighted sampling is equivalent to sampling according to joint probabilities of satisfying assignments of a {\prob}.
\section{{\inc} - Sampling from {\prob}} \label{sec:inc}
In this section, we introduce {\inc} - a bottom-up algorithm for weighted sampling on {\prob}. We first describe {\inc} for drawing one sample and subsequently describe how to extend {\inc} to draw $k$ samples at once. We also provide proof of correctness that {\inc} is indeed performing weighted sampling. As a side note, samples are drawn with replacement, in line with the existing state-of-the-art weighted sampler~\cite{GSRM19}.
\subsection{Preprocessing {\prob}}
In the main sampling algorithm (Algorithm~\ref{alg:inc-sampling}) to be introduced later in this section, the input is a smooth {\prob}. As a preprocessing step, we introduce {\smooth} algorithm that takes in a {\prob} $\psi$ and performs smoothing.
\begin{algorithm}[htb]
\begin{flushleft}
\textbf{Input}: {\prob} $\psi$\\
\textbf{Output}: smooth {\prob}
\end{flushleft}
\begin{algorithmic}[1]
\STATE $\kappa \leftarrow \mathsf{initMap()}$
\FOR{node $n$ of $\psi$ in bottom-up order}
\IF{$n$ is \textit{true} node or \textit{false} node}
\STATE $\kappa[n] \leftarrow \emptyset$
\ELSIF{$n$ is $\land$-node}
\STATE $\kappa[n] \leftarrow$ $\mathsf{unionVarSet}$(${\child{n}}, \kappa$)
\ELSE
\IF{$\kappa[{\hi{n}}] - \kappa[{\lo{n}}] \neq \emptyset$} \label{line:smooth-lo}
\STATE lset $\leftarrow \kappa[{\hi{n}}] - \kappa[{\lo{n}}]$
\STATE lcNode $\leftarrow$ new $\land$-node()
\STATE lcNode.$\mathsf{addChild({\lo{n}})}$
\FOR{var $v$ in lset}
\STATE dNode $\leftarrow \mathsf{checkMakeTrueDecisionNode}(v)$
\STATE lcNode.$\mathsf{addChild}$(dNode)
\ENDFOR
\STATE ${\lo{n}}
\leftarrow $lcNode
\ENDIF
\IF{$\kappa[{\lo{n}}] - \kappa[{\hi{n}}] \neq \emptyset$} \label{line:smooth-hi}
\STATE rset $\leftarrow \kappa[{\lo{n}}] - \kappa[{\hi{n}}]$
\STATE rcNode $\leftarrow$ new $\land$-node()
\STATE rcNode.$\mathsf{addChild({\hi{n}})}$
\FOR{var $v$ in rset}
\STATE dNode $\leftarrow \mathsf{checkMakeTrueDecisionNode}(v)$
\STATE rcNode.$\mathsf{addChild}$(dNode)
\ENDFOR
\STATE ${\hi{n}}
\leftarrow $rcNode
\ENDIF
\STATE $\kappa[n] \leftarrow {\var{n}} \cup \mathsf{unionVarSet}$($\{{\hi{n}}, {\lo{n}}\}$)
\ENDIF
\ENDFOR
\STATE \textbf{return} $\psi$
\end{algorithmic}
\caption{$\mathsf{Smooth}$ - returns a smoothed {\prob}}
\label{alg:smooth}
\end{algorithm}
The $\mathsf{Smooth}$ algorithm processes the nodes in the input {\prob} $\psi$ in a bottom-up manner while keeping track of $\varset{n}$ for every node $n$ in $\psi$ using a map $\kappa$. \textit{True} and \textit{false} nodes have $\emptyset$ as their variable set, as they are leaf nodes and do not represent any variables. At each conjunction node, its variable set is the union of variable sets of its child nodes.
The smoothing happens at decision node $n$ in $\psi$ when $\varset{\lo{n}}$ and $\varset{\hi{n}}$ do not contain the same set of variables as shown by lines~\ref{line:smooth-lo} and~\ref{line:smooth-hi} of Algorithm~\ref{alg:smooth}. In the smoothing process, a new conjunction node (\textit{lcNode} for ${\lo{n}}$ and \textit{rcNode} for ${\hi{n}}$) is created to replace the corresponding child of $n$, with the original child node now set as a child of the conjunction node. Additionally, for each of the missing variables $v$, a decision node representing $v$ is created and added as a child of the respective conjunction node. The decision nodes created during smoothing have both their lo-child and hi-child set to the \textit{true} node. To reduce memory footprint, we check if there exists the same decision node before creating it in the $\mathsf{checkMakeTrueDecisionNode}$ function.
\begin{figure}
\caption{A {\prob}}
\label{fig:Prob-non-smooth}
\end{figure}
As an example, we refer to $\psi_2$ in Figure~\ref{fig:Prob-non-smooth}. It is obvious that $\psi_2$ is not smooth, because $\varset{\lo{n1}} = \{y\}$ and $\varset{\hi{n1}} = \{z\}$. In the smoothing process, we replace ${\lo{n1}}$ with a new conjunction node $n2$ and add a decision node $n4$ representing missing variable $z$, with both children set to the \textit{true} node $n9$. We repeat the steps for ${\hi{n1}}$ to arrive at {\prob} $\psi_1$ in Figure~\ref{fig:smooth-prob-example}.
\subsection{Sampling Algorithm}
\begin{algorithm}[htb]
\begin{flushleft}
\textbf{Input}: smooth {\prob} $\psi$\\
\textbf{Output}: a sampled satisfying assignment
\end{flushleft}
\begin{algorithmic}[1]
\STATE cache $\omega$ $\leftarrow$ $\mathsf{initCache()}$
\STATE joint prob cache $\varphi$ $\leftarrow$ $\mathsf{initCache()}$
\STATE $\psi'$ $\leftarrow$ $\mathsf{hideFalseNode}(\psi)$ \label{line:zddc-preprocess-roulette}
\FOR{node $n$ of $\psi'$ in bottom-up order}
\IF{$n$ is \textit{true} node}
\STATE $\omega[n] \leftarrow \emptyset$
\STATE $\varphi[n] \leftarrow 1$ \label{line:true-node-calc}
\ELSIF{$n$ is $\land$-node}
\STATE $\omega[n] \leftarrow$ $\mathsf{unionChild}$(${\child{n}}, \omega$) \label{line:union-conjunction-child-roulette}
\STATE $\varphi[n] \leftarrow \prod_{c \in {\child{n}}} \varphi[c]$ \label{line:conjunction-node-calc}
\ELSE
\STATE $p_{lo} \leftarrow \theta_{\lo{n}} \times \varphi[{\lo{n}}]$ \label{line:sample-decision-start-roulette}
\STATE $p_{hi} \leftarrow \theta_{\hi{n}} \times \varphi[{\hi{n}}]$
\STATE $p_{joint} \leftarrow p_{lo} + p_{hi}$
\STATE $\varphi[n] \leftarrow p_{joint}$
\STATE $r$ $\leftarrow$ $x \sim \mathsf{binomial}(1, \frac{p_{hi}}{p_{joint}})$ \label{line:binomial-random}
\IF{$r$ is $1$}
\STATE $\omega[n]$ $\leftarrow$ $\omega[\hi{n}] \cup \var{n}$
\ELSE
\STATE $\omega[n]$ $\leftarrow$ $\omega[\lo{n}] \cup \neg \var{n}$
\ENDIF \label{line:sample-decision-end-roulette}
\ENDIF
\ENDFOR
\STATE \textbf{return} $\omega$[rootnode($\psi$)]
\end{algorithmic}
\caption{{\inc} - returns a satisfying assignment based on {\prob} $\psi$ parameters}
\label{alg:inc-sampling}
\end{algorithm}
{\inc} takes a {\prob} $\psi$ representing Boolean formula $F$ and draws a sample from the space of satisfying assignments of $F$, the process is illustrated by Algorithm~\ref{alg:inc-sampling}. {\inc} performs sampling in a bottom-up manner while integrating the \textit{annotation} process in the same bottom-up pass. Since we want to sample from the space of satisfying assignments we can ignore \textit{false} nodes in $\psi$ entirely by considering a sub-DAG that excludes \textit{false} nodes and edges leading to them, as shown by line~\ref{line:zddc-preprocess-roulette}. As an example, $\mathsf{hideFalseNode}$ when applied to $\psi_1$ would remove node $n8$ and the edges immediately leading to it. Next, {\inc} processes each of the remaining nodes in bottom-up order while keeping two caches - $\omega$ to store the partial samples from each node, $\varphi$ to store the joint probability at each node. {\inc} starts with $\emptyset$ at the \textit{true} node since there is no associated variable.
At each conjunction node, {\inc} takes the union of the child nodes in line~\ref{line:union-conjunction-child-roulette}. Using $n2$ in Figure~\ref{fig:smooth-prob-example} as an example, if the sample drawn at $n4$ is $\omega[n4] = \{\neg z\}$ and that at $n5$ is $\omega[n5] = \{y\}$, then $\mathsf{unionChild}(\child{n2},\omega) = \{y, \neg z\}$. At each decision node $n$, a decision on $\var{n}$ is sampled from lines~\ref{line:binomial-random} to~\ref{line:sample-decision-end-roulette}. We first calculate the joint probabilities, $p_{lo}$ and $p_{hi}$, of choosing $\neg \var{n}$ and choosing $\var{n}$. Subsequently, we sample the decision on $\var{n}$ using a binomial distribution in line~\ref{line:binomial-random} with the probability of success being the joint probability of choosing $\var{n}$. After processing all nodes, the sampled assignment is the output at the root node of $\psi$.
\paragraph*{\textbf{Extending {\inc} to $k$ samples}}
It is straightforward to extend the single sample {\inc} shown in Algorithm~\ref{alg:inc-sampling} to draw $k$ samples in a single pass, where $k$ is a user-specified number. At each node, we have to store a list of $k$ independent copies of partial assignments drawn in $\omega$. At each conjunction node $n_c$, we perform the same union process in line~\ref{line:union-conjunction-child-roulette} of Algorithm~\ref{alg:inc-sampling} for child outputs in the same indices of the respective lists in $\omega$. More specifically, if $n_c$ has child nodes $c_x$ and $c_y$, the outputs of index $i$ are combined to get the output of $n_c$ at index $i$. This process is performed for all indices from 1 to $k$. At each decision node $n_d$, we now draw $k$ independent samples instead of a single sample from the binomial distribution as shown in line~\ref{line:binomial-random}. The sampling steps in lines~\ref{line:binomial-random} to~\ref{line:sample-decision-end-roulette} are performed independently for the $k$ random numbers. There is no change necessary for the calculation of joint probabilities in Algorithm~\ref{alg:inc-sampling} as there is no change in literal weights.
\paragraph*{\textbf{Incremental sampling}}
Given a Boolean formula $F$ and weight function $W$, {\inc} performs incremental sampling with the sampling process shown in Figure~\ref{fig:incremetal-sampling-flow}. In the initial round, {\inc} compiles $F$ and $W$ into a {\prob} $\psi$ and performs sampling. Subsequent rounds involve applying a new set of weights $W$ to $\psi$, typically generated based on existing samples by the controller~\cite{BLM20}, and performing weighted sampling according to the updated weights. The number of sampling rounds is determined by the controller component, whose logic varies according to application.
\begin{figure}
\caption{{\inc} incremental sampling workflow}
\label{fig:incremetal-sampling-flow}
\end{figure}
\subsection{Implementation Decisions} \label{subsec:inc-tech-analysis}
\paragraph*{\textbf{Log-Space Calculations}}
{\inc} performs the \textit{annotation} process---the computation of joint probabilities---in log space. This design choice is made to avoid the usage of arbitrary precision math libraries, which WAPS utilized to prevent numerical underflow after many successive multiplications of probability values. Using the $\mathsf{LogSumExp}$ trick below, it is possible to avoid numerical underflow.
\begin{align*}
\log(a + b) &= \log(a) + \log( 1 + \frac{b}{a}) \\
&= \log(a) + \log(1 + \exp( \log(b) - \log(a) ))
\end{align*}
The joint probability at a decision node $n_d$ is given by $\theta_{\lo{n_d}} \times \text{joint probability of } \lo{n_d} + \theta_{\hi{n_d}} \times \text{joint probability of } \hi{n_d} $. Notice that if we were to perform the calculation in log space, we would have to add the two weighted log joint probabilities, termed $p_{lo}$ and $p_{hi}$ in Algorithm~\ref{alg:inc-sampling}. Using the $\mathsf{LogSumExp}$ trick, we do not need to exponentiate $p_{lo}$ and $p_{hi}$ independently which risks running into numerical underflow. Instead, we only need to exponentiate the difference of $p_{lo}$ and $p_{hi}$ which is more numerically stable. Equations \ref{eq:eq1} and \ref{eq:eq2} can be implemented in log space as follows:
\begin{align*}
Q \text{ of $\land$-node }n_c &= \sum_{c \in \child{n_c}} Q(c) \\
Q \text{ of decision-node }n_d &= \mathsf{LogSumExp}[ \\ &\log(\theta_{\lo{n_d}}) + Q(\lo{n_d}), \\
& \log(\theta_{\hi{n_d}}) + Q(\hi{n_d})]
\end{align*}
In the equations above, $Q$ refers to the corresponding log joint probabilities in Equations~\ref{eq:eq1} and~\ref{eq:eq2}. In the experiments section, we detail the runtime advantages of using log computations compared to arbitrary precision math computations.
\paragraph*{\textbf{Dynamic Annotation}}
In existing state-of-the-art weighted sampler WAPS, sampling is performed in two passes - the first pass performs \textit{annotation} and the second pass samples assignments according to the joint probabilities. In {\inc}, we combine the two passes into a single bottom-up pass performing \textit{annotation} dynamically while sampling at each node.
\subsection{Theoretical Analysis} \label{subsec:theo-analysis}
\begin{proposition} \label{prop:param-correctness}
The branch parameters of any decision node $n_d$ are correct sampling probabilities, i.e.\ $W(x_i) : W(\neg x_i) = \theta_{\hi{x_i}} : \theta_{\lo{x_i}}$ where $\var{n_d} = x_i$.
\end{proposition}
\begin{proof}
\begin{align*}
\frac{W(x_i)}{W(\neg x_i)} &= \frac{\frac{W(x_i)}{W(x_i) + W(\neg x_i)}}{\frac{W(\neg x_i)}{W(x_i) + W(\neg x_i)}} = \frac{\theta_{\hi{x_i}}}{\theta_{\lo{x_i}}}
\end{align*}
We start with the ratio of literal weights of $x_i$, divide both numerator and denominator by $W(x_i) + W(\neg x_i)$ and arrive at the ratio of branch parameters of $n_d$. Notice that only the ratio matters for sampling correctness and not the absolute value of weights.
\end{proof}
\begin{remark}
Let $n_d$ be an arbitrary decision node in {\prob} $\psi$. When performing sampling according to a weight function $W$, $\theta_{\lo{n_d}}$ is the probability of picking $\neg \var{n_d}$ and $\theta_{\hi{n_d}}$ is that of $\var{n_d}$. The \textit{determinism} property states that the choice of either literal is disjoint at each decision node.
\end{remark}
\begin{proposition} \label{prop:sampling-correctness}
{\inc} samples an assignment $\tau$ from {\prob} $\psi$ with probability $\frac{1}{N} \prod_{l \in \tau} W(l)$, where $N$ is a normalization factor.
\end{proposition}
\begin{proof}
The proof consists of two parts, one for $\land$-node and another for decision node.
\paragraph*{\textbf{$\land$-node}} Let $n_c$ be an arbitrary conjunction node in {\prob} $\psi$. Recall that by the decomposability property, $\forall c_i, c_j \in \child{n_c}$ and $c_i \neq c_j$, $\varset{c_i} \cap \varset{c_j} = \emptyset$. As such, an arbitrary variable $x_i \in \varset{n_c}$ only belongs to the variable set of one child node $c_i \in \child{n_c}$. Therefore, the assignment of $x_i$ can be sampled independently of $x_j$ where $x_j \in \varset{c_j}, \forall c_j \not= c_i$. Let $\tau'_{c_i}$ be the partial assignment for child node $c_i \in \child{n_c}$. Notice that each partial assignment $\tau'_{c_i}$ is sampled independently of the others as there are no overlapping variables, hence their joint probability is simply the product of their individual probabilities. This agrees with the weight of an assignment being the product of its components, up to a normalization factor.
\paragraph*{\textbf{Decision node}} Let $n_d$ be an arbitrary decision node in {\prob} $\psi$ and $x_d$ be $\var{n_d}$. At $n_d$, we sample an assignment of $x_d$ based on the parameters $\theta_{\lo{x_d}}$ and $\theta_{\hi{x_d}}$, which are probabilities of literal assignment by Proposition~\ref{prop:param-correctness}. By Proposition~\ref{prop:param-correctness}, one can see that the assignment of $x_d$ is sampled correctly according to $W$. As the sampling process at $n_d$ is independent of its child nodes by the determinism property, the joint probability of sampled assignment of $x_d$ and the output partial assignment from the corresponding child node would be the product of their probabilities. Notice that the joint probability aligns with the definition of weight of an assignment being the product of the weight of its literals, up to a normalization factor.
Since we do not consider the \textit{false} node and treat it as having 0 probability, we always sample from satisfying assignments by starting at the \textit{true} node in bottom-up ordering. Reconciling the sampling process at the two types of nodes, it is obvious that any combination of decision and $\land$-nodes encountered in the sampling process would agree with a given weight function $W$ up to a normalization factor $1/N$. In fact, $N = \sum_{\tau_i \in S} W(\tau_i)$ where $S$ is the set of satisfying assignments of Boolean formula $F$ that $\psi$ represents. As mentioned in the proof of Proposition~\ref{prop:param-correctness}, normalization factors do not affect the correctness of sampling according to $W$, and we have shown that {\inc} performs weighted sampling correctly under multiplicative weight functions.
\end{proof}
\begin{remark}
From the proof of Proposition~\ref{prop:sampling-correctness}, the determinism and decomposability properties are important to ensure the correctness of {\inc}. The smoothness property is important to ensure that the sampled assignment by {\inc} is complete. For formula $F = (x \lor y) \land (\neg x \lor \neg z)$, an assignment $\tau_1$ sampled from a non-smooth {\prob} could be $\{x, \neg z \}$. Notice that $\tau_1$ is missing an assignment for variable $y$. By performing smoothing, we will be able to sample a complete assignment of all variables in the Boolean formula as both child nodes of each decision node $n$ have the same $\varset{\cdot}$.
\end{remark}
\section{Experiments} \label{sec:experiments}
We implement {\inc} in Python 3.7.10, using NumPy 1.15 and Toposort package. In our experiments, we make use of an off-the-shelf KC diagram compiler, KCBox~\cite{LMY21}. In the later parts of this section, we performed additional comparisons against an implementation of {\inc} using the Gmpy2 arbitrary precision math package ({\incap}) to determine the impact of log-space \textit{annotation} computations.
Our benchmark suite consists of instances arising from a wide range of real-world applications such as DQMR networks, bit-blasted versions of SMT-LIB (SMT) benchmarks, ISCAS89 circuits, and configurable systems~\cite{GSRM19,BLM20}. For incremental updates, we rely on the weight generation mechanism proposed in the context of prior applications of incremental sampling~\cite{BLM20}. In particular, new weights are generated based on the samples from the previous rounds, resulting in the need to recompute joint probabilities in each round. Keeping in line with prior work, we perform 10 rounds (R1-R10) of incremental weighted sampling and 100 samples drawn in each round. The experiments were conducted with a timeout of 3600 seconds on clusters with Intel Xeon Platinum 8272CL processors.
In this section, we detail the extensive experiments conducted to understand {\inc}'s runtime behavior and to compare it with the existing state-of-the-art weighted sampler WAPS~\cite{GSRM19} in incremental weighted sampling tasks. We chose WAPS as it has been shown to achieve significant runtime improvement over other samplers, and accordingly has emerged as a sampler of the choice for practical applications~\cite{BLM20}. In particular, our empirical evaluation sought to answer the following questions:
\begin{description}
\item[RQ 1] How does {\inc}'s incremental weighted sampling runtime performance compare to current state-of-the-art?
\item[RQ 2] How does using {\prob} affect runtime performance?
\item[RQ 3] How does log-space calculations impact runtime performance?
\item[RQ 4] Does {\inc} correctly perform weighted sampling?
\end{description}
\begin{figure*}
\caption{Single Round (R1) Runtime Scatter Plot}
\label{fig:runtime-comparison-single-round}
\caption{Incremental Runtime Scatter Plot}
\label{fig:runtime-comparison-incremental}
\caption{Runtime comparisons between {\inc} and WAPS}
\label{fig:runtime-comparison}
\end{figure*}
\paragraph*{\textbf{RQ 1: Incremental Sampling Performance}}
The scatter plot of incremental sampling runtime comparison is shown in Figure~\ref{fig:runtime-comparison}, with Figure~\ref{fig:runtime-comparison-single-round} showing runtime comparison for the first round (R1) and Figure~\ref{fig:runtime-comparison-incremental} showing runtime comparison over 10 rounds. The vertical axes represent the runtime of {\inc} and the horizontal axes represent that of WAPS. In the experiments, {\inc} completed 650 out of 896 benchmarks whereas WAPS completed 674. {\inc} completed 21 benchmarks on which WAPS timed out and similarly, WAPS completed 45 benchmarks on which {\inc} timed out. In the experiments, {\inc} achieved a median speedup of $1.69\times$ over WAPS.
\begin{table*}
\centering
\begin{tabular}{l|rrrrr}
\toprule
Statistic &
$\dfrac{\text{WAPS $\mathsf{MEAN}$(R2 to R10)}}{\text{WAPS R1}}$ &
$\dfrac{\text{{\inc} $\mathsf{MEAN}$(R2 to R10)}}{\text{{\inc} R1}}$ &
$\dfrac{\text{WAPS R1}}{\text{{\inc} R1}}$ &
$\dfrac{\text{WAPS $\mathsf{SUM}$(R2 to R10)}}{\text{{\inc} $\mathsf{SUM}$(R2 to R10)}}$ &
$\dfrac{\text{WAPS Total}}{\text{{\inc} Total}}$
\\
\midrule
Mean & 0.74 & 0.064 & 1.03 & 15.66 & 6.12 \\
\midrule
Std & 0.24 & 0.040 & 1.47 & 26.42 & 10.73 \\
\midrule
Median & 0.67 & 0.059 & 0.44 & 4.48 & 1.69 \\
\midrule
Max & 1.25 & 0.188 & 10.65 & 172.66 & 73.96 \\
\bottomrule
\end{tabular}
\caption{Incremental weighted sampling runtime ratio statistics for WAPS and {\inc} (Numerators and denominators refer to the corresponding runtimes)}
\label{tab:sampling-result-stats}
\end{table*}
\begin{table*}
\centering
\small
\tabcolsep=0.2cm
\begin{tabular}{ll|rrrrrrrrrrr|r}
\toprule
Benchmark & Tool & R1 & R2 & R3 & R4 & R5 & R6 & R7 & R8 & R9 & R10 & Total & Speed \\
\midrule
or-50-5-5-UC-10 & WAPS & \textbf{56.6} & 56.3 & 52.5 & 59.4 & 52.5 & 53.6 & 59.4 & 53.2 & 53.4 & 61.7 & \textbf{558.6} & 1.0$\times$ \\
(100, 253) & {\inc} & 1461.3 & \textbf{7.6} & \textbf{8.4} & \textbf{8.4} & \textbf{8.4} & \textbf{8.4} & \textbf{8.5} & \textbf{8.5} & \textbf{8.4} & \textbf{8.5} & 1536.3 & 0.4$\times$ \\
\midrule
or-100-20-9-UC-30 & WAPS & \textbf{73.0} & 69.1 & 66.7 & 76.0 & 66.5 & 66.9 & 76.6 & 66.0 & 66.9 & 78.6 & 706.1 & 1.0$\times$ \\
(200, 528) & {\inc} & 269.5 & \textbf{4.7} & \textbf{4.8} & \textbf{4.8} & \textbf{4.9} & \textbf{5.1} & \textbf{4.8} & \textbf{4.8} & \textbf{4.8} & \textbf{5.1} & \textbf{313.4} & 2.3$\times$ \\
\midrule
s953a\_15\_7 & WAPS & \textbf{1.7} & 1.1 & 1.1 & 1.2 & 1.0 & 1.1 & 1.2 & 1.1 & 1.1 & 1.3 & 11.9 & 1.0$\times$ \\
(602, 1657) & {\inc} & 4.9 & \textbf{0.7} & \textbf{0.7} & \textbf{0.7} & \textbf{0.7} & \textbf{0.7} & \textbf{0.7} & \textbf{0.7} & \textbf{0.7} & \textbf{0.7} & \textbf{11.5} & 1.0$\times$ \\
\midrule
h8max & WAPS & 90.3 & 104.2 & 92.4 & 116.0 & 94.3 & 94.1 & 112.9 & 92.9 & 94.4 & 120.4 & 1011.9 & 1.0$\times$ \\
(1202, 3072) & {\inc} & \textbf{34.1} & \textbf{2.1} & \textbf{2.2} & \textbf{2.4} & \textbf{2.3} & \textbf{2.4} & \textbf{2.2} & \textbf{2.4} & \textbf{2.4} & \textbf{2.3} & \textbf{55.7} & 18.2$\times$ \\
\midrule
innovator & WAPS & 195.5 & 221.9 & 201.3 & 244.4 & 200.1 & 206.7 & 247.2 & 202.0 & 202.9 & 257.4 & 2179.3 & 1.0$\times$ \\
(1256, 50452) & {\inc} & \textbf{32.8} & \textbf{1.6} & \textbf{1.8} & \textbf{1.9} & \textbf{1.9} & \textbf{1.9} & \textbf{1.8} & \textbf{1.9} & \textbf{1.9} & \textbf{1.9} & \textbf{49.4} & 44.1$\times$ \\
\bottomrule
\end{tabular}
\caption{Runtime (seconds) breakdowns for each of ten rounds (R1-R10) between WAPS and {\inc} for benchmarks of different sizes e.g. `h8max' benchmark consists of 1202 variables and 3072 clauses.}
\label{tab:benchmark-instance-comparison}
\end{table*}
Further results are shown in Table~\ref{tab:sampling-result-stats}. Observe that for runtime taken for R1 (column 3), WAPS is faster and takes around $0.44\times$ of {\inc}'s runtime in the median case. However, {\inc} takes the lead in runtime performance when we examine the total time taken for the incremental rounds R2 to R10 (column 4). For incremental rounds, WAPS always took longer than {\inc}, in the median case WAPS took $4.48\times$ longer than {\inc}. We compare the average incremental round runtime with the first round runtime for both samplers in columns 1 and 2. In the median case, an incremental round for WAPS takes $67\%$ of the time for R1 whereas an incremental round for {\inc} only requires $5.9\%$ of the time R1 takes. We show the per round runtime for 5 benchmarks in Table~\ref{tab:benchmark-instance-comparison} to further illustrate {\inc}'s runtime advantage over WAPS for incremental sampling rounds, even though both tools reuse the respective KC diagram compiled in R1. This set of results highlights {\inc}'s superior performance over WAPS in the handling of incremental sampling settings. {\inc}'s advantage in incremental sampling rounds led to better overall runtime performance than WAPS in $75\%$ of evaluations. The runtime advantage of {\inc} would be more obvious in applications requiring more than 10 rounds of samples.
Therefore, we conducted sampling experiments for 20 rounds to substantiate our claims that {\inc} will have a larger runtime lead over WAPS with more rounds. Both samplers are given the same 3600s timeout as before and are to draw 100 samples per round, for 20 rounds. The number of completed benchmarks is shown in Table~\ref{tab:20round-timeout-comparison}. In the 20 sampling round setting, {\inc} completed 649 out of 896 benchmarks, timing out on 1 additional benchmark compared to the 10 sampling round setting. In comparison, WAPS completed 596 of 896 benchmarks, timing out on 78 additional benchmarks compared to the 10 sampling round setting. In addition, WAPS takes a median of $2.17\times$ longer than {\inc} under the 20 sampling round setting, an increase over the $1.69\times$ under the 10 sampling round setting.
\begin{table}
\centering
\small
\begin{tabular}{l|r|r}
\toprule
Number of rounds & WAPS & {\inc} \\
\midrule
10 & 674 & 650 \\
\midrule
20 & 596 & 649 \\
\bottomrule
\end{tabular}
\caption{Number of completed benchmarks within 3600s, for 10 and 20 round settings}
\label{tab:20round-timeout-comparison}
\end{table}
The runtime results clearly highlight the advantage of {\inc} for incremental weighted sampling applications and that {\inc} is noticeably better at incremental sampling than the current state-of-the-art.
\paragraph*{\textbf{RQ 2: {\prob} Performance Impacts}}
\begin{table}
\centering
\small
\begin{tabular}{l|r}
\toprule
Statistic & $\frac{\text{WAPS KC size}}{\text{{\inc} KC size}}$\\
\midrule
Mean & 18.92 \\
\midrule
Std & 81.19 \\
\midrule
Median & 4.64 \\
\midrule
Max & 1734.08 \\
\bottomrule
\end{tabular}
\caption{Statistics for number of nodes in d-DNNF (WAPS KC diagram) over that of smoothed {\prob} ({\inc} KC diagram).}
\label{tab:benchmark-kc-diagram-size}
\end{table}
We now focus on the analysis of the impact of using {\prob} compared to d-DNNF in the design of a weighted sampler. We analyzed the size of both {\prob} and d-DNNF across the benchmarks that both tools managed to compile and show the results in Table~\ref{tab:benchmark-kc-diagram-size}. From Table~\ref{tab:benchmark-kc-diagram-size}, {\prob} is always smaller than the corresponding d-DNNF. Additionally, {\prob} is at median $4.64\times$ smaller than the corresponding d-DNNF, and that for {\prob} is an order of magnitude smaller for at least $25\%$ of the benchmarks. As such, {\prob} emerges as the clear choice of knowledge compilation diagram used in {\inc}, owing to its succinctness which leads to fast incremental sampling runtimes.
\paragraph*{\textbf{RQ 3: Log-space Computation Performance Impacts}}
\begin{table}
\centering
\small
\begin{tabular}{l|r}
\toprule
Statistic & $\frac{\text{{\incap} runtime}}{\text{{\inc} runtime}}$\\
\midrule
Mean & 1.14 \\
\midrule
Std & 0.16 \\
\midrule
Median & 1.12 \\
\midrule
Max & 1.89 \\
\bottomrule
\end{tabular}
\caption{Runtime comparison of {\inc} and {\incap}}
\label{tab:inc-implementation-runtime-comparison}
\end{table}
In the design of {\inc}, we utilized log-space computations to perform \textit{annotation} computations as opposed to naively using arbitrary precision math libraries. In order to analyze the impact of this design choice, we implemented a version of {\inc} where the dynamic \textit{annotation} computations are performed using arbitrary precision math in a similar manner as WAPS. We refer to the arbitrary precision math version of {\inc} as {\incap}. As an ablation study, we compare the runtime of both implementations across all the benchmarks and show the comparison in Table~\ref{tab:inc-implementation-runtime-comparison}. The statistics shown is for the ratio of {\incap} runtime to {\inc} runtime, a value of $1.12$ means that {\incap} takes $1.12\times$ that of {\inc} for the corresponding statistics.
The results in Table~\ref{tab:inc-implementation-runtime-comparison} highlight the runtime advantages of our decision to use log-space computations over arbitrary precision computations. {\inc} has faster runtime than {\incap} in the majority of the benchmarks. {\inc} displayed a minimum of $0.70 \times$, a median of $1.12 \times$, and a max of $1.89 \times$ speedup over {\incap}. Furthermore, {\incap} timed out on 2 more benchmarks compared to {\inc}. It is worth emphasizing that log-space computations do not introduce any error, and our usage of them sought to improve on the naive usage of arbitrary precision math libraries.
\paragraph*{\textbf{RQ 4: {\inc} Sampling Quality}}
We conducted additional evaluation to further substantiate evidence of {\inc}'s sampling correctness, apart from the theoretical analysis in Section~\ref{subsec:theo-analysis}. Specifically, we compared the samples from {\inc} and WAPS, which has proven theoretical guarantees~\cite{GSRM19}, on the `case110' benchmark that is extensively used by prior works~\cite{SGRM18,AHT18,GSRM19}. We gave each positive literal a weight of $0.75$ and each negative literal a weight of $0.25$, and subsequently drew one million samples using both {\inc} and WAPS and compare them in Figure~\ref{fig:distplot}.
\begin{figure}
\caption{Distribution comparison for Case110, with $\log$ scale for both axes}
\label{fig:distplot}
\end{figure}
Figure~\ref{fig:distplot} shows the distributions of samples drawn by {\inc} and WAPS for the `case110' benchmark. A point ($x,y$) on the plot represents $y$ number of unique solutions that were sampled $x$ times in the sampling process by the respective samplers. The almost perfect match between the weighted samples drawn by {\inc} and WAPS, coupled with our theoretical analysis in Section~\ref{subsec:theo-analysis}, substantiates our claim of {\inc}'s correctness in performing weighted sampling. Additionally, it also shows that {\inc} can be a functional replacement for existing state-of-the-art sampler WAPS, given that both have theoretical guarantees.
\paragraph*{\textbf{Discussion}}
We demonstrated the runtime performance advantages of {\inc} and the two main contributing factors - a choice of succinct knowledge compilation form and dynamic log-space \textit{annotation}. {\inc} takes longer than WAPS for single-round sampling, mainly because WAPS takes less time for KC diagram compilation than {\inc}, leading to WAPS being faster in single-round sampling. In the incremental sampling setting, the compilation costs of KC diagrams are amortized, and since {\inc} is substantially better at handling incremental updates, it thus took the overall runtime lead from WAPS in the majority of the benchmarks. Extrapolating the trend, it is most likely that {\inc} would have a larger runtime lead over WAPS for applications requiring more than 10 sampling rounds. The runtime breakdown demonstrates that {\inc} is able to amortize the compilation time over the incremental sampling rounds, with subsequent rounds being much faster than WAPS.
In summary, we show that {\inc} is substantially better at incremental sampling than existing state-of-the-art.
\section{Conclusion and Future Work} \label{sec:future-works}
In conclusion, we introduced a bottom-up weighted sampler, {\inc}, that is optimized for incremental weighted sampling. By exploiting the succinct structure of {\prob} and log-space computations, {\inc} demonstrated superior runtime performance in a series of extensive benchmarks when compared to the current state-of-the-art weighted sampler WAPS. The improved runtime performance, coupled with correctness guarantees, makes a strong case for the wide adoption of {\inc} in future applications.
For future work, a natural step would be to seek further runtime improvements for {\prob} compilation since {\inc} takes longer than SOTA for the initial sampling round, due to slower compilation. Another extension would be to investigate the design of a partial \textit{annotation} algorithm to reduce computations when only a small portion of the weights have been updated. It would also be of interest if we could store partial sampled assignments at each node as a succinct sketch to reduce memory footprint, for instance we could store each unique assignment and its count.
\end{document} |
\begin{document}
\begin{center}{\Large \bf Elegant vertex labelings with prime numbers}\end{center}
\begin{center}{{\large Thierry Gensane}\\
{\small LMPA J. Liouville\\
Universit\'e du Littoral\\
Calais, FRANCE\\
\tt [email protected]\\}}\end{center}
\begin{abstract}
We consider graph labelings with an assignment of odd prime numbers
to the vertices. Similarly to graceful graphs, a labeling is said
to be elegant if the absolute differences between the labels of adjacent
vertices describe exactly the first even numbers. The labels of an
elegant tree with $n$ vertices are the first $n$ odd prime numbers
and we want that the resulting edge labels are exactly the first even
numbers up to $2n-2$. We conjecture that each path is elegant and
we give the algorithm with which we got elegant paths of $n$ primes
for all $n$ up to $n=3500$.
\end{abstract}
\section{Introduction}
In this paper, we adapt the notion of graceful graphs by considering
an assignment of odd prime numbers to the vertices. Let $G=(V,E)$ be a graph,
we look for labelings of the vertices with distinct odd primes which induce
edge labelings with all even integers from $2$ up to $2|E|$. For
instance in the tree displayed in Fig.~\ref{fig_tree_12}, the first twelve odd
primes are assigned to the vertices and we get all the even positive integers up to
$22$. We call elegant any graph for which there exists such a labeling.
We refer to Gallian~\cite{gali} for a very detailed survey of graph
We refer to Galian~\cite{gali} for a very detailed survey about graph
labelings.
Let us denote the increasing sequence
of all odd prime numbers by $p_{1},p_{2},\ldots,p_r,\ldots$ and $\mathbb{P}_{n}=\{p_{1},p_{2},\ldots,p_{n}\}$.
We now make the definition of an elegant graph precise.
\begin{definition} Let $G=(V,E)$ be an undirected graph without
loop or multiple edge, with $n$ vertices and $r$ edges. We say that
$G$ is {\rm elegant} if there exists an injective map $\varphi:V\to\left\{ p_{1},p_{2},\ldots,p_{r+1}\right\} $
such that the induced map
\[
\begin{array}{rccl}
\psi\;: & E & \longrightarrow & 2\mathbb{N}^{*}\\
\\
& e=uv & \longmapsto & \psi(e)=|\varphi(v)-\varphi(u)|
\end{array}
\]
is a one-to-one correspondence from $E$ to $\{2,4,6,\ldots,2r\}$.
\end{definition}
\begin{center}
\begin{figure}
\caption{An elegant labeling of a tree on twelve vertices.}
\label{fig_tree_12}
\end{figure}
\par\end{center}
The complete graphs up to $K_{4}$ are
graceful and elegant: their elegant labelings are respectively determined by $\varphi(V)=\{3,5\}, \{5,7,11\}$ and $\{7,11,17,19\}$. As in the case of graceful graphs, it seems that no other elegant complete graph exists. We display an elegant
labeling of the Petersen graph in Fig.~\ref{fig_petersen5}.
Of course, if the number of vertices is too small relative to the
maximal degree of the graph, then the graph is probably not (or cannot
be) elegant. For instance, the star graphs $S_{n}$ with $|V(S_{n})|=n$
are not elegant as soon as the center has more than $8$ adjacent
vertices (in fact, we verified that the only elegant star graphs are
$S_{2},$ $S_{3}$, $S_{5}$, $S_{6}$ and $S_{9}$). Nevertheless,
in the case of trees, we could hope that for each integer $d\geq2$,
the answer to the following question ${\rm A}_{d}$ is positive.
{}
\noindent \textbf{Question ${\rm A}_{d}$:} Let $d\geq2$ be an integer.
Is there an integer $N_{d}$ such that each tree of maximal degree
$d$ and with more than $N_{d}$ vertices is elegant?
{}
The answer to ${\rm A}_{d}$ is negative for $d$ large enough; this is an easy consequence of the prime number theorem, see for instance
\cite{apos,hard}: Let us consider a symmetric and elegant tree $T$
rooted at a vertex of degree $d$, with exactly $d^{k}$ vertices
of degree $d+1$ at each level $k=1,\ldots,m-1$ and with $d^{m}$
leaves at level $m$. We have $n=|V(T)|=1+d+\cdots+d^{m}$ and then
$p_{n}\underset{d\to\infty}{\sim}md^{m}\log d$. Moreover, the absolute
difference between labels of two adjacent vertices is less than $2n-2\sim_{d\to\infty}2d^{m}$.
We consider the path $v_{0}v_{1}\cdots v_{k}\cdots v_{p}$ on the
tree $T$, from the vertex $v_{0}$ labeled by $\varphi(v_{0})=3$
and ended by the vertex $v_{p}$ labeled by $\varphi(v_{p})=p_{n}$.
Since $p\leq2m$, we get
\[
md^{m}\log d\underset{d\to\infty}{\sim}|\varphi(v_{0})-\varphi(v_{p})|\leq\sum_{i=0}^{^{p-1}}|\varphi(v_{i})-\varphi(v_{i+1})|\leq4md^{m}\left(1+o(1)\right)
\]
\begin{figure}
\caption{An elegant labeling of the Petersen graph.}
\label{fig_petersen5}
\end{figure}
$\;$
\noindent which is impossible when $d$ is large enough. Since $n\to\infty$
when $m\to\infty$, there exists a minimal integer $d_{1}$ such that
$A_{d}$ is false for all $d\geq d_{1}$.
As soon as $d=3$, it is difficult to know if $A_{d}$ is true or
false (and we are far from the value $d=e^{4}$ which appears in the previous proof). Let us illustrate this point
with an example: Let ${\rm C}_{n}$ be the regular caterpillar with
$n$ vertices of degree $3$ and $n+2$ leaves. On the one hand, up to
$n=25$, a trivial stochastic algorithm has given elegant labelings
of $C_{n}$ only for $n=3,5,10,18,19,20,22$ but it is quite possible
that $C_{n}$ be elegant for all $n$ large enough. Let us recall
that Rosa \cite{rosa} proved that all caterpillars are graceful.
On the other hand, up to $n=25$, as soon as we add a supplementary leaf $w$ on any leaf $v\in V(C_{n})$
or if we suppress
anywhere one leaf of $V(C_n)$, then surprisingly our program finds in a few seconds
that the modified tree is elegant.
{}
Fortunately, the case $d=2$ does not seem to be resistant, and we are
confident that $N_{2}=2$:
\begin{conjecture}For all $n\geq2$, the path on $n$ vertices is elegant.
\end{conjecture} Let us recall that a path of $n$ vertices is elegant
if there exists a permutation $\sigma\in S_{n}$
such that
\[
\left\{ \left|p_{\sigma(i+1)}-p_{\sigma(i)}\right|;1\leq i\leq n-1\right\} =\left\{ 2,4,6,\ldots,2n-2\right\} .
\]
\noindent For instance, up to $n=10$, the following labelings are
elegant:
\begin{itemize}
\item $3$ $\underset{2}{\text{\textendash\textendash}}$ $5$
\item $5$ $\underset{2}{\text{\textendash\textendash}}$ $3$ $\underset{4}{\text{\textendash\textendash}}$
$7$
\item $11$ $\underset{6}{\text{\textendash\textendash}}$ $5$ $\underset{2}{\text{\textendash\textendash}}$
3 $\underset{4}{\text{\textendash\textendash}}$ $7$
\item $13$ $\underset{6}{\text{\textendash\textendash}}$ $7$ $\underset{4}{\text{\textendash\textendash}}$
$11$ $\underset{8}{\text{\textendash\textendash}}$ $3$ $\underset{2}{\text{\textendash\textendash}}$
$5$
\item $7$ $\underset{2}{\text{\textendash\textendash}}$ $5$ $\underset{6}{\text{\textendash\textendash}}$
$11$ $\underset{8}{\text{\textendash\textendash}}$ $3$ $\underset{10}{\text{\textendash\textendash}}$
$13$ $\underset{4}{\text{\textendash\textendash}}$ $17$
\item $13$ $\underset{4}{\text{\textendash\textendash}}$ $17$ $\underset{10}{\text{\textendash\textendash}}$
$7$ $\underset{12}{\text{\textendash\textendash}}$ $19$ $\underset{8}{\text{\textendash\textendash}}$
$11$ $\underset{6}{\text{\textendash\textendash}}$ $5$ $\underset{2}{\text{\textendash\textendash}}$
$3$
\item $13$ $\underset{10}{\text{\textendash\textendash}}$ $3$ $\underset{4}{\text{\textendash\textendash}}$
$7$ $\underset{2}{\text{\textendash\textendash}}$ $5$ $\underset{14}{\text{\textendash\textendash}}$
$19$ $\underset{8}{\text{\textendash\textendash}}$ $11$ $\underset{12}{\text{\textendash\textendash}}$
$23$ $\underset{6}{\text{\textendash\textendash}}$ $17$
\item $11$ $\underset{2}{\text{\textendash\textendash}}$ $13$ $\underset{8}{\text{\textendash\textendash}}$
$5$ $\underset{14}{\text{\textendash\textendash}}$ $19$ $\underset{10}{\text{\textendash\textendash}}$
$29$ $\underset{12}{\text{\textendash\textendash}}$ $17$ $\underset{6}{\text{\textendash\textendash}}$
$23$ $\underset{16}{\text{\textendash\textendash}}$ $7$ $\underset{4}{\text{\textendash\textendash}}3$
\item $19$ $\underset{12}{\text{\textendash\textendash}}$ $31$ $\underset{14}{\text{\textendash\textendash}}$
$17$ $\underset{6}{\text{\textendash\textendash}}$ $23$ $\underset{10}{\text{\textendash\textendash}}$
$13$ $\underset{16}{\text{\textendash\textendash}}$ $29$ $\underset{18}{\text{\textendash\textendash}}$
$11$ $\underset{8}{\text{\textendash\textendash}}$ $3$ $\underset{4}{\text{\textendash\textendash}}$
$7$ $\underset{2}{\text{\textendash\textendash}}$ $5$
\end{itemize}
\section{Operations on admissible paths}
In the sequel, a \textit{path of $l$ primes} ${\cal Q}={\cal Q}_{l}=q_{1}q_{2}\ldots q_{l}$
represents the labeling of the path of length $l$ with the primes
$q_{1},q_{2},\ldots,q_{l}$ in this order; we will identify the vertices of a path and their labels $q_i$.
\begin{definition} Let $n\geq2$ be a given integer.
\begin{enumerate}
\item We say that a path ${\cal Q}_{l}=q_{1}q_{2}\ldots q_{l}$ is {\rm admissible}
if the primes $q_{i}\in\mathbb{P}_{n}$ are distinct and if the set
$E_{l}$ of the $l-1$ gaps $\vert q_{i+1}-q_{i}\vert$ is a subset
of cardinal $l-1$ of $\{2,4,\ldots,2n-2\}$.
\item With regard to a path ${\cal Q}_{l}=q_{1}q_{2}\ldots q_{l}$ with $l<n$,
a prime $p$ in $\mathbb{P}_{n}$ is said to be {\rm free} if it
is not a vertex of ${\cal Q}_{l}$. A gap $2k\leq2n-2$ is said to
be {\rm free} if it does not belong to $E_{l}$.
\end{enumerate}
\end{definition}
We also adopt the following notations:
\begin{itemize}
\item If ${\cal Q}=q_{1}q_{2}\ldots q_{l}$ then $\overline{{\cal Q}}=q_{l}q_{l-1}\ldots q_{1}$.
\item If ${\cal Q}_{1}=q_{1}q_{2}\ldots q_{l}$ and ${\cal Q}_{2}=q_{l+1}q_{l+2}\ldots q_{p}$
, then ${\cal Q}_{1}{\cal Q}_{2}=q_{1}\ldots q_{l}q_{l+1}\ldots q_{p}$ is the \textit{concatenation} of ${\cal Q}_{1}$
and ${\cal Q}_{2}$.
When we want to add a prime to a path ${\cal Q}={\cal Q}_{1}{\cal Q}_{2}$
respectively on the left end, between ${\cal Q}_{1}$ and ${\cal Q}_{2}$
or on the right end, we note these paths $p{\cal Q},$ ${\cal Q}_{1}p{\cal Q}_{2}$
or ${\cal Q}p$.
\item If ${\cal Q}=q_{1}q_{2}\ldots q_{l}$ then $f({\cal Q})=q_{1}$, $\ell({\cal Q})=q_{l}$
and ${\rm length}({\cal Q})=l$.
\item If ${\cal Q}=q_{1}q_{2}\ldots q_{l}$ is admissible, $F_{p}({\cal Q})$ is
\textit{the set of free primes for ${\cal Q}$} and $F_{g}({\cal Q})$ is \textit{the set
of free gaps for ${\cal Q}$}.
\end{itemize}
In the algorithm described in Section 3, we randomly apply transformations on admissible paths of $l<n$ primes in order to find other admissible paths. Our aim is either to improve the length $l$ by adding a prime to the path, or to replace a prime of the path by a free prime, or simply to shuffle the path. Proposition \ref{shuffle} gives two elementary tools $A1$-$A2$ and $A3$-$A4$ for shuffling an admissible path:
\begin{proposition}\label{shuffle} Let $n\geq 3$ and ${\cal Q}={\cal Q}_{1}{\cal Q}_{2}$
be an admissible path of $l<n$ primes. We denote by $\delta= |f({\cal Q}_{2})-\ell({\cal Q}_{1})|$ the gap between ${\cal Q}_{1}$ and ${\cal Q}_{2}$.
\begin{enumerate}
\item[A1.] If $|f({\cal Q}_{2})-f({\cal Q}_{1})|\in F_{g}({\cal Q})$, then the path ${\cal Q}^{*}=\overline{{\cal Q}}_{1}{\cal Q}_{2}$
is admissible and
\[
F_{g}({\cal Q}^{*})=F_{g}({\cal Q})\cup\left\{ \delta\right\} \setminus\left\{ |f({\cal Q}_{2})-f({\cal Q}_{1})|\right\} .
\]
\item[A2.] If $|\ell({\cal Q}_{2})-\ell({\cal Q}_{1})|\in F_{g}({\cal Q})$, then the path ${\cal Q}^{*}={\cal Q}_{1}\overline{{\cal Q}}_{2}$
is admissible and
\[
F_{g}({\cal Q}^{*})=F_{g}({\cal Q})\cup\left\{ \delta\right\} \setminus\left\{ |\ell({\cal Q}_{2})-\ell({\cal Q}_{1})|\right\} .
\]
\item[A3.] If $|\ell({\cal Q}_{2})-f({\cal Q}_{1})|\in F_{g}({\cal Q})$, then the path ${\cal Q}^{*}={\cal Q}_{2}{\cal Q}_{1}$
is admissible and
\[
F_{g}({\cal Q}^{*})=F_{g}({\cal Q})\cup\left\{ \delta\right\} \setminus\left\{ |\ell({\cal Q}_{2})-f({\cal Q}_{1})|\right\} .
\]
\item[A4.] If $|\ell({\cal Q}_{2})-f({\cal Q}_{1})|=\delta$, then the path ${\cal Q}^{*}={\cal Q}_{2}{\cal Q}_{1}$ is admissible
and
\[
F_{g}({\cal Q}^{*})=F_{g}({\cal Q}).
\]
\end{enumerate}
\end{proposition}
\begin{remark}[Insertion of a free gap]\label{insertfreegap} {\rm In Algorithm 1 described in Section 3, when we get an admissible transformation from ${\cal Q}$ to ${\cal Q}^*$ with a property $A_i$, we try to insert a free prime in ${\cal Q}^*$: If $r$ is a new free prime for ${\cal Q}^*$, we can try to insert it directly with Proposition~\ref{insertprime}. If a gap $\delta$ has become free, we can try to insert in ${\cal Q}^*$, by Proposition~\ref{insertprime}, any prime $r=s\pm\delta\in F_p({\cal Q})$ if ${\cal Q}^*={\cal Q}_1{\cal Q}_2$ with $s=\ell({\cal Q}_1)$ or $s=f({\cal Q}_2)$. Obviously, we also test in the algorithm whether one of the paths $r{\cal Q}$ or ${\cal Q}r$ is admissible.
For instance, let us consider $n=7$ and the admissible path ${\cal Q}_6=7\textendash 19\textendash 17\textendash 11\textendash 3\textendash 13$. The last free gap is $4=|11-7|$ and we can apply the transformation given by A1 with ${\cal Q}_1= 7\textendash 19\textendash 17$ and ${\cal Q}_2=11\textendash 3\textendash 13$, we get ${\cal Q}^{*}=\overline{{\cal Q}}_{1}{\cal Q}_{2}=17\textendash 19\textendash 7\textendash 11\textendash 3\textendash 13$ and the last free gap is now $\delta=6$. The last free prime is $r=5=11-\delta$ and we can insert $r$ by the third point of Proposition \ref{insertprime} with ${\cal Q}_1= 17\textendash 19\textendash 7\textendash 11$ and ${\cal Q}_2= 3\textendash 13$: We get an admissible and elegant path of seven primes with the admissible transformation $ {\cal Q}_6={\cal Q}_1{\cal Q}_2\to{\cal Q}_7={\cal Q}_{1}r\overline{{\cal Q}_{2}}=17\textendash 19\textendash 7\textendash 11\textendash 5 \textendash 13\textendash 3$.}
\end{remark}
\begin{proposition}[Insertion of a free prime] \label{insertprime}Let $n\geq3$
and ${\cal Q}={\cal Q}_{1}{\cal Q}_{2}$ be an admissible path of $l<n$
primes and let $r\in F_{p}({\cal Q})$.
We denote by \textbf{C}
the condition
\[\textbf{C}\,:\,\left\{
\begin{array}{c}
\left(|r-p|\in F_{g}({\cal Q})\text{ and }|r-p|\neq|q-r|\in F_{g}({\cal Q})\right)\\
\text{or}\\
\left(|r-p|\in F_{g}({\cal Q})\text{ and }|q-r|=\delta\right)\\
\text{or}\\
\left(|q-r|\in F_{g}({\cal Q})\text{ and }|r-p|=\delta\right).
\end{array}
\right.
\]
\noindent If \textbf{C}
is true for $\delta=|f({\cal Q}_2)-\ell({\cal Q}_1)|$ with
\begin{enumerate}
\item $p=\ell({\cal Q}_{1})$ and $q=f({\cal Q}_{2})$, then the path ${\cal Q}^*={\cal Q}_{1}r{\cal Q}_{2}$
is admissible;
\item $p=f({\cal Q}_{1})$ and $q=f({\cal Q}_{2})$, then the path ${\cal Q}^*=\overline{{\cal Q}_{1}}r{\cal Q}_{2}$
is admissible;
\item $p=\ell({\cal Q}_{1})$ and $q=\ell({\cal Q}_{2})$, then the path ${\cal Q}^*={\cal Q}_{1}r\overline{{\cal Q}_{2}}$
is admissible.
\end{enumerate}
\end{proposition}
In order to replace a prime $p$ of a path ${\cal Q}$ by a free prime, we can consider $36$ transformations $A_5,A_6,\ldots,A_{40}:{\cal Q}\in X\rightarrow {\cal Q}^*\in Y$
where $ X=\left\{ q{\cal Q}_{1}{\cal Q}_{2},{\cal Q}_{1}q{\cal Q}_{2},\right.$ $\left.{\cal Q}_{1}{\cal Q}_{2}q\right\} $
and
$
Y=$ $\left\{ r{\cal Q}_{1}{\cal Q}_{2}, r{\cal Q}_{1}{\cal \overline{Q}}_{2}, r{\cal \overline{Q}}_{1}{\cal Q}_{2}, r{\cal \overline{Q}}_{1}\overline{{\cal Q}}_{2}, {\cal Q}_{1}r{\cal Q}_{2},{\cal Q}_{1}r{\cal \overline{Q}}_{2},{\cal \overline{Q}}_{1}r{\cal Q}_{2},{\cal \overline{Q}}_{1}r\overline{{\cal Q}}_{2},\right.$
\noindent${\cal Q}_{1}{\cal Q}_{2}r,{\cal Q}_{1}{\cal \overline{Q}}_{2}r,
\left.{{\cal \overline{Q}}_{1}{\cal Q}_{2}r,\cal \overline{Q}}_{1}\overline{{\cal Q}}_{2}r\right\}.
$
The following proposition
details only the transformations ${\cal Q}={\cal Q}_{1}q{\cal Q}_{2}$ $\to{\cal Q}^*={\cal Q}_{1}r{\cal Q}_2$
and ${\cal Q}={\cal Q}_{1}q{\cal Q}_{2}\to{\cal Q}^*={\cal \overline{Q}}_{1}r{\cal Q}_{2}$,
but it is trivial to find for which conditions the other transformations $A_i$ give admissible paths.
\begin{proposition} Let $n\geq 3$ and ${\cal Q}={\cal Q}_{1}q{\cal Q}_{2}$
be an admissible path of $l<n$ primes. Let $r$ be a free prime.
\begin{enumerate}
\item The path ${\cal Q}^{*}={\cal Q}_{1}r{\cal Q}_{2}$ is admissible
if $ \left|r-\ell({\cal Q}_{1})\right|\neq \left|f({\cal Q}_{2})-r\right|$ and
\[
\left\{ \left|r-\ell({\cal Q}_{1})\right|,\left|f({\cal Q}_{2})-r\right|\right\} \subset F_{g}({\cal Q})\cup\left\{ \left|q-\ell({\cal Q}_{1})\right|,\left|f({\cal Q}_{2})-q\right|\right\} .
\]
\item The path ${\cal Q}^{*}={\cal \overline{Q}}_{1}r{\cal Q}_{2}$ is
admissible if $ \left|r-f({\cal Q}_{1})\right|\neq \left|f({\cal Q}_{2})-r\right|$ and
\[
\left\{ \left|r-f({\cal Q}_{1})\right|,\left|f({\cal Q}_{2})-r\right|\right\} \subset F_{g}({\cal Q})\cup\left\{ \left|q-\ell({\cal Q}_{1})\right|,\left|f({\cal Q}_{2})-q\right|\right\} .
\]
\end{enumerate}
\end{proposition}
\section{An algorithm for finding elegant paths }
We have found elegant labelings for all paths up to $n=3500$ vertices
with the algorithm we describe below. In Algorithm 1, we construct incrementally a sequence
of paths: from a path ${\cal Q}_{l}$ of $l$ primes, we try either
to add a prime or to modify the path ${\cal Q}_{l}$ without
improving its length. The integer $n$ being fixed, our aim is to
obtain $l=n$.
The first part 1-4 of Algorithm 1 is a trivial greedy algorithm, we simply try to add free primes on the ends of ${\cal Q}_{l}$.
In the While statement of the step 5, we intensively and randomly use the transformations $A_i$ given in Section~2. When $m$ reaches $N$ without giving an elegant path of length $n$, we quit and we start a new run of Algorithm~1. With $N=40n$,
we get with Algorithm~1, all the elegant paths of $n$ primes from $n=2$ up to $n=200$
in less than 5 seconds with one core of a 3.6 GHz processor.
{}
If $n\geq200$, we accelerate the calculations with Algorithm 2 in
which we have chosen $N=20n$ and $c_{0}=20$: when a run reaches $l\geq n-\delta$,
we do not give up the path if $l<n$ but we suppress the right end $q_l$
of the path ${\cal Q}_{l}$. We take for instance $\delta=1$ for $n=200$ and $\delta=5$
from $n=2000$ up to $n=3500$. This can be seen as a perturbation
on a non-optimal and rigid configuration. When $n\in[2000,2100]$, the average run-time
to find one elegant path with Algorithm~2 is 4 minutes; when $n\in[3400,3500]$,
this average run-time becomes $40$ minutes. It is surprising to find a solution so easily among $3500!>10^{10000}$ permutations.
{}
\noindent
\fbox{\begin{minipage}[t]{5.99 in}
\textbf{Algorithm 1}
\end{minipage}}
\noindent
\fbox{\begin{minipage}[t]{5.99 in}
\begin{minipage}[t]{5.8in}
\begin{enumerate}
\item Randomly choose a prime $q_{1}$ in $\mathbb{P}_{n}$ and set $l:=1$,
${\cal Q}_{l}:=q_{1}$;
\item $l_{1}:=0;$
\item {\bf While} $l_{1}\neq l$ and $l<n$ {\bf do}
\begin{itemize}
\item $l_{1}:=l_{1}+1$;
\item If we find $p\in F_{p}({\cal Q}_{l})$ s.t. ${\cal Q}_{l}p$
(or resp. $p{\cal Q}_{l}$) is admissible then we set ${\cal Q}_{l+1}:={\cal Q}_{l}p$
(or resp. ${\cal Q}_{l+1}:=p{\cal Q}_{l}$) and $l:=l+1$;
\end{itemize}
\item {\bf If} $l=n$ {\bf then} return the elegant path ${\cal Q}_{n}$ and {\bf quit}.
\end{enumerate}
\end{minipage}
\end{minipage}}
\noindent
\fbox{\begin{minipage}[t]{5.99 in}
\begin{minipage}[t]{5.8in}
\begin{enumerate}
\item[5.] $m:=0$; {\bf While} $l<n$ and $m<N$ {\bf do}
\begin{itemize}
\item Randomly choose $i\in\{1,2,3\}$.
{\bf Case} $i=1$ {\bf:} Look for an admissible transformation
${ \cal Q}^*=\overline{{ \cal Q}_{1}}{\cal Q}_{2}$ or ${ \cal Q}_{1}\overline{{\cal Q}_{2}}$ of ${\cal Q}_{l}$ with A1 or A2. If we succeed in changing ${\cal Q}_l$, we set ${\cal Q}_l:={\cal Q}^*$. Then, we try to insert a free prime in this new path ${\cal Q}_l$ with Remark~\ref{insertfreegap} and Prop.~\ref{insertprime}; if we succeed in this, we set $l:=l+1$ and ${\cal Q}_l:=\cal Q^*$, the path $\cal Q^*$ having been given by Prop. 6.
{\bf Case} $i=2$ {\bf:} {\bf If} $|\ell({\cal Q}_l)-f({\cal Q}_l)|\in F_g({\cal Q}_l)$ {\bf then} randomly choose $u\in\{1,\ldots,l-1\}$ and set ${\cal Q}_1:=q_1\cdots q_u$, ${\cal Q}_2:=q_{u+1}\cdots q_l$ and ${\cal Q}_l:={\cal Q}_2{\cal Q}_1$. Then, if we succeed in inserting a free prime in this new ${\cal Q}_l$ with Remark~\ref{insertfreegap} and Prop.~\ref{insertprime}, we set $l:=l+1$ and ${\cal Q}_l:=\cal Q^*$, the path $\cal Q^*$ having been given by Prop.~\ref{insertprime}.
{\bf Else} there exists $u\in\{1,\ldots,l-1\}$ s.t. $|q_{u+1}-q_u|= |\ell({\cal Q}_l)-f({\cal Q}_l)|$
and we set ${\cal Q}_l:={\cal Q}_2{\cal Q}_1$ with ${\cal Q}_1=q_1\cdots q_u$ and ${\cal Q}_2=q_{u+1}\cdots q_l$.
{\bf Case} $i=3$ {\bf:} Look for a transformation among $A_5,\ldots,A_{40}$ which gives an admissible path ${\cal Q}^*$. If we succeed in modifying ${\cal Q}_l$, we set ${\cal Q}_l:={\cal Q}^*$.
If we succeed in inserting a free prime in this new ${\cal Q}_l$ with Remark~\ref{insertfreegap} and Prop.~\ref{insertprime}, we set $l:=l+1$ and ${\cal Q}_l:=\cal Q^*$.
\item $m:=m+1$.
\end{itemize}
\item[6.] {\bf Return} the path ${\cal Q}_{l}$ which is elegant if $l=n$.
\end{enumerate}
\end{minipage}
\end{minipage}}
\noindent
\fbox{\begin{minipage}[t]{5.99 in}
\textbf{Algorithm 2}
\end{minipage}}
\noindent
\fbox{\begin{minipage}[t]{5.99 in}
\hspace{0.1in} \begin{minipage}[t]{5.8 in}
$l:=0$;
{\bf While} $l<n$ {\bf do}
\begin{enumerate}
\item {\bf Do} \textbf{Algorithm 1} which gives an admissible
path ${\cal Q}_{l}$ of length $l$;
\item {\bf If} $l=n$ {\bf then return} the elegant path ${\cal Q}_{n}$;
\item {\bf If} $n-\delta\leq l<n$ {\bf then}
\hspace{0.1cm} $c:=0$;
\hspace{0.1cm} {\bf While} $l<n$ and $c<c_{0}$ {\bf do}
\begin{enumerate}
\item Suppress the last prime $q_{l}$ of ${\cal Q}_{l}$ and set $l:=l-1$;
\item Do {\bf Step 5} of {\bf Algorithm 1} and set $c:=c+1$;
\item {\bf If} $l=n$ {\bf then} return the elegant path ${\cal Q}_{n}$.
\end{enumerate}
\end{enumerate}
\end{minipage}
\end{minipage}}
\noindent
{}
Let us detail an example of a possible run of {\bf Algorithm~1} in
the case $n=11$.
\begin{itemize}
\item {\bf Steps $1-4$} of {\bf Algorithm 1}: We randomly choose $q_{1}=5$, $q_{2}=7,\;q_{3}=3,\ldots$,
and we get
\[
{\cal Q}_{10}:=5\underset{2}{\text{\textendash\textendash}}7\underset{4}{\text{\textendash\textendash}}3\underset{16}{\text{\textendash\textendash}}19\underset{12}{\text{\textendash\textendash}}31\underset{20}{\text{\textendash\textendash}}11\underset{18}{\text{\textendash\textendash}}29\underset{8}{\text{\textendash\textendash}}37\underset{14}{\text{\textendash\textendash}}23\underset{6}{\text{\textendash\textendash}}17,
\]
the last free prime is $13$ and the free gap is $10$.
\item {\bf Step $5$} of {\bf Algorithm 1}: We randomly choose values for $i$ in $\{1,2,3\}$ :
\begin{itemize}
\item $i=1$: The gap $|17-7|=10$ is free and we apply the transformation ${\cal Q}={\cal Q}_{1}{\cal Q}_{2}\rightarrow {\cal Q}^*={\cal Q}_1\overline{{\cal Q}_{2}}$ with ${\cal Q}_{1}=5-7$ and ${\cal Q}_{2}=3-19-31-11-29-37-23-17$. We get
\[
{\cal Q}_{10}:=
5\underset{2}{\text{\textendash\textendash}}7\underset{10}{\text{\textendash\textendash}}17\underset{6}{\text{\textendash\textendash}}23\underset{14}{\text{\textendash\textendash}}37\underset{8}{\text{\textendash\textendash}}29\underset{18}{\text{\textendash\textendash}}11\underset{20}{\text{\textendash\textendash}}31\underset{12}{\text{\textendash\textendash}}19\underset{16}{\text{\textendash\textendash}}3,
\]
the last free gap is now $4$.
\item $i=3$ : We apply the transformation ${\cal Q}={\cal Q}_{1}{\cal Q}_{2}q\rightarrow {\cal Q}^*={\cal Q}_1r\overline{{\cal Q}_{2}}$ with ${\cal Q}_{1}=5-7-17$, ${\cal Q}_{2}=23-37-29-11-31-19$, $q=3$ and $r=13$. We find
\[
{\cal Q}_{10}:=5\underset{2}{\text{\textendash\textendash}}7\underset{10}{\text{\textendash\textendash}}17\underset{4}{\text{\textendash\textendash}}13\underset{6}{\text{\textendash\textendash}}19\underset{12}{\text{\textendash\textendash}}31\underset{20}{\text{\textendash\textendash}}11\underset{18}{\text{\textendash\textendash}}29\underset{8}{\text{\textendash\textendash}}37\underset{14}{\text{\textendash\textendash23}},
\]
the free prime is now $3$ and the free gap is $16$.
\item$i=2$ : Since the gap between the two ends is $|23-5|=18\notin F_g({\cal Q})$, we apply ${\cal Q}={\cal Q}_{1}{\cal Q}_{2}\rightarrow {\cal Q}^*={\cal Q}_2{\cal Q}_{1}$ with ${\cal Q}_{1}=5-7-17-13-19-31-11$ and ${\cal Q}_{2}=29-37- 23$. We get
\[
{\cal Q}_{10}:=29\underset{8}{\text{\textendash\textendash}}37\underset{14}{\text{\textendash\textendash}}23\underset{18}{\text{\textendash\textendash}}5\underset{2}{\text{\textendash\textendash}}7\underset{10}{\text{\textendash\textendash}}17\underset{4}{\text{\textendash\textendash}}13\underset{6}{\text{\textendash\textendash}}19\underset{12}{\text{\textendash\textendash}}31\underset{20}{\text{\textendash\textendash}}11.
\]
\item $i=1$ : The gap $|29-13|=16$ is free and we apply the transformation ${\cal Q}={\cal Q}_{1}{\cal Q}_{2}\rightarrow {\cal Q}^*=\overline{{\cal Q}_1}{\cal Q}_{2}$ with ${\cal Q}_{1}=29-37-23-5-7$ and ${\cal Q}_{2}=17-13-19-31-11$. We get
\[
{\cal Q}_{10}:=17\underset{10}{\text{\textendash\textendash}}7\underset{2}{\text{\textendash\textendash5}}\underset{18}{\text{\textendash\textendash}}23\underset{14}{\text{\textendash\textendash}}37\underset{8}{\text{\textendash\textendash}}29\underset{16}{\text{\textendash\textendash}}13\underset{6}{\text{\textendash\textendash}}19\underset{12}{\text{\textendash\textendash}}31\underset{20}{\text{\textendash\textendash}}11,
\]
and the last free gap is $4$. We can place the prime $3$ between
$7$ and $5$ and we get an elegant path of eleven primes:
\[
{\cal Q}_{11}:=17\underset{10}{\text{\textendash\textendash}}7\underset{4}{\text{\textendash\textendash3}}\underset{2}{\text{\textendash\textendash5}}\underset{18}{\text{\textendash\textendash}}23\underset{14}{\text{\textendash\textendash}}37\underset{8}{\text{\textendash\textendash}}29\underset{16}{\text{\textendash\textendash}}13\underset{6}{\text{\textendash\textendash}}19\underset{12}{\text{\textendash\textendash}}31\underset{20}{\text{\textendash\textendash}}11.
\]
\end{itemize}
\end{itemize}
\end{document} |
\begin{document}
\title[The Quantum Steenrod Squares and Their Algebraic Relations]{A construction of the quantum Steenrod squares and their algebraic relations}
\author{Nicholas Wilkins}
\address{School of Mathematics, University of Bristol, Bristol BS8 1UG, UK, and Heilbronn Institute for Mathematical Research, Bristol, UK}
\email{[email protected]}
\date{version: \today}
\begin{abstract}
We construct a quantum deformation of the Steenrod square construction on closed monotone symplectic manifolds, based on the work of Fukaya, Betz and Cohen. We prove quantum versions of the Cartan and Adem relations. We compute the quantum Steenrod squares for all $\mathbb{CP}^n$ and give the means of computation for all toric varieties. As an application, we also describe two examples of blowups along a subvariety, in which a quantum correction of the Steenrod square on the blowup is determined by the classical Steenrod square on the subvariety.
\end{abstract}
\maketitle
\section{Introduction}
We begin with the background of the Steenrod squares. We will then mention quantum cohomology and the results in this paper.
The Steenrod squares are cohomology operations that are uniquely defined by a set of axioms, although this uniqueness does not include a construction. There are multiple ways of constructing the squares, one of which involves constructing the operations on $H^{*}(K(\mathbb{Z}/2,n);\mathbb{Z}/2)$ for Eilenberg-MacLane spaces $K(\mathbb{Z}/2,n)$.
For a topological space $M$, the Steenrod squares are additive homomorphisms $$ Sq^{i} : H^{n}(M) \rightarrow H^{n+i}(M)$$
using $\mathbb{Z}/2$ coefficients. They generalize the squaring operation on cohomology with respect to the cup product, $x \mapsto x \cup x$. The $\mathbb{Z}/2$ coefficients ensure that the $Sq^i$ are additive. These Steenrod squares together determine a degree doubling operation that we call the Steenrod square,
$$ Sq: H^*(M) \rightarrow H^*(M)[h],$$ where $$Sq(x) = \sum Sq^{|x|-i}(x) \, h^{i} .$$
Here $h$ is a formal variable in degree 1 that represents the generator of $$H^{*}(\mathbb{RP}^{\infty}; \mathbb{Z}/2)=\mathbb{Z}/2[h].$$
The Steenrod square satisfies the Cartan relation
\begin{equation} \label{equation:introcartan} Sq(x \cup y) = Sq(x) \cup Sq(y) \end{equation}
which, for example, allows one to inductively compute the Steenrod squares for the cohomology of $\mathbb{CP}^n$ (which we will review in Example \ref{exmpl:classcpn}). The Steenrod square also satisfies the Adem relations, which are relations between compositions of the $Sq^i$. Namely, for all $p,q>0$ such that $q<2p$,
\begin{equation} \label{equation:ademrel} Sq^{q}Sq^{p} = \sum_{s=0}^{[q/2]} {{p-s-1}\choose{q-2s}}Sq^{p+q-s} Sq^{s} \end{equation} where $[q/2]$ is the integer part of $q/2$. The Adem relations are classically implied by the axioms.
This paper begins in Section \ref{sec:preliminaries} with a preliminary section that explains in more detail the relevant background material.
We will then describe two different constructions of the Steenrod square in Section \ref{sec:morssteensqu}: the first construction uses Morse homology and the second uses intersections of cycles. The first construction is based on the definition for Floer theory by Seidel in \cite{seidel}, the origins of which are in the flowlines construction of Betz in \cite{betz}, and Fukaya in \cite{fukaya}, the former of which was extended to a more categorical definition by Betz, Cohen and Norbury in \cite{betzcoh,cohnor}. The second construction we give will be isomorphic to the first construction, using the isomorphism between Morse and singular cohomology.
After considering these constructions of the Steenrod square, in Section \ref{sec:SqQviaMorse} we extend them to define a quantum Steenrod square on the quantum cohomology of a closed monotone symplectic manifold $(M,\omega)$.
In Section \ref{subsec:quantcupprod} we will give details of the quantum cohomology $QH^*(M)$. Briefly, $QH^*(M,\omega)$ is $H^*(M)[[t]]$ as a vector space using a graded formal variable $t$ of degree $2$. However, the cup product is deformed by quantum contributions from counting 3-pointed genus zero Gromov-Witten invariants. That is, by counting certain $J$-holomorphic spheres in $M$ where $J$ is an almost complex structure on $M$ compatible with $\omega$. We often abbreviate by $T= t^N$ for $N$ the minimal Chern number.
The quantum Steenrod square will be a degree doubling operation, denoted $Q \mathcal{S}$, where \begin{equation} \label{equation:sqstatement} Q\mathcal{S} : QH^{*}(M) = H^{*}(M)[[t]] \rightarrow H^{*}(M)[[t]][h] = QH^{*}(M)[h]. \end{equation} As in the case of the classical Steenrod square, $Q\mathcal{S}$ will be built using additive homomorphisms $Q\mathcal{S}_{i,j}: QH^*(M) \rightarrow QH^{2*-i-2jN}(M)$, so $$Q\mathcal{S}(x) = \sum_{i,j \ge 0} Q\mathcal{S}_{i,j}(x) h^i T^j.$$
The quantum Steenrod square is not necessarily axiomatically defined, but a construction was first suggested by Fukaya in \cite{fukaya} based on his Morse homotopy theory. Our construction is different from Fukaya's, and can be viewed as a Morse theory analogue of the work by Seidel in Floer theory in \cite{seidel}. The first goal of this paper is to solve an open problem posed by Fukaya in \cite[Problem 2.11]{fukaya} as to whether the Adem and Cartan relations hold for quantum Steenrod squares and, if not, what their quantised versions should be. Our second goal is to explore consequences of the solution to this problem, specifically in computations for certain closed monotone symplectic manifolds.
In answer to the first part of Fukaya's problem, the immediate generalisation of the Cartan and Adem relations fail. In the case of the Cartan relation, this means that it is not in general true that $Q \mathcal{S}(x*y) = Q \mathcal{S}(x) * Q \mathcal{S}(y)$. We will show this in the following example.
\begin{exmpl}
\label{exmpl:difficulties}
In Definition \ref{defn:mqss} of the quantum Steenrod square, we will see that \begin{equation} \label{equation:qssttot2} Q \mathcal{S}(aT) = Q \mathcal{S}(a)T^2 \end{equation} for any $a \in QH^*(M)$.
Let $M = \mathbb{P}^{1}$. Let $x$ be the generator of $H^{2}(M)$. Recall that the quantum product is $x*x=T$, where $T$ has degree $4$. Then $$Q\mathcal{S}(x*x) = Q\mathcal{S}(T)=T^{2},$$ using \eqref{equation:qssttot2} and the fact that $Q \mathcal{S}(1) = 1$. Using degree reasons, knowledge of the classical Steenrod square for $\mathbb{P}^1$ and of the quantum cohomology ring, one can show that $Q\mathcal{S}(x) = xh^{2}+T$. Then $$Q\mathcal{S}(x)*Q\mathcal{S}(x) = (xh^{2}+T)*(xh^{2}+T) = Th^{4}+T^{2}.$$
Hence, in this case $Q \mathcal{S}(x) * Q \mathcal{S}(x) \neq Q \mathcal{S}(x*x)$.
\end{exmpl}
In Section \ref{sec:quancar} we will prove why the Cartan relation does not immediately generalise and compute the actual quantum Cartan relation. Briefly, the quantum Cartan relation is deformed because the moduli space $\overline{M}_{0,5}$ of genus zero stable curves with 5 marked points $(z_0,z_1,z_2,z_3,z_4)$ has non-trivial $\mathbb{Z}/2$-equivariant cohomology, under the $\mathbb{Z}/2$ action that transposes marked points via the permutation $(12)(34)$. More precisely, the two configurations in Figure \ref{fig:m05elmts} that determine $Q\mathcal{S}(x*y)$ and $Q\mathcal{S}(x) * Q\mathcal{S}(y)$ are not connected by a $\mathbb{Z}/2$-invariant path in $\overline{M}_{0,5}$.
We will prove that a quantum deformation of the Cartan relation holds:
\begin{thm}[Quantum Cartan relation]
\label{thm:quancar}
$$Q\mathcal{S}(x*y) = Q\mathcal{S}(x)*Q\mathcal{S}(y) + \sum_{i,j} q_{i,j}(W_0 \times D^{i-2,+})(x,y)h^{i}$$ where the correction term is written in terms of linear homomorphisms $$q_{i,j}: H_*^{\mathbb{Z}/2}(\overline{M}_{0,5}) \otimes QH^*(M) \otimes QH^*(M) \rightarrow QH^*(M),$$ such that $q_{i,j}(W_0 \times D^{i-2,+})$ is nonzero only if $i \ge 2$ and $j > 0$. The $q_{i,j}$ will be defined precisely in Definition \ref{defn:qopn}.
\end{thm}
In the correction term, $$W_0 \times D^{i,+} \subset \overline{M}_{0,5} \times_{\mathbb{Z}/2} S^{\infty},$$ where $W_0 \subset \overline{M}_{0,5} \simeq Bl_{\{(0,0),(1,1),(\infty,\infty) \} } (\mathbb{CP}^1 \times \mathbb{CP}^1)$ is the exceptional divisor over $(0,0)$ (compare to Figure \ref{fig:w0}). The notation $D^{i,+}$ means the upper $i$-dimensional hemisphere in $S^i \subset S^{\infty}$. In fact, we are abusing notation as we are really interested in the homology class represented by $W_0 \times D^{i,+}$ in $H_*(\overline{M}_{0,5} \times_{\mathbb{Z}/2} S^{\infty})$, where we are using the singular homology.
In Section \ref{sec:computingqsstoric} we use Theorem \ref{thm:quancar} to calculate the quantum Steenrod squares for a Fano toric variety $M$, as proven in Theorem \ref{thm:SqQtoric}. Here, for $\mu \in H_2(M; \mathbb{Z})$ (which is a free $\mathbb{Z}$-module as a Fano toric variety $M$ is simply connected), let $\mu_2$ be the image of $\mu$ under $H_2(M ; \mathbb{Z}) \rightarrow H_2(M ; \mathbb{Z}/2)$. Denote by $x *_{ \mu, k} y$ the coefficient of $t^{kN}$ in the quantum product $x * y$, using spheres representing $\mu$. Let $N$ be the minimal Chern number, and $|t|=2$.
\begin{thm}
\label{thm:SqQtoric}
Let $M$ be a Fano toric manifold. For $b, x \in H^*(M)$ and $|x| = 2$,
\begin{equation} \label{equation:SqQtoric} q_{i,j}(W_0 \times D^{i,+})(b,x) = \sum_{j \ge 1} \sum_{k=1}^{j} \sum_{c_1(\mu) = 2kN} n(x,\mu_2) \cdot \left( Q\mathcal{S}_{|b|-i+2,j-k}(b) *_{\mu, k} x \right) \cdot t^{jN} \end{equation}
summing over a basis of $\mu \in H_2(M; \mathbb{Z})$, so $c_1(\mu) = 2kN \in \mathbb{Z}$ and if $\chi$ is some pseudocycle representative of $x$ then $n(x,\mu_2) := \# (\chi \bullet \mu_2) \in \mathbb{Z}/2$.
\end{thm}
For example, if $M = \mathbb{CP}^n$ then setting $b = x^i$ for the generator $x \in H^2(\mathbb{CP}^n)$, we will show in Lemma \ref{lem:qWcpn} that:
$$ q_{4i+2-2n,1}(W_0 \times D^{4i-2n,+})(x^{i},x) = {{i} \choose {n-i}} T, \text{ else } q_{i,j} = 0.$$ Hence
\begin{equation} \label{equation:qsforcpn} Q\mathcal{S}(x^{i}) = \sum_{j=0}^{i} \left( {{i} \choose {j}}+ \sum_{k=0}^{\lfloor n/2 \rfloor + 1} {{n-k}\choose{k}}\cdot {{i-(n+1-k)} \choose {j-k}} \right) x^{i+j} h^{2(i-j)}, \end{equation} where $x^p$ denotes the $p$-th quantum power of $x$. In particular, if $i+j \ge n+1$ in \eqref{equation:qsforcpn} then $x^{i+j}$ refers to $x^{i+j - n - 1} T$. Omitting the inner summation would give the classical Steenrod square.
\begin{corollary}
\label{corollary:fanotoricdecided}
Let $M$ be a Fano toric manifold. Then if we can compute $QH^*(M)$ (over the Novikov ring as in \cite[Section 9.2]{jhols}) then we can compute $Q\mathcal{S}$ through recursive calculations.
\end{corollary}
In Section \ref{sec:QAR} we extend the Adem relations from Equation \eqref{equation:ademrel} to the quantum Steenrod square. In order to state the quantum Adem relation, we next introduce operations $Q\mathcal{S}^{a,b}: QH^*(M) \rightarrow QH^*(M)$, such that the sum of these $Q\mathcal{S}^{a,b}$ is the total quantum Steenrod square $Q \mathcal{S}$. The index $a$ is the change in homological degree, and the index $b$ is the change in the index of $T$.
\begin{defn}
Define $Q\mathcal{S}^{a,b}$ by $$Q\mathcal{S} (x T^i) = \sum_{a,b \in \mathbb{Z}} Q\mathcal{S}^{a,b} (x T^i) \cdot h^{|x| - 2N(b+i) - a} \quad \textrm{where} \quad Q\mathcal{S}^{a,b}(xT^i) \in T^{b+i} H^{|x|+a}(M),$$ for any $x \in H^*(M)$.
\end{defn}
Computing $Q\mathcal{S}^{a,b}$ for $\mathbb{CP}^2$, we find that the naive generalisation of the Adem relation (Equation \eqref{equation:ademrel}), namely $$\sum_{b,d} \left( Q\mathcal{S}^{q-2bN,b} \circ Q\mathcal{S}^{p-2dN,d}(\alpha) - \sum_{s=0}^{[q/2]} {{p-s-1}\choose{q-2s}} Q\mathcal{S}^{p+q-s-2bN,b} \circ Q\mathcal{S}^{s-2dN,d}(\alpha) \right) = 0,$$ does not hold, as in the example below.
\begin{exmpl}
Let $M = \mathbb{CP}^2$, so $2N = 6$. Then $Q\mathcal{S}^{2-2N,1} \circ Q\mathcal{S}^{2-0N,0}(x) = T$, but $$\sum_{s=0}^{s = 1} {{1-s}\choose{2-2s}} Q\mathcal{S}^{2+2-s-2iN,i} \circ Q\mathcal{S}^{s-2jN,j}(x) = 0$$ for all $i,j$.
\end{exmpl}
In order to prove the quantum Adem relation, we begin with the technical Theorem \ref{thm:QAR}. The terminology used in Theorem \ref{thm:QAR} will be fully defined in Section \ref{subsec:QAR}.
\begin{thm}
\label{thm:QAR}
For $M$ a closed monotone symplectic manifold, with $\alpha \in QH^*(M)$, and for $p,q>0$ such that $q<2p$:
$$qq_{|\alpha|+p-q,|\alpha|-p}(\alpha) = \sum_{s=0}^{[q/2]} {{p-s-1}\choose{q-2s}} qq_{|\alpha|+2s-p-q,|\alpha|-s}(\alpha).$$
\end{thm}
The homomorphism $$qq: H^*(M) \rightarrow QH^*(M) \otimes H^*(BD_8),$$ where $D_8$ is the dihedral group. This $qq$ operation will include the data of the composition $Q \mathcal{S} \circ Q \mathcal{S}$. The ring $H^*(BD_8)$ has three generators, labelled $e, \sigma_1, \sigma_2$ (of which we only need to consider $e$ and $\sigma_2$). We then denote by $qq_{i,j}(\alpha)$ the coefficient of $e^i \sigma_2^j$ in $qq(\alpha)$, defined in Equation \eqref{equation:qqijdef}.
This should be compared to equation \eqref{equation:ademrel}. The above theorem leads to a Corollary in more familiar terms:
\begin{corollary}[Quantum Adem Relations]
\label{corollary:QAR}
For $p,q > 0$ such that $q < 2p$, and $\alpha \in QH^*(M)$,
\begin{equation} \label{equation:QAR} \sum_{b,d} \left( Q\mathcal{S}^{q,b} \circ Q\mathcal{S}^{p,d}(\alpha) - \sum_{s=0}^{[q/2]} {{p-s-1}\choose{q-2s}} Q\mathcal{S}^{p+q-s,b} \circ Q\mathcal{S}^{s,d}(\alpha) \right) = T \cdot Q(\alpha) \end{equation} for the correction term
$$
\begin{array}{rcl}
T \cdot Q(\alpha) &=& q_{D_8}((g m_1 + g^2 m_1) \otimes \Psi (e^{|\alpha| + p - q} \sigma_2^{|\alpha|-p}))(\alpha)
\\[0.5em] &&- \sum_{s=0}^{[q/2]} {{p-s-1}\choose{q-2s}} q_{D_{8}}((g m_1 + g^2 m_1) \otimes \Psi (e^{|\alpha| +2s - p - q} \sigma_2^{|\alpha|-s}))(\alpha).
\end{array}
$$
\end{corollary}
In the above corollary, the dihedral group $D_8 = \langle (12), (13)(24) \rangle \subset S_4$ acts on the four incoming marked points $(z_1,z_2,z_3,z_4)$ by permutations. The operation $q_{D_8}$ is a linear homomorphism determined by homology classes in $\overline{M}_{0,5} \times_{D_8} ED_8$, so $q_{D_8}(A) : H^*(M) \rightarrow H^{4*-|A|}(M)$ for $A \in H_*(\overline{M}_{0,5} \times_{D_8} ED_8)$. It is analogous to the $q_{i,j}$ in Theorem \ref{thm:quancar}. Here $m_1 \in \overline{M}_{0,5}$ (see Figure \ref{fig:m05elmts}), $e^i \sigma_2^j \in H^*(BD_8)$ and $\Psi: H^*(\overline{M}_{0,5} \times_{D_8} ED_8) \rightarrow H_*(\overline{M}_{0,5} \times_{D_8} ED_8)$ is the universal coefficients isomorphism. Let $g=(123) \in S_4$, such that the cosets of $D_8$ in $S_4$ are $D_8, gD_8, g^2 D_8$.
In Section \ref{sec:blowups}, we calculate $Q \mathcal{S}$ in the case of the blowups $M= Bl_Y(\mathbb{CP}^3)$ and $M= Bl_Y(\mathbb{CP}^1 \times \mathbb{CP}^1 \times \mathbb{CP}^1)$ where $Y$ is respectively the intersection of two quadrics and the intersection of two linear hypersurfaces. The setup here is similar to Blaier \cite{blaier}. Most of the squares can be computed using the methods from Section \ref{sec:computingqsstoric}. The new computation is of $Q \mathcal{S}_{1,1}$, which is given in the following theorem.
\begin{thm}
\label{thm:blowupID}
\begin{equation} \label{equation:blowupID1} \qquad Q\mathcal{S}_{1,1} = id : H^3(Bl_Y(\mathbb{CP}^3)) \rightarrow H^3(Bl_Y(\mathbb{CP}^3)) \end{equation}
and
\begin{equation} \label{equation:blowupID2} \qquad Q\mathcal{S}_{1,1} = id : H^3(Bl_Y(\mathbb{CP}^1 \times \mathbb{CP}^1 \times \mathbb{CP}^1)) \rightarrow H^3(Bl_Y(\mathbb{CP}^1 \times \mathbb{CP}^1 \times \mathbb{CP}^1)). \end{equation}
\end{thm}
Observe that $Q\mathcal{S}_{1,1}$ are quantum correction terms to the classical Steenrod square on the blowup $M$. They are determined by lifts of contributions to the classical Steenrod square on $Y$.
\section{Preliminaries}
\label{sec:preliminaries}
Henceforth we always work with coefficients in $\mathbb{Z}/2$, unless otherwise stated. For example $H^{*}(M)$ means $H^{*}(M;\mathbb{Z}/2)$.
\subsection{Equivariant Cohomology}
\label{subsec:equivcohom}
We follow \cite[Section 2]{seidel}.
\begin{defn}[Equivariant cohomology of a chain complex]
\label{defn:equcohom}
Let $(C^{\bullet},d)$ be a cochain complex over $\mathbb{Z}/2$. Suppose $(C^{\bullet},d)$ has a chain involution $\iota$, so $\iota: C^{\bullet} \rightarrow C^{\bullet}$ is a chain map with $\iota^2 = id_{C^{\bullet}}$. Let $h$ be a formal variable in grading $1$. The equivariant chain complex is $$(C^{\bullet}_{\mathbb{Z}/2} ,\delta) = (C^{\bullet}[h],d + h(id_{C} + \iota)).$$
Define $H^{*}_{\mathbb{Z}/2}(C) := H^*(C^{\bullet}_{\mathbb{Z}/2} ,\delta) $, the equivariant cohomology of $(C,d,\iota)$.
\end{defn}
\begin{defn}[Equivariant Cohomology of a manifold]
\label{defn:equivcohomman}
Let $N$ be a topological space with a continuous involution $\iota: N \rightarrow N$. Let $C = C^{*}(N)$ be the singular cochain complex of the topological space $N$. There is a $\mathbb{Z}/2$ action on $C^* (N)$ induced by $\iota$. As in Definition \ref{defn:equcohom}, the \textit{equivariant cohomology of $N$} is $H^* _{\mathbb{Z}/2}(N) := H^* _{\mathbb{Z}/2}(C^* (N))$.
\end{defn}
The important examples of this will be $M \times M$ with the involution swapping the factors and $M$ with the trivial involution. We will respectively denote the equivariant chains in this case by $C^{\bullet}_{\mathbb{Z}/2}(M \times M)$ (or in the case of Morse cohomology by $CM^{\bullet}_{\mathbb{Z}/2}(M \times M)$) and by $C^{\bullet}_{\mathbb{Z}/2}(M)$. Similarly, equivariant cohomology will be denoted respectively by $H^{\bullet}_{\mathbb{Z}/2}(M \times M)$ and by $H^{\bullet}_{\mathbb{Z}/2}(M)$.
\begin{rmk}
There is another description of $H^*_{\mathbb{Z}/2}(N)$ for a manifold $N$ with a continuous involution. Recall that $E \mathbb{Z}/2$ is the total space of the universal principal $\mathbb{Z}/2$-bundle (whose base is the classifying space $B\mathbb{Z}/2$): a contractible space with a free $\mathbb{Z}/2$ action, for example $E \mathbb{Z}/2 = S^{\infty}$ with the involution being the antipodal map. Then $$H^{*}_{ \mathbb{Z}/2}(N) := H^{*}(N \times_{\mathbb{Z}/2} E \mathbb{Z}/2).$$
This definition is equivalent to Definition \ref{defn:equivcohomman}. If we let $N = \{ pt \}$ then we obtain $pt \times_{\mathbb{Z}/2} S^{\infty} = S^{\infty} / (\mathbb{Z}/2) = \mathbb{RP}^{\infty}$, hence $$H^{*}_{\mathbb{Z}/2}(pt) = H^{*}(\mathbb{RP}^{\infty}) = \mathbb{Z}/2[h].$$
\end{rmk}
\subsection{The Steenrod Squares}
\label{subsec:theSqs}
For a reference, see \cite[Section 4.L]{algtop}.
The Steenrod square operations $\{ Sq^{i} \}$ are the unique collection of additive homomorphisms such that:
\begin{enumerate}
\item $Sq^{i} : H^{n}(M) \rightarrow H^{n+i}(M)$ for each $n \ge 0$ and topological space $M$,
\item Each $Sq^{i}$ is natural in $M$,
\item $Sq^{0}$ is the identity,
\item $Sq^{n}$ acts as the cup square on $H^{n}$, so $Sq^{|x|}(x) = x \cup x$,
\item If $n > |x|$ or $n < 0$ then $Sq^{n}(x) = 0$,
\item (Cartan relation) For each $n$, $$Sq^{n}(x \cup y) = \sum_{i+j=n} Sq^{i}(x) \cup Sq^{j}(y).$$
\end{enumerate}
Here $|x|$ is the cohomological grading of $x \in H^*(M)$. Recall that we use $\mathbb{Z}/2$ coefficients to ensure additivity: $(x+y) \cup (x+y) = x \cup x + y \cup y$ modulo 2. These $Sq^{i}$ together define a single operator, the ``total Steenrod square'' $$Sq:H^{*}(M) \rightarrow (H^{\bullet}(M)[h])^{2*},$$ where $Sq^{i}$ is the coefficient of $h^{|x|-i}$, so $Sq(x) = \sum_{i} Sq^{|x|-i}(x) \cdot h^i$. The cup product on $H^{n}(M)[h]$ is $(a \cdot h^i) \cup (b \cdot h^j) = (a \cup b) \cdot h^{i+j}$, so the Cartan relation becomes $Sq(x \cup y) = Sq(x) \cup Sq(y)$ and thus $Sq$ is a unital ring homomorphism. We will henceforth call $Sq$ the ``Steenrod square'' when there is no ambiguity, noting that it contains the same information as $\{ Sq^i \}$.
One must note that although these axioms imply that there is a unique Steenrod square, there are many different approaches to constructing them.
\begin{exmpl}[The classical Steenrod square for $\mathbb{CP}^{n}$]
\label{exmpl:classcpn}
$$H^{*}(\mathbb{CP}^{n}) \cong \mathbb{Z}/2 [x] / (x^{n+1})$$ where $|x| = 2$. We see that $Sq^{0}(x) = x$ and $Sq^{2}(x) = x^{2}$ using axioms $3$ and $4$, and these are all of the nonzero terms by axiom $5$. Hence $Sq(x) = xh^{2} + x^{2}$. By the Cartan relation (axiom 6), $$Sq(x^{i}) = Sq(x)^{i} = (xh^{2} + x^{2})^{i} = x^{i} \sum_{j=0}^{i} {{i}\choose{j}} x^{j} h^{2(i-j)}.$$ Looking at the coefficient of $h^{2i-k}$, $Sq^k(x^i) = 0$ for $k$ odd and $Sq^{2j}(x^i) = {{i}\choose{j}} x^{i+j}$.
\end{exmpl}
\subsection{The Betz-Cohen Construction}
\label{subsec:prelimbcncon}
The details are relevant for Section \ref{subsec:tmssissq}.
Fix a Morse-Smale function $f$ on $M$, and pick a small convex neighbourhood $U_f$ of $f$ in $C^{\infty}(M)$ consisting of Morse-Smale functions. Let $\Gamma$ be the $Y$-shaped graph, oriented and parametrised as $(-\infty,0] \vee_0 [0,\infty) \vee_0 [0,\infty)$. We denote $S$ to be the set of triples $\sigma = (f_{1,s},f_{2,s},f_{3,s})$ such that $f_{1,s} \in U_f$ for each $s \in (-\infty,0]$ and $f_{2,s}, f_{3,s} \in U_f$ for $s \in [0,\infty)$, subject to:
\begin{enumerate}
\item $f_{1,0}, f_{2,0}, f_{3,0}$ are pairwise distinct.
\item $f_{i,s} = \beta(|s|) f_{i,0} + (1-\beta(|s|)) f$, where $\beta: [0,\infty) \rightarrow [0,1]$ is a fixed monotone bump function such that $\beta(s) =1$ for $s \le 1/2$ and $\beta(s) = 0$ for $s \ge 1$.
\end{enumerate}
We define $\mathcal{M}_{\sigma}$ to be the set of continuous maps $\gamma: \Gamma \rightarrow M$ that are smooth on the edges, such that for each edge $E_i$ of $\Gamma$ we denote $\gamma_i = \gamma|_{E_i}$, and require $$d \gamma_i / dt (s) + \nabla f_{i,s}(\gamma_i(s)) = 0.$$ This is actually slightly different to the construction in \cite{betzcoh}, in which the $f_1,f_2,f_3$ were pairwise distinct and had no $s$ dependence. The construction due to Betz-Cohen is equivalent to that given here, as we are simply using a deformation retraction of their moduli space of metric Morse flows.
Let $\mathcal{M}_{BC}= \sqcup_{\sigma \in S} \mathcal{M}_{\sigma}$, topologised so that $\mathcal{M}_{BC} \rightarrow S$ is continuous. Observe that there is a $\mathbb{Z}/2$-action $\iota_S$ on $S$, induced by the permutation $(23)$. This induces a $\mathbb{Z}/2$-action on $\mathcal{M}_{BC}$, via $(\sigma, \gamma) \mapsto (\iota_S \circ \sigma, \gamma \circ R_{\Gamma})$. Here, $R_{\Gamma}$ is the involution on $\Gamma$ that swaps the two positive half-lines and fixes the negative half-line.
For $a_1,a_2, a_3 \in \text{crit}(f)$, define $\mathcal{M}_{BC}(a_1,a_2,a_3)$ to consist of equivalence classes of pairs $[\sigma, \gamma] \in \mathcal{M}_{BC}/(\mathbb{Z}/2)$ such that $${\displaystyle \lim_{s \rightarrow -\infty}} \gamma_1(s) = a_1, \ {\displaystyle \lim_{s \rightarrow \infty}} \gamma_2(s) = a_2, \ {\displaystyle \lim_{s \rightarrow \infty}} \gamma_3(s) = a_3.$$
The space $S$ is contractible and has a free $\mathbb{Z}/2$-action, so $SB := S /( \mathbb{Z}/2)$ is homotopy equivalent to $\mathbb{RP}^{\infty}$. Thus, there are representatives $\delta_i$ of the nontrivial generator of $H_i(SB) \cong \mathbb{Z}/2$ for each $i$. Strictly, we consider some $\delta_i = \sum_{j} \tau_{i,j}$ where $\tau_{i,j}: \Delta_i \rightarrow S$ is a simplex. For each $i \ge 0$, let $$\mathcal{M}_{BC,i}(a_1,a_2,a_3) = {\displaystyle \bigcup_j} \tau_{i,j}^* \mathcal{M}_{BC}(a_1,a_2,a_3),$$ the union of the pullback of $\mathcal{M}_{BC}(a_1,a_2,a_3)$ along $\tau_{i,j}: \Delta_i \rightarrow S$, glued along faces.
Recall that in Morse theory, $CM^*(M \times M, f \oplus f)$ and $CM^*(M,f) \otimes CM^*(M,f)$ are identified via the K\"unneth isomorphism, where $$f \oplus f: M \times M \rightarrow \mathbb{R}, \quad (f \oplus f)(x,y) = f(x) + f(y).$$ One uses the correspondence between critical points of $f \oplus f$ and formal pairs of critical points of $f$, denoted $a \otimes b$ for $a,b \in \text{crit}(f)$. The isomorphism between Morse and singular cohomology respects the involution that swaps the factors, and hence we may replace the equivariant cohomology of $C^*(M) \otimes C^*(M)$, denoted $H^*_{\mathbb{Z}/2}(M \times M)$, with the equivariant cohomology of $CM^*(M,f) \otimes CM^*(M,f)$. One can think of this in terms of equivariant Morse cohomology, as detailed in \cite[Section 2]{seidelsmith}, where for the given $\mathbb{Z}/2$-action all of the necessary transversality conditions are satisfied.
Using the fact that the equivariant chains are $C^*_{\mathbb{Z}/2}(M \times M) = C^*(M \times M)[h]$, as well as using the previous paragraph, elements of $C^*_{\mathbb{Z}/2}(M \times M)$ may be written as a finite sum of $(b \otimes c)h^j$ for some $j \ge 0$ and $b,c \in \text{crit}(f)$. Let $a \in \text{crit}(f)$ and $\delta_i$ be the generator of $H_i(SB) \cong \mathbb{Z}/2$ (because $S$ is an $E \mathbb{Z}/2$).
We define $$q: H_i(SB) \otimes H^j_{\mathbb{Z}/2}(M \times M) \rightarrow H^{j-i}(M),$$ at the chain level, such that the coefficient of $a$ in $q(\delta_i \otimes (b \otimes c)h^k)$ is $\# \mathcal{M}_{BC,i-k}(a,b,c)$, when $\mathcal{M}_{BC,i-k}(a,b,c)$ is a collection of points. Let $q_i(b \otimes c) = q(\delta_i \otimes b \otimes c)$. One defines $$Sq^{|x|-i}(x) = q_i (x \otimes x).$$
\subsection{The Quantum Cup Product}
\label{subsec:quantcupprod}
For more details on the quantum cup product, see \cite[Chapter 8]{jhols}. Throughout this paper, for $M$ a closed $n$-manifold, we denote by $PD: H^*(M) \rightarrow H_{n-*}(M)$ and $PD: H_*(M) \rightarrow H^{n-*}(M)$ the Poincar\'e duality operation over $\mathbb{Z}/2$ coefficients.
Let $(M, \omega)$ be a monotone symplectic manifold of dimension $n$, with a fixed almost complex structure $J$ compatible with $\omega$.
\begin{defn}
A symplectic manifold $(M,\omega)$ is \textit{monotone} if the restriction to spherical homology classes of the cohomology class of $\omega$ is positively proportional to the first Chern class of $TM$. In other words, there exists a constant $\lambda > 0$ such that $$[\omega]|_{\pi_2(M)} = \lambda \cdot c_1(TM)|_{\pi_2(M)}.$$
\end{defn}
As an abelian group, $QH^{*}(M) = H^{*}(M)[[t]]$ where $t$ is a formal variable of degree $2$. Let $T = t^N$, where $N \geq 0$ is the minimal Chern number of $M$, determined by $c_{1}(\pi_{2}(M)) = N \mathbb{Z}$. By rescaling our symplectic form if necessary, we will assume that $\lambda = 1/N$, and so referring to a \emph{$J$-holomorphic map $u$ of energy $k$} means that $c_1(u_*[S^2]) = N \cdot [\omega](u)= kN$.
As an important note, we define the quantum cochains $$QC^*(M) := C^*(M) \otimes_{\mathbb{Z}/2} \mathbb{Z}/2[[T]].$$ Then $QH^*(M) = H^*(QC^*(M), d \otimes id)$, where $d$ is the differential on $C^*(M)$. Most of the operations that we consider are defined at the chain level, and then descend to maps on (co)homology.
We pick a basis $\mathcal{B}$ for $H^*(M)$ and a dual basis with respect to the nondegenerate cup product pairing $(e,f) \mapsto \langle e \cup f, [M] \rangle$. There is a dual basis $\mathcal{B}^{\vee}$ with respect to this pairing. Let $\alpha^{\vee} \in H^{n-|\alpha|}(M)$ denote the dual of the cohomology class $\alpha \in H^{|\alpha|}(M)$. Our operations on cohomology will not depend on this choice of basis, although they may affect the chain level description.
Given $A \in H_{2}(M)$, let $\mathcal{M}_{A}(J)$ be the moduli space of $J$-holomorphic spheres $u: S^2 \rightarrow M$ such that $u_{*}([S^2]) = A$, up to reparametrisation by $PSL(2,\mathbb{C})$. For a generic choice of $J$, this moduli space is a smooth manifold with $$\dim \mathcal{M}_{A}(J) = 2c_{1}(A) + \dim(M).$$ For each $z \in S^{2}$, there is an evaluation map $ev_{A,z} : \mathcal{M}_{A}(J) \rightarrow M$ with $ev_{A,z}(u) = u(z)$. Pick three distinct points, $z_{1}, z_{2},z_{3} \in S^2$. We use $0,1,\infty$ throughout, and denote $ev_{A} = ev_{A,0} \times ev_{A,1} \times ev_{A,\infty} : \mathcal{M}_{A}(J) \rightarrow M \times M \times M$.
\begin{defn}[Quantum Product]
\label{defn:quantumproduct}
Let $\alpha, \beta \in H^{*}(M) \subset QH^{*}(M)$. Pick generic pseudocycle representatives $a, b$ of the classes $PD(\alpha)$ and $PD(\beta)$ (so that they are transverse to the evaluation maps in the previous paragraph). Similarly, for each $\gamma \in \mathcal{B}$, we pick a representative $c^{\vee}$ of $PD(\gamma^{\vee})$. Denote by $a \times b \times c^{\vee}$ the product of these cycles, landing in $M \times M \times M$.
Then we define $$\alpha * \beta = \sum_{j \in \mathbb{Z}, \ \gamma \in \mathcal{B} : |\gamma| = |\beta| + |\alpha| - 2jN} n(\gamma, \alpha, \beta, j) \cdot \gamma \cdot T^j,$$ $$n(\gamma, \alpha, \beta, j) = \sum_{A \in H_2 (M) : c_{1}(A) = jN} ev_A \bullet (a \times b \times c^{\vee}).$$ Here $\bullet$ is the intersection number of pseudocycles of complementary dimension. Extending $\mathbb{Z}/2[[t]]$-linearly defines $*$ on $QH^*(M)$.
\end{defn}
Observe that for generic $J$, the evaluation map is a pseudocycle, \cite{jhols}. In order to show that this is well defined, one must prove that the outcome is independent of choice of pseudocycle representatives that we choose, such as in \cite[Lemma 7.1.4]{jhols}. The degree condition ensures that the pseudocycles are of complementary dimension. Notice that $|a*b| = |a|+|b|$, using that $|T|=2N$. If $A=0$ (so $E(u)=0$ and $u$ is constant), this recovers the classical intersection product.
\begin{rmk}
In concrete terms, in Definition \ref{defn:quantumproduct} we count the number of $J$-holomorphic spheres in $M$ intersecting some choice of pseudocycle representatives of the $PD(\alpha)$, $PD(\beta)$ and $PD(\gamma^{\vee})$. This can be thought of as the intersection $$ev_{A,0}^{-1}(a) \cap ev_{A,1}^{-1}(b) \cap ev_{A,\infty}^{-1}(c^{\vee})$$ in the space of $J$-holomorphic stable maps representing $A$.
\end{rmk}
\section{Two constructions of the Steenrod Squares}
\label{sec:morssteensqu}
The first construction will use Morse theory, and will be based on that given in \cite{seidel}, \cite{fukaya} and \cite{betzcoh}. The second construction is a generalisation of the first, involving pseudocycles. In this section $\Gamma$ is the Y-shaped graph with incoming edge $e_1$ and outgoing edges $e_2$ and $e_3$. Let $e_1$ be parametrised by $(-\infty, 0]$ and $e_2,e_3$ by $[0,\infty)$. This is illustrated in Figure \ref{fig:classmorsqu}.
Throughout this section, $M$ will be a smooth closed manifold. We recall the Morse theoretic cup product: given a Morse function $f$, pick three generic perturbations $f^1_{s}$ for $s \in (- \infty,0]$ and $f^2_{s}$, $f^3_{s}$ for $s \in [0,\infty)$ (so that they are ``transverse at $0$''). Making a generic choice ensures that the moduli space in Definition \ref{defn:morsecupprod} is cut out transversely: specifically, the genericity condition ensures that the moduli space is a smooth manifold. This is discussed in \cite[Chapter 5.2, Chapter 2]{schwarz} by Schwarz. It should be pointed out that the construction of the Morse theoretic cup product in the cited work uses three distinct fixed Morse functions $f^1,f^2,f^3$, rather than using perturbations $f^1_s,f^2_s,f^3_s$. Combining the case of Schwarz with the standard notion of continuation maps from $f^i$ to our fixed Morse function $f$, and applying a gluing argument, means that we can instead consider $s$-dependent functions on the edges. After applying such a gluing of continuation maps, the requirement that $f^1,f^2,f^3$ be chosen generically translates to requiring that $f^1_0,f^2_0, f^3_0$ be chosen generically, which is what we meant by ``transverse at $0$'' above. This idea is made precise in \cite[Section 2]{morsetrajectories}. With this in mind, we choose the $f^i_s$ such that there is an $R > 0$ with $f^i_{s} = f$ if $|s| \ge R$, so that we can apply Morse theoretic arguments outside of a compact neighbourhood of the vertex in $\Gamma$. Denote by $\text{crit}_k(f)$ the critical points of $f$ of Morse index $k$. Write $|x|$ for the Morse index of $x \in \text{crit}(f)$.
\begin{defn}[Morse cup product]
\label{defn:morsecupprod}
Let $a_{2},a_{3}$ be critical points of $f$, with respective Morse indices $|a_{2}|, |a_{3}|$ and let $k = |a_{2}|+|a_{3}|$, then $$a_{2} \cdot a_{3} := \sum_{a_{1} \in \text{crit}_k(f)} n_{a_1, a_2, a_3} a_1$$ where $n_{a_1, a_2, a_3}$ is the number of elements in the $0-$dimensional moduli space $\mathcal{M}(f^i_{s},a_{1},a_{2},a_{3})$ of continuous maps $u: \Gamma \rightarrow M$, smooth on the edges, such that:
\begin{enumerate}
\item $d (u|_{e_i})/ds = -\nabla f^i_{s}$,
\item $u|_{e_1}(x) \rightarrow a_1$ as $x \rightarrow -\infty$,
\item $u|_{e_i}(x) \rightarrow a_i$ as $x \rightarrow \infty$ for $i=2,3$.
\end{enumerate}
\end{defn}
\subsection{Morse Steenrod square}
\label{subsec:msss}
Henceforth, we will consider a nested sequence of spheres $S^{0} \subset S^{1} \subset \dots \subset S^{\infty}$, consisting of equators that exhaust $S^{\infty}$ and are preserved under the involution $v \mapsto -v$. Denote $$S^{\infty} = \{ (x_0,x_1,x_2, \ldots) \in \bigoplus_{i \ge 0} \mathbb{R} : \sum_i x_i^2 = 1 \}.$$ The subset $S^i \subset S^{\infty}$ consists of those elements of $S^{\infty}$ of the form $(x_0, \ldots, x_i, 0 ,\ldots)$.
We refine the choice of $f^i_{s}$ by picking a collection of smooth functions $f^i_{v,s}: M \rightarrow \mathbb{R}$, smoothly parametrised by $v \in S^{\infty}$ and $s \in (-\infty,0]$ for $i=1$, respectively $s \in [0,\infty)$ for $i=2,3$, satisfying the following conditions:
\begin{enumerate}
\item $f^2_{v,s} = f^3_{-v,s}$,
\item For each $i$, the smooth map $f^2_{\cdot,0}: S^{i} \times M \rightarrow \mathbb{R}$ must be chosen generically, with more details provided in Appendix \ref{subsec:mssrmks}.
\item There is an $R>0$ such that $f^i_{v,s} = f$ for all $|s| \ge R$ and $v \in S^{\infty}$.
\item $f^1_{v,s} = f^1_{-v,s}$ .
\end{enumerate}
Given $a_1, a_2, a_3 \in \text{crit}(f)$, and $v \in S^{\infty}$, we define $\mathcal{M}'_{v}(a_1, a_{2}, a_{3})$ to be the set of pairs $(u: \Gamma \rightarrow M, v)$ such that:
\begin{enumerate}
\item $d(u|_{e_i})/ds = -\nabla f^i_{v,s}$.
\item $u|_{e_1} (s) \rightarrow a_1$ as $s \rightarrow -\infty$ and $u|_{e_i} (s) \rightarrow a_{i}$ for $i=2,3$ as $s \rightarrow \infty$.
\end{enumerate}
Let $$\mathcal{M}'_{i}(a_1, a_{2},a_{3}) = \bigsqcup_{v \in S^{i}} \mathcal{M}'_{v}(a_1,a_{2}, a_3),$$ topologised as a subset of $C(\Gamma, M) \times S^i$ (where $C(\Gamma, M)$ is the space of continuous maps from $\Gamma$ to $M$ that are smooth on the edges). The projection to $S^i$ is then continuous for all $i$. Indeed, $\mathcal{M}'_{i}(a_1, a_{2},a_{3})$ is a smooth manifold for each $i$, by the genericity conditions as given in Appendix \ref{subsec:mssrmks}.
Let $r : \Gamma \rightarrow \Gamma$ be the reflection that swaps $e_2$ and $e_3$ (preserving parametrisations) and fixes $e_1$. If $a_2 = a_3$ as in Figure \ref{fig:classmorsqu}, there is a free $\mathbb{Z}/2$ action on the moduli space $\mathcal{M}'_{i}(a_1,a_2, a_2)$, via $$(u,v) \mapsto (u \circ r, -v).$$
Let $\mathcal{M}_{i}(a_1,a_2, a_2) = \mathcal{M}'_{i}(a_1, a_2,a_2)/(\mathbb{Z}/2)$, the quotient by the $\mathbb{Z}/2$ action. If $a_2 \neq a_3$, $$\mathcal{M}_{i}(a_1,a_2, a_3) = \bigsqcup_{v \in D^{i,+}} \mathcal{M}'_{v}(a_1,a_{2}, a_3) = \bigsqcup_{v \in D^{i,-}} \mathcal{M}'_{v}(a_1,a_{3}, a_2),$$ where $D^{i,\pm}$ is the upper/lower $i$-dimensional hemisphere in $S^i \subset S^{\infty}$. Observe that when $v \in \partial D^{i,\pm}$, there is no overcounting of solutions (when $a_2 \neq a_3$). This is because a solution for $v \in \partial D^{i,+}$, with asymptotics $a_1, a_2, a_3$, does not correspond to a solution for $-v \in \partial D^{i,+}$ with asymptotics $a_1, a_2, a_3$: the action $u \mapsto u \circ r$ swaps the $a_2$ and $a_3$ asymptotics. Indeed, when $a_2 \neq a_3$ the number of solutions for $v \in \partial D^{i,\pm}$ exactly corresponds to the $Sq'((a_2 \otimes a_3 + a_3 \otimes a_2)h)$ term in Equation \eqref{equation:Sq'chainmap}.
Consider the natural projection $\mathcal{M}_{i}(a_1, a_2,a_3) \rightarrow \mathbb{RP}^{i}$. Over a generic $v \in \mathbb{RP}^{i}$ there is a smooth manifold of dimension $|a_{1}| - |a_{2}| - |a_3|$, so the dimension of the moduli space is $$\text{dim}\mathcal{M}_{i}(a_{1},a_{2}, a_3) = |a_{1}| - |a_{2}| - |a_3|+ i.$$ This is an example of genericity in family Morse theory, as in \cite[Theorem 3.4]{hutchingsfamilies}, and as used in \cite[Equations (4.26), (4.95)]{seidel}.
\begin{figure}
\caption{Morse flowline configurations for the Steenrod square.}
\label{fig:classmorsqu}
\end{figure}
Before giving the definition, we recall the notation of Section \ref{subsec:equivcohom}. Specifically, given the chain complex $CM^{\bullet}(M,f)$ with the trivial action of $\mathbb{Z}/2$, one defines the $\mathbb{Z}/2$-equivariant Morse cohomology using the equivariant chain complex $CM_{\mathbb{Z}/2}^{\bullet}(M,f)$. Similarly, given the chain complex $CM^{\bullet}(M,f) \otimes CM^{\bullet}(M,f)$ (which we identify with $CM^{\bullet}(M \times M ,f \oplus f)$ via the K\"unneth isomorphism), there is the action of $\mathbb{Z}/2$ that swaps the two factors, and we denote the $\mathbb{Z}/2$-equivariant chain complex in this case $CM_{\mathbb{Z}/2}^{\bullet}(M \times M)$.
\begin{defn}[The Morse Steenrod Square]
\label{defn:mss}
Let $a_2, a_3 \in \text{crit}(f)$. This determines $a_2 \otimes a_3 \in CM_{\mathbb{Z}/2}^{\bullet}(M \times M)$. Define $$Sq': CM^{\bullet}_{\mathbb{Z}/2}(M \times M) \rightarrow CM_{\mathbb{Z}/2}^{\bullet}(M),$$ by $$Sq'(a_2 \otimes a_3) = \sum_{i =0}^{|a_2| + |a_3|} \sum_{a_1 \in \text{crit}_{|a_2| + |a_3|-i}(f)} n_{a_1, a_2, a_3,i} \cdot a_1 \cdot h^i$$ where $n_{a_1, a_2,a_3,i} = \# \mathcal{M}_{i}(a_1,a_2,a_3)$ for $\#$ the number of points modulo 2. Then extend $h$-linearly.
We then need to prove that $Sq'$ descends to a map on equivariant cohomology. To do this, we use a standard argument involving a $1$-dimensional moduli space (see for example \cite[Section 2.4, Section 5.3]{schwarz}, applied as in \cite[Proposition 1.9, Lemma 1.10]{fukaya}). We then consider its compactification, as covered in detail in Appendix \ref{sec:equivariantcompact}. This in turn shows that $Sq'$ is a chain map, i.e.: \begin{equation} \label{equation:Sq'chainmap} Sq'( (a_2 \otimes a_3 + a_3 \otimes a_2) h + (d a_2) \otimes a_3 + a_2 \otimes (d a_3)) = d Sq'(a_2 \otimes a_3). \end{equation}
Further, post-composing with the doubling operation $$\text{double}: CM^{*}(M) \rightarrow CM^{2*}_{\mathbb{Z}/2}(M \times M), \ a \mapsto a \otimes a,$$ which also descends to a map on equivariant cohomology, we define $$Sq := [Sq'] \circ [\text{double}].$$ Here $[-]$ denotes the cohomology level operation of the respective map of chains. This definition is independent of the choice of parametrised Morse functions by a standard continuation argument, such as in \cite[Section 3.4]{salamonfloer}.
The coefficient of $h^{|a|-i}$ in $Sq(a)$ is denoted by $Sq^{i}(a) \in H^{|a|+i}(M)$.
\end{defn}
\begin{propn}
\label{propn:propositionftw}
The homomorphism $Sq$ is additive, and satisfies axioms $1,2,4$ and $5$ from Section \ref{subsec:theSqs}.
\end{propn}
\begin{proof}
To prove additivity, observe first $Sq(x+y) = Sq(x) + Sq(y) + Sq'(x \otimes y + y \otimes x)$. Hence we must show that $[Sq'(x \otimes y + y \otimes x)] =0 $ when $dx = dy = 0$. In such a case, we see $d(x \otimes y) = (x \otimes y + y \otimes x) h$. Using that $Sq'$ is a chain map, it follows that $Sq'((x \otimes y + y \otimes x)h) =d Sq'(x \otimes y)$. As multiplication by $h$ is injective on $H^*_{\mathbb{Z}/2}(M) = H^*(M) \otimes H^*(B \mathbb{Z}/2)$, this shows that $[Sq'(x \otimes y + y \otimes x)]$ is exact, as required.
\textit{Axiom 1} is immediate from the definition of $Sq^{i}$.
\textit{Axiom 2} and naturality is true for the same reason as for the Morse cup product: see for example \cite[Section 2.1]{rot}.
For \textit{Axiom 4}, for $|y| = 2 |x|$ the coefficient of $y$ in $Sq^{|x|}(x)$ is the number of elements of the $0-$dimensional moduli space $\mathcal{M}_{0}(y,x,x)$. From the definition of $\mathcal{M}_{0}(y,x,x)$, and Definition \ref{defn:morsecupprod}, this number is the same as the coefficient of $y$ in $x^2$.
For \textit{Axiom 5}(1), $Sq^{i}(x) = 0$ for $i > |x|$ by definition, as only non-negative powers of $h$ are counted in Definition \ref{defn:mss}.
For \textit{Axiom 5}(2), $f_{v,s}$ is a perturbation of $f$. The perturbation may be chosen arbitrarily small in the $C^{2}$ topology. For generic $f$ there is no $-\nabla f$ flowline from $b$ to $a$ if $|b| < |a|$. As $f_{v,s}$ is close to $f$, this means that generically for any $v$ there is no `flowline' from $b$ to $a$ that has gradient $- \nabla f$ for $s < 0$ and $- \nabla f_{v,s}$ for $s > 0$. Hence $Sq^{i}(x) = 0$ for $i < 0$.
We verify Axiom $3$ in Section \ref{subsec:propofSq} and Axiom $6$ in Section \ref{subsec:Cartan}.
\end{proof}
\begin{rmk}
\label{rmk:msqaxioms}
Note that showing $Sq$ satisfies these axioms is not sufficient to show that it is indeed the Steenrod square, because we have not shown naturality under all continuous maps: this definition is only applicable for closed smooth manifolds. Nonetheless it provides a sanity check.
\end{rmk}
\begin{rmk}
\label{rmk:mssrmks} It is not straightforward to prove $Sq^{0} = id$ without a specific choice of Morse functions. We prove it in Section \ref{subsec:intss} using a different approach.
\end{rmk}
\subsection{The Morse Steenrod square is the Steenrod square}
\label{subsec:tmssissq}
Recall from Section \ref{subsec:prelimbcncon} the Steenrod square due to Betz and Cohen. Recall in Section \ref{subsec:msss} Definition \ref{defn:mss} of the Morse Steenrod square. We will show that these are the same.
In the previous section we chose $f^i_{v,s}$ for $(v,s) \in S^{\infty} \times [0,\infty)$ and $i=1,2,3$, such that $f^2_{v,s} = f^3_{-v,s}$. We abbreviate $f_{v,s} = f^2_{v,s}$ where appropriate, and observe we may choose $f_{v,0}$ distinct from $\{ f_{-v,0}, f \}$ for each $v$ (as $\text{Conf}_3(C^{\infty}(M))$ is open and dense in $(C^{\infty}(M))^3$, hence the condition is generic). Recall, from Section \ref{subsec:prelimbcncon}, that $S$ was a space consisting of triples $(f^1_{s},f^2_{s},f^3_{s})$, with each $f^p_{s} \in U_f$, a small neighbourhood of the Morse function $f$. Observe $S \xrightarrow{\simeq} \text{Conf}_3(C^{\infty}(M))$ is a $\mathbb{Z}/2$-equivariant homotopy equivalence, using the map $(f^1_{s},f^2_{s}, f^3_{s}) \mapsto (f^1_{0},f^2_{0}, f^3_{0})$, with the obvious homotopy inverse. Henceforth, assume $S = \text{Conf}_3(C^{\infty}(M))$. Let $SB = S / \langle (23) \rangle$, where the transposition $(23)$ acts on $S$ by permutation of the components. As remarked previously, $SB $ is homotopy equivalent to $\mathbb{RP}^{\infty}$.
There is a natural $\mathbb{Z}/2$-equivariant map $i: S^{\infty} \xhookrightarrow{} S$ induced by $v \mapsto (f,f_{v,0},f_{-v,0})$, which descends to $i: \mathbb{RP}^{\infty} \rightarrow SB$.
\begin{lemma}
$i_*: H_*(\mathbb{RP}^{\infty}) \rightarrow H_*(SB)$ is an isomorphism.
\end{lemma}
\begin{proof}
If $i$ is a weak homotopy equivalence then it induces an isomorphism on homology, see \cite[Proposition 4.21]{algtop}. As the two spaces are both homotopy equivalent to $\mathbb{RP}^{\infty}$ (which is a $K(\mathbb{Z}/2,1)$), it is sufficient to show that $$i_*: \pi_1(\mathbb{RP}^{\infty}) \cong \mathbb{Z}/2 \rightarrow \pi_1(SB) \cong \mathbb{Z}/2$$ is nontrivial.
Identify $S^1 \subset S^{\infty}$ with $\mathbb{R}/(2 \pi \mathbb{Z})$, parametrised by $\theta \in [0,2 \pi)$. Denote $f_v = f_{e^{iv},0}$. We wish to show that $\theta \mapsto [(f, f_{\theta/2}, f_{\theta / 2 + \pi})]$ determines a nontrivial loop, where $[\cdot]$ denotes the $\mathbb{Z}/2$-equivalence class. Observe that $\theta \mapsto (f, f_{\theta/2}, f_{\theta / 2 + \pi})$ is a path in $\text{Conf}_3(C^{\infty}(M))$ with different endpoints, hence the loop is not contractible.
\end{proof}
Consider Diagram \eqref{cohnorsquareandmorse}:
\begin{equation}\label{cohnorsquareandmorse}
\xymatrix{
H_*(\mathbb{RP}^\infty) \otimes H^*(M)
\ar@{->}_-{i_* \otimes id}^-{\cong}[d]
\ar@{->}^-{MSq}[rr]
&
&
H^*(M)
\ar@{->}^{=}[d]
\\
H_*(SB) \otimes H^*(M)
\ar@{->}^-{s}[r]
\ar@/_2.0pc/@{->}_{Sq}[rr]
&
H_*(SB) \otimes H^*_{\mathbb{Z}/2}(M \times M)
\ar@{->}^-{q}[r]
&
H^*(M)
}
\end{equation}
Here $s(A \otimes x) = A \otimes x \otimes x$, and the map $q$ is as in Section \ref{subsec:prelimbcncon}. We have reinterpreted the Morse Steenrod square from the previous section, here denoted $MSq$, to be a map $MSq: H_*(\mathbb{RP}^\infty) \otimes H^*(M) \rightarrow H^*(M)$, which we can do canonically as there is a unique graded basis of the homology of $\mathbb{RP}^{\infty}$. Observe that if we use the pushforward of the generator of $H_i(\mathbb{RP}^{\infty})$ by $i_*$ as the generator of $H_i(SB)$, then it is immediate that Diagram \eqref{cohnorsquareandmorse} commutes. Hence, Definition \ref{defn:mss} yields the Steenrod square.
\subsection{The Cartan Relation}
\label{subsec:Cartan}
Let $T$ be a family of graphs as in Figure \ref{fig:modspatree}, parametrised by $t \in (0, \infty)$. Edge $e_1$ is a negative half-line and edges $e_3,e_4,e_6,e_7$ are positive half-lines. Edges $e_2,e_5$ are parametrised by $[0,t]$. Compactify $T$ by adding the graphs at $0$ and $\infty$ as in the figure, to obtain the compactification $T^{c} \cong [0,\infty]$. Use edge labels as given in Figure \ref{fig:modspatree}. Fix a Morse function $f$ on $M$. The edge parameter in each case will be denoted by $s$.
\begin{figure}
\caption{Elements of $T^{c}$.}
\label{fig:modspatree}
\end{figure}
Pick 5 perturbations of $f$ corresponding to the 5 tree edges in $t=0 \in T^{c}$ in Figure~\ref{fig:modspatree}. These are $f^{p}_{v,s,0}$ for $p$ the edge label, $s \in \mathbb{R}^{\pm}$ and $v \in S^{\infty}$. We choose $f^{1}_{v,s,0}=f$ for all $s, v$. We ensure that $f^{3}_{v,s,0} = f^{4}_{-v,s,0}$ and $f^{6}_{v,s,0} = f^{7}_{-v,s,0}$ for all $v,s$. The choice of $f^p_{v,s,0}$ is made along with an $S_0 \in \mathbb{R}$ such that $f^p_{v,s,0} = f$ for $|s| \ge S_0$ and for all edge labels $p$.
Choose 7 perturbations of $f$ labelled $f^{p}_{v,s,t}$ for $p=1,...,7$ corresponding to the edge labels in Figure \ref{fig:modspatree}, where $t \in T^{c}$, $v \in S^{\infty}$ and $s \in \mathbb{R}^{+}$ for $p=3,4,6,7$, $s \in \mathbb{R}^{-}$ for $p=1$ and $s \in [0,t]$ for $p=2,5$. Choose $f^{1}$ to be independent of $s,v,t$ in this case. Choose Morse functions $f^{2}_{s,2},f^{5}_{s,2}$ for $s \in [0,2]$ such that $f^{p}_{s} = f$ for $s > 1$ and $p=2,5$. The $f^{p}$ must be chosen ``generically at each vertex of $\Gamma$", which is discussed in Appendix \ref{subsec:appendcartrel}. This ensures the transversality of the moduli spaces. The $f^{p}_{v,s,t}$ satisfy the following conditions:
\begin{enumerate}
\item $f^{p}_{v,s,t} = f^{p}_{v,s,0}$ as picked previously for $p = 1,3,4,6,7$ and for all $t$.
\item $f^2_{v,s,t},f^5_{v,s,t}$ are independent of $v$.
\item For $t \ge 2$ and $p= 2,5$: \ $\begin{cases} \begin{array}{l} f^p_{s,t} = f^p_{s,2} \text{ for } s \le 2, \\ f^p_{s,t} = f \text{ for } s \ge 2. \end{array} \end{cases}$ In particular, $f^p_{2,2} = f$.
\end{enumerate}
Fix $i \in \mathbb{N}$ and $x,y \in \text{crit}(f)$. Let $\overline{T} \xrightarrow{\cong} T^c$ consist of pairs $(|t|,t)$ where $t \in T^c \cong [0,\infty]$ and $|t|$ is the metric tree represented by $t$ as a topological space. The metric structure for $t \in [0,\infty)$ is that the outer edges are semi-infinite and parametrised by respectively $(-\infty,0]$ for the incoming edge and $[0,\infty)$ for the outgoing edges. The inner edges are of length $t$, parametrised by $[0,t]$. For the $t=\infty$ boundary, the metric structure on $|\infty|$ is that the edges attached to bivalent vertices are semi-infinite with the infinite end at the bivalent vertex.
For $z \in \text{crit}_{2|x|+2|y|-i}(f)$ consider the space $\tilde{\mathcal{M}}_{1}(x,y,z)$ of triples $(t,u,v)$ with $t \in T^c$, $u: |t| \rightarrow M$ a map and $v \in S^{|x| + |y| - i}$, such that $u$ satisfies:
$$\partial_{s}u_{s,t} = -\nabla f^{p}_{v,s,t}$$ along edge $p$, with asymptotic conditions $(z,x,x,y,y)$ on the exterior edges $(1,3,4,6,7)$. One needs to use an equivariant gluing theorem at the $t=\infty$ boundary, as discussed in Appendix \ref{sec:equivariantgluing}.
For generic $t \in T^{c}$ there is a $0$-dimensional subset of pairs $(u: |t| \rightarrow M, v \in S^{|x|+|y|-i})$ satisfying the conditions. So $\tilde{\mathcal{M}}_{1}(x,y,z)$ is 1-dimensional. Observe that $\tilde{\mathcal{M}}_{1}(x,y,z)$ has a free $\mathbb{Z}/2$ action, $(t,u,v) \mapsto (t,u \circ \overline{r},-v)$ for $\overline{r}$ acting on $|t|$ by the permutation of edges $(34)(67)$. Let $\mathcal{M}_{1}(x,y,z) = \tilde{\mathcal{M}}_{1}(x,y,z) / (\mathbb{Z}/2)$, which is still 1-dimensional.
We also define a moduli space $\tilde{\mathcal{M}}_{2}(x,y,z)$ by choosing another 7 Morse functions, labelled $f^{p}_{v,s,t}$ as above, but now with the conditions:
\begin{enumerate}
\item $f^{p}_{v,s,t}= f^{q}_{-v,s,t}$ for $(p,q)=(3,4),(6,7),(2,5)$,
\item $f^{p}_{v,s,t}$ is independent of $(v,t)$ for large enough $t$ and for $p = 1,3,4,6,7$,
\item $f^p_{v,s,t} = f$ for $p=1,3,4,6,7$ and $|s| \ge 1$.
\item For large enough $t$ and $s \in [1,t]$, $f^{2}_{v,s,t} = f^5_{v,s,t}= f$.
\end{enumerate}
\begin{figure}
\caption{Tree labelling for $\mathcal{M}_{2}$.}
\label{fig:modspatree2}
\end{figure}
In defining equations for pairs $(t,u,v) \in \tilde{\mathcal{M}}_{2}(x,y,z)$, use the edge labellings in Figure \ref{fig:modspatree2}, i.e. the edge labels $4$ and $6$ from Figure \ref{fig:modspatree} have been swapped. For each edge label the equations and asymptotic conditions are the same as in the $\tilde{\mathcal{M}}_{1}$ case. Further, there is a free $\mathbb{Z}/2$ action on $\tilde{\mathcal{M}}_{2}(x,y,z)$ similarly to $\tilde{\mathcal{M}}_{1}$ but with edge permutation $(25)(34)(67)$ (using the new edge labels in Figure \ref{fig:modspatree2}). Taking the quotient defines $\mathcal{M}_{2}(x,y,z) = \tilde{\mathcal{M}}_{2}(x,y,z)/ (\mathbb{Z}/2)$.
The following theorem is classical, and the following proof is a modification of \cite[Section 2, Example 2]{betzcoh} for our definition of the Steenrod square. The modification uses a cobordism argument as in \cite[Section 3.4]{salamonfloer}.
\begin{thm}[The Cartan Relation]
\label{thm:classicalcartan}
$$Sq^{i}(x \cup y) = \sum_{j+k=i} Sq^{j}(x) \cup Sq^{k}(y).$$
\end{thm}
\begin{proof}
The moduli space $\mathcal{M}_{1}(x,y,z)$ is a $1$-dimensional cobordism, corresponding to $[0,\infty]$, so $\# \partial \mathcal{M}_{1}(x,y,z) = 0$. Similarly $\# \partial \mathcal{M}_{2}(x,y,z) = 0$. The $t = \infty$ boundary of $\mathcal{M}_{1}(x,y,z)$ is the count of the contribution of $z$ in $$\sum_{j+k=i} Sq^{j}(x) \cup Sq^{k}(y)$$ (see Figure \ref{fig:cartanSqSq} and Lemma \ref{lemma:lemmaSqSq}). The number of points in the boundary at $t=0$ for $\mathcal{M}_{2}(x,y,z)$ is the same as for $\mathcal{M}_{1}(x,y,z)$, as follows: suppose that $(0,u, v)$ is a point in the $t=0$ boundary of $\mathcal{M}_{2}(x,y,z)$. The domain of $u$ consists of a parametrised graph $\Gamma'$ with an incoming edge labelled $1$, and four outgoing edges labelled $3,4,6,7$. Consider the automorphism $r': \Gamma' \rightarrow \Gamma'$ that acts by the permutation $(46)$ on the edges (without changing the parametrisation). Then $(0, u \circ r', v)$ is a point in the $t=0$ boundary of $\mathcal{M}_{1}(x,y,z)$, and as $r'$ is an involution we see that this is a bijective correspondence. Notice that as we are working with $\mathbb{Z}/2$-coefficients, we do not need to worry about changing the orientation of the moduli space.
The number of points in the $t=\infty$ boundary component of $\mathcal{M}_{2}(x,y,z)$ is the count of the contribution of $z$ in $Sq^{i}(x \cup y)$, by Lemma \ref{lemma:lemmaSqcup}. Hence, the bijection between the $t=0$ boundaries of the moduli spaces, along with the $1$-cobordisms assigned to $\mathcal{M}_{1}(x,y,z)$ and $\mathcal{M}_{2}(x,y,z)$, yield that $$\sum_{j+k=i} Sq^j(x) \cup Sq^k(y) = Sq^i(x \cup y),$$ as required.
\end{proof}
\begin{figure}
\caption{Flowline configurations for $Sq(x) \cup Sq(y)$. }
\label{fig:cartanSqSq}
\end{figure}
\begin{lemma}
\label{lemma:lemmaSqSq}
Summing over all choices of $w_1,w_2 \in \text{crit}(f)$, counting equivalence classes $[(u,v)] \in \mathcal{M}_1(x,y,z)$ satisfying the asymptotic conditions as shown in Figure \ref{fig:cartanSqSq}, yields the coefficient of $z$ in $\sum_{j+k=i} Sq^{j}(x) \cup Sq^{k}(y)$.
\end{lemma}
\begin{proof}
We have that $|w_1| + |w_2| = |z| = |x|+|y| + i$. Hence if $|w_1| = |x|+j$ and $|w_2| = |y| + k$ then $j+k = i$. Throughout fix $w_1$,$w_2$ for the configuration, as outputs of $Sq^{j}(x), Sq^{k}(y)$ respectively.
Restrict attention to the upper right-hand Y-shaped graph of Figure \ref{fig:cartanSqSq}. Suppose that we restrict the $v$ parameter space to $\mathbb{RP}^{|x|-j} \subset \mathbb{RP}^{|x|+|y|-i}$: in this case, counting $[(u,v)]$ satisfying the configuration conditions would be exactly the count of the coefficient of $w_1 \cdot h^{|x|-j}$ in $Sq^{j}(x)$, which we denote $n_{w_1}$. In our case, $v$ varies in the entirety of $\mathbb{RP}^{|x|+|y|-i}$, we call the set of such pairs $$\mathcal{U}_x = \left\{ [v,u] \biggr\vert \begin{array}{l} v \in S^{|x|+|y|-i} \text{ and } u: \Gamma \rightarrow M \text{ satisfies conditions as} \\ \text{illustrated in the upper right-hand graph of Figure } \ref{fig:cartanSqSq} \end{array}\right\}.$$ Here $[v,u]$ refers to taking the quotient by the $\mathbb{Z}/2$-action $(v,u) \rightarrow (-v,u \circ r)$ (where $r$ is the involution on the $Y$-shaped graph as seen previously). Similarly for the lower right-hand branch, for each $\mathbb{RP}^{|y|-k} \subset \mathbb{RP}^{|x|+|y|-i}$ there is a count of $n_{w_2}$, the coefficient of $w_2 \cdot h^{|y|-k}$ in $Sq^{k}(y)$. Define similarly $$\mathcal{U}_y = \left\{ [v,u] \biggr\vert \begin{array}{l} v \in \mathbb{RP}^{|x|+|y|-i} \text{ and } u: \Gamma \rightarrow M \text{ satisfies conditions as} \\ \text{illustrated in the lower right-hand graph of Figure } \ref{fig:cartanSqSq} \end{array}\right\}.$$
Let $n_{z, w_1, w_2}$ be the coefficient of $z$ in $w_1 \cup w_2$ (the chain level Morse cup product obtained by using the perturbed Morse functions $f^1_s, f^2_s, f^5_s$). This is obtained by counting elements of the zero dimensional set corresponding to configurations as in the left hand $Y$-shaped graph of Figure \ref{fig:cartanSqSq}. We will show that the contribution of configurations as in Figure \ref{fig:cartanSqSq} to the coefficient of $z$ is $n_{z, w_1,w_2} \cdot n_{w_1} \cdot n_{w_2}$.
Following \cite[Lemmas 4.2-4.5]{schwarzmorsesingiso}, suppose in fact that $x$ is a Morse cycle (specifically some sum of critical points, $\sum_i a_i \cdot x_i$ where $x_i \in \text{crit}(f)$). Then we may modify $\mathcal{U}_{x}$ to $\overline{\mathcal{U}_{x}},$ obtained by first taking the disjoint union of $a_i$ copies of $\mathcal{U}_{x_i}$ for each $i$ (defined as for $x$ above) and then adding in codimension $1$ strata, and identifying them in pairs (this can be done exactly because $dx = 0$). Here, the codimension $1$ strata correspond to the case when the $Y$-shaped graph undergoes a ``breaking" at one end. The outcome is then the union of a $Y$-shaped graph and an unparametrised flowline, such that:
\begin{itemize}
\item one of the $Y$-shaped graph's positively/negatively asymptotic critical points coincides with the flowline's negatively/positively asymptotic critical points. The other three asymptotic critical points are $w_1, x_i, x_i$ for some $i$.
\item the index difference between the asymptotic critical points of the unparametrised flowline is $1$.
\end{itemize}
Then observe that $\overline{\mathcal{U}_{x}}$ is a smooth manifold, \cite[Lemma 4.4]{schwarzmorsesingiso}. Let $$\pi_x : \overline{\mathcal{U}_{x}} \rightarrow \mathbb{RP}^{|x|+|y|-i}$$ be the projection onto the first coordinate. Then $\pi_x$ is a pseudocycle: specifically, consider $[v_n, u_n]$ such that $v_n$ converges in $\mathbb{RP}^{|x|+|y|-i}$ but $[v_n, u_n]$ has no convergent subsequence in $\overline{\mathcal{U}_{x}}$. By parametrised compactness of Morse flowlines we know that $[v_n, u_n]$ must have a convergent subsequence in the full compactification of $\mathcal{U}_{x}$: but that convergent subsequence must be in the codimension $2$ strata, as $\overline{\mathcal{U}_{x}}$ contains its codimension $1$ strata.
Observe also that by the second paragraph of the proof (i.e. knowing the intersection of $\pi_x$ with $\mathbb{RP}^{|x|-j}$, and in fact with any perturbation of $\mathbb{RP}^{|x|-j}$, is $n_{w_1}$) we deduce that $$\pi_x \bullet [\mathbb{RP}^{|x|-j}] = n_{w_1},$$ where $\bullet$ is the intersection number, hence $\pi_x$ is a weak representative of $n_{w_1} \cdot [\mathbb{RP}^{|y|-k}]$ (by which we mean that the intersection number of any cycle with $\pi_x$ is the same as with $n_{w_1} \cdot [\mathbb{RP}^{|y|-k}]$). Similarly the first projection $\pi_y :\overline{\mathcal{U}_y} \rightarrow \mathbb{RP}^{|x|+|y|-i}$ is a weak representative of $n_{w_2} \cdot [\mathbb{RP}^{|x|-j}]$.
The count of all solutions $[(u,v)]$ satisfying the configuration in Figure \ref{fig:cartanSqSq} is now $$n_{z, w_1,w_2} \cdot ( \pi_x \bullet \pi_{y}) = n_{z,w_1,w_2} \cdot n_{w_1} \cdot n_{w_2}.$$
Now recall from the definitions of $n_{w_1}, n_{w_2}$ that $$Sq^j(x) = \sum_{w_1 \in \text{crit}_{|x|+j}(f)} n_{w_1} w_1 h^{|x|-j}$$ and $$Sq^k(y) = \sum_{w_2 \in \text{crit}_{|y|+k}(f)} n_{w_2} w_2 h^{|y|-k}.$$ Then $$Sq^j(x) \cup Sq^k(y) = \sum_{w_1,w_2} n_{w_1} \cdot n_{w_2} \cdot w_1 \cup w_2,$$ and recalling that $n_{z,w_1,w_2}$ is the coefficient of $z$ in $w_1 \cup w_2$, the lemma is proved.
\end{proof}
\begin{lemma}
\label{lemma:lemmaSqcup}
The count for the $t=\infty$ boundary component of $\mathcal{M}_{2}(x,y,z)$ is the count of the contribution of $z$ in $Sq^{i}(x \cup y)$.
\end{lemma}
\begin{proof}
The edge and asymptotic conditions are as shown in Figure \ref{fig:cartanSqcup}. The edges attached to bivalent vertices are semi-infinite with the infinite end at the bivalent vertex, which is a critical point of $f$. For this operation, the $t=\infty$ boundary, we choose the perturbed Morse functions so that the two right-hand Y-shaped graphs use the same perturbations $f^3,f^6$ of $f$. Specifically, we may assume that $f^3$ and $f^6$ are independent of $v$. The number of such setups is then immediately the coefficient of $z$ in $Sq^{i}(x \cup y)$.
\end{proof}
\begin{figure}
\caption{Flowline configurations for $Sq(x \cup y)$.}
\label{fig:cartanSqcup}
\end{figure}
\subsection{Steenrod Squares via intersections of cycles}
\label{subsec:intss}
Recall that there are nested equators $S^{i} \subset S^{\infty}$, invariant under the antipodal action. Let $a \in H^{|a|}(M)$. Let $\mathcal{B}$ be a basis of $H^* (M)$.
In practice, we would like to work with representatives. A representative of a homology class $A$ is a pair $(X,\alpha)$, often denoted simply $\alpha$, where $X$ is a smooth compact manifold and $\alpha: X \rightarrow M$ is smooth such that $\alpha_*[X] = A$. We recall that over $\mathbb{Z}/2$-coefficients every homology class has a representative (see e.g. \cite[Theorem B]{buonhacon}). For notation, we will denote a homology class by $A$, $a$ will denote its Poincar\'e dual cohomology class, and $\alpha$ will be a representative as above. We will say that $\alpha$ represents a cohomology class $a$ if $\alpha$ represents its Poincar\'e dual homology class. Similarly for $b \in \mathcal{B}$, we denote $B = PD(b)$. As previously, we denote by $b^{\vee}$ the dual basis element to $b$ in the dual basis $\mathcal{B}^{\vee}$ of $H^*(M)$.
In order to link this definition to the previous definition, we will weaken our requirements below: in fact we only ask that $\alpha: X \rightarrow M$ is a pseudocycle representative of $a$. Note however that the definition will proceed identically in the cases where we can instead use either representatives or embeddings. Denote by $\beta^{\vee}: Y_b \rightarrow M$ a pseudocycle representative of $PD(b^{\vee})$.
We will choose some smooth manifold $X$, along with a sequence of smooth maps $\alpha_i: X \times S^i \rightarrow M \times S^i$ (for brevity we shorten $X_i := X \times S^i$) such that:
\begin{enumerate}
\item For $\pi_{2}: M \times S^i \rightarrow S^i$ the second projection, $\pi_{2}(\alpha_i(x,v)) = v$ for all $(x,v) \in X \times S^{i}$.
\item The restriction $\alpha_i |_{X_j} = \alpha_j$ for $j \le i$.
\item For $\pi_{1}: M \times S^i \rightarrow M$ the first projection, for any $v \in S^{i}$ the map \begin{equation} \label{equation:alphav} \alpha_v:= \pi_1 \circ \alpha_i|_{X \times \{ v \}} : X_v := X \times \{ v \} \rightarrow M \end{equation} is a pseudocycle representative of $A$ in $M$ (and is well defined by (2) above).
\item For $b \in \mathcal{B}$, in $M \times M \times M \times S^{i}$ we require: \begin{equation} \label{tripleintersection} (\Delta \times id) \pitchfork \euscr{W} \end{equation} where \begin{equation} \label{euscrw} \euscr{W}: Y_b \times X \times X \times S^i \rightarrow M \times M \times M \times S^i \end{equation} is defined by $(y, x,x',v) \mapsto (\beta^{\vee}(y),\alpha_i(x,v), \alpha_i(x',-v), v)$ and $$\Delta \times id: M \times S^i \rightarrow M \times M \times M \times S^i$$ is defined by $(z,v) \mapsto (z,z,z, v)$.
\end{enumerate}
The pseudocycles $\Delta \times id \text{ and } \euscr{W}$ in \eqref{tripleintersection} descend to pseudocycles $$[\Delta \times id]: M \times \mathbb{RP}^i \rightarrow M \times ((M \times M) \times_{\mathbb{Z}/2} S^i),$$ and $$[\euscr{W}]: Y_b \times ((X \times X) \times_{\mathbb{Z}/2} S^i) \rightarrow M \times ((M \times M) \times_{\mathbb{Z}/2} S^i),$$ respectively. Provided $|b| = 2 |a|- i$, define $n_{i,b,a} = [\Delta \times id] \bullet [\euscr{W}]$, the intersection of these two pseudocycles (of complementary dimension).
\begin{defn}[Steenrod Square]
\label{defn:miss}
Define $$Sq(a) = \sum_{i \in \mathbb{Z}, \ b \in \mathcal{B}, \ |b| = 2|a|-i} n_{i,b,a} b h^{i},$$ where the intersection numbers $n_{i,b,a}$ are counted modulo $2$.
\end{defn}
\begin{rmk}
To see that Definition \ref{defn:miss} is a good one, i.e. independent of the choice of $\alpha_i$ (all the other choices are immediately covered by pseudocycle theory, e.g. \cite{zinger}), observe that the given number of points $n_{i,b,a}$ in any given degree (by which we mean for any fixed choice of $S^i \subset S^{\infty}$) is obtained as the number of intersection points of two pseudocycles. A construction as in \cite[Lemma 3.2]{zinger} for two different choices of $\alpha_i$ yields a bordism of pseudocycles, meaning that the intersection number $n_{i,b,a}$ is independent of this choice.
\end{rmk}
\begin{rmk}
\label{rmk:ourdefinitionsthesame}
The Morse Steenrod square of Definition \ref{defn:mss} is the same as Definition \ref{defn:miss} using the isomorphism $HM^*(M,f) \cong H^*(M)$ that intertwines the Morse product and the cup product, in particular as described in \cite{schwarzmorsesingiso}.
Recall that for each $v$, and Morse cocycle $a = \sum n_i \cdot a_i$ ($n_i \in \mathbb{Z}$ and $a_i \in \text{crit}(f)$) there is a pseudocycle associated to the $s$-dependent Morse function $f_{v,s}$. The domain of this pseudocycle is constructed first by taking the spaces $W^s(a_i,f_{v,s})$ of smooth $u: [0,\infty) \rightarrow M$ such that $(du/ds)(s) = - \nabla f_{v,s}(u(s))$ and $u(\infty) = a_i$, the stable manifold under $f_{v,s}$. One then adds in the codimension $1$ strata of the standard Morse compactification, and then glues together the disjoint union of $n_i$ copies of each $W^s(a_i,f_{v,s})$, along the codimension $1$ strata, which one knows can be done because $da = 0$. We call this space $\overline{W}(a,f_{v,s})$. The map of this pseudocycle is (on the codimension $0$ strata) evaluation at $0$, denoted $E_v: \overline{W}(a,f_{v,s}) \rightarrow M$. Details are in \cite[Lemma 4.5]{schwarzmorsesingiso}, for the pseudocycle $\overline{W}(a,f)$ associated to the fixed Morse function $f$.
Recall that we chose $f_{v,s}$ in Section \ref{subsec:tmssissq}, based on Section \ref{subsec:prelimbcncon}. Specifically, they satisfy $f_{v,s} = \beta(s) f_{v} + (1-\beta(s))f$ (where $f_v$ is confined to a small contractible neighbourhood of Morse functions $U_f$ containing $f$). Observe that in this instance $f_{v,s} = f$ for $s \ge 1$. Recall that for each $v \in S^i$ there is a $1$-parameter family of diffeomorphisms $\phi_{v,s}: M \rightarrow M$ for $s \in [0,1]$, defined by $\phi_{v,0} = id$ and $$\partial \phi_{v,t} /\partial t |_{t=s}(x) = - \nabla f_{v,s}(x).$$ Then $W(a,f_{v,s}) = \phi_{v,1}^{-1}(W(a,f))$.
Hence, for each $i \in \mathbb{Z}_{\ge 0}$ we obtain an $\alpha_i: \overline{W}(a,f) \times S^i \rightarrow M \times S^i$, defined by $$\alpha_i(u,v) = (E_v \phi_{v,1}^{-1} u, v).$$ Then recalling the conditions we required from $\alpha$, earlier in Section \ref{subsec:intss}, we see that:
\begin{itemize}
\item condition $(1)$ holds,
\item condition $(2)$ is immediate because we define our map fibrewise for each $v$,
\item condition $(3)$ holds because of \cite[Lemma 4.5]{schwarzmorsesingiso},
\item condition $(4)$ holds because of condition $(2)$ at the beginning of Section \ref{subsec:msss}.
\end{itemize}
\end{rmk}
\begin{rmk}
\label{rmk:embeddedsubs}
As the definition in this section will be used as a computational tool for our purposes, for simplicity we will assume in certain places that our homology classes in $\mathcal{B}$ can be represented as embedded submanifolds: in this instance, we may replace an $\alpha_i: X \times S^i \rightarrow M \times S^i$ (which in such a case satisfies that $\pi_1 \alpha_i(\cdot,v): X \rightarrow M$ is an embedding for each $v \in S^i$) by $X_v := \pi_1 \alpha_i(X,v)$.
\end{rmk}
\begin{rmk}
Suppose that $\alpha$ is represented by an embedded submanifold $\mathcal{A} \subset M$. Then each $\alpha_i(X_i)$ cannot simply be $\{ (p,v) | p \in \mathcal{A}, v \in S^{i} \}$, because then transversality would not hold. More generally, we cannot assume that the pseudocycles $\alpha_i$ are independent of $v$. In the next section we construct a family of admissible choices of $\alpha_i$. However, we may take $B^{\vee} \times S^{i}$ to be such a ``standard representative''. This is analogous to how, in the Morse definition, $f^1_s$ is chosen to be independent of $v$.
\end{rmk}
\subsection{Properties of the Steenrod Square}
\label{subsec:propofSq}
As promised in Section \ref{subsec:msss} we now check Axiom 3 from Section \ref{subsec:theSqs}.
\begin{lemma}
\label{lemma:sq0baby}
$Sq^{0}(PD(pt))=PD(pt)$.
\end{lemma}
\begin{proof}
Let $n=\text{dim}(M)$. Write $a = PD(pt)$. We construct a representative of $\{ pt \} \times S^n$ in $M \times S^n$:
The submanifold $pt \subset M$ has trivialisable normal bundle, so the disc subbundle $D(pt)$ of the normal bundle $N(pt)$ embeds into $M$ as a small disc around $pt$. Let $S^{n}, D^n \subset \mathbb{R}^{n+1}$, where $$S^n = \biggl\{ (x_1,\ldots, x_{n+1} ) \in \mathbb{R}^{n+1} \biggm\vert \sum_{i} x_i^2 = 1 \biggr\}$$ and $$D^n = \biggl\{ (x_1,\ldots, x_{n+1}) \in \mathbb{R}^{n+1} \biggm\vert x_{n+1} = 0, \sum_{i} x_i^2 \le 1 \biggr\}$$ is the $n$-disc with the $(n+1)$-th coordinate $0$. There is a natural flattening map denoted $\phi':S^{n} \rightarrow D^{n}$, where $\phi'(x_1,\ldots, x_n, x_{n+1}) = (x_1,\ldots, x_n, 0)$ is projection of $S^n$ onto the first $n$ coordinates. Note that $\phi'$ is a double cover except on the equator, which is $\partial D^{n} \cong S^{n-1}$.
There is a diffeomorphism $D^{n} \cong D(pt) \subset M$. Composing $\phi'$ with this diffeomorphism defines $\phi : S^{n} \rightarrow M$. The map $\phi$ is homotopic to a constant map hence $\bigsqcup_{v \in S^n} (\phi(v), v)$, the graph of $\phi$ in $M \times S^n$, is cobordant to $\{ pt \} \times S^{n} \subset M \times S^n$. Specifically, we denote $\alpha_n : \{ pt \} \times S^n \rightarrow M \times S^n$ by $\alpha_n(pt, v) = (\phi(v),v)$, and this immediately satisfies most of the relevant properties of $\alpha_n$ from Section \ref{subsec:intss} (we will verify transversality after computing the points of intersection).
Observe that for $b \neq [M]^*$ (hence $PD(b^{\vee}) \neq [M]$), and for a general choice of the dual basis pseudocycles $\beta^{\vee}$, there is no intersection as in Statement \eqref{tripleintersection} (hence transversality holds trivially). To check transversality in the case where $b = [M]^*$, we pick $\beta^{\vee} = id_M : M \rightarrow M$. Then for $\Delta \subset M \times M$ the diagonal, $\Delta \times S^i$ intersects $\Phi := \sqcup_{v \in S^i} \{ \phi(v) \} \times \{ \phi(-v) \} \times \{v \}$ exactly when $\phi(v) = \phi(-v)$. We know there is exactly one such pair $\{ \pm v_0 \}$, where $v_0 = (0,\ldots,0,1) \in S^{n} \subset \mathbb{R}^{n+1}$.
To verify transversality, consider the tangent directions at $(\beta^{\vee}(x), \phi(v_0),\phi(-v_0), v_0)$ in $T(M \times M \times M \times S^i) = TM \oplus TM \oplus TM \oplus TS^i$. Those tangent directions in $0 \oplus 0 \oplus 0 \oplus TS^i$ and $T \Delta \oplus 0 \subset T(M \times M \times M) \oplus TS^i$ are all contained in $T(\Delta \times S^i) = T \Delta \oplus TS^i$. Similarly, as $\beta^{\vee} = id_M$ we obtain all tangent vectors in $TM \oplus 0 \oplus 0 \oplus 0$. It remains to show that we may obtain the rest of the tangent vectors of $T(M \times M \times M \times S^i)$. Observe that $d \phi(v_0) = - d \phi(-v_0)$ is nondegenerate, because $v_0 \not\in \phi'(\partial D^n)$. Hence in particular $\{ (\phi(v), \phi(-v)) \}_{v \in S^n} \subset M \times M$ intersects $\{(x,x)\}_{x \in M} \subset M \times M$ transversely at $(\phi(v_0),\phi(-v_0))$. This immediately implies transversality.
To calculate the coefficient of $a$ in $Sq^{0}(a)$, count the number of (pairs of) solutions to $\phi(v) = \phi(-v)$ modulo $\mathbb{Z}/2$. Recall from above there exists exactly one such (pair of) solutions $v = \pm v_0$. Taking this modulo the $\mathbb{Z}/2$ action gives $Sq^0(a) = a + ...$. The cycle $\{ a \}$ generates $H^n (M)$ so there are no more contributions to $Sq^0(a)$ for degree reasons.
\end{proof}
An easy generalisation of the above proof shows:
\begin{lemma}
\label{lemma:sq0}
For $x \in H^*(M)$, when $PD(x)$ is represented by an embedded submanifold $\chi$ then $Sq^{0}(x)=x$.
\end{lemma}
\begin{proof}
Let $x$ be as given in the statement. Proceed as in the previous lemma, but now $x = PD(X)$ for some cycle $X$. It is convenient to assume that $X$ is in a basis for the homology of $M$, with $PD(X^{\vee}) = x^{\vee}$ being the corresponding member of the dual basis under the intersection product. Similarly to above, for general pseudocycle representatives $\alpha: \chi \rightarrow M$ and $\alpha^{\vee}: Y \rightarrow M$ of $X, X^{\vee}$, we determine that $\alpha \cdot \alpha^{\vee}$ consists of a finite, odd number of points $\{ p_{i} \}$ (since $x \cdot x^{\vee} = 1$ mod $2$ by definition). In particular, this is true when $\alpha$ is the embedding of $\chi$. Moreover, this is true of any generic sufficiently small perturbation of $\alpha$, such as when the image of $\alpha$ is contained in a sufficiently small normal disc bundle of $\chi$.
Each of these $p_i$ has a small neighbourhood $U_{i} \subset M$ such that the normal bundle $N(\chi)$ of $\chi$ is trivial on $U_{i} \cap \chi$, with the $U_{i}$ being pairwise disjoint. Pick a bump function $\beta_{i}$ for each neighbourhood $U_{i}$. On the neighbourhood $U_{i}$, there is a diffeomorphism between the disc bundle and the trivial bundle $D(U_{i}) \cong (U_{i} \cap \chi) \times D^{n-\dim(\chi)}$. Using the tubular neighbourhood theorem, $N(\chi)$ and hence $(U_{i} \cap \chi) \times D^{n-\dim(\chi)}$ embeds into $M$ via a map $e$.
Hence there is a smooth map $\phi: \chi \times S^{n- \dim(\chi)} \rightarrow M$, such that if $x \in \chi$ is not in any $U_{i}$, then $\phi(x,v) = x$. Otherwise $x$ is in exactly one $U_{i}$ and we define $\phi(x,v): = e(x, \beta_{i}(x) \phi'(v))$, where $\phi': S^{n-\dim (\chi)} \rightarrow D^{n- \dim (\chi)}$ is the flattening map as in the previous lemma. This yields $\alpha_{n-\dim(\chi)}(x,v) :=\phi(x,v)$, recalling that $n-\dim(\chi) = |x|$. Consider the intersection modulo $\mathbb{Z}/2$, whose transversality is verified as in Lemma \ref{lemma:sq0baby}. The coefficient of $xh^{|x|}$ is obtained by using as the output cycle $\alpha^{\vee}(Y) \times S^{n-\dim(\chi)}$. By construction, such intersections only occur when the first coordinate is one of the $p_{i}$. At $p_{i}$, there is exactly one pair of solutions corresponding to the two solutions as in the previous claim: i.e. $\phi'$ is $2$ to $1$ on a dense open subset.
Take the quotient by the $\mathbb{Z}/2$ action to deduce that the number of contributions is an odd number (the number of $p_{i}$) multiplied by an odd number (the number of pairs of solutions at each $p_{i}$), hence is odd. Therefore $Sq^{0}(x) = xh^{|x|} + ...$. To show that there are no more terms in $Sq^0(x)$, repeat this with $S^{n-\dim(\chi)} \times B$ as the output cycle, for $B$ representing another element of the dual basis of homology. Strictly, to cover all cases at once we must choose pseudocycle representatives for every $B \in \mathcal{B}^{\vee}$. Then instead of considering $\{ p_i \}$, we now have $\{p_{B,i} \}$, where $B$ varies in $\mathcal{B}^{\vee}$, which are pairwise distinct. Define similarly pairwise disjoint $U_{B,i} \ni p_{B,i}$, and a map $\phi$ as previously. Then as $B \neq X^{\vee}$ is in the dual basis, a general pseudocycle representative of $B$ intersects $\chi$ with an even number of points. We count exactly as in the previous case, except the number of contributions is an even number (the intersection number of $B \cdot \chi$) multiplied by an odd number. Hence the count is even and the contributions due to other $B$ are $0$.
\end{proof}
\begin{corollary}
\label{corollary:trivialisable}
Let $A$ be a closed submanifold of $M$, with trivialisable normal bundle. Then $Sq^{i}(PD([A])) = 0$ for $i \neq 0$.
\end{corollary}
\begin{proof}
Use the embedding $e: A \times D^{n-\text{dim}(A)} \rightarrow M$ by inclusion of the unit disc bundle of $A$, which exists because $A$ has trivialisable normal bundle, to define $A_v$ for $v \in S^{n-i}$ for $i > 0$. Immediately no intersections occur for $i \neq 0$, as $A_v \cap A_{-v} = \emptyset$ for all $v \in S^{n-i}$.
\end{proof}
\begin{rmk}
More generally, for any immersed submanifold $A \rightarrow X$, consider the homology class $[A] \in H_*(X)$. Then $Sq^i (PD([A]))$ is the Stiefel-Whitney class $w_i(N_A X)$ (where $N_A X$ is the normal bundle of $A$ in $X$). An account of Stiefel-Whitney classes is given in \cite{stiefelwhitney}.
\end{rmk}
\section{Quantum Steenrod Square via Morse theory}
\label{sec:SqQviaMorse}
Let $M$ be a closed monotone symplectic manifold. The definition of the quantum Steenrod square uses a $Y$-shaped graph as with the Morse Steenrod square, but now allows for a $J$-holomorphic sphere at the trivalent vertex in the Y-shaped graph in the definition. This is a $J$-holomorphic sphere with $2+1$ marked points, and 2 incoming and 1 outgoing Morse flowlines from the respective marked points.
Make a choice of $f^p_{s,v}$ as in Subsection \ref{subsec:msss}, for $p=1,2,3$. Let $N$ be the minimal Chern number of $M$. Fix $i,j \in \mathbb{Z}_{\ge 0}$ and $a,b \in H^{*}(M)$ with $$|b| - 2 |a| + i + 2jN = 0.$$
Let $\mathcal{M}'_{i,j}(b,a)$ be the moduli space of pairs $(u,v)$, such that:
\begin{itemize}
\item $v \in S^{i}$,
\item $u: S^2 \rightarrow M$ is a simple $J$-holomorphic map of Chern number $jN$, i.e. \begin{equation} \label{equation:jvsholoc} du(z) = -J(u(z)) \circ du(z) \circ j_{S^2}(z), \end{equation} where $j_{S^2}$ is the standard almost complex structure on $S^2$,
\item the $-\nabla f^1_{s,v}$ flowline from $u(0)$ converges to $b$ as $s \rightarrow -\infty$ and the $-\nabla f^p_{s,v}$ flowline from $u(1), u(\infty)$ converge to $a$ as $s \rightarrow \infty$ for $p=2,3$ respectively.
\end{itemize}
There is a free $\mathbb{Z}/2$-action on this moduli space: $$\iota_{\mathcal{M}} (u,v) = (u \circ R, -v)$$ where $R$ is the unique M\"obius map in $PSL(2,\mathbb{C})$ swapping $1 \text{ and } \infty$ and fixing $0$. Let $$\mathcal{M}_{i,j}(b,a) = \mathcal{M}'_{i,j}(b,a) / \iota_{\mathcal{M}}.$$ The space $\mathcal{M}_{i,j}(b,a)$ is a smooth manifold of dimension $|b| - 2|a| + i + 2jN$. See Appendix \ref{subsection:transvholspheres} for a discussion of transversality for the equivariant case in the presence of pseudoholomorphic spheres.
\begin{defn}[Morse Quantum Steenrod Square]
\label{defn:mqss}
Pick a basis $\mathcal{B}$ of $H^*(M)$. Let $a \in H^*(M)$. For each $i,j$, let $$Q\mathcal{S}_{i,j}(a) = \sum_{b \in \mathcal{B} : |b| + i + 2jN = 2 |a|} \# \mathcal{M}_{i,j}(b,a) \cdot b,$$
$$Q\mathcal{S}(a) = \sum_{i,j} Q\mathcal{S}_{i,j}(a) \cdot h^{i}T^{j}.$$
Extend to a general element of $QH^{*}(M)$ by $Q\mathcal{S}(aT^{j}) = Q\mathcal{S}(a)T^{2j}$.
\end{defn}
The proof that $Q\mathcal{S}$ is an additive homomorphism is identical to Proposition \ref{propn:propositionftw}. First define $Q \mathcal{S}' : H^*_{\mathbb{Z}/2}(M \times M) \rightarrow QH^*(M) \otimes H^*(B \mathbb{Z}/2)$. This is identical to $Sq'$ from Definition \ref{defn:mss}, but one uses moduli spaces $\mathcal{M}_{i,j}(a_1,a_2,a_3)$ that have a $J$-holomorphic map $u:S^2 \rightarrow M$ in place of the intersection of the Morse flowlines. Then $Q \mathcal{S} = Q \mathcal{S}' \circ \text{double}$, and observe that $Q \mathcal{S}'(x_1 \otimes x_2 + x_2 \otimes x_1) = 0$.
\begin{rmk}
\label{rmk:propertiesqs}
For $a \in H^*(M)$, $$Q\mathcal{S}_{i,0}(a) = Sq^{|a|-i}(a)$$ as it counts constant spheres. Further, $$\sum_{j \ge 0} Q\mathcal{S}_{0,j}(a) T^j = a * a$$ is the usual quantum product.
\end{rmk}
\subsection{Quantum Steenrod Squares via intersections of cycles}
\label{subsec:qssintcyc}
Let $a \in H^{|a|}(M)$, and we pick a basis $\mathcal{B}$ of $H^*(M)$. Denote $\alpha = PD(a), \beta = PD(b)$ for $b \in \mathcal{B}$. We define a moduli space and evaluation maps analogously to Section \ref{subsec:quantcupprod}: given $j \in \mathbb{Z}_{\ge 0},$ consider $\mathcal{M}_{j}(J) \times S^{i}$ consisting of pairs $(u,v)$ where $u$ is a $J$-holomorphic map such that $u_*[S^2]$ has Chern number $jN$ and $v \in S^i$. Fixing $q \in \mathbb{CP}^1$, the evaluation maps are $ev_{q} \times id_{S^i}:\mathcal{M}_j (J) \times S^i \rightarrow M \times S^i$, which we abusively denote $ev_q$. Choose a sequence of maps $(\alpha_i)_{i=0}^{\infty}: X \times S^i \rightarrow M \times S^{i}$ as in Section \ref{subsec:intss}, satisfying conditions (1), (2) and (3) but we will modify (4). Firstly, let
$\mathcal{M}(j,J)$ be the space of $J$-holomorphic spheres of Chern number $jN$, with a $\mathbb{Z}/2$-action acting by $u \mapsto u \circ R$, where as in Section \ref{sec:SqQviaMorse} $R: S^2 \rightarrow S^2, \ R(z) = z/(z-1)$. Further, for $b \in \mathcal{B}$, and $i \in \mathbb{Z}_{\ge 0}$ we define:
$$\euscr{Y}_Q: Y_b \times ((X \times X) \times_{\mathbb{Z}/2} S^i) \rightarrow M \times ((M \times M) \times_{\mathbb{Z}/2} S^i)$$ by $$(y,((x,x'),[v])) \mapsto (\beta^{\vee}(y),[\alpha_i(x,v), \alpha_i(x',-v), v]),$$ and
$$ev: \mathcal{M}(j,J) \times_{\mathbb{Z}/2} S^i \rightarrow M \times ((M \times M) \times_{\mathbb{Z}/2} S^i)$$ is defined by $$[u,v] \mapsto (u(0), [u(1), u(\infty), v]).$$ The required condition (4) is then:
\begin{enumerate}
\setcounter{enumi}{3}
\item For $b \in \mathcal{B}$, and $i \in \mathbb{Z}_{\ge 0}$, the intersection of pseudocycles \begin{equation} \label{tripleintersectionquant} ev(\mathcal{M}(j,J) \times_{\mathbb{Z}/2} S^i) \cap \euscr{Y}_Q(Y_b \times ((X \times X) \times_{\mathbb{Z}/2} S^i)) \end{equation} is transverse in $M \times ((M \times M) \times_{\mathbb{Z}/2} S^{i})$.
\end{enumerate}
Given $i,j \in \mathbb{Z}_{\ge 0}$, for $|b| = 2 |a| - i -2jN$, the pseudocycles are of complementary dimension. Define $n_{i,j}(a,b)$ to be the intersection number of these pseudocycles.
\begin{defn}[Quantum Steenrod Square]
\label{defn:singqss}
For $a \in H^*(M)$ define $$Q\mathcal{S} : QH^{*}(M) \rightarrow QH^{*}(M)[h],$$ such that
$$Q\mathcal{S}(a):= \sum_{i,j \in \mathbb{Z}_{\ge 0}, \ b \in \mathcal{B}, \ |b| = 2|a|-i-2jN} n_{i,j}(a,b) \cdot b T^{j} h^i$$
with $Q\mathcal{S}$ a linear homomorphism. Then extend $Q\mathcal{S}$ linearly to $QH^*$ by requiring that $Q\mathcal{S}(a T^k) = Q\mathcal{S}(a) T^{2k}$. Also define $Q\mathcal{S}_{i,j}(a)$ as previously.
\end{defn}
As in the classical case this is equivalent to Definition \ref{defn:mqss}.
\subsection{Quantum Stiefel Whitney Class}
For a smooth compact manifold $M$, the classical Stiefel-Whitney class of $TM$, $w(TM)$, is constructed as in \cite[Section 5.3]{cohnor}, using a certain graph operation. We will not go into details. A more classical treatment is found in \cite{stiefelwhitney}.
Using the convention that $\langle ah,A \rangle = \langle a,A \rangle h$ for $a \in H^*(M), A \in H_*(M)$, one can use a gluing theorem as in \cite[Theorem 20]{cohnor}, or a direct argument to prove that:
\begin{lemma}
$$w(TM) = \sum_{y \in \mathcal{B}} Sq(y) \cdot \langle Sq(y^{\vee}),[M] \rangle.$$
\end{lemma}
\begin{proof}
Recalling that $w(TM) = Sq(v)$, where $v$ is the Wu class of $M$, it is sufficient to prove that \begin{equation} \label{equation:wTM} v = \sum_{y \in \mathcal{B}} y \cdot \langle Sq(y^{\vee}),[M] \rangle. \end{equation} Suppose that we write $v$ as an element of $H^*(M)[h]$, i.e. $$v = \sum_{y \in \mathcal{B}, \ i \ge 0} n_{y,i} \cdot y h^i.$$ Substituting this into the definition of $v$, i.e. $\langle Sq(b), [M] \rangle = \langle b \cup v, [M] \rangle$ for any $b \in H^*(M)$, we obtain that $$\langle Sq(b), [M] \rangle = \sum_{y \in \mathcal{B}, \ i \ge 0} n_{y,i} \cdot \langle b \cup y h^i, [M] \rangle.$$ For each $y \in \mathcal{B}$, let $b = y^{\vee}$. Hence $$\langle Sq(y^{\vee}), [M] \rangle = \sum_{i \ge 0} n_{y,i} \cdot \langle y^{\vee} \cup y h^i, [M] \rangle = \sum_{i \ge 0} n_{y,i} \cdot h^i \langle [M]^{*}, [M] \rangle = \sum_{i \ge 0} n_{y,i} \cdot h^i,$$ and \eqref{equation:wTM} follows.
\end{proof}
Let $M$ be a closed monotone symplectic manifold.
\begin{defn}[Quantum Stiefel-Whitney Class]
The Quantum Stiefel-Whitney class is $$w_Q(TM) := \sum_{y \in \mathcal{B}} Q\mathcal{S}(y) \langle Sq(y^{\vee}),[M] \rangle.$$
\end{defn}
It follows from this definition and a grading argument that:
\begin{lemma}
\label{propn:quantumstiefel4Ng2n}
If the minimal Chern number $N > (\dim M)/2$ then $w_Q(TM) = w(TM)$.
\end{lemma}
\begin{proof}
We will show that given the assumptions of this lemma, for every $y \in H^*(M)$, either $Q \mathcal{S}(y) = Sq(y)$ or $\langle Sq(y^{\vee}),[M] \rangle = 0$.
Suppose that $Q \mathcal{S}(y)$, which is of degree $2|y|$, has a summand containing some nontrivial power of $T$, which is of degree $2N$. This implies that $2|y| \ge 2N > \dim M$. Hence $|y| > (\dim M)/2$. Hence $|y^{\vee}| < (\dim M)/2$, and therefore for degree reasons there can be no summand of the form $[M]^* h^j$ in the expansion of $Sq(y^{\vee})$. Therefore $\langle Sq(y^{\vee}),[M] \rangle = 0$.
\end{proof}
\begin{corollary}
Let $M = \mathbb{CP}^n$. Then $w_Q(TM) = w(TM)$.
\end{corollary}
\begin{proof}
The minimal Chern number for $\mathbb{CP}^n$ is $N = n+1 > n = (\dim \mathbb{CP}^n) /2$. Now apply Lemma \ref{propn:quantumstiefel4Ng2n}.
\end{proof}
\section{The Quantum Cartan relation}
\label{sec:quancar}
We continue the discussion from Example \ref{exmpl:difficulties}. Consider the space $M^{\#}_{0,5}$ of 5 distinct marked points on the $2$-sphere, and let $$M_{0,5} = M^{\#}_{0,5} / PSL(2,\mathbb{C})$$ where the M\"obius group $G = PSL(2,\mathbb{C})$ acts diagonally on the 5 marked points. There are two different descriptions of $M_{0,5}$ that will be useful:
\begin{enumerate}
\item $\{ (z_{0},z_{1},z_{2},z_{3},z_{4}) \} / G$ of five distinct points modulo the action of $G$, reparametrising M\"obius maps.
\item $\{ (0,1, \infty, z_{3},z_{4}) \}$ with $z_{3},z_{4}$ distinct from each other and from $0,1,\infty$.
\end{enumerate}
The former description gives a simpler definition of the compactification, but the latter description is more useful when describing homology classes. Letting $z_{3},z_{4}$ vary in the description (2) yields a third description:
\begin{enumerate}
\setcounter{enumi}{2}
\item $$M_{0,5} \cong ((\mathbb{CP}^{1} - \{ 0,1,\infty \}) \times (\mathbb{CP}^{1} - \{0,1,\infty \})) - \Delta,$$ where $\Delta$ is the diagonal.
\end{enumerate}
Compactifying this space by adding stable genus $0$ nodal curves with $5$ marked points (there are 10 copies of $\mathbb{CP}^{1} - \{0,1,\infty \}$ and 15 points to add), one obtains a space $$\overline{M}_{0,5} \simeq Bl_{\{ (0,0),(1,1),(\infty, \infty) \}}(\mathbb{CP}^{1} \times \mathbb{CP}^{1}).$$ See \cite[Section D.7.]{jholssympl}. Then $\overline{M}_{0,5}$ is homotopy equivalent to $(\mathbb{CP}^{1} \times \mathbb{CP}^{1}) \# 3 (\overline{\mathbb{CP}^{2}})$, which means:
\begin{equation} \label{equation:m05coh} \begin{array}{l} H^{*}(\overline{M}_{0,5}) = \mathbb{F}_{2}[\delta_1, \delta_2,w_{0},w_{1},w_{\infty}] / I \\ I = (\delta_1^{2},\ \delta_2^{2}, \ w_{i}^{3},w_{i}^{2}+\delta_1 \delta_2 \text{ for all } i, \text{ and } \delta_i w_j \text{ for all } i,j, \text{ and } w_i w_j \text{ for } i \neq j) \end{array} \end{equation}
where $w_{i}$ corresponds to the exceptional divisor at $(i,i)$ and $\delta_1, \delta_2$ correspond to the spheres $\mathbb{CP}^{1} \times \{ pt \}$ and $\{ pt \} \times \mathbb{CP}^{1}$ respectively: thus all the generators have degree $2$. A treatment of this is \cite[Section D.7]{jholssympl}. Henceforth $W_i = PD(w_i) \text{ and } \Delta_j = PD(\delta_j)$ for $i=0,1,\infty$ and $j=1,2$.
Let $x,y,z$ be cohomology classes in $H^*(M)$. Let $\zeta: Z^{\vee} \rightarrow M$ be a pseudocycle representative of $PD(z^{\vee})$. There is a natural $\mathbb{Z}/2$ action on $\overline{M}_{0,5}$, induced by $(12)(34)$. Specifically, $$\iota : (z_{0},z_{1},z_{2},z_{3},z_{4}) \mapsto (z_{0},z_{2},z_{1},z_{4},z_{3}).$$ Then $\iota \times -\text{id}$ defines a free diagonal $\mathbb{Z}/2$ action on $\overline{M}_{0,5} \times S^{i}$ for each $i$. Define $$P_i := ( \overline{M}_{0,5} \times S^i) / (\iota \times -\text{id}).$$
Pick smooth maps $\chi_i: X_{i} \rightarrow M$ and $\gamma_i: Y_{i} \rightarrow M$, as in Section \ref{subsec:qssintcyc}, for the cohomology classes $x,y$. Then $\mathcal{M}'_{i,j}(x,y,z)$ consists of triples $(u,m,v)$, where $m$ is a $5$-pointed genus $0$ holomorphic nodal curve, and $u: |m| \rightarrow M$ is a smooth stable (nodal) $J$-holomorphic map representing a homology class of Chern number $jN$ (here $|m|$ refers to forgetting the marked points of $m$). The parameter space is $v \in S^{i}$. The map $u$ satisfies $u(z_{0}) \in \zeta(Z^{\vee})$, $u(z_{1}) \in \chi_v(X_{v})$, $u(z_{2}) \in \chi_{-v}(X_{-v})$, $u(z_{3}) \in \gamma_v(Y_{v})$ and $u(z_{4}) \in \gamma_{-v}(Y_{-v})$.
There is a $\mathbb{Z}/2$-action on $\mathcal{M}'_{i,j}(x,y,z)$, acting by:
\begin{equation} \label{equation:actiononmoduli} (u,m,v) \mapsto (u,\iota m,-v), \end{equation} recalling that $\iota$ acts, as on $\overline{M}_{0,5}$, by the permutation of marked points $(12)(34)$. Then the action \eqref{equation:actiononmoduli} is well defined because $|\iota m| = |m|$. There is also an action induced by reparametrisation: specifically, if $g \in PSL(2,\mathbb{C})$ acts on some holomorphic sphere $m^a$ of $m$, with corresponding $J$-holomorphic map $u^a: |m^a| \rightarrow M$, then \begin{equation} \label{equation:actionGonmoduli} g \cdot (u^a,m^a,v) = (u^a \cdot g^{-1}, g \cdot m^a, v). \end{equation} We denote $$\mathcal{M}_{i,j}(x,y,z)$$ the moduli space obtained after quotienting $\mathcal{M}'_{i,j}(x,y,z)$ by the actions in Equation \eqref{equation:actiononmoduli} and \eqref{equation:actionGonmoduli}.
There is a natural map $$\pi_{x,y,z}: \mathcal{M}_{i,j}(x,y,z) \rightarrow P_{i}, \ [u,m,v] \mapsto [\text{stab}(m),v],$$ where $\text{stab}(m)$ denotes taking the stabilisation of the $5$-pointed genus $0$ nodal curve $m$, which corresponds to an element of $\overline{M}_{0,5}$. The square brackets denote taking equivalence classes with respect to the actions of $PSL(2,\mathbb{C})$ and $\mathbb{Z}/2$.
\begin{rmk}
\label{rmk:evalmapquancar}
We can think of $\mathcal{M}_{i,j}(x,y,z)$ as the inverse image under the evaluation map $$ev: \overline{\mathcal{M}_{0,5}(J,j)} \times_{\mathbb{Z}/2} S^{i} \rightarrow M \times (M^{4} \times_{\mathbb{Z}/2} S^{i}),$$ $$ev([[u,(p_0, p_1...,p_4)],v]) = (u(p_0),[(u(p_1),...,u(p_4)),v])$$ where $\mathcal{M}_{0,5}(J,j)$ is the set of $5$-pointed $J$-holomorphic maps $u: \mathbb{CP}^{1} \rightarrow M$ of Chern number $jN$, where $m$ combines the information of $\mathbb{CP}^1$ along with the $5$ marked points. The space $\overline{\mathcal{M}_{0,5}(J,j)}$ is a partial compactification using stable nodal genus $0$ holomorphic curves that contain no repeated or multiply covered components. The $\mathbb{Z}/2$-action acts on the marked points by the permutation $(12)(34)$. Observe that one uses some large machinery, namely the gluing theorem for $J$-holomorphic curves (see \cite[Chapter 10]{jholssympl}), to show that this partial compactification of the space of simple maps has a fundamental class. Then given $x,y,z$ as previous to this remark, it is immediate from the definition that $ \mathcal{M}_{i,j}(x,y,z)$ is obtained by \begin{equation} \label{equation:lotsofpseudo} ev^{-1} \left( \zeta(Z^{\vee}) \times \left[ \bigcup_{[v] \in \mathbb{RP}^{i} } \alpha_v(X_{v}) \times \alpha_{-v}(X_{-v}) \times \gamma_v(Y_{v}) \times \gamma_{-v}(Y_{-v}) \times \{ v \} \right] \right). \end{equation} As in Appendix \ref{subsec:mssrmks}, we may interpret the expression between $\left(, \right)$ in Equation \eqref{equation:lotsofpseudo} as a pseudocycle. The square brackets $\left[, \right]$ in Equation \eqref{equation:lotsofpseudo} denote the equivalence class under the $\mathbb{Z}/2$ action. This allows us to calculate $\dim \mathcal{M}_{i,j}(x,y,z)$ as follows.
\end{rmk}
Let $Q$ be a closed submanifold of the parameter space $P_{i}$. Then $Q$ represents a cycle in $H_{*}(P_{i})$, such that: \begin{equation} \label{eq:dimension} \dim \pi_{x,y,z}^{-1}(Q) = |z| - 2|x| - 2|y| + \dim(Q) + 2jN. \end{equation} In particular, for $Q = P_{i}$, using $\dim(P_{i}) = 4+i$, $$\dim \mathcal{M}_{i,j}(x,y,z) = \dim \pi_{x,y,z}^{-1}(P_{i}) = |z| - 2|x| - 2|y| + i+4 + 2jN.$$
\begin{defn}
\label{defn:qopn}
Let $W$ be a cycle in $H_*(P_{i})$ with $i,j$ fixed, represented by a union of embedded closed submanifolds $\bigcup_{a \in A} Q_{a} \subset P_i$. Let $x,y \in H^*(M)$. Define $$q_{i,j}(W)(x,y) = \sum_{z : \dim \pi_{x,y,z}^{-1}(Q_a) = 0} \left( \sum_{a \in A} \# (\pi_{x,y,z}^{-1}(Q_a)) \right) \cdot zT^{j}$$ where the first sum is taken over basis elements $z$ of $H^{|z|}(M)$ such that Equation \eqref{eq:dimension} is $0$. Extending bilinearly over $\mathbb{Z}/2 [T]$, this defines a bilinear map $$q_{i,j}(W) : QH^{k}(M) \otimes QH^{l}(M) \rightarrow QH^{k+l-|W|}(M).$$
\end{defn}
\begin{lemma}
\label{lemma:additiveandindep}
The homomorphism $$q_{i,j}(W) : QH^{k}(M) \otimes QH^{l}(M) \rightarrow QH^{k+l-|W|}(M)$$ does not depend on the representative of $W$, and is additive.
\end{lemma}
\begin{proof}
Represent $W$ by a pseudocycle $\omega: U \rightarrow P_i$ (in the case of Definition \ref{defn:qopn}, we chose a union of embedded submanifolds). Observe that the coefficient of $z$ in $q_{i,j}(W)(x,y)$ is the intersection number of two pseudocycles. Using notation as previously, these are $\pi_{x,y,z}: \mathcal{M}_{i,j}(x,y,z) \rightarrow P_i$ and $\omega: U \rightarrow P_i$. We know that intersection numbers are independent of the choice of pseudocycle representative, i.e. $q_{i,j}(W)$ only depends on the homology class of $W$.
Then it is immediate that $q_{i,j}(W+W') = q_{i,j}(W) + q_{i,j}(W')$, as if $\omega: U \rightarrow P_i$ represents $W$ and $\omega': U' \rightarrow P_i$ represents $W'$ then consider $\omega'': U \sqcup U' \rightarrow P_i$, defined by $\omega''|_{U} = \omega$ and $\omega''|_{U'} = \omega'$. Then $\omega ''$ represents $W+W'$. The intersection numbers from the previous paragraph are additive.
\end{proof}
\begin{figure}
\caption{$m_{1}$ and $m_{2}$.}
\label{fig:m05elmts}
\end{figure}
\begin{rmk}
One likewise calculates the coefficients of $z T^j$ in $q_{i,j}(W)(x,y)$ (with notation as in Definition \ref{defn:qopn}) in the following way. Take the cup product of the class $\pi^* \rho$, where $\rho = PD(Q) \in H^* (P_i)$, with the pullback of $z^{\vee} \times x \times x \times y \times y$ under the evaluation map (specifically, the cup product takes place in $H^*(\overline{\mathcal{M}_{0,5}(J,j)} \times_{\mathbb{Z}/2} S^i)$). Integrate this over the equivariant fundamental class of $\overline{\mathcal{M}_{0,5}(J,j)}$.
\end{rmk}
In the following, use a cell decomposition for $S^{i}$ with cells $D^{i,\pm}$ in degree $i$, corresponding to the two hemispheres of dimension $i$. For $d$ the differential on cellular chains, $d(D^{i,\pm}) = D^{i-1,+} + D^{i-1,-}$.
The class of cases we consider is when $\dim(Q) = i$. If $m_{1}, m_{2} \in \overline{M}_{0,5}$ are as given in Figure \ref{fig:m05elmts}, then $m_{1}$ and $m_{2}$ are invariant under the $\mathbb{Z}/2$ action on $\overline{M}_{0,5}$. Hence $\{ m_{1} \} \times D^{i,+}$ and $\{ m_{2} \} \times D^{i,+}$, which are embedded submanifolds of $P_i$, represent well defined cycles in $H_*(\overline{M}_{0,5} \times_{\mathbb{Z}/2} S^{i})$. For $p=1,2$ call these cycles $Q_{p}^{i}$. To see that these cycles are indeed closed, observe that (for example using singular homology), if $X \subset \overline{M}_{0,5}$ is an embedded submanifold, then $X \times D^{i,+}$ represents some chain in $C_*(P_i)$. Then abusing notation (applying the K\"unneth isomorphism, and writing the submanifold $X$ instead of a sum of the simplices representing $X$), $$d([X \times D^{i,+}]) = [(dX) \times D^{i,+}] + [X \times (D^{i-1,+} + D^{i-1,-})] = [(X + \iota X) \times D^{i-1,+}].$$ The brackets $[,]$ represent that we have taken the quotient by $\mathbb{Z}/2$ of the chain complex $C_*(\overline{M}_{0,5} \times S^i)$. The last equality uses the $\mathbb{Z}/2$-action on $\overline{M}_{0,5} \times S^{i}$. Hence, if $X$ is a $\mathbb{Z}/2$-invariant closed submanifold, such as $\{ m_1 \}$ and $\{ m_2 \}$, then the chain represented by $X \times D^{i,+}$ is closed in equivariant homology.
Indeed, by the previous, for $i > 0$ the chain ``$\{ pt \} \times D^{i,+}$" is only a cycle when $pt$ is a fixed point of the $\mathbb{Z}/2$ action on $\overline{M}_{0,5}$. The space of fixed points $(\overline{M}_{0,5})^{\mathbb{Z}/2}$ is the disjoint union of a sphere containing $m_{2}$ and the single point $m_{1}$. See Remark \ref{rmk:z2actioncompact} at the end of this section for more details on this $\mathbb{Z}/2$-action.
\begin{lemma}
\label{lemma:lem1}
$$\sum_{i,j} q_{i,j}(Q_{1}^{i})(x,y)h^i = Q\mathcal{S}(x)*Q\mathcal{S}(y) \text{ and } \sum_{i,j} q_{i,j}(Q_{2}^{i})(x,y)h^i = Q\mathcal{S}(x*y).$$
\end{lemma}
\begin{proof}
For the rest of this proof, we fix $i,j$, and we show that $$q_{i,j}(Q_{1}^{i})(x,y) = [Q\mathcal{S}(x)*Q\mathcal{S}(y)]_{i,j}T^{j}.$$ To do this we proceed as in Lemma \ref{lemma:lemmaSqSq}, using $1$-dimensional moduli spaces, the ends of which count e.g. $\sum_{i,j} q_{i,j}(Q_{1}^{i})(x,y) \cdot h^i$ and $Q\mathcal{S}(x)*Q\mathcal{S}(y)$ respectively. This yields a bordism between the endpoints, with more details provided in Appendix \ref{subsec:bordismquantumcartantrans}.
Fixing some $i \in \mathbb{N}$ (the dimension of the sphere in which $v$ will vary), we consider the $1$-dimensional moduli spaces from Section \ref{subsec:Cartan}, denoted $\tilde{\mathcal{M}}_1(x,y,z)$ and $\tilde{\mathcal{M}}_2(x,y,z)$. Recall the spaces $T^c \cong [0,\infty]$, and $\overline{T} \rightarrow T^c$. We define quantum analogues $\tilde{\mathcal{M}}^Q_{p}(x,y,z)$ of the $\tilde{\mathcal{M}}_p(x,y,z)$ from Section \ref{subsec:Cartan} for $p=1,2$, where now each element of $\tilde{\mathcal{M}}^Q_{p}(x,y,z)$ is a triple $(t, u, v)$ where $t \in T^c$, $v \in S^i$ and $u: (|t|_Q,t) \rightarrow M$ is continuous, and smooth away from nodes. Here, $|t|_Q$ is obtained by taking the graph associated to $|t|$, and adding a sphere at each trivalent vertex (in such a way that the incoming edge of $t$ is attached at $0$ on the sphere, and the outgoing vertices are attached at $1,\infty$ respectively). We then require that $u$ is $J$-holomorphic on each sphere, satisfies the edge and asymptotic equations as in Section \ref{subsec:Cartan}, and the sum of the Chern numbers of the three spheres is $jN$. The $\mathbb{Z}/2$-action acts by $(u, v) \mapsto (u \circ \overline{r}, -v)$, where $\overline{r}$ acts on $|t|$ as in Section \ref{subsec:Cartan} and extends to the holomorphic spheres in the following ways:
\begin{itemize}
\item for $\tilde{\mathcal{M}}^Q_{1}(x,y,z)$, the involution $\overline{r}$ acts by $z \mapsto z / (z-1)$ on the two right holomorphic spheres and the identity on the left holomorphic sphere.
\item for $\tilde{\mathcal{M}}^Q_{2}(x,y,z)$, the involution $\overline{r}$ acts by $z \mapsto z / (z-1)$ on the left holomorphic sphere and the identity on the two right holomorphic spheres.
\end{itemize}
For the $t=0$ end of the moduli spaces, we instead consider for $\tilde{\mathcal{M}}^Q_{p}(x,y,z)$ continuous maps $u: |m_p|' \rightarrow M$. Here, $m_p$ are the elements of $\overline{M}_{0,5}$ as in Figure \ref{fig:m05elmts}, and $|m_p|'$ is obtained from the nodal sphere configuration $|m_p|$ associated to $m_p$, by attaching to $z_0$ the negative half-line $(-\infty,0]$ and to $z_1,z_2,z_3,z_4$ the positive half-line $[0,\infty)$. We then require the same conditions on the edges, the asymptotics and the energy of the nodal spheres. The $\mathbb{Z}/2$-action extends continuously from the $t \in (0,\infty]$ action. We let $\mathcal{M}^Q_{p}(x,y,z) = \tilde{\mathcal{M}}^Q_{p}(x,y,z) / (\mathbb{Z}/2)$ for $p=1,2$.
It is then immediate from the definition that counting the setups corresponding to the $t=0$ end of $\mathcal{M}_{p,j}(x,y,z)$ is the coefficient of $z$ in $q_{i,j}(Q_{p}^{i})(x,y)h^i$, for each $i$. Hence, it remains to show that counting the $t=\infty$ ends of $\mathcal{M}^Q_{p}(x,y,z)$ corresponds to the coefficient of $Q\mathcal{S}(x)*Q\mathcal{S}(y)$ and $Q\mathcal{S}(x*y)$ respectively for $p=1,2$. The proof of this is identical to Lemmas \ref{lemma:lemmaSqSq} and \ref{lemma:lemmaSqcup} respectively.
We do not need to worry about the bubbling off of extra spheres because $M$ is monotone. Specifically, any $J$-holomorphic bubble must have strictly less than $2$ marked points. Introducing ``phantom" marked points, we may consider this to be a $3$-pointed Gromov-Witten invariant corresponding to intersections with the Poincar\'e dual of $1 \in H^0(M)$. We know that, for a general choice of $J$, such Gromov-Witten invariants only contain contributions from constant spheres, as in \cite[Proposition 11.1.11(ii)]{jholssympl}.
\end{proof}
We now prove a slightly more general lemma for $\mathbb{Z}/2$-equivariant homology.
\begin{lemma}
\label{lemma:lem111}
Suppose that $M$ is a smooth connected manifold with a smooth $\mathbb{Z}/2$-action $\iota:M \rightarrow M$. Suppose that $W^n, L^{n-1} \subset M$ are submanifolds, fixed set-wise by $\iota$, of dimensions $n, n-1$ respectively, and representing respective homology classes $[W]$, $[L]$. Suppose further that $W = L \cup U \cup \iota U$ for some open submanifold $U \subset W$ such that $\partial \overline{U} = L$, where $\overline{U}$ is the closure of $U$ in $W$.
Then denoting $D^{i,+}$ for the upper $i$-dimensional hemisphere in $S^j$ ($i \le j$), the submanifolds $W \times D^{i-1,+}$ and $L \times D^{i,+}$ of $M \times S^j$ represent homologous elements of $H_*(M \times_{\mathbb{Z}/2} S^j)$ i.e. $$[W \times D^{i-1,+}] = [L \times D^{i,+}].$$
\end{lemma}
\begin{proof}
By the K\"unneth theorem, there is a quasi-isomorphism between $C_{\bullet}(M) \otimes C_{\bullet}(S^j)$ and $C_{\bullet}(M \times S^j)$, here using singular chains. In fact, we may replace the singular chain complex $C_{\bullet}(S^j)$ by the cellular chain complex, as the K\"unneth quasi-isomorphism is natural on chain complexes. In this cellular decomposition, there are two $i$-cells for each $0 \le i \le j$, such that one obtains a decomposition of $S^{j+1}$ from $S^j$ by attaching two $(j+1)$-cells along their boundaries at $S^j$.
Observe then that there is an involution on $C_{\bullet}(M) \otimes C_{\bullet}(S^j)$, which is the chain map $\phi := \iota_* \otimes (-id)_*$. We consider the homology of the complex $(C_{\bullet}(M) \otimes C_{\bullet}(S^j))/\phi$, the quotient of the complex $C_{\bullet}(M) \otimes C_{\bullet}(S^j)$ by $\phi$, with the differential being induced by the differential on the tensor product. Then we know that the K\"unneth isomorphism is natural (in particular, with respect to the action of $\phi$), hence $$H_*((C_{\bullet}(M) \otimes C_{\bullet}(S^j))/\phi) \cong H_*(C_{\bullet}(M \times S^j) / \phi).$$ The homology of the latter complex is isomorphic to the homology of $M \times_{\mathbb{Z}/2} S^j$, but we will represent chains using the former complex.
We will (abusively) denote by $W \times D^{i-1,+}$ the $\phi$-equivalence class of the chain (i.e. sum of simplices) corresponding to the submanifold $W \times D^{i-1,+} \subset M \times S^j$. Observe that $$d(\overline{U} \times D^{i,+}) = (d \overline{U}) \times D^{i,+} + \overline{U} \times (d D^{i,+}),$$ abusively also denoting by $d$ the differentials on all possible complexes. We know that $d \overline{U} = L$ by assumption. Further, $d D^{i,+} = D^{i-1,+} + D^{i-1,-}$. Hence $$d(\overline{U} \times D^{i,+}) = L \times D^{i,+} + \overline{U} \times (D^{i-1,+} + D^{i-1,-}).$$ Note that $$\begin{array}{lll} \overline{U} \times (D^{i-1,+} + D^{i-1,-}) &=& \overline{U} \times D^{i-1,+} + \overline{U} \times D^{i-1,-} \\ & =& \overline{U} \times D^{i-1,+} + \iota \overline{U} \times D^{i-1,+} \\ &=& (\overline{U} + \iota \overline{U}) \times D^{i-1,+}, \end{array}$$ using for the second equality that the chains represent elements of the complex quotiented by the involution $\phi$. But note that by definition the chains $\overline{U} + \iota \overline{U} = W$ (summing simplices, the boundaries match and cancel along $L$). Hence $$d(\overline{U} \times D^{i,+}) = L \times D^{i,+} + W \times D^{i-1,+},$$ as required.
\end{proof}
Let $A^{i} = Q_{1}^{i} - Q_{2}^{i}$. Let $W_{q}$ be the pullback under the blowdown $$Bl_{(0,0), (1,1), (\infty,\infty)}(\mathbb{CP}^{1} \times \mathbb{CP}^{1}) \rightarrow Bl_{(q,q)}(\mathbb{CP}^{1} \times \mathbb{CP}^{1}),$$ of the exceptional $\mathbb{CP}^{1}$ divisor in $Bl_{(q,q)}(\mathbb{CP}^{1} \times \mathbb{CP}^{1})$, for $q=0,1,\infty$. The elements of $W_0$ are given in Figure \ref{fig:w0}.
\begin{figure}
\caption{Elements of $W_0$}
\label{fig:w0}
\end{figure}
\begin{lemma}
\label{lemma:lem222}
$[W_0 \times D^{i-2,+}] = [\{ m_1 \} \times D^{i,+}] + [\{ m_2 \} \times D^{i,+}]$.
\end{lemma}
\begin{proof}
We use $M = \overline{M}_{0,5}$ and $W = W_0$, and $\iota = (12)(34)$. Observe that we may identify $W_0 \cong S^2$ with the extended complex plane, fixing $(z_0,z_3,z_4) = (0,1,\infty)$. The point $z \in \mathbb{C} \cup \{ \infty \}$ corresponds to the freely moving point on the four-pointed component of an element $m$ of $W_0$: specifically, the node connecting together the two components (i.e. copies of $S^2$) that together comprise $m$. The $\mathbb{Z}/2$-action on $\mathbb{C} \cup \{ \infty \}$ is then $z \mapsto z/(z-1)$. Let $L = \mathbb{R} \subset \mathbb{C} \cup \{ \infty \}$. By Lemma \ref{lemma:lem111}, we know that $$[W_0 \times D^{i-2,+}] = [L \times D^{i-1,+}].$$ Now observe that $L$ contains two fixed points, $m_1, m_3 \in W_0$, corresponding to the points $0, 2 \in \mathbb{R} \subset \mathbb{C} \cup \{ \infty \}$. Applying Lemma \ref{lemma:lem111} again, we obtain that $$[W_0 \times D^{i-2,+}] = [L \times D^{i-1,+}] = [\{m_1, m_3 \} \times D^{i,+}] = [\{m_1\} \times D^{i,+}] + [\{m_3\} \times D^{i,+}].$$
Hence it remains to prove that $[\{m_3\} \times D^{i,+}] = [\{m_2\} \times D^{i,+}]$. Recall we stated earlier (and will elaborate in Remark \ref{rmk:z2actioncompact}) that the fixed point set of $\overline{M}_{0,5}$ corresponds to the union of $\{ m_1 \}$ and a $2$-dimensional sphere. In particular, the points $m_3$ and $m_2$ can be joined by a path of invariant points, which we denote $l$. Then $d (l \times D^{i,+}) = \{m_3\} \times D^{i,+} + \{m_2\} \times D^{i,+}$, as required.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:quancar}]
By Lemmas \ref{lemma:lem222} and \ref{lemma:additiveandindep}, $q_{i,j}(\{ m_{1} \} \times D^{i,+}) = q_{i,j}(\{ m_{2} \} \times D^{i,+}) + q_{i,j}(W_{0} \times D^{i-2,+})$. Multiply by $h^{i}$ and sum over all $i,j$ and apply Lemma \ref{lemma:lem1}.
\end{proof}
\begin{lemma}
\label{lemma:lemqWtermscor}
The homomorphism $q_{i,j}(W_0 \times D^{i-2,+})$ is only nonzero for $i > 0, j >0$.
\end{lemma}
\begin{proof}
The $j=0$ case corresponds to $J$-holomorphic maps that are constant. Suppose that we have chosen input cocycles $x,y$ and a test output cocycle $z^{\vee}$, such that the moduli space used to calculate the coefficient of $z$ in $q_{i,j}(W_0 \times D^{i-2,+})(x,y)$ is $0$-dimensional. In particular, we require that $|z| = 2|x| + 2|y| - (i-2)$, using Equation \eqref{eq:dimension} and recalling that $\dim W_0 \times D^{i-2,+} = i$ and $j=0$. However, such setups in fact consist of pairs $(m \in W_0, (v,u))$ such that $(v,u)$ is a configuration as in the $t=0$ end of Figure \ref{fig:modspatree}. For generic choices of data, there do not exist any such $(v,u)$ (the space of such pairs is of virtual dimension $|z| - 2|x| - 2|y| + i = -2$), hence the moduli space is empty and therefore trivially transverse.
For the vanishing for $i=0$, observe that the $h^{i}$ terms correspond to calculating $q_{i,j}(W_0 \times D^{i-2,+})$ which vanishes for $i < 2$, as $S^{\infty}$ has no cells of negative dimension.
\end{proof}
We will verify Theorem \ref{thm:quancar} in the case of $\mathbb{CP}^{1}$.
\begin{exmpl}[$\mathbb{CP}^{1}$]
\label{exmpl:cp1calc}
Let $x$ be the generator of 2-dimensional cohomology. We verify that $$[Q\mathcal{S}(x)*Q\mathcal{S}(x)]_{i,j} = Q\mathcal{S}_{i,j}(x*x) + q_{i,j}(W_{0} \times D^{i-2,+})(x,x).$$ We know that there can only be contributions from $j>0$ and $i>0$, by Lemma \ref{lemma:lemqWtermscor}. In the cases where $j=0$ or $i=0$, we have already verified this using Example \ref{exmpl:difficulties}. For degree reasons, there cannot be any solutions for $j \ge 2$ or $i \ge 4$, and there cannot be solutions for $i=1,3$ (as $\mathbb{CP}^1$ has only even cohomology). Hence we need to consider only the cases $(i,j) = (4,1), (2,1)$.
For the case $(i,j) = (4,1)$, $$[Q\mathcal{S}(x) * Q\mathcal{S}(x)]_{4,1} = [T^{2} + h^{4}T]_{4,1} = h^{4}T$$ $$Q\mathcal{S}_{4,1}(x*x) = Q\mathcal{S}_{4,1}(T) = [T^{2}]_{4,1} = 0.$$ We then calculate $q_{4,1}(W_{0} \times D^{2,+})(x,x)$.
Pick representatives of $PD(x) \times S^{2}$ as follows: let $\phi : S^{2} \rightarrow D^{2}$ be the ``flattening map'' of the sphere, i.e. if $S^{2} \subset \mathbb{R}^{3}$ it is projection onto $\mathbb{R}^{2} \subset \mathbb{R}^{3}$. Pick two disjoint discs in $\mathbb{CP}^{1} = S^{2}$, call them $D$ and $D'$, and pick maps $\eta: D^2 \rightarrow D \xhookrightarrow{} \mathbb{CP}^{1}$ and $\eta': D^2 \rightarrow D' \xhookrightarrow{} \mathbb{CP}^{1}$ identifying $D^{2}$ with $D,D'$ respectively. Let $\psi = \eta \circ \phi: S^2 \rightarrow \mathbb{CP}^{1}$, and likewise $\psi' = \eta' \circ \phi: S^2 \rightarrow \mathbb{CP}^{1}$. Then two representatives of $PD(x)_{v} = \{ pt \}_{v}$ are $\psi(v)$ and $\psi'(v)$ where $v$ varies in $S^{2}$. Recall that elements of $W_{0}$ are as in Figure \ref{fig:w0}.
Every element of $W_0$ consists of two spheres, joined at a point, which in this discussion we call ``components": recall that one has three special points, and one has four. The space $S^2$ has minimal Chern number $N = 2$, so for $J$-holomorphic maps $u$ from elements of $W_0$ to $\mathbb{CP}^1$, the map $u$ may be non-constant on only one of the two components. Further, this $J$-holomorphic map must be degree 1 on the other component. It is immediate that the map must be constant on the component with three marked points (if it were constant on the other component, then the solution cannot be rigid as $z_{4}$ can vary freely), and the other sphere of $u$ has degree $1$. Then $u(z_{1}) = \psi(v)$ and $u(z_{2}) = \psi(-v)$ meet at the unique point on the sphere where $\psi(v) = \psi(-v)$. Hence there is one solution, and this solution gives the correction term $h^{4}T$.
For the case $(i,j) = (2,1)$, observe that the coefficient of $h^2$ in the correction term corresponds to using as the parameter space $D^{2-2,+} \subset S^2$, which is a single point: thus, we are simply performing a nonequivariant calculation. As we are calculating the coefficient of $T$, as in the previous paragraph we know that any $J$-holomorphic maps must be constant on the component with three special points. This setup then corresponds to deducing the coefficient of $x T$ in $(x*x) * (x \cup x)$, but $x \cup x = 0$ in $\mathbb{CP}^1$. Example \ref{exmpl:difficulties} provides the contributions $Q\mathcal{S}_{2,1}(x*x) = 0$ and $[Q\mathcal{S}(x)*Q\mathcal{S}(x)]_{2,1} = 0$.
\end{exmpl}
Henceforth, for brevity we will denote $$q(W)(x,y) := \sum_{i,j} q_{i,j}(W_0 \times D^{i-2,+})(x,y)h^i$$ for $x,y \in QH^*(M)$.
\begin{rmk}[Quantum Cartan in Classical Case]
Lemma \ref{lemma:lemqWtermscor} gives a sanity check that in the classical case, $Sq(x) \cup Sq(y) = Sq(x \cup y)$.
\end{rmk}
\begin{rmk}
\label{rmk:z2actioncompact}
We recall that the $\mathbb{Z}/2$-action $\iota$ on $M_{0,5}$ acts on the labels of the points by the permutation $(12)(34)$. Hence, suppose that $(0,1,\infty,z_3,z_4) \in M_{0,5}$. Then $\iota (0,1,\infty,z_3,z_4) = [0,\infty,1,z_4,z_3]$. This is no longer in the form of description $(2)$ of $M_{0,5}$. We must apply the element $R \in PSL(2,\mathbb{C})$ such that $R(z) = z/(z-1)$. Then $$[0,\infty,1,z_4,z_3] = [R0,R\infty,R1,Rz_4,Rz_3] = (0,1,\infty,z_4/(z_4 - 1), z_3/(z_3-1)).$$ Such a point in $M_{0,5}$ is fixed exactly when $z_3 = z_4/(z_4-1)$. This provides a $2$-dimensional family $F$ of fixed points of $\iota$, as $z_3$ varies in $S^2 - \{0,1,\infty,2 \}$.
Using description $(2)$ of $M_{0,5}$, the action $\iota$ extends in the obvious way to the compactification, by permuting edge labels of the marked points. Fixed points of the $\iota$-action on the compactification can be found in the limit as $z_3 \rightarrow 0,1,\infty,2$, assuming that $z_4 \rightarrow 0,\infty,1,2$ respectively. These four points compactify $F$ to a $2$-sphere that we denote $\overline{F}$. The point when $z_3 \rightarrow 1$ and $z_4 \rightarrow \infty$ is $m_2$.
We now use description $(1)$ of $M_{0,5}$. It can be deduced by inspection that there are no fixed points if exactly one of the pairs $(z_1, z_3), (z_2,z_4), (z_1,z_4), (z_2,z_3)$ collide. The collisions of $(z_1,z_2)$ are only fixed if they collide at $2$, which is covered above. The collision of $(z_3,z_4)$ is only fixed if it occurs at $2$, which is the single point when $(z_1,z_2)$ collide at $0$, which is counted above. It is an easy check that any point in a collision of $(z_0,z_i)$ is not fixed for $i=1,2,3,4$. Hence, the only other possibility to check is a collision when two pairs collide at the same time, say $(z_i,z_j),(z_k,z_l)$ for $(i,j,k,l)$ all distinct. Checking the cases, we see that there is a single point that has not yet been accounted for in $\overline{F}$, namely the point at which the pairs $(z_1,z_2)$ and $(z_3,z_4)$ collide simultaneously. This is the point $m_1$.
\end{rmk}
\section{Computing the Quantum Steenrod Square for toric varieties}
\label{sec:computingqsstoric}
In this section, we will use the intersection definition of the quantum Steenrod square (Definition \ref{defn:singqss}). We will require that $\alpha_v: X_v \rightarrow M$ is an embedded submanifold for each $v$ (and not just a pseudocycle), and we will abusively replace $\alpha_v(X_v)$ by $X_v$.
\subsection{Quantum Steenrod squares for $\mathbb{CP}^{n}$}
\label{subsec:computingqsscpn}
Let $x^{i}$ generate $H^{2i}(\mathbb{CP}^{n})$. By the quantum Cartan relation, Theorem \ref{thm:quancar}, $$Q\mathcal{S}(x^{i+1}) = Q\mathcal{S}(x^{i}) * Q\mathcal{S}(x) + q(W)(x^{i},x).$$ We can iteratively construct $Q\mathcal{S}(x^{i+1})$ as long as we know $q(W)(x^{i},x)$. Using a combination of degree reasons and Remark \ref{rmk:propertiesqs}, $Q\mathcal{S}(x) = x * x + xh^2$.
\begin{lemma}
\label{lem:qWcpn}
For $2i<n$, $q(W)(x^i,x)=0$. For $n \le 2i \le 2n$,
$$q(W)(x^{i},x) = {{i} \choose {n-i}} T h^{4i+2-2n}.$$
\end{lemma}
\begin{proof}
Recall that we make a generic choice of $C^{n-1}_v \subset \mathbb{CP}^n$, parametrised by $v \in S^{\infty}$, such that $C^{n-1}_v$ represents $PD(x) \in H_{2(n-1)}(\mathbb{CP}^n)$ for each $v$. Similarly, we choose $C^{n-i}_v \subset \mathbb{CP}^n$ representing $PD(x^i) \in H_{2(n-i)}(\mathbb{CP}^n)$ for each $v \in S^{\infty}$. Observe that $q(W)(x^i,x)$ has degree $4i+4$ so, by Lemma \ref{lemma:lemqWtermscor}, for $i=1,...,n$ we deduce that:
\begin{equation}
\label{equation:qW}
\begin{array}{rcl}
q(W)(x^i, x) & = & \sum_{j=n-i}^{i} m^{i+1}_{j} x^{i+j-n} T h^{2(i+1)-2j}
\\[2em]
& = &
m_{n-i}^{i+1} x^0 T h^{4i+2-2n}
+
m_{n-i-1}^{i+1} x^1 T h^{4i-2n}
+
\cdots
+
m_i^{i+1} x^{2i-n} T h^2
\end{array}
\end{equation}
where $m^{i+1}_{j}$ are coefficients and the degrees are $|x| = 2$, so $|x^{i}| + |x| = 2i+2$ and $|T| = 2(n+1)$. Equation \eqref{equation:qW} follows for grading reasons.
We claim that $m^{i+1}_j$, the coefficient of $x^{i+j-n} T h^{2(i+1)-2j}$, is the number of (unparametrised) $J$-holomorphic spheres that intersect both $\mathbb{CP}^{{i+j}-n}$ and some representative of $PD(Sq^{2j}(x^i))$. We proceed in the following steps:
\begin{enumerate}[i)]
\item Counting the coefficient of $x^{i+j-n} T h^{2(i+1)-2j}$ in $q(W)(x^i,x)$ is the same as counting setups as in Figure \ref{fig:qWcpn}(1) for $v \in D^{2i-2j, +}$ (recall $D^{2i-2j,+}$ corresponds to the $h^{2i-2j+2}$ term when defining $q(W)$ in Theorem \ref{thm:quancar}). Only $T^{1}$ appears in equation \eqref{equation:qW}, so one of the holomorphic bubbles has degree $0$ and the other degree $1$. For the solutions to be rigid, the sphere with the marked points $z_1, z_2$ must be constant (as in Example \ref{exmpl:cp1calc}). This yields the setup in Figure \ref{fig:qWcpn}(2).
\item Let $b$ be an element of the basis of cohomology, $\mathcal{B}$. The intersection of $C^{n-i}_{v}$ and $C^{n-i}_{-v}$ with some representative of $PD(b^{\vee})$, taken over all $v \in D^{2i-2j, +}$, is the coefficient of $b$ in $Sq^{2j}(x^{i})$ (by definition).
\item Suppose that we neglect the intersections with $C^{n-1}_{\pm v}$ in Figure \ref{fig:qWcpn}(2). Then we count the number of (unparametrised) $J$-holomorphic spheres $u:S^2 \rightarrow M$ that intersect:
\begin{itemize}
\item a representative of $PD((x^{i+j-n})^{\vee}) = PD(x^{2n-(i+j)})$ (an example of which is a copy of $\mathbb{CP}^{i+j-n} \subset \mathbb{CP}^n$) and
\item a representative of $PD(Sq^{2j}(x^{i}))$.
\end{itemize} We recall that $Sq^{2j}(x^{i}) = {{i} \choose {j}} x^{i+j}$ (see \cite[Section 4.L.]{algtop}). This implies that $PD(Sq^{2j}(x^{i})) = {{i} \choose {j}} PD(x^{i+j})$. Recall that $PD(x^{i+j})$ is represented by a copy of $\mathbb{CP}^{n-(i+j)}$. Our problem reduces to asking how many lines there are intersecting $\mathbb{CP}^{(i+j)- n }$ and $\mathbb{CP}^{n - (i+j) }$, and multiplying this by the coefficient ${{i} \choose {j}}$.
However, this only makes sense if $i+j-n \ge 0$ and $n-(i+j) \ge 0$ (both of the representatives must be of nonnegative dimension), hence $j=n-i$. In particular, the representative of $PD((x^{i+j-n})^{\vee})$ is a point, denoted $pt$. Further, there are a finite number (congruent to ${{i} \choose {n-i}} \text{ mod } 2$) of pairs $\{ v_k, -v_k \}$ such that $C^{n-i}_{v_k} \cap C^{n-i}_{-v_k} = pt_k \cong \mathbb{CP}^0$. For each $pt_k$ there is exactly one line between $pt_k$ and $pt$ (i.e. there is always exactly one line between any two points in $\mathbb{CP}^n$).
\item The homology class of each of the degree $1$ $J$-holomorphic spheres (the lines from the previous step) is the same homology class as that of $\mathbb{CP}^{1}$. Observe that $\mathbb{CP}^{1} \cap C^{n-1}_v = \{ pt'_v \}$ for each $v$. We make a generic choice of $C^{n-1}_v$ such that the $J$-holomorphic spheres are not contained in $C^{n-1}_{v}$ for generic $v$: specifically, we choose some hypersurface in $\mathbb{CP}^{n}$ not containing the finite collection of lines from step iii, and then require that $C^{n-1}_v$ is a $C^2$-small perturbation in $v$ from this hypersurface (to ensure transversality). Then for each pair $\{ v_k , -v_k \}$, the intersection of the line at $pt'_{v_k}$ fixes the parametrisation of the $J$-holomorphic map, and the intersection of the line at $pt'_{-v_k}$ fixes which element $m$ of $W_0$ that we are using as the domain.
\end{enumerate}
Hence for each of the ${{i} \choose {n-i}}$ lines from step iii, there is exactly one choice of tuple $(m,u,v_k)$ (up to reparametrisation and the $\mathbb{Z}/2$-action) satisfying the configuration in Figure \ref{fig:qWcpn}(2).
\end{proof}
\begin{thm}
\label{thm:SqQcpn}
For all $i \ge 0$,
\begin{equation}
\label{equation:quant1}
Q\mathcal{S}(x^{i}) = \sum_{j=0}^{i} \left( {{i} \choose {j}}+ \sum_{k=0}^{\lfloor n/2 \rfloor + 1} {{n-k}\choose{k}}\cdot {{i-(n+1-k)} \choose {j-k}} \right) x^{i+j} h^{2(i-j)},
\end{equation}
where $x^{i+j}$ is the $(i+j)$-th quantum power of $x$.
\end{thm}
Observe that if $i+j \ge n$ then $x^{i+j} = x^{i+j-n} T$, as this is the quantum power.
Recall that $Q\mathcal{S}(x^i) = Sq(x^i) + T(...)$ where by Example \ref{exmpl:classcpn}: $$Sq(x^{i}) = \sum_{j=0}^{n-i} {{i} \choose {j}} x^{i+j} h^{2i-2j}.$$
\begin{figure}
\caption{Configurations for $q(W)(x^{i},x)$}
\label{fig:qWcpn}
\end{figure}
\begin{proof}[Proof of theorem \ref{thm:SqQcpn}]
Since $T = x^{n+1}$, we can express the square as:
$$
\begin{array}{rcl}
Q\mathcal{S}(x^{i}) & = & \sum_{j=0}^{i} l^{i}_{j} x^{i+j} h^{2i-2j}
\\[2em]
& = &
l_0^i x^i h^{2i} + l_1^i x^{i+1} h^{2i-2} + \cdots
+ l_i^{i} x^{2i} h^{0}
\end{array}
$$
for some $l^i_j \in \mathbb{Z}/2$.
By the Quantum Cartan relation, Theorem \ref{thm:quancar} and Lemma \ref{lem:qWcpn}, the coefficients $l^i_j$ satisfy $l^{i+1}_{j} = l^{i}_{j} + l^{i}_{j-1}$ for $j \neq n-i$ and $l^{i+1}_{n-i} = l^{i}_{n-i-1} + l^i_{n-i} + {{i}\choose{n-i}}$ (the latter term arises from the quantum correction). Using Pascal's triangle and the iterative formula for the $l^{i}_{j}$, one can write down the closed form solution.
\end{proof}
In particular, truncating the sum in equation \eqref{equation:quant1} to $j \le n-i$ recovers the classical Steenrod square formula for $\mathbb{CP}^{n}$ from Example \ref{exmpl:classcpn}. This is because if $j \le n-i$ then every term in the second summation in Equation \eqref{equation:quant1} vanishes because either
\begin{itemize}
\item $j-k < 0$
\item or $i-(n+1-k) < 0$
\end{itemize}
To see that this is true, observe that if $j \ge k$ then $$i-(n+1-k) = k+i-n-1 \le j+i-n-1.$$ Then as $j \le n-i$, we see that $j+i-n-1 \le -1 < 0$.
Explicit examples:
\begin{enumerate}
\item[$\mathbb{CP}^1$] :
$q(W)(x,x) = {{1}\choose{1-1}}T h^{4+2-2}$
$Q\mathcal{S}(x) = xh^{2} + T$
$Q\mathcal{S}(T) = (x h^2 + T)^2 + \mathbf{Th^4} = T^2$.
\item [$\mathbb{CP}^2$] :
$q(W)(x,x) = {{1}\choose{2-1}}T h^{4+2-4}$
$Q\mathcal{S}(x) = xh^{2} + x^{2}$
$Q\mathcal{S}(x^{2}) = (xh^{2} + x^{2})^2 + \mathbf{Th^2} = x^{2}h^{4} + Th^{2} + xT$.
\item[$\mathbb{CP}^3$] :
$q(W)(x,x) = {{1}\choose{3-1}}T h^{4+2-6} = 0$ and $q(W)(x^2,x) = {{2}\choose{3-2}}T h^{8+2-6} = 0$
$Q\mathcal{S}(x) = xh^{2} + x^{2}$
$Q\mathcal{S}(x^{2}) = (xh^{2} + x^{2})^2 = x^{2}h^{4} + T$
$Q\mathcal{S}(x^{3}) = (xh^{2} + x^{2})(x^{2}h^{4} + T) = x^{3}h^{6} + Th^{4} + xTh^{2} + x^{2}T$.
\end{enumerate}
\begin{rmk}
Observe that, after one appeals to dimension reasons to rule out the other cases, the proof of Theorem \ref{thm:SqQcpn} only uses $GW(\mathbb{CP}^{n-1},\{pt \}, \{ pt' \} )$.
\end{rmk}
\subsection{Fano Toric Varieties}
\label{subsec:gentorvar}
Let $M$ be a compact monotone toric manifold, with $b \in H^{|b|}(M)$ and $x \in H^{2}(M)$, and let $X = PD(x)$. Then analogously to Theorem \ref{thm:SqQcpn}, one proves Theorem \ref{thm:SqQtoric}.
\begin{proof}[Proof of Theorem \ref{thm:SqQtoric}]
Consider setups as in Figure \ref{fig:qTORIC}, which we henceforth call setups. These are configurations that, when counted, yield the coefficient of $c T^{c_1(\mu)} h^{i+2}$ in $q(W)(b,x_p)$. Henceforth we fix the dimension of the equivariant parameter space, $i \in \mathbb{Z}_{\ge 0}$ corresponding to $S^{i} \subset S^{\infty}$, and some $\mu \in H_2(M, \mathbb{Z})$ such that the $J$-holomorphic curves we consider represent $\mu$. We also fix $x \in H^2(M)$ and $b \in H^*(M)$, as in the statement of the theorem. We make choices of $X_v, B_v$ for $v \in S^{\infty}$, with the usual conditions for the input cycles used in $q(W)(b,x_p)$. Given a test output cycle $c \in H^*(M)$, we pick an embedded submanifold representing $PD(c^{\vee})$.
We will describe configurations that are related to Figure \ref{fig:qTORIC}, which we call {\it reduced setups}, which arise by neglecting the intersection with $X_v$ and the marked point $z_4$ corresponding to it. The setup as given remains dimension $0$: removing $z_4$ ``removes 2 dimensions", and removing the intersection with $X_v$ ``adds 2 dimensions".
A ``reduced setup" is a pair $(v,u_{red})$ such that $v \in S^{i}$ and $u_{red}: S^2 \vee_{1 \sim 0} S^2 \rightarrow M$. Then $v \in S^{i}$ and $u_{red}$ is $J$-holomorphic, and subject to $(u_{red})_*[S^2 \vee S^2] = \mu$. The map $u_{red}$ satisfies: $$u_{red}(0) \in PD(c^{\vee}), u_{red}(\infty) \in X_{-v}, u_{red}(1) \in B_v, u_{red}(\infty) \in B_{-v}.$$ Note that given a setup, we may obtain a reduced setup by forgetting the point $z_4$ (and the associated intersection condition). Observe that the space of setups and reduced setups is of the same dimension, $|c| + i + 2c_1(\mu) - 2|x| - 2|b|$.
We would like to prove that for a generic choice of $\{ X_v \}$, if $(v, u_{red})$ is a reduced setup, then for every $p \in S^2$ such that $u_{red}(p) \in X_v$:
\begin{itemize}
\item $u_{red}$ and $X_v$ intersect transversely in $M$ at $u_{red}(p)$, and
\item $p$ is an injective point of $u_{red}$.
\end{itemize}
Observe that if we are in the situation where the set of reduced setups is $0$-dimensional, i.e. $|c| + i + 2c_1(\mu) - 2|x| - 2|b|=0$, then we may assume that no intersections occur for $v \in \partial D^{i,+} = S^{i-1}$. Further, counting reduced setups with $v \in S^i$ and then quotienting by the free $\mathbb{Z}/2$-action of Equation \eqref{equation:actiononmoduli} is identical to simply restricting to $v \in \mathring{D}^{i,+}$ (without taking a quotient). With this in mind, we may freely perturb our choice of $X_v$ for $v \in \mathring{D}^{i,+}$, without changing the reduced setups. We make sure that the perturbation is sufficiently small that the moduli spaces of setups remains transverse. Then the argument becomes a classical argument that a generic perturbation of the embedded submanifold/pseudocycle $X_v$ will be transverse to $u_{red}$, and \cite[Proposition 1.3.1]{jhols} implies that the set of injective points of a simple curve $u_{red}$ is open and dense: hence, generically each intersection occurs at an injective point of $u_{red}$.
Now suppose that we are given a reduced setup $(v, u_{red})$. Then there are $\# (X_v \bullet \mu)$ (modulo $2$) setups corresponding to it. Observe that the actual number of corresponding setups is $\# (X_v \cap \mu)$, where $\cap$ is the absolute number of intersection points counted without signs. Generally such a count is not preserved under changes of representatives of $X_v$ and $\mu$, but one immediately sees that $\# (X_v \bullet \mu) = \# (X_v \cap \mu)$ for transversely intersecting pseudocycles in characteristic $2$. This choice of $\# (X_v \bullet \mu)$ setups corresponds to a choice of the marked point $z_4$ on the domain, which we know bijects with a choice of intersection points of $X_v$ and $\text{Im}(u)$ (as it is an injective point).
In fact, setups and reduced setups are in a $1$ to $\#( X \bullet \mu)$ correspondence (recalling that $X = PD(x)$). This holds because one may pick $X_v$ such that every $X_v$ is a normal perturbation in a $C^2$-small tubular neighbourhood of some fixed submanifold representative $\euscr{X}$ of $X$ (argue likewise for a pseudocycle representative). This is then bordant to having chosen $X_v = \euscr{X}$ for all $v$, by deformation retracting the tubular neighbourhood to $\euscr{X}$. Hence $\# (X_v \bullet \mu) = \#( X \bullet \mu)$.
It is now sufficient to prove that reduced setups count $$\sum_{2i=0}^{|b|} \sum_{j \ge 1} \sum_{k=1}^{j} \sum_{\mu \in H_2(M) : E(\mu) = k} \left( Q\mathcal{S}_{2i,j-k}(b) *_{\mu, k} x \right) \cdot h^{|b|-2i+2} T^{j}.$$ However, considering reduced setups alone one may choose $X_{-v}$ to be independent of $v \in D^{i,+}$ (again, choose a deformation retraction of a tubular neighbourhood to its core $\euscr{X}$). The result follows immediately from the definitions of $Q\mathcal{S}$ and the quantum product.
\end{proof}
\begin{figure}
\caption{Configurations for $q(W)(b,x_{p})$}
\label{fig:qTORIC}
\end{figure}
As the cohomology of a toric variety $M$ is generated by $H^{2}(M)$, iterated application of \eqref{equation:SqQtoric} yields a general solution, i.e. one can calculate $Q\mathcal{S}(x_{p_{1}}x_{p_{2}}...x_{p_{r}})$ assuming the base cases $Q\mathcal{S}(x_{p_{i}})$ for a basis $\{ x_{p} \}$ of $H^2(M)$ are known. Using a combination of degree reasons and Remark \ref{rmk:propertiesqs}, $Q\mathcal{S} (x_p) = x_p * x_p + x_p \cdot h^2$.
\begin{proof}[Proof of Corollary \ref{corollary:fanotoricdecided}]
We induct on degree. The base case is for $|x|=2$, and we know from above that $Q\mathcal{S}(x)= xh^2 + x * x$ is determined by $QH^*(M)$. Given $a \in H^{*}(M)$ for $* > 2$, write $a = b * x$ for $x \in H^{2}(M)$. By Theorem \ref{thm:quancar}, we have $Q\mathcal{S} (a) = Q\mathcal{S} (b) * Q\mathcal{S} (x) + q(W)(b,x)$. By induction $Q\mathcal{S} (b)$ and $Q\mathcal{S} (x)$ are determined by $QH^* (M)$, hence so is $Q\mathcal{S} (b) * Q\mathcal{S} (x)$. By Theorem \ref{thm:SqQtoric}, $q(W)(b,x)$ is determined by $QH^* (M)$ (observing that $\# (X \bullet \mu)$ is determined from singular cohomology).
\end{proof}
Let $\beta: QH^*(M) \rightarrow QH^*(M)$ be a ring homomorphism satisfying $\beta(T) = T$. In the notation of Theorem \ref{thm:SqQtoric}, we deduce that for $a,b \in QH^*(M)$, $$\beta(a *_{0,0} b ) = \beta(a) *_{0,0} \beta(b).$$ This is because $\mu =0$ is the only possible element of $H_2(M, \mathbb{Z})$ of Chern number $0$ when $M$ is monotone. Indeed, Theorem \ref{thm:SqQtoric} simplifies to state that if $|x| = 2$, then \begin{equation} \label{equation:toriccartanagain} Q \mathcal{S}( b * x) = Q \mathcal{S}(b) * Q \mathcal{S}(x) + (Q \mathcal{S}(b)* x - Q \mathcal{S}(b)*_{0,0} x). \end{equation} Thus, any ring homomorphism $\beta$ with the given constraint is compatible with the quantum Steenrod square.
\begin{exmpl}[$\mathbb{CP}^{1} \times \mathbb{CP}^{1}$]
We let $x,y$ be the generators of $H^2(\mathbb{CP}^1 \times \mathbb{CP}^1)$, with $PD(x) = [\{ pt \} \times \mathbb{CP}^1]$ and $PD(y) = [\mathbb{CP}^1 \times \{ pt' \}]$. Here $q(W_{0} \times D^{i-2,+})(x,y) = 0$ hence $Q\mathcal{S}(x) * Q\mathcal{S}(y) = Q\mathcal{S}(x*y)$. Indeed by equation \eqref{equation:SqQtoric},
$$q(W_{0} \times D^{i-2,+})(x,y) =\sum_{2i=0}^{2} \sum_{j \ge 1} \sum_{k=1}^{j} k \cdot Q\mathcal{S}_{2i,j-k}(x) *_{\mu, k} y h^{4-2i} T^{j},$$ recalling that $x *_{\mu, k} y$ is the coefficient of $T^{k}$ in the quantum product $x * y$, using spheres representing $\mu$. Working from definitions, $Q\mathcal{S} (x) = x h^{2} + T$. Then $\alpha *_{k} y \neq 0 \implies k = 1, \alpha = y$. There are no $i,j,k$ such that $Q\mathcal{S}_{2i,j-k}(x) = y$. Hence the sum on the right hand side is $0$.
\end{exmpl}
\section{The Quantum Adem Relations}
\label{sec:QAR}
\subsection{Classical Adem Relations}
\label{subsec:classadem}
We begin with a discussion of the group cohomology of $S_4$ and $D_8$. This will involve adding details to the argument alluded to by Cohen-Norbury to prove the classical Adem relations in \cite[Section 5.2]{cohnor}.
It is proved in \cite[Sections IV.1, VI.1]{ademmilgram} that \begin{equation} \label{equation:HBD8} H^{*}(BD_{8}) = \mathbb{Z}/{2}[e,\sigma_{1},\sigma_{2}]/(e \sigma_{1}) \end{equation} where $e, \sigma_{1}$ are of degree 1 and $\sigma_{2}$ is of degree 2, and \begin{equation} \label{equation:HBS4} H^{*}(BS_{4}) = \mathbb{Z}/{2}[n_{1},n_{2},c_{3}]/(n_{1} c_{3}), \end{equation} where again subscripts denote the degree of the elements. Considering $$D_{8} = \langle (12),(34),(13)(24) \rangle \subset S_{4},$$ there are subgroups $$\mathbb{Z}/2 = \langle (13)(24) \rangle, \qquad \mathbb{Z}/2 \times \mathbb{Z}/2 = \langle (12),(34) \rangle.$$ Then $$H^{*}(B \mathbb{Z}/2) = \mathbb{Z}/{2}[e], \qquad H^{*}(B (\mathbb{Z}/2 \times \mathbb{Z}/2)) = \mathbb{Z}/{2}[x,y].$$ Consider the commutative diagram \eqref{commutativediagramofgroups} induced by the various inclusion maps of groups. As in \cite{ademmilgram}, one shows that:
\begin{equation}\label{commutativediagramofgroups}
\xymatrix{
H^*(B \mathbb{Z}/2)
\\
H^*(BD_8)
\ar@{->}^-{i_1}[u]
\ar@{->}_-{i_2}[d]
&
H^*(BS_4)
\ar@{->}^-{j_1}[ul]
\ar@{->}^-{j_2}[dl]
\ar@{->}^-{\pi^*}[l]
\\
H^*(B(\mathbb{Z}/2 \times \mathbb{Z}/2))
}
\end{equation}
\begin{tabular}{l }
$i_{1}(e) = e$\\
$i_{2}(\sigma_{1}) = x+y, \quad i_{2}(\sigma_{2})=xy$\\
$j_{1}(n_{2}) = e^{2}$\\
$j_{2}(n_{1}) = x+y, \quad j_{2}(n_{2}) = xy$\\
\end{tabular}
All of the other generators map to $0$ via the $i,j$ maps. From this, and the fact that $\pi^*$ is injective, we deduce that $$\pi^{*}(n_{1}) = \sigma_{1} \qquad \pi^{*}(n_{2}) = \sigma_{2} + e^{2} \qquad \pi^{*}(c_{3}) = e \sigma_{2}.$$
By Cohen-Norbury, \cite{cohnor}, there is a commutative diagram, namely diagram \eqref{classicalademdiagram}, where $qq^0$ satisfies $$qq^0(\alpha) = \sum_{p,q} Sq^{q} \circ Sq^{p}(\alpha) e^{|\alpha| + p - q} \sigma_{2}^{|\alpha| - p}.$$
\begin{equation}\label{classicalademdiagram}
\xymatrix{
H^*(M)
\ar@{->}^-{\hat{qq}^0}[r]
\ar@{->}_-{=}[d]
&
H^*(M) \otimes H^*(BS_4)
\ar@{->}^-{id_{H^*(M)} \otimes \pi^*}[d]
\\
H^*(M)
\ar@{->}^-{qq^0}[r]
&
H^*(M) \otimes H^*(BD_8)
}
\end{equation}
We do not in general know a closed form definition of $\hat{qq}^0$ in terms of compositions of Steenrod squares, but in fact we do not need to: the Adem relation is a purely algebraic relation, only using the fact that $qq^0$ lifts to a homomorphism $\hat{qq}^0$ (and not any information about the homomorphism itself). For a definition of $\hat{qq}^0$ in Diagram \eqref{classicalademdiagram}, use the $T^0$-component of $\hat{qq}$ from Definition \ref{defn:qs4}.
\begin{Fact}
\label{Fact:commuteAdem}
By Theorem 19 (Invariance) in \cite{cohnor}, the diagram \eqref{classicalademdiagram} commutes. This implies that the image of $qq^0$ lies in the image of $id_{H^{*}(M)} \otimes \pi^{*}$. Hence there are constraints on the image. Specifically, $e^{2i} \sigma_{2}^{j}$ may only appear in $qq^0(\alpha)$ if it arises from some $(e \sigma_{2})^{2k} (e^{2} + \sigma_{2})^{i+j-3k}$ for $k=0,1,\dots$, with coefficient $\binom{i+j-3k}{i-k}$. This is a special case of Lemma \ref{lemma:quantumademdiagram}.
\end{Fact}
\begin{lemma}
\label{lem:bincoeff}
For any $s,m$,
\begin{equation}
\label{equation:combinatorial}
{{3s+m}\choose{s+m}} = \sum_{l=0}^{\infty} {{m+l-1}\choose{2l}} {{3s+m}\choose{s-l}}
\end{equation}
modulo 2.
\end{lemma}
\begin{proof}
We prove this by induction. Let $c(m,s) = {{m+3s}\choose{m+s}}$. Then modulo 2, $$c(m+2,s) = c(m,s) + c(m+3,s-1).$$
Define $S(m,s) = \sum_{l} {{m-1+l}\choose{2l}}{{3s+m}\choose{s-l}}$. Check that $S(m,s) = c(m,s)$ for $s=0,1$ and $m=1,2$. These are the base cases. Hence if $S(m+2,s) = S(m,s) + S(m+3,s-1)$ for all $m,s$ then the lemma holds by induction. This is an exercise in binomial coefficient algebra modulo $2$.
\end{proof}
\begin{thm}[Classical Adem Relations]
\label{thm:car}
Given $\alpha \in H^{*}(M)$ and $q,p>0$ such that $q<2p$,
\begin{equation} \label{equation:classicalademrelations} Sq^{q}Sq^{p}(\alpha) = \sum_{k=0}^{[q/2]} {{p-k-1}\choose{q-2k}}Sq^{p+q-k} Sq^{k}(\alpha).\end{equation}
\end{thm}
\begin{proof}
Suppose $q$ is even. Let $l = |\alpha|-p$, $m=p-q/2$, $n=q/2-k$, thus $$ {{p-k-1}\choose{q-2k}} = {{m+n-1}\choose{2n}}.$$ Assume $l=2r$. The cases for $q$ or $l$ odd are proven identically, except for slight modifications in the substitutions and the exponents of the labelled equations. Throughout, for $E \in H^*(B D_8)$, let $\text{cff}(E)$ be the coefficient of $E$ in $qq^0(\alpha)$. By definition of $qq^0$: $$Sq^{q} \circ Sq^{p} (\alpha)= \text{cff}(e^{l+2m} \sigma_{2}^{l}) \qquad \textrm{and} \qquad Sq^{p+q-k} \circ Sq^k (\alpha) = \text{cff}(e^{l-2n} \sigma_{2}^{l+m+n}).$$ By Fact \ref{Fact:commuteAdem} (which also ensures that the right hand sides of the following two equations are well defined), \begin{equation} \label{equation:cff1} \text{cff}(e^{l+2m} \sigma_{2}^{l})= \sum_{i=0}^{r} {{3r+m-3i}\choose{r+m-i}} \cdot \text{cff}((e \sigma_{2})^{2i} (e^{2} + \sigma_{2})^{3r+m-3i})\end{equation} and \begin{equation} \label{equation:cff2} \text{cff}(e^{l-2n} \sigma_{2}^{l+m+n}) = \sum_{i=0}^{r} {{3r+m-3i}\choose{r-n-i}} \cdot \text{cff}((e \sigma_{2})^{2i} (e^{2} + \sigma_{2})^{3r+m-3i}).\end{equation}
The claim now follows since by Lemma \ref{lem:bincoeff}, $${{3r+m-3i}\choose{r+m-i}} = \sum_{n=0}^{\infty} {{m+n-1}\choose{2n}}{{3r+m-3i}\choose{r-n-i}}.$$ Substitute this into Equation \eqref{equation:cff1}, swap the summation, and then substitute Equation \eqref{equation:cff2}. This yields Equation \eqref{equation:classicalademrelations}, after substituting back for $p,q$ and $k$.
The terms with $n > q/2$ will not appear in the final statement because $n > q/2$ implies $k < 0$, and $Sq^k(\alpha) = 0$ for $k<0$.
\end{proof}
\subsection{Quantum Adem Relations}
\label{subsec:QAR}
In this section we will denote by $\mathcal{B}$ some basis of $H^*(BS_4)$, by $\hat{\mathcal{B}}$ some basis of $H^*(BD_8)$ and by $\mathcal{B}_M$ some basis of $H^*(M)$.
Recall that in Definition \ref{defn:qopn}, for $W \in H_{*}(\overline{M}_{0,5} \times_{\mathbb{Z}/2} S^i)$ for some $i$, we defined additive homomorphisms $$q_{i,j}(W): QH^a(M) \otimes QH^b(M) \rightarrow QH^{2a+2b-i-2jN}(M).$$ We will define a similar construction of operators that are parametrised by $H_{*}^{D_{8}} (\overline{M}_{0,5})$ and $H_{*}^{S_{4}} (\overline{M}_{0,5})$, where $$D_{8} = \langle (12),(34),(13)(24) \rangle \subset S_{4}$$ acts by permutations on the indices of $[z_{0},z_{1},z_{2},z_{3},z_{4}] \in \mathcal{M}_{0,5}$. We will abbreviate $P^{p,q,r}_{D_8} = \overline{M}_{0,5} \times_{D_8} ES_4^{p,q,r}$ and $P^{p,q,r}_{S_4} = \overline{M}_{0,5} \times_{S_4} ES_4^{p,q,r}$, recalling the constructions in Appendix \ref{sec:ed8es4}, where we expressed $ES_4$ as the union of a countable nested sequence of smooth closed manifolds, $ES_4^{p,q,r}$ of respective dimension $2p-1+3q+6r$.
We note that for any $M$ with $H^*(M)$ finitely generated in all degrees, there is a map $$\Psi: H^*(M) \rightarrow H_*(M),$$ along with its inverse also denoted $\Psi: H_*(M) \rightarrow H^*(M)$. This is an isomorphism via universal coefficients (as usual working over $\mathbb{Z}/2$): explicitly, one picks a dual basis under the pairing $\langle \alpha, a \rangle \mapsto \alpha (a)$ given by evaluation of cocycles. For brevity we denote $P_{D_8} = \overline{M}_{0,5} \times_{D_8} ES_4$ and $P_{S_4} = \overline{M}_{0,5} \times_{S_4} ES_4$. The homology of $P_{D_8}$ and $P_{S_4}$ satisfy this finite generation condition: this is due to the Cartan--Leray spectral sequence.
Pick a pseudocycle representative $\zeta^{\vee}: Z^{\vee} \rightarrow M$ for each $z^{\vee} \in \mathcal{B}_M^{\vee}$. For $\alpha \in H^*(M)$, choose pseudocycles $i_v: A_v \rightarrow M$ for $v \in ES_{4}$ (where $A_v = A \times \{v \} \subset A \times ES_4^{p,q,r}$ for some sufficiently large $p,q,r$). We do this such that $i_v A_v$ is a weak representative of $PD(\alpha)$ for each $v$, by which we mean that $i_v A_v \bullet X = PD(\alpha) \bullet X$ for all $X \in H_*(M)$, where $\bullet$ is the intersection number. We choose $i_v$ with invariance and genericity conditions as follows:
\begin{enumerate}
\item $A_v = A_{(23) \cdot v} = A_{(24) \cdot v}$ and $i_v = i_{(23) \cdot v} = i_{(24) \cdot v}$ for all $v \in ES_{4}$.
\item Let $\mathcal{M}_{0,5}(J,j)$ be the space of $J$-holomorphic maps of Chern number $jN$ from $S^2$ to $M$ with $5$ marked points. Let $\overline{\mathcal{M}_{0,5}(J,j)}$ be its compactification with stable nodal maps. Then the $i_v$ must be chosen sufficiently generically so that the intersection of the $S_4$-equivariant pseudocycles in Equations \eqref{pseudoaaa1} and \eqref{pseudoaaa2} is transverse: \begin{equation} \label{pseudoaaa1} \begin{array}{l} ev: \overline{\mathcal{M}_{0,5}(J,j)} \times ES_4^{p,q,r} \rightarrow M \times M \times M \times M \times M \times ES_4^{p,q,r} \\ (u,v) \mapsto (u(z_0), u(z_1), u(z_2), u(z_3), u(z_4), v) \end{array} \end{equation} and \begin{equation} \label{pseudoaaa2} \begin{array}{l} Z^{\vee} \times A \times A \times A \times A \times ES_4^{p,q,r} \rightarrow M \times M \times M \times M \times M \times ES_4^{p,q,r} \\ (x,a_1, a_2, a_3,a_4, v) \mapsto (\zeta^{\vee}(x), i_v(a_1), i_{(12) \cdot v} (a_2), i_{(13) \cdot v} (a_3), i_{(14) \cdot v} (a_4), v). \end{array} \end{equation}
\end{enumerate}
Observe that we may restrict to the special case of Morse theory, as we have done throughout this paper. Specifically we choose $f_{v,s}$ for $v \in ES_4$ and $s \in [0,\infty)$. We do this such that $f_{v,s} = f$ for $s \gg 0$, and $f_{(23) \cdot v, s} = f_{(24) \cdot v, s} = f_{v, s} $ for all $v,s$, and we replace the incidence condition with $i_{(1p) \cdot v} (a_p)$ by incidence with a $-\nabla f_{(1 p) \cdot v,s}$-flowline asymptotic to a critical point $\alpha$.
\begin{defn}
\label{defn:s4operators}
Let $\alpha \in \text{crit}(f)$. For $d \in H^{*}_{S_{4}}(\overline{M}_{0,5})$, we pick a pseudocycle representative $\delta: D \rightarrow P^{p,q,r}_{S_4}$ of $\Psi (d) \in H_* (P^{p,q,r}_{S_4})$ (for some sufficiently large $p,q,r$). Then we define an operation $q_{S_4}(D): H^*(M) \rightarrow QH^*(M),$ by $$q_{S_4}(D)(\alpha) := \sum_{z \in \text{crit}(f), \ j \ge 0} n_{z, \alpha, j} \cdot z \cdot T^j,$$ where $n_{z, \alpha,j}$ counts the number of $S_4$ equivalence classes of triples $(m,u,v)$ with $[m,v] \in \delta(D) \subset P_{S_4}$ and $u: m \rightarrow M$ is $J$-holomorphic and of Chern number $j N$. We also require that $$u_0: (-\infty,0] \rightarrow M, \ u_p: [0,\infty) \rightarrow M,$$ for $p=1,2,3,4$, such that $$\begin{array}{l} \partial_t u_0(s) = -\nabla f(u_0(s)) , \ \partial_t u_p(s) = -\nabla f_{(1 p) \cdot v}(u_p(s)), \\ u(z_p) = u_p(0), \ u_0(-\infty) = z, \text{ and } u_p(\infty) = \alpha. \end{array}$$
On cohomology the operation will be independent of the representative $\delta$ of $\Psi (d)$, by the same proof as Lemma \ref{lemma:additiveandindep} (i.e. we express our coefficients as the intersections of pseudocycles, and bordant pseudocycles give the same intersection number).
\end{defn}
In order to show that Definition \ref{defn:s4operators} is well defined on cohomology, we must define an operation $q_{S_4}'(D) : C^*_{S_4}(M \times M \times M \times M) \rightarrow C^*(M) \otimes C^*(BS_4)$ as in Definition \ref{defn:mss}. It is then a standard compactification theorem to prove that $q_{S_4}$ is well defined on cohomology (as $D$ is closed), for example as in Equation \eqref{equation:Sq'chainmap}.
The definition of $q_{D_8}(D)$ is identical to Definition \ref{defn:s4operators}, replacing everywhere $S_4$ by $D_8$ (note specifically that this definition uses $BD_8 = ES_4 / D_8$ as its parameter space).
Henceforth, we will restrict to the subalgebra $H^*(BS_4)_{red}$ of $H^*(BS_4)$ generated by $n_2$ and $c_3$, and similarly the subalgebra $H^*(BD_8)_{red}$ of $H^*(BD_8)$ generated by $e$ and $\sigma_2$. The map $\pi^*: H^*(BS_4)_{red} \rightarrow H^*(BD_8)_{red}$ is well defined and injective because of Diagram \eqref{commutativediagramofgroups}. Indeed, the only difference to using $H^*(BS_4)$ and $H^*(BD_8)$ is that we forget all additive generators that include monomials with some nontrivial $n_1$ and $\sigma_1$ exponent respectively.
As in the case of the quantum Cartan relation, we would like to consider cycles in $H_*(P_{S_4})$ parametrised by some basis $\mathcal{B}$ of $H^*(BS_4)_{red}$. Compare this to the proof of the quantum Cartan relations, where the classes $[\{ m_1 \} \times D^{i,+}] \in H_*( P_{\mathbb{Z}/2})$ were parametrized by $[D^{i,+}] \in H_*(B \mathbb{Z}/2) = H_* (\mathbb{RP}^{\infty})$. Further, we will show later that \begin{equation} \label{equation:quantumsquarecomposition} Q\mathcal{S} \circ Q\mathcal{S} (\alpha) = \sum_{i,j} q_{D_8} (\{ m_1 \} \otimes \Psi (e^i \sigma_2^j) )(\alpha) \cdot e^i \sigma_2^j. \end{equation}
Hence, ideally we would like the chains represented by $\{ \{ m_1 \} \otimes B \}$ to be elements of $H_*(P_{S_4})$, for $B \in \mathcal{B}$. This will not work because $m_1$ is not $S_4$-invariant. However, the cycle $m_1 + g m_1 + g^2 m_1 \in H_*(\overline{M}_{0,5})$ is $S_4$ invariant, where $g = (123)$ generates the cosets of $D_8$ in $S_4$ (note that $g m_1 = m_2$).
\begin{defn}
\label{defn:qs4}
Given a basis $\mathcal{B}$ of $H^*(BS_4)_{red}$, define: $$\hat{qq}: H^*(M) \rightarrow QH^*(M) \otimes H^*(BS_4)_{red},$$ $$\hat{qq}(\alpha) = \sum_{b \in \mathcal{B}} q_{S_4}((m_1 + g m_1 + g^2 m_1) \otimes \Psi (b)) (\alpha) \cdot b.$$
\end{defn}
\begin{defn}
Given a basis $\tilde{\mathcal{B}}$ of $H^*(BD_8)_{red}$, define: $$qq: H^*(M) \rightarrow QH^*(M) \otimes H^*(BD_8)_{red},$$ $$qq(\alpha) := \sum_{\tilde{b} \in \tilde{\mathcal{B}}} q_{D_8}((m_1 + g m_1 + g^2 m_1) \otimes \Psi (\tilde{b})) (\alpha) \cdot \tilde{b}.$$
\end{defn}
We fix some additive basis $\mathcal{B}$ for $H^{*}(BS_{4})_{red}$, of the form $\{ n_2^a c_3^b \}$ (with notation as in Equation \eqref{equation:HBS4}). Recall from Diagram \eqref{classicalademdiagram} there is $\pi_* : H_*(BD_8)_{red} \rightarrow H_*(BS_4)_{red}$ and $\pi^* : H^*(BS_4)_{red} \rightarrow H^*(BD_8)_{red}$, which are induced by the continuous quotient map $$\pi: ES_4 / D_8 \rightarrow ES_4 / S_4.$$ We also define: $$i_* : H_*(BS_4)_{red} \rightarrow H_*(BD_8)_{red}, \qquad i_*(D) = D + gD + g^2 D$$ and $$i^* : H^* (BD_8)_{red} \rightarrow H^* (BS_4)_{red}, \qquad i^* (d) = d + g d + g^2 d.$$ As we work over $\mathbb{Z}/2$ we see that $\pi_* \circ i_* = id$ and $i^* \circ \pi^* = id$, which also shows that $\pi^*$ is injective. As $\pi^*$ is injective, $\pi^* \mathcal{B}$ is linearly independent in $H^*(BD_8)_{red}$. We extend this to a basis $\hat{\mathcal{B}} = \pi^* \mathcal{B} \cup \mathcal{B}'$ of $H^*(BD_8)_{red}$.
\begin{lemma}
\label{lemma:adem1}
$$q_{S_4}((m_1 + g m_1 + g^2 m_1) \otimes \pi_* \Psi (b)) = q_{D_8}((m_1 + g m_1 + g^2 m_1) \otimes \Psi (b)).$$
\end{lemma}
\begin{proof}
Suppose that we pick some pseudocycle representative $f: X \rightarrow BD_8$ of $\Psi(b) \in H_*(BD_8)_{red}$ (or specifically, some stratum $BD_8^{p,q,r}$). To define a pseudocycle representative of $\pi_* \Psi(b) \in H_*(BS_4)_{red}$, we choose $\pi \circ f$. So in particular, there is a pseudocycle representative of $(m_1 + g m_1 + g^2 m_1) \otimes \Psi (b)$ of the form $$f': \{pt_1,pt_g,pt_{g^2} \} \times X \rightarrow PD_8, \ f'(pt_a, x) = (a \cdot m_1, f(x)),$$ which we see descends to a $D_8$-equivariant pseudocycle, and similarly an $S_4$-equivariant pseudocycle: $$\pi \circ f': \{pt_1,pt_g,pt_{g^2} \} \times X \rightarrow PS_4, \ \pi \circ f'(pt_a, x) = (a \cdot m_1,\pi \circ f(x)).$$
Let $z \in \text{crit}(f)$. Let $\overline{\mathcal{M}}(J,j)$ be a partial compactification of the space of genus $0$ stable $J$-holomorphic maps (i.e. excluding repeated or multiply covered components). Recall from Lemma \ref{lemma:additiveandindep} the means by which we determine the coefficient of $z$ in $q_{S_4}((m_1 + g m_1 + g^2 m_1) \otimes \pi_* \Psi (b))(x)$ as an intersection number. One defines a $5$-pointed Gromov-Witten invariant assigned to $\overline{\mathcal{M}}(J,j)$. Push this forwards along the map $$\mathcal{W}: \overline{\mathcal{M}}(J,j) \times ES_4^{p,q,r} \rightarrow M \times ((M \times M \times M \times M \times \overline{M}_{0,5}) \times_{S_4} ES_4^{p,q,r})$$ (for some $p,q,r$), which is induced by the evaluation map on the five marked points, the stabilisation map $\overline{\mathcal{M}}(J,j) \rightarrow \overline{M}_{0,5}$ and the identity on the $ES_4^{p,q,r}$ factor. This determined a cohomology class in $M \times ((M \times M \times M \times M \times \overline{M}_{0,5}) \times_{S_4} ES_4^{p,q,r})$. There is also a pseudocycle constructed using the evaluation maps on the partially compactified (un)stable manifolds $W^u(z,f)$, $W^s(x,f_{v},s)$, $W^s(x,f_{(12) \cdot v},s)$, $W^s(x,f_{(13) \cdot v},s)$, $W^s(x,f_{(14) \cdot v},s)$, alongside the map $\pi \circ f'$. The intersection of the image of the (equivariant) Gromov-Witten invariant with the pseudocycle provides the coefficient of $z$. A similar argument holds for $q_{D_8}$, this time using the map $f'$. Then $$M \times ((M \times M \times M \times M \times \overline{M}_{0,5}) \times_{D_8} ES_4^{p,q,r}) \rightarrow M \times ((M \times M \times M \times M \times \overline{M}_{0,5}) \times_{S_4} ES_4^{p,q,r})$$ is a $3$-to-$1$ covering, hence the coefficients of this intersection differ by multiplying the $S_4$-coefficient by three. As we work over $\mathbb{Z}/2$-coefficients, multiplication by three is the identity.
Ensuring transversality for pseudocycles in both the base and the cover simultaneously is not an issue, as the property of an intersection being transverse is preserved under a $p$-fold smooth covering map (being a local diffeomorphism), so it suffices to ensure transversality on the cover, $M \times ((M \times M \times M \times M \times \overline{M}_{0,5}) \times_{D_8} ES_4^{p,q,r})$, which we know by Appendix \ref{subsec:quantumademrels}.
\end{proof}
\begin{lemma}
For $b' \in \mathcal{B}' := \hat{\mathcal{B}} - \pi^* \mathcal{B}$, $$q_{D_8} ((m_1 + g m_1 + g^2 m_1) \otimes \Psi (b')) = 0.$$
\end{lemma}
\begin{proof}
By Lemma \ref{lemma:adem1}, $$q_{D_8} ((m_1 + g m_1 + g^2 m_1) \otimes \Psi (b')) = q_{S_4} ((m_1 + g m_1 + g^2 m_1) \otimes \pi_* \Psi (b')).$$ If $B' = \Psi (b')$ then for all $b \in \mathcal{B}$, $$\langle b, \pi_* \Psi(b') \rangle = \langle b, \pi_* B' \rangle = \langle \pi^* b, B' \rangle = \langle \pi^* b, \Psi(b') \rangle = 0$$ by definition of the dualising isomorphism $\Psi$. Hence $\pi_* \Psi (b') = 0$.
\end{proof}
This implies that \begin{equation} \label{equation:qq} qq(\alpha) := \sum_{\pi^* b \in \pi^* \mathcal{B}} q_{D_8}((m_1 + g m_1 + g^2 m_1) \otimes \Psi (\pi^* b)) (\alpha) \cdot \pi^* b. \end{equation}
\begin{lemma}
\label{lemma:quantumademdiagram}
The following diagram commutes:
\begin{equation} \label{quantumademdiagram}
\xymatrix{
H^*(M)
\ar@{->}^-{\hat{qq}}[r]
\ar@{->}_-{=}[d]
&
QH^*(M) \otimes H^*(BS_4)_{red}
\ar@{->}^-{id_{H^*(M)} \otimes \pi^*}[d]
\\
H^*(M)
\ar@{->}^-{qq}[r]
&
QH^*(M) \otimes H^*(BD_8)_{red}
}
\end{equation}
\end{lemma}
\begin{proof}
Observe that $$(id \otimes \pi^*) \hat{qq}(\alpha) = \sum_{\pi^* b \in \pi^* \mathcal{B}} q_{S_4} ((m_1 + g m_1 + g^2 m_1) \otimes \Psi (b)) (\alpha) \cdot \pi^* b.$$ Then $$qq (\alpha) = \sum_{\pi^* b \in \pi^* \mathcal{B}} q_{S_4}((m_1 + g m_1 + g^2 m_1) \otimes \pi_* \Psi (\pi^* b)) (\alpha) \cdot \pi^* b$$ using Equation \eqref{equation:qq} and Lemma \ref{lemma:adem1}. For $b \in \mathcal{B}$, let $D = \Psi (\pi^* b)$. Then $\langle \pi^* b, D \rangle = 1$ and $\langle \hat{b}, D \rangle = 0$ for all $\hat{b} \in \hat{\mathcal{B}} - \pi^* b$, specifically for $\hat{b} = \pi^* d$ with $d \in \mathcal{B} - b$. Hence $\langle d, \pi_* D \rangle = 0$ for $d \in \mathcal{B} - b$ and $\langle b, \pi_* D \rangle = 1$, so $\pi_* \Psi (\pi^* b) = \Psi (b)$ by definition of $\Psi$.
\end{proof}
We now pick a different basis $\tilde{\mathcal{B}}$ for $H^*(BD_8)_{red}$ (i.e. different from $\hat{\mathcal{B}}$) consisting of elements of the form $e^i \sigma_2^j$ (see Section \ref{subsec:classadem} to recall the notation). Let \begin{equation} \label{equation:qqijdef}qq_{i,j}(\alpha) := q_{D_8}((m_1 + g m_1 + g^2 m_1) \otimes \Psi (e^{i} \sigma_{2}^{j})), \end{equation} the coefficient of $e^{i} \sigma_{2}^{j}$ in $qq(\alpha)$.
\begin{proof}[Proof of Theorem \ref{thm:QAR}, The Adem Relations]
The theorem follows immediately by Lemma \ref{lemma:quantumademdiagram} and the combinatorial argument in Theorem \ref{thm:car}.
\end{proof}
We relate this to a composition of quantum Steenrod squares. Firstly, we observe that instead of using $BD_8$ as our parameter space, we will use the spaces $E$ and $B = E / D_8$ as defined in Appendix \ref{subsec:ed8}. Recall from Appendix \ref{subsec:es4}, there is a map $\rho: ES_4 \rightarrow \mathbb{S}_2 = E$, a space that is an $ED_8$ (i.e. a contractible space with a free $D_8$-action).
Recall then that $E$ is stratified by finite dimensional $D_8$-invariant submanifolds $E^{i,j} = S^i \times (S^j \times S^j)$. Let $D^{i,+}$ be the upper $i$-dimensional hemisphere as usual. It is immediate that $D^{i,+} \times D^{j,+} \times D^{j,+} \subset E$ represents a closed cycle in $H_*(BD_8)$.
\begin{lemma}
\label{lemma:submandidjdj}
The submanifold $D^{i,+} \times D^{j,+} \times D^{j,+}$ represents $\Psi (e^i \sigma_2^j)$.
\end{lemma}
\begin{proof}
Consider the projections $$k_1: E \rightarrow S^{\infty}, \ (x,(x_1,x_2)) \mapsto x$$ and $$k_2: E \rightarrow S^{\infty} \times S^{\infty}, \ (x,(x_1,x_2)) \mapsto (x_1,x_2),$$ which are respectively $\mathbb{Z}/2 \cong \langle (13)(24) \rangle$- and $\mathbb{Z}/2 \times \mathbb{Z}/2 \cong \langle (12), (34) \rangle$-equivariant. Indeed, they induce respectively $\mathbb{Z}/2$- and $\mathbb{Z}/2 \times \mathbb{Z}/2$-equivariant homotopy equivalences for the same reason as the map at the end of Appendix \ref{subsec:es4}. We abusively denote by $k_1,k_2$ the maps after quotienting by the free $\mathbb{Z}/2$-action. Combining these with the quotient maps $$l_1: E /( \mathbb{Z}/2) \rightarrow B$$ and $$l_2: E / (\mathbb{Z}/2 \times \mathbb{Z}/2) \rightarrow B,$$ we obtain $i_p$ from Diagram \ref{commutativediagramofgroups} as the composition $i_p = l_p^* \circ (k_p^*)^{-1}$, for $p=1,2$.
If $j=0$ then observe that $i_1(e^i) = e^i$ using Diagram \ref{commutativediagramofgroups}. Notice that for the homogeneous choice of homology basis, $\Psi(e^i)$ in $B \mathbb{Z}/2$ is represented by $D^{i,+} \subset S^{\infty}$. Letting $(i_{p})_{*}: H_*(B \mathbb{Z}/2) \rightarrow H_*(B)$ be the pushforward, observe that $$\Psi(e^i) = (i_{1})_{*} \Psi (i_{1})^{*}(e^i) = (i_{1})_{*} \Psi(e^i) = (i_{1})_{*}([D^{i,+}]) = [D^{i,+} \times D^{0,+} \times D^{0,+}] \in H_*(B).$$ Hence the result holds for $j=0$. Similarly the result holds for $i=0$, using the homomorphism $i_2$ and replacing $e$ by $\sigma_2^j$ and $D^{i,+}$ by $D^{j,+} \times D^{j,+}$, we see that $\Psi(\sigma_2^j)$ is represented by $[D^{0,+} \times D^{j,+} \times D^{j,+}]$.
Observe, via the K\"unneth isomorphism, that elements of $H^*(B)$ may be represented as $D_8$-equivalence classes of cochains $x \otimes y \otimes z$, where $x \in C^*(S^{\infty})$. By the previous paragraph, we know that $e^i \sigma_2^j$ is represented by $x_i \otimes x_j \otimes x_j$, where $x_i$ is the indicator homomorphism for a simplex representing $D^{i,+}$. Then $\Psi([x_i \otimes x_j \otimes x_j]) = [D^{i,+} \times D^{j,+} \times D^{j,+}]$ as required.
\end{proof}
We reinterpret the operation $qq$ in terms of the parameter space $B = E / D_8$ (as opposed to $BD_8 = ES_4 / D_8$). Observe that the space $E$ does not have an action of any element of $S_4 - D_8$. Hence, in Definition \ref{defn:s4operators} we no longer ask for our Morse function $f_{v,s}$ to have invariance under $(23),(24)$ (as this is meaningless). Further, we choose the $f_{v,s}$ that we use for incidence conditions to be respectively $f_{v}, f_{(12) \cdot v}, f_{(13)(24) \cdot v}, f_{(14)(23) \cdot v}$. (Observe that when we used the parameter space $ES_4$, the invariance conditions imply that $f_{(13)(24) \cdot v} = f_{(13) \cdot v}$ and $f_{(14)(23) \cdot v} = f_{(14) \cdot v}$).
For the following proof, we fix $\alpha \in \text{crit}(f)$.
\begin{proof}[Proof of Corollary \ref{corollary:QAR}]
The corollary follows from Theorem \ref{thm:QAR} if we prove that for each $i,j \in \mathbb{Z}_{\ge 0}$ \begin{equation} \label{equation:corollaryequation} q_{D_8}(m_1 \otimes \Psi (e^{i} \sigma_{2}^{j}))(\alpha) = \sum_{b,d} Q\mathcal{S}^{i,b} \circ Q\mathcal{S}^{j,d}(\alpha). \end{equation} Henceforth we will fix some choice of $b,d$, and count those contributions to $q_{D_8}$, ostensibly denoted $q_{D_8,b,d}$, which arise from counting configurations with nodal curves with three components, corresponding to a nodal sphere comprised of a sphere of Chern number $bN$ attached to two spheres of Chern number $dN$ at $1$ and $\infty$. As we have now fixed $b,d$, we abusively exclude them from the notation.
To prove Equation \eqref{equation:corollaryequation}, we use a similar idea to proving the Cartan relation, as is illustrated in Figure \ref{fig:ademmodulispace}. Specifically, recall the $1$-dimensional space of graphs $T^c$ from Section \ref{subsec:Cartan}. Recall from Lemma \ref{lemma:lem1} that for each $t \in [0,\infty]$ there is a space $|t|_Q$, consisting of three copies of $S^2$ with semi-infinite or finite lines attached at $0,1,\infty$ (with the length of the finite edges being $t$ for $t \in [0,\infty)$). Associated to each $t \in [0,\infty)$, we define $f_{v,s,t}$ and $g_{v,s,t}$ for $v \in E$ and for $s \in [0,\infty)$ or $[0,t]$ respectively, and $t \in T$. We also choose a perturbation $f_s$ for $s \in (-\infty, 0]$. We choose these such that:
\begin{enumerate}
\item $f_{s} = f$ for $s \le -1$.
\item $f_{(14)(23) \cdot v, s, t} = f_{(13)(24) \cdot v, s, t} = f_{v,s,t}$ for $t \ge 1$,
\item $g_{(12) \cdot v,s,t} = g_{(34) \cdot v,s,t} = g_{v,s,t}$ for all $v,s,t$.
\item $f_{v,s,t} = f$ for $s \ge 1$, for all $v,t$.
\item $g_{v,s,t} = f$ when $t \ge 1$ and $s \ge 1$.
\end{enumerate}
These conditions are analogues of those made in Definition \ref{defn:s4operators} (here adapted to the $D_8$ case).
\begin{figure}
\caption{Moduli space for the Adem relations.}
\label{fig:ademmodulispace}
\end{figure}
Similarly to the case of the quantum Cartan relation, we define for each $t \in [0,\infty]$ a moduli space $\tilde{\mathcal{M}}_t(\alpha, z)$, consisting of pairs $(v,u)$ such that $v \in D^{i,+} \times D^{j,+} \times D^{j,+}$ (see Lemma \ref{lemma:submandidjdj}) and $u: |t|_Q \rightarrow M$ such that $u$ is $J$-holomorphic, with the Chern number on each sphere being as fixed at the beginning of the proof, and edge and asymptotic conditions as in Figure \ref{fig:ademmodulispace}. There is the previously given $D_8$-action on $E^{i,j} = S^{i} \times S^{j} \times S^{j}$, and $D_8$ also acts by permutations on $\overline{M}_{0,5}$ (which induces an action on the moduli space of $u: |t|_Q \rightarrow M$). Together these yield a $D_8$-action on $\tilde{\mathcal{M}}_t(\alpha, z)$, and we write $\mathcal{M}_t(\alpha, z) = \tilde{\mathcal{M}}_t(\alpha, z) / D_8$. We let $\mathcal{M}(\alpha, z) = \sqcup_{t \in T^c} \mathcal{M}_t(\alpha, z)$, which is a smooth $1$-dimensional manifold (establishing transversality is a modification of Appendix \ref{subsec:bordismquantumcartantrans} using the considerations of Appendix \ref{subsec:quantumademrels}, so we omit a restatement here).
Note that it is immediate from the definition that the $t=0$ boundary corresponds to $q_{D_8}(m_1 \otimes \Psi (e^{i} \sigma_{2}^{j}))(\alpha)$ (i.e. when the output is $z \in \text{crit}(f)$, this yields the coefficient of $z$ in $q_{D_8}(m_1 \otimes \Psi (e^{i} \sigma_{2}^{j}))(\alpha)$). Hence it remains to prove that the $t= \infty$ boundary yields the coefficient of $z$ in $Q\mathcal{S}^{i,b} \circ Q\mathcal{S}^{j,d}(\alpha)$.
We observe that from our choice of $g_{v,s,t}$, we may ensure that $g_{v,s,\infty}$ depends only on the first summand $S^{\infty}$ of $E = S^{\infty} \times S^{\infty} \times S^{\infty}$, which we denote $\tilde{g}_{v,s}$ for $v \in S^{\infty}$. Similarly, we may ensure that $f_{v,s,\infty}$ depends only on the second two summands, denoted $\tilde{f}_{v_1,v_2,s}$ for $(v_1,v_2) \in S^{\infty} \times S^{\infty}$. Then let $S$ be the space obtained by attaching $(-\infty,0]$ to $S^2$ at $0$, and two copies of $[0,\infty)$ to $S^2$ at $1$ and $\infty$ respectively. Let $R : S \rightarrow S$ be the involution that is $z \mapsto z/(z-1)$ on $S^2$, swapping the positive half-lines and fixing the negative half-line.
The configurations for the $t=\infty$ end then decouple. Specifically, if $v = (v_0,(v_1,v_2)) \in E$ then pairs $(v,u) \in \mathcal{M}(\alpha,z)$ correspond to two tuples as follows:
\begin{itemize}
\item A pair $(v_0,u)$ such that $v_0 \in S^{i}$, $u: S \rightarrow M$ such that $u$ is $J$-holomorphic of Chern number $bN$, satisfying conditions in Figure \ref{fig:ademmodulispace}$(I)$.
\item A four-tuple $(v_1,v_2,u_1, u_2)$ such that $(v_1,v_2) \in S^{j} \times S^{j}$, and $u_p: S \rightarrow M$ for $p=1,2$ such that $u_p$ is $J$-holomorphic of Chern number $dN$, satisfying conditions in Figure \ref{fig:ademmodulispace}$(II), (III)$ respectively.
\end{itemize}
The $D_8$ action on these pairs is as follows (recalling that the invariance conditions on $\tilde{f}$ ensures that $\tilde{f}_{v_1,v_2,t} = \tilde{f}_{-v_1,-v_2,t}$ for any $v_1,v_2 \in S^{\infty}$):
\begin{itemize}
\item $\begin{array}{l} (12) \cdot (v_0,u) = (v_0,u), \\ (34) \cdot (v_0,u) = (v_0,u), \\ (13)(24) \cdot (v_0,u) = (-v_0, u \circ R). \end{array} $ \\
\item $\begin{array}{l} (12) \cdot (v_1,v_2,u_1,u_2) = (-v_1,v_2, u_1 \circ R, u_2 \circ R), \\ (34) \cdot (v_1,v_2,u_1, u_2) = (v_1,-v_2,u_1 \circ R, u_2 \circ R), \\ (13)(24) \cdot (v_1,v_2,u_1, u_2) = (v_2,v_1, u_2, u_1). \end{array}$
\end{itemize}
To begin with, we see that counting the pair $(v_0, u)$ such that $v_0 \in S^{\infty}$, modulo the $D_8$ action, is exactly the coefficient of $(Q \mathcal{S}')^{i,b}(A \otimes B(x))$, where the operation $A \otimes B: QH^*(M) \rightarrow QH^*(M) \otimes QH^*(M)[h]$ is determined by counting configurations corresponding to the $(v_1,v_2,u_1,u_2)$ above (with $(v_1, u_1)$ determining the $A$ component and $(v_2,u_2)$ determining the $B$ component), and $Q \mathcal{S}'$ is recalled from Definitions \ref{defn:mss} and \ref{defn:mqss}.
In fact, we only need to count solutions where $v_1 = v_2$ and $u_1 = u_2$. Firstly, (from Figure \ref{fig:ademmodulispace}) consider contributions from using the intermediate critical points $w_1 \neq w_2$. Then if $(v_0, u), (v_1, v_2,u_1,u_2)$ contributes to a term of the form $(Q \mathcal{S}')^{i,b}(w_1 \otimes w_2 T^d)$, it must be that $(v_0, u), (v_2, v_1,u_2,u_1)$ contributes to $(Q \mathcal{S}')^{i,b}(w_2 \otimes w_1 T^d)$. Hence together counting all such contributions, one will attain a summand of the form $$(Q \mathcal{S}')^{i,b}(n \cdot (w_1 \otimes w_2 + w_2 \otimes w_1) T^d),$$ for some $n \in \mathbb{Z}/2$. We know this to be zero by an argument as in Proposition \ref{propn:propositionftw}. Hence the only contributions we must count occur when $w_1 = w_2$, in which case if $v_1 \neq v_2$ or $u_1 \neq u_2$ then solutions come in pairs $(v_0, u_0), (v_1, v_2, u_1, u_2)$ and $(v_0, u_0), (v_2, v_1, u_2, u_1)$ which are not related by the $D_8$-action (hence are counted separately).
In particular, it is immediate that (with asymptotic conditions as given in Figure \ref{fig:ademmodulispace}) the number of pairs $(v_0,u),(v_1,u_1)$ up to the action of $D_8$, is the coefficient of $w_1$ in $Q \mathcal{S}^{j,d}(\alpha)$ multiplied by the coefficient of $z$ in $Q \mathcal{S}^{i,b}(w_1)$. Summing over all $w_1 \in \text{crit}(f)$, we get that the count of the moduli spaces of maps is then exactly the coefficient of $z$ in $Q \mathcal{S}^{i,b} \circ Q \mathcal{S}^{j,d}(\alpha)$ as required.
\end{proof}
\begin{rmk}
Observe that the coefficients of $zT^0$ in $q_{D_{8}}(g m_1 \otimes \Psi b)(x)$ and in $q_{D_{8}}(g^2 m_1 \otimes \Psi b)(x)$ (corresponding to constant spheres) are the same: specifically, we are counting exactly the same moduli space in both cases. Consider the contributions of $q_{D_{8}}(g m_1 \otimes \Psi b)(x)$ and $q_{D_{8}}(g^2 m_1 \otimes \Psi b)(x)$ to Equation \eqref{equation:QAR} for constant spheres. These then cancel out modulo $2$, and so we are left with only $q_{D_8}( m_1 \otimes \Psi b)(x)$, which for constant $J$-holomorphic spheres is $Sq \circ Sq(x)$.
\end{rmk}
\begin{rmk}
The term $qq_{j,0}(\alpha) \in QH^*(M)$ is the $h^{j}$ coefficient in $Q\mathcal{S}(\alpha) * Q\mathcal{S}(\alpha)$. This is one of the correction terms that can be computed, e.g. the $p=|\alpha|$ term in Corollary \ref{corollary:QAR}.
\end{rmk}
\section{$Q\mathcal{S}$ for blow-ups}
\label{sec:blowups}
Denote by $Q \mathcal{S}_{i,j}(x)$ the coefficient of $h^i T^j$ in $Q \mathcal{S}$ (where $|T|$ is the minimal Chern number of $M$). We will demonstrate calculations of $Q\mathcal{S}_{1,1}$ in two cases. The setup in both cases will be similar to the setup in \cite[Section 8]{blaier}, where Blaier computes the quantum Massey product.
\subsection{$\mathbb{CP}^3$}
Fix two generic quadric hypersurfaces in $X = \mathbb{CP}^3$. Their intersection $Y$ is an elliptic curve, hence a torus. We let $M = Bl_Y X$, equipped with the blowdown $\rho: M \rightarrow X$. Recall that there is a $\mathbb{CP}^1$-bundle $\pi: E \rightarrow Y$ over the torus and an inclusion $i: E \rightarrow M$ of the exceptional divisor $E$. Specifically $E$ is the projectivisation of the normal bundle of $y: Y \xhookrightarrow{} X$.
Consider the continuous $3$-disc bundle $\pi': DY \rightarrow Y$ such that $E \xhookrightarrow{} DY$ is an inclusion of the subbundle $E$, with the maps of fibres being inclusion of the boundary $S^2 \xhookrightarrow{} D^3$. Locally, let $U \subset Y$ be a trivialising neighbourhood for $Y$, so there is a homeomorphism $\phi_U: U \times S^2 \xrightarrow{\cong} \pi^{-1}(U)$. Then we define $DY$ to have the same trivialising neighbourhoods as $E$, i.e. $\phi'_U: U \times D^3 \cong \pi'^{-1}(U)$. Further, we require that the transition functions $\psi'_{U_1,U_2} :=(\phi'_{U_1})^{-1} \circ \phi'_{U_2}: (U_1 \cap U_2) \times D^3 \rightarrow (U_1 \cap U_2) \times D^3$ are defined by $$\psi'_{U_1,U_2}((r,\theta),x) = ((r, \psi_{U_1,U_2}(\theta)), x),$$ where here we use polar coordinates on $D^3$ and $\psi_{U_1,U_2}$ is the transition function on $Y$. One can use the Mayer-Vietoris sequence, by observing that $M \cup_{E} DY$ is homotopy equivalent to $X$, to write down the {\it long exact sequence of a blow-up},
\begin{equation}\label{lesblowup}
\xymatrix{
\ldots \ar@{->}[r]
&
H_*(E) \ar@{->}^-{i_* \oplus \pi_*}[r]
&
H_*(M) \bigoplus H_*(Y) \ar@{->}^-{\rho_* - y_*}[r]
&
H_*(X) \ar@{->}^-{\delta}[r]
&
H_{*-1}(E) \ar@{->}[r]
&
\ldots
}
\end{equation}
One can also use the homological Gysin sequence for the bundle $DY \rightarrow Y$ to get an exact sequence (after applying the Thom isomorphism and observing that $DY \rightarrow Y$ is a homotopy equivalence):
\begin{equation}\label{homgysin}
\xymatrix{
\ldots \ar@{->}[r]
&
H_*(E) \ar@{->}^-{\pi_*}[r]
&
H_*(Y) \ar@{->}^-{g}[r]
&
H_{*-3}(Y) \ar@{->}[r]^{\phi}
&
H_{*-1}(E) \ar@{->}[r]
&
\ldots
}
\end{equation}
where $\delta, \phi$ are the connecting homomorphisms in the long exact sequence and the maps $i_*, \pi_*, \rho_*$ and $y_*$ are induced by the continuous maps above. The map $g$ is induced by the composition of $H_*(Y) \cong H_*(DY) \rightarrow H_*(DY, DY - Y)$ (where $DY - Y$ denotes the removal of the $0$-section) with the Thom isomorphism. Putting these together, we see that:
\begin{equation} \label{equation:eq111} H_2(M) \cong H_2(X) \oplus H_2(E)/H_2(Y) \end{equation} \begin{equation} \label{equation:eq222} i_*: H_3(E) \xrightarrow{\cong} H_3(M) \end{equation} \begin{equation} \label{equation:eq333} \phi: H_1(Y) \xrightarrow{\cong} H_3(E) .\end{equation} In particular $\dim H_3 (E) = 2$. The class of a sphere lifted from $\mathbb{P}^3$ to $M$ and the class of a fibre $\pi^{-1}(y)$ of $\pi$ over $y \in Y$ generate $H_2(M)$.
We calculate $c_1(TM)$. There is a natural embedding $j: M \rightarrow \mathbb{P}^3 \times \mathbb{P}^1$ as a complex hypersurface of bidegree $(2,1)$, with respect to the generators of $H_2(M)$ in the previous paragraph. By bidegree, we mean that the number of points of intersection of $M$ with a general curve $A$ of $\mathbb{P}^3 \times \mathbb{P}^1$ is the following:
\begin{itemize}
\item if $A = \mathbb{P}^1 \times \mathbb{P}^0$, then we get an intersection number of $2$ because we are counting the number of solutions of a general quadric equation (the blow-up is defined as the set of $(t,[r:s]) \in \mathbb{CP}^3 \times \mathbb{CP}^1$ such that $r f(t) + s g(t) = 0$, where $f$ and $g$ are the quadric equations defining $Y$).
\item if $A = \mathbb{P}^0 \times \mathbb{P}^1$, then we obtain an intersection number of $1$ because a general point $p \in \mathbb{P}^3$ is such that there is only one point $q$ such that $(p,q) \in M.$
\end{itemize}
Functoriality and the Whitney sum formula imply that $$c_{1}(TM) + c_1(v M) = c_1(T(\mathbb{P}^3 \times \mathbb{P}^1)|_M)$$ where $v M$ is the normal bundle of $M$ in $\mathbb{P}^3 \times \mathbb{P}^1$. Recall $c_1(T(\mathbb{P}^3 \times \mathbb{P}^1)|_M) = (4,2)$, and $c_1(v M) = (2,1)$ because it is the same as the degree (here we note that the Euler class can be reinterpreted as $j^* PD([M])$, represented by the self intersection of $M$, where $[M] \in H_6(\mathbb{P}^3 \times \mathbb{P}^1)$). Hence $c_1(TM) = (2,1)$. Therefore, when calculating $Q\mathcal{S}_{1,1}$ we only need to consider the spheres in the fibre class of $M$ as these are the only $J$-holomorphic spheres of Chern number $1$, which are confined to be in $E$.
Consider $Q\mathcal{S}_{1,1} : H^3(M) \rightarrow H^3(M)$. We will show that $Q\mathcal{S}_{1,1}|_{H^3(M) } = id$. First we show that calculating $Q\mathcal{S}_{1,1}|_{H^3(M)}$ reduces to calculating $Q\mathcal{S}_{1,1}: H^3(E) \rightarrow H^1(E)$. We use $i^{!}$ on cohomology to mean $PD \circ i_* \circ PD^{-1}$, and let $i_! = PD^{-1} \circ i^* \circ PD$ on homology.
\begin{lemma}
\label{lemma:SqE2M}
Fix $a \in H^3(M)$. Then
$$Q\mathcal{S}_{1,1} \circ i^* (a) = i^! \circ Q\mathcal{S}_{1,1}(a)$$ for $a \in H^3(M),$ i.e. \eqref{squareEMcommutes} commutes:
\begin{equation}\label{squareEMcommutes}
\xymatrix{
H^3(M)
\ar@{->}^-{Q\mathcal{S}_{1,1}}[r]
\ar@{->}_-{i^*}[d]
&
H^3(M)
\ar@{<-}^-{i^!}[d]
\\
H^3(E)
\ar@{->}^-{Q\mathcal{S}_{1,1}}[r]
&
H^1(E)
}
\end{equation}
\end{lemma}
\begin{proof}
\begin{figure}
\caption{Configurations for $Q\mathcal{S}_{1,1}$.}
\label{fig:p_3setup}
\end{figure}
Fix a generator $a \in H^3(M)$ whose Poincar\'e dual is represented by a smooth submanifold $A$. To compute the coefficient of $b$ in $Q \mathcal{S}_{1,1}(a)$, we choose $A_v$ for $v \in S^{\infty}$ (such that $A_v$ represents $PD(a)$ for each $v$) and $B^{\vee}$ that represents $PD(b^{\vee})$. To simplify the notation, $^{\vee}$ acts on $H_*(M)$ as the conjugation of the cohomological intersection dual by Poincar\'e duality, so $PD(b)^{\vee} := PD(b^{\vee})$. We therefore see that the coefficient of $b$ in $Q \mathcal{S}_{1,1}(a)$ is determined by counting setups as in Figure \ref{fig:p_3setup}I.
In general, if $a$ is represented by $A$ then a representative of $i^*(a)$ is obtained by choosing some small perturbation $A'$ of $A$, such that $A'$ intersects $E$ transversely. Then the intersection $A' \cap E$, a submanifold of $E$, represents $i^*(a)$. Hence, supposing that we perturb $A$ to intersect $E$ transversely, if we choose $A_v$ to be a sufficiently small perturbation of $A$ for each $v$ then $A_v \cap E$ is transverse for each $v \in S^{\infty}$. By this procedure, we obtain representatives of $i^*A$ for each $v$ that we denote $A_v \cap E$, and we similarly obtain a representative $B^{\vee} \cap E$ of $i^*(B^{\vee})$. This can be done in such a way as to ensure that the space of setups as in Figure \ref{fig:p_3setup}II are transverse: in particular any perturbation of $A_v \cap E$ may be extended to yield a perturbation of $A_v$ in a small neighbourhood of $E$. By making this perturbation sufficiently small, we ensure that the intersection between $A_v$ and $E$ remains transverse.
Observe that using the choice of basis induced from $H_1(Y)$ by Equations \eqref{equation:eq222} and \eqref{equation:eq333}, a direct computation shows that for any $b \in H^3(M)$, \begin{equation} \label{Bvees} i_! PD(b)^{\vee} = (i_*^{-1} PD(b))^{\vee}. \end{equation} Hence in particular $B^{\vee} \cap E$ represents $((i^!)^{-1}b)^{\vee}$. With all of this in mind, the coefficient of $(i^!)^{-1}b$ in $Q \mathcal{S}_{1,1}(i^* a)$ is determined by counting setups as in Figure \ref{fig:p_3setup}II.
Hence, to show that the diagram commutes we need to show that setups of type I and II biject. This is immediate, however, because every $J$-holomorphic curve $u$ of Chern number $1$ (in $M$) is contained in $E$. Hence if $(v,u)$ is a setup of type I (i.e. $u$ intersects $A_{\pm v}$ and $B^{\vee}$) then $u$ will automatically intersect with $E \cap A_{\pm v}$ and $E \cap B^{\vee}$ (hence $(v,u)$ will be a setup of type II), and vice versa.
\end{proof}
\begin{rmk}
In Lemma \ref{lemma:SqE2M}, in order to show that $E \cap A_v$ is of the correct form for Definition \ref{defn:singqss}, we need to demonstrate that in fact $$\bigsqcup_{v \in S^i} (E \cap A_{v}) \times \{ v \}$$ is of the form $A' \times S^i$ for some smooth manifold $A'$. We make two observations:
\begin{itemize}
\item The space $\sqcup_{v \in S^i} A_{v} \times \{ v \}$ is a smooth manifold by assumption, transversely intersecting $E \times S^i$. Hence $\sqcup_{v \in S^i} (E \cap A_{v}) \times \{ v \} = (\sqcup_{v \in S^i} A_{v} \times \{ v \}) \cap (E \times S^i)$ is a smooth manifold. Further, the map $\sqcup_{v \in S^i} E \cap A_{v} \times \{ v \} \rightarrow S^i$ induced by projection to the second factor is a proper surjective submersion between two smooth manifolds, hence it is a fibre bundle by Ehresmann's Lemma.
\item This fibre bundle is trivial, because we know that it must extend to a fibre bundle over $D^{i+1,+}$, the upper hemisphere in $S^i$, which is a contractible base.
\end{itemize}
A lemma similar to Lemma \ref{lemma:SqE2M} would hold if we replaced embedded submanifolds by pseudocycles.
\end{rmk}
Note that the index of the codomain of $Q \mathcal{S}_{1,1}$ changes by $2$, between $H^3(M)$ and $H^1(M)$, in Diagram \eqref{squareEMcommutes}. This comes from the fact that $i^!$ changes cohomological degree by 2 (and is also to be expected because the minimal Chern number of $E$ is $2$, whereas the minimal Chern number of $M$ is $1$).
Henceforth, we will use the Morse theoretic definition of the quantum square. Observe that $E = Y \times \mathbb{P}^1$ so we may pick the Morse function on $E$ to be $f+g$ where $f: Y \rightarrow \mathbb{R}$ and $g: \mathbb{P}^1 \rightarrow \mathbb{R}$, such that $g$ has two critical points of index $0,2$, which we call $a_0, a_2$, and $f$ has critical points $b_0, b_1, b_1^{'}, b_2$ (whose indices are the subscripts). Recalling that $\pi: E \rightarrow Y$ is the projection map, $$(\pi^!)^{-1} (b_1) = (b_1, a_0) \text{ and } \pi^*(b_1) = (b_1,a_2).$$
\begin{lemma}
\label{lemma:SqE2Y}
Let $Sq_i(x)$ be the coefficient of $h^i$ in $Sq(x)$. Then $Q\mathcal{S}_{1,1} =\pi^* \circ Sq_1 \circ \pi^!$.
\end{lemma}
\begin{proof}
Recall that input elements of $H^3(E)$ correspond to $(b_1,a_2)$ or $(b^{'}_1,a_2)$, which project down under $\pi$ to $b_1$ or $b_1^{'}$ respectively. Output elements of $H^1(E)$ correspond to $(b_1,a_0)$ or $(b^{'}_1,a_0)$.
We will show that pairs $(\tilde{v},\tilde{u})$ in the moduli space used to calculate the coefficient of $c$ in $Q\mathcal{S}_{1,1}(x)$ correspond to pairs $(v,u)$ in the moduli space yielding the coefficient of $\pi^* c$ in $Sq_{1}(\pi^{!} x)$. For clarity we will fix $x = (b_1,a_2)$ and $c = (b_1,a_0)$ (hence $\pi^{*} (b_1) = x$ and $\pi^! (c) = b_1$), although the argument follows identically for any choice of $x$ and $c$. For conciseness we denote the moduli spaces of pairs respectively as $\mathcal{M}_Q$ and $\mathcal{M}$ for $Q \mathcal{S}$ on $E$ and $Sq$ on $M$.
Consider a pair $(v,u) \in \mathcal{M}_Q$, as on the right hand side of Figure \ref{fig:sqtosqQblowup}. We observe that, using the projection $\pi: E \rightarrow Y$, the setup $(v, \pi u)$ is one that is counted when calculating the coefficient of $b_1$ in $Sq_{1}(b_1)$, i.e. $(v, \pi u) \in \mathcal{M}$. This is because a fibre sphere in $E$ lives above a point $y \in Y$, hence the incidence condition of the flowlines attaching to $S^2$ at the points $0,1,\infty$ translates under this projection to the three flowlines coinciding at the point $y \in Y$. As the transversality condition is generic, we may choose the perturbations $f_{v,s}$ and $g_{v,s}$ of $f$ and $g$ in such a way that both the moduli space $\mathcal{M}$ and $\mathcal{M}_Q$ are transverse.
We show that every pair $(v,u) \in \mathcal{M}$ arises uniquely from a pair $(\tilde{v}, \tilde{u}) \in \mathcal{M}_Q$ as in the previous paragraph. Consider such a pair $(v,u)$: specifically, the image of $u$ consists of three perturbed half-flowlines meeting at some point $y$. Consider the $- \nabla f_{v,s}$ flowline $l$, which is the image of $u$ restricted to one of the two positive halflines. This flowline is asymptotic to $b_1$ in $Y$, hence it lifts uniquely to a $- \nabla (f_{v,s} + g_{v,s})$ flowline that is asymptotic to $(b_1,a_2)$ in $E$. The uniqueness is because $a_2$ is the maximum of $g$, and hence there is a unique $- \nabla g_{v,s}$-flowline $L$ asymptotic to $a_2$. Specifically this is the flowline $L: [0,\infty) \rightarrow S^2$ such that $L(s) = a_2$ for $s \gg 0$. See Figure \ref{fig:sqtosqQblowup}. Likewise the output flowline on $Y$, which is a $- \nabla f_s$-flowline, lifts uniquely to a $-\nabla (f_s+g_s)$-flowline on $E$, asymptoting to $(b_1,a_0)$, which is unique because $a_0$ is the minimum of $g$.
\begin{figure}
\caption{Lifting configurations of $Sq$ on $Y$ to $Q\mathcal{S}_{1,1}$ on $E$.}
\label{fig:sqtosqQblowup}
\end{figure}
Moreover, because this setup lifts from $Y$ we know that the three flowlines will all intersect the $J$-holomorphic sphere $\pi^{-1} y$ at $0$. We also know where each lifted flowline intersects $J$-holomorphic sphere $\pi^{-1} y$, as this is determined by the gradient of $f+g$ on each of the flowlines at $s=0$. Hence there is a unique $J$-holomorphic sphere that fits into the lifted setup, giving a unique configuration on $E$ corresponding to the configuration on $Y$.
\end{proof}
\begin{proof}[Proof of Equation \eqref{equation:blowupID1} in Theorem \ref{thm:blowupID}]
Note that $Sq|_{H^1(Y)}: H^1(Y) \rightarrow H^1(Y)$ is the identity, which is known by the definition of $Sq$. Lemmas \ref{lemma:SqE2M} and \ref{lemma:SqE2Y} imply that Diagram \eqref{squareMEYcommutes} commutes.
\begin{equation}\label{squareMEYcommutes}
\xymatrix{
H^3(M)
\ar@{->}^-{Q\mathcal{S}_{1,1}}[r]
\ar@{->}_-{i^*}[d]
&
H^3(M)
\ar@{<-}^-{i^!}[d]
\\
H^3(E)
\ar@{->}^-{Q\mathcal{S}_{1,1}}[r]
\ar@{->}_-{\pi^!}[d]
&
H^1(E)
\ar@{<-}_-{\pi^*}[d]
\\
H^1(Y)
\ar@{->}^-{Sq_{1}}[r]
&
H^1(Y)
}
\end{equation}
The abelian group $H^i(E)$ is generated by $\{ (b_1,a_{i-1}), (b'_1,a_{i-1}) \}$ for $i=1,3$. From the axioms of $Sq$, we know that $Sq_1 = id$. Then from Lemma \ref{lemma:SqE2Y}: \begin{equation} \label{equation:eq211} Q\mathcal{S}_{1,1}(b_1,a_0) = (b_1, a_2) \text{ and } Q\mathcal{S}_{1,1}(b^{'}_1,a_0) = (b^{'}_1, a_2) \end{equation}
We apply the isomorphism between Morse and classical cohomology and then Poincar\'e duality to the Morse cocycles $(b_1,a_2) \in H^3(E)$ and $(b_1,a_0) \in H^1(E)$. This yields cycles $B_1 \in H_1(E)$ and $B_3 \in H_3(E)$. Likewise we define $B'_i \in H_i(E)$ for $(b'_1,a_{3-i})$ for $i=1,3$. We recall that $^{\vee}$ is the intersection dual on homology (defined as the conjugation by Poincar\'e duality of the duality on cohomology, for our given basis). Note that \begin{equation} \label{equation:eq212} B^{\vee}_3 = B'_1, \end{equation} and so on. In this notation $Q\mathcal{S}_{1,1} (B_1) = B_3$. Observe that $B_3 \cap B_3 = \emptyset$ so we see that $i_* B_3 \cap i_* B_3 = \emptyset$ (which is immediate if one chooses a generic submanifold representative: then nonintersection in $E$ implies nonintersection in $M$). As $H_3(M)$ is generated by $i_* B_3$ and $i_* B'_3$, this implies
\begin{equation} \label{istarB} (i_* B_3)^{\vee} = i_* B'_3.\end{equation}
By Equation \eqref{istarB}, \begin{equation} \label{equation:bbb1} i_* \circ Q\mathcal{S}_{1,1} \circ i_! ((i_* B_3)^{\vee}) = i_* \circ Q\mathcal{S}_{1,1} \circ i_! ((i_* B'_3)).\end{equation}
By Equation \eqref{Bvees}, then Equations \eqref{equation:eq211} and \eqref{equation:eq212}, \begin{equation} \label{equation:bbb2} i_* \circ Q\mathcal{S}_{1,1} \circ i_! ((i_* B_3)^{\vee}) = i_* \circ Q\mathcal{S}_{1,1} (B^{\vee}_3) = i_* B'_3. \end{equation}
From Equations \eqref{equation:bbb1} and \eqref{equation:bbb2}, along with identical calculations for the other generators, plus the fact that $i_*$ is an isomorphism, we deduce that $i_* \circ Q\mathcal{S}_{1,1} \circ i^* = id.$ Diagram \eqref{squareMEYcommutes} then implies that $$Q \mathcal{S}_{1,1} = i_* \circ Q\mathcal{S}_{1,1} \circ i^* = id.$$
\end{proof}
\subsection{$\mathbb{CP}^1 \times \mathbb{CP}^1 \times \mathbb{CP}^1$}
Now let $X = \mathbb{CP}^1 \times \mathbb{CP}^1 \times \mathbb{CP}^1$, with $Y \subset X$ defined by the intersection of two generic linear hypersurfaces. ``{\it Linear}" means that we require that the defining equation of the hypersurfaces are linear in the coordinates of each $\mathbb{CP}^1$ (when the other coordinates are treated as constants). The subvariety $Y$ is in fact a torus, which one can see by using the adjunction formula: in particular, one proves that $K_Y = 0$. Specifically, $K_X = (-2,-2,-2)$ and the two linear hypersurfaces are $(1,1,1)$ by definition, and hence $K_Y = (1,1,1) + (1,1,1) + (-2,-2,-2) = 0$. Then the genus $g$ of $Y$ satisfies $g = 1 + (\deg K_Y)/2 = 1$, hence $Y$ is a surface of genus $1$.
Define $M = Bl_Y X$. Using a similar method to the $\mathbb{CP}^3$ case, we can show that the Chern class of $M$ is $(1,1,1,1)$, where the first three entries correspond to lifting the $J$-holomorphic spheres on each of the $3$ coordinates of $X$, and the final entry corresponds to a fibrewise $J$-holomorphic sphere in the exceptional divisor. Hence, when calculating $Q\mathcal{S}_{1,1}: H^3(M) \rightarrow H^3(M)$ there are contributions from the fibre direction plus those from $J$-holomorphic spheres in $X$ that have been lifted to $M$. The fibrewise contributions are calculated in exactly the same way as for $\mathbb{CP}^3$, so we turn our attention to the spheres lifted from $\mathbb{CP}^1 \times \mathbb{CP}^1 \times \mathbb{CP}^1$.
\begin{proof}[Proof of Equation \eqref{equation:blowupID2} in Theorem \ref{thm:blowupID}]
Suppose the defining linear equations for $Y$ are $P_1(x,y,z)$ and $P_2(x,y,z)$ in local coordinates on $\mathbb{P}^1 \times \mathbb{P}^1 \times \mathbb{P}^1$. Fixing $x$ and $y$, there is at most one solution $z$ such that $P_1(x,y,z) = P_2(x,y,z)=0$. Hence, let $S = \{ x \} \times \{ y \} \times \mathbb{CP}^1$ be a $J$-holomorphic curve in $X$, and $\tilde{S}$ its lift to $M$. By the previous observation, $\tilde{S} \cap E$ contains at most $1$ point. If $A \in H_3(M)$ then, recalling Equation \eqref{equation:eq222}, we may assume that $A = i_* A_E$ for some $A_E \in H_3(E)$. Recall that to calculate $Q \mathcal{S}_{1,1}(a)$, where $A = PD (a)$, we need to choose some $A_v$ satisfying the transversality conditions. We may pick the representatives $A_v$ to be some $D_v \times \mathbb{P}^1$, where $D_v$ is a representative of $D \in H_1(Y)$. Then assuming $S$ is not contained in $Y$, there are no solutions to Figure \ref{fig:p_1_3setup}. For such a solution, we would need that $\tilde{S}$ intersects $E$ in at least $2$ points (as $A_v \subset E$ for all $v$), which we know is impossible. Hence the space of such setups is transverse, because it is empty.
\begin{figure}
\caption{Configurations for contributions to $Q\mathcal{S}_{1,1}$.}
\label{fig:p_1_3setup}
\end{figure}
The case $S \subset Y$ is not possible, because there is no degree $1$ holomorphic map $\mathbb{P}^1 \rightarrow Y$.
\end{proof}
\appendix
\section{Equivariant Compactification}
\label{sec:equivariantcompact}
We give a more in-depth treatment of Equation \eqref{equation:Sq'chainmap} in Definition \ref{defn:mss}.
Consider a $1$-dimensional moduli space $\mathcal{M}_{i}(a_1,a_{2}, a_3)$. Specifically, we require that $|a_1| -|a_2| - |a_3| + i = 1$. Here, the notation is as in Section \ref{subsec:msss}. One characterises the different possible limits $(v_{\infty}, u_{\infty})$ of some sequence of pairs $(v_j,u_j)$ in $\mathcal{M}_{i}(a_1,a_{2}, a_3)$. There are two possibilities for each:
\begin{itemize}
\item either $v_{\infty} \in D^{i,+}$ or $v_{\infty} \in \partial D^{i,+} = D^{i-1,+} \cup D^{i-1,-}$, and
\item either $u_{\infty}$ is a map taking as its domain the $Y$-shaped graph with a single broken edge (broken) or $u_{\infty}$ is a map with the $Y$-shaped graph as its domain (unbroken).
\end{itemize}
No codimension $1$ boundary setups are present when either:
\begin{itemize}
\item $u_{\infty}$ is unbroken and $v_{\infty} \in D^{i,+}$, or
\item $u_{\infty}$ is broken and $v_{\infty} \in \partial D^{i,+}$.
\end{itemize}
In the case where $v_{\infty} \in D^{i-1,+}$ and $u_{\infty}$ is unbroken, one obtains the coefficient of $a_1$ in $Sq'_{i-1}(a_2 \otimes a_3) = Sq'_{i}((a_2 \otimes a_3)h)$. Similarly, when $v_{\infty} \in D^{i-1,-}$ and $u_{\infty}$ is unbroken, one obtains the coefficient of $a_1$ in $Sq'_{i-1}(a_3 \otimes a_2) = Sq'_{i}((a_3 \otimes a_2)h)$. When $v_{\infty} \in D^{i,+}$ and $u_{\infty}$ is unbroken, this is the standard compactification of the $Y$-shaped graph, and thus one obtains the coefficient of $a_1$ in $dSq'_i(a_2 \otimes a_3) + Sq'_i((d a_2) \otimes a_3) + Sq'_i(a_2 \otimes (d a_3))$. These are all of the terms in Equation \eqref{equation:Sq'chainmap}.
\section{Notes on Transversality}
Following are more detailed treatments of the relevant transversality considerations for the sections of this paper.
\subsection{The Morse Steenrod Square: Section \ref{subsec:msss}}
\label{subsec:mssrmks}
In the second of the conditions at the beginning of Section \ref{subsec:msss}, for $a_1, a_2 \in \text{crit}(f)$, we required that our moduli spaces were transverse. More specifically, denote by $$W^s(a_2, f^2_{v,s}) = \{ p \in M | {\displaystyle \lim_{s \rightarrow \infty} } \phi_{v,s}(p) = a_2 \},$$ where $\phi_{v,t}$ is the $1$-parameter family of diffeomorphisms defined for $v \in S^i$ and $t \ge 0$ by \begin{equation} \label{equation:backint} \dfrac{d \phi_{v,t}}{dt}(s) = - \nabla f^2_{v,s} \text{ and } \phi_{v,0} = id. \end{equation} We recall first that if $a$ is in fact a sum of critical points of $f$ such that $da = 0$, then there is a (partial) compactification $\overline{W^s(a,f)}$ of $W^s(a,f)$, the stable manifold of the critical point $a$ for the Morse function $f$. The compactification $\overline{W^s(a,f)}$ comes equipped with an evaluation map $E: \overline{W^s(a,f)} \rightarrow M$, so that $\overline{W^s(a,f)}$ becomes a pseudocycle (i.e. \cite{schwarzmorsesingiso}). We consider $$\tilde{\euscr{W}}^{i} := \overline{W^u(a_1, f)} \times \bigsqcup_{v \in S^i} \overline{W^s(a_2, f^2_{v,s})} \times \overline{W^s(a_2, f^2_{-v,s})} \times \{ v \},$$ and $\euscr{W}^{i} = \tilde{\euscr{W}}^{i} / (\mathbb{Z}/2)$ (i.e. the pseudocycle descending to the quotient). The $\mathbb{Z}/2$-action is induced from $M \times M \times M \times S^i$, fixing the first $M$ factor, swapping the second and third $M$ factors and acting antipodally on $S^i$. This induces a smooth pseudocycle representative of a cycle in $M \times ((M \times M) \times_{\mathbb{Z}/2} S^i)$, which we can see for example by precomposing the smooth $\mathbb{Z}/2$-equivariant map \begin{equation} \label{equation:pseudohere}\begin{array}{l} \nu: M \times M \times M \times S^i \rightarrow M \times M \times M \times S^i, \\ (x_0, x_1, x_2,v) \mapsto (x_0, \phi_{v,-1}(x_1), \phi_{-v,-1}(x_2), v). 
\end{array} \end{equation} with the pseudocycle $$E \times E \times E \times id: \overline{W^u(a_1,f)} \times \overline{W^s(a_2,f)} \times \overline{W^s(a_2,f)} \times S^i \rightarrow M \times M \times M \times S^i.$$ Here $\phi_{v,-1}$ is the diffeomorphism induced by backwards integrating along $f^2_{v,s}$ from $s=1$ to $s=0$: specifically, $\dfrac{d \phi_{v,t}}{dt}(-s) = \nabla f^2_{v,1-s} \text{ and } \phi_{v,0} = id$ for $s \in [0,1]$, analogously to Equation \eqref{equation:backint}. Indeed, $\phi_{v,-1} = \phi_{v,1}^{-1}$. We abusively denote by $\euscr{W}^{i}$ the pseudocycle associated to the quotient by $\mathbb{Z}/2$ of $\nu \circ (E \times E \times E \times id)$.
For transversality, we require that for $$\zeta: M \times \mathbb{RP}^i \rightarrow M \times( (M \times M) \times_{\mathbb{Z}/2} S^i), \text{ such that } (x, [v]) \mapsto (x,[x,x,v]),$$ the pseudocycle $\euscr{W}^{i}$ is transverse to $\zeta$. Indeed, the coefficient $n_{a_1,a_2,a_3,i}$ in Definition \ref{defn:mss} is the intersection number of these two pseudocycles. More generally in that definition, we considered moduli spaces $\mathcal{M}_i(a_1,a_2,a_3)$ for general $a_1,a_2,a_3 \in \text{crit}(f)$. This does not yield a pseudocycle (as there may be codimension $1$ boundary strata). Nonetheless, such an intersection may still be defined at the chain level, and the operation $Sq$ itself is only well defined on cohomology (i.e. exactly when the codimension $1$ strata of these moduli spaces may be glued so as to define a smooth manifold).
To demonstrate that we can choose such an $f_{v,s}$ (and indeed such a choice is generic in a reasonable sense), we first assume that the $f_{v,s}$ are constrained to $U_f$ from Section \ref{subsec:prelimbcncon}. We next, as in the same section, assume that $f_{v,s} =\beta(s) f_{v,0} + (1-\beta(s)) f$ for the monotonic bump function $\beta : \mathbb{R} \rightarrow [0,1]$. Observe that for $v \in S^0$, there is always a choice of $f_{v,0}$ and $f^1_s$ such that $f_{v,0}$, $f_{-v,0}$ and $f^1_0$ yield transverse moduli spaces $\mathcal{M}'_i(a_1,a_2,a_3)$ of dimension $|a_1| - |a_2| - |a_3| + i$, as in Definition \ref{defn:mss}. The above pseudocycle description using $\zeta$ and $\euscr{W}^{i}$ reveals that this is simply a classical transversality question. We wish to ensure transversality for all possible incoming and outgoing critical points $a_1,a_2,a_3$, which requires only a finite number of choices. Having shown a base case, we then proceed inductively. Next suppose that we have made a choice of $f_{v,0}$ for $v \in S^{i-1}$. Choose a small open collar neighbourhood $N^{i-1}$ of $S^{i-1}$ in $S^i$. As the setup is transverse when we let $v \in S^{i-1}$ vary, if we pick some small perturbation of $f_{v,0}$ for $v \in N^{i-1}$, then the pseudocycle intersection remains transverse when we let $v$ vary in $N^{i-1}$. We then choose an extension of $f_{v,0}$ to $S^i$. Observe that we may freely (i.e. without requiring any consistency conditions on our $f_{v,0}$) pick a small perturbation away from a (potentially smaller than $N^{i-1}$) collar neighbourhood of $S^{i-1}$. This ensures that the intersection is now transverse when we vary $v$ in $S^{i}$, and remains transverse when restricting to $S^{i-1}$. Further, as we make a countable number of choices, this condition is generic. This is an example of family Morse homology, as covered in \cite{hutchingsfamilies} (the Floer theoretic example of this is made in \cite[Section 4c]{seidel}).
\begin{rmk}
The difference between the case considered here and that in \cite{seidel} is that our parameter space is $\mathbb{RP}^i$, and not the space of flowlines of some fixed Morse function $h: \mathbb{RP}^i \rightarrow \mathbb{R}$. In our instance there is no technical difference between these two options, because the underlying chain complexes have a trivial action of $\mathbb{Z}/2$. The technical importance of using a parametrisation by the space of flowlines appears when considering gluing and compactness of equivariant flowlines in the equivariant Morse or Floer complex: see \cite[4b]{seidel}.
\end{rmk}
\begin{rmk}
Note that the transversality condition required here is weaker than requiring that $W^s(a_2, f^2_{v,s}) \pitchfork W^s(a_2,f^2_{-v,s})$ and $(W^s(a_2, f^2_{v,s}) \cap W^s(a_2,f^2_{-v,s})) \pitchfork W^u(a_1,f)$ hold for all $v$ (which is impossible in general with our given conditions). Indeed, it is the failure of transversality for particular isolated $v$ that ensures we obtain interesting Steenrod squares.
\end{rmk}
\subsection{The Cartan Relation: Section \ref{subsec:Cartan}}
\label{subsec:appendcartrel}
We will, for convenience, assume that $f^i_{v,s,t} = f$ for $s \ge 1$ when $i=3,4,6,7$.
In Section \ref{subsec:Cartan}, when constructing the moduli spaces that we used to prove the Cartan relation, we asked that we chose our $f^i_{v,s,t}$ to be ``generic at each vertex". To illustrate what is meant by this, observe that for each vertex of the graph corresponding to $t \in T$, there are either three or five edges meeting at this vertex. The idea is that for each $p$, the smooth functions $f^p_{v,0,t}$ and $f^p_{v,t,t}$ are chosen in such a way that all of the intersections occur transversely within the space $M^{\times 9} \times_{\mathbb{Z}/2} S^{\infty}$ (denoting the $9$-fold Cartesian product of $M$ by $M^{\times 9}$). Here, labelling the copies of $M$ by $1,...,9$, the $\mathbb{Z}/2$-action is denoted as the transposition $(56)(89)$.
More specifically, we first define (analogously to Equation \eqref{equation:backint}) some $1$-parameter families of diffeomorphisms $\phi^p_{v,s,t}: M \rightarrow M$ (with parameter $s$), via $$\dfrac{d \phi^p_{v,s,t}}{ds}(s_0) = - \nabla f^p_{v,s_0,t} \text{ and } \ \phi_{v,0,t} = id \text{ for all } v,t.$$ For the moduli space denoted $\mathcal{M}_1(x,y,z)$ we define a map $$\zeta_1: M \times M \times M \times \mathbb{RP}^i \rightarrow ( M^{\times 9}) \times_{\mathbb{Z}/2} S^i,$$ $$(x_1, x_2, x_3, [v]) \mapsto [x_1, x_1, x_1, x_2, x_2, x_2,x_3, x_3, x_3,v],$$ recalling that in this instance $f^2_{v,s,t}$ and $f^5_{v,s,t}$ (hence $\phi^2_{v,s,t}$ and $\phi^5_{v,s,t}$) are independent of $v$. We also define: $$\nu_1: M^{\times 7} \times S^i \times T_K \rightarrow M^{\times 9} \times S^i,$$ where $T_K = [0,K]$ is an interval for any $K \in \mathbb{R}_{>0}$, $$\left( \begin{array}{l} m_1 \\ m_2 \\ m_3 \\ m_4 \\ m_5 \\ m_6 \\ m_7 \\ v \\ t \end{array} \right) \mapsto \left( \begin{array}{l} \phi^1_{v,1,t}(m_1) \\ m_2 \\ m_3 \\ \phi^2_{v, t,t}(m_2) \\ \phi^3_{v,-1, t}(m_4) \\ \phi^3_{-v,-1, t}(m_5) \\ \phi^5_{v,t,t}(m_3) \\ \phi^6_{v,-1, t}(m_6) \\ \phi^6_{-v,-1, t}(m_7) \\ v \end{array} \right),$$ to construct a pseudo-cycle bordism when composing with $$\left( \begin{array}{l} E \\ id_M \\ id_M \\ E \\ E \\ E \\ E \\ id_{S^i} \\ id_{T_K} \end{array} \right) {\Huge :}\left( \begin{array}{l} \overline{W^u(a_3,f)} \\ M \\ M \\ \overline{W^s(a_1,f)} \\ \overline{W^s(a_1,f)} \\ \overline{W^s(a_2,f)} \\ \overline{W^s(a_2,f)} \\ S^i \\ T_K \end{array} \right) \rightarrow M^{\times 7} \times S^i \times T_K,$$ where $E$ is the evaluation map. Transversality in this instance means that we want to make a choice of $f^i_{v,0,t}$ (for all $i$) and $f^j_{v,t,t}$ (for $j=2,5$) such that the two pseudocycles intersect transversely for a generic choice of $t$, including $t=0, 1$. 
To do this we appeal once again to genericity for Morse homology, observing as previously that we may always perturb any choice of $f^i_{v,0,t}$ and $f^j_{v,t,t}$ in a generic way so that these pseudocycles intersect transversely. This ensures that the moduli space $\mathcal{M}'_1(a_1,a_2,a_3)$ is a manifold of dimension $1$ if $|a_3| - 2|a_1| - 2 |a_2| + i =0$ (intuitively there is a limiting process as $K \rightarrow \infty$, in practice one appeals to a gluing theorem). There is a similar argument for $\mathcal{M}'_2(a_1,a_2,a_3)$.
\begin{rmk}
\label{rmk:EZ/2}
Some of the conditions require a choice of $f^p_{v,s,t}$ that becomes independent of $v$ for large enough $t$. The reason why this is not an unrealistic request is that we need to retain a nontrivial $v$ dependency on the moduli space in some form, but it need not be everywhere. In essence we are constructing a copy of $E \mathbb{Z}/2$: there is a tuple $(f^{1}_{v,s,t},...,f^{7}_{v,s,t})$ for each $v \in S^{\infty}$, along with the associated $\mathbb{Z}/2$ action $\iota$. Our choices ensure that $\iota: v \mapsto -v$ always acts freely on tuples, hence the union of tuples over all $v \in S^{\infty}$ is an $E \mathbb{Z}/2 \subset (C^{\infty}(M))^7$. Nonetheless, each pair $(f^{i}_{v,s,t},f^{j}_{v,s,t})$ for $(i,j)=(2,5),(3,4),(6,7)$ also defines an $E \mathbb{Z}/2$, as long as the action remains free on that pair (which will be the case for a generic choice of $v$-dependence). So as long as $\iota$ acts freely on some pair, the set of these tuples is an $E \mathbb{Z}/2$.
\end{rmk}
\begin{rmk}
Intuitively, the $M^{\times 9}$ corresponds to the $9$ possible finite ends of a flowline in the domain of $\mathcal{M}'_1(a_1,a_2,a_3)$ (as each vertex has valence $3$). The $M^{\times 7}$ corresponds to the $7$ edges in the domain of $\mathcal{M}'_1(a_1,a_2,a_3)$. Then the condition of intersecting with $\zeta_1$ exactly identifies the edges at the vertices at their finite endpoints.
\end{rmk}
\subsection{Transversality for the quantum Steenrod square: Section \ref{sec:SqQviaMorse}}
\label{subsection:transvholspheres}
The argument that we can ensure that the moduli spaces $\mathcal{M}_{i,j}(b,a)$ are carved out transversely in Section \ref{sec:SqQviaMorse} is identical to that in Appendix \ref{subsec:mssrmks}, after turning this problem into a suitable intersection problem involving pseudocycles.
In particular, fixing $a,b$ we consider $$\tilde{\euscr{W}} := \bigsqcup_{v \in S^i} W^u(b,f) \times W^s(a, f^2_{v,s}) \times W^s(a, f^2_{-v,s}) \times \{ v \} \subset M \times M \times M \times S^i,$$ which similarly to the previous appendices when passing to the partial compactification (if $a$ is a closed sum of critical points) induces (via the evaluation map) a pseudocycle that we denote $\euscr{W}$ in $M \times ((M \times M) \times_{\mathbb{Z}/2} S^i)$. We require that this intersects transversely with the pseudocycle defined by the evaluation map, $$ev: \overline{\mathcal{M}}(j,J) \times S^i \rightarrow M \times M \times M \times S^i,$$ defined by $$ev(u,v) = (u(0), u(1), u(\infty), v).$$ We observe that this is classically a pseudocycle, such as in \cite[Theorem 6.6.1]{jholssympl} where we denote by $\overline{\mathcal{M}}(j,J)$ the compactification of the smooth moduli space of $u: S^2 \rightarrow M$ such that $u_*[S^2]$ is a homology class of Chern number $jN$ and $u$ is $J$-holomorphic.
Once we quotient the two pseudocycles by the given $\mathbb{Z}/2$-action, our requirement is then that we can perturb the choice of Morse functions in such a way that the Morse pseudocycle is transverse to the pseudocycle $ev$. This is the same as the previous transversality problems.
\subsection{The quantum Cartan relation: Section \ref{sec:quancar}}
\label{subsec:bordismquantumcartantrans}
In the proof of Lemma \ref{lemma:lem1}, and in Appendix \ref{subsec:appendcartrel}, we constructed a bordism. We will make this somewhat clearer, using the language of the previous appendices.
Strictly, for each $t$ the setup as given can be shown to be identical to the intersection of two pseudocycles, as usual. The first is:
$$ev: \mathcal{M}_j(J) \times \mathcal{M}_j(J) \times \mathcal{M}_j(J) \times S^i \rightarrow M^9 \times S^i,$$ acting by $$ev: (u_1,u_2,u_3,v) \mapsto (u_1(0),u_1(1), u_1(\infty), u_2(0),u_2(1), u_2(\infty),u_3(0),u_3(1), u_3(\infty),v).$$
The second is (for $\chi_i : X_i \rightarrow M \times S^i$ and $\gamma_i: Y_i \rightarrow M \times S^i$ being the ``input" pseudocycles, and $\zeta: Z \rightarrow M$ the ``output" pseudocycle) the following map: $$g: M \times M \times X \times X \times Y \times Y \times Z \times S^i \times [0,K] \rightarrow M^9 \times S^i,$$ for some large $K$, such that \begin{equation} \label{equation:qcarpseudo} g: \left( \begin{array}{l}a_1 \\ a_2 \\ x_1 \\ x_2 \\ y_1 \\ y_2 \\ z \\ v \\ t \end{array} \right) \mapsto \left( \begin{array}{l} \zeta(z) \\ a_1 \\ a_2 \\ \phi^2_{v,t,t}(a_1,v) \\ \phi^3_{v,-1,t} \circ \chi_v(x_1,v) \\ \phi^4_{v,-1,t} \circ \chi_{-v}(x_2,-v) \\ \phi^5_{v,t,t}(a_2, v) \\ \phi^6_{v,-1,t} \circ \gamma_v(y_1,v) \\ \phi^7_{v,-1,t} \circ \gamma_{-v}(y_2,-v) \\ v\end{array} \right) \end{equation} We observe that this is a pseudocycle bordism, identical to the construction for the classical Cartan relation of Appendix \ref{subsec:appendcartrel}. For each fixed $t$, the pseudocycle is then obtained by using the parameter $t \in [0,K]$. Observe that for sufficiently large $K$, an (equivariant) gluing theorem (in this case it is nothing more than a standard gluing theorem: see Appendix \ref{sec:equivariantgluing}) shows that the $K$-end of the bordism corresponds to a count using broken trajectories. Further, the entire bordism $g$ above shows that the pseudocycles corresponding to the $t=0$ and $t=K$ ends are bordant, hence have the same intersection with the pseudocycle $ev$.
\subsection{Transversality for the quantum Adem relations: Section \ref{sec:QAR}}
\label{subsec:quantumademrels}
We give only brief details for this case, as one just needs to edit the previous transversality arguments using the discussion in this Appendix.
We recall that in our previous constructions, we proved transversality by observing that we may assume that $v$ varies in some fundamental domain $D$ of the $\mathbb{Z}/2$-action on $S^{\infty}$, and then we may freely choose $f_{v}$ for $v \in D$. We would like to make a similar claim for the more general finite groups that we consider.
Suppose that we are defining operations as in Section \ref{sec:QAR}, using $ES_4$ as our parameter space. One constructs a pseudocycle as in Equation \eqref{pseudoaaa2}, and intersects this with the evaluation pseudocycle in Equation \eqref{pseudoaaa1}. In order to ensure transversality, one requires that the $f_v$ may be chosen (where $v \in E S_4$) sufficiently generically. In particular, we must ensure that the invariance conditions are not troublesome. Suppose that $D$ were a fundamental domain of the $S_4$-action on $ES_4$. The only way that the invariance conditions could prove to be a problem for transversality is if one of the invariance conditions were to in some way relate $(1 p) \cdot D$ with $(1 q) \cdot D$ for some $p,q \in \{1,2,3,4\}$. If this were the case then we could not freely choose the Morse function $f_{(1 q) \cdot v}$ with $v \in D$ (because then this would influence our choice of $f_{(1 p) \cdot v}$ with $v \in D$). Recall that the invariance conditions required that $f_{(23) \cdot v} = f_{(24) \cdot v} = f_v$. The pseudocycle that one must write down involves $f_v$, $f_{(12) \cdot v}$, $f_{(13) \cdot v}$ and $f_{(14) \cdot v}$. It is then sufficient to demonstrate that the cosets of $G:= \langle (23), (24) \rangle \subset S_4$ are exactly $\{ G, (12) \cdot G, (13) \cdot G, (14) \cdot G \}$, which is a straightforward verification.
\section{Equivariant Gluing}
\label{sec:equivariantgluing}
We will not repeat the classical gluing argument, as the pertinent details are well known; see for example \cite[Chapter 10]{jholssympl}. Instead, for completeness we mention gluing for the equivariant case (i.e. as necessary for Section \ref{subsec:Cartan}). A more general equivariant gluing argument, as in for example \cite[Section 4c]{seidel}, is not necessary for this paper. In the case of Morse flowlines, observe that the Morse-Smale condition is open, so for a fixed metric $g$ we may assume that each pair $(f_{v,s},g)$ is Morse-Smale. The equivariant gluing theorem for a broken pair of flowlines with parameter $v$ works by simply applying a standard gluing argument to a $- \nabla f_{v,s}$-flowline concatenated with a $- \nabla f_{v,\pm \infty}$-flowline. The conditions on the functions $f^p_{v,s,t}$ for $p=1,...,7$ for Section \ref{subsec:Cartan} were specifically chosen for this to be the case. Specifically, for large $t$, one chooses $f^p_{v,s,t}$ such that $f^p_{v,s,t}$ is independent of $v$ and $s$ when $|s|$ is sufficiently large.
The gluing theorem for holomorphic spheres holds likewise: one may apply a gluing theorem on $\mathcal{M}(J,j)$ of stable genus $0$ $J$-holomorphic maps of Chern number $j$, and this immediately provides a gluing theorem for $\mathcal{M}(J,j) \times S^i$. However, an important point must be highlighted: when descending to $\mathcal{M}(J,j) \times_{\mathbb{Z}/2} S^i$, recalling the action of $\mathbb{Z}/2$ on $\mathcal{M}(J,j)$ is by $$(z \mapsto z/(z-1)) \in PSL(2,\mathbb{C}),$$ suppose that $u$ is a nodal $J$-holomorphic map (with at least one node). Then if $[u,v] \in \mathcal{M}(J,j) \times_{\mathbb{Z}/2} S^i$, in general there is an ambiguity in how one defines a glued solution (which depends on a choice of lift to $\mathcal{M}(J,j) \times S^i$). In particular, if the domain of $u$ is $m_1$ (in the notation of Figure \ref{fig:m05elmts}) then there is no way to coherently glue the domain while respecting the $\mathbb{Z}/2$-action. This is because $m_1$ is an isolated point in the fixed point set of $\overline{M}_{0,5}$.
\section{Constructions for $ED_8$ and $ES_4$}
\label{sec:ed8es4}
In this appendix, we construct $ED_8$ and $ES_4$ as the union of a countable nested family of submanifolds (with respective $D_8$ and $S_4$ actions).
\subsection{The construction for $ED_8$}
\label{subsec:ed8}
Consider the contractible space $S^{\infty} \times (S^{\infty} \times S^{\infty})$, along with an action of $D_8 = \langle r, a | r^2 = a^4 = 1, r \cdot a = a^3 \cdot r \rangle \subset S_4$ (where $r=(13)(24)$ and $a= (1324)$) as follows:
\begin{itemize}
\item $r \cdot (z,( z_1, z_2)) = (-z, (z_2,z_1))$.
\item $a \cdot (z,(z_1,z_2)) = (-z, (-z_2, z_1))$.
\end{itemize}
This is a free action of $D_8$ on the contractible space $S^{\infty} \times (S^{\infty} \times S^{\infty})$, which can be checked by verifying all possibilities. We call this space $E$, and it is a model for $ED_8$. We denote $B = E / D_8$, to contrast with the $BD_8$ as constructed in the next section. Observe that there are finite dimensional submanifolds $S^i \times (S^j \times S^j)$, for each $(i,j)$, which are invariant under the $D_8$-action. These will be referred to as $E^{i,j}$, and let $B^{i,j} = E^{i,j} / D_8$.
\subsection{The construction for $ES_4$}
\label{subsec:es4}
Recall from Appendix \ref{subsec:ed8}, the space $$\mathbb{S}_2 := S^{\infty} \times (S^{\infty} \times S^{\infty}),$$ defined for $D_8$ using $r=(12)(34)$ and $a= (1324)$. We may define similarly $\mathbb{S}_3, \mathbb{S}_4$ for the conjugates $(23) \cdot D_8 \cdot (23)$ and $(24) \cdot D_8 \cdot (24)$ (corresponding to copies of $D_8$ with $r=(13)(24)$ and $r=(14)(23)$) respectively.
Let $S^{\infty}_{\mathbb{C}} = \cup_{i \ge 1} S^{2i-1}$ denote the infinite dimensional sphere taken as the union of the odd dimensional spheres $S^{2i-1} \subset \mathbb{C}^{i}$. Let $$\mathbb{S}_{\mathbb{C}} = S^{\infty}_{\mathbb{C}} \times \mathbb{S}_{2} \times \mathbb{S}_{3} \times \mathbb{S}_{4}.$$ This remains contractible, but now has a free $S_4$ action as follows (to define the action, it is sufficient to define it on the generators $(12),(13),(14)$ and show that all relations are satisfied). Firstly, these act on $$\mathbb{S}_{2} \times \mathbb{S}_{3} \times \mathbb{S}_{4},$$ by:
$$(12): \left( \begin{array}{ccc} a & a_1 & a_2 \\ b & b_1 & b_2 \\ c & c_1 & c_2 \end{array} \right) \mapsto \left( \begin{array}{ccc} -a & -a_1 & a_2 \\ c & c_2 & c_1 \\ b & b_2 & b_1 \end{array}\right)$$
$$(13): \left( \begin{array}{ccc} a & a_1 & a_2 \\ b & b_1 & b_2 \\ c & c_1 & c_2 \end{array} \right) \mapsto
\left( \begin{array}{ccc} c & -c_2 & c_1 \\ -b & -b_1 & b_2 \\ a & a_2 & -a_1 \end{array}\right) $$
$$(14): \left( \begin{array}{ccc} a & a_1 & a_2 \\ b & b_1 & b_2 \\ c & c_1 & c_2 \end{array} \right)\mapsto
\left( \begin{array}{ccc} b & -b_2 & -b_1 \\ a & -a_2 & -a_1 \\ -c & -c_1 & c_2 \end{array}\right). $$
Here we denote an element of this space as a matrix $$\left( \begin{array}{ccc} a & a_1 & a_2 \\ b & b_1 & b_2 \\ c & c_1 & c_2 \end{array}\right),$$ where for example $(a,(a_1,a_2)) \in \mathbb{S}_2$ and so on.
Further, to decide the action of these transpositions $(12),(13),(14)$ on $S^{\infty}_{\mathbb{C}}$, we pick three reflections of $S^{\infty}_{\mathbb{C}}$ (i.e. involutions that restrict to reflections in each $S^{2i-1} \subset S^{\infty}_{\mathbb{C}}$), such that the composition of any two of these reflections is a rotation by $2 \pi /3$ in each complex coordinate, using the identification with the unit sphere $S^{2i-1} \subset \mathbb{C}^i$. Alternatively, this is multiplication by the third root of unity $\zeta \in \mathbb{C}$. To see that we can do this, recall that any element of $O(2n)$ may be block diagonalised into $2 \times 2$ blocks. It is then sufficient to show that this may be done for $S^1$ (and then extending likewise for each $2 \times 2$ block). Observe that if one takes the diameters of $S^1$ through $1$, $\zeta$ and $\zeta^2$ then reflection through any two of these three lines suffices.
It remains to show that this is indeed a well defined free action of $S_4$, which we will not prove here but can be checked by exhaustion. Note further that the inclusion of $D_8 \subset S_4$ from Appendix \ref{subsec:ed8} demonstrates this $ES_4$ as an $ED_8$. We denote by $ES_4^{i,j,k}$ the $S_4$-invariant submanifold of $ES_4$ corresponding to $$S^{2i-1} \times (S^j \times (S^k \times S^k)) \times (S^j \times (S^k \times S^k)) \times (S^j \times (S^k \times S^k)).$$
Choosing any point $p \in ES_4$ that is fixed by the reflection associated to $(12)$, there is a homotopy equivalence between our $ES_4$ and $E$ (from Appendix \ref{subsec:ed8}), via the projection $$\rho: ES_4 \rightarrow \mathbb{S}_2,$$ which is in fact a $D_8$-equivariant homotopy equivalence with respect to the action of $D_8 \subset S_4$. To see that we can do this, recall the following argument that $S^{\infty}_{\mathbb{C}}$ is contractible: there is a shift map $$T: S^{\infty}_{\mathbb{C}} \rightarrow S^{\infty}_{\mathbb{C}}, \ (z_0,z_1,\ldots) \mapsto (0, z_0, z_1,\ldots),$$ where each $z_i \in \mathbb{C}$. Pick some $p \in S^{\infty}_{\mathbb{C}} - \text{Im}(T)$. Then $T$ is homotopic both to the identity map on $S^{\infty}_{\mathbb{C}}$ and to the constant map at $p$ via a (normalised) linear interpolation (the key being that this interpolation is never zero). A similar argument shows that $S^{\infty}$ is contractible. We then observe that $\rho$ is $D_8$-equivariant, and the induced map $ES_4 / D_8 \rightarrow E / D_8$ is a homotopy equivalence because it is a fibration over $E / D_8$ with a contractible fibre. Using the long exact sequence of homotopy for a fibration, this implies that $\rho$ is a weak equivalence. By construction, our choices of $ES_4$ and $E$ are CW-complexes, hence this is indeed a homotopy equivalence.
For notational purposes, we will denote $BD_8 = ES_4 / D_8$ (specifically, using the contractible space $ES_4$ with a free $S_4$ action as constructed in Appendix \ref{subsec:es4}, but only quotienting by $D_8$). We will denote $B = E / D_8$ (specifically, using the contractible space $E$ from Appendix \ref{subsec:ed8}), noting that by the standard theory of classifying spaces $B$ and $BD_8$ are homotopy equivalent, as in the previous paragraph.
\end{document} |
\begin{document}
\date{\today}
\title
[Square function and local smoothing estimates]{A trilinear approach to square function and local smoothing estimates for the wave operator}
\author[J. Lee]{Jungjin Lee}
\address{Department of Mathematical Sciences, School of Natural Science, Ulsan National Institute of Science and Technology, UNIST-gil 50, Ulsan 44919, Republic of Korea}
\email{[email protected]}
\subjclass[2010]{42B10, 42B15, 42B37}
\keywords{Wave equation, square function, smoothing estimates}
\thanks{
The author was supported in part by NRF grant No. 2017R1D1A1B03036053 (Republic of Korea).}
\begin{abstract}
The purpose of this paper is to improve the known estimates for Mockenhaupt's square function in $\mathbb R^3$ and for Sogge's local smoothing in $\mathbb R^{2+1}$ spacetime. For this we use the trilinear approach of S. Lee and A. Vargas for the cone multiplier with some trilinear estimates obtained from the $\ell^2$ decoupling theorem and multilinear restriction theorem.
\end{abstract}
\maketitle
\section{Introduction}
Let $\Gamma = \{ (\xi,\tau) \in \mathbb R^2 \times \mathbb R : \tau = |\xi|,~ 1 \le \tau \le 2 \}$ be a truncated light cone in $\mathbb R^3$. For given small $0< \delta <1 $, let $\Gamma_\delta$ denote the $\delta$-neighborhood of $\Gamma$. Let $f$ be a function on $\mathbb R^3$ whose Fourier transform is supported in $\Gamma_\delta$.
We partition $\Gamma_\delta$ into $O(\delta^{-1/2})$ sectors
\(
\Theta = \{ (\xi, \tau) \in \Gamma_\delta : \xi/|\xi| \in \theta \}
\)
corresponding to an arc $\theta$ of angular length $O(\delta^{1/2})$ in the unit circle, and
let $\mathbf \Pi_\delta$ denote the collection of such sectors. We take a collection of Schwartz functions \( \Xi_{\Theta} \) so that each Fourier transform $\widehat \Xi_\Theta$ is supported on a neighborhood of $\Theta$ and $\{\widehat \Xi_\Theta \}_{\Theta \in \mathbf \Pi_\delta}$ forms a partition of unity of $\Gamma_\delta$.
The square function $S_\delta f$ is defined as
\[
S_\delta f = \Big(\sum_{\Theta \in \mathbf \Pi_\delta} |f_\Theta|^2 \Big)^{1/2}
\]
where $f_\Theta = f \ast \Xi_\Theta$.
For $1 \le p \le \infty$, we say that the square function estimate ${\mathcal{SQ}}(p \rightarrow p; \alpha)$ holds if the estimate
\[
\| f \|_p \le C_{\epsilon}\delta^{-\alpha-\epsilon} \| S_\delta f \|_p
\]
holds for all $\epsilon>0$ and all functions $f$ having Fourier support in $\Gamma_\delta$, where $C_\epsilon$ is a positive constant depending on $\epsilon$ but not on $\delta$.
It was conjectured that the square function estimate ${\mathcal{SQ}}(p \rightarrow p; \alpha)$ holds for $p > 2$ and $\alpha \ge \max(0, \frac{1}{2} - \frac{2}{p})$, see \cite{garrigos2009cone, tao2000bilinearII}. Mockenhaupt \cite{mockenhaupt1993cone} first considered it, and proved the estimate $\mathcal{SQ}(4 \to 4; 1/8=0.125)$. It was observed by Bourgain \cite{bourgain1995cone} that the exponent $\alpha$ could be less than $1/8$, and Tao and Vargas \cite{tao2000bilinearII} gave an explicit exponent $\alpha$ by combining their bilinear cone restriction estimates with Bourgain's arguments. After that, the sharp bilinear cone restriction estimate was obtained by Wolff \cite{wolff2001sharp}, and the estimate $\mathcal{SQ}(4 \to 4; 5/44=0.113\dot6\dot3)$ immediately followed by a theorem in \cite{tao2000bilinearII}.
Garrig\'os and Seeger \cite{garrigos2009cone} have studied \textit{$\ell^p$ decoupling estimates} (called Wolff-type inequalities \cite{wolff2000local}) for cones, and they further improved the exponent $\alpha$ by combining $\ell^p$ decoupling estimates with bilinear arguments in \cite{tao2000bilinearII}. In \cite{wolff2000local}, Wolff introduced an important type of estimate related to the above square function which has become known as {$\ell^p$ decoupling inequalities}. Decoupling inequalities will play an important role in this paper and will be discussed in detail in Section 3. Recently, the sharp $\ell^2$ decoupling theorem for the cone was proved by Bourgain and Demeter \cite{bourgain2015proof} using the multilinear restriction theorem due to Bennett, Carbery and Tao \cite{bennett2006multilinear}. So, by results in \cite{garrigos2009cone} the estimate $\mathcal{SQ}(4 \to 4; 3/32=0.09375)$ was obtained.
Our first result is to make a further progress on the exponent $\alpha$.
\begin{thm} \label{thm:sqfEst}
The estimate $\mathcal{SQ}(4 \to 4;1/16=0.0625)$ holds.
\end{thm}
The approach to Theorem \ref{thm:sqfEst} is based on trilinear methods. S. Lee and Vargas \cite{lee2012cone} already employed a trilinear approach to square function estimates by adapting the multilinear arguments of Bourgain and Guth \cite{bourgain2011bounds}, and obtained the sharp estimate $\mathcal{SQ}(3 \to 3;0)$. In \cite{lee2012cone}, it was observed that trilinear square function estimates for the cone are essentially equivalent to linear ones. To get a trilinear square function estimate, the multilinear restriction theorem of Bennett, Carbery and Tao \cite{bennett2006multilinear} will be utilized as in \cite{lee2012cone}. However, to lift the $L^3$ estimate to the $L^4$ estimate we will combine this with the sharp $\ell^2$ decoupling theorem due to Bourgain and Demeter \cite{bourgain2015proof}. Also, we will adapt the induction-on-scales argument of Bourgain and Demeter \cite{bourgain2015proof}. However, since their arguments take advantage of some properties of the decoupling norm not derived from the square function, we cannot formulate an iteration as strong as in \cite{bourgain2015proof}. Nevertheless, it is enough to obtain Theorem \ref{thm:sqfEst}.
The square function estimate is related to several deep questions in harmonic analysis such as the cone multiplier, local smoothing conjecture and the $L^p$ regularity conjecture for convolution operator with the helix. In particular, these conjectures follow from the sharp estimate $\mathcal{SQ}(4 \to 4;0)$, see for example \cite{tao2000bilinearII}, \cite{garrigos2009cone}.
Theorem \ref{thm:sqfEst} implies the following partial results on these problems.
\begin{cor} \label{cor}
\emph{(i)} If $\alpha > 1/16$ then the local smoothing estimate
\[
\Big( \int_{1}^{2} \big\| e^{it \sqrt{-\Delta}} f \big\|^4_{L^4(\mathbb R^2)} dt \Big)^{1/4} \le C_{\alpha} \|f\|_{L_\alpha^{4}(\mathbb R^2)}
\]
holds, where $L_\alpha^p$ is the $L^p$-Sobolev space of order $\alpha$.
\emph{(ii)} If $\alpha > 1/16$ then the cone multiplier operator $T_\alpha$ defined by $\widehat{T_\alpha f} (\xi,\tau)= \rho(\tau) (1-|\xi|^2/ \tau^2)_+^{\alpha}\hat f(\xi)$ is bounded on $L^4$, where $\rho$ is a bump function on $[1,2]$.
\emph{(iii)} If $\alpha < 5/24$ then the convolution operator $T$ defined by\[
Tf(x) = \int f(x_1 -\cos t, x_2 - \sin t, x_3 -t ) \phi(t) dt
\]
maps $L^4$ to $L_{\alpha}^4$, where $\phi$ is a bump function.
\end{cor}
We note that the sharp estimate $L^p \to L^p_{1/p}$, $p >4$, for the averaging operator $T$ may be obtained by combining the theorem due to Pramanik and Seeger \cite{Pramanik2007averages} and the Bourgain--Demeter decoupling estimates.
The proof of Corollary \ref{cor} is well known, and we will not reproduce here, see for example \cite{tao2000bilinearII}. For other related problems, see
\cite{garrigos2009cone}, \cite{bourgain2015proof}.
\
We are further concerned with $L_\alpha^p \to L^q$ type local smoothing estimates
\begin{equation} \label{eqn:LS}
\Big( \int_1^2 \big\| e^{it \sqrt{-\Delta}} f \big\|^q_{L^q(\mathbb R^2)} dt \Big)^{1/q} \le C_{p,q,\alpha} \|f\|_{L_\alpha^{p}(\mathbb R^2)}.
\end{equation}
It is conjectured that this local smoothing estimate holds if
\begin{equation}\label{apq_con}
\begin{gathered}
1 \le p \le q \le \infty, \\
\frac{1}{p} + \frac{3}{q} = 1, \qquad \alpha \ge \frac{1}{p} -\frac{3}{q} + \frac{1}{2},
\end{gathered}
\end{equation}
see \cite{schlag1997local, tao2000bilinearII}.
Indeed, the necessity of condition $p \le q$ follows from translation invariance, see \cite{hormander1960estimates}. From the focusing example, Knapp example and delta function, one has three necessary conditions
\begin{align}
\label{al_1}
\alpha &\ge \frac{1}{p} -\frac{3}{q} + \frac{1}{2},\\
\label{al_2}
\alpha &\ge \frac{3}{2p} - \frac{3}{2q},\\
\label{al_3}
\alpha &\ge \frac{2}{p} -\frac{1}{q} - \frac{1}{2},
\end{align}
respectively, see \cite{tao2000bilinearII} for details.
Let $I_1 = (1,1;1/2+\varepsilon),~ I_2 =(2,2;0),~ I_\infty =(\infty, \infty; 1/2+\varepsilon),~ I_{1,\infty} =(1,\infty;3/2+\varepsilon)$ where $\varepsilon >0$ is arbitrary. When $(p,q;\alpha) = I_1, I_2, I_\infty$ and $I_{1,\infty}$, one can obtain \eqref{eqn:LS} from the fixed-time estimates due to Miyachi \cite{miyachi1980some} and Peral \cite{peral1980lp}. First, in case that \eqref{al_3} is dominant, the reciprocal range $(1/p,1/q)$ is the triangular shape with vertices $(1,1)$, $(1/2,1/2)$ and $(1,0)$. In this case, by interpolation, the estimates \eqref{eqn:LS} for such triangular shape range follow from the estimates for $I_1, I_2$ and $I_{1, \infty}$. We see that the conjecture \eqref{apq_con} satisfies both \eqref{al_1} and \eqref{al_2}. If we have the conjecture, by interpolating between \eqref{apq_con} and $I_{\infty}$
the estimates \eqref{eqn:LS} are obtained when \eqref{al_1} is dominant, and analogously the interpolation between \eqref{apq_con} and $I_{2}$ gives the estimates \eqref{eqn:LS} when \eqref{al_2} is dominant. For an endpoint $(p,q;\alpha)=(4,4;0)$, it is known that the local smoothing estimate does not hold, see \cite{Wolff96recentwork}. But, for $q > 4$, $\frac{1}{p} + \frac{3}{q} =1$ and $\alpha = \frac{1}{p} - \frac{3}{q} + \frac{1}{2}$, it is not known whether the local smoothing estimate holds or not.
The critical $L_\alpha^4 \to L^4$ estimate has been considered in Corollary \ref{cor}. We continue to study a sharp $L_\alpha^p \to L^q$ estimate when $p<q$. From Strichartz' estimate $L_{1/2}^2 \to L^6$, this conjecture follows for $q \ge 6$. Schlag and Sogge \cite{schlag1997local} first improved this to $q \ge 5$, and Tao and Vargas \cite{tao2000bilinearII} made further progress by using a bilinear approach. By the sharp bilinear cone restriction estimate due to Wolff \cite{wolff2001sharp} and the results in \cite{tao2000bilinearII}, the conjecture was improved to $q \ge 14/3 = 4.\dot6$, and the $\epsilon$-loss of $\alpha$ was removed by S. Lee \cite{lee2003endpoint}.
Our second result is to obtain an improved sharp local smoothing estimate.
\begin{thm} \label{thm:localSm}
The estimate \eqref{eqn:LS} holds
for $q \ge 30/7=4.\dot28571\dot4$ and $p,\alpha$ satisfying the conditions in \eqref{apq_con} except the endpoint $(p,q;\alpha) = (10/3, 30/7 ;1/10)$.
\end{thm}
Theorem \ref{thm:localSm} will be proved through the trilinear approach too. The proof is simpler than Theorem \ref{thm:sqfEst}. We will reduce this linear estimate to a trilinear one, and the desired trilinear estimate will be obtained from interpolating between two trilinear estimates deduced from the multilinear restriction theorem \cite{bennett2006multilinear} and the $\ell^2$ decoupling theorem \cite{bourgain2015proof}.
\
Throughout this paper,
we write $A \lesssim B$ or $A = O(B)$ if $A \le CB$ for some constant $C >0$ which may depend on $p$, $q$ but not on $\delta$, $R$ and $N$, and $A \sim B$ if $A\lesssim B$ and $B \lesssim A$.
The constants $C$, $C_\varepsilon$, $C_{\epsilon}$, $C_{\epsilon_1}$ and the implicit constants in $\lesssim$ and $\sim$ will be adjusted numerous times throughout the paper.
For any finite set $A$, we use $\#A$ to denote its cardinality, and
if $A$ is a measurable set, we use $|A|$ to denote its Lebesgue
measure.
If $R$ is a rectangular box or an ellipsoid and $k$ is a positive real number, we use $kR$ to denote the $k$-dilation of $R$ with center of dilation at the center of $R$.
\section{Reduction to a trilinear estimate}
In this section, we will show that the linear square function estimate is equivalent to a trilinear one.
The arguments of this section are a small modification of arguments found in \cite{lee2012cone}. Specifically, we replace $L^3$ arguments by $L^p$ ones for $p \ge 2$.
For an arc $\Omega \subset S^1$ we define a sector $\Gamma^{\Omega}$ and a $\delta$-fattened sector $\Gamma_\delta^{\Omega}$ by
\[
\Gamma^\Omega = \{ (\xi,\tau) \in \Gamma : \xi/|\xi| \in \Omega \},
\qquad
\Gamma_\delta^\Omega = \{ (\xi,\tau) \in \Gamma_\delta : \xi/|\xi| \in \Omega \}.
\]
Let $\Omega_1, \Omega_2, \Omega_3 \subset S^1$ be arcs whose lengths are comparable to each other. We say that $\Gamma^{\Omega_1}, \Gamma^{\Omega_2}, \Gamma^{\Omega_3}$ are \textit{$\nu$-transverse} if for any unit normal vector $n_i$ to $\Gamma^{\Omega_i}$, $i=1,2,3$, the parallelepiped formed by $n_1, n_2, n_3$ has volume $\ge \nu$, see Figure \ref{fig:transversal}. A key geometric property of the cone $\Gamma$ is that $\Gamma^{\Omega_1}, \Gamma^{\Omega_2}, \Gamma^{\Omega_3}$ are $\nu$-transverse if and only if $\Omega_1, \Omega_2, \Omega_3$ are mutually separated by a distance $\gtrsim \nu^{1/3}$, see \cite{lee2012cone}.
\begin{figure}\label{fig:transversal}
\end{figure}
Let us use the notation $\mathcal{SQ}(p \times p \times p \rightarrow p;\alpha)$ if one has the trilinear square function estimate
\[
\Big\| \Big( \prod_{i=1}^{3} |f_i| \Big)^{1/3} \Big\|_p
\le C_{\nu,\epsilon} \delta^{-\alpha-\epsilon} \Big( \prod_{i=1}^{3} \|S_\delta f_i \|_p \Big)^{1/3}
\]
for all $\epsilon>0$ and all $f_i$ with $\supp \hat f_i \subset \Gamma_\delta^{\Omega_i}$, where $\Omega_1, \Omega_2, \Omega_3$ are any arcs such that their lengths are comparable to each other, and $\Gamma^{\Omega_1}, \Gamma^{\Omega_2}, \Gamma^{\Omega_3}$ are $\nu$-transverse.
It is easy to see that $\mathcal{SQ}(p \rightarrow p;\alpha)$ implies $\mathcal{SQ}(p \times p \times p \rightarrow p;\alpha)$ by H\"older's inequality. We will show that the converse is true.
Let \( 1 > \gamma_1 > \gamma_2 > 0 \) be small positive numbers.
We define \( \mathbf \Omega(\gamma) \) to be a family of $O(\gamma^{-1})$ arcs of length $\gamma$ covering the unit circle with finite overlap.
We take a Schwartz function $\Xi_\Omega$ whose Fourier transform $\widehat \Xi_\Omega$ is a bump function supported on a neighborhood of $\Gamma_\delta^\Omega$.
The following is due to S. Lee and Vargas \cite{lee2012cone}*{equation (23)}.
\begin{lem}[Lee--Vargas \cite{lee2012cone}*{equation (23)}] \label{lem:LinTriLcompare}
Suppose that $f$ has Fourier support in $\Gamma_\delta$ and let $0 < \gamma_2 < \gamma_1 < 1$.
Then for any $x \in \mathbb R^3$,
\begin{align}
| f(x) | &\lesssim \max_{\Omega \in \mathbf
\Omega(\gamma_1)}| f_{\Omega}(x) |
+ \gamma_1^{-1}
\max_{\Omega \in \mathbf \Omega(\gamma_2)} | f_{\Omega}(x)|
+ \gamma_2^{-50}
\max_{\substack{\Omega_1,\Omega_2,\Omega_3 \in \mathbf
\Omega(\gamma_2): \\ \dist(\Omega_i, \Omega_j) \ge \gamma_2,\, i
\neq j}} \Big( \prod_{i=1}^{3} |f_{\Omega_i}(x)| \Big)^{1/3}
\end{align}
where $f_\Omega = f \ast \Xi_\Omega$.
\end{lem}
To obtain the above lemma, S. Lee and Vargas adapted the arguments of Bourgain and Guth \cite{bourgain2011bounds} who made progress on the restriction conjecture by using a multilinear approach.
Using Lemma \ref{lem:LinTriLcompare} we can establish the following relation between the linear and trilinear square function estimates.
\begin{prop} \label{prop:MSQmeanSQ}
Let $p \ge 2$ and $\alpha \ge 0$. Suppose that $\mathcal{SQ}(p\times p\times p \rightarrow p;\alpha)$ holds. Then $\mathcal{SQ}(p \rightarrow p;\alpha)$ is valid.
\end{prop}
\begin{proof}
Let $\epsilon>0$ be given. We assume that $\beta \ge 0$ is the best exponent for which
\begin{equation} \label{asshy}
\| f \|_p \le C \delta^{-\beta-\epsilon} \| S_\delta f \|_p
\end{equation}
holds for all $f$ with $\supp \hat f \subset \Gamma_\delta$, i.e.,
\[
\beta = \inf_{\delta > 0} \Big( \log_{1/\delta} \sup_{f: \supp \hat f \subset \Gamma_\delta} \frac{ \| f \|_p } {\| S_\delta f \|_p} \Big) - \epsilon.
\]
It suffices to show that for any small $0<\epsilon_1<1$,
\begin{equation} \label{expRel}
\beta \le \alpha+ O(\epsilon_1)+\log_{1/\delta} C_{\epsilon, \epsilon_1},
\end{equation}
since if we choose a sufficiently small $\epsilon_1$ then $O(\epsilon_1)$ is bounded by $\epsilon$, which can be absorbed in an $\epsilon$-loss in the estimate $\mathcal{SQ}(p \to p;\alpha)$.
The dependence on $\epsilon$ and $\epsilon_1$ of the constant $C_{\epsilon, \epsilon_1}$ in the above inequality comes from employing $\mathcal{SQ}(p \times p \times p \to p; \alpha)$. In particular, $\epsilon_1$ is related to the transversality of the trilinear estimates below.
We may assume that $\delta>0$ is sufficiently small, say $0< \delta \le \delta_0$, because the desired estimate is trivially obtained, otherwise, where $\delta_0$ is a small parameter to be fixed later in the proof. Let \( 1 > \gamma_1 > \gamma_2\ge \delta_0^{\epsilon_1/2} \) be dyadic multiples of $\delta^{1/2}$, the value of which is to be fixed later in the argument.
By Lemma \ref{lem:LinTriLcompare} and the embedding $\ell^{p} \subset \ell^{\infty}$,
\begin{equation} \label{LpTricom}
\begin{split}
\| f \|_p^p &\lesssim \sum_{\Omega_1 \in \mathbf
\Omega(\gamma_1)} \| f_{\Omega_1} \|_p^p
+ \gamma_1^{-p}
\sum_{\Omega_2 \in \mathbf \Omega(\gamma_2)} \| f_{\Omega_2} \|_p^p \\
&\qquad\qquad + \gamma_2^{-50p}
\sum_{\substack{\Omega_1,\Omega_2,\Omega_3 \in \mathbf
\Omega(\gamma_2): \\ \dist(\Omega_i, \Omega_j) \ge \gamma_2,\, i
\neq j}} \Big\| \Big( \prod_{i=1}^{3} |f_{\Omega_i}| \Big)^{1/3} \Big\|_p^p,
\end{split}
\end{equation}
where $\Omega_j$ is taken such that if $\Theta$ intersects the interior of $\Omega_j$ then $\Theta \subset \Omega_j$ for $j=1,2$.
Consider the first and second summation in the right-hand side of \eqref{LpTricom}. For convenience we denote by $\Omega = \Omega_j$ and $\gamma =\gamma_j$. Using Lorentz rescaling we will show
\begin{equation} \label{ppsc}
\| f_\Omega \|_p \le C_\epsilon (\delta / \gamma^2)^{-\beta-\epsilon} \| S_\delta f_{\Omega} \|_p.
\end{equation}
By rotating the unit circle we may assume that $\Omega$ is centered at $(1,0)$. Let $T : \mathbb R^3 \to \mathbb R^3$ be a linear transformation so that
\[
T(e_1,1) = (e_1,1),\quad T(-e_1,1) = \gamma^{2}(-e_1,1),\quad T(e_2,0) = \gamma(e_2,0)
\]
where $\{e_1, e_2\}$ is a standard basis in $\mathbb R^2$. Then $\hat f_\Omega \circ T$ is supported in $\Gamma_{\delta/\gamma^2}$.
From the equation $\widehat{f_\Omega \circ T^{-t}} = |\det T| \hat f_\Omega \circ T$, it follows that $\widehat{f_\Omega \circ T^{-t}}$ has support in $\Gamma_{\delta/\gamma^2}$ where $T^{-t}$ is the inverse transpose of $T$. Since $\gamma \ge \delta^{1/2}$, by \eqref{asshy} it follows that
\begin{equation} \label{bsc}
\|f_\Omega \circ T^{-t}\|_p \lesssim (\delta/\gamma^2)^{-\beta-\epsilon} \|S_{\delta/\gamma^2} (f_\Omega \circ T^{-t}) \|_p.
\end{equation}
By definition,
\[
S_{\delta/\gamma^2} (f_\Omega \circ T^{-t}) = \Big( \sum_{\Upsilon \in \mathbf \Pi_{\delta/\gamma^2}} \big|(f_\Omega \circ T^{-t}) \ast \Xi_{\Upsilon} \big|^2 \Big)^{1/2}.
\]
From $\ \hat \Xi_\Upsilon \circ T^{-1}= \hat \Xi_{T(\Upsilon)}$, it follows that
\(
\big((f_\Omega \circ T^{-t}) \ast \Xi_{\Upsilon} \big)\sphat =|\det T|(\hat f_\Omega \circ T) \hat \Xi_{\Upsilon}
= |\det T|(\hat f_\Omega \hat \Xi_{T(\Upsilon)} ) \circ T.
\)
Thus, by taking the inverse Fourier transform,
\[
(f_\Omega \circ T^{-t}) \ast \Xi_{\Upsilon} = (f_\Omega \ast \Xi_{T(\Upsilon)}) \circ T^{-t}.
\]
Since $f_\Omega \ast \Xi_{T(\Upsilon)}$ has Fourier support in $T(\Upsilon)$ which is a sector of size $1 \times \delta \times C\delta^{1/2}$ in $\Gamma_{\delta}$, we have
\[
S_{\delta/\gamma^2} (f_\Omega \circ T^{-t}) = \Big( \sum_{\Upsilon \in \mathbf \Pi_{\delta/\gamma^2}} |(f_\Omega \ast \Xi_{T(\Upsilon)}) \circ T^{-t} |^2 \Big)^{1/2} = (S_{\delta} f_\Omega) \circ T^{-t}.
\]
We substitute this in \eqref{bsc} and remove $T^{-t}$ by changing variables. Then we obtain \eqref{ppsc}.
By \eqref{ppsc} we have
\[
\sum_{\Omega \in \mathbf \Omega(\gamma)} \| f_{\Omega} \|_p^p \le C_\epsilon (\delta /\gamma^{2})^{-p\beta-p\epsilon} \sum_{\Omega \in \mathbf \Omega(\gamma)} \| S_\delta f_{\Omega} \|_p^p.
\]
Since we can decompose $f_\Omega = \sum_{\Theta \in \mathbf \Pi_\delta: \Theta \subset \Omega} f \ast \Xi_\Theta$, we have that for $p \ge 2$,
\begin{align*}
\sum_{\Omega \in \mathbf \Omega(\gamma)} \| S_\delta f_{\Omega} \|_p^p &= \sum_{\Omega \in \mathbf \Omega(\gamma)} \int \Big( \sum_{\Theta \in \mathbf \Pi_\delta: \Theta \subset \Omega} | f \ast \Xi_\Theta|^2 \Big)^{p/2} \\
&\le \int \Big(\sum_{\Omega \in \mathbf \Omega(\gamma)} \sum_{\Theta \in \mathbf \Pi_\delta : \Theta \subset \Omega} | f \ast \Xi_\Theta|^2 \Big)^{p/2} \\
&\le \|S_\delta f\|_p^p.
\end{align*}
Inserting this into the previous estimate, we obtain
\begin{equation} \label{indP}
\sum_{\Omega \in \mathbf \Omega(\gamma)} \| f_{\Omega} \|_p^p \le C_\epsilon (\delta /\gamma^{2})^{-p\beta-p\epsilon} \|S_\delta f\|_p^p.
\end{equation}
Consider the trilinear part in \eqref{LpTricom}. By applying $\mathcal{SQ}(p\times p \times p \rightarrow p; \alpha)$,
\begin{equation} \label{Tripart}
\sum_{\substack{\Omega_1,\Omega_2,\Omega_3 \in \mathbf
\Omega(\gamma_2): \\ \dist(\Omega_i, \Omega_j) \ge \gamma_2,\, i
\neq j}} \Big\| \Big( \prod_{i=1}^{3} |f_{\Omega_i}| \Big)^{1/3} \Big\|_p^p \le C_{\epsilon, \gamma_2} \gamma_2^{-3} \delta^{-p\alpha-p\epsilon} \|S_\delta f \|_p^p.
\end{equation}
We substitute \eqref{indP} and \eqref{Tripart} in \eqref{LpTricom}. Then,
\[
\|f\|_p \le (C_\epsilon \gamma_1^{2(\beta+\epsilon)} \delta^{-\beta-\epsilon} + C_\epsilon \gamma_1^{-1} \gamma_{2}^{2(\beta+\epsilon)} \delta^{-\beta-\epsilon} + C_{\epsilon, \gamma_2}\gamma_2^{-60} \delta^{-\alpha-\epsilon}) \|S_\delta f \|_p.
\]
So, by the assumption for $\beta$,
\begin{align*}
\delta^{-\beta} &\le (C_\epsilon \gamma_1^{2(\beta+\epsilon)} + C_\epsilon \gamma_1^{-1} \gamma_{2}^{2(\beta+\epsilon)} )\delta^{-\beta} + C_{\epsilon,\gamma_2}\gamma_2^{-60} \delta^{-\alpha}.
\end{align*}
We now choose $\gamma_1, \gamma_2$ and $\delta_0$ so that $C_\epsilon\gamma_1^{2(\beta+\epsilon)} \le 1/4$, $ C_\epsilon \gamma_1^{-1} \gamma_{2}^{2(\beta+\epsilon)} \le 1/4$ and $1> \gamma_1 > \gamma_2 \ge \delta_0^{{\epsilon_1}/{2}}$.
Then $\delta^{-\beta} \le C_{\epsilon,\gamma_2} \gamma_2^{-60}\delta^{-\alpha} \le C_{\epsilon, \epsilon_1} \delta^{-30\epsilon_1 - \alpha}$, which means \eqref{expRel}.
\end{proof}
\section{Decoupling norms} \label{sec:decoupling}
In this section, we will show that the decoupling norm for the cone essentially satisfies the reverse H\"older inequality, and apply this to the interpolation between decoupling estimates. In fact, our interpolation lemmas can be obtained by using known interpolation theorems, so our proof is an alternative one (which is actually weaker).
This section is obtained by modifying the arguments for paraboloid decoupling in \cite{bourgain2015proof}*{section 3}. For further discussion for decoupling, see \cite{wolff2000local}, \cite{laba2002local}, \cite{garrigos2008improvements}, \cite{garrigos2010mixed}.
Let $f$ be a function having Fourier support in $\Gamma_\delta$.
For such functions, the norm $\| \cdot \|_{p,\delta}$, $1 \le p \le \infty$ is defined by
\[
\|f\|_{p,\delta} := \Big( \sum_{\Theta \in \mathbf \Pi_{\delta}} \| f_{\Theta} \|^2_{p} \Big)^{1/2}.
\]
It is easy to see that if $m$ is a positive real number then $\|f\|_{p,m\delta} \le C_m \|f\|_{p,\delta}$ by Minkowski's inequality.
We first introduce a wave packet decomposition, which is a fundamental tool for studying Fourier restriction type problems. To decompose $f$ both in frequency space and in physical space, we define standard bump functions.
Let $\phi(x) := (1+|x|^2)^{-M/2}$ where $M$ is a sufficiently large exponent. Let $\psi: \mathbb R^{3}\rightarrow \mathbb R$ be a nonnegative Schwartz function such that $\psi$ is strictly positive in the unit ball $B(0,1)$, Fourier supported in a ball $B(0,1/4)$ and $\sum_{k \in \mathbb Z^3}\psi(x-k) = 1$. For an ellipsoid $E$, we define $a_E$ to be an affine map from the unit ball $B(0,1)$ to $E$. Let $\phi_E = \phi \circ a_E^{-1}$ and $\psi_E = \psi \circ a_E^{-1}$.
\begin{lem} \label{lem:wavepack}
Suppose that $f$ is Fourier supported in $\Gamma_\delta$. Then there exists a decomposition
\begin{equation} \label{waveDec}
f(x) = \sum_{\Theta \in \mathbf \Pi_\delta}\sum_{\pi \in \mathbf P_\Theta} h_{\pi} f_{\pi}(x),
\end{equation}
where $\mathbf P_\Theta = \mathbf P_\Theta(f)$ is a family of separated rectangles $\pi$ of size $\delta^{-1} \times \delta^{-1/2} \times 1$ with its dual $\pi^* = \Theta$, such that the coefficients $h_\pi > 0$ have the property that
\begin{equation} \label{lp_sum}
\Big( \sum_{\Theta \in \mathbf \Pi_\delta}\Big( \sum_{\pi \in \mathbf P_\Theta} |\pi| h_\pi^p \Big)^{2/p} \Big)^{1/2} \sim \|f\|_{p,\delta}
\end{equation}
for all $1 \le p < \infty$ and
\begin{equation} \label{l_infty}
\Big( \sum_{\Theta \in \mathbf \Pi_\delta} \sup_{\pi \in \mathbf P_\Theta} h_\pi^2 \Big)^{1/2} \sim \|f\|_{\infty,\delta},
\end{equation}
and the functions $f_\pi$ obey
\begin{equation} \label{fourierSupp}
\supp \hat f_\pi \subset 4\Theta
\end{equation}
and
\begin{equation} \label{ess_supp}
|f_{\pi}(x)| \lesssim \phi_\pi(x).
\end{equation}
\end{lem}
\begin{proof}
For each $\Theta \in \mathbf \Pi_\delta$, we partition $\mathbb R^3$ into the dual rectangles $\pi$ of $\Theta$.
For each $\pi$, we define a coefficient $h_\pi$ and a function $f_\pi$ by
\[
h_{\pi} = \frac{1}{|\pi|}\int |f_\Theta(x)| \psi_\pi(x) dx \qquad \text{and} \qquad
f_\pi(x) = h_\pi^{-1} \psi_\pi(x) f_\Theta (x).
\]
Then, \eqref{fourierSupp} immediately follows, and a direct calculation gives \eqref{waveDec}.
By Bernstein's inequality,
\begin{equation*}
|\psi_{\pi}(x) f_\Theta (x)| \lesssim h_\pi,
\end{equation*}
so we have $|f_\pi(x)| \lesssim |\psi_\pi(x)|$. This implies \eqref{ess_supp}.
By H\"older's inequality we have $h_\pi \lesssim \Big( \frac{1}{|\pi|}\int |f_\Theta(x)|^p \psi_\pi(x) dx \Big)^{1/p}$, and using Bernstein's lemma we can see that $\Big( \frac{1}{|\pi|}\int |f_\Theta(x)|^p \psi_\pi(x) dx \Big)^{1/p} \lesssim h_\pi$. So, we have
\[
\sum_{\pi \in \mathbf P_\Theta} |\pi| h_\pi^p \sim \sum_{\pi \in \mathbf P_\Theta} \int |f_\Theta|^p \psi_\pi
= \|f_\Theta\|_p^p,
\]
from which \eqref{lp_sum} follows. Similarly, we have that $h_\pi \sim \sup_{x \in \pi} |f_\Theta(x)|$ and that $\sup_{\pi \in \mathbf P_\Theta} h_\pi \sim \|f_\Theta\|_\infty$. Thus \eqref{l_infty} follows.
\end{proof}
Now we study the reverse H\"older inequality for the decoupling norm.
We say that $f$ is a \textit{balanced function} if $f$ is a function of the form \eqref{waveDec} with $h_\pi=1$
such that $f$ satisfies \eqref{fourierSupp}, \eqref{ess_supp}
and a property that for any $\Theta, \Theta' \in \mathbf \Pi_\delta$, the nonempty $\mathbf P_{\Theta}(f), \mathbf P_{\Theta'}(f)$ have comparable cardinality. These kinds of functions were first explicitly used by Wolff \cite{wolff2000local}.
\begin{lem} \label{lem:revHol}
Suppose that $1 \le p,q,r \le \infty$ and that for some $\theta \in (0,1)$,
\[
\frac{1}{r} = \frac{1-\theta}{q} + \frac{\theta}{p}.
\]
Then
\[
\| f \|_{r,\delta} \sim \|f\|_{q, \delta}^{1-\theta} \|f\|_{p, \delta}^{\theta},
\]
for all balanced functions $f$.
\end{lem}
\begin{proof}
Since $f$ is a balanced function, there is a number $\kappa>0$ such that every nonempty $\mathbf P_\Theta(f)$ has cardinality comparable to $\kappa$. Let $\nu$ be the number of nonempty $\mathbf P_\Theta(f)$. Then by \eqref{lp_sum} and \eqref{l_infty}, one has
\[
\|f\|_{r,\delta} \sim \nu^{1/2} \kappa^{1/r} |\pi|^{1/r}
= \nu^{\frac{1-\theta}{2}} \kappa^{\frac{1-\theta}{q}} |\pi|^{\frac{1-\theta}{q}} \nu^{\frac{\theta}{2}} \kappa^{\frac{\theta}{p}} |\pi|^{\frac{\theta}{p}} \sim \|f\|_{q, \delta}^{1-\theta} \|f\|_{p, \delta}^{\theta}.
\]
\end{proof}
As an application we have the following interpolation lemma.
\begin{lem} \label{lem:interp}
Let $2 \le p_1, p_2, q_1, q_2 \le \infty$.
Assume that
\begin{equation} \label{givenEstLL}
\| f \|_{q_1} \le A_1 \| f \|_{p_1,\delta},\qquad
\| f \|_{q_2} \le A_2 \| f \|_{p_2,\delta}
\end{equation}
for all $f$ with $\supp \hat f \subset \Gamma_\delta$.
Suppose that for some $\theta \in (0,1)$,
\[
\frac{1}{q} = \frac{1-\theta}{q_1} + \frac{\theta}{q_2}, \qquad
\frac{1}{p} = \frac{1-\theta}{p_1} + \frac{\theta}{p_2},
\]
and $2 \le p \le q \le \infty$.
Then
\begin{equation} \label{ineq:intp}
\| f \|_{q} \lesssim \delta^{-\varepsilon}A_1^{1-\theta} A_2^{\theta} \| f \|_{p,\delta}
\end{equation}
for all $f$ with $\supp \hat f \subset \Gamma_\delta$ and all $\varepsilon >0$.
\end{lem}
\begin{proof}
For localization we decompose $f = \sum_{k \in \delta^{-1} \mathbb Z^3} \psi_k f$ where $\psi_k := \psi(\delta(x-k))$. Then,
\[
\|f\|_q^q \le \sum_{k' \in \delta^{-1} \mathbb Z^3} \Big\| \sum_{k \in \delta^{-1} \mathbb Z^3} \psi_k f \Big\|_{L^q(B(k',2\delta^{-1}))}^q.
\]
Since $\psi_k$ has rapid decay outside $B(k,\delta^{-1-\varepsilon})$, we have that if $x \in B(k',2\delta^{-1})$ then
\[
\Big|\sum_{k \in \delta^{-1}\mathbb Z^3 \setminus B(k',2\delta^{-1-\varepsilon})} \psi_k(x) \Big| \le C_K \delta^{K}
\]
for all $K>0$. Using this and a rough estimate $\|f\|_q \lesssim \delta^{-C} \|f\|_{p,\delta}$, we have that for any $\varepsilon>0$ and $K>0$,
\[
\|f\|_q^q \le \sum_{k'} \Big\| \sum_{k \sim k'} \psi_k f \Big\|_{L^q(B(k',2\delta^{-1}))}^q + C_K\delta^{K}\|f\|_{p,\delta}^q,
\]
where $k\sim k'$ means that $k \in B(k',2\delta^{-1-\varepsilon}) \cap \delta^{-1}\mathbb Z^{3}$. Since the number of $k \in \delta^{-1}\mathbb Z^3$ contained in $B(k',2\delta^{-1-\varepsilon})$ is $O(\delta^{-3\varepsilon})$, we have
\begin{align*}
\|f\|_q^q &\lesssim \delta^{-3\varepsilon q} \sum_{k'} \sum_{k \sim k'} \|\psi_k f \|_{L^q(B(k',2\delta^{-1}))}^q + C_K\delta^{K} \|f\|_{p,\delta}^q\\
&\lesssim \delta^{-3\varepsilon q} \sum_{k'} \sum_{k \sim k'} \|\psi_k f \|_q^q + C_K\delta^{K} \|f\|_{p,\delta}^q \\
&\lesssim \delta^{-3\varepsilon q-3\varepsilon} \sum_{k} \|\psi_k f \|_q^q + C_K\delta^{K}\|f\|_{p,\delta}^q.
\end{align*}
Since $p \le q$, we have that for any $\varepsilon >0$ and any $K > 0$,
\[
\|f\|_q \lesssim \delta^{-C\varepsilon}\Big( \sum_{k} \| \psi_k f \|_q^{p} \Big)^{1/p} + C_K\delta^{K}\|f\|_{p,\delta}.
\]
On the other hand, by Minkowski's inequality and $p \ge 2$ it follows that
\[
\Big( \sum_{k} \| \psi_k f \|_{p,2\delta}^{p} \Big)^{1/p} \le \|f\|_{p,2\delta} \lesssim \|f\|_{p,\delta}.
\]
Thus, by the above two estimates the proof of \eqref{ineq:intp} is reduced to showing
\begin{equation*}
\| \psi_k f \|_q \lesssim \delta^{-\varepsilon} A_1^{1-\theta} A_2^{\theta} \| \psi_k f \|_{p,2\delta}.
\end{equation*}
By translation invariance it is enough to consider $\psi_0 f$. Let $g := \psi_0 f$.
By normalization we may assume that $\|g\|_{p,2\delta} = 1$. Then it is reduced to showing
\begin{equation} \label{redForm}
\|g\|_q \lesssim \delta^{-\varepsilon} A_1^{1-\theta} A_2^{\theta}.
\end{equation}
Since $\psi_0$ has fast decay outside $B(0,C\delta^{-1})$,
we have
\(
\|g\|_q \le \|g\|_{L^q(B(0,\delta^{-1-\varepsilon}))} + C_K\delta^{K}
\)
for all $\varepsilon>0$ and $K>0$.
Since $\psi_0$ has Fourier support in $B(0,\delta/2)$, $\widehat g$ is supported in $\Gamma_{2\delta}$.
By Lemma \ref{lem:wavepack}, it is decomposed into
\[
g(x) = \sum_{\Theta \in \mathbf \Pi_{2\delta}}\sum_{\pi \in \mathbf P_\Theta} h_{\pi} g_{\pi}(x).
\]
We first remove some minor $\pi$'s. By \eqref{ess_supp}, we can eliminate $\pi$ that is disjoint from $B(0, C\delta^{-1-\varepsilon})$. Let $\mathring{\mathbf P}$ be the collection of $\pi$ intersecting $B(0, C\delta^{-1-\varepsilon})$. Then $\# \mathring{\mathbf P} \lesssim \delta^{-2-3\varepsilon}$.
The rectangles $\pi$ with $h_\pi = O(\delta^{500})$ can be also eliminated, since
\[
\Big\| \sum_{\pi \in \mathring{\mathbf P} : 0< h_\pi \lesssim \delta^{500}} h_\pi g_\pi \Big\|_q
\lesssim \delta^{500} |\pi| \# \mathring{\mathbf P} \lesssim \delta^{400}.
\]
We group the rectangles $\pi$ by value of coefficients $h_\pi$. Since $\|g\|_{p,2\delta} =1$, from \eqref{lp_sum} we can see that $h_\pi \lesssim 1$.
For any dyadic number $\delta^{500} \lesssim h \lesssim 1$ we define
\(
\mathring{\mathbf P}_h := \{ \pi \in \mathring{\mathbf P}: h \le h_\pi < 2h \}.
\)
It is classified into $\mathring{\mathbf P}_{h, \Theta} := \mathring{\mathbf P}_h \cap \mathbf P_\Theta$, and let
\[
\mathring{\mathbf P}_{h}^k := \bigcup_{k \le \# \mathring{\mathbf P}_{h, \Theta} < 2k} \mathring{\mathbf P}_{h, \Theta}
\]
for dyadic numbers $1 \le k \lesssim \delta^{-2}$.
Since there are $O(\log \delta^{-1})$ dyadic numbers $\delta^{500} \lesssim h \lesssim 1$ and $1 \le k \lesssim \delta^{-2}$, by pigeonholing there exist $h$ and $k$ so that
\[
\Big\|
\sum_{\delta^{500} \le h \lesssim 1} h \sum_{1 \le k \lesssim \delta^{-2}} \sum_{\pi \in \mathring{\mathbf P}_h^k} g_\pi \Big\|_q \lesssim (\log \delta^{-1})^2 h \Big\|\sum_{\pi \in \mathring{\mathbf P}_h^k} g_\pi \Big\|_q .
\]
Let $\tilde g := \sum_{\pi \in \mathring{\mathbf P}_h^k} g_\pi $. Then from these estimates, one has
\[
\|g\|_q \lesssim \delta^{-\varepsilon} h \|\tilde g\|_q +\delta^{400}.
\]
Since $\tilde g$ is a balanced function, from H\"older's inequality, \eqref{givenEstLL} and Lemma \ref{lem:revHol} it follows that
\[
\|\tilde g\|_q \le \|\tilde g\|_{q_1}^{1-\theta} \|\tilde g\|_{q_2}^{\theta}
\le A_1^{1-\theta} A_2^{\theta}\|\tilde g\|_{p_1,2\delta}^{1-\theta}\|\tilde g\|_{p_2,2\delta}^{\theta} \lesssim A_1^{1-\theta} A_2^{\theta} \|\tilde g\|_{p,2\delta},
\]
and by \eqref{lp_sum},
\[
h\| \tilde g \|_{p,2\delta} \lesssim \| g \|_{p,2\delta}.
\]
Therefore, by combining these estimates we obtain \eqref{redForm}.
\end{proof}
\begin{remark} \label{rem:Alt_inter}
By using known interpolation theorems we can obtain Lemma \ref{lem:interp} without $\varepsilon$-losses. Indeed, since $f$ in Lemma \ref{lem:interp} has the Fourier support condition, we are not able to apply interpolation theorems directly. To avoid this,
we define a linear operator $T$ by
\[
T \mathbf f = \sum_{j \in J} f_j \ast \Xi_{\Theta_j}
\]
for $\mathbf f = \{ f_j\}_{j \in J}$, where $J$ is an index set of $\mathbf\Pi_\delta$. Then the inequality
\(
\|f\|_q \le A \|f\|_{p,\delta}
\)
in Lemma \ref{lem:interp}
is equivalent to
\(
\| T \mathbf f \|_q \le A \| \mathbf f\|_{\ell^2(L^p)},
\) where $\ell^2(L^p)$ is the space of $L^p$-valued $\ell^2$-sequences.
Since the functions $\{f_j\}_{j \in J}$ are not subject to the Fourier support condition, by applying the complex interpolation theorem we get Lemma \ref{lem:interp} without $\varepsilon$-losses.
\end{remark}
\
To prove Theorem \ref{thm:sqfEst} we need a trilinear interpolation lemma. Before stating the lemma let us define a notation $\underline\prod$, which will be repeatedly used in the remaining parts of this paper. For $A_1, A_2, A_3 \in \mathbb C$, let $\underline\prod A_i$ denote the geometric mean of their absolute values; that is,
\[
\underline\prod A_i := \Big( \prod_{i=1}^{3} |A_i| \Big)^{1/3}.
\]
From simple calculations it is easy to see the following.
If $A$, $A_i$ and $B_i$ are complex numbers for $i=1,2,3$, then
\begin{align*}
\underline\prod A &= |A|, \\
\underline\prod CA_i &= C \underline\prod A_i \qquad \text{for $C \ge 0$},\\
\underline\prod (A_i B_i) &= \underline\prod A_i \underline\prod B_i, \\
\underline\prod A_i^{\alpha} &= \Big( \underline\prod A_i \Big)^{\alpha} \qquad \text{for $\alpha \in \mathbb R$}.
\end{align*}
Also, if all $A_{i,\Delta} \in \mathbb C$ and $f_i \in L^p$, then by H\"older's inequality it follows that for $1 \le p \le \infty$,
\begin{align}
\label{Holder1}
\Big( \sum_{\Delta} \underline\prod A_{i,\Delta}^{p} \Big)^{1/p} &\le \underline\prod \Big( \sum_{\Delta} |A_{i,\Delta}|^p \Big)^{1/p}, \\
\label{Holder2}
\Big\| \underline\prod f_i \Big\|_p &\le \underline\prod \|f_i\|_p.
\end{align}
Now we state our trilinear interpolation lemma.
\begin{lem} \label{lem:MulInterpolation}
Let $2 \le p_1,p_2,q_1,q_2 \le \infty$.
Assume that
\begin{equation} \label{givenEst}
\Big\| \underline\prod f_i \Big\|_{q_1} \le A_1 \underline\prod \| f_i \|_{p_1,\delta},\qquad
\Big\|\underline\prod f_i \Big\|_{q_2} \le A_2 \underline\prod \| f_i \|_{p_2,\delta}
\end{equation}
for all $f_i$, $i=1,2,3,$ with $\supp \hat f_i \subset \Gamma_\delta$.
Suppose that for some $\theta \in (0,1)$,
\[
\frac{1}{q} = \frac{1-\theta}{q_1} + \frac{\theta}{q_2}, \qquad
\frac{1}{p} = \frac{1-\theta}{p_1} + \frac{\theta}{p_2}
\] and $2 \le p \le q \le \infty$.
Then
\[
\Big\| \underline\prod f_i \Big\|_{q} \lesssim \delta^{-\varepsilon} A_1^{1-\theta} A_2^{\theta} \underline\prod \| f_i \|_{p,\delta}
\]
for all $f_i$, $i=1,2,3,$ with $\supp \hat f_i \subset \Gamma_\delta$ and all $\varepsilon >0$.
\end{lem}
\begin{proof}
The proof is similar to that of Lemma \ref{lem:interp}.
We decompose $\underline \prod f_i = \sum_{k \in \delta^{-1} \mathbb Z^3} \psi_k \underline \prod f_i$ where $\psi_k := \psi(\delta(x-k))$. We can reduce it in an analogous manner to the proof of Lemma \ref{lem:interp}. By localization, it suffices to show that
\begin{equation} \label{mulinterpol}
\Big\| \underline\prod g_i \Big\|_q \lesssim \delta^{-\varepsilon} A_1^{1-\theta} A_2^{\theta}
\end{equation}
for all $g_i := \psi_0 f_i$ with $\|g_i\|_{p,2\delta} = 1$.
Some minor portions can be removed as in the proof of Lemma \ref{lem:interp}. Since $\psi_0$ decays rapidly outside $B(0,C\delta^{-1})$, we have
\(
\| \underline\prod g_i\|_q \le \| \underline\prod g_i\|_{L^q(B(0,\delta^{-1-\varepsilon}))} + C_K\delta^{K}
\)
for all $\varepsilon>0$ and $K>0$.
Since $g_i$ is Fourier supported in $\Gamma_{2\delta}$,
by Lemma \ref{lem:wavepack},
\[
g_i(x) = \sum_{\Theta_i \in \mathbf \Pi_{\delta}} \sum_{\pi_i \in \mathbf P_{\Theta_i}} h_{\pi_i} g_{\pi_i}(x).
\]
By \eqref{ess_supp}, we can eliminate $\pi_i$ that is disjoint from $B(0, C\delta^{-1-\varepsilon})$, so we can restrict $\mathbf P_i$ to the collection $\mathring{\mathbf P}_i$ of $\pi_i$ intersecting $B(0, C\delta^{-1-\varepsilon})$. We can also remove $\pi_i$ with $0< h_{\pi_i} \lesssim \delta^{500}$.
For dyadic $\delta^{500} \lesssim h_i \lesssim 1$, we define
\(
\mathring{\mathbf P}_{h_i}:= \{ \pi \in \mathring{\mathbf P}_i: h_i \le h_\pi < 2h_i \}.
\)
Let
\(
\mathring{\mathbf P}_{\Theta_i}(h_i) := \mathring{\mathbf P}_{h_i} \cap \mathbf P_{\Theta_i}
\), and for any dyadic number $1 \le k_i \lesssim \delta^{-2}$ we define
\[
\mathring{\mathbf P}_{i}(h_i,k_i) = \bigcup_{k_i \le \# \mathring{\mathbf P}_{\Theta_i}(h_i) < 2k_i} \mathring{\mathbf P}_{\Theta_i}(h_i).
\]
Then, we have
\[
\Big\| \underline\prod g_i \Big\|_q \lesssim \Big\| \underline\prod \Big( \sum_{\delta^{500} \lesssim h_i \lesssim 1} h_i \sum_{1 \lesssim k_i \lesssim \delta^{-2}} \sum_{\pi \in \mathring{\mathbf P}_i(h_i,k_i)} g_{\pi_i} \Big) \Big\|_q + \delta^{100}.
\]
We write as
\[
\prod_{i=1}^{3} \Big( \sum_{h_i} h_i \sum_{k_i} \sum_{\pi \in \mathring{\mathbf P}_i(h_i,k_i)} g_{\pi_i} \Big)
= \sum_{h_1,h_2,h_3} \sum_{k_1,k_2,k_3} \prod_{i=1}^{3} \Big( h_i \sum_{\pi \in \mathring{\mathbf P}_i(h_i,k_i)} g_{\pi_i} \Big).
\]
By dyadic pigeonholing, there exist dyadic numbers $h_i$ and $k_i$, $i=1,2,3,$ so that
\[
\Big\|
\underline \prod \Big( \sum_{\delta^{500} \lesssim h_i \lesssim 1} h_i \sum_{1 \lesssim k_i \lesssim \delta^{-2}} \sum_{\pi \in \mathring{\mathbf P}_i(h_i,k_i)} g_{\pi_i} \Big) \Big\|_q \lesssim (\log \delta^{-1})^2 \Big( \underline\prod h_i \Big) \Big\|\underline\prod \Big( \sum_{\pi \in \mathring{\mathbf P}_i(h_i,k_i)} g_{\pi_i} \Big) \Big\|_q .
\]
Let $\tilde g_i := \sum_{\pi \in \mathring{\mathbf P}_i(h_i,k_i)} g_{\pi_i} $. Then from these estimates we have
\[
\Big\| \underline\prod g_i \Big\|_q \lesssim \delta^{-\varepsilon} \Big( \underline\prod h_i \Big) \Big\| \underline\prod \tilde g_i \Big\|_q +\delta^{100}.
\]
Since $\tilde g_i$ are balanced functions, from H\"older's inequality, \eqref{givenEst} and Lemma \ref{lem:revHol} it follows that
\[
\Big\|\underline\prod \tilde g_i \Big\|_q \le \Big\|\underline\prod \tilde g_i \Big\|_{q_1}^{1-\theta} \Big\|\underline\prod \tilde g_i \Big\|_{q_2}^{\theta}
\le A_1^{1-\theta} A_2^{\theta}\underline\prod \|\tilde g_i\|_{p_1,2\delta}^{1-\theta} \underline\prod \|\tilde g_i\|_{p_2,2\delta}^{\theta} \lesssim A_1^{1-\theta} A_2^{\theta} \delta^{-\varepsilon} \underline\prod \| \tilde g_i\|_{p,2\delta}
\]
and by \eqref{lp_sum},
\[
h_i \| \tilde g_i \|_{p,2\delta} \lesssim \| g_i \|_{p,2\delta}.
\]
Therefore, these estimates yield \eqref{mulinterpol}.
\end{proof}
\begin{remark}
By using analogous methods to Remark \ref{rem:Alt_inter}, we can obtain Lemma \ref{lem:MulInterpolation} without $\epsilon$-losses by known multilinear interpolation theorems, see, e.g., \cite{bergh1976interpolation}.
\end{remark}
\section{Proof of Theorem \ref{thm:sqfEst}.}
This section is devoted to the proof of $\mathcal{SQ}(4 \to 4; 1/16)$.
By Proposition \ref{prop:MSQmeanSQ} this follows from the trilinear square function estimate $\mathcal{SQ}(4 \times 4 \times 4 \to 4;1/16)$. To prove this we will utilize the following two theorems. The first one is the multilinear restriction theorem due to Bennett, Carbery and Tao \cite{bennett2006multilinear}.
\begin{thm}[Bennett--Carbery--Tao \cite{bennett2006multilinear}] \label{thm:MRT}
Let $f_i$, $i=1,2,3,$ be supported in $\Gamma^{\Omega_i}$.
Suppose that $\Gamma^{\Omega_1}, \Gamma^{\Omega_2}, \Gamma^{\Omega_3}$ are $\nu$-transverse.
If $R \gg \nu^{-1}$ then for any $\epsilon >0$ and any ball $Q_R$ of radius $R$,
\begin{equation} \label{mrt}
\Big\| \underline\prod \widehat{f_i d\sigma_i} \Big\|_{L^3(Q_R)} \le C_\epsilon R^{\epsilon} \underline\prod \| f_i \|_{2},
\end{equation}
where $d\sigma_i$ is the induced Lebesgue measure on $\Gamma^{\Omega_i}$.
\end{thm}
Note that if the restriction operator $\mathfrak R$ is defined as the restriction $\mathfrak R f =\hat f \big|_{\Gamma}$ to $\Gamma$ of the Fourier transform $\hat f$, then the \textit{extension operator} $\widehat{f d\sigma}$ is its adjoint operator $\mathfrak R^*f$.
\
The second one is the $\ell^2$ decoupling theorem due to Bourgain and Demeter \cite{bourgain2015proof}.
\begin{thm}[Bourgain--Demeter \cite{bourgain2015proof}]\label{thm:Decoupling}
Suppose that the Fourier support of $f$ is contained in $\Gamma_\delta$. Then for any $\epsilon >0$,
\begin{equation} \label{FrDecp}
\| f \|_{6} \le C_\epsilon \delta^{-\epsilon}\Big(\sum_{\Theta \in \mathbf \Pi_\delta} \| f_\Theta \|_6^2 \Big)^{1/2}.
\end{equation}
\end{thm}
To deal with local estimates we define local norms as follows:
\[
\|f\|_{L^p(\psi_B)} := \|f \psi_B \|_p,
\]
and for any function $f$ with $\supp \hat f \subset \Gamma_\delta$,
\[
\|f\|_{p,\delta,B} := \Big( \sum_{\Theta \in \mathbf \Pi_{\delta}} \| f_{\Theta} \|^2_{L^p(\psi_{B})} \Big)^{1/2}.
\]
Note that if $B$ is a ball of radius $\ge 2/\sqrt\delta$ then for $p\ge 2$,
\begin{equation} \label{locDecNorm}
\| f \psi_{B} \|_{p,\delta} \lesssim \| f \|_{p,\delta,B}.
\end{equation}
Indeed, we decompose the Fourier transform of $(f\psi_B)\ast \Xi_\Theta$ as follows:
\[
(\hat f \ast \hat \psi_{B}) \hat \Xi_\Theta = ((\hat f \hat \Xi_{C\Theta} ) \ast \hat \psi_{B}) \hat \Xi_\Theta
+ ((\hat f (1- \hat \Xi_{C\Theta}) ) \ast \hat \psi_{B}) \hat \Xi_\Theta.
\]
Consider the last term of the above equation. We write as
\[
((\hat f (1- \hat \Xi_{C\Theta}) ) \ast \hat \psi_{B})(x) \hat \Xi_\Theta(x) = \int \hat f(y) (1- \hat \Xi_{C\Theta})(y) \hat \psi_{B}(x-y) \hat \Xi_\Theta(x) dy.
\]
For $y \in \Gamma_\delta \setminus C\Theta$ and $x \in \Theta$ we have $|x-y| \ge \sqrt\delta$, and $\hat \psi_{B}$ is supported in a ball of radius $\le \sqrt\delta/2$ with center 0. By considering supports we can see that the above equation is zero. Thus, by Fourier inversion,
\[
(f \psi_{B}) \ast \Xi_\Theta = ((f \ast \Xi_{C\Theta}) \psi_{B}) \ast \Xi_\Theta.
\]
By this equation, Young's inequality and the triangle inequality, we have
\begin{align*}
\| (f \psi_{B}) \ast \Xi_\Theta \|_p
\lesssim \|(f \ast \Xi_{C\Theta}) \psi_{B} \|_p
\lesssim \sum_{\Theta' \subset C\Theta}\|(f \ast \Xi_{\Theta'}) \psi_{B} \|_p.
\end{align*}
From this we can obtain \eqref{locDecNorm}.
\subsection{}
We will deduce a trilinear decoupling estimate from Theorem \ref{thm:MRT} and Theorem \ref{thm:Decoupling}.
By combining Theorem \ref{thm:MRT} with a localization argument and a slicing argument, it follows that
\[
\Big\| \underline \prod f_i \Big\|_{3}
\le C_\epsilon \delta^{1/2 -\epsilon} \underline\prod \|f_i\|_{2}
\]
for all $f_i$ with $\supp \hat f_i \subset \Gamma_\delta^{\Omega_i}$,
(for the details, see \cite{bennett2006multilinear}, \cite{lee2012cone}, \cite{tao1998bilinear}). By orthogonality, if $f$ is a function with $\supp \hat f \subset \Gamma_\delta$, then
\[
\| f \|_2 \sim \Big( \sum_{\Theta \in \mathbf \Pi_{\delta}} \| f_{\Theta} \|_2^2 \Big)^{1/2} = \| f\|_{2,\delta}.
\]
Thus, we have
\begin{equation*}
\Big\| \underline \prod f_i \Big\|_{3}
\le C_\epsilon \delta^{1/2-\epsilon} \underline\prod \|f_i\|_{2,\delta}.
\end{equation*}
On the other hand, from \eqref{FrDecp} and H\"older's inequality we have
\[
\Big\| \underline \prod f_i \Big\|_{6}
\le C_\epsilon \delta^{-\epsilon} \underline\prod \|f_i\|_{6,\delta}.
\]
We interpolate these two estimates by Lemma \ref{lem:MulInterpolation}. Then,
\[
\Big\| \underline \prod f_i \Big\|_{4}
\le C_\epsilon \delta^{1/4-\epsilon}\underline\prod \|f_i\|_{3,\delta}.
\]
By H\"older's inequality one has $\|f_i\|_{3,\delta} \le \|f_i\|_{4,\delta}^{2/3} \|f_i\|_{2,\delta}^{1/3}$. Inserting this into the above we obtain
\begin{equation} \label{mainEq}
\Big\| \underline \prod f_i \Big\|_{4}
\le C_\epsilon \delta^{1/4-\epsilon} \Big(\underline\prod \|f_i\|_{4,\delta}\Big)^{2/3} \Big( \underline\prod \|f_i\|_{2,\delta} \Big)^{1/3} .
\end{equation}
\subsection{}
Set $R=\delta^{-1}$.
We take a covering $\{\Delta\}$ of $\mathbb R^3$ by finitely overlapping $2R^{1/2}$-balls. We apply the estimate \eqref{mainEq} to $f_i\psi_{\Delta}$. Since the Fourier support of $f_i \psi_\Delta$ is in $\Gamma_{2\sqrt\delta}$, by \eqref{mainEq} and \eqref{locDecNorm} we obtain
\[
\Big\| \underline \prod f_i \Big\|_{L^4(\Delta)}
\le C_\epsilon R^{-1/8 +\epsilon/2} \Big(\underline\prod \|f_i\|_{4,\sqrt\delta,\Delta}\Big)^{2/3} \Big( \underline\prod \|f_i\|_{2,\sqrt\delta,\Delta} \Big)^{1/3}.
\]
After taking the 4th power in the above, we sum over $\Delta$, and apply H\"older's inequality. Then,
\[
\sum_{\Delta} \Big\| \underline \prod f_i \Big\|_{L^4(\Delta)}^4
\le C_\epsilon R^{-1/2+2\epsilon} \Big( \sum_{\Delta} \underline\prod \|f_i\|^4_{4,\sqrt\delta,\Delta} \Big)^{2/3} \Big( \sum_{\Delta} \underline\prod \|f_i\|^4_{2,\sqrt\delta,\Delta} \Big)^{1/3}.
\]
After taking the 4th root in the above, we apply \eqref{Holder1} to the right-hand sums. Then,
\begin{align*}
\Big( \sum_{\Delta} \Big\| \underline \prod f_i \Big\|_{L^4(\Delta)}^4 \Big)^{1/4}
&\le C_\epsilon R^{-1/8+\epsilon/2} \Big( \underline\prod \Big( \sum_{\Delta}\|f_i\|^4_{4,\sqrt\delta,\Delta} \Big)^{1/4}\Big)^{2/3} \Big( \underline\prod \Big( \sum_{\Delta}\|f_i\|^4_{2,\sqrt\delta,\Delta} \Big)^{1/4}\Big)^{1/3}.
\end{align*}
We have \(
\big( \sum_{\Delta}\|f_i\|^4_{4,\sqrt\delta,\Delta} \big)^{1/4} \lesssim \|f_i\|_{4,\sqrt\delta}
\) by Minkowski's inequality. Thus, from the above estimate it follows that
\begin{equation} \label{eqn:AB}
\Big\| \underline \prod f_i \Big\|_4
\le C_\epsilon R^{-1/8 + \epsilon/2} \Big( \underline\prod A_i \Big)^{2/3} \Big( \underline\prod B_i \Big)^{1/3},
\end{equation}
where
\[
A_i := \|f_i\|_{4,\sqrt\delta}, \qquad B_i := \Big( \sum_{\Delta}\|f_i\|^4_{2,\sqrt\delta,\Delta} \Big)^{1/4}.
\]
\subsection{}
We will show that
\begin{equation}\label{L2part}
B_i \lesssim R^{3/8}\| S_\delta f_i\|_{4}.
\end{equation}
By definition we write
\( \|f_i\|_{2,\sqrt\delta,\Delta}^2 = \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \|f_{i,\Upsilon}\|_{L^2(\psi_{\Delta})}^2. \)
Since $f_{i,\Upsilon}$ is decomposed as
\( f_{i,\Upsilon} = \sum_{\Theta \in \mathbf \Pi_{\delta} : \Theta \subset 2 \Upsilon} f_{i,\Theta}, \) we have
\[
\|f_i\|_{2,\sqrt\delta,\Delta}^2 = \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \int \Big| \sum_{\Theta \in \mathbf \Pi_{\delta} :\Theta \subset 2\Upsilon} f_{i,\Theta} \psi_\Delta \Big|^2.
\]
We see that the Fourier support of $f_{i,\Theta} \psi_\Delta$ is contained in the $\delta^{1/2}$-neighborhood of $\Theta$ which is a rectangular box of size $C\delta^{1/2} \times C\delta^{1/2} \times C$ for some constant $C>1$. So, by orthogonality it follows that
\[
\|f_i\|_{2,\sqrt\delta,\Delta}^2
\lesssim \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \sum_{\Theta \in \mathbf \Pi_{\delta} :\Theta \subset 2\Upsilon}\int | f_{i,\Theta} \psi_\Delta |^2
\lesssim \sum_{\Theta \in \mathbf \Pi_{\delta}} \int | f_{i,\Theta} \psi_\Delta |^2.
\]
Since \( \sum_{\Theta \in \mathbf \Pi_{\delta}} \int | f_{i,\Theta} \psi_\Delta |^2 = \int \big( \sum_{\Theta \in \mathbf \Pi_{\delta}} | f_{i,\Theta} |^2 \big)^{\frac{1}{2} \times 2} \psi_\Delta^2 = \| S_\delta f_i \|_{L^2(\psi_\Delta)}^2, \)
the above estimate may be written as
\[
\|f_i\|_{2,\sqrt\delta,\Delta} \lesssim \| S_\delta f_i \|_{L^2(\psi_\Delta)}.
\]
By using this estimate and H\"older's inequality,
\[
B_i \lesssim \Big( \sum_{\Delta}\|S_\delta f_i\|^4_{L^2(\psi_{\Delta})} \Big)^{1/4}
\lesssim R^{\frac{3}{2}\big( \frac{1}{2} - \frac{1}{4} \big)} \Big( \sum_{\Delta}\|S_\delta f_i\|^4_{L^4(\psi_{\Delta})} \Big)^{1/4} \lesssim R^{3/8}\| S_\delta f_i\|_{4}.
\]
Thus we obtain \eqref{L2part}.
\subsection{}
Let $\alpha \ge 0$ be the best constant such that $\mathcal{SQ}(4 \times 4 \times 4 \rightarrow 4;\alpha)$, i.e.,
\[
\alpha = \inf_{\delta > 0} \Big( \log_{1/\delta} \sup_{f_i:\supp \hat f_i \subset \Gamma_\delta^{\Omega_i}} \frac{\| \underline\prod f_i \|_4} {\underline\prod \|S_\delta f_i \|_4} \Big).
\]
To prove $\mathcal{SQ}(4 \times 4 \times 4 \rightarrow 4;1/16)$ it is enough to show that for any $\epsilon>0$,
\[
\alpha \le \frac{1}{16} +C\epsilon.
\]
By H\"older's inequality,
\[
A_i \lesssim R^{\frac{1}{4} \big( \frac{1}{2} - \frac{1}{4} \big)} \Big( \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \| f_{i,\Upsilon} \|_{4}^{4} \Big)^{1/4}.
\]
By the definition of $\alpha$ and Proposition \ref{prop:MSQmeanSQ} one has $SQ(4 \to 4; \alpha)$.
By Lorentz rescaling, as in \eqref{ppsc},
\[
\| f_{i,\Upsilon} \|_4 \le C_\epsilon R^{\alpha/2 + \epsilon} \| S_\delta f_{i,\Upsilon} \|_4.
\]
So, we have
\[
A_i
\le C_\epsilon R^{\alpha/2 + \epsilon} R^{\frac{1}{4} \big( \frac{1}{2} - \frac{1}{4} \big)} \Big( \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \| S_\delta f_{i,\Upsilon} \|_4^4 \Big)^{1/4} .
\]
Since
\begin{align*}
\sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \| S_\delta f_{i,\Upsilon} \|_4^4 &\lesssim \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \int \Big( \sum_{\Theta \in \mathbf \Pi_{\delta} :\Theta \subset 2\Upsilon} |f_{i,\Theta}|^2 \Big)^{2} \\
&\lesssim \int \Big( \sum_{\Upsilon \in \mathbf \Pi_{\sqrt\delta}} \sum_{\Theta \in \mathbf \Pi_{\delta} :\Theta \subset 2\Upsilon} |f_{i,\Theta}|^2 \Big)^{2} \\
&\lesssim \| S_\delta f_i \|_4^4,
\end{align*}
we obtain
\begin{equation} \label{APart}
A_i \le C_\epsilon R^{1/16 +\alpha/2+\epsilon} \| S_\delta f_i \|_4.
\end{equation}
Now we insert \eqref{APart} and \eqref{L2part} into \eqref{eqn:AB}. Then,
\[
\Big\| \underline \prod f_i \Big\|_{L^4(Q_R)} \le C_\epsilon R^{1/24+\alpha/3 + C\epsilon} \underline\prod \|S_\delta f_i \|_4.
\]
Since $\alpha$ is the best constant for which $\mathcal{SQ}(4 \times 4 \times 4 \rightarrow 4;\alpha)$ holds, we have
\( \alpha \le \frac{1}{24} + \frac{\alpha}{3} +C\epsilon. \)
Therefore, \( \alpha \le \frac{1}{16} +C\epsilon. \) This completes the proof.
\section{Proof of Theorem \ref{thm:localSm}.}
In this section, Theorem \ref{thm:localSm} will be proved by using a corresponding trilinear estimate.
Let us define an operator $U_N$ by
\[
U_N f(x,t) = \check \eta_N \ast e^{it \sqrt{-\Delta}} f(x)
\]
where $\eta_N$ is a bump function supported in $\{\xi \in \mathbb R^2 : |\xi| \sim N\}$ and $\check \eta_N$ is the inverse Fourier transform of $\eta_N$.
By the Littlewood--Paley decomposition, to prove Theorem \ref{thm:localSm} it suffices to show that the estimate
\[
\|U_Nf \|_{L^{30/7}(\mathbb R^2 \times [1,2])} \le C_\epsilon N^{1/10+\epsilon} \|f\|_{10/3}
\]
holds for all $\epsilon>0$, all $N \ge 1$ and all $f \in L^{10/3}(\mathbb R^2)$.
For convenience of rescaling we reform $U_Nf$ as follows.
By a linear transformation $J:(\xi_1,\xi_2,\xi_3) \mapsto (\zeta_1,\zeta_2,\zeta_3) = ({\xi_3-\xi_1},\xi_2,{\xi_3+\xi_1})$ which maps the cone $\{ (\xi_1,\xi_2,\pm \sqrt{\xi_1^2+\xi_2^2}) \}$ to the leaned cone $\{ (\zeta_1, \zeta_2,\zeta^2_2/\zeta_1) \}$, we redefine $U_Nf$ by
\begin{equation} \label{eqn:U_N}
U_N f(x,t) = \int e^{2\pi i (x \cdot \xi +t {\xi_2^2}/{\xi_1})} \hat f(\xi) \eta_{N}(\xi_1) \varphi(\xi_2/\xi_1) d\xi, \qquad \xi=(\xi_1,\xi_2),
\end{equation}
where $\varphi$ is a bump function supported in the unit interval. Then, ${U_Nf}$ has Fourier support in
\[
\Gamma(N) := \{(\xi_1,\xi_2,\xi_2^2/\xi_1) : |\xi_1|\sim N,~ |\xi_2/\xi_1| \lesssim 1\}.
\]
The leaned cone $(\xi_1,\xi_2,\xi_2^2/\xi_1)$ is written as $\xi_1(1,\theta,\theta^2)$ where $\theta = \xi_2 /\xi_1$. So one may identify $\theta$ with an angular variable of the cone.
We say that the local smoothing estimate $\mathcal{LS}(p \to q; \alpha)$ holds if
\begin{equation} \label{unf}
\| U_Nf \|_{L^q(\mathbb R^2 \times [1,2])} \le C_{\epsilon} N^{\alpha+\epsilon} \|f\|_{p}
\end{equation}
holds for all $\epsilon>0$, all $N > 1$ and all $f \in L^p(\mathbb R^2)$. To prove Theorem \ref{thm:localSm} it suffices to show
\[
\mathcal{LS}(10/3 \to 30/7; 1/10).
\]
For given $1 \le p < q \le \infty$ and $\frac{1}{p} + \frac{3}{q} =1$, we define
\begin{equation} \label{alpCon}
\alpha= \alpha(p,q) \ge \frac{1}{p} - \frac{3}{q} +\frac{1}{2}
\end{equation}
to be the best exponent for which the estimate \eqref{unf} holds for all $N > 1$ and all $f \in L^p(\mathbb R^2)$, i.e.,
\[
\alpha(p,q) = \inf_{N > 1} \Big( \log_N \sup_{f \in L^p(\mathbb R^2)} \frac{\|U_N f\|_{L^q(\mathbb R^2 \times [1,2])}}{\|f\|_p} \Big).
\]
Then it is enough to show that for all $\epsilon,~ \epsilon_1>0$,
\begin{equation} \label{alG}
\alpha\Big(\frac{10}{3}, \frac{30}{7} \Big) \le \frac{1}{10} + C\epsilon_1 + \log_N C_{\epsilon, \epsilon_1},
\end{equation}
since we may take $\epsilon = \epsilon_1$, which can be absorbed in an $\epsilon$-loss in \eqref{unf}.
\
\subsection{}
Let an arbitrary small $\epsilon_1>0$ be given.
Let $N \ge N_0$ and \( 1 > \gamma_1 > \gamma_2 \ge N_0^{-\epsilon_1/2} \). Later, $\gamma_1$, $\gamma_2$ and $N_0$ will be chosen.
By rescaling and (a minor variant of) Lemma \ref{lem:LinTriLcompare} one has that for any $(x,t) \in \mathbb R^2 \times [1,2]$,
\begin{align*}
|U_N f(x,t) | &\lesssim \max_{\Omega \in \mathbf \Omega(\gamma_1)} | U_N^{\Omega} f(x,t)|
+ \gamma_1^{-1}
\max_{\Omega \in \mathbf \Omega(\gamma_2)} | U_N^{\Omega} f(x,t) |\\
&\qquad \qquad + \gamma_2^{-50}
\max_{\substack{\Omega_1,\Omega_2,\Omega_3 \in \mathbf
\Omega(\gamma_2): \\ \dist(\Omega_i, \Omega_j) \ge \gamma_2,\, i
\neq j}} \Big| \Big( \prod_{i=1}^{3} |U_N^{\Omega_i} f(x,t)| \Big)^{1/3} \Big|,
\end{align*}
where $U_N^{\Omega}$ is defined as \eqref{eqn:U_N} with $\varphi$ replaced by $\varphi_\Omega$ which is a bump function supported in $\Omega$.
By embedding $\ell^{q} \subset \ell^{\infty}$ it follows that
\begin{equation} \label{LpTricom1}
\begin{split}
\|U_N f \|_{L^q(\mathbb R^2 \times I)} & \lesssim \Big( \sum_{\Omega_1 \in \mathbf \Omega(\gamma_1)} \| U_N^{\Omega_1} f \|^q_{L^q(\mathbb R^2 \times I)} \Big)^{1/q}
+ \gamma_1^{-1} \Big(
\sum_{\Omega_2 \in \mathbf \Omega(\gamma_2)} \| U_N^{\Omega_2} f \|^q_{L^q(\mathbb R^2 \times I)} \Big)^{1/q} \\
&\qquad \qquad + \gamma_2^{-50} \Big(
\sum_{\substack{\Omega_1,\Omega_2,\Omega_3 \in \mathbf
\Omega(\gamma_2): \\ \dist(\Omega_i, \Omega_j) \ge \gamma_2,\, i
\neq j}} \Big\| \Big( \prod_{i=1}^{3} |U_N^{\Omega_i} f_i| \Big)^{1/3} \Big\|^q_{L^q(\mathbb R^2 \times I)} \Big)^{1/q},
\end{split}
\end{equation}
where $I=[1,2]$.
We consider the first and second summation in the right-hand side of \eqref{LpTricom1}.
From rescaling and the definition of $\alpha$ it follows that
\begin{equation} \label{LShyp}
\| U_N^{\Omega_i} f\|_{L^q(\mathbb R^2 \times I)} \le C\gamma_i^{3\big(\frac{1}{q}-\frac{1}{p} \big)} (\gamma_i^2 N)^{\alpha+\epsilon} \| f \|_p.
\end{equation}
More specifically, by rotating we may assume that $\Omega$ is centered at 0. Then we may write $U_N^{\Omega_i} f$ as
\[
U_N^{\Omega_i} f(x,t) = \int e^{2\pi i (x \cdot \xi +t {\xi_2^2}/{\xi_1})} \hat f(\xi) \eta_N(\xi_1) \varphi(\gamma_i^{-1} \xi_2/\xi_1) d\xi.
\]
Let $\sigma(x_1,x_2,t) = (\gamma_i^2 x_1, \gamma_i x_2, t)$ and $\underline \sigma(x_1,x_2) = (\gamma_i^2 x_1, \gamma_i x_2)$.
Then, we have
\(
U_N^{\Omega_i} f \circ \sigma = U_{\gamma_i^2 N}(f \circ \underline\sigma).
\)
Thus, using \eqref{unf} and this relation we have \eqref{LShyp}.
If we define $f_\Omega$ by
\begin{equation*}
\widehat f_\Omega(\xi_1,\xi_2) = \hat f(\xi_1,\xi_2) \chi_{\{|\xi_1| \sim N\}}(\xi_1) \chi_{\Omega}(\xi_2/\xi_1),
\end{equation*}
then we may replace $U_N^{\Omega_i} f$ with $U_N^{\Omega_i} f_{\Omega_i}$, where $\chi$ denotes a characteristic function. By \eqref{LShyp},
\begin{equation} \label{eqn:sum_of_U_N}
\Big( \sum_{\Omega_i \in \mathbf \Omega(\gamma_i)} \| U_N^{\Omega_i} f_{\Omega_i} \|_q^q \Big)^{1/q}
\le C \gamma_i^{3\big(\frac{1}{q} - \frac{1}{p} \big)} (\gamma_i^{2}N)^{\alpha+\epsilon} \Big(\sum_{\Omega_i \in \mathbf \Omega(\gamma_i)} \| f_{\Omega_i} \|_p^q \Big)^{1/q}.
\end{equation}
We recall the following lemma from \cite{tao2000bilinearII}.
\begin{lem}[\cite{tao2000bilinearII}*{Lemma 7.1}] \label{lem:desum}
Let $R_k$ be a collection of rectangles such that the dilates $2R_k$ are almost disjoint, and suppose that $f_k$ are a collection of functions whose Fourier transforms are supported on $R_k$. Then for all $1 \le p \le \infty$ we have
\[
\Big( \sum_{k} \|f_k\|_p^{p^*} \Big)^{1/p^*} \lesssim \Big\| \sum_{k} f_k \Big\|_p \lesssim \Big( \sum_k \|f_k \|_p^{p_*} \Big)^{1/p_*},
\]
where $p_* = \min(p,p')$, $p^* = \max(p,p')$.
\end{lem}
It is remarked that Lemma \ref{lem:desum} is elementary, and simply a consequence of interpolation between Plancherel's theorem and Minkowski's inequality for the $L^\infty$ space.
After embedding $\ell^p \subset \ell^q$ in the right-hand side of \eqref{eqn:sum_of_U_N}, we apply Lemma \ref{lem:desum}. Then we obtain
\begin{equation} \label{scEst}
\Big( \sum_{\Omega_i \in \mathbf \Omega(\gamma_i)} \| U_N^{\Omega_i} f \|_{L^q(\mathbb R^2 \times I)}^q \Big)^{1/q} \le C \gamma_i^{3\big(\frac{1}{q} - \frac{1}{p} \big)} (\gamma_i^2 N)^{\alpha+\epsilon} \|f\|_p.
\end{equation}
\
\subsection{}
We consider the last summation in the right-hand side of \eqref{LpTricom1}.
We will show that for any $\epsilon>0$,
\begin{equation} \label{GTriLS}
\Big\| \underline \prod U_N^{\Omega_i} f \Big\|_{L^{30/7}(\mathbb R^2 \times I)} \le C_\epsilon N^{1/10+\epsilon} \|f\|_{10/3}.
\end{equation}
First we prove a corresponding local estimate.
\begin{lem}
Let $B$ be a unit ball. Then, for any $\epsilon > 0$,
\begin{equation} \label{localTLS}
\Big\| \underline\prod |U_N^{\Omega_i} f_i| \Big\|_{L^{30/7}(B \times I)} \le C_\epsilon N^{1/10+\epsilon} \underline\prod \|f_i\|_{10/3}.
\end{equation}
\end{lem}
\begin{proof}
By interpolation it suffices to show
\begin{align} \label{LSdec}
\Big\| \underline\prod U_N^{\Omega_i} f_i \Big\|_{L^6(B \times I)} &\le C_\epsilon N^{1/6+\epsilon} \underline\prod \|f_i\|_{6},\\
\label{LSMR}
\Big\| \underline\prod U_N^{\Omega_i} f_i \Big\|_{L^3(B \times I)} &\le C_\epsilon N^{\epsilon} \underline\prod \|f_i\|_{2}.
\end{align}
Consider \eqref{LSdec}. By H\"older's inequality it is enough to show
\begin{equation} \label{LSdec2}
\| U_Nf \|_{L^6(B \times I)}
\le C_\epsilon N^{1/6+\epsilon} \|f\|_6.
\end{equation}
Since $\psi_{I}(t)U_Nf(x,t) $ has Fourier support in a $C$-neighborhood of $\Gamma(N)$, from Theorem \ref{thm:Decoupling} and rescaling it follows that
\[
\| U_Nf \|_{L^6(B \times I)}
\le C_\epsilon N^{\epsilon} \Big( \sum_{\widetilde\Theta} \|(\psi_I U_Nf) \ast \Xi_{\widetilde\Theta}\|_{6}^2 \Big)^{1/2},
\]
where
$\widetilde\Theta$ is a sector of size $CN^{1/2} \times CN \times C$.
By H\"older's inequality, this is bounded by
\[
\le C_\epsilon N^{1/6+\epsilon} \Big( \sum_{\widetilde\Theta} \|(\psi_I U_Nf) \ast \Xi_{\widetilde\Theta}\|_{6}^6 \Big)^{1/6}.
\]
It is well known (see, e.g., \cite{wolff2000local}*{Lemma 6.1}, \cite{stein1993harmonic}*{XI: 4.13}, \cite{mockenhaupt1992wave}) that for $p \ge 2$,
\[
\Big( \sum_{\widetilde\Theta} \|(\psi_I U_Nf )\ast \Xi_{\widetilde\Theta}\|_{p}^p \Big)^{1/p} \lesssim \|f\|_p.
\]
Thus, we obtain \eqref{LSdec2}.
Consider \eqref{LSMR}. In \eqref{mrt}, the restriction operator $\widehat{f_j d\sigma_j}$ can be replaced with $U_1^{\Omega_j} \check f$ where $\check f$ denotes the inverse Fourier transform of $f$. Thus, from Theorem \ref{thm:MRT} and Plancherel's theorem it follows that
\[
\Big\| \underline\prod U_1^{\Omega_i} f_i \Big\|_{L^3(Q_N)} \le C_\epsilon N^{\epsilon} \underline\prod \|f_i\|_{2}.
\]
If $s(x,t) = N^{-1}(x,t)$ and $\underline s (x) = N^{-1}x$, then $U_N^{\Omega} f \circ s = U_1^{\Omega} (f \circ \underline s)$. So, by changing variables and translation invariance, the above estimate gives \eqref{LSMR}.
\end{proof}
We now prove that \eqref{localTLS} implies \eqref{GTriLS}. This immediately follows from the next localization lemma.
\begin{lem}
Suppose that the local estimate
\begin{equation} \label{ifloc}
\Big\| \underline\prod U_N^{\Omega_i} f_i \Big\|_{L^q(B \times I)} \le A(N) \underline\prod \| f_i \|_{p}
\end{equation}
holds for all unit cubes $B$ and all $f_i \in L^p(\mathbb R^2)$.
If $p \le q$ then the estimate
\begin{equation} \label{thenGl}
\Big\| \underline\prod U_N^{\Omega_i} f_i \Big\|_{L^q(\mathbb R^2 \times I)} \le C N^{\epsilon} A(N) \underline\prod \| f_i \|_{p}
\end{equation}
holds for all $\epsilon>0$ and all $f_i \in L^p(\mathbb R^2)$.
\end{lem}
\begin{proof}
We write as
\[
U_N f(x,t)
= (K_N(t) \ast f)(x)
\]
where
\[
K_N(t)(x) = K_N(x,t)
:= \int e^{2\pi i (x \cdot \xi +t {\xi_2^2}/{\xi_1})} \eta_N(\xi_1) \varphi(\xi_2/\xi_1) d\xi.
\]
By using a stationary phase method, it follows that for $(x,t) \in \mathbb R^2 \times I$,
\[
|K_N(t)(x)| \le C_M N^{2} (1+ |x|)^{-M} \qquad \forall M>0.
\]
Thus, for $(x,t) \in \mathbb R^2 \times I$,
\begin{equation} \label{asyt}
|U_N f(x,t)|
\le C_M ( a_N \ast |f| )(x), \quad \forall M>0,
\end{equation}
where $a_N(x) = N^2 (1+ |x|)^{-M}$.
If a unit lattice square $B \subset \mathbb R^2$ is given, then we decompose
\begin{equation} \label{ptLoc}
|U_N f|\chi_{B \times I} \lesssim |U_N(f \chi_{N^{\epsilon}B})| \chi_{B \times I} + C_M |\mathcal E_{B^c}f| \chi_{B \times I},
\end{equation}
where
\[
\mathcal E_{B^c} f := a_N \ast (|f|\chi_{\mathbb R^2 \setminus N^{\epsilon}B}).
\]
Consider $|\mathcal E_{B^c}f| \chi_{B \times I}$. If $|x-y| \gtrsim N^{\epsilon}$ then one has $a_N(x-y) \lesssim N^2N^{-\epsilon M} \le N^{-2000C}$.
So, we have
\begin{align*}
\chi_B(x) \big( a_N \ast (|f|\chi_{\mathbb R^2 \setminus N^{\epsilon}B} ) \big)(x) &= \chi_B(x) \int a_N(x-y)\chi_{\mathbb R^2 \setminus N^{\epsilon}B}(y) |f(y)| dy \\
&\lesssim N^{-1000C} \chi_B(x) \int a_N^{1/2}(x-y) |f(y)| dy \\
&\lesssim N^{-1000C} \chi_B(x) (a^{1/2}_N \ast |f|)(x).
\end{align*}
Thus, by Young's inequality we obtain
\begin{equation} \label{errest}
\Big( \sum_{B} \| \mathcal E_{B^c} f\|_{L^q(B)}^q \Big)^{1/q}
\lesssim N^{-900C} \|f\|_{p}.
\end{equation}
On the other hand, by some rough estimates (cf. Young's inequality) we see that
$\|U_N f \|_{L^q({B \times I})} \lesssim N^C \|f\|_p$. So, by embedding $\ell^p \subset \ell^q$, we have
\begin{equation} \label{roughEst}
\Big( \sum_{B} \| U_N(f \chi_{N^\epsilon B}) \|_{L^q({B \times I})}^q \Big)^{1/q}
\lesssim N^{C} \Big( \sum_{B} \|f\|_{L^p(N^\epsilon B)}^q \Big)^{1/q}
\lesssim N^{2C} \|f\|_p.
\end{equation}
Now, we consider the estimate \eqref{thenGl} by using \eqref{errest} and \eqref{roughEst}.
We define $f_{\Omega_i}$ as
\[
\widehat f_{\Omega_i}(\xi_1,\xi_2) = \hat f_i(\xi) \eta_N(\xi_1) \varphi_{\Omega_{i}}(\xi_2/\xi_1).
\]
Then we may replace $U_N^{\Omega_i} f_i$ with $U_N f_{\Omega_i}$. By \eqref{ptLoc},
\begin{align}
\underline\prod U_N f_{\Omega_i} \chi_{B \times I} &\lesssim
\underline\prod \Big( |U_N(f_{\Omega_i} \chi_{N^{\epsilon}B})| \chi_{B \times I} + C_M (\mathcal E_{B^c}f_{\Omega_i} )\chi_{B \times I}\Big) \nonumber\\
\label{dle}
&\lesssim
\underline\prod |U_N(f_{\Omega_i} \chi_{N^{\epsilon}B})| \chi_{B \times I} + C_M \mathcal E(f_{\Omega_1},f_{\Omega_2},f_{\Omega_3})\chi_{B \times I},
\end{align}
where
\begin{align*}
\mathcal E(f_{\Omega_1},f_{\Omega_2},f_{\Omega_3}) &:= \sum_{i,j,k \in \{1,2,3\}} \big(\mathcal E_{B^c}f_{\Omega_i} |U_N(f_{\Omega_j} \chi_{N^{\epsilon}B})||U_N(f_{\Omega_k} \chi_{N^{\epsilon}B})| \big)^{1/3} \\
&+ \sum_{i,j,k \in \{1,2,3\}} \big( \mathcal E_{B^c}f_{\Omega_i} \mathcal E_{B^c}f_{\Omega_j} |U_N(f_{\Omega_k} \chi_{N^{\epsilon}B})| \big)^{1/3}
+ \underline\prod \mathcal E_{B^c}f_{\Omega_i}.
\end{align*}
By Minkowski's inequality,
\begin{equation} \label{errsum}
\begin{split}
&\Big( \sum_{B} \| \mathcal E(f_{\Omega_1},f_{\Omega_2},f_{\Omega_3}) \|_{L^q({B \times I})}^q \Big)^{1/q} \\
&\qquad \lesssim \max_{i,j,k}\Big( \sum_{B} \| \big(\mathcal E_{B^c}f_{\Omega_i} |U_N(f_{\Omega_j} \chi_{N^{\epsilon}B})||U_N(f_{\Omega_k} \chi_{N^{\epsilon}B})| \big)^{1/3} \|_{L^q({B \times I})}^q \Big)^{1/q} \\
&\qquad\qquad + \max_{i,j,k}\Big( \sum_{B} \| \big( \mathcal E_{B^c}f_{\Omega_i} \mathcal E_{B^c}f_{\Omega_j} |U_N(f_{\Omega_k} \chi_{N^{\epsilon}B})| \big)^{1/3} \|_{L^q({B \times I})}^q \Big)^{1/q} \\
&\qquad\qquad\qquad +\Big( \sum_{B} \Big\| \underline\prod \mathcal E_{B^c}f_{\Omega_i} \Big\|_{L^q({B \times I})}^q \Big)^{1/q}.
\end{split}
\end{equation}
Consider the right-hand side of \eqref{errsum}.
By H\"older's inequality,
\begin{multline*}
\Big( \sum_{B} \| \big(\mathcal E_{B^c}f_{\Omega_i} |U_N(f_{\Omega_j} \chi_{N^{\epsilon}B})||U_N(f_{\Omega_k} \chi_{N^{\epsilon}B})| \big)^{1/3} \|_{L^q({B \times I})}^q \Big)^{1/q} \\
\le \Big( \sum_{B} \| \mathcal E_{B^c}f_{\Omega_i} \|_{L^q({B \times I})}^q \Big)^{1/3q} \Big( \sum_{B} \| U_N(f_{\Omega_j} \chi_{N^{\epsilon}B}) \|_{L^q({B \times I})}^q \Big)^{1/3q} \\
\times \Big( \sum_{B} \| U_N(f_{\Omega_k} \chi_{N^{\epsilon}B}) \|_{L^q({B \times I})}^q \Big)^{1/3q}.
\end{multline*}
Thus, by \eqref{errest} and \eqref{roughEst} it is bounded by
\[
\lesssim N^{-200C} \underline\prod \| f_{\Omega_i} \|_p.
\]
The second and third summations in the right-hand side of \eqref{errsum} are estimated by an analogous method. Thus,
\begin{equation} \label{mulerEst}
\Big( \sum_{B} \| \mathcal E(f_{\Omega_1},f_{\Omega_2},f_{\Omega_3}) \|_{L^q({B \times I})}^q \Big)^{1/q} \lesssim N^{-200C} \underline\prod \| f_{\Omega_i} \|_p.
\end{equation}
By \eqref{dle}
\begin{align*}
\Big\| \underline\prod U_N f_{\Omega_i} \Big\|_{L^q(\mathbb R^2 \times I)} = \Big( \sum_{B} \Big\| \underline\prod U_N f_{\Omega_i} \Big\|_{L^q({B \times I})}^q \Big)^{1/q} \\
&\lesssim \Big( \sum_{B} \Big\| \underline\prod U_N(f_{\Omega_i} \chi_{N^{\epsilon}B}) \Big\|_{L^q({B \times I})}^q \Big)^{1/q}
+ \Big( \sum_{B} \| \mathcal E(f_{\Omega_1},f_{\Omega_2},f_{\Omega_3}) \|_{L^q({B \times I})}^q \Big)^{1/q}.
\end{align*}
By \eqref{ifloc}, \eqref{mulerEst} and embedding $\ell^p \subset \ell^q$, it follows that
\[
\Big\| \underline\prod U_N f_{\Omega_i} \Big\|_{L^q(\mathbb R^2 \times I)} \lesssim (N^{\epsilon}A(N)+ N^{-200C}) \underline\prod \| f_{\Omega_i} \|_p.
\]
Since $\|f_{\Omega_i}\|_p \lesssim \|f_i\|_p$ by Young's inequality, we obtain \eqref{thenGl}.
\end{proof}
\
\subsection{}
Last of all, we will show \eqref{alG}.
We substitute \eqref{scEst} and \eqref{GTriLS} in \eqref{LpTricom1} with $(p,q)=(10/3,30/7)$. Then, it follows that
\begin{equation}
\|U_N f \|_{L^{30/7}(\mathbb R^2 \times I)} \lesssim (\gamma_1^{2\alpha - \frac{1}{5}+2\epsilon} N^{\alpha+\epsilon} + \gamma_1^{-1} \gamma_2^{2\alpha - \frac{1}{5} +2\epsilon} N^{\alpha+\epsilon} + C_{\epsilon, \epsilon_1} \gamma_2^{-60} N^{\frac{1}{10}+\epsilon}) \|f\|_{10/3}.
\end{equation}
So, by the assumption that $\alpha$ is a best exponent,
\begin{align*}
N^{\alpha} \le C(\gamma_1^{2\alpha - \frac{1}{5}+2\epsilon} + \gamma_1^{-1} \gamma_2^{2\alpha - \frac{1}{5}+2\epsilon} ) N^{\alpha} + C_{\epsilon, \epsilon_1}\gamma_2^{-60} N^{\frac{1}{10}}.
\end{align*}
Observe that $2\alpha -\frac{1}{5} \ge 0$ by \eqref{alpCon}.
We now choose $\gamma_1$, $\gamma_2$ and $N_0$ so that $C\gamma_1^{2\alpha - \frac{1}{5}+2\epsilon} \le 1/4$, $ C\gamma_1^{-1} \gamma_{2}^{2\alpha - \frac{1}{5}+2\epsilon} \le 1/4$ and $1> \gamma_1 > \gamma_2 \ge N_0^{-{\epsilon_1}/{2}}$.
Then $N^{\alpha} \le C_{\epsilon,\epsilon_1} N^{\frac{1}{10} + 30\epsilon_1} $. Thus we obtain \eqref{alG}.
\section{Acknowledgments}
The author is indebted to the anonymous referee
whose comments helped improve the presentation of the work.
The author would like to thank Andreas Seeger for informing him of his joint work with Malabika Pramanik.
\
\end{document} |
\begin{document}
\title[]{Boundedness and unboundedness results for some maximal operators on functions of bounded variation}
\author{J. M. Aldaz and J. P\'erez L\'azaro}
\address{Departamento de Matem\'aticas y Computaci\'on,
Universidad de La Rioja, 26004 Logro\~no, La Rioja, Spain.}
\email{[email protected]}
\address{Departamento de Matem\'aticas e Inform\'atica,
Universidad P\'ublica de Navarra, 31006 Pamplona, Navarra, Spain.}
\email{[email protected]}
\thanks{2000 {\em Mathematical Subject Classification.}
42B25, 26A84}
\thanks{Both authors were partially supported by Grant BFM2003-06335-C03-03 of the
D.G.I. of Spain}
\thanks{The second named author thanks the University of La Rioja for its
hospitality.}
\begin{abstract} We characterize the space $BV(I)$ of functions of bounded variation on an arbitrary interval $I\subset \mathbb{R}$, in terms of a uniform boundedness condition satisfied by the local uncentered maximal operator $M_R$ from $BV(I)$ into the Sobolev space $W^{1,1}(I)$.
By restriction, the corresponding characterization holds for
$W^{1,1}(I)$. We also show that if $U$ is open in $\mathbb{R}^d, d >1$,
then boundedness from $BV(U)$ into $W^{1,1}(U)$ fails for the local
directional maximal operator $M_T^{v}$, the local strong maximal
operator $M_T^S$, and the iterated local directional maximal
operator $M_T^{d}\circ \dots\circ M_T^{1}$. Nevertheless, if $U$
satisfies a cone condition, then $M_T^S:BV(U)\to L^1(U)$ boundedly,
and the same happens with $M_T^{v}$, $M_T^{d} \circ \dots\circ
M_T^{1}$, and $M_R$.
\end{abstract}
\maketitle
\section
{Introduction.}
\markboth{J. M. Aldaz, J. P\'erez L\'azaro}
{A characterization of $BV(I)$}
The {\em local} uncentered Hardy-Littlewood maximal operator $M_R$ is defined in the same way as the uncentered Hardy-Littlewood maximal operator $M$, save for the fact that the supremum is taken over balls of diameter bounded by $R$, rather than all balls.
The terms {\em restricted} and {\em truncated} have also been used in the literature
to designate $M_R$.
We showed in \cite{AlPe} that if $I$ is a bounded interval, then $M:BV(I)\to W^{1,1}(I)$ boundedly (Corollary 2.9). Here we complement this result by proving that for every interval
$I$, including the case of infinite length, $M_R:BV(I)\to W^{1,1}(I)$ boundedly. Of course,
no result of this kind can hold if we consider $M$ instead of $M_R$, since $\|Mf\|_1 =\infty$
whenever $f$ is nontrivial. We shall see that if
$f\in BV(I)$, then
$\|M_Rf\|_{W^{1,1}(I)}\le \max \{3 (1+2\log^+R), 4\} \|f\|_{BV(I)}$ (Theorem \ref{bd}), and
furthermore, the
logarithmic order of growth of
$c:= \max \{3 (1+2\log^+R), 4\}$ cannot be improved (cf. Remark \ref{log} below). Also, since $c$ is nondecreasing in $R$, it provides a uniform
bound for $M_T$ whenever $T\le R$. This observation leads to the following converse: Let
$f\ge 0$.
If there exists an $R>0$ and a constant $c = c(f,R)$ such that for all
$T\in (0,R]$, $M_Tf\in W^{1,1}(I)$ and $\| M_Tf\|_{W^{1,1}(I)} \le c$, then $f\in BV(I)$. A fortiori, given a locally integrable $f\ge 0$,
we have that $f\in BV(I)$ if and only if
for every $R>0$, $M_Rf\in W^{1,1}(I)$ and there exists a constant $c = c(f,R)$ such that for all
$T\in (0,R]$, $\| M_Tf\|_{W^{1,1}(I)} \le c$. By restriction to the functions $f$ that are absolutely continuous on $I$, we obtain the corresponding characterization for $W^{1,1}(I)$.
If $f$ is real valued rather than nonnegative, since $f\in BV(I)$ (respectively $f\in W^{1,1}(I)$) if and only if both
its positive and negative parts $f^+, f^-\in BV(I)$ (respectively $f^+, f^-\in W^{1,1}(I)$),
we simply apply the previous criterion to $M_Tf^+$ and $M_T f^-$.
It is natural to ask whether the uniform bound condition is necessary to ensure that $f\in BV(I)$, or whether it
is sufficient just to require that for all $T\in\mathbb{R}$,
$M_Tf\in W^{1,1}(I)$. Uniform bounds are in fact needed (see Example
\ref{counter1d}).
In higher dimensions we show that boundedness fails for the local strong maximal operator (where the supremum is taken over rectangles with sides
parallel to the axes and uniformly bounded diameters) and
the local directional maximal operator (where the supremum is taken over uniformly bounded segments parallel to a fixed vector), cf. Theorem \ref{Strong} below.
But it is an open question whether the standard local maximal operator is bounded when $d > 1$, i.e., whether given a ``sufficiently nice" open set $U\subset\Bbb R^d$, $M_R$ maps $BV(U)$ boundedly into $W^{1,1}(U)$, or even into $BV(U)$.
On the other hand, the direction from uniform
boundedness of $M_Tf^+$ and $M_Tf^-$ to $f\in BV( U)$ follows immediately from the Lebesgue theorem on differentiation of integrals, even in the cases
of the strong and directional maximal functions (cf. Theorem \ref{trivialdir}). All the maximal operators mentioned above map $BV(U)$ boundedly into $L^1(U)$, provided
$U$ satisfies a cone condition (Theorem \ref{L1bounds}), so the question of boundedness of $M_R$ on $BV(U)$ is reduced to finding out how $DM_R$ behaves.
Previous results on these topics include the following. In \cite{Ha}, Piotr Haj\l asz utilized the local centered
maximal operator to present a
characterization, unrelated to the one given here, of the Sobolev space $W^{1,1}(\Bbb R^d)$. The boundedness of the centered Hardy-Littlewood maximal operator
on the Sobolev spaces $W^{1,p}(\Bbb R^{d})$, for $1<p\le\infty$, was proven by
Juha Kinnunen in
\cite{Ki}. A local version of this result, valid on
$W^{1,p}(\Omega )$, $\Omega\subset\Bbb R^{d}$ open, appeared in \cite{KiLi}. Additional
work within this line
of research includes the papers \cite{HaOn}, \cite{KiSa}, \cite{Lu}, \cite{Bu}, \cite{Ko1}, and
\cite{Ko2}. Of course, the case $p=1$ is significantly different from
the case $p>1$.
Nevertheless, in dimension $d=1$, Hitoshi
Tanaka showed (cf. \cite{Ta}) that if
$f\in W^{1,1}(\Bbb R)$, then the uncentered
maximal function $Mf$ is differentiable a.e. and
$\| DMf\|_1 \le 2\| Df\|_1$ (it is asked in
\cite{HaOn}, Question 1, p. 169, whether an analogous result holds when $d > 1$).
In \cite{AlPe} we strengthened Tanaka's result,
showing that if $f\in BV(I)$, then $Mf$ is absolutely continuous and $\| DMf\|_1 \le | Df|(I)$, cf. \cite{AlPe}
Theorem 2.5.
Finally we mention that the local (centered and
uncentered) maximal operator has been used in connection with inequalities involving
derivatives, cf. \cite{MaSh} and \cite{AlPe}. Another instance of this type of
application is given below (see Theorem \ref{ineq}).
\section{Definitions, boundedness, and unboundedness results.}
Let $I$ be an interval and let $\lambda$ ($\lambda^d$ if
$d > 1$) be Lebesgue measure. Since functions of bounded variation always have lateral limits,
we can go from $(a,b)$ to $[a,b]$ by extension, and vice versa by restriction.
Thus, in what follows it does not matter whether $I$
is open, closed or neither, nor whether it is bounded or has infinite length.
\begin{definition} We say that $f:I \to \Bbb R$ is of bounded variation if its distributional derivative
$Df$ is a Radon measure with $|Df|(I) <\infty$, where $|Df|$ denotes the
total variation of $Df$. In higher dimensions the definition is the same, save
for the fact that $Df$ is (co)vector valued rather than real valued. More precisely,
if $U\subset \Bbb R^d$ is an open set and $f:U \to \Bbb R$ is of bounded variation,
then $Df$ is the vector valued Radon measure that satisfies, first,
$\int_U f \operatorname{div }\phi dx = - \int_U \phi\cdot dDf$ for all
$\phi\in C_c^1(U, \Bbb R^d)$, and second, $|Df|(U) <\infty$.
\end{definition}
In addition to $|Df|(I) <\infty$, it is often required that $f\in L^1(I)$. We do so
only when defining the space $BV(I)$, and likewise in higher dimensions. The next definition is given only for the one dimensional case, being entirely analogous
when $d > 1$.
\begin{definition} Given the interval $I$,
$$
BV(I) := \{f:I\to \Bbb R| f\in L^1(I), Df \mbox{ is a Radon measure,
and } |Df|(I) <\infty\},
$$
and
$$
W^{1,1}(I) := \{f:I\to \Bbb R| f\in L^1(I), Df \mbox{ is a function, and } Df\in L^1(I)\}.
$$
\end{definition}
It is obvious that $W^{1,1}(I) \subset BV(I)$ properly.
The Banach space $BV(I)$ is endowed with the norm $\|f\|_{BV(I)}:= \|f\|_1 + |Df|(I)$,
and
$
W^{1,1}(I),$ with the restriction of the $BV$ norm, i.e., $\|f\|_{W^{1,1}(I)}:= \|f\|_1 + \|Df\|_1$.
\begin{definition} The canonical representative of $f$ is the
function
$$
\overline{f}(x) := \limsup_{\lambda (I)\to 0, x\in I}\frac{1}{\lambda (I)}\int_I f(y)dy.
$$
\end{definition}
In dimension $d=1$, bounded variation admits an elementary, equivalent definition.
Given
$P=\{x_1,\dots ,x_L\}\subset I$ with
$x_1 <\dots <x_L$, the variation of the {\em function} $f:I\to \Bbb R$ associated to the partition $P$ is
defined as
$
V(f, I, P):= \sum_{j=2}^{L} |f(x_j) - f(x_{j-1})|,
$
and the variation of $f$ on $I$, as
$
V(f, I):=\sup_P V(f, I,P),
$
where the supremum is taken over all partitions $P$ of $I$.
Then $f$ is of bounded variation if $V(f, I) <\infty$.
As it stands this definition is not $L^p$ compatible, in the
sense that modifying $f$ on a set of measure zero can change $V(f,I)$,
and
even make $V(f,I) = \infty.$ To remove this defect one simply says that $f$ is
of bounded variation if $V(\overline{f}, I) <\infty$. It is then well known that $|Df|(I) = V(\overline{f}, I)$.
\begin{definition}
Let $f: I\rightarrow \mathbb{R}$ be measurable and finite a.e. The non-increasing rearrangement $f^*$ of $f$ is defined for $0<t<\lambda(I)$ as
\begin{equation*}
f^*(t) = \sup_{\lambda(E)=t} \inf_{y\in E}|f(y)|.
\end{equation*}
\end{definition}
The function $f^*$ is non-increasing and equimeasurable with
$|f|$. Furthermore,
\begin{equation}\label{rear}
\int_I f(y)dy =\int_0^{\lambda(I)}f^*(t)dt.
\end{equation}
For these and other basic properties of rearrangements see
\cite[Chapter 2]{BeSh}. We mention that the same definition can be used
for general measure spaces.
In the next definition, $\operatorname{ diam } (A)$ denotes the
diameter of a set $A$, $U\subset \Bbb R^d$ denotes an open set, and
$B\subset \Bbb R^d$ a ball with respect to some fixed norm.
\begin{definition} Given a locally integrable function $f:U\to \Bbb R$, the {\em local}
uncentered Hardy-Littlewood
maximal function $M_R f$ is defined by
$$
M_Rf(x) := \sup_{ x\in B\subset U, \operatorname{ diam } B \le R}
\frac{1}{\lambda^d (B)}\int_B |f(y)|dy.
$$
Of course, if the bound $R$ is eliminated then we get the usual uncentered Hardy-Littlewood
maximal function $Mf$.
\end{definition}
As noted in the introduction, the terms {\em restricted} and {\em truncated} have also been used in the literature
to designate $M_R$,
but we prefer {\em local} for the reasons detailed in Remark 2.4 of \cite{AlPe}.
Next we recall the well known weak type (1,1) inequality satisfied by $M$ in dimension 1, with the sharp constant 2. For all $f \in L^1(I)$ and all $ t>0$,
\begin{equation}\label{weak}
(Mf)^*(t)\le 2 \|f\|_1/t.
\end{equation}
\begin{definition} Let $U\subset \Bbb R^d$ be an open set, and let
$f:U\to \Bbb R$ be a locally integrable function.
By a rectangle $R$ we mean a rectangle with sides parallel to the
axes. The local uncentered {\em strong} Hardy-Littlewood maximal
function $M_T^S f$ is defined by
$$
M_T^Sf(x) := \sup_{ x\in R\subset U, \operatorname{ diam } (R)\le T}\frac{1}{\lambda^d (R)}\int_R |f(y)|dy.
$$
Next, let $v\in \Bbb R$ be a fixed vector, and let $J$ denote a (one dimensional) segment
in $\Bbb R^d$ parallel to $v$.
The local
uncentered {\em directional} Hardy-Littlewood
maximal function $M_T^v f$ is defined by
$$
M_T^v f(x) := \sup_{ x\in J\subset U, \lambda (J)\le T}\frac{1}{\lambda (J)}\int_J |f(y)|dy.
$$
If $v=e_i$, then we write $M_T^i$ instead of $M_T^{e_i}$.
\end{definition}
We shall also be interested in the composition $M_T^d\circ\dots\circ M_T^1$ of the $d$ local directional maximal operators
in the directions of the coordinate axes, since such composition controls $M_T^S$ pointwise.
But first, we deal with the one dimensional case.
\begin{theorem}\label{bd}
If $|f|\in BV(I )$, then $M_Rf\in W^{1,1}(I)$ and furthermore,
$\|M_Rf\|_{W^{1,1}(I)}\le 3 (1+2\log^+ R)\|f\|_{L^1 (I)}
+4\left|D|f|\right| (I).$ Hence, $\|M_Rf\|_{W^{1,1}(I)}\le \max \{3
(1+2\log^+ R), 4\} \|f\|_{BV (I)}.$
\end{theorem}
\begin{proof} Note that for any interval $J$ and any $h\in BV(J)$
\begin{equation}\label{eq3}
\|h\|_{L^\infty (J)} \le \operatorname{ essinf } |h| + |Dh| (J) \le \frac{\|h\|_{L^1(J)}}{\lambda(J)}+|Dh| (J).
\end{equation}
Now, given $f:I\to \Bbb R$, if $\left|D|f|\right|$ is a finite Radon measure on $I$, then $M_Rf$ is
absolutely continuous on $I$ and $\|DM_Rf\|_{L^1(I)}\le \left|D|f|\right|(I)$ by
\cite{AlPe}, Theorem 2.5 (we mention that for this bound on the
size of the derivative, the hypothesis $f\in L^1(I)$ is not
needed). Thus, it is enough to prove that given $|f|\in BV(I)$,
\begin{equation}\label{est}
\|M_Rf\|_{L^1(I)}\le 3(1+2\log^+R) \|f\|_{L^1(I)} + 3 |D|f||(I).
\end{equation}
We may assume that $0\le f=\bar{f}$, since this does not change
any value of $M_Rf$. Given $k\in\mathbb{Z}$ we denote by $I_k$ and $J_k$
the (possibly empty) intervals $I\cap[kR,(k+1)R)$ and $I\cap[(k-1)R,(k+2)R)$ respectively.
We also set $f_k := f|_{J_k}$. Fix $k$. Then
\begin{equation}\label{eq2}
\int_{I_k}M_Rf(x)dx =
\int_{I_k}M_Rf_k(x)dx \le
\int_{I_k}Mf_k(x)dx.
\end{equation}
Suppose first that $\lambda(I_k)\le 1$. From (\ref{eq3}) we get
\begin{equation}\label{smalllamb}
\int_{I_k}Mf_k(x)dx \le \lambda (I_k) \|f_k\|_{L^\infty (J_k)} \le
\|f_k\|_{L^1(J_k)}+|Df_k| (J_k).
\end{equation}
And if $\lambda (I_k) > 1$, then from (\ref{rear}) and (\ref{weak}) we obtain
\begin{equation}\label{biglamb}
\int_{I_k}Mf_k(x)dx = \int_0^{\lambda (I_k)} (Mf_k)^*(t)dt
= \int_0^1 +\int_1^{\lambda (I_k)}
\end{equation}
\begin{equation*}
\le\|f_k\|_{L^\infty (J_k)} +2 \|f_k\|_{L^1 (J_k)}\int_1^{\lambda (I_k)}t^{-1}dt
\end{equation*}
\begin{equation*}
\le (1+2\log R)\|f_k\|_{L^1 (J_k)} +|Df_k| (J_k).
\end{equation*}
Since the intervals $I_k$ are all disjoint, and each nonempty $I_k$ is contained in
$J_{k-1}, J_k$ and $J_{k+1}$, having empty intersection with all the other $J_i$'s, the
estimates (\ref{smalllamb}) and (\ref{biglamb}) yield
\begin{equation}\label{L1norm}
\|M_R f\|_{L^1(I)} = \sum_{-\infty}^{\infty} \int_{I_k}M_Rf(x)dx
\end{equation}
\begin{equation*}
\le \sum_{-\infty}^{\infty} \left((1+2\log^+ R)\|f_k\|_{L^1 (J_k)}
+|Df_k| (J_k)\right) \end{equation*}
\begin{equation*}
= 3\sum_{-\infty}^{\infty} (1+2\log^+ R)\|f_k\|_{L^1 (I_k)}
+3\sum_{-\infty}^{\infty}|Df_k| (I_k) \end{equation*}
\begin{equation*}
= 3 (1+2\log^+ R)\|f\|_{L^1 (I)} +3|Df| (I).
\end{equation*}
Thus,
\begin{equation*}\label{Rest}
\|M_R f\|_{BV(I)} \le 3 (1+2\log^+ R)\|f\|_{L^1 (I)} +4|Df| (I) \le
\max \{3 (1+2\log^+ R), 4\} \|f\|_{BV (I)}.
\end{equation*}
\end{proof}
\begin{remark}\label{log} The example $f:\Bbb R \to \Bbb R$ given by $f:=\chi_{[0,1]}$ shows that
the logarithmic order of growth in the preceding theorem is the correct
one. Here all the relevant quantities can be easily computed: $\|f\|_{L^1 (\Bbb R)}= 1$,
$|Df|(\Bbb R) = 2$, $\|M_R f\|_{L^1 (\Bbb R)}= 1 + 1/ R + 2\log R$ for $R\ge 1$, and
$|DM_Rf|(\Bbb R) = 2$ (for all $R>0$).
\end{remark}
As noted in \cite{AlPe}, this kind of bounds on the size of maximal functions and their
derivatives can be
used to obtain variants of the classical Poincar\'e inequality, as well as other inequalities involving
derivatives, under less regularity,
by using $DM_R f$ (a function) instead
of $Df$ (a Radon measure).
Here we present another instance of the same idea, a Poincar\'e type inequality involving $\|M_Rf\|_1$; the
argument is standard but short, so we include it for the reader's convenience.
\
Given a compactly supported function $f$, denote by
$N(f,R):= \operatorname{ supp }f + [-R, R]\subset \Bbb R$ the closed $R$-neighborhood
of its support, that is, the set of all points at distance less than or equal to $R$ from
the support of $f$.
\begin{theorem}\label{ineq}
Let $f\in BV(\Bbb R )$ be compactly supported. Then for all $R> 0$, we have
$ \|f\|_2^2 $
\begin{equation*}\label{MRpoin}
\le \min\left\{ \frac{(3 (1+2\log^+R))^2}{\lambda (N(f,R))} \|f\|_{BV(\mathbb{R})}^2+
\left(\frac{ \left(\lambda(N(f,R))\right)^2}{2}\right) \|D M_Rf\|_2^2, \lambda(N(f,R))^2
\|D M_Rf\|_2^2\right\}.
\end{equation*}
\end{theorem}
\begin{proof} Let $x< y$ be points in $\Bbb R$. By the
Fundamental Theorem of Calculus,
\begin{equation*}
M_R f(y) - M_R f(x) = \int_x^y DM_Rf(t) dt \le \|D M_Rf\|_1.
\end{equation*}
Squaring and integrating with respect to $x$ and $y$ over
$N(f,R)^2$, we get
\begin{equation*}
\|M_Rf\|_2^2 \le
\frac{\|M_Rf\|_1^2}{\lambda (N(f,R))}+ \|D M_Rf\|_1^2\left( \frac{\lambda (N(f,R))}{2}\right).
\end{equation*}
Since $ \|f\|_2^2 \le \|M_Rf\|_2^2$, using (\ref{L1norm}) and either Jensen or H\"older inequality we obtain
\begin{equation*}\label{b}
\|f\|_2^2 \le \frac{(3 (1+2\log^+R))^2}{\lambda (N(f,R))} \|f\|_{BV(\mathbb{R})}^2+
\left(\frac{ \left(\lambda(N(f,R))\right)^2}{2}\right) \|D M_Rf\|_2^2.
\end{equation*}
On the other hand, integrating $M_R f(y) = \int_{-\infty}^y DM_Rf(t) dt \le \|D M_Rf\|_1$
and repeating the previous steps we get
\begin{equation*}\label{a}
\|f\|_2^2 \le \lambda(N(f,R))^2
\|D M_Rf\|_2^2.
\end{equation*}
\end{proof}
\begin{remark} In connection with the preceding inequality, we point out that if
$1 < p < \infty$ and $f\in W^{1,p}(\Bbb R )$, then $\|D M_Rf\|_p\le c_p \|D f\|_p$,
with $c_p$ independent of $R$. Of course, the interest of the result lies in the
fact that we can have $\|D M_Rf\|_p < \infty$ even if $Df$ is not a function (standard
example, $f=\chi_{[0,1]}$). The cases $p=1,\infty$ are handled in \cite{AlPe}, Theorems
2.5 and 5.6. There we have $\|D M_Rf\|_p\le \|D f\|_p$. To see why $\|D M_Rf\|_p\le c_p \|D f\|_p$ holds
with $c_p$ independent of $R$, repeat
the sublinearity argument from \cite{Ki}, Remark 2.2 (i) (cf. also
\cite{HaOn}, Theorem 1) using $M_Rf\le Mf$ to remove the dependency of the constant on
$R$.
\end{remark}
We shall consider next the local strong, directional, and iterated
directional maximal operators, proving boundedness from $BV(U)$ into
$L^1(U)$ and lack of boundedness from $BV(U)$ into $BV(U)$. Of
course, since the strong maximal operator dominates pointwise (up to
a constant factor) the maximal operator associated to an arbitrary
norm, we also obtain the boundedness of $M_R$ from $BV(U)$ into
$L^1(U)$ .
\begin{remark}\label{alt} It is possible to define $BV(U)$, where $U$ is open in $\mathbb{R}^d$,
without knowing a priori
that $|Df|$ is a Radon measure: Write
\begin{equation}\label{defvar}
\int_U |Df| := \sup\left\{\int_U f \operatorname{div} g: g\in C^1_c (U,\mathbb{R}^d), \|g\|_\infty \le 1\right\}.
\end{equation}
Then $f\in BV(U)$ if $f\in L^1(U)$ and $\int_U |Df| < \infty$ (cf., for instance,
Definition 1.3, pg. 4 of \cite{Giu}, or Definition 3.4, pg. 119 and Proposition 3.6,
pg. 120 of \cite{AFP}). Integration by parts immediately yields
that if $f\in C^1(U)$, then
\begin{equation*}
\int_U |Df| = \int_U |\nabla f|dx,
\end{equation*}
(this is Example 1.2 of \cite{Giu}).
With this approach
one has the following semicontinuity and
approximation results (cf. Theorems 1.9 and 1.17 of \cite{Giu}),
without any reference to Radon measures.
\end{remark}
\begin{theorem}\label{semi} If a sequence of functions $\{f_n\}$ in $BV(U)$ converges in $L^1_{loc}(U)$ to $f$, then $\int_U|Df|\le \liminf_n \int_U |Df_n|$.
\end{theorem}
\begin{theorem}\label{approx} If $f\in BV(U)$, then there exists a sequence of functions $\{f_n\}$ in $BV(U)\cap C^\infty(U)$ such that $\lim_n \int_U |f -f_n| dx = 0$ and
$\int_U|Df|= \lim_n \int_U |Df_n|$.
\end{theorem}
Note that by passing to a subsequence, we may also assume that $\{f_n\}$ converges
to $f$ almost everywhere.
If one uses the definition of $BV(U)$ given in Remark \ref{alt}, the fact that $Df$ is
a Radon measure is obtained a posteriori via the Riesz Representation Theorem. Then
of course $\int_U |Df| = |Df|(U)$.
\begin{definition} A finite cone $C$ of height $r$, vertex at $0$,
axis $v$, and aperture angle $\alpha$,
is the subset of $B(0,r)$ consisting of all vectors $y$ such that
the angle between $y$ and $v$ is less than or equal to $\alpha /2$.
A finite cone $C_x$ with vertex at $x$,
is a set of the form $x + C$, where the vertex of $C$ is $0$. Finally, an open set $U$ satisfies a cone condition if there exists a fixed finite cone $C$ such that every
$x\in U$ is the vertex of a cone obtained from $C$ by a rigid motion.
\end{definition}
We shall assume a cone condition in order to have available the following special case of
the Sobolev embedding theorem (see, for instance, Theorem 4.12, pg. 85 of \cite{AdFo}).
Of course, other type of conditions which also ensure the existence of such an embedding
could be used instead (e.g., $U$ is an extension domain). The next Theorem and its Corollary are well known and included here for the sake of
readability.
\begin{theorem}\label{Sobemb} Let the open set $U\subset \mathbb{R}^d$ satisfy a cone condition. Then
there exists a constant $c>0$, depending only on $U$, such that for all $f\in W^{1,1}(U)$,
$\|f\|_{L^{\frac{d}{d-1}}(U)}\le c \|f\|_{W^{1,1}(U)}$.
\end{theorem}
\begin{corollary}\label{BVemb} Let the open set $U\subset \mathbb{R}^d$ satisfy a cone condition. Then
there exists a constant $c>0$, depending only on $U$, such that for all $f\in BV(U)$,
$\|f\|_{L^{\frac{d}{d-1}}(U)}\le c \|f\|_{BV(U)}$.
\end{corollary}
\begin{proof} Let $\{f_n\}$ be a sequence of functions in $BV(U)\cap C^\infty(U)$ such that
$f_n\to f$ a.e., $\lim_n \int_U |f -f_n| dx = 0$, and
$\int_U|Df|= \lim_n \int_U |\nabla f_n|dx$. By Fatou's lemma and Theorem \ref{Sobemb},
$\|f\|_{L^{\frac{d}{d-1}}(U)}\le \liminf_n \|f_n\|_{L^{\frac{d}{d-1}}(U)}
\le \lim_n c \|f_n\|_{W^{1,1}(U)}= c \|f\|_{BV(U)}$.
\end{proof}
The next definition and lemma are valid for an arbitrary set $E\subset \mathbb{R}^k$,
with measure defined by the restriction of the Lebesgue outer measure to the
$\sigma$-algebra of all intersections of Lebesgue
sets with $E$.
\begin{definition}\label{llog} Let $E\subset \mathbb{R}^k$ and $r\ge 1$. A function $g$ belongs to the Banach space $L(\log^+L)^r (E)$ if for
some $t > 0$ we have
\begin{equation}\label{condllogl}
\int \frac{|g(x)|}{t} \left(\log^+ \frac{|g(x)|}{t}\right)^r dx< \infty.
\end{equation}
In that case the Luxemburg norm of $g$ is
\begin{equation*}
\|g\|_{L(\log^+L)^r} := \inf\left\{t > 0: \int \frac{|g(x)|}{t} \left(\log^+ \frac{|g(x)|}{t}\right)^r dx \le 1\right\}.
\end{equation*}
\end{definition}
Note that by monotone convergence the inequality
\begin{equation*}
\int \frac{|g(x)|}{t} \left(\log^+ \frac{|g(x)|}{t}\right)^r dx \le 1
\end{equation*}
holds when $t = \|g\|_{L(\log^+L)^r}$.
We mention that on finite measure spaces, the condition of
Definition \ref{llog} is equivalent to the seemingly stronger
requirement that for all $t> 0$, (\ref{condllogl}) hold.
The next lemma must be well known, but we include it for the reader's convenience.
While stated for all $r\ge 1$, we only need the cases $r=1$ (used in
Remark \ref{better}), $r=d-1$
(used in Theorem \ref{trivialdir}) and $r=d$ (used in Theorem \ref{L1bounds}).
\begin{lemma}\label{logemb} Let $E\subset \mathbb{R}^d$, where $d\ge 2$, and let $r\ge 1$. If $g\in L^{\frac{d}{d-1}}(E)$, then $g\in L(\log^+L)^r(E)$ and $\|g\|_{L(\log^+L)^r(E)}\le \left(r(d - 1)\right)^{\frac{r(d-1)}d}\|g\|_{L^{\frac{d}{d-1}}(E)}.$
\end{lemma}
\begin{proof} Note that $\log^+ y \le y^\alpha/\alpha$ for
all $y,\alpha>0$, so given $t>0$, if we set $y = \frac{|g(x)|}{t}$ and $\alpha = \frac{1}{r(d-1)}$,
we get
\begin{equation*}
\int \frac{|g(x)|}{t} \left(\log^+ \frac{|g(x)|}{t}\right)^r
dx\le
\left(r(d - 1)\right)^{r}\left\|\frac{g}{t}\right\|_{L^{\frac{d}{d-1}}(E)}^{\frac{d}{d-1}}.
\end{equation*}
Now let $t_0 < \|g\|_{L(\log^+L)^r}$. Then
$1 < \left(r(d - 1)\right)^{r}\left\|\frac{g}{t_0}\right\|_{L^{\frac{d}{d-1}}(E)}^{\frac{d}{d-1}}$,
from which it follows that $\|g\|_{L(\log^+L)^r(E)}\le \left(r(d - 1)\right)^{\frac{r(d-1)}d}\|g\|_{L^{\frac{d}{d-1}}(E)}.$
\end{proof}
The proof of the next result is similar to that of Theorem \ref{bd}. We indicate the main differences: 1) In Theorem \ref{bd}, since $d=1$, no cone condition appears and we give a fully explicit constant; 2) when $d = 1$, we use the trivial embedding of
$BV(I)$ in $L^\infty$ given in (\ref{eq3}) instead of Corollary \ref{BVemb} and Lemma \ref{logemb}; 3) for $d > 1$, bounds on
the distributional gradient of the corresponding maximal operator are either false or not known.
\begin{theorem}\label{L1bounds} Let the open set $U\subset \Bbb R^d$ satisfy
a cone condition. For every $R>0$, the local iterated directional maximal operator
$M_R^{d}\circ \dots\circ M_R^{1}$ and the
local strong maximal operator $M_R^S$ map $BV(U)$ into $L^1(U)$
boundedly. Hence, so do the following operators: The standard local uncentered maximal operator
$M_R$ associated to an arbitrary norm, the local directional maximal operator $M_R^v$, and
$M_R^{i_k}\circ \cdots\circ M_R^{i_1}$, where $1\le k < d$ and $i_1 < \dots <i_k$.
In fact, if $S_R$ is any of the above
maximal operators, then there exists a constant $c > 0$, which depends only on the open
set $U$, such that for all $f\in BV(U)$,
\begin{equation}\label{conc}
\|S_Rf\|_{L^1(U)} \le c \left(\|f\|_{BV(U)} + (\log^+R)^d \|f\|_{L^1(U)} \right).
\end{equation}
\end{theorem}
\begin{proof}
By Corollary \ref{BVemb}, it is enough to
show that
\begin{equation}\label{LplusL}
\|S_Rf\|_{L^1(U)} \le c \left(\|f\|_{L^{d/(d-1)}(U)} + (\log^+R)^d
\|f\|_{L^1(U)} \right).
\end{equation}
Now we can assume that $U=\mathbb{R}^d$. Else, we
extend
$f$ without changing the right hand side of (\ref{LplusL}), by setting $f= 0$ on $\mathbb{R}^d\setminus U$.
The reason we are interested in having
$U=\mathbb{R}^d$ is that later on, we will use the
pointwise equivalence on $\mathbb{R}^d$ of
maximal functions associated to different norms.
By $\eta$ we denote a generic $d$-tuple of integers $(n_1,\ldots,n_d)\in
\mathbb{Z}^d$. For $\eta\in\mathbb{Z}^d$, we define the cubes
$I_{\eta}=[n_1 R,(n_1+1)R)\times\dots\times [n_d R,(n_d +1)R)$ and
$J_{\eta}=[(n_1-1)R,(n_1+2)R)\times\dots\times [(n_d-1)R,(n_d+2)R)$.
Set $f_{\eta}=f|_{J_{\eta}}$.
We want to estimate
\begin{equation*}
\alpha_{\eta}:= \int_{I_{\eta}}M^d_R\circ\cdots\circ M^1_Rf(x)dx
\end{equation*}
\begin{equation*}
=
\int_{I_{\eta}}M^d_R\circ\cdots\circ M^1_Rf_{\eta}(x)dx \le
\int_{I_{\eta}}M^d\circ\cdots\circ M^1f_{\eta}(x)dx.
\end{equation*}
From \cite[\S I. Theorem 1]{Fa}, we get
\begin{equation}\label{weakt}
\lambda^d(\{M^d\circ\cdots\circ M^1f_{\eta}>4t\})\le C
\int_{J_{\eta}} \frac{|f_{\eta}(x)|}{t}
\left(\log^+\frac{|f_{\eta}(x)|}{t}\right)^{d-1} dx,
\end{equation}
where $C$ is a constant that depends only on $d$. Moreover, calling
$A= \|f_{\eta}\|_{L(\log^+L)^{d}}$ and using (\ref{weakt}) we obtain
\begin{equation*}
\alpha_{\eta} = 4 \int_0^\infty \lambda^d(I_{\eta}\cap \{M^d_R\circ\cdots\circ M^1_Rf_{\eta}(x)>4t\})dt
=4\int_0^{A/R^d} + 4 \int_{A/R^d}^\infty
\end{equation*}
\begin{equation}\label{tres}
\le 4 A + 4C\int_{A/R^d}^\infty \int_{J_{\eta}}
\frac{|f_{\eta}(x)|}{t}
\left(\log^+\frac{|f_{\eta}(x)|}{t}\right)^{d-1} dx dt = 4A + B.
\end{equation}
Let $\tilde{J}_{\eta}:=J_{\eta}\cap\{|f(x)|>A/R^d\}$. Applying the
Fubini-Tonelli Theorem and the change of variable $y(t)
=\log\frac{|f_{\eta}(x)|}{t}$ we have
\begin{equation*}
B = 4 C\int_{\tilde{J}_{\eta}} \int_{A/R^d}^{|f_{\eta}(x)|}
\frac{|f_{\eta}(x)|}{t}
\left(\log\frac{|f_{\eta}(x)|}{t}\right)^{d-1}dt dx
\end{equation*}
\begin{equation*}
= 4 C\int_{\tilde{J}_{\eta}}
|f_{\eta}(x)|dx\int_0^{\log^+\frac{|f_{\eta}(x)|R^d}{A}}
y^{d-1}dy
\end{equation*}
\begin{equation*}= \frac{4 C}{d}\int_{\tilde{J}_{\eta}}
|f_{\eta}(x)|\left(\log\frac{|f_{\eta}(x)|}{A} + d \log R\right)^d dx
\end{equation*}
\begin{equation*}\le\frac{4 C 2^d}{d}\int_{{J}_{\eta}}
|f_{\eta}(x)|\left(\left(\log^+\frac{|f_{\eta}(x)|}{A}\right)^d + d^d \left(\log^+ R\right)^d\right)dx
\end{equation*}
\begin{equation*}
= \frac{4 C 2^d }{d} \left(A\int_{J_{\eta}}
\frac{ |f_{\eta}(x)|}{A}\left(\log^+ \frac{|f_{\eta}(x)|}{A}\right)^d
dx + d^d\|f_{\eta}\|_{L^1(J_{\eta})} (\log^+R)^d\right)
\end{equation*}
\begin{equation}\label{last}
\le \frac{4 C 2^d }{d}\left(A + d^d\|f_{\eta}\|_{L^1(J_{\eta})} (\log^+
R)^d\right).
\end{equation}
Putting together (\ref{tres}), (\ref{last}), and Lemma
\ref{logemb}, we get
\begin{equation*}
\alpha_{\eta}\le C^\prime \left(\|f_{\eta}\|_{L^{d/(d-1)}(J_{\eta})}+ \|f_{\eta}\|_{L^1(J_{\eta})}(\log^+R)^d\right).
\end{equation*}
Next we sum over all $d$-tuples $\eta\in \mathbb{Z}^d$. Since a
point in $\mathbb{R}^d$ cannot be contained in more than $3^d$
different cubes of type $J$, we conclude that for some $c > 0$,
\begin{equation}\label{logd}
\int_{\mathbb{R}^d} M_R^d\circ \cdots\circ M_R^1f(x) dx \le c \left( \|f\|_{L^{d/(d-1)}(\mathbb{R}^d)}+ \|f\|_{L^1(\mathbb{R}^d)}(\log^+R)^d\right).
\end{equation}
Since $M_R^Sf(x) \le M_R^d\circ \cdots\circ M_R^1f(x)$ for almost all $x
\in \mathbb{R}^d$, the same inequality holds for $M_R^Sf$. Likewise,
$M_R^S$
dominates pointwise the maximal operator $M_R$ associated to
the $l^\infty$ norm (i.e., to cubes), so (\ref{conc}) also holds for
$M_R$. Since local maximal operators associated to different norms
are pointwise comparable by the equivalence of all norms in
$\mathbb{R}^d$, inequality (\ref{conc}) holds, perhaps with a
different value of $c$, for the maximal operator $M_R$ defined by
any given norm. Finally, if $1\le k < d$ and $i_1 < i_2 <\dots<i_k$,
we have $M_R^{i_k}\circ \cdots\circ M_R^{i_1}f(x) \le M_R^d\circ
\cdots\circ M_R^1f(x)$ for all $x\in \mathbb{R}^d$, and $M_R^v$
obviously satisfies the same bounds as $M^1_R$, so (\ref{conc})
holds for all the operators under consideration.
\end{proof}
\begin{remark}\label{better} It is possible to obtain bounds for $M_R$ directly, using essentially
the same proof as in the previous theorem, rather than deriving them from the corresponding
bounds for $M^S_R$. In fact, a direct approach yields a lower order of growth, $O(\log R)$ instead
of $O((\log R)^d)$. More precisely, replace in the proof
$L(\log^+L)^d$ by $L(\log^+L)$, and
inequality (\ref{weakt}) by the following well known refinement (due to N. Wiener, cf. \cite[Theorem 4$^\prime$]{Wi}) of the
weak type inequality:
\begin{equation*}
\lambda^d(\{Mf>t\})\le \frac{C}{t}\int_{\{|f|>t/2\}}|f(x)|dx
\qquad \textnormal{for all }t>0.
\end{equation*}
Then argue as before, to get
\begin{equation*}
\int_U M_R f(x) dx \le c \left( \|f\|_{BV(U)}+ \|f\|_{L^1(U)}\log^+R\right).
\end{equation*}
An analogous remark can be made with respect to the operators $M_R^{i_k}\circ \cdots\circ M_R^{i_1}$ and $M_R^v$, obtaining orders of growth $O(\log^k R)$ and $O(\log R)$ respectively.
\end{remark}
\begin{theorem}\label{Strong} Let $d > 1$ and let $U\subset\mathbb{R}^d$ be open. Given any $R>0$, the following maximal operators are unbounded on $BV(U)$: The local directional
maximal operator $M_R^v$, the local iterated directional maximal operator $M_R^{d}\circ \dots\circ M_R^{1}$, and the local strong maximal operator $M_R^S$.
\end{theorem}
\begin{proof} We will show that if $S_R$ denotes any of the maximal operators considered
in the statement of the theorem, then there exists a sequence of characteristic functions
$f_{1/n}$ such that $\lim_{n\to\infty}\|f_{1/n}\|_{BV(U)} = 0$ and $$\lim_{n\to\infty}\frac{|DS_R(f_{1/n})|(U)}{\|f_{1/n}\|_{BV(U)}} =
\infty.$$
In fact, the same result holds for the corresponding nonlocal maximal operators,
which can be included in the notation $S_R$ by allowing the possibility $R=\infty$, as
we do in this proof. So we take $0<R\le\infty$. Actually it is enough to consider
$2 < R\le \infty$, since the argument we give below adapts to smaller values for $R$ just by rescaling. Similarly it is enough to consider the case $U=\mathbb{R}^d$. We start with
$M^v_R$. By a rotation we may assume that $v = e_1$. For notational simplicity,
we
will write the proof for the case $d = 2$ only.
Fix $R$. Given $0<\delta< 1$, set $f_\delta(x):=\chi_{[0,\delta]^2}(x)$.
Then
\begin{equation*}
\|f_\delta\|_1=\delta^2
\end{equation*}
and, since $|Df_\delta|(\mathbb{R}^2)$ is just the perimeter of the square $[0,\delta]^2$
(cf., for instance, Exercise 3.10 pg. 209 of \cite{AFP}),
\begin{equation*}
|Df_\delta|(\mathbb{R}^2)=4\delta.
\end{equation*}
Thus
\begin{equation}\label{une}
\|f_\delta\|_{BV(\mathbb{R}^2)} = O(\delta) \textnormal{ when }
\delta \rightarrow 0.
\end{equation}
Next, let $\delta\le x\le 1$, and $0\le y\le \delta$. It is then easy to check that
\begin{equation*}
M_R^1(f_\delta)(x,y)=\frac{\delta}{x}.
\end{equation*}
Given $\delta \le t < 1$, the level sets $E_t :=\{M_R^1 (f_\delta ) > t\}$ are rectangles,
with perimeter
$$|D\chi_{E_t}|(\mathbb{R}^2) \ge 2\delta + \frac{2\delta}{t}.$$
By the coarea formula for BV functions
(cf. Theorem 3.40, pg. 145 of \cite{AFP}), we have
\begin{equation}\label{coar}
|DM^1_R f_\delta|(\mathbb{R}^2)=\int_{-\infty}^{\infty}
|D\chi_{E_t}|(\mathbb{R}^2) dt \ge \int_{\delta}^{1}
|D\chi_{E_t}|(\mathbb{R}^2) dt \ge 2\delta \int_{\delta}^{1}\left( 1
+ \frac{1}{t}\right) dt = \Theta\left(\delta\log
\frac1{\delta}\right),
\end{equation}
where $\Theta$ stands for the exact order of growth.
From (\ref{une}) and (\ref{coar}) we obtain
\begin{equation}\label{unbound}
\frac{|DM_R^1(f_\delta)|(\mathbb{R}^2)}{\|f_\delta\|_{BV(\mathbb{R}^2)}}\rightarrow
\infty \textnormal{ when }\delta\rightarrow 0,
\end{equation}
as was to be proven.
Note next that on $[0,1]\times [0,\delta]$ the three maximal functions $M^1_Rf_\delta$,
$M_R^2\circ M_R^1 f_\delta$ and $M_R^S f_\delta$ take the same values, from which
it easily follows that for $\delta \le t < 1$,
$$|D\chi_{\{M_R^2\circ M_R^1 (f_\delta ) > t\}}|(\mathbb{R}^2) \ge 2\delta + \frac{2\delta}{t}$$
and
$$|D\chi_{\{M_R^S (f_\delta ) > t\}}|(\mathbb{R}^2) \ge 2\delta + \frac{2\delta}{t}.$$
Thus, the analogous statement to (\ref{unbound}) holds for $M_R^2\circ M_R^1 f_\delta$ and $M_R^S f_\delta$ also.
\end{proof}
A standard mollification argument shows that the preceding maximal operators are
not bounded on $W^{1,1}(U)$ either.
\section{Converses and a one dimensional characterization.}
Recall that $f^+$ and $ f^-$ denote respectively the positive and negative parts of $f$.
Now, for any open set $U\subset \Bbb R^d$, $f\in BV(U)$ if and
only if both $f^+\in BV(U)$ and $f^-\in BV(U)$. This can be seen as
follows: If $f\in BV(U)$, it is immediate from the definition
\ref{defvar} contained in Remark \ref{alt} that $\int_U |Df| \ge
\int_U |D(f^+)|$ and $\int_U |Df| \ge \int_U |D(f^-)|$, so $f^+,
f^-\in BV(U)$.
On the other hand, if both $f^+, f^-\in
BV(U)$, then there are sequences $\{g_n\}$ and $\{h_n\}$ of
$C^\infty$ functions that approximate $f^+$ and $f^-$ respectively,
in the sense of Theorem \ref{approx}. Since $g_n - h_n\to f$ in
$L^1(U)$, by semicontinuity $|Df|(U) \le \liminf_n \int_U |\nabla
(g_n - h_n)| dx \le \lim_n \int_U |\nabla g_n| dx +\lim_n \int_U
|\nabla h_n| dx = |D(f^+)|(U) + |D(f^-)|(U).$ Hence $f\in BV(U)$.
\begin{theorem}\label{trivialdir}
Let $U\subset \Bbb R^d$ be an open set
and let $f:U\to \Bbb R $ be locally integrable. Suppose that there
exists a sequence $\{a_n\}_1^\infty$ with $\lim_n a_n = 0$ and a
constant $c$ such that for all $n$,
$M_{a_n}f^+\in W^{1,1}(U)$, $M_{a_n}f^-\in W^{1,1}(U)$, $\| M_{a_n}f^+\|_{W^{1,1}(U)} \le c$, and $\| M_{a_n}f^-\|_{W^{1,1}(U)} \le c$.
Then $f\in BV(U)$. The same happens if instead of $M_R$ we consider
either the local directional maximal operator, or, under the
additional hypothesis that $U$ satisfies a cone condition, the local
strong
maximal operator.
\end{theorem}
\begin{proof} Consider first $f^+$. By the Lebesgue Theorem on differentiation of integrals we have that
$\lim_n M_{a_n} f^+ = f^+$ a.e., so by dominated convergence, $M_{a_n} f^+ \to f^+$ in
$L^1(U)$, and by Theorem \ref{semi}, $\int_U|Df^+| \le \liminf_n
\int_U |DM_{a_n} f^+| \le c <\infty$. Repeating the argument for $f^-$ we get
$|Df|(U) \le |Df^+|(U) + |Df^-|(U) <\infty$.
The result for the local strong maximal operator follows from
the well known Theorem of Jessen, Marcinkiewicz and Zygmund (\cite{JMZ}) stating that
basis of rectangles (with sides parallel to the axes)
differentiates $L(\log ^+ L)^{d-1}_{loc}(U)$, and hence $BV(U)$ (cf. Corollary \ref{BVemb} and Lemma \ref{logemb}; for the first embedding we
use the cone condition). Finally, the weak type $(1,1)$ boundedness
of $M_T^v$ (which is obtained from the one dimensional result and
the Fubini-Tonelli Theorem) also entails, by the standard argument,
the corresponding differentiation of integrals result, so $\lim_n
M^v_{a_n} f^+ = f^+$ and $\lim_n M^v_{a_n} f^- = f^-$.
\end{proof}
For intervals $I\subset \mathbb{R}$ we have the following characterization.
\begin{theorem}\label{charact}
Let $f:I\to \Bbb R $ be locally integrable. Then the following are equivalent:
a) $f\in BV(I )$.
b) $M_Rf^+\in W^{1,1}(I)$, $M_Rf^-\in W^{1,1}(I)$,
$\|M_Rf^+\|_{W^{1,1}(I)}\le 3(1+2\log^+(R)) \|f^+\|_{L^1(I)} + 4 |Df^+|(I),$ and $\|M_Rf^-\|_{W^{1,1}(I)}\le 3(1+2\log^+(R)) \|f^-\|_{L^1(I)} + 4 |Df^-|(I).$
c) There exists a sequence $\{a_n\}_1^\infty$ with $\lim_n a_n = 0$ and a constant
$c = c(f, \{a_n\}_1^\infty)$ such that for all $n$,
$M_{a_n}f^+\in W^{1,1}(I)$, $M_{a_n}f^-\in W^{1,1}(I)$, $\| M_{a_n}f^+\|_{W^{1,1}(I)} \le c$, and $\| M_{a_n}f^-\|_{W^{1,1}(I)} \le c$.
d) There exists an $R>0$ and a constant $c = c(f, R)$ such that for all
$T\in (0,R]$, $M_Tf^+\in W^{1,1}(I)$, $M_Tf^-\in W^{1,1}(I)$, $\| M_Tf^+\|_{W^{1,1}(I)} \le c$, and $\| M_Tf^-\|_{W^{1,1}(I)} \le c$.
e) For every $R>0$ there exists a constant $c = c(f,R)$ such that for all
$T\in (0,R]$, $M_Tf^+\in W^{1,1}(I)$, $M_Tf^-\in W^{1,1}(I)$, $\| M_Tf^+\|_{W^{1,1}(I)} \le c$, and $\| M_Tf^-\|_{W^{1,1}(I)} \le c$.
If $f:I\to \Bbb R $ is absolutely continuous, then
a') $f\in W^{1,1}(I)$ is equivalent to b), c), d) and e).
\end{theorem}
\begin{proof} The implications b) $\to $ e), e) $\to $ d) and d) $\to $ c) are obvious,
and a) $\to$ b) is the content of Theorem \ref{bd}. Without
loss of generality we may take $I$ to be open, so c) $\to $ a) is a special case
of Theorem \ref{trivialdir}. Finally, the last claim follows from the fact that
$f\in W^{1,1}(I)$ if and only if $f$ is absolutely continuous and
$f\in BV(I)$.
\end{proof}
Let $f:I\to \Bbb R $ be locally integrable.
By Theorem \ref{bd}, if $|f|\in BV(I )$ then for every $R >0$,
$M_Rf\in W^{1,1}(I)$ boundedly, with bound depending on $R$. Thus it is natural to ask whether
the latter condition alone suffices to ensure that $|f|\in BV(I )$. In other words, we
are asking whether the uniform bound condition appearing in parts c), d) and e) of
Theorem \ref{charact} is really
needed. The following example shows that the answer is positive.
\begin{example}\label{counter1d}{\em There exists a non-negative function $f\in L^1(\Bbb R)\setminus BV(\Bbb R)$ such that for all
$R>0$, $M_Rf\in W^{1,1}(\Bbb R )$.}
\begin{proof} Let $A$ be the closed set $[-1000,0]\cup\left(\cup_{n=0}^\infty [2^{-n}, 2^{-n} + 2^{-n-1}]\right)$,
and let $f$ be the upper semicontinuous function $\chi_A$. Fix $R>0$. Clearly $M_R f \ge f$ everywhere,
so by Lemma 3.4 of \cite{AlPe}, $M_Rf$ is a continuous function. Also, $M_R f|_{\mathbb{R}\setminus (0,2^{-n})}$ is Lipschitz, with $\operatorname{Lip }(M_Rf)\le \max\{R^{-1}, 2^{n+1}\}$, by Lemma 3.8 of \cite{AlPe}. Hence, if $E\subset \Bbb R$ has measure zero, so does $M_Rf(E)$, being a countable union of sets of measure zero. Next we show that $|DM_R f|(\Bbb R) < \infty$. Let $n\ge 1$ . On intervals of the
form $( 2^{-n} + 2^{-n-1}, 2^{-n+1})$, if $R > 2^{-n-2}$ then $M_Rf > f$, so by Lemma 3.6 of \cite{AlPe}
there exists an
$x_n\in ( 2^{-n} + 2^{-n-1}, 2^{-n+1})$ such that
$M_Rf$ is decreasing on $( 2^{-n} + 2^{-n-1},x_n)$ and increasing on $(x_n, 2^{-n+1})$.
Taking this fact into account, it is easy to see
that $V(M_R f, \mathbb{R})$ is decreasing in $R$, so we may suppose $R \in (0,1)$.
Select $N\in \Bbb N$ such that $2^{-N+1} < R$. Then for $n > N$,
\begin{equation*}
V(M_Rf,( 2^{-n} + 2^{-n-1}, 2^{-n+1}))
= 2\left(1-M_Rf(x_n)\right)
\le
2\left(1-\frac{R- 2^{-n+1}}{R}\right)\le \frac{ 2^{-n+2}}{R}.
\end{equation*}
Hence $|DM_R f|(\Bbb R ) \le 2 + 2(N+1) <\infty$.
Since $M_Rf$ is continuous, of bounded variation, and maps measure
zero sets into measure zero sets, by the Banach Zarecki Theorem it
is absolutely continuous, so $M_Rf\in W^{1,1}(\Bbb R )$.
\end{proof}
Of course, using $\Bbb R$ above is not necessary, the example can be easily adapted to any
other interval $I$.
\end{example}
\end{document} |
\begin{document}
\title{Spectral gap global solutions for degenerate Kirchhoff equations}
\begin{abstract}
We consider the second order Cauchy problem
$$u''+\m{u}Au=0,
\hspace{2em}
u(0)=u_{0},\quad
u'(0)=u_{1},$$
where $m:[0,+\infty)\to[0,+\infty)$ is a continuous function, and
$A$ is a self-adjoint nonnegative operator with dense domain on a
Hilbert space.
It is well known that this problem admits local-in-time solutions
provided that $u_{0}$ and $u_{1}$ are regular enough, depending on
the continuity modulus of $m$, and on the strict/weak
hyperbolicity of the equation.
We prove that for such initial data $(u_{0},u_{1})$ there exist
two pairs of initial data $(\overline{u}_{0},\overline{u}_{1})$,
$(\widehat{u}_{0},\widehat{u}_{1})$ for which the solution is
global, and such that $u_{0}=\overline{u}_{0}+\widehat{u}_{0}$,
$u_{1}=\overline{u}_{1}+\widehat{u}_{1}$.
This is a byproduct of a global existence result for initial data
with a suitable spectral gap, which extends previous results
obtained in the strictly hyperbolic case with a smooth
nonlinearity $m$.
\vspace{1cm}
\noindent{\bf Mathematics Subject Classification 2000 (MSC2000):}
35L70, 35L80, 35L90.
\vspace{1cm}
\noindent{\bf Key words:} uniqueness, integro-differential hyperbolic
equation, degenerate hyperbolic equation, continuity modulus,
Kirchhoff equations, Gevrey spaces.
\end{abstract}
\section{Introduction}
Let $H$ be a real Hilbert space. For every $x$ and $y$ in $H$, let
$|x|$ denote the norm of $x$, and let $\langle x,y\rangle$ denote the
scalar product of $x$ and $y$. Let $A$ be an unbounded linear
operator on $H$ with dense domain $D(A)$. We always assume that $A$
is self-adjoint and nonnegative, so that for every $\alpha\geq 0$ the
power $A^{\alpha}$ is defined in a suitable domain $D(A^{\alpha})$.
Given a continuous function $m:[0,+\infty)\to[0,+\infty)$ we consider
the Cauchy problem
\begin{equation}
u''(t)+\m{u(t)}Au(t)=0,
\hspace{2em}\forall t\in[0,T),
\label{pbm:h-eq}
\end{equation}
\begin{equation}
u(0)=u_0,\hspace{3em}u'(0)=u_1.
\label{pbm:h-data}
\end{equation}
It is well known that (\ref{pbm:h-eq}), (\ref{pbm:h-data}) is the
abstract setting of the Cauchy-boundary value problem for the
quasilinear hyperbolic integro-differential partial differential
equation
\begin{equation}
u_{tt}(t,x)-
m{\left(\int_{\Omega}\left|\nabla u(t,x)\right|^2\,dx\right)}
\Delta u(t,x)=0
\hspace{2em}
\forall(x,t)\in\Omega\times[0,T),
\label{eq:k}
\end{equation}
where $\Omega\subseteq{\mathbb{R}}^{n}$ is an open set, and $\nabla u$ and
$\Delta u$ denote the gradient and the Laplacian of $u$ with respect
to the space variables.
Equation (\ref{pbm:h-eq}) is called strictly hyperbolic if
\begin{equation}
m(\sigma)\geq\nu>0
\quad\quad
\forall\sigma\geq 0.
\label{hp:s-h}
\end{equation}
Equation (\ref{pbm:h-eq}) is called weakly (or degenerate) hyperbolic
if
$$m(\sigma)\geq 0 \quad\quad
\forall\sigma\geq 0.$$
Existence of local/global solutions to (\ref{pbm:h-eq}),
(\ref{pbm:h-data}) has long been investigated in the last century.
The theory is well established in the case of local solutions, which
are known to exist in the following situations.
\begin{enumerate}
\item[(L1)] When equation (\ref{pbm:h-eq}) is strictly hyperbolic,
$m$ is Lipschitz continuous, and initial data $(u_{0},u_{1})\in
D(A^{3/4})\times D(A^{1/4})$ (see \cite{ap} and the references
quoted therein).
\item[(L2)] When equation (\ref{pbm:h-eq}) is weakly hyperbolic,
$m$ is continuous, and initial data are analytic. In this case
solutions are actually global (see \cite{as}, \cite{das-an-1},
\cite{das-an-2}).
\item[(L3)] More generally, when initial data belong to suitable
intermediate spaces, depending on the continuity modulus of $m$,
and on the strict/weak hyperbolicity of (\ref{pbm:h-eq}) (see
\cite{hirosawa-main} and \cite{gg:k-derloss}). This is a sort of
interpolation between (L1) and (L2). We refer to
section~\ref{sec:prelim} for precise definitions of the functional
spaces in the abstract framework and a formal local existence
statement (Theorem~\ref{thm:hirosawa}).
\end{enumerate}
Existence of global solutions is a much more difficult problem, and it
is still widely open. A positive answer has been given in the case
(L2), and in some special situations: quasi-analytic initial data (see
\cite{nishihara}), or Sobolev-type data but special nonlinearities $m$
(see \cite{poho-m}), or dispersive operators and small data (see
\cite{gh}, \cite{das}). But for (L2) all these results assume the
strict hyperbolicity and the Lipschitz continuity of $m$.
Recently \textsc{R.\ Manfrin}~\cite{manfrin1,manfrin2} (see
also~\cite{hirosawa2}) considered once again the strictly hyperbolic
case with a smooth nonlinearity. He proved global existence in a
special class of nonanalytic initial data. Manfrin's spaces are not
vector spaces and do not contain any Gevrey space $\mathcal{G}_{s}$ with $s>1$.
However they have the following astonishing property:
\begin{enumerate}
\item[(M)] every pair of initial conditions $(u_{0},u_{1})\in
D(A)\times D(A^{1/2})$ is the sum of two pairs of initial
conditions in Manfrin's spaces, i.e., the sum of two initial
conditions for which the solution is global!
\end{enumerate}
This theory requires the strict hyperbolicity and some smoothness of
$m$, which is assumed to be of class $C^{2}$ both in \cite{manfrin1}
and \cite{manfrin2}.
In this paper we extend Manfrin's theory to the general situation of
(L3). We consider indeed both the strictly hyperbolic and the weakly
hyperbolic case, and a nonlinearity $m$ with a given continuity
modulus. In Theorem~\ref{thm:main} we prove global existence for
initial data in a suitable subset of the spaces involved in (L3). In
analogy with Manfrin's spaces, the definition (\ref{defn:m-space}) of
our subset is made in terms of the spectral resolution of initial
data. Of course our subset is not a vector space and it doesn't even
contain all analytic functions. Nevertheless in
Proposition~\ref{prop:sum} we show that this subset satisfies property
(M) in the spaces involved in (L3).
From the point of view of property (M) our result extends Manfrin's
one also in the framework (L1). In this case we obtain indeed
property (M) for initial data in $D(A^{3/4})\times D(A^{1/4})$ and a
locally Lipschitz continuous nonlinearity $m$, instead of initial data
in $D(A)\times D(A^{1/2})$ and $m\in C^{2}$.
This paper is organized as follows. In section~\ref{sec:prelim} we
recall the definition of continuity modulus and Gevrey-type functional
spaces, and we state the local existence result for the case (L3). In
section~\ref{sec:statements} we introduce our spaces and we state our
main results. In section~\ref{sec:proofs} we prove these results.
\setcounter{equation}{0}
\section{Preliminaries}\label{sec:prelim}
For the sake of simplicity we assume that $H$ admits a countable
complete orthonormal system $\{e_{k}\}_{k\geq 1}$ made by eigenvectors
of $A$. We denote the corresponding eigenvalues by $\lambda_{k}^{2}$
(with $\lambda_{k}\geq 0$), so that $Ae_{k}=\lambda_{k}^{2}e_{k}$ for every
$k\geq 1$.
Under this assumption we can work with Fourier series. However, any
definition or statement of this section can be easily extended to the
general setting just by using the spectral decomposition instead of
Fourier series. The interested reader is referred to \cite{ap} for
further details.
By means of the orthonormal system every $u\in H$ can be written in a
unique way in the form $u=\sum_{k=1}^{\infty}u_{k}e_{k}$, where
$u_{k}=\langle u,e_{k}\rangle$ are the Fourier components of $u$.
With these notations for every $\alpha\geq 0$ we have that
$$D(A^{\alpha}):=\left\{u\in H:\sum_{k=1}^{\infty}
\lambda_{k}^{4\alpha}u_{k}^{2}<+\infty\right\}.$$
Let now $\varphi:[0,+\infty)\to(0,+\infty)$ be any function. Then for
every $\alpha\geq 0$ and $r>0$ one can set
\begin{equation}
\trebar{u}_{\varphi,r,\alpha}^{2}:=\sum_{k=1}^{\infty}\lambda_{k}^{4\alpha}
u_{k}^{2} \exp\left(\strut r\varphi(\lambda_{k})\right),
\label{defn:trebar}
\end{equation}
and then define the spaces
$$\mathcal{G}_{\varphi,r,\alpha}(A):=
\left\{u\in H:\trebar{u}_{\varphi,r,\alpha}^{2}<+\infty\right\}.$$
These spaces are a generalization of the usual spaces of Sobolev,
Gevrey or analytic functions. They are Hilbert spaces with norm
$(|u|^{2}+\trebar{u}_{\varphi,r,\alpha}^{2})^{1/2}$. We also set
$$\mathcal{G}_{\varphi,\infty,\alpha}(A):=\bigcap_{r>0}\mathcal{G}_{\varphi,r,\alpha}(A).$$
A \emph{continuity modulus} is a continuous increasing function
$\omega:[0,+\infty)\to[0,+\infty)$ such that $\omega(0)=0$, and
$\omega(a+b)\leq\omega(a)+\omega(b)$ for every $a\geq 0$ and $b\geq
0$.
The function $m$ is said to be $\omega$-continuous if there exists a
constant $L\in{\mathbb{R}}$ such that
\begin{equation}
|m(a)-m(b)|\leq
L\,\omega(|a-b|)
\hspace{3em}
\forall a\geq 0,\ \forall b\geq 0.
\label{hp:m-ocont}
\end{equation}
The following result sums up the state of the art concerning existence
of local solutions. We refer to Theorem~2.1 and Theorem~2.2
in~\cite{hirosawa-main} for the existence part, to \cite{gg:k-derloss}
for some counterexamples, and to \cite{gg:k-uniq} for uniqueness
issues.
\begin{thmbibl}\label{thm:hirosawa}
Let $\omega$ be a continuity modulus, let
$m:[0,+\infty)\to[0,+\infty)$ be a (locally) $\omega$-continuous\ function, and
let $\varphi:[0,+\infty)\to(0,+\infty)$.
Let us assume that there exists a constant $\Lambda$ such that
\begin{equation}
\sigma
\omega\left(\frac{1}{\sigma}\right)\leq\Lambda\varphi(\sigma)
\quad\quad
\forall\sigma> 0
\label{hp:phi-ndg}
\end{equation}
in the strictly hyperbolic case, and
\begin{equation}
\sigma\leq\Lambda\varphi\left(\frac{\sigma}{
\sqrt{\omega(1/\sigma)}}\right)
\quad\quad
\forall\sigma> 0
\label{hp:phi-dg}
\end{equation}
in the weakly hyperbolic case.
Let
\begin{equation}
(u_{0},u_{1})\in
\mathcal{G}_{\varphi,r_{0},3/4}(A)\times\mathcal{G}_{\varphi,r_{0},1/4}(A)
\label{hp:hiro-data}
\end{equation}
for some $r_{0}>0$.
Then there exists $T>0$, and a nonincreasing function
$r:[0,T]\to(0,r_{0}]$ such that problem (\ref{pbm:h-eq}),
(\ref{pbm:h-data}) admits at least one local solution
\begin{equation}
u\in C^{1}\left([0,T];\mathcal{G}_{\varphi,r(t),1/4}(A)\right)\cap
C^{0}\left([0,T];\mathcal{G}_{\varphi,r(t),3/4}(A)\right).
\label{th:reg-sol}
\end{equation}
\end{thmbibl}
\setcounter{equation}{0}
\section{Main result}\label{sec:statements}
Let $\mathcal{L}$ denote the set of all sequences $\{\rho_{n}\}$ of
positive real numbers such that $\rho_{n}\to +\infty$ as $n\to
+\infty$. Given $\varphi:[0,+\infty)\to(0,+\infty)$,
$\{\rho_{n}\}\in\mathcal{L}$, $\alpha\geq 0$, and $\beta\geq 0$ we
set
\begin{equation}
\mathcal{G}M_{\varphi,\{\rho_{n}\},\alpha}^{(\beta)}(A):=\left\{ u\in
H:\sum_{\lambda_{k}>\rho_{n}}\lambda_{k}^{4\alpha}u_{k}^{2}
\exp\left(\rho_{n}^{\beta}\varphi(\lambda_{k})\right)\leq\rho_{n}
\quad\forall n\in{\mathbb{N}}\right\},
\label{defn:m-space}
\end{equation}
and then
$$\mathcal{G}M_{\varphi,\alpha}^{(\beta)}(A):=
\bigcup_{\{\rho_{n}\}\in\mathcal{L}}
\mathcal{G}M_{\varphi,\{\rho_{n}\},\alpha}^{(\beta)}(A).$$
These spaces are a generalization of Manfrin's spaces.
The following global existence result is the main result of this
paper.
\begin{thm}\label{thm:main}
Let $\omega$ be a continuity modulus, let
$m:[0,+\infty)\to[0,+\infty)$ be a function satisfying
(\ref{hp:m-ocont}), let $\varphi:[0,+\infty)\to(0,+\infty)$, and
let $\{\rho_{n}\}\in\mathcal{L}$.
Let us assume that
\begin{itemize}
\item in the strictly hyperbolic case (\ref{hp:phi-ndg})
holds true for a suitable $\Lambda$, and
\begin{equation}
(u_{0},u_{1})\in
\mathcal{G}M_{\varphi,\{\rho_{n}\},3/4}^{(2)}(A)\times
\mathcal{G}M_{\varphi,\{\rho_{n}\},1/4}^{(2)}(A),
\label{hp:data-ndg}
\end{equation}
\item in the weakly hyperbolic case (\ref{hp:phi-dg})
holds true for a suitable $\Lambda$, and
\begin{equation}
(u_{0},u_{1})\in
\mathcal{G}M_{\varphi,\{\rho_{n}\},3/4}^{(3)}(A)\times
\mathcal{G}M_{\varphi,\{\rho_{n}\},1/4}^{(3)}(A).
\label{hp:data-dg}
\end{equation}
\end{itemize}
Then problem (\ref{pbm:h-eq}), (\ref{pbm:h-data})
admits at least one global solution $u(t)$ with
\begin{equation}
u\in C^{1}\left([0,+\infty);\mathcal{G}_{\varphi,r,3/4}(A)\right)\cap
C^{0}\left([0,+\infty);\mathcal{G}_{\varphi,r,1/4}(A)\right)
\label{hp:reg-sol}
\end{equation}
for every $r>0$.
\end{thm}
We conclude by speculating on these spaces. First of all it is easy to
prove that
\begin{equation}
\mathcal{G}M_{\varphi,\alpha}^{(\beta)}(A)\subseteq
\mathcal{G}_{\varphi,\infty,\alpha}(A)
\label{eq:inclusion}
\end{equation}
for every admissible values of the parameters. On one hand this
inclusion is ``very strict''. Roughly speaking indeed the
inequalities in definition (\ref{defn:m-space}) require that the
spectrum of $u$ ``has a big hole after each $\rho_{n}$''. For this
heuristic reason we used ``spectral gap solutions'' to denote the
solutions produced by Theorem~\ref{thm:main}.
On the other hand inclusion (\ref{eq:inclusion}) is ``not so strict''
in the sense that
$$\mathcal{G}M_{\varphi,\alpha}^{(\beta)}(A)+\mathcal{G}M_{\varphi,\alpha}^{(\beta)}(A)=
\mathcal{G}_{\varphi,\infty,\alpha}(A)$$
for any admissible values of the parameters. We state this property
more precisely in the case of pairs of initial data.
\begin{prop}\label{prop:sum}
Let $\varphi:[0,+\infty)\to(0,+\infty)$, and let
\begin{equation}
(u_{0},u_{1})\in
\mathcal{G}_{\varphi,\infty,3/4}(A)\times\mathcal{G}_{\varphi,\infty,1/4}(A).
\label{hp:prop}
\end{equation}
Then for every $\beta\geq 0$ there exist $\{\overline{\rho}_{n}\}$ and
$\{\widehat{\rho}_{n}\}$ in $\mathcal{L}$, and
\begin{equation}
(\overline{u}_{0},\overline{u}_{1})\in
\mathcal{G}M_{\varphi,\{\overline{\rho}_{n}\},3/4}^{(\beta)}(A)\times
\mathcal{G}M_{\varphi,\{\overline{\rho}_{n}\},1/4}^{(\beta)}(A),
\label{th:prop-1}
\end{equation}
\begin{equation}
(\widehat{u}_{0},\widehat{u}_{1})\in
\mathcal{G}M_{\varphi,\{\widehat{\rho}_{n}\},3/4}^{(\beta)}(A)\times
\mathcal{G}M_{\varphi,\{\widehat{\rho}_{n}\},1/4}^{(\beta)}(A),
\label{th:prop-2}
\end{equation}
such that $u_{0}=\overline{u}_{0}+\widehat{u}_{0}$ and
$u_{1}=\overline{u}_{1}+\widehat{u}_{1}$.
\end{prop}
\begin{rmk}
\begin{em}
Combining Theorem~\ref{thm:main} and
Proposition~\ref{prop:sum} we obtain the following statement:
every pair of initial conditions satisfying
(\ref{hp:hiro-data}) with $r_{0}=\infty$ is the sum of two
pairs of initial conditions for which the solution is global.
We have thus extended to the general case the astonishing
aspect of Manfrin's result.
The extra requirement that $r_{0}=\infty$ is hardly
surprising. It is indeed a necessary condition for existence
of global solutions even in the theory of linear equations
with nonsmooth time dependent coefficients.
\end{em}
\end{rmk}
\begin{rmk}
\begin{em}
The $\omega$-continuity assumption on $m$ can be easily
relaxed to local $\omega$-continuity in all the cases where
there is a uniform-in-time estimate of $|A^{1/2}u(t)|$ in
terms of the initial data. We refer to the paragraph ``Energy
conservation'' in section~\ref{sec:proof-prelim} for further
details.
\end{em}
\end{rmk}
\begin{rmk}\label{rmk:reg-sol}
\begin{em}
It is possible to extend the result of Theorem~\ref{thm:main}
to larger spaces. A careful inspection of the proof reveals
that in the strictly hyperbolic case one can replace $\beta=2$
with any $\beta>1$, in the weakly hyperbolic case one can
replace $\beta=3$ with any $\beta>2$. It should also be
possible to enlarge these spaces in order to contain all
analytic functions, for which a global solution was already
known to exist.
Our choice (\ref{defn:m-space}) is optimized in order to
obtain both Theorem~\ref{thm:main} and
Proposition~\ref{prop:sum} under the more general assumptions
on $m$, and with a simple proof.
\end{em}
\end{rmk}
\setcounter{equation}{0}
\section{Proofs}\label{sec:proofs}
\subsection{Preliminaries}\label{sec:proof-prelim}
\paragraph{Estimates for a continuity modulus}
The following estimates are crucial in the proof of our main result
(see also Lemma~3.1 in \cite{gg:k-uniq}).
\begin{lemma}
Let $\omega:[0,+\infty)\to[0,+\infty)$ be a continuity modulus.
Then
\begin{eqnarray}
& \omega(\lambda x)\leq(1+\lambda)\omega(x)
\quad\quad\forall\lambda\geq 0,\ \forall x\geq 0; &
\label{th:omega-lambda} \\
\noalign{\vspace{1ex}}
& \displaystyle{\omega(x)\geq\omega(1)\frac{x}{x+1}}
\quad\quad \forall x\geq 0; &
\label{th:omega-est} \\
\noalign{\vspace{1ex}}
& \displaystyle{1+\frac{1}{\omega(x)}\leq\left(1+\frac{1}{\omega(1)}\right)
\left(1+\frac{1}{x}\right)}
\quad\quad\forall x>0.&
\label{th:omega-3}
\end{eqnarray}
\end{lemma}
{\sc Proof.}\
Inequality (\ref{th:omega-lambda}) can be easily proved by induction
on the integer part of $\lambda$ using the monotonicity and the
subadditivity of $\omega$. Inequality (\ref{th:omega-est}) follows
from (\ref{th:omega-lambda}) applied with $\lambda=1/x$. Inequality
(\ref{th:omega-3}) follows from (\ref{th:omega-est}).
\qed
\paragraph{Energy conservation}
Let $u$ be any solution of (\ref{pbm:h-eq}) defined in an interval
$[0,T)$. Let us set
$$M(\sigma):=\int_{0}^{\sigma}m(s)\,ds
\quad\quad
\forall\sigma\geq 0,$$
and let us consider the usual Hamiltonian
$$\mathcal{H}(t):=|u'(t)|^{2}+M(|A^{1/2}u(t)|^{2}).$$
It is well known that $\mathcal{H}(t)$ is constant. In particular
\begin{equation}
|u'(t)|^{2}\leq\mathcal{H}(0)
\quad\quad
\forall t\in[0,T).
\label{est:u'}
\end{equation}
In the strictly hyperbolic case we have also that
$M(\sigma)\geq\nu\sigma$, hence
\begin{equation}
|A^{1/2}u(t)|^{2}\leq\frac{\mathcal{H}(0)}{\nu}
\quad\quad
\forall t\in[0,T).
\label{est:au}
\end{equation}
This provides an estimate of $|A^{1/2}u(t)|$ in terms of the initial
conditions. This type of estimate can be obtained also without the
strict hyperbolicity provided that the limit of $M(\sigma)$ as
$\sigma\to +\infty$ is $+\infty$ or at least larger than
$\mathcal{H}(0)$.
\paragraph{Convolutions}
In the next result we recall the properties of convolutions which are
needed in the sequel (we omit the standard proof).
\begin{lemma}\label{lemma:conv}
Let $\rho:{\mathbb{R}}\to [0,+\infty)$ be a function of class $C^{\infty}$,
with support contained in $[-1,1]$, and integral equal to 1.
Let $a>0$, and let $f:[0,a]\to{\mathbb{R}}$ be a continuous function. Let
us extend $f(x)$ to the whole real line by setting $f(x)=f(0)$ for
every $x\leq 0$, and $f(x)=f(a)$ for every $x\geq a$.
For every $\varepsilon>0$ let us set
$$f_{\varepsilon}(x):=\int_{{\mathbb{R}}}^{}f(x+\varepsilon
s)\rho(s)\,ds \quad\quad
\forall x\in{\mathbb{R}}.$$
Then $f_{\varepsilon}(x)$ has the following properties.
\begin{enumerate}
\renewcommand{\labelenumi}{(\arabic{enumi})}
\item $f_{\varepsilon}\in C^{\infty}({\mathbb{R}})$.
\item If $\mu_{1}\leq f(x)\leq\mu_{2}$ for every $x\in[0,a]$,
then $\mu_{1}\leq f_{\varepsilon}(x)\leq\mu_{2}$ for every
$x\in{\mathbb{R}}$ and every $\varepsilon>0$.
\item $|f_{\varepsilon}(0)|\leq\max\{|f(x)|:0\leq x\leq\varepsilon\}$ for
every $\varepsilon> 0$.
\item Let $\omega$ be a continuity modulus. Let us assume that
\begin{equation}
|f(x)-f(y)|\leq H\omega(|x-y|) \quad\quad \forall x\in[0,a],\
\forall y\in[0,a],
\label{hp:o-cont}
\end{equation}
for some $H\geq 0$. Then there exists a constant
$\gamma_{0}$ (independent on $\varepsilon$, $H$, and on the function
$f(t)$) such that
$$|f_{\varepsilon}(x)-f(x)|\leq\gamma_{0}H\omega(\varepsilon)
\quad\quad
\forall x\in{\mathbb{R}},\ \forall\varepsilon>0,$$
$$|f_{\varepsilon}'(x)|\leq\gamma_{0}H\,\displaystyle{\frac{\omega(\varepsilon)}{\varepsilon}}
\quad\quad
\forall x\in{\mathbb{R}},\ \forall\varepsilon>0.$$
\end{enumerate}
\end{lemma}
\paragraph{Maximal local solutions}
By (\ref{eq:inclusion}) assumptions (\ref{hp:data-ndg}) and
(\ref{hp:data-dg}) imply that
$(u_{0},u_{1})\in\mathcal{G}_{\varphi,\infty,3/4}(A)\times
\mathcal{G}_{\varphi,\infty,1/4}(A)$. Therefore the existence of a local
solution to (\ref{pbm:h-eq}), (\ref{pbm:h-data}) follows from
Theorem~\ref{thm:hirosawa} both in the strictly hyperbolic and in the
weakly hyperbolic case. Since initial data satisfy
(\ref{hp:hiro-data}) for every $r_{0}$, from the linear theory it
easily follows that the local solution satisfies (\ref{th:reg-sol})
for every $r(t)$.
By a standard argument any local solution can be continued to a
solution defined in a maximal interval $[0,T)$. If $T=+\infty$ there
is nothing to prove. In order to exclude that $T<+\infty$ we prove
that the time derivative of $|A^{1/2}u(t)|^{2}$ cannot blow-up in a
finite time. The proof of this a priori estimate, which is the basic
tool in all global existence results, is different in the strictly
hyperbolic and in the weakly hyperbolic case.
\subsection{The strictly hyperbolic case}
Let us introduce some constants. From the strict hyperbolicity
(\ref{hp:s-h}) and estimate (\ref{est:au}) we have that
$$\nu\leq\m{u(t)}\leq
\max\left\{m(\sigma):0\leq\sigma\leq
\frac{\mathcal{H}(0)}{\nu}\right\}=:\mu
\quad\quad
\forall t\geq 0.$$
Let $L$, $\Lambda$, $\gamma_{0}$ be the constants appearing in
(\ref{hp:m-ocont}), (\ref{hp:phi-ndg}), and in Lemma~\ref{lemma:conv},
and let
$$\gamma_{1}:=\max\{1,\mu\}\cdot\max\left\{1,\nu^{-1}\right\},$$
$$H_{1}:=\max\left\{\left|\langle A^{3/4}u_{0},A^{1/4}u_{1}
\rangle\right|+1,\left(1+\nu^{-1}\right)
\mathcal{H}(0)+2\gamma_{1}+1\right\},$$
$$\gamma_{2}:=\gamma_{0}L\Lambda(2H_{1}+1)\left(
\frac{1}{\nu}+\frac{1}{\sqrt{\nu}}\right).$$
Since $\rho_{n}\to +\infty$ we can choose $n\in{\mathbb{N}}$ such that
\begin{equation}
\rho_{n}\geq\max\{\gamma_{2}T,1\}.
\label{defn:rhon}
\end{equation}
Let us set
$$S:=\sup\left\{\tau\leq T:\left|\langle A^{3/4}u(t),A^{1/4}u'(t)
\rangle\right|\leq H_{1}\rho_{n}\;\;\forall t\in[0,\tau]\right\}.$$
We remark that $S>0$ because
$\left|\langle A^{3/4}u_{0},A^{1/4}u_{1}
\rangle\right|<H_{1}\leq H_{1}\rho_{n}$.
Now we distinguish the case $S=T$ and $S<T$.
\subparagraph{\textmd{\emph{Case}} $S=T$}
The argument is quite standard. In the interval $[0,T)$ the function
$u(t)$ is the solution of the linear problem
\begin{equation}
v''(t)+c(t)Av(t)=0
\label{pbm:lin-eq}
\end{equation}
\begin{equation}
v(0)=u_{0},
\quad\quad
v'(0)=u_{1},
\label{pbm:lin-data}
\end{equation}
where
\begin{equation}
c(t):=\m{u(t)}.
\label{defn:c}
\end{equation}
Since $S=T$ in this case we have that
\begin{equation}
\left|\frac{\mathrm{d}}{\mathrm{d}t}|A^{1/2}u(t)|^{2}\right|=
2\left|\langle A^{3/4}u(t),A^{1/4}u'(t)
\rangle\right|\leq 2H_{1}\rho_{n}
\label{eq:c'}
\end{equation}
for every $t\in[0,T)$. It follows that $|A^{1/2}u(t)|^{2}$ is Lipschitz
continuous in $[0,T)$, hence $c(t)$ can be extended to an
$\omega$-continuous function defined in the closed interval $[0,T]$.
By the linear theory (see \cite{dgcs} and \cite{hirosawa-main})
problem (\ref{pbm:lin-eq}), (\ref{pbm:lin-data}) has a solution
$$v\in C^{0}\left([0,T];\mathcal{G}_{\varphi,r,3/4}(A)\right)\cap
C^{1}\left([0,T];\mathcal{G}_{\varphi,r,1/4}(A)\right)$$
for every $r>0$. Since the solution of the linear problem is unique,
this implies that there exist
$$\widehat{u}_{0}:=\lim_{t\to T^{-}}u(t)\in\mathcal{G}_{\varphi,\infty,3/4}(A),
\quad\quad
\widehat{u}_{1}:=\lim_{t\to T^{-}}u'(t)\in\mathcal{G}_{\varphi,\infty,1/4}(A).$$
Applying Theorem~\ref{thm:hirosawa} with initial data
$(\widehat{u}_{0},\widehat{u}_{1})$ one can therefore continue $u(t)$
on an interval $[0,T_{1})$ with $T_{1}>T$, which contradicts the
maximality of $T$.
\subparagraph{\textmd{\emph{Case}} $S<T$}
By the maximality of $S$ we have that necessarily
\begin{equation}
\left|\langle A^{3/4}u(S),A^{1/4}u'(S) \rangle\right|=
H_{1}\rho_{n}.
\label{eq:S-nec}
\end{equation}
Let us consider the function $c(t)$ defined according to
(\ref{defn:c}). In this case (\ref{eq:c'}) holds true for every $t\in[0,S]$,
hence by (\ref{hp:m-ocont}) and (\ref{th:omega-lambda}) we have that
\begin{eqnarray*}
\left|c(t)-c(s)\right| & = & \left|\m{u(t)}-\m{u(s)}\right| \\
& \leq & L\,\omega\left(\left|
|A^{1/2}u(t)|^{2}-|A^{1/2}u(s)|^{2}\right|\right) \\
& \leq & L\,\omega(2H_{1}\rho_{n}|t-s|) \\
& \leq & L(2H_{1}\rho_{n}+1)\,\omega(|t-s|) \\
& \leq & L(2H_{1}+1)\rho_{n}\,\omega(|t-s|)
\end{eqnarray*}
for every $t$ and $s$ in $[0,S]$. Let us extend $c(t)$ outside the
interval $[0,S]$ as in Lemma~\ref{lemma:conv}, and let us set
\begin{equation}
c_{\ep}(t):=\int_{{\mathbb{R}}}^{}c(t+\varepsilon s)\rho(s)\,ds \quad\quad \forall
t\in{\mathbb{R}}.
\label{defn:cep}
\end{equation}
Since estimate (\ref{hp:o-cont}) holds true with
$H:=L(2H_{1}+1)\rho_{n}$, from statements (2) and (4) of
Lemma~\ref{lemma:conv} we deduce that
\begin{equation}
\nu\leq c_{\ep}(t)\leq\mu
\quad\quad
\forall t\in{\mathbb{R}},\ \forall\varepsilon>0,
\label{est:cep-nu-mu}
\end{equation}
\begin{equation}
|c_{\ep}(t)-c(t)|\leq\gamma_{0}L(2H_{1}+1)\rho_{n}\omega(\varepsilon)
\quad\quad
\forall t\in{\mathbb{R}},\ \forall\varepsilon>0,
\label{est:cep-c}
\end{equation}
\begin{equation}
|c_{\ep}'(t)|\leq\gamma_{0}L(2H_{1}+1)\rho_{n}\,
\displaystyle{\frac{\omega(\varepsilon)}{\varepsilon}}
\quad\quad
\forall t\in{\mathbb{R}},\ \forall\varepsilon>0.
\label{est:cep'}
\end{equation}
Let us consider the Fourier components $u_{k}(t)$ of $u(t)$, and let
us set
\begin{equation}
E_{k,\varepsilon}(t):=|u_{k}'(t)|^{2}+\lambda_{k}^{2}c_{\ep}(t)|u_{k}(t)|^{2}.
\label{defn:ekep}
\end{equation}
An easy computation shows that
\begin{eqnarray*}
E_{k,\varepsilon}'(t) & = & c_{\ep}'(t)\lambda_{k}^{2}|u_{k}(t)|^{2}+
2\lambda_{k}^{2}(c_{\ep}(t)-c(t))u_{k}(t)u_{k}'(t)\\
& \leq & \frac{|c_{\ep}'(t)|}{c_{\ep}(t)}c_{\ep}(t)\lambda_{k}^{2}|u_{k}(t)|^{2}+
\lambda_{k}\frac{|c_{\ep}(t)-c(t)|}{\sqrt{c_{\ep}(t)}}2|u_{k}'(t)|\cdot
\lambda_{k}\sqrt{c_{\ep}(t)}|u_{k}(t)|\\
& \leq & \frac{|c_{\ep}'(t)|}{c_{\ep}(t)}E_{k,\varepsilon}(t)+
\lambda_{k}\frac{|c_{\ep}(t)-c(t)|}{\sqrt{c_{\ep}(t)}}E_{k,\varepsilon}(t),
\end{eqnarray*}
hence by (\ref{est:cep-nu-mu}), (\ref{est:cep-c}), and
(\ref{est:cep'}) we obtain that
\begin{equation}
E_{k,\varepsilon}'(t)\leq \gamma_{0}L(2H_{1}+1)\rho_{n}\left(
\frac{1}{\nu}\frac{\omega(\varepsilon)}{\varepsilon}+
\frac{1}{\sqrt{\nu}}\lambda_{k}\omega(\varepsilon)\right)E_{k,\varepsilon}(t)
\quad\quad\forall t\in[0,S].
\label{est:ekep}
\end{equation}
Let us consider now the eigenvalues $\lambda_{k}>\rho_{n}$, which are clearly
positive, and let us set $\varepsilon_{k}:=\lambda_{k}^{-1}$. By (\ref{hp:phi-ndg}) we
have that
$$\frac{\omega(\varepsilon_{k})}{\varepsilon_{k}}=\lambda_{k}\omega(\varepsilon_{k})=
\lambda_{k}\omega\left(\frac{1}{\lambda_{k}}\right)\leq\Lambda\varphi(\lambda_{k}).$$
Using these estimates in (\ref{est:ekep}) we obtain that
$$E_{k,\varepsilon_{k}}'(t)\leq \gamma_{0}L(2H_{1}+1)\rho_{n}\left(
\frac{1}{\nu}+\frac{1}{\sqrt{\nu}}\right)
\Lambda\varphi(\lambda_{k}) E_{k,\varepsilon_{k}}(t)=
\gamma_{2}\rho_{n}\varphi(\lambda_{k})E_{k,\varepsilon_{k}}(t).$$
Integrating this differential inequality and using (\ref{defn:rhon})
we find that
$$E_{k,\varepsilon_{k}}(t)\leq E_{k,\varepsilon_{k}}(0)
\exp\left(\gamma_{2}\rho_{n}\varphi(\lambda_{k})T\right)\leq E_{k,\varepsilon_{k}}(0)
\exp\left(\rho_{n}^{2}\varphi(\lambda_{k})\right)$$
for every $t\in[0,S]$. Thanks to (\ref{est:cep-nu-mu}) we obtain that
\begin{eqnarray*}
|u_{k}'(t)|^{2}+\lambda_{k}^{2}|u_{k}(t)|^{2} & \leq &
\max\left\{1,\nu^{-1}\right\}E_{k,\varepsilon_{k}}(t)\\
& \leq & \max\left\{1,\nu^{-1}\right\}
\left(|u_{1k}|^{2}+\lambda_{k}^{2} c_{\varepsilon_{k}}(0)|u_{0k}|^{2}\right)
\exp\left(\rho_{n}^{2}\varphi(\lambda_{k})\right)\\
& \leq & \max\left\{1,\nu^{-1}\right\}\cdot\max\{1,\mu\}
\left(|u_{1k}|^{2}+\lambda_{k}^{2} |u_{0k}|^{2}\right)
\exp\left(\rho_{n}^{2}\varphi(\lambda_{k})\right)\\
& = & \gamma_{1}\left(|u_{1k}|^{2}+\lambda_{k}^{2}|u_{0k}|^{2}\right)
\exp\left(\rho_{n}^{2}\varphi(\lambda_{k})\right),
\end{eqnarray*}
where $u_{0k}$ and $u_{1k}$ denote the Fourier components of $u_{0}$
and $u_{1}$, respectively.
By assumption (\ref{hp:data-ndg}) we have therefore that
$$\sum_{\lambda_{k}>\rho_{n}}\lambda_{k}\left(
|u_{k}'(t)|^{2}+\lambda_{k}^{2}|u_{k}(t)|^{2}\right)\leq \gamma_{1}
\sum_{\lambda_{k}>\rho_{n}}\lambda_{k} \left(|u_{1k}|^{2}+\lambda_{k}^{2}|u_{0k}|^{2}\right)
\exp\left(\rho_{n}^{2}\varphi(\lambda_{k})\right)\leq 2\gamma_{1}\rho_{n}$$
for every $t\in[0,S]$. On the other hand, by (\ref{est:u'}) and
(\ref{est:au}) we have that
\begin{eqnarray*}
\sum_{\lambda_{k}\leq\rho_{n}}\lambda_{k}\left(
|u_{k}'(t)|^{2}+\lambda_{k}^{2}|u_{k}(t)|^{2}\right) & \leq &
\rho_{n}\sum_{\lambda_{k}\leq\rho_{n}}\left(
|u_{k}'(t)|^{2}+\lambda_{k}^{2}|u_{k}(t)|^{2}\right)\\
& \leq & \rho_{n}\left(|u'(t)|^{2}+|A^{1/2}u(t)|^{2}\right) \\
& \leq & \rho_{n}\left(\mathcal{H}(0)+
\frac{\mathcal{H}(0)}{\nu}\right)
\end{eqnarray*}
for every $t\in[0,S]$. In particular for $t=S$ we have that
\begin{eqnarray*}
\lefteqn{\hspace{-2em}\left|\langle A^{3/4}u(S),A^{1/4}u'(S) \rangle\right| \leq
|A^{3/4}u(S)|^{2}+|A^{1/4}u'(S)|^{2}} \\
\noalign{\vspace{1ex}}
\hspace{2em} & = & \sum_{\lambda_{k}\leq\rho_{n}}\lambda_{k}\left(
|u_{k}'(S)|^{2}+\lambda_{k}^{2}|u_{k}(S)|^{2}\right)+
\sum_{\lambda_{k}>\rho_{n}}\lambda_{k}\left(
|u_{k}'(S)|^{2}+\lambda_{k}^{2}|u_{k}(S)|^{2}\right) \\
& \leq & \rho_{n}\left(\mathcal{H}(0)+
\frac{\mathcal{H}(0)}{\nu}+2\gamma_{1}\right) \\
& < & H_{1}\rho_{n}.
\end{eqnarray*}
This contradicts (\ref{eq:S-nec}).
\subsection{The weakly hyperbolic case}
Let us introduce some constants. Let $L$, $\Lambda$, $\gamma_{0}$ be
the constants appearing in (\ref{hp:m-ocont}), (\ref{hp:phi-dg}),
and in Lemma~\ref{lemma:conv}, and let
$$\gamma_{3}:=1+\frac{1}{\omega(1)},$$
$$\gamma_{4}:=\max\left\{\m{u(t)}:t\in[0,T/2]\right\}+
\max\left\{\omega(\sigma):0\leq \sigma\sqrt{\omega(\sigma)}\leq
1\right\},$$
$$\gamma_{5}:=\gamma_{3}(1+\gamma_{4})(\Lambda+1)$$
$$H_{2}:=\max\left\{\left|\langle A^{3/4}u_{0},A^{1/4}u_{1}
\rangle\right|+1,(|u_{0}|+1)\sqrt{\mathcal{H}(0)}+
\gamma_{5}+1\right\},$$
$$\gamma_{6}:=1+\gamma_{0}L(2H_{2}+1).$$
Since $\rho_{n}\to +\infty$ we can choose $n\in{\mathbb{N}}$ such that
$\rho_{n}\geq 1$, and
\begin{equation}
\rho_{n}^{1/2}\geq T\sqrt{\mathcal{H}(0)},
\hspace{3em}
\rho_{n}^{1/2}\geq 4\gamma_{6}\Lambda T,
\hspace{3em}
\rho_{n}\geq\frac{2}{T\sqrt{\omega(T/2)}}.
\label{defn:rhon-w}
\end{equation}
Let us set
$$S:=\sup\left\{\tau\leq T:\left|\langle A^{3/4}u(t),A^{1/4}u'(t)
\rangle\right|\leq H_{2}\rho_{n}^{5/2}\ \ \forall
t\in[0,\tau]\right\}.$$
We remark that $S>0$ because $\left|\langle
A^{3/4}u_{0},A^{1/4}u_{1} \rangle\right|<H_{2}\leq
H_{2}\rho_{n}^{5/2}$.
If $S=T$ we can conclude as in the strictly hyperbolic case (using the
linear theory for the weakly hyperbolic case, for which we refer to
\cite{cjs}). So let us assume that $S<T$. By the maximality of $S$
we have that necessarily
\begin{equation}
\left|\langle A^{3/4}u(S),A^{1/4}u'(S) \rangle\right|=
H_{2}\rho_{n}^{5/2}.
\label{eq:S-nec-w}
\end{equation}
Let us consider the function $c(t)$ defined according to
(\ref{defn:c}), let us extend it outside the interval $[0,S]$ as in
Lemma~\ref{lemma:conv}, and let us set
$$c_{\ep}(t):=\omega(\varepsilon)+\int_{{\mathbb{R}}}^{}c(t+\varepsilon s)\rho(s)\,ds
\quad\quad\forall t\in{\mathbb{R}}.$$
Arguing as in the strictly hyperbolic case we find that
$$\left|c(t)-c(s)\right| \leq L(2H_{2}+1)\rho_{n}^{5/2}\omega(|t-s|)$$
for every $t$ and $s$ in $[0,S]$. Therefore from statement (4) of
Lemma~\ref{lemma:conv} we deduce that
\begin{equation}
|c_{\ep}(t)-c(t)|\leq\left(1+\gamma_{0}L(2H_{2}+1)
\rho_{n}^{5/2}\right)\omega(\varepsilon)=
\gamma_{6}\rho_{n}^{5/2}\,\omega(\varepsilon),
\label{est:cep-c-w}
\end{equation}
\begin{equation}
|c_{\ep}'(t)|\leq\gamma_{0}L(2H_{2}+1)\rho_{n}^{5/2}
\displaystyle{\frac{\omega(\varepsilon)}{\varepsilon}}\leq
\gamma_{6}\rho_{n}^{5/2}\,
\displaystyle{\frac{\omega(\varepsilon)}{\varepsilon}}.
\label{est:cep'-w}
\end{equation}
Let us consider the Fourier components $u_{k}(t)$ of $u(t)$, and let
us define $E_{k,\varepsilon}(t)$ as in (\ref{defn:ekep}). Computing the time
derivative as in the strictly hyperbolic case, and using
(\ref{est:cep-c-w}), (\ref{est:cep'-w}), and the fact that
$c_{\ep}(t)\geq\omega(\varepsilon)$ we find that
$$E_{k,\varepsilon}'(t)\leq\gamma_{6}\rho_{n}^{5/2}\left(
\frac{1}{\varepsilon}+\lambda_{k}\sqrt{\omega(\varepsilon)}\right)E_{k,\varepsilon}(t)
\quad\quad\forall t\in[0,S].$$
Now we choose $\varepsilon$ as a function of $k$. The function
$h(\sigma)=\sigma\sqrt{\omega(\sigma)}$ is invertible. Let us
consider the eigenvalues $\lambda_{k}>\rho_{n}$, which are clearly positive,
and let us set $\varepsilon_{k}:=h^{-1}(1/\lambda_{k})$. By (\ref{hp:phi-dg}) we have
that
\begin{equation}
\lambda_{k}\sqrt{\omega(\varepsilon_{k})}=\frac{1}{\varepsilon_{k}}\leq\Lambda
\varphi\left(\frac{1}{h(\varepsilon_{k})}\right)=
\Lambda\varphi(\lambda_{k}),
\label{est:Lambda}
\end{equation}
hence
$$E_{k,\varepsilon_{k}}'(t)\leq 2\gamma_{6}\rho_{n}^{5/2}\Lambda
\varphi(\lambda_{k})E_{k,\varepsilon_{k}}(t).$$
Integrating this differential inequality, and exploiting the second
condition in (\ref{defn:rhon-w}) we thus obtain that
$$E_{k,\varepsilon_{k}}(t)\leq E_{k,\varepsilon_{k}}(0)
\exp\left(2\rho_{n}^{5/2}\gamma_{6}\Lambda \varphi(\lambda_{k})T\right)\leq
E_{k,\varepsilon_{k}}(0)
\exp\left(\frac{1}{2}\rho_{n}^{3}\varphi(\lambda_{k})\right)$$
for every $t\in[0,S]$. In order to estimate $E_{k,\varepsilon_{k}}(0)$ we
need an estimate on $c_{\varepsilon_{k}}(0)$. To this end we first observe
that $h(\varepsilon_{k})=1/\lambda_{k}<1$, hence
\begin{equation}
\omega(\varepsilon_{k})\leq\max\{\omega(\sigma):0\leq h(\sigma)\leq 1\}.
\label{est:cep0-1}
\end{equation}
Moreover the last condition in (\ref{defn:rhon-w}) is equivalent to
$1/\rho_{n}\leq h(T/2)$. Therefore from the monotonicity of $h$ it
follows that
$$\varepsilon_{k}=h^{-1}\left(\frac{1}{\lambda_{k}}\right)\leq
h^{-1}\left(\frac{1}{\rho_{n}}\right)\leq
h^{-1}\left(h
\left(\frac{T}{2}\right)\right)=\frac{T}{2},$$
hence from statement (3) of Lemma~\ref{lemma:conv} we deduce that
\begin{equation}
\int_{{\mathbb{R}}}c(\varepsilon_{k}s)\rho(s)\,ds\leq
\max\{c(t):0\leq t\leq\varepsilon_{k}\}\leq
\max\{c(t):0\leq t\leq T/2\}.
\label{est:cep0-2}
\end{equation}
From (\ref{est:cep0-1}) and (\ref{est:cep0-2}) it follows that
$c_{\varepsilon_{k}}(0)\leq\gamma_{4}$, hence
$$E_{k,\varepsilon_{k}}(0)\leq\max\left\{1,c_{\ep}(0)\right\}
\left(|u_{1k}|^{2}+\lambda_{k}^{2}|u_{0k}|^{2}\right)\leq
(1+\gamma_{4})\left(|u_{1k}|^{2}+\lambda_{k}^{2}|u_{0k}|^{2}\right).$$
Moreover from (\ref{th:omega-3}) and (\ref{est:Lambda}) it follows
that
$$\max\left\{1,\frac{1}{\omega(\varepsilon_{k})}\right\}\leq
1+\frac{1}{\omega(\varepsilon_{k})}\leq
\gamma_{3}\left(1+\frac{1}{\varepsilon_{k}}\right)\leq
\gamma_{3}(1+\Lambda\varphi(\lambda_{k})).$$
Since $(1+\Lambda x)\leq(\Lambda +1)e^{x/2}$ for every $\Lambda\geq 0$
and every $x\geq 0$, we have in particular that
$$\max\left\{1,\frac{1}{\omega(\varepsilon_{k})}\right\}\leq
\gamma_{3}(1+\Lambda\varphi(\lambda_{k}))\leq
\gamma_{3}(1+\Lambda)\exp\left(\frac{1}{2}\varphi(\lambda_{k})\right)\leq$$
$$\leq\gamma_{3}(1+\Lambda)\exp\left(\frac{1}{2}\rho_{n}^{3}
\varphi(\lambda_{k})\right).$$
From all these estimates it follows that
\begin{eqnarray*}
|u_{k}'(t)|^{2}+\lambda_{k}^{2}|u_{k}(t)|^{2} & \leq &
\max\left\{1,\frac{1}{\omega(\varepsilon_{k})}\right\}E_{k,\varepsilon_{k}}(t)\\
& \leq & \gamma_{3}(1+\Lambda)E_{k,\varepsilon_{k}}(0)
\exp\left(\rho_{n}^{3}\varphi(\lambda_{k})\right)\\
& \leq & \gamma_{3}(1+\Lambda)(1+\gamma_{4})
\left(|u_{1k}|^{2}+\lambda_{k}^{2}|u_{0k}|^{2}\right)
\exp\left(\rho_{n}^{3}\varphi(\lambda_{k})\right) \\
& = & \gamma_{5} \left(|u_{1k}|^{2}+\lambda_{k}^{2}|u_{0k}|^{2}\right)
\exp\left(\rho_{n}^{3}\varphi(\lambda_{k})\right).
\end{eqnarray*}
By assumption (\ref{hp:data-dg}) we have therefore that
$$\sum_{\lambda_{k}>\rho_{n}}\lambda_{k}\left(
|u_{k}'(t)|^{2}+\lambda_{k}^{2}|u_{k}(t)|^{2}\right)\leq \gamma_{5}
\sum_{\lambda_{k}>\rho_{n}}\lambda_{k} \left(|u_{1k}|^{2}+
\lambda_{k}^{2}|u_{0k}|^{2}\right)
\exp\left(\rho_{n}^{3}\varphi(\lambda_{k})\right)\leq 2\gamma_{5}\rho_{n}$$
for every $t\in[0,S]$, and in particular
\begin{eqnarray*}
\left|\sum_{\lambda_{k}>\rho_{n}}\lambda_{k}^{2}u_{k}'(S)\cdot u_{k}(S)\right|
& \leq & \sum_{\lambda_{k}>\rho_{n}}\lambda_{k}^{2}|u_{k}'(S)|\cdot|u_{k}(S)|
\\
& \leq & \frac{1}{2} \sum_{\lambda_{k}>\rho_{n}}\left(
\lambda_{k}|u_{k}'(S)|^{2}+\lambda_{k}^{3}|u_{k}(S)|^{2}\right) \\
& \leq & \gamma_{5}\rho_{n}.
\end{eqnarray*}
On the other hand, by (\ref{est:u'}) and the
first condition in (\ref{defn:rhon-w}) we have that
$$|u(t)|\leq|u_{0}|+S\cdot\max\{|u'(t)|:t\in[0,S]\}
\leq|u_{0}|+T\cdot\sqrt{\mathcal{H}(0)}
\leq \left(|u_{0}|+1
\right)\rho_{n}^{1/2}$$
for every $t\in[0,S]$, hence
$$\left|\sum_{\lambda_{k}\leq\rho_{n}}\lambda_{k}^{2}u_{k}'(t) u_{k}(t)\right|
\leq\rho_{n}^{2}\left|\langle u(t),u'(t)\rangle\right|\leq
\rho_{n}^{2}|u(t)|\cdot|u'(t)| \leq \rho_{n}^{5/2}\left(|u_{0}|+1
\right)\sqrt{\mathcal{H}(0)}$$
for every $t\in[0,S]$. In particular for $t=S$ we have that
\begin{eqnarray*}
\left|\langle A^{3/4}u(S),A^{1/4}u'(S) \rangle\right| & \leq &
\left|\sum_{\lambda_{k}\leq\rho_{n}}\lambda_{k}^{2}u_{k}'(S)\cdot u_{k}(S)\right|+
\left|\sum_{\lambda_{k}>\rho_{n}}\lambda_{k}^{2}u_{k}'(S)\cdot u_{k}(S)\right| \\
\noalign{\vspace{1ex}}
& \leq & \rho_{n}^{5/2}\left(|u_{0}|+1
\right)\sqrt{\mathcal{H}(0)}+\gamma_{5}\rho_{n} \\
\noalign{\vspace{1ex}}
& < & H_{2}\rho_{n}^{5/2}.
\end{eqnarray*}
This contradicts (\ref{eq:S-nec-w}).
\subsection{Proof of Proposition~\ref{prop:sum}}
Let us recursively define a sequence $\rho_{n}$ as follows. First of
all we set $\rho_{0}=0$. Let us assume that a term $\rho_{n}$ has been
defined. Assumption (\ref{hp:prop}) implies in particular that
$$ (u_{0},u_{1})\in
\mathcal{G}_{\varphi,r,3/4}(A)\times\mathcal{G}_{\varphi,r,1/4}(A)$$
with $r=\rho_{n}^{\beta}$, hence
$$\sum_{k=1}^{\infty}u_{0k}^{2}\lambda_{k}^{3}\exp\left(
\rho_{n}^{\beta}\varphi(\lambda_{k})\right)<+\infty,
\hspace{3em}
\sum_{k=1}^{\infty}u_{1k}^{2}\lambda_{k}\exp\left(
\rho_{n}^{\beta}\varphi(\lambda_{k})\right)<+\infty.$$
We can therefore choose $\rho_{n+1}$ big enough in such a way that
$\rho_{n+1}\geq\rho_{n}+1$, and
$$\sum_{\lambda_{k}\geq\rho_{n+1}}u_{0k}^{2}\lambda_{k}^{3}\exp\left(
\rho_{n}^{\beta}\varphi(\lambda_{k})\right)\leq\rho_{n},
\hspace{3em}
\sum_{\lambda_{k}\geq\rho_{n+1}}u_{1k}^{2}\lambda_{k}\exp\left(
\rho_{n}^{\beta}\varphi(\lambda_{k})\right)\leq\rho_{n}.$$
Let $\overline{u}_{0}$ and $\overline{u}_{1}$ be the elements of $H$
whose Fourier components are given by
$$\overline{u}_{0k}:=\left\{
\begin{array}{ll}
0 & \mbox{if }\rho_{2k}\leq\lambda_{k}<\rho_{2k+1}, \\
u_{0k} & \mbox{if }\rho_{2k+1}\leq\lambda_{k}<\rho_{2k+2},
\end{array}
\right.
\hspace{1em}
\overline{u}_{1k}:=\left\{\begin{array}{ll}
0 & \mbox{if }\rho_{2k}\leq\lambda_{k}<\rho_{2k+1}, \\
u_{1k} & \mbox{if }\rho_{2k+1}\leq\lambda_{k}<\rho_{2k+2},
\end{array}
\right.$$
and let $\overline{\rho}_{n}:=\rho_{2n}$. We claim that
(\ref{th:prop-1}) holds true. Indeed for every $n\in{\mathbb{N}}$ we have that
\begin{eqnarray*}
\sum_{\lambda_{k}>\overline{\rho}_{n}}
\overline{u}_{0k}^{2}\lambda_{k}^{3}\exp\left(
\overline{\rho}_{n}^{\beta}\varphi(\lambda_{k})\right) & = &
\sum_{\lambda_{k}>\rho_{2n}}\overline{u}_{0k}^{2}\lambda_{k}^{3}
\exp\left( \rho_{2n}^{\beta}\varphi(\lambda_{k})\right)\\
& = & \sum_{\lambda_{k}\geq\rho_{2n+1}}\overline{u}_{0k}^{2}\lambda_{k}^{3}
\exp\left( \rho_{2n}^{\beta}\varphi(\lambda_{k})\right) \\
& \leq & \sum_{\lambda_{k}\geq\rho_{2n+1}}u_{0k}^{2}\lambda_{k}^{3}
\exp\left( \rho_{2n}^{\beta}\varphi(\lambda_{k})\right) \\
\noalign{\vspace{1ex}}
& \leq & \rho_{2n}=\overline{\rho}_{n},
\end{eqnarray*}
and similarly for $\overline{u}_{1}$. Note that in the second equality
we exploited the spectral gap of $\overline{u}_{0}$, whose
components are equal to zero in the range $(\rho_{2n},\rho_{2n+1})$.
In the same way we can show that
$\widehat{u}_{0}:=u_{0}-\overline{u}_{0}$ and
$\widehat{u}_{1}:=u_{1}-\overline{u}_{1}$ satisfy (\ref{th:prop-2})
with $\widehat{\rho}_{n}:=\rho_{2n+1}$.
\qed
\label{NumeroPagine}
\end{document} |
\begin{document}
\thispagestyle{empty}
\title[A finiteness property for Chebyshev polynomials]{A finiteness
property for preperiodic points of Chebyshev polynomials}
\author{Su-Ion Ih}
\address{Su-Ion Ih\\
Department of Mathematics \\
University of Colorado at Boulder \\
Campus Box 395 \\
Boulder, CO 80309-0395 \\
USA}
\email{[email protected]}
\author{Thomas Tucker}
\address{Thomas Tucker\\
Department of Mathematics\\
University of Rochester\\
Rochester, NY 14627}
\email{[email protected]}
\subjclass[2000]{Primary 11G05, 11G35, 14G05, 37F10, Secondary
11J86, 11J71, 11G50}
\keywords{Chebyshev polynomials, equidistribution, integral points, preperiodic points}
\thanks{The second author was partially supported by NSA
Grant 06G-067.}
\begin{abstract}
Let $K$ be a number field with algebraic closure $\overline K$, let
$S$ be a finite set of places of $K$ containing the archimedean
places, and let $\varphi$ be a Chebyshev polynomial. We prove that
if $\alpha \in \overline K$ is not preperiodic, then there
are only finitely many preperiodic points $\beta \in \overline K$
which are $S$-integral with respect to $\alpha$.
\end{abstract}
\maketitle
\section{Introduction}
Let $K$ be a number field with algebraic closure $\overline K$, let
$S$ be a finite set of places of $K$ containing the archimedean
places, and let $\alpha, \beta \in {\overline K}$. We say that $\beta$ is
$S$-integral relative to $\alpha$ if no conjugate of $\beta$ meets any
conjugate of $\alpha$ at primes lying outside of $S$. More precisely,
this means that for any prime $v \notin S$ and any $K$-embeddings
$\sigma:K(\alpha) \longrightarrow {\overline K_v}$ and $\tau: K(\alpha) \longrightarrow
{\overline K_v}$, we have
\begin{equation*}
\left\{ \begin{array}{ll}
|\sigma(\beta)-\tau(\alpha)|_v \ge 1 & \text{if $|\tau(\alpha)|_v \le 1$\ ; \text{and} } \\
|\sigma(\beta)|_v \le 1 & \text{if $|\tau(\alpha)|_v > 1$\ .}
\end{array} \right.
\end{equation*}
Note that this definition extends naturally to the case where $\alpha$
is the point at infinity. We say that $\beta$ is $S$-integral
relative to the point at infinity if $|\sigma(\beta)|_v \leq 1$ for
all $v \notin S$ and all $K$-embeddings $\sigma:K(\beta) \longrightarrow {\overline
K}_v$. Thus, our $S$-integral points coincide with the usual
$S$-integers when $\alpha$ is the point at infinity. \vskip .05 in
In \cite{BIR}, the following conjecture is made.
\begin{conj}[Ih]\label{Ih conj}
Let $K$ be a number field, and
let $S$ be a finite set of places of $K$ that contains all the
archimedean places. If $\varphi: \mathbb{P}^1_K \longrightarrow \mathbb{P}^1_K$ is a
nonconstant rational function of degree $d > 1$ and $\alpha \in
\mathbb{P}^1(K)$ is non-preperiodic for $\varphi$, then there are at most
finitely many preperiodic points $\beta \in \mathbb{P}^1({\overline K})$ that are
$S$-integral with respect to $\alpha$.
\end{conj}
In \cite{BIR}, it is proved that this conjecture holds when $\varphi$
is a multiplication-by-$n$ (for $n \geq 2$) map on an $\mathbb G_m$ or
on an elliptic curve. Recently, Petsche \cite{clay} has proved the
conjecture in the case where the point $\alpha$ is in the $v$-adic
Fatou set at every place of $K$. A similar problem, dealing with
points in inverse images of a single point rather than with
preperiodic points, has been treated by Sookdeo \cite{vijay}.
In this paper, we show that this conjecture is true for Chebyshev
polynomials. That is, we prove the following, where we note that
$\alpha$ may lie on the Julia set.
\begin{thm}\label{main} Let $\varphi$ be a Chebyshev polynomial.
Let $K$ be a number field, and let $S$ be a finite set of places of
$K$, containing all the archimedean places. Suppose that $\alpha \in K$
is not of type $\zeta + \zeta^{-1}$ for any root of unity $\zeta$.
Then the following set
$$ {\mathbb A^1}_{\varphi, \alpha, S} :=
\{ x \in {\overline {\mathbb Q}} : x \;
\textup{{\em{is $S$-integral with respect to}}}
\; \alpha \; \textup{{\em{and is}}} \;
\textup{${\varphi}$-{\em{preperiodic}}} \}
$$
is finite.
\end{thm}
This will follow easily from the following theorem.
\begin{thm}\label{comp}
Let $( x_n )_{n=1}^\infty$ be a nonrepeating sequence of preperiodic
points for a Chebyshev polynomial $\varphi$. Then for any non-preperiodic $\alpha$
in a number field $K$ and any place $v$ of $K$, we have
\begin{equation}\label{v}
\hat{h}_v(\alpha) = \lim_{n \to \infty} \frac{1}{ [ K(x_n) : K ] } \sum_{\sigma:
K(x_n)/K \hookrightarrow \overline{K}_v } \log | \sigma (x_n) - \alpha
|_v,
\end{equation}
where $\sigma: K(x_n)/K \hookrightarrow \overline K_v$ means
that $\sigma$ is an embedding of $K(x_n)$ into $\overline K_v$,
fixing $K$, here and in what follows.
\end{thm}
Indeed, we will prove Theorem~\ref{comp} slightly more generally
for any $\alpha \in K$ if $v \nmid \infty$, while for any $\alpha \neq$
$-2$, 0, or 2 if $v | \infty$. (Note that the proof of Proposition~\ref{most}
actually works for any $\alpha \in [-2, 2] - \{-2, 0, 2 \}$.)
The proof of Theorem~\ref{main} is then similar to the proof for
$\mathbb G_m$ given in \cite{BIR}.
Specifically, the proof of Theorem~\ref{comp} breaks down into various
cases, depending on whether or not the place $v$ is finite or infinite
and whether or not the point $\alpha$ is in the Julia set at $v$. The
fact that the invariant measure for Chebyshev polynomials is not
uniform on $[-2,2]$ provides a slight twist.
The proof of Theorem~\ref{comp} is fairly simple when $v$ is
nonarchimedean. Likewise, when $v$ is archimedean but $\alpha$ is not
in the Julia set at $v$, the proof follows almost immediately from
an equidistribution result for continuous functions (see
\cite{bilu}). When $v$ is archimedean and $\alpha$ is in the Julia
set at $v$, however, the proof becomes quite a bit more difficult. In
particular, it is necessary to use A.~Baker's theorem on linear forms
in logarithms (see \cite{Baker}). We note that in all cases, our
techniques are similar to those of \cite{BIR}.
The derivation of Theorem~\ref{main} from Theorem~\ref{comp} goes as
follows: suppose, to the contrary, that Theorem~\ref{main} were
false. Then we may further assume that the sequence
$(x_n)_{n=1}^\infty$ in Theorem~\ref{comp} is a sequence of
$\alpha$-integral points. Then we have
\begin{eqnarray}
0
\ & < & \
\widehat h(\alpha)
\nonumber\\
\ & = & \
\sum_{\text{places} \ v \ \text{of} \ K }
\widehat h_v (\alpha)
\nonumber\\
\ & = & \
\sum_{\text{places} \ v \ \text{of} \ K }
\lim_{n \to \infty} \frac{1}{ [ K(x_n) : K ] }
\sum_{\sigma: K(x_n)/K \hookrightarrow \overline{K}_v } \log | \sigma (x_n) - \alpha |_v
\nonumber\\
\ & = &
\lim_{n \to \infty} \frac{1}{ [ K(x_n) : K ] }
\sum_{\text{places} \ v \ \text{of} \ K }
\sum_{\sigma:
K(x_n)/K \hookrightarrow \overline{K}_v } \log | \sigma (x_n) - \alpha
|_v
\nonumber\\
\ & = & \
0,
\nonumber
\end{eqnarray}
where the equality on the third line comes from Theorem~\ref{comp},
the integrality hypothesis on the $x_n$ enables us to switch
$\sum_{\text{places} \ v \ \text{of} \ K }$ and $\lim_{n \to \infty}$
to get the equality on the fourth line, and the last equality is
immediate from the product formula. This is a contradiction.
\section{Preliminaries}
\subsection{The Chebyshev polynomials}
\begin{defn}
$$
P_1 (z) := z, \;\;\; P_2 (z) := z^2 - 2; \;\; \textup{and}
$$
$$
P_{m+1} (z) + P_{m-1} (z) = z P_{m} (z) \;\; \textup{for all} \;
m \geq 2.
$$
Then a \emph{Chebyshev polynomial} is defined to be any of the $P_{m}$
($m \geq 2$).
\end{defn}
These polynomials satisfy the following properties (see \cite[Section
7]{Milnor}).
\begin{enumerate}
\item For any $m \geq 1$,
$P_m (\omega +\omega^{-1}) = \omega^m +\omega^{-m}$, equivalently
$P_m (2 \cos \theta) = 2 \cos (m \theta)$,
where $\omega \in \mathbb C^{\times}$ and $\theta \in \mathbb R$.
\item For any $\ell, m \geq 1$, $P_\ell \circ P_m =
P_{\ell m}$.
\item For any $m \geq 3$, $P_m$ has $m-1$ distinct critical
points in the finite plane, but only two critical values, i.e., $\pm 2$.
\end{enumerate}
\subsection{The dynamical systems of Chebyshev polynomials}
\begin{defn}
Let $\varphi$ be a Chebyshev polynomial.
The dynamical system induced by
$\varphi$ on ${\mathbb P}^1$ (or ${\mathbb A}^1$) is
called the \emph{(Chebyshev) dynamical system} with respect to $\varphi$
or the \emph{$\varphi$-dynamical system}. If $\varphi$ is clearly
understood from the context, we simply call it a
\emph{Chebyshev dynamical system} without reference to $\varphi$.
\end{defn}
\begin{prop}
For any Chebyshev polynomial $\varphi$,
the Julia set of the dynamical system induced by
$\varphi$ (resp.~$- \varphi$) is \textup{[$-2$, 2]}, which is
naturally identified as a subset of the real line on
the complex plane.
\end{prop}
\noindent \textbf{Proof.} See \cite[Section 7]{Milnor}.
\begin{prop}~\label{prop;preper}
Let $\varphi$ be a Chebyshev polynomial. Then the finite
preperiodic points of the $\varphi$-dynamical system are
the elements of ${\overline K}$ of the form $\zeta + \zeta^{-1}$,
where $\zeta$ is a root of
unity.
\end{prop}
\begin{proof}
Take an element $z \in {\overline K}$. Then there is some $a \in {\overline K}$ such
that $z = a + \frac{1}{a}$, as can be seen by finding $a$ such that
$a^2 - az + 1 = 0$. Note that $a$ cannot be zero. Now if $a$ is not
a root of unity, then there is some place $w$ of $K(a)$ such that
$|a|_w > 1$. Thus, letting $m = \deg \varphi$ $(\geq 2)$, we have
$$ |\varphi^k(z)|_w =
\left| a^{m^k} + \frac{1}{a^{m^k}} \right|_w > |a|_w^{m^k} - 1,$$
so $|\varphi^k(z)|_w$ goes to infinity as $k \to \infty$. Hence $z$
cannot be preperiodic.
Conversely, if $z = \zeta + \zeta^{-1}$, where $\zeta$ is a root of
unity then there are some positive integers $j \not= k$ such that
$\zeta^{m^k} = \zeta^{m^j}$, which gives
$$ \varphi^k(z) = \zeta^{m^k} + \frac{1}{\zeta^{m^k}} = \varphi^j(z),$$
so $z$ is preperiodic for $\varphi$.
\end{proof}
\subsection{The canonical height attached to a dynamical system}
Let $\varphi$ be a Chebyshev polynomial of degree $m$ and let $v$ be a
place of a number field $K$. We define the local canonical height
$\hat{h}_v(\alpha)$ of a point $\alpha \in {\overline K}_v$ associated to
$\varphi$ at any place $v$ of $K$ as
\begin{equation}\label{local}
\hat{h}_v(\alpha) = \lim_{k \to \infty} \frac{\log \max
(|\varphi^k(\alpha)|_v, 1)}{m^k}.
\end{equation}
\noindent This local canonical height has the property that
$$
\hat{h}_v(\varphi(\alpha)) = m \hat{h}_v(\alpha)$$
for any $\alpha \in
{\overline K}_v$ (see \cite{CG} for details). Note that if $v$ is a
nonarchimedean place, then the Chebyshev dynamical system has good
reduction at $v$ and we have $\hat{h}_v(\alpha) = \log \max ( |\alpha|_v, 1)$.
When $\alpha \in {\overline K}$, we have
\begin{equation}\label{global}
\hat{h}(\alpha) = \sum_{\text{places $v$ of $K$}} \hat{h}_v(\alpha),
\end{equation}
where the left-hand side is the (global) canonical height of $\alpha$
associated to $\varphi$.
In the case of the places $v \mid \infty$, we will use the
$\varphi$-invariant measure $\mu_v := \mu_{v, \varphi}$ (see \cite{L}) for
$\varphi$ to calculate these local heights. It is worth noticing that this
is not a uniform measure on $[-2, 2]$, unlike in the case of the
dynamical system on ${\mathbb P}^1$ with respect to the map $z \mapsto
z^2$, in which case the measure at archimedean places is the uniform
probability Haar measure on the unit circle centered at the origin
(see \cite{bilu}). The measure has more mass toward the end/boundary
points $\pm 2$ of the Julia set $[-2, 2]$. Further, the kernel ${
\frac{1}{\pi} } { \frac{1}{ \sqrt {4-x^2} } }$ has singularities at
the extreme points $\pm 2$.
When $v | \infty$, we have the following formula
for the local height
at $v$ (see \cite[Appendix B]{PST} or \cite{FR2}) for any $\alpha \in \mathbb{C}$:
\begin{equation}\label{gen}
\hat{h}_{v} (\alpha)= \int_{\mathbb{C}} \log | z
- \alpha |_v \; d \mu (z),
\end{equation}
where $\mu := \mu_{v, \varphi}$ is the unique $\varphi$-invariant measure
with support on the Julia
set of $\varphi$ at $v$.
Since any root of unity $\xi_k$, say $e^{2 \pi i/k}$, is preperiodic
for the map sending $z$ to $z^m$, we see that $\xi_k + \xi_k^{-1} = 2
\cos(2\pi/k)$ is preperiodic for $\varphi$. Now, the preperiodic points
of $\varphi$ are equidistributed with respect to $\mu$ (see \cite{L, BH}),
so for any continuous function $f$ on $[-2,2]$ we have
$$
\lim_{k \to \infty} \frac{1}{k} \sum_{j=1}^k f(2 \cos(j \pi /k)) = \int_\mathbb{C} f
\, d \mu.$$
Thus $d \mu$ is the push-forward of the uniform distribution on
$[0, \pi]$ under the map $\theta \mapsto 2 \cos \theta$, thus
$$ d \mu(x) = \frac{1}{\pi} \frac{d}{dx} \cos^{-1} (x/2) \, dx =
\frac{1}{\pi} \frac{1}{\sqrt{4 - x^2}} \, dx.$$
Thus, \eqref{gen} becomes
\begin{equation}\label{from-PST}
\hat{h}_{v} (\alpha) = {
\frac{1}{\pi} } \int_{-2}^{2} { \frac{1}{ \sqrt {4-x^2} } } \log | x
- \alpha |_v \; dx
\end{equation}
for any $\alpha \in \mathbb{C}$.
\section{Archimedean places}
\subsection{A counting lemma}
Let $K$ be a number field, and
let $I \subset [-2, 2]$ be an interval. For any root of unity
$\zeta \in \overline K$, write $x_{\zeta} := \zeta + \zeta^{-1}$. Let
$$
\mathcal N ( x_{\zeta}, I ) \; := \;
\# \{ \sigma (x_{\zeta}) \in I : \sigma \in
\mathrm{Gal} \big ( K (x_{\zeta})/K \big ) \}.
$$
\begin{lem}~\label{lem;counting}
Keep notation just above. Let $-2 \leq c < d \leq 2$, and let
$I := (c, d]$ be an interval.
Then for any real $0 < \gamma < 1$ and
any root of unity $\zeta \in \overline K$,
\begin{equation}\label{first}
\mathcal N ( x_{\zeta}, I )
\; = \;
{ \frac{[ K(x_{\zeta}) : K ]}{\pi} }
\Big ( \cos^{-1} { \frac{c}{2} } - \cos^{-1} { \frac{d}{2} } \Big )
\; + \; O_{\gamma} \big ([ K(x_{\zeta}) : K ]^{\gamma} \big )
\end{equation}
where $\cos^{-1}: [-1, 1] \rightarrow [0, \pi]$ is the $\arccos$
function. In particular, when $-2 < c < d < 2$, we may write
\begin{equation}\label{M}
\mathcal N ( x_{\zeta}, I ) \leq M [ K(x_{\zeta}) : K ](d-c)
+ O_{\gamma} \big ([ K(x_{\zeta}) : K ]^{\gamma} \big )
\end{equation}
where $M := M_{c,d}$
is the supremum of $\frac{1}{\sqrt{4 - x^2}}$ on $(c, d]$.
\end{lem}
\begin{proof}
Write $\zeta = e^{2\pi i { \frac{a}{N} } }$, where $N$ is a positive
integer and $1 \leq a \leq N$. Then note
\begin{eqnarray}
x_{\zeta} \in I
\; &\Longleftrightarrow& \;
e^{2\pi i { \frac{a}{N} } } + e^{-2\pi i { \frac{a}{N} } } \in I
\nonumber\\
\; &\Longleftrightarrow& \;
\cos \Big ( 2 \pi { \frac{a}{N} } \Big ) \in
\Big ( { \frac{c}{2} }, { \frac{d}{2} } \Big ] \nonumber\\
\; &\Longleftrightarrow& \;
a \in { \frac{N}{2 \pi} }
\Big [ \cos^{-1} { \frac{d}{2} }, \cos^{-1} { \frac{c}{2} } \Big ).
\nonumber
\end{eqnarray}
Then \eqref{first} follows immediately from
\cite[Prop. 1.3]{BIR}. To see that \eqref{M} holds, note that the
derivative of the function $\cos^{-1} (x/2)$ is $\frac{1}{\sqrt{4 -
x^2}}$. Thus, \eqref{M} is a consequence of \eqref{first}
together with the Mean Value Theorem from calculus.
\end{proof}
Remark. In the above, more precisely, we may define $M$ to be
the supremum of $\frac{1}{\pi} \frac{1}{\sqrt{4 - x^2}}$ on $(c, d]$.
However, this difference will not matter for our later purpose. So
we will keep the above choice for $M$.
\subsection{Baker's lower bounds for linear forms in logarithms}
Here we state the theorem on
Baker's lower bounds for linear forms in logarithms
(see \cite[Thm.~3.1, p.~22]{Baker}).
\begin{thm}[Baker]
\label{thm;baker}
Suppose that
$e^{2 \pi i \theta_0} \in \overline {\mathbb Q}$.
Then there exists a constant $C := C(\theta_0) > 0$ such that
for any coprime $a, N \in \mathbb Z$ ($N \neq 0$ or $\pm 1$)
with ${ \frac{a}{N} } \neq \theta_0$,
$$
\Big | { \frac{a}{N} } - \theta_0 \Big | \; \geq \;
M^{-C}
$$
where $M := \max ( |a|, |N| )$.
\end{thm}
\begin{proof}
\begin{eqnarray}
\Big | { \frac{a}{N} } - \theta_0 \Big |
\; & = & \;
{ \frac{1}{2 \pi} }
\Big | { \frac{a}{N} } \cdot 2 \pi i - 2 \pi i \theta_0 \Big |. \nonumber
\end{eqnarray}
Then apply Baker's theorem to the absolute value of the right hand side
and adjust the resulting constant for ${ \frac{1} {2 \pi} }$. (Also
recall that $N \neq 0$ or $\pm 1$.)
\end{proof}
\section{The main theorem and its variant}
\subsection{The main theorem and its proof}
We will prove Theorem~\ref{comp} by breaking it into several cases. We
begin with the case where the place $v$ is finite. For the sake of
precision, we will state when we need $\alpha$ to be in $K$ and when it
suffices that it be in ${\overline K}_v$.
\begin{prop}\label{prev}
Let $(\zeta_n)_{n=1}^{\infty}$ be a sequence of distinct roots of
unity, and write $x_n := \zeta_n + {\zeta_n}^{-1}$ for any $n \geq
1$. If $v$ is finite, then for any $\alpha \in {\overline K}_v$, we have
\begin{equation}\label{finite}
\hat{h}_v(\alpha) = \lim_{n \rightarrow \infty}
{ \frac{1} { [ K(x_n) : K ] } } \sum_{\sigma:
K(x_n)/K \hookrightarrow \overline{K}_v } \log | \sigma (x_n) - \alpha
|_v.
\end{equation}
\end{prop}
\begin{proof}
If $| \alpha |_v > 1$, then $| \sigma (x_n) - \alpha |_v = |\alpha
|_v$. Thus, \eqref{finite} is immediate. Now, suppose that $| \alpha |_v \leq 1$.
Let $r < 1$ be a real
number. Let $x_m$ and $x_n$ satisfy that $| x_m - \alpha |_v \leq r$
and $| x_n - \alpha |_v \leq r$. Then observe
\begin{eqnarray}
r
& \; \geq \; & | (x_m - \alpha) - (x_n - \alpha) |_v \nonumber\\
& \; = \; & | x_m - x_n |_v \nonumber\\
& \; = \; &
\Big | (\zeta_m - \zeta_n) - { \frac{\zeta_m - \zeta_n}{\zeta_m \zeta_n}}
\Big |_v
\nonumber\\
& \; = \; & | \zeta_m - \zeta_n |_v \; | 1 - (\zeta_m \zeta_n)^{-1} |_v
\nonumber\\
& \; = \; & | 1 - \zeta_m^{-1} \zeta_n |_v \;
| 1 - (\zeta_m \zeta_n)^{-1} |_v. \nonumber
\end{eqnarray}
Hence either $| 1 - \zeta_m^{-1} \zeta_n |_v \leq \sqrt r$ or
$| 1 - (\zeta_m \zeta_n)^{-1} |_v \leq \sqrt r$.
In the first (resp.~second) case it follows that $\zeta_m^{-1} \zeta_n$
(resp.~$(\zeta_m \zeta_n)^{-1}$)
must have order equal to a power of the prime number
$\in {\mathbb Z}$ lying below $v$, and
that there are only finitely many choices for
$\zeta_m^{-1} \zeta_n$ (resp. $(\zeta_m \zeta_n)^{-1}$) in the first
(resp.~second) case. Thus, for any real $r < 1$,
there are only finitely many indices $n \geq 1$
such that $| x_n - \alpha |_v \leq r$, which immediately implies the
desired convergence in this case.
\end{proof}
We now treat the archimedean $v$ for which $\alpha$ is outside the Julia
set at $v$.
\begin{prop}
Let $x_n$ be as in Proposition~\ref{prev}. If $v$ is archimedean, then
for any $\alpha \in \mathbb{C} - [-2,2]$, we have
$$\hat{h}_v(\alpha) = \lim_{n \rightarrow \infty}
{ \frac{1} { [ K(x_n) : K ] } } \sum_{\sigma:
K(x_n)/K \hookrightarrow \overline{K}_v } \log | \sigma (x_n) - \alpha
|_v. $$
\end{prop}
\begin{proof}
From \eqref{gen}, we have
$$
\int_{\mathbb{C}} \log |z - \alpha|_v \, d \mu_v (z)= \hat{h}_v(\alpha),$$
where $\mu_v := \mu_{v, \varphi}$ is the invariant measure for
$\varphi$ at $v$. This
measure is supported on $[-2,2]$, so if $g$ is a function on $\mathbb{C}$
that agrees with $\log |z - \alpha|$ on $[-2,2]$ we have
\begin{equation}\label{agree}
\int_{\mathbb{C}} g(z) d \mu_v(z)= \hat{h}_v(\alpha).
\end{equation}
Let $\epsilon = \min_{w \in [-2,2]} |w - \alpha|$ (note that $\epsilon
\not= 0$ since $\alpha \notin [-2,2]$) and define $g(z)$ as
$$ g(z) = \min \Big(\log \max ( | z - \alpha |, \epsilon ), \log
(|\alpha| + 2) \Big).$$
Then $g$ is continuous and bounded on all of $\mathbb{C}$ and agrees with
$\log |z - \alpha|$ on $[-2,2]$. By \cite[Theorem 1.1]{bilu}, we have
$$ \int_{\mathbb{C}} g(z) \, d \mu_v(z) = \lim_{n \rightarrow \infty}
{ \frac{1} { [ K(x_n) : K ] } } \sum_{\sigma:
K(x_n)/K \hookrightarrow \overline{K}_v } g(\sigma(x_n)).$$
Since all $x_n \in [-2,2]$, this finishes the proof, using \eqref{agree}.
\end{proof}
Now, we come to the most difficult case.
\begin{prop}~\label{most}
Let $x_n$ be as in
Proposition~\ref{prev}.
If $v | \infty$ and $\alpha \in [-2,2]$ is not preperiodic, then we have
$$\hat{h}_v(\alpha) = \lim_{n \rightarrow \infty} \frac{1}{ [ K(x_n) : K
] } \sum_{\sigma: K(x_n)/K \hookrightarrow \overline{K}_v } \log |
\sigma (x_n) - \alpha |_v. $$
\end{prop}
\begin{proof}
We may assume that $\alpha \in K$.
If $v$ is archimedean and $\alpha \in K$ is
in $[-2,2]$, then we have $\hat{h}_v(\alpha) = 0$. This follows from
the fact that $\varphi$ maps $[-2,2]$ to itself, so if $\alpha \in
[-2,2]$, then $|\varphi^n(\alpha)|_v$ is bounded for all $n$, so
$\hat{h}_v(\alpha) = 0$ by \eqref{local}.
Note $|x|_v = |\tau (x)|$ for all $x \in \mathbb{C}$, where $\tau: K(x)/K
\hookrightarrow \mathbb{C}$ is associated to $v$ and $| \cdot |$ is the
usual absolute value on $\mathbb{C}$. To simplify our notation, we will
fix one $v | \infty$, suppress $v$ in the notation of the absolute
value, and use $| \cdot |$ according to this
observation, i.e., without loss of generality we will prove this
theorem for the place $v$ equal to the usual absolute value ($|z|
= \sqrt{z \overline z}$, $z \in \mathbb{C}$). However, we will keep $v$ in
the notation of the local height $\hat{h}_v$ to avoid any confusion with
the global height $\hat{h}$.
We may write
$
\alpha = e^{2 \pi i \theta_0} + e^{-2 \pi i \theta_0}
= 2 \cos (2 \pi \theta_0),
$
where $\theta_0 \in (- { \frac{1} {2} }, { \frac{1} {2} }]$. Note
$\alpha$ cannot be equal to $-2$, $2$, or 0 since we assume that $\alpha$
is not preperiodic.
Note that
$\int_{0}^{\epsilon} \log \big ( { \frac{t} {\epsilon} } \big ) dt =
-\epsilon$ for any $\epsilon > 0$.
Write
\begin{eqnarray}
x & \; = \; &
e^{2\pi i {\theta }} + e^{-2\pi i {\theta }} \;\; \;\; = \;
2 \cos (2\pi {\theta}); \;\;\;\;\;\;
\textup{and} \nonumber\\
x_n & \; = \; &
e^{2\pi i { \frac{a}{N} } } + e^{-2\pi i { \frac{a}{N} } } \; = \;
2 \cos \Big (2\pi { \frac{a}{N} } \Big ) \nonumber
\end{eqnarray}
where $a$ and $N (\neq 0)$ are integers (depending on $n \geq 1$), and
$\big | { \frac{a}{N} } \big | \leq 1$.
We recall that $\hat{h}_v(\alpha) = 0$ since for any $\alpha$ in $[-2,2]$,
the quantity $|P_m^k(\alpha)|$ is bounded for all $k \geq 1$. Thus, we have
\begin{equation}\label{delta}
\frac{1}{\pi} \int_{-2}^{2} { \frac{1}{ \sqrt
{4-x^2} } } \log | x - \alpha | \ dx = 0.
\end{equation}
Hence it will suffice to show, for all $n \gg 1$, that the quantity
\begin{equation} \label{quant}
\frac{1}{ [ K(x_n) : K ] } \Bigg |
\sum_{\sigma: K(x_n)/K \hookrightarrow \mathbb{C}} \log
|\sigma(x_n) - \alpha| \Bigg |
\end{equation}
can be made sufficiently small.
Fix $\epsilon > 0$.
By \eqref{delta}, we have
\begin{equation}\label{epsi}
\Bigg | \int_{\alpha - \delta}^{\alpha + \delta} { \frac{1}{ \sqrt
{4-x^2} } } \log | x - \alpha | \ dx \Bigg | < \epsilon,
\end{equation}
i.e., sufficiently small for all sufficiently small $\delta > 0$.
Let $g_\delta (z) = \log \max (|z - \alpha|, \delta)$. By
\eqref{epsi} and the fact that $0 > g_\delta(x) > \log |x - \alpha|$
for $x \in [\alpha - \delta, \alpha + \delta]$, we see that
\begin{equation}\label{epsi2}
\Bigg | \int_{-2}^{2} { \frac{1}{ \sqrt
{4-x^2} } } g_\delta(x) \ dx \Bigg | < \epsilon.
\end{equation}
By the equidistribution theorem of Baker/Rumely
(\cite{BREQUI}), Chambert-Loir (\cite{CL}) and Favre/
Rivera-Letelier
(\cite{FR2}), we see that for all sufficiently large $n$, the quantity
\begin{equation}\label{ep3}
\left| \Big( \frac{1}{\pi} \int_{-2}^{2} \frac{1}{ \sqrt {4-x^2}}
g_\delta(x) \; dx \Big) - \Big( \frac{1}{ [ K(x_n) : K ] }
\sum_{\sigma: K(x_n)/K \hookrightarrow \mathbb{C} }
g_\delta(\sigma(x_n)) \Big) \right|
\end{equation}
is sufficiently small. Thus, by \eqref{epsi2} it suffices to show
that
\begin{equation*}
\frac{1}{ [ K(x_n) : K ] }
\Bigg |
\sum_{\sigma: K(x_n)/K
\hookrightarrow \mathbb{C} } \big ( \log | \sigma(x_n) - \alpha | -
g_\delta(\sigma(x_n)) \big ) \Bigg |
\end{equation*}
is sufficiently small for all $n \gg 1$ and all sufficiently small
$\delta > 0$. Since $\log | \sigma(x_n) - \alpha | =
g_\delta(\sigma(x_n))$ outside of $[\alpha-\delta, \alpha+\delta]$, it
in turn suffices to show that
\begin{equation}\label{a}
\frac{1}{ [ K(x_n) : K ] }
\Bigg |
\sum_{\substack{\sigma: K(x_n)/K
\hookrightarrow \mathbb{C}\\ \sigma(x_n) \in [\alpha - \delta, \alpha +
\delta] }}
\big ( \log | \sigma (x_n) - \alpha | - g_\delta(\sigma(x_n)) \big )
\Bigg |
\end{equation}
is sufficiently small for all $n \gg 1$ and all sufficiently
small $\delta > 0$.
Now, when $\delta > 0$ is small and $x$ is in $[\alpha - \delta,
\alpha + \delta]$, we have $0 > g_\delta(x) \geq \log |x - \alpha|$
and the quantity \eqref{a} is bounded above by
\begin{equation}\label{b}
\frac{1}{ [ K(x_n) : K ] }
\Bigg |
\sum_{\substack{\sigma: K(x_n)/K
\hookrightarrow \mathbb{C}\\ \sigma(x_n) \in [\alpha - \delta, \alpha +
\delta] }}
\log | \sigma (x_n) - \alpha |
\Bigg |.
\end{equation}
Hence, finally it suffices to show that \eqref{b} is sufficiently small
whenever $n$ is sufficiently large and $\delta > 0$ is
sufficiently small.
If we choose $\delta >0$ sufficiently small, we may assume that
\begin{equation}\label{M2}
\min_{x \in [\alpha - \delta, \alpha + \delta]} \left( \frac{1}{\sqrt{4 -
x^2}} \right) \geq \frac{1}{2} \max_{x \in [\alpha - \delta, \alpha
+ \delta]} \left( \frac{1}{\sqrt{4 - x^2}} \right).
\end{equation}
We define $M$ as
\begin{equation}\label{M3}
M := \max_{x \in [\alpha - \delta, \alpha
+ \delta]} \left( \frac{1}{\sqrt{4 - x^2}} \right).
\end{equation}
Choose a large positive integer $D$. For any $1 \leq i \leq D$
denote by $S_i$ the interval
$$[\alpha -
\delta + (i-1)(\delta/D), \alpha - \delta + i (\delta/D)].$$
Given any $n \gg 1$,
let $N_i := N_i (n)$ denote the number of $\sigma(x_n)$'s belonging to
$S_i$.
Note that $\log | \sigma (x_n) - \alpha | \leq 0$,
whenever $\sigma (x_n)$ belongs to any of the
$S_i$ $(1 \leq i \leq D)$.
For any $1 \leq i \leq D -1$, on $S_i$ we have
\begin{equation*}
\begin{split}
& \frac{1}{[K(x_n):K]} \biggl | \sum_{\substack{\sigma: K(x_n)/K
\hookrightarrow \mathbb{C} \\ \sigma(x_n)
\in S_i}} \log | \sigma(x_n) - \alpha| \biggl | \\
& \leq M
(\delta/D) \Big | \log |(D-i)
(\delta/D)| \Big | + O\left(\frac{1}{\sqrt{[K(x_n):K]}} \right) \Big | \log
\big ( (D-i)\delta/D \big ) \Big | \\
& \quad \quad \text{(by Lemma~\ref{lem;counting} with $\gamma = 1/2$)}\\
& \leq 2 \Bigg | \int_{S_{i+1}} (M/2) \log |x - \alpha| \, dx \Bigg |
+
O\left(\frac{1}{\sqrt{[K(x_n):K]}}
\right) \Big | \log \big ((D-i) \delta/D \big ) \Big |\\
& \leq 2 \Bigg |
\int_{S_{i+1}} \frac{1}{\sqrt{4 - x^2}} \log |x - \alpha| \, dx \Bigg |
+ O\left(\frac{1}{\sqrt{[K(x_n):K]}}\right) \Big |\log \big ( (D-i) \delta/D
\big ) \Big | \\
& \ \
\quad \text{ (by \eqref{M2} and \eqref{M3}) }.
\end{split}
\end{equation*}
Summing up over all $1 \leq i \leq D-1$ and applying \eqref{epsi} we
obtain
\begin{equation}\label{smaller}
\begin{split}
\frac{1}{[K(x_n):K]} \Bigg | \sum_{\substack{\sigma: K(x_n)/K
\hookrightarrow \mathbb{C} \\ \sigma(x_n)
\in [\alpha - \delta, \alpha - (\delta/D)]}} & \log |
\sigma(x_n) - \alpha| \Bigg |\\
& \leq 2 \epsilon + \frac{1}{\sqrt{[K(x_n):K]}}
C_2 D \big ( \big | \log (\delta/D) \bigl |
+ \log D \big ),
\end{split}
\end{equation}
for some constant $C_2 > 0$ independent of $n$ and $D$.
Similarly, we see that
\begin{equation}\label{bigger}
\begin{split}
\frac{1}{[K(x_n):K]} \Bigg |
\sum_{\substack{\sigma: K(x_n)/K \hookrightarrow \mathbb{C} \\ \sigma(x_n)
\in [\alpha + (\delta/D), \alpha + \delta]}} & \log |
\sigma(x_n) - \alpha| \Bigg | \\
& \leq 2 \epsilon + \frac{1}{\sqrt{[K(x_n):K]}}
C_3 D \big ( \big| \log (\delta/D) \big| + \log D \big ),
\end{split}
\end{equation}
for some constant $C_3 > 0$ independent of $n$ and $D$. Since
$|\log(1/D)|$ and $\log D$ grow more slowly than any power of $D$,
we see that
quantities \eqref{smaller} and \eqref{bigger} can be made sufficiently
small when $D$ is large and $[K(x_n):K] \geq D^4$.
Now, for all sufficiently small $\delta > 0$, we have
$$
0 \geq \log | x_n - \alpha | = \log
\big | 2 \cos \Big (2\pi { \frac{a}{N} } \Big ) -
2 \cos (2\pi {\theta_0}) \big | \geq \log
\Big | { \frac{a}{N} } -
\theta_0 \Big | + O(1)
$$
for all $x_n \in [\alpha - \delta, \alpha + \delta]$. When
$N$ is sufficiently large, Theorem~\ref{thm;baker} thus yields
$$
0 \geq \log | x_n - \alpha | \geq -C_4 {\log N} + O(1)
$$
where $C_4 > 0$ is a constant independent of $n$. This inequality
is true not only for $x_n$ itself, but also for all its $K$-Galois
conjugates that belong to $[\alpha - \delta, \alpha + \delta]$, i.e.,
after readjusting $C_4$ if necessary, we have
\begin{equation}\label{baker-log}
0 \geq \log | \sigma (x_n) - \alpha | \geq
- C_4 \log N
\end{equation}
for all $\sigma (x_n) \in [\alpha - \delta, \alpha +
\delta]$, where $C_4 > 0$ is a constant independent of
(all) $n \gg 1$. Thus, it follows from \eqref{M}
(again with $\gamma
= 1/4$) that we have
\begin{equation}\label{close}
\begin{split}
\frac{1}{[K(x_n):K]}
\Bigg |
\sum_{\substack{\sigma: K(x_n)/K \hookrightarrow \mathbb{C} \\
\sigma(x_n) \in [\alpha - (\delta/D), \alpha + (\delta/D)]}} \log
|\sigma(x_n) - \alpha |
\Bigg | \\
\leq C_4 M (\delta/D) \log N +
\frac{C_5
\log N}{[K(x_n):K]^{1/2} }
\end{split}
\end{equation}
where $C_5 > 0$ is a constant.
Write $\phi$ for the Euler function, and suppose that $N
\gg 1$. Note that
$$
[K(x_n) : K] \; \geq \;
\frac{[\mathbb Q (x_n) : {\mathbb Q}] }
{[K : \mathbb Q]} \; = \; { \frac{\phi (N)} {[K : \mathbb Q]} }
$$
and $\phi (N) \geq \sqrt N$ (see \cite[page 267, Thm 327]{HW}), and hence that
$[K(x_n):K]^{ \frac{1}{2} } \gg \sqrt[4]{N}$.
Now, let $D =
\lfloor \sqrt[4]{N} \rfloor $.
(Note this choice of $D$ is compatible with that of $D$ in
\eqref{smaller} and \eqref{bigger}.)
Then, when $N$ is sufficiently large, the right-hand
sides of \eqref{smaller} and \eqref{bigger} are both sufficiently small
and the right-hand side of \eqref{close} is also sufficiently
small. Combining equations \eqref{smaller}, \eqref{bigger}, and
\eqref{close} we then obtain that
$$
\frac{1}{[K(x_n):K]}
\Bigg |
\sum_{\sigma: K(x_n)/K \hookrightarrow
\mathbb{C} } \log |\sigma(x_n) - \alpha |
\Bigg | \;\;
\textup{is sufficiently small}.$$
Thus, we must have
$ \lim_{n \to \infty} \sum_{\sigma: K(x_n)/K \hookrightarrow
\mathbb{C} } \log |\sigma(x_n) - \alpha | = 0,$
as desired.
\end{proof}
The proof of Theorem~\ref{comp} is now immediate since the
Propositions above cover all $v$ and all non-preperiodic
$\alpha \in K$. Now, we are
ready to prove our main theorem, Theorem~\ref{main}.
\begin{proof}[Proof of Theorem~\ref{main}]
Let $S$ be a finite set of places of $K$ that includes all the archimedean
places. After extending $S$ to a larger finite set if
necessary, which only makes the set ${\mathbb
A^1}_{\varphi, \alpha, S}$ larger, we may assume that $S$ also
contains all the places $v$ for which $|\alpha|_v > 1$. Then for any $v
\notin S$ and any preperiodic point $x_n$ we have
\begin{equation}\label{zero}
\log | \sigma (x_n) - \alpha |_v = 0 \; \; \text{for any embedding
$\sigma: K(x_n)/K \longrightarrow {\overline K}_v$.}
\end{equation}
Assume that $(x_n)_{n=1}^\infty$ is an infinite nonrepeating
sequence in ${\mathbb A^1}_{\varphi, \alpha, S}$. Since we can
interchange a limit with a finite sum, we have
\begin{equation}
\begin{split}
& \frac{1}{[ K : {\mathbb Q}]} \hat{h}(\alpha)
= \sum_{v \in S} \lim_{n \rightarrow \infty}
\frac{1}{[ K(x_n) : {\mathbb Q}]} \sum_{\sigma: K(x_n)/K
\hookrightarrow {\overline K}_v } \log | \sigma (x_n) - \alpha |_v\\
& \text {(by \eqref{global}, \eqref{zero}, and Thm.~\ref{comp})
}\\
& = \lim_{n \rightarrow \infty} \sum_{v \in S} \frac{1}{[ K(x_n) :
{\mathbb Q}]} \sum_{\sigma: K(x_n)/K
\hookrightarrow {\overline K}_v } \log | \sigma (x_n) - \alpha |_v
\; \; \text{(switching sum and limit)} \\
&= \lim_{n \rightarrow \infty} \sum_{\text{places $v$ of $K$}}
\frac{1}{[ K(x_n) : {\mathbb Q}]} \sum_{\sigma: K(x_n)/K
\hookrightarrow {\overline K}_v } \log | \sigma (x_n) - \alpha |_v
\quad \text{(by \eqref{zero})}\\
& = 0 \quad \quad \quad \text{(by the product formula)}.
\end{split}
\end{equation}
Since $\alpha$ is not preperiodic, however, we have $\hat{h}(\alpha)
> 0$. Thus, we have a contradiction, so ${\mathbb A^1}_{\varphi,
\alpha, S}$ must be finite.
\end{proof}
\subsection{A variant of the Chebyshev dynamical systems}
We look at different Chebyshev polynomials defined by the
following recursion formula:
$$
Q_1 (z) := z, \;\;\; Q_2 (z) := z^2 + 2; \;\; \textup{and}
$$
$$
Q_{m+1} (z) - Q_{m-1} (z) = z Q_{m} (z) \;\; \textup{for all} \;
m \geq 2.
$$
The dynamical system induced by any of the $Q_{m}$
($m \geq 2$) on ${\mathbb A^{1}}$ (or
${\mathbb P^1}$) has properties similar to those
for the Chebyshev dynamical systems, for instance:
\begin{enumerate}
\item[{\rm (i)}] The Julia set is equal to the interval
$[-2, 2]$ on the $y$-axis;
\item[{\rm (ii)}] The preperiodic points are (either $\infty$ or)
the points of type $\zeta - \zeta^{-1}$, where $\zeta$ is a root of
unity.
\item[{\rm (iii)}] The corresponding measures $\mu_v$ satisfy
\begin{eqnarray}
\int_{ {\mathbb P^1} (\mathbb C_v)} \log |z-\alpha|_v \; d\mu_v =
\left\{
\begin{array}{ll} \log \max \{ |\alpha|_v, \; 1 \}, &
\text{if $v \not | \infty$}; \\
{ \frac{1}{\pi} } \int_{-2}^{2}
{ \frac{1} { \sqrt {4-y^2} } }
\log | yi - \alpha |_v \; dy, & \text{otherwise}
\end{array} \right. \nonumber
\end{eqnarray}
where $\alpha \in K$ ($K$ a number field), $v$ is a place of $K$, and
$dy$ is the usual Lebesgue measure on $[-2, 2]$. Note that the measure
$\mu_v$ ($v | \infty$) is supported on the interval $[-2, 2]$ on the
$y$-axis.
\end{enumerate}
\noindent It is then easy to see that arguments similar to the above
prove the following:
\begin{thm} Let $\psi$ be any of the $Q_m$ ($m \geq 2$).
Let $K$ be a number field, and let $S$ be a finite set of
places of $K$, containing all the infinite ones. Suppose that
$\alpha \in K$ is not of type $\zeta - \zeta^{-1}$ for any root of
unity $\zeta$. Then the following set
$$ {\mathbb A^1}_{\psi, \alpha, S} :=
\{ z \in {\overline {\mathbb Q}} : z \;
\textup{{\em{is $S$-integral with respect to}}}
\; \alpha \; \textup{{\em{and is}}} \;
\textup{${\psi}$-{\em{preperiodic}}} \}
$$
is finite.
\end{thm}
\def\cprime{$'$}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}
}
\providecommand{\href}[2]{#2}
\end{document} |
\begin{document}
\newtheorem{assumption}{Assumption}
\title{\LARGE \bf Securing Infrastructure Facilities: When does proactive defense help?
}
\author{Manxi Wu, and Saurabh Amin
\thanks{M. Wu is with the Institute for Data, Systems, and Society, and S. Amin is with the Department of Civil and Environmental Engineering, Massachusetts Institute of Technology (MIT), Cambridge, MA, USA
{\tt\small \{manxiwu,amins\}@mit.edu}}
}
\date{}
\begin{abstract}
Infrastructure systems are increasingly facing new security threats due to the vulnerabilities of cyber-physical components that support their operation. In this article, we investigate how the infrastructure operator (defender) should prioritize the investment in securing a set of facilities in order to reduce the impact of a strategic adversary (attacker) who can target a facility to increase the overall usage cost of the system. We adopt a game-theoretic approach to model the defender-attacker interaction and study two models: normal-form game -- where both players move simultaneously; and sequential game -- where attacker moves after observing the defender's strategy. For each model, we provide a complete characterization of how the set of facilities that are secured by the defender in equilibrium vary with the costs of attack and defense. Importantly, our analysis provides a sharp condition relating the cost parameters for which the defender has the first mover advantage. Specifically, we show that to fully deter the attacker from targeting any facility, the defender needs to proactively secure all ``vulnerable facilities'' at an appropriate level of effort. We illustrate the outcome of the attacker-defender interaction on a simple transportation network. We also suggest a dynamic learning setup to understand how this outcome can affect the ability of imperfectly informed users to make their decisions about using the system in the post-attack stage.
\textbf{Index terms}:
Infrastructure security, Normal form game, Sequential game.
\end{abstract}
\section{Introduction}
In this article, we consider the problem of strategic allocation of defense effort to secure one or more facilities of an infrastructure system that is prone to a targeted attack by a malicious adversary. The setup is motivated by the recent incidents and projected threats to critical infrastructures such as transportation, electricity, and urban water networks (\cite{moteff2004critical}, \cite{rinaldi2001identifying}, \cite{sandberg2015cyberphysical}, and \cite{leetaru_2015}). Two of the well-recognized security concerns faced by infrastructure operators are: (i) How to prioritize investments among facilities that are heterogeneous in terms of the impact that their compromise can have on the overall efficiency (or usage cost) of the system; and (ii) Whether or not an attacker can be fully deterred from launching an attack by proactively securing some of the facilities. Our work addresses these questions by focusing on the most basic form of strategic interaction between the system operator (defender) and an attacker, modeled as a normal form (simultaneous) or a sequential (Stackelberg) game. The normal form game is relevant to situations in which the attacker cannot directly observe the chosen security plan, whereas the sequential game applies to situations where the defender proactively secures some facilities, and the attacker can observe the defense strategy.
In recent years, many game-theoretical models have been proposed to study problems in cyber-physical security of critical infrastructure systems; see \cite{alpcan2010network}, and \cite{manshaei2013game} for a survey of these models. These models are motivated by the questions of strategic network design (\cite{dziubinski2013network}, \cite{laporte2010game} and \cite{schwartz2011network}), intrusion detection (\cite{chen2009game}, \cite{alpcan2003game}, \cite{dritsoula2012game}, and \cite{sethi2017value}), interdependent security (\cite{nguyen2009stochastic}, and \cite{amin2013security}), network interdiction (\cite{washburn1995two}, and \cite{dahan2015network}), and attack-resilient estimation and control (\cite{cardenas2011attacks}, and \cite{sridhar2012cyber}).
Our model is relevant for assessing strategic defense decisions for an infrastructure system viewed as a collection of facilities. In our model, each facility is considered as a distinct entity for the purpose of investment in defense, and multiple facilities can be covered by a single investment strategy. The attacker can target a single facility and compromise its operation, thereby affecting the overall operating efficiency of the system. Both players choose randomized strategies. The performance of the system is evaluated by a usage cost, whose value depends on the actions of both players. In particular, if an undefended facility is targeted by the attacker, it is assumed to be compromised, and this outcome is reflected as a change in the usage cost. Naturally, the defender aims to maintain a low usage cost, while the attacker wishes to increase the usage cost. The attacker (resp. defender) incurs a positive cost in targeting (resp. securing) a unit facility. Thus, both players face a trade-off between the usage cost and the attack/defense costs, which results in qualitatively different equilibrium regimes.
We analyze both normal form and sequential games in the above-mentioned setting. First, we provide a complete
characterization of the equilibrium structure in terms of the relative vulnerability of different facilities and the costs of defense/attack for both games. Secondly, we identify ranges of attack and defense costs for which the defender gets the first mover advantage by investing in proactive defense. Furthermore, we relate the outcome of this game (post-attack stage) to a dynamic learning problem in which the users of the infrastructure system are not fully informed about the realized security state (i.e. the identity of compromised facility).
We now outline our main results. To begin our analysis, we make the following observations. Analogous to \cite{dritsoula2017game}, we can represent the defender's mixed strategy by a vector with elements corresponding to the probabilities for each facility being secured. The defender's mixed strategy can also be viewed as her effort on each facility. Moreover, the attacker/defender only targets/secures facilities whose disruption will result in an increase in the usage cost (Proposition~\ref{strict_dominated}). If the increase in the usage cost of a facility is larger than the cost of attack, then we say that it is a vulnerable facility.
Our approach to characterizing Nash equilibrium (NE) of the normal form game is based on the fact that it is strategically equivalent to a zero-sum game. Hence, the set of attacker's equilibrium strategies can be obtained as the optimal solution set of a linear optimization program (Proposition~\ref{opt_eq}). For any given attack cost, we show that there exists a threshold cost of defense, which distinguishes two equilibrium regime types, named as type I and type II regimes. Theorem~\ref{attacker_strategy} shows that when the defense cost is lower than the cost threshold (type I regimes), the total attack probability is positive but less than 1, and all vulnerable facilities are secured by the defender with positive probability. On the other hand, when the defense cost is higher than the threshold (type II regimes), the total attack probability is 1, and some vulnerable facilities are not secured at all.
We develop a new approach to characterize the subgame perfect equilibrium (SPE) of the sequential game, noting that the strategic equivalence to zero-sum game no longer holds in this case. In this game, the defender, as the first mover, either proactively secures all vulnerable facilities with a threshold security effort so that the attacker does not target any facility, or leaves at least one vulnerable facility secured with an effort less than the threshold while the total attack probability is 1. For any attack cost, we establish another threshold cost of the defense, which is strictly higher than the corresponding threshold in the normal form game. This new threshold again distinguishes the equilibrium strategies into two regime types, named as type $\widetilde{\mathrm{I}}$ and type $\widetilde{\mathrm{II}}$ regimes. Theorem~\ref{theorem:SPE} shows that when the defense cost is lower than the cost threshold (type $\widetilde{\mathrm{I}}$ regimes), the defender can fully deter the attacker by proactively securing all vulnerable facilities with the threshold security effort. On the other hand, when the defense cost is higher than the threshold (type $\widetilde{\mathrm{II}}$ regimes), the defender maintains the same level of security effort as that in NE, while the total attack probability is 1.
Our characterization shows that both NE and SPE satisfy the following intuitive properties: (i) Both the defender and attack prioritize the facilities that results in a high usage cost when compromised; (ii) The attack and defense costs jointly determine the set of facilities that are targeted or secured in equilibrium. On one hand, as the attack cost decreases, more facilities are vulnerable to attack. On the other hand, as the defense cost decreases, the defender secures more facilities with positive effort, and eventually when the defense cost is below a certain threshold (defined differently in each game), all vulnerable facilities are secured with a positive effort; (iii) Each player's equilibrium payoff is non-decreasing in the opponent's cost, and non-increasing in her own cost.
It is well-known in the literature on two player games that so long as both players can choose mixed strategies, the equilibrium utility of the first mover in a sequential game is no less than that in a normal form game (\cite{bacsar1998dynamic} (pp. 126), \cite{von2004leadership}). However, cases can be found where the first mover advantage changes from positive to zero when the attacker's observed signal of the defender's strategy is associated with a noise (\cite{bagwell1995commitment}).
In the security game setting, the paper \cite{bier2007choosing} analyzed a game where there are two facilities, and the attacker's valuation of each facility is private information. They identify a condition under which the defender's equilibrium utility is strictly higher when his strategy can be observed by the attacker. In contrast, our model considers multiple facilities, and assumes that both players have complete information of the usage cost of each facility.
In fact, for our model, we are able to provide sharp conditions under which proactive defense strictly increases the defender's utility. Given any attack cost, unless the defense cost is ``relatively high'' (higher than the threshold cost in the sequential game), proactive defense is advantageous in terms of strictly improving the defender's utility and fully deterring the attack. However, if the defense cost is ``relatively medium'' (lower than the threshold cost in sequential game, but higher than that in the normal form game), a higher security effort on each vulnerable facility is required to gain the first mover advantage. Finally, if the defense cost is ``relatively low'' (lower than the threshold cost in the normal form game), then the defender can gain advantage by simply making the first move with the same level of security effort as that in the normal form game.
Note that our approach to characterizing NE and SPE can be readily extended to models with facility-dependent cost parameters and less than perfect defense. We conjecture that a different set of techniques will be required to tackle the more general situation in which the attacker can target multiple facilities at the same time; see \cite{dahan2015network} for related work in this direction. However, even when the attacker targets multiple facilities, one can find game parameters for which the defender is always strictly better off in the sequential game.
Finally, we provide a brief discussion on rational learning dynamics, aimed at understanding how the outcome of the attacker-defender interaction -- which may or may not result in compromise of a facility (state) -- effects the ability of system users to learn about the realized state through a repeated use of the system. A key issue is that the uncertainty about the realized state can significantly impact the ability of users to make decisions to ensure that their long-term cost corresponds to the true usage cost of the system. We explain this issue using a simple transportation network as an example, in which rational travelers (users) need to learn about the identity of the facility that is likely to be compromised using imperfect information about the attack and noisy realizations of travel time in each stage of a repeated routing game played over the network.
The results reported in this article contribute to the study on the allocation of defense resources on facilities against strategic adversaries, as discussed in \cite{powell2007defending} and \cite{bier2007choosing}. The underlying assumption that drives our analysis is that an attack on each facility can be treated independently for the purpose of evaluating its impact on the overall usage cost. Other papers that also make this assumption include \cite{bell2008attacker}, \cite{bier2013defending}, \cite{alderson2011solving}, and \cite{brown2006defending}. Indeed, when the impact of facility compromises are related to the network structure, facilities can no longer be treated independently, and the network structure becomes a crucial factor in analyzing the defense strategy (\cite{dziubinski2017you}). Additionally, network connections can also introduce the possibility of cascading failure among facilities, which is addressed in \cite{acemoglu2016network}, and \cite{goyal2014attack}. These settings are not covered by our model.
The paper is structured as follows: In Sec.~\ref{Sec:Model}, we introduce the model of both games, and discuss the modeling assumptions. We provide preliminary results to facilitate our analysis in Sec.~\ref{Sec:attack-defend}. Sec.~\ref{sec:generic_case} characterizes NE, and Sec.~\ref{sequential_section} characterizes SPE. Sec.~\ref{outcomes} compares both games. We discuss some extensions of our model and briefly introduce dynamic aspects in Sec.~\ref{example_sec}.
All proofs are included in the appendix.
\section{The Model} \label{Sec:Model}
\subsection{Attacker-Defender Interaction: Normal Form versus Sequential Games}\label{model_present}
Consider an infrastructure system modeled as a set of components (facilities) $\mathcal{E}$. To defend the system against an external malicious attack, the system operator (defender) can secure one or more facilities in $\mathcal{E}$ by investing in appropriate security technology. The set of facilities in question can include cyber or physical elements that are crucial to the functioning of the system.
These facilities are potential targets for a malicious adversary whose goal is to compromise the overall functionality of the system by gaining unauthorized access to certain cyber-physical elements.
The security technology can be a combination of proactive mechanisms (authentication and access control) or reactive ones (attack detection and response). Since our focus is on modeling the strategic interaction between the attacker and defender at a system level, we do not consider the specific functionalities of individual facilities or the protection mechanisms offered by various technologies.
We now introduce our game theoretic model. Let us denote a pure strategy of the defender as $s_d \subseteq \mathcal{E}$, with $s_d \in S_d = 2^{\mathcal{E}}$. The cost of securing any facility is given by the parameter $p_d \in \mathbb{R}_{>0}$. Thus, the total defense cost incurred in choosing a pure strategy $s_d$ is $|s_d| \cdot p_d$, where $|s_d|$ is the cardinality of $s_d$ (i.e., the number of secured facilities). The attacker chooses to target a single facility $e \in \mathcal{E}$ or not to attack. We denote a pure strategy of the attacker as $s_a \in S_a =\mathcal{E}\cup \{\emptyset\}$. The cost of an attack is given by the parameter $p_a \in \mathbb{R}_{>0}$, and it reflects the effort that the attacker needs to spend in order to successfully target a single facility and compromise its operation.
We assume that prior to the attack, the usage cost of the system is $C_{\emptyset}$. This cost represents the level of efficiency with which the defender is able to operate the system for its users. A higher usage cost reflects lower efficiency. If a facility $e$ is targeted by the attacker but not secured by the defender, we consider that $e$ is compromised and the usage cost of the system changes to $C_{e}$. Therefore, given any pure strategy profile $(s_d, s_a)$, the usage cost after the attacker-defender interaction, denoted $C(s_d, s_a)$, can be expressed as follows:
\begin{align}\label{Ceq}
C(s_d, s_a)=\left\{
\begin{array}{ll}
C_{e}, &\quad \text{if $s_a=e$, and $s_d \not\owns e$,}\\
C_{\emptyset}, &\quad \text{otherwise.}
\end{array}
\right.
\end{align}
To study the effect of timing of the attacker-defender interaction, prior literature on security games has studied both normal form game and sequential games (\cite{alpcan2010network}). We study both models in our setting. In the normal form game, denoted $\Gamma$, the defender and the attacker move simultaneously. On the other hand, in the sequential game, denoted $\widetilde{\Gamma}$, the defender moves in the first stage and the attacker moves in the second stage after observing the defender's strategy. We allow both players to use mixed strategies. In $\Gamma$, we denote the defender's mixed strategy as $\sigma_d \triangleq \left(\sigma_d(s_d)\right)_{s_d \in S_d} \in \Delta(S_d)$, where $\sigma_d(s_d)$ is the probability that the set of secured facilities is $s_d$. Similarly, a mixed strategy of the attacker is $\sigma_a \triangleq \left(\sigma_a(s_a)\right)_{s_a \in S_a} \in \Delta(S_a)$, where $\sigma_a(s_a)$ is the probability that the realized action is $s_a$. Let $\sigma=\left(\sigma_d, \sigma_a\right)$ denote a mixed strategy profile. In $\widetilde{\Gamma}$, the defender's mixed strategy $\widetilde{\sigma}_d \triangleq \left(\widetilde{\sigma}_d(s_d)\right)_{s_d \in S_d} \in \Delta(S_d)$ is defined analogously to that in $\Gamma$. The attacker's strategy is a map from $\Delta(S_d)$ to $\Delta(S_a)$, denoted by $\widetilde{\sigma}_a(\widetilde{\sigma}_d) \triangleq \left(\widetilde{\sigma}_a(s_a, \widetilde{\sigma}_d)\right)_{s_a \in S_a} \in \Delta(S_a)$, where $\widetilde{\sigma}_a(s_a, \widetilde{\sigma}_d)$ is the probability that the realized action is $s_a$ when the defender's strategy is $\widetilde{\sigma}_d$. A strategy profile in this case is denoted as $\widetilde{\sigma} = \left(\widetilde{\sigma}_d, \widetilde{\sigma}_a(\widetilde{\sigma}_d)\right)$.
The defender's utility is comprised of two parts: the negative of the usage cost as given in \eqref{Ceq} and the defense cost incurred in securing the system. Similarly, the attacker's utility is the usage cost net the attack cost. For a pure strategy profile $\left(s_d, s_a\right)$, the utilities of defender and attacker can be respectively expressed as follows:
\begin{align*}
u_d(s_d, s_a)&=-C(s_d, s_a)-p_d \cdot |s_d|, \quad u_a(s_d, s_a)=C(s_d, s_a)-p_a \cdot \mathds{1}\{s_a \neq \emptyset\}.
\end{align*}
For a mixed strategy profile $\left(\sigma_d, \sigma_a\right)$, the expected utilities can be written as:
\begin{subequations}\label{U_fun}
\begin{align}
U_d(\sigma_d, \sigma_a)&=\sum_{s_d \in S_d} \sum_{s_a \in S_a}u_d(s_d, s_a)\cdot \sigma_a(s_a) \cdot \sigma_d(s_d)=-\mathbb{E}_{\sigma} [C]-p_d \cdot\mathbb{E}_{\sigma_d} [|s_d|],\label{Ud} \\
U_a(\sigma_d, \sigma_a)&=\sum_{s_d \in S_d} \sum_{s_a \in S_a}u_a(s_d, s_a)\cdot \sigma_a(s_a) \cdot \sigma_d(s_d) =\mathbb{E}_{\sigma} [C]-p_a \cdot \mathbb{E}_{\sigma_a} [|s_a|],\label{Ua}
\end{align}
\end{subequations}
where $\mathbb{E}_{\sigma} [C]$ is the expected usage cost, and $\mathbb{E}_{\sigma_d}[|s_d|]$ (resp. $\mathbb{E}_{\sigma_a} [|s_a|]$) is the expected number of defended (resp. targeted) facilities, i.e.:
\begin{align*}
\mathbb{E}_{\sigma} [C]&=\sum_{s_a \in S_a} \sum_{s_d \in S_d}C(s_d, s_a)\cdot \sigma_a(s_a) \cdot \sigma_d(s_d), \\
\mathbb{E}_{\sigma_d}[|s_d|] &=\sum_{s_d \in S_d}|s_d| \sigma_d(s_d), \quad \mathbb{E}_{\sigma_a} [|s_a|]=\sum_{e \in \mathcal{E}}\sigma_a(e).
\end{align*}
An equilibrium outcome of the game $\Gamma$ is defined in the sense of Nash Equilibrium (NE). A strategy profile $\sigma^*=(\sigma_d^*, \sigma_a^*)$ is a NE if:
\begin{align*}
U_d(\sigma_d^*, \sigma_a^*) &\geq U_d(\sigma_d, \sigma_a^*), \quad \forall \sigma_d \in \Delta(S_d),\\
U_a(\sigma_d^*, \sigma_a^*) &\geq U_a(\sigma_d^*, \sigma_a), \quad \forall \sigma_a \in \Delta (S_a).
\end{align*}
In the sequential game $\widetilde{\Gamma}$, the solution concept is that of a Subgame Perfect Equilibrium (SPE), which is also known as Stackelberg equilibrium. A strategy profile $\widetilde{\sigma}^*=(\widetilde{\sigma}_d^*, \widetilde{\sigma}_a^*(\widetilde{\sigma}_d))$ is a SPE if:
\begin{subequations}\label{SPE_utility}
\begin{align}
U_d(\widetilde{\sigma}_d^*, \widetilde{\sigma}_a^*(\widetilde{\sigma}_d^*)) &\geq U_d(\widetilde{\sigma}_d, \widetilde{\sigma}_a^*(\widetilde{\sigma}_d)), \quad \forall \widetilde{\sigma}_d \in \Delta(S_d),\label{SPE:def}\\
U_a(\widetilde{\sigma}_d, \widetilde{\sigma}_a^*(\widetilde{\sigma}_d)) &\geq U_a(\widetilde{\sigma}_d, \widetilde{\sigma}_a(\widetilde{\sigma}_d)), \quad \forall \widetilde{\sigma}_d \in \Delta(S_d), \quad \forall \widetilde{\sigma}_a(\widetilde{\sigma}_d) \in \Delta(S_a). \label{SPE:subgame}
\end{align}
\end{subequations}
Since both $S_d$ and $S_a$ are finite sets, and we consider mixed strategies, both NE and SPE exist.
\input{model_discussion}
\section{Rationalizable Strategies and Aggregate Defense Effort}\label{Sec:attack-defend}
We introduce two preliminary results that are useful in our subsequent analysis. Firstly, we show that the defender's strategy can be equivalently represented by a vector of facility-specific security effort levels. Secondly, we identify the set of rationalizable strategies of both players.
For any defender's mixed strategy $\sigma_d \in \Delta(S_d)$, the corresponding \emph{security effort vector} is $\rho(\sigma_d)=\left(\rho_e(\sigma_d)\right)_{e \in \mathcal{E}}$, where $\rho_e(\sigma_d)$ is the probability that facility $e$ is secured:
\begin{align}\label{eq:ped}
\rho_e(\sigma_d)=\sum_{s_d \ni e} \sigma_d(s_d).
\end{align}
In other words, $\rho_e(\sigma_d)$ is the level of security effort exerted by the defender on facility $e$ under the security plan $\sigma_d$. Since $\sigma_d(s_d) \geq 0$ for any $s_d \in S_d$, we obtain that $0 \leq \rho_e(\sigma_d)=\sum_{s_d \ni e} \sigma_d(s_d) \leq \sum_{s_d \in S_d} \sigma_d(s_d) = 1$. Hence, any $\sigma_d$ induces a valid probability vector $\rho \in [0,1]^{|\mathcal{E}|}$. In fact, any vector $\rho \in [0, 1]^{|\mathcal{E}|}$ can be induced by at least one feasible $\sigma_d$. The following lemma provides a way to explicitly construct one such feasible strategy.
\begin{lemma}\label{lemma:stra_construct}
Consider any feasible security effort vector $\rho \in [0, 1]^{|\mathcal{E}|}$. Let $m$ be the number of distinct positive values in $\rho$, and define $\rho_{(i)}$ as the $i$-th largest distinct value in $\rho$, i.e. $\rho_{(1)} > \dots > \rho_{(m)}$. The following defender's strategy is feasible and induces $\rho$:
\begin{subequations}\label{stra_construct}
\begin{align}
&\sigma_d(\left\{e \in \mathcal{E} \mid \rho_e \geq \rho_{(i)}\right\})=\rho_{(i)}-\rho_{(i+1)}, \quad \forall i=1, \dots, m-1,\\
&\sigma_d(\left\{e \in \mathcal{E} \mid \rho_e \geq \rho_{(m)} \right\})= \rho_{(m)}, \\
&\sigma_d(\emptyset)=1-\rho_{(1)}.
\end{align}
For any remaining $s_d \in S_d$, $\sigma_d(s_d)=0$.
\end{subequations}
\end{lemma}
We now re-express the player utilities in \eqref{U_fun} in terms of $\left(\rho(\sigma_d), \sigma_a\right)$ as follows:
\begin{subequations}\label{U_fun_rewrite}
\begin{align}
U_d(\sigma_d, \sigma_a)&=-\sum_{s_a \in S_a} \left(\sum_{s_d \in S_d} \sigma_d(s_d) C(s_d, s_a)\right)\sigma_a(s_a)-\left(\sum_{s_d \in S_d}|s_d|\sigma_d(s_d)\right) p_d \notag \\
&=-\sum_{e \in \mathcal{E}} \left(\sum_{s_d \in S_d} \sigma_d(s_d) C(s_d, e)\right)\sigma_a(e)- C_{\emptyset} \sigma_a(\emptyset)-\left(\sum_{e \in \mathcal{E}} \rho_e(\sigma_d)\right) p_d \notag \\
&\stackrel{\eqref{Ceq}}{=}-\sum_{e \in \mathcal{E}} \left(\left(\sum_{s_d \ni e}\sigma_d(s_d)\right) C_{\emptyset}+\left(1-\sum_{s_d \ni e}\sigma_d(s_d)\right) C_e\right)\sigma_a(e)-C_{\emptyset} \sigma_a(\emptyset) \notag\\
& \quad ~ -\left(\sum_{e \in \mathcal{E}} \rho_e(\sigma_d)\right) p_d \notag\\
&=-\sum_{e \in \mathcal{E}} \left(\rho_e(\sigma_d) \left(\left(C_{\emptyset}-C_{e}\right) \sigma_a(e) +p_d\right) +C_{e} \sigma_a(e)\right)-C_{\emptyset} \sigma_a(\emptyset), \label{Ud_rewrite}\\
U_a(\sigma_d, \sigma_a)&=\sum_{e \in \mathcal{E}} \left(\rho_e(\sigma_d) \left(C_{\emptyset}-C_{e}\right) \sigma_a(e) +C_{e} \sigma_a(e)\right)+C_{\emptyset} \sigma_a(\emptyset) - \left(\sum_{e \in \mathcal{E}} \sigma_a(e)\right)p_a. \label{Ua_rewrite}
\end{align}
\end{subequations}
Thus, for any given attack strategy and any two defense strategies, if the induced security effort vectors are identical, then the corresponding utility for each player is also identical. Henceforth, we denote the player utilities as $U_d(\rho, \sigma_a)$ and $U_a(\rho, \sigma_a)$, and use $\sigma_d$ and $\rho_e(\sigma_d)$ interchangeably in representing the defender's strategy.
For the sequential game $\widetilde{\Gamma}$, we analogously denote the security effort vector given the strategy $\widetilde{\sigma}_d$ as $\tilde{\rho}(\widetilde{\sigma}_d)$, and the defender's utility (resp. attacker's utility) as $\widetilde{U}_d(\tilde{\rho}, \widetilde{\sigma}_a)$ (resp. $\widetilde{U}_a(\tilde{\rho}, \widetilde{\sigma}_a)$).
We next characterize the set of rationalizable strategies. Note that the post-attack usage cost $C_{e}$ can increase or remain the same or even decrease, in comparison to the pre-attack cost $C_{\emptyset}$. Let the facilities whose damage results in an increased usage cost be grouped in the set $\bar{\mathcal{E}}$. Similarly, let $\widehat{\mathcal{E}}$ denote the set of facilities such that a damage to any one of them has no effect on the usage cost. Finally, the set of remaining facilities is denoted as $\mathcal{E}^{\dagger}$. Thus:
\begin{subequations}
\begin{align}
\bar{\mathcal{E}} &\triangleq \left\{e \in \mathcal{E} \mid C_{e}>C_{\emptyset}\right\}, \label{Ebar}\\
\widehat{\mathcal{E}}&\triangleq \left\{e \in \mathcal{E} \mid C_{e}=C_{\emptyset}\right\},\label{Ehat}\\
\mathcal{E}^{\dagger} &\triangleq \left\{e \in \mathcal{E} \mid C_{e}<C_{\emptyset}\right\}. \label{Edag}
\end{align}
\end{subequations}
Clearly, $\bar{\mathcal{E}} \cup \widehat{\mathcal{E}} \cup \mathcal{E}^{\dagger}=\mathcal{E}$. The following proposition shows that in a rationalizable strategy profile, the defender does not secure facilities that are not in $\bar{\mathcal{E}}$, and the attacker only considers targeting the facilities that are in $\bar{\mathcal{E}}$.
\begin{proposition}\label{strict_dominated}
The rationalizable action sets for the defender and attacker are given by $2^{\bar{\mathcal{E}}}$ and $\bar{\mathcal{E}} \cup \{\emptyset\}$, respectively. Hence, any equilibrium strategy profile $\left(\rho^{*}, \sigma_a^{*}\right)$ in $\Gamma$ (resp. $\left(\tilde{\rho}^{*}, \widetilde{\sigma}_a^{*}\right)$ in $\widetilde{\Gamma}$) satisfies:
\begin{alignat*}{2}
\rho_e^{*}&=\sigma_a^{*}(e)=0, &&\quad \forall e \in \mathcal{E} \setminus \bar{\mathcal{E}}, \\
\tilde{\rho}^{*}_e&=\widetilde{\sigma}_a^{*}(e, \tilde{\rho})=0, &&\quad \forall e \in \mathcal{E} \setminus \bar{\mathcal{E}}, \quad \forall \tilde{\rho} \in [0, 1]^{|\mathcal{E}|}.
\end{alignat*}
\end{proposition}
If $\bar{\mathcal{E}}=\emptyset$, then the attacker/defender does not attack/secure any facility in equilibrium. Henceforth, to avoid triviality, we assume $\bar{\mathcal{E}} \neq \emptyset$. Additionally, we define a partition of facilities in $\bar{\mathcal{E}}$ such that all facilities with identical $C_e$ are grouped in the same set. Let the number of distinct values in $\{C_{e}\}_{e \in \bar{\mathcal{E}}}$ be $K$, and $C_{(k)}$ denote the $k$-th highest distinct value in the set $\{C_{e}\}_{e \in \bar{\mathcal{E}}}$. Then, we can order the usage costs as follows:
\begin{align}\label{order}
C_{(1)} > C_{(2)} > \dots > C_{(K)}>C_{\emptyset}.
\end{align}
We denote $\bar{\mathcal{E}}_{(k)}$ as the set of facilities such that if any $e \in \bar{\mathcal{E}}_{(k)}$ is damaged, the usage cost $C_{e}=C_{(k)}$, i.e. $\bar{\mathcal{E}}_{(k)} \triangleq \left\{e \in \bar{\mathcal{E}} \mid C_{e}=C_{(k)}\right\}$. We also define $E_{(k)} \triangleq |\bar{\mathcal{E}}_{(k)}|$. Clearly, $\cup_{k=1}^{K} \bar{\mathcal{E}}_{(k)}=\bar{\mathcal{E}}$, and $\sum_{k=1}^{K} E_{(k)}=|\bar{\mathcal{E}}|$. Facilities in the same group have identical impact on the infrastructure system when compromised.
\section{Normal Form Game $\Gamma$}\label{sec:generic_case}
In this section, we provide a complete characterization of the set of NE for any given attack and defense cost parameters in game $\Gamma$. In Sec.~\ref{zero_sum_gamen}, we show that $\Gamma$ is strategically equivalent to a zero-sum game, and hence the set of attacker's equilibrium strategies can be solved by a linear program. In Sec.~\ref{in_regime}, we show that the space of cost parameters $(p_a, p_d) \in \mathbb{R}_{>0}^2$ can be partitioned into qualitatively distinct equilibrium regimes.
\subsection{Strategic Equivalence to Zero-Sum Game}\label{zero_sum_gamen}
Our notion of strategic equivalence is the same as the best-response equivalence defined in \cite{rosenthal1974correlated}. If $\Gamma$ and another game $\Gamma^0$ are strategically equivalent, then given any strategy of the defender (resp. attacker), the set of attacker's (resp. defender's) best responses is identical in the two games. This result forms the basis of characterizing the set of NE.
We define the utility functions of the game $\Gamma^0$ as follows:
\begin{subequations}\label{zero_utility}
\begin{align}
U^0_d(\sigma_d, \sigma_a)&=-\mathbb{E}_{\sigma} [C(s_d, s_a)]-\mathbb{E}_{\sigma_d}[|s_d|] \cdot p_d+ p_a \cdot \mathbb{E}_{\sigma_a}[|s_a|],\label{zero_utility_defend}\\
U^0_a(\sigma_d, \sigma_a)&=\mathbb{E}_{\sigma} [C(s_d, s_a)]+\mathbb{E}_{\sigma_d}[|s_d|]\cdot p_d-p_a \cdot \mathbb{E}_{\sigma_a}[|s_a|]. \label{zero_utility_attack}
\end{align}
\end{subequations}
Thus, $\Gamma^0$ is a zero-sum game. We denote the set of defender's (resp. attacker's) equilibrium strategies in $\Gamma^0$ as $\Sigma_d^0$ (resp. $\Sigma_a^0$).
\begin{lemma}\label{zero_sum}
The normal form game $\Gamma$ is strategically equivalent to the zero-sum game $\Gamma^0$. The set of defender's (resp. attacker's) equilibrium strategies in $\Gamma$ is $\Sigma^{*}_d \equiv \Sigma_d^{0}$ (resp. $\Sigma^{*}_a \equiv \Sigma_a^{0}$). Furthermore, for any $\sigma_d^{*} \in \Sigma^{*}_d$ and any $\sigma_a^{*} \in \Sigma^{*}_a$, $(\sigma_d^{*}, \sigma_a^{*})$ is an equilibrium strategy profile of $\Gamma$.
\end{lemma}
Based on Lemma~\ref{zero_sum}, the set of attacker's equilibrium strategies $\Sigma^{*}_a$ can be expressed as the optimal solution set of a linear program.
\begin{proposition}\label{opt_eq}
The set $\Sigma^{*}_a$ is the optimal solution set of the following optimization problem:
\begin{subequations}\label{maxmin_min}
\begin{align}
\max_{\sigma_a} \quad &V(\sigma_a) \notag \\
\text{s.t.} \quad & V(\sigma_a)=\sum_{e \in \bar{\mathcal{E}}} \min \left\{\sigma_a(e)\cdot \left( C_{\emptyset}-p_a\right)+p_d,~ \sigma_a(e) \cdot \left(C_{e}-p_a\right)\right\}+\sigma_a(\emptyset) \cdot C_{\emptyset}, \label{V}\\
&\sum_{e \in \bar{\mathcal{E}}} \sigma_a(e) +\sigma_a(\emptyset)=1, \label{feasible_one}\\
& \sigma_a(\emptyset) \geq 0, \quad \sigma_a(e) \geq 0, \quad \forall e \in \bar{\mathcal{E}}. \label{feasible_two}
\end{align}
\end{subequations}
Furthermore, \eqref{maxmin_min} is equivalent to the following linear optimization program:
\begin{subequations}\label{linear_maxmin}
\begin{align}
\max_{\sigma_a, v} \quad &\sum_{e \in \bar{\mathcal{E}}} v_e+\sigma_a(\emptyset) \cdot C_{\emptyset} \notag\\
\text{s.t.} \quad & \sigma_a(e) \cdot \left(C_{\emptyset}-p_a\right)+p_d-v_e \geq 0, \quad \forall e \in \bar{\mathcal{E}}, \label{bound_1}\\
& \sigma_a(e) \cdot \left(C_{e}-p_a\right)-v_e \geq 0, \quad \forall e \in \bar{\mathcal{E}},\label{bound_2}\\
&\sum_{e \in \bar{\mathcal{E}}} \sigma_a(e)+\sigma_a(\emptyset)=1,\label{sum_signa}\\
& \sigma_a(\emptyset) \geq 0, \quad \sigma_a(e) \geq 0, \quad \forall e \in \bar{\mathcal{E}}, \label{non-negative}
\end{align}
\end{subequations}
where $v=\left(v_e\right)_{e \in \bar{\mathcal{E}}}$ is an $|\bar{\mathcal{E}}|$-dimensional variable.
\end{proposition}
In Proposition~\ref{opt_eq}, the objective function $V(\sigma_a)$ is a piecewise linear function in $\sigma_a$. Furthermore, given any $\sigma_a$ and any $e \in \bar{\mathcal{E}}$, we can write:
\begin{align}
&\min \left\{\sigma_a(e) \cdot \left(C_{\emptyset}-p_a\right)+p_d,~ \sigma_a(e) \cdot \left(C_{e}-p_a\right)\right\}\notag\\
=&
\left\{
\begin{array}{ll}
\sigma_a(e) \cdot \left(C_{\emptyset}-p_a\right)+p_d & \quad \text{if $\sigma_a(e) > \frac{p_d}{C_{e}-C_{\emptyset}}$,}\\
\sigma_a(e)\cdot \left(C_{e}-p_a\right) & \quad \text{if $\sigma_a(e) \leq \frac{p_d}{C_{e}-C_{\emptyset}}$.}
\end{array}
\right.\label{threshold}
\end{align}
Thus, we can observe that if $\sigma_a(e)$ equals $p_d/\left(C_{e}-C_{\emptyset}\right)$, then $-\sigma_a(e) \cdot C_{\emptyset}-p_d= -\sigma_a(e) \cdot C_{e}$, i.e. if a facility $e$ is targeted with the threshold attack probability $p_d/(C_{e}-C_{\emptyset})$, the defender is indifferent between securing $e$ versus not. The following lemma analyzes the defender's best response to the attacker's strategy, and shows that no facility is targeted with probability higher than the threshold probability in equilibrium.
\begin{lemma}\label{only_attacked}
Given any strategy of the attacker $\sigma_a \in \Delta(S_a)$, for any defender's security effort $\rho$ that is a best response to $\sigma_a$, denoted $\rho \in BR(\sigma_a)$, the security effort $\rho_e$ on each facility $e \in \mathcal{E}$ satisfies:
\begin{align}\label{best_response_normal}
\rho_e \left\{
\begin{array}{ll}
=0, &\quad \forall e \in \left\{e \in \bar{\mathcal{E}} \,\middle|\, \sigma_a(e)<\frac{p_d}{C_{e}-C_{\emptyset}}\right\} \cup \widehat{\mathcal{E}} \cup \mathcal{E}^{\dagger}, \\
\in [0, 1], & \quad \forall e \in \left\{e \in \bar{\mathcal{E}} \,\middle|\, \sigma_a(e)=\frac{p_d}{C_{e}-C_{\emptyset}}\right\}, \\
=1, &\quad \forall e \in \left\{e \in \bar{\mathcal{E}} \,\middle|\, \sigma_a(e)>\frac{p_d}{C_{e}-C_{\emptyset}}\right\}.
\end{array}
\right.
\end{align}
Furthermore, in equilibrium, the attacker's strategy $\sigma_a^{*}$ satisfies:
\begin{subequations}\label{upper_bound}
\begin{alignat}{2}
\sigma_a^{*}(e) &\leq \frac{p_d}{C_{e}-C_{\emptyset}}, &&\quad \forall e \in \bar{\mathcal{E}},
\label{sub:upper_bound}\\
\sigma_a^{*}(e) & = 0, &&\quad \forall e \in \mathcal{E} \setminus \bar{\mathcal{E}}. \label{zero_out}
\end{alignat}
\end{subequations}
\end{lemma}
Lemma~\ref{only_attacked} highlights a key property of NE: the attacker does not target any facility $e\in \bar{\mathcal{E}}$ with probability higher than the threshold $p_d/(C_{e}-C_{\emptyset})$, and the defender allocates a non-zero security effort only on the facilities that are targeted with the threshold probability.
Intuitively, if a facility $e$ were to be targeted with a probability higher than the threshold $p_d/(C_{e}-C_{\emptyset})$, then the defender's best response would be to secure that facility with probability 1, and the attacker's expected utility would be $-C_{\emptyset}-p_a\sigma_a(e)$, which is smaller than $-C_{\emptyset}$ (the utility of no attack). Hence, the attacker would be better off by choosing the no attack action.
Now, we can re-write $V(\sigma_a)$ as defined in \eqref{maxmin_min} as follows:
\begin{align}
V(\sigma_a)&\stackrel{\eqref{upper_bound}}{=}\sum_{e \in \{e \in \bar{\mathcal{E}} \mid \sigma_a(e) \leq \frac{p_d}{C_{e}-C_{\emptyset}}\}} \sigma_a(e) \left(C_{e}-p_a\right)+C_{\emptyset} \cdot \sigma_a(\emptyset),\label{re-express-V}
\end{align}
and the set of attacker's equilibrium strategies maximizes this function.
\subsection{Characterization of NE in $\Gamma$}\label{in_regime}
We are now in the position to introduce the equilibrium regimes. Each regime corresponds to a range of cost parameters such that the qualitative properties of equilibrium (i.e. the set of facilities that are targeted and secured) do not change in the interior of each regime.
We say that a facility $e$ is \emph{vulnerable} if $C_{e}-p_a>C_{\emptyset}$. Therefore, given any attack cost $p_a$, the set of vulnerable facilities is given by $\{e \in \bar{\mathcal{E}} \mid C_{e}-p_a>C_{\emptyset}\}$. Clearly, only vulnerable facilities are likely targets of the attacker. If $p_a> C_{(1)}-C_{\emptyset}$, then there are no vulnerable facilities. In contrast, if $p_a < C_{(1)}-C_{\emptyset}$, we define the following threshold for the per-facility defense cost:
\begin{align}\label{cdbar}
\bar{p}_d \triangleq \frac{1}{\sum_{e \in \{e \in \bar{\mathcal{E}} \mid C_{e}-p_a >C_{\emptyset}\}} \frac{1}{C_{e}-C_{\emptyset}}}.
\end{align}
We can check that for any $i=1, \dots, K-1$ (resp. $i=K$), if $C_{(i+1)}-C_{\emptyset}\leq p_a< C_{(i)}-C_{\emptyset}$ (resp. $0<p_a<C_{(K)}-C_{\emptyset}$), then
\begin{align}\label{cd_accurate}
\bar{p}_d=\left(\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}.
\end{align}
Recall from Lemma~\ref{only_attacked} that $\sigma_a^{*}(e)$ is upper bounded by the threshold attack probability $p_d/(C_{e}-C_{\emptyset})$. If the defense cost $p_d<\bar{p}_d$, then $\sum_{k=1}^{i} \frac{E_{(k)} p_d}{C_{(k)}-C_{\emptyset}}<1$, which implies that even when the attacker targets each vulnerable facility with the threshold attack probability, the total probability of attack is still smaller than 1. Thus, the attacker must necessarily choose not to attack with a positive probability. On the other hand, if $p_d>\bar{p}_d$, then the no attack action is not chosen by the attacker in equilibrium.
Following the above discussion, we introduce two types of regimes depending on whether or not $p_d$ is higher than the threshold $\bar{p}_d$. In type I regimes, denoted as $\{\Lambda^i \mid i=0, \dots, K\}$, the defense cost $p_d<\bar{p}_d$, whereas in type II regimes, denoted as $\{\Lambda_j \mid j=1, \dots, K\}$, the defense cost $p_d>\bar{p}_d$. Hence, we say that $p_d$ is ``relatively low'' (resp. ``relatively high'') in comparison to $p_a$ in type I regimes (resp. type II regimes). We formally define these $2K+1$ regimes as follows:
\begin{enumerate}[label=(\alph*)]
\item Type I regimes $\Lambda^i$, $i=0, \dots, K$:
\begin{itemize}
\item If $i=0$:
\begin{align}\label{regimei_first}
p_a> C_{(1)}-C_{\emptyset}, \text{ and } p_d>0.
\end{align}
\item If $i=1, \dots, K-1$:
\begin{align}\label{regimei_notlast}
C_{(i+1)}-C_{\emptyset} < p_a < C_{(i)}-C_{\emptyset}, \text{ and } 0<p_d< \left(\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}.
\end{align}
\item If $i=K$:
\begin{align}\label{regimei_last}
0 < p_a < C_{(K)}-C_{\emptyset}, \text{ and } 0<p_d<\left(\sum_{k=1}^{K} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}.
\end{align}
\end{itemize}
\item Type II regimes, $\Lambda_j$, $j=1, \dots, K$:
\begin{itemize}
\item If $j=1$:
\begin{align}\label{regime_j_1}
0<p_a< C_{(1)}-C_{\emptyset}, \text{ and } p_d> \left(\frac{E_{(1)}}{C_{(1)}-C_{\emptyset}}\right)^{-1}.
\end{align}
\item If $j=2, \dots, K$:
\begin{align}\label{regime_j_rest}
0<p_a< C_{(j)}-C_{\emptyset}, \text{ and } \left(\sum_{k=1}^{j} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}< p_d <\left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}.
\end{align}
\end{itemize}
\end{enumerate}
We now characterize the equilibrium strategy sets $\Sigma^{*}_d$ and $\Sigma^{*}_a$ in the interior of each regime.\footnote{For the sake of brevity, we omit the discussion of equilibrium strategies when cost parameters lie exactly on the regime boundary, although this case can be addressed using the approach developed in this article.}
\begin{theorem}\label{attacker_strategy}
The set of NE in each regime is as follows:
\begin{enumerate}[label=(\alph*)]
\item Type I regimes $\Lambda^i$:
\begin{itemize}
\item If $i=0$,
\begin{subequations}
\begin{align}
\rho_e^{*}&=0, \quad \forall e \in \mathcal{E}, \label{defender_regime_i_0}\\
\sigma_a^{*}(\emptyset)&=1. \label{unique_zero}
\end{align}
\end{subequations}
\item If $i=1, \dots, K$,
\begin{subequations}\label{attack_n_1}
\begin{alignat}{2}
\rho_e^{*}&=\frac{C_{(k)}-p_a-C_{\emptyset}}{C_{(k)}-C_{\emptyset}},&& \quad \forall e \in \bar{\mathcal{E}}_{(k)}, \quad \forall k=1, \dots, i, \label{regime_last_sub}\\
\rho_e^{*}&=0, &&\quad \forall e \in \mathcal{E} \setminus \left(\cup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}\right),\label{regime_last_zero}\\
\sigma_a^{*}(e)&= \frac{p_d}{C_{(k)}-C_{\emptyset}}, &&\quad \forall e \in \bar{\mathcal{E}}_{(k)}, \quad \forall k=1, \dots, i, \label{multiple_bound}\\
\sigma_a^{*}(\emptyset)&=1-\sum_{e \in \cup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}} \sigma_a^{*}(e). &&\label{6d}
\end{alignat}
\end{subequations}
\end{itemize}
\item Type II regimes $\Lambda_j$:
\begin{itemize}
\item $j=1$:
\begin{subequations}\label{attack_one}
\begin{alignat}{2}
\rho_e^{*}&=0, &&\quad \forall e \in \mathcal{E}, \label{regime_1_unique_p}\\
0 \leq \sigma_a^{*}(e) &\leq \frac{p_d}{C_{(1)}-C_{\emptyset}}, &&\quad \forall e \in \bar{\mathcal{E}}_{(1)}, \label{sub:upper_one}\\
\sum_{e \in \bar{\mathcal{E}}_{(1)}}\sigma_a^{*}(e)&=1. &&\label{sum_j_one}
\end{alignat}
\end{subequations}
\item $j=2, \dots, K$:
\begin{subequations}\label{regime_k_attack}
\begin{alignat}{2}
\rho_e^{*}&=\frac{C_{(k)}-C_{(j)}}{C_{(k)}-C_{\emptyset}}, &&\quad \forall e \in \bar{\mathcal{E}}_{(k)}, \quad \forall k=1, \dots, j-1, \label{defend_k_p}\\
\rho_e^{*}&=0, &&\quad \forall e \in \mathcal{E} \setminus \left(\cup_{k=1}^{j-1} \bar{\mathcal{E}}_{(k)}\right), \\
\sigma_a^{*}(e) &= \frac{p_d}{C_{(k)}-C_{\emptyset}}, && \quad \forall e \in \bar{\mathcal{E}}_{(k)}, \quad \forall k=1, \dots, j-1,\label{regime_k_1}\\
0 \leq \sigma_a^{*}(e) &\leq \frac{p_d}{C_{(j)}-C_{\emptyset}}, &&\quad \forall e \in \bar{\mathcal{E}}_{(j)},\label{regime_k_2}\\
\sum_{e \in \bar{\mathcal{E}}_{(j)}}\sigma_a^{*}(e)&=1-\sum_{k=1}^{j-1}\frac{p_d \cdot E_{(k)}}{C_{(k)}-C_{\emptyset}}.&& \label{regime_k_3}
\end{alignat}
\end{subequations}
\end{itemize}
\end{enumerate}
\end{theorem}
Let us discuss the intuition behind the proof of Theorem~\ref{attacker_strategy}.
Recall from Proposition~\ref{opt_eq} and Lemma~\ref{only_attacked} that the set of attacker's equilibrium strategies $\Sigma_a^{*}$ is the set of feasible mixed strategies that maximizes $V(\sigma_a)$ in \eqref{re-express-V}, and the attacker never targets any facility $e \in \mathcal{E}$ with probability higher than the threshold $p_d/(C_{e}-C_{\emptyset})$. Also recall that the costs $\{C_{(k)}\}_{k=1}^K$ are ordered according to \eqref{order}. Thus, in equilibrium, the attacker targets the facilities in $\bar{\mathcal{E}}_{(k)}$ with the threshold attack probability starting from $k=1$ and proceeding to $k=2, 3, \dots, K$ until either all the vulnerable facilities are targeted with the threshold attack probability (and no attack is chosen with the remaining probability), or the total attack probability reaches 1.
Again, from Lemma~\ref{only_attacked}, we know that the defender secures the set of facilities that are targeted with the threshold attack probability with positive effort. The equilibrium level of security effort ensures that the attacker gets an identical utility in choosing any pure strategy in the support of $\sigma_a^{*}$, and this utility is higher or equal to that of choosing any other pure strategy.
The distinctions between the two regime types are summarized as follows:
\begin{enumerate}
\item In type I regimes, the defense cost $p_d< \bar{p}_d$. The defender secures all vulnerable facilities with a positive level of effort. The attacker targets each vulnerable facility with the threshold attack probability, and the total probability of attack is less than 1.
\item In type II regimes, the defense cost $p_d>\bar{p}_d$. The defender only secures a subset of targeted facilities with a positive level of security effort. The attacker chooses the facilities in decreasing order of $C_{e}-C_{\emptyset}$, and targets each of them with the threshold probability until the attack resource is exhausted, i.e. the total probability of attack is 1.
\end{enumerate}
\section{Sequential Game $\widetilde{\Gamma}$}\label{sequential_section}
In this section, we characterize the set of SPE in the game $\widetilde{\Gamma}$ for any given attack and defense cost parameters. The sequential game $\widetilde{\Gamma}$ is no longer strategically equivalent to a zero-sum game. Hence, the proof technique we used for equilibrium characterization in game $\Gamma$ does not work for the game $\widetilde{\Gamma}$. In Sec.~\ref{spe_preparation}, we analyze the attacker's best response to the defender's security effort vector. We also identify a threshold level of security effort which determines whether or not the defender achieves full attack deterrence in equilibrium. In Sec.~\ref{spe_subsec}, we present the equilibrium regimes which govern the qualitative properties of SPE.
\subsection{Properties of SPE}\label{spe_preparation}
By definition of SPE, for any security effort vector $\tilde{\rho} \in [0, 1]^{|\mathcal{E}|}$ chosen by the defender in the first stage, the attacker's equilibrium strategy in the second stage is a best response to $\tilde{\rho}$, i.e. $\widetilde{\sigma}_a^{*}(\tilde{\rho})$ satisfies \eqref{SPE:subgame}. As we describe next, the properties of SPE crucially depend on a threshold security effort level defined as follows:
\begin{align}\label{pebar}
\widehat{\rho}_e \triangleq \frac{C_{e}-p_a-C_{\emptyset}}{C_{e}-C_{\emptyset}}, \quad \forall e \in \bar{\mathcal{E}}.
\end{align}
The following lemma presents the best response correspondence $BR(\tilde{\rho})$ of the attacker:
\begin{lemma}\label{best_response_sequential}
Given any $\tilde{\rho} \in [0, 1]^{|\mathcal{E}|}$, if $\tilde{\rho}$ satisfies $\tilde{\rho}_e\geq \widehat{\rho}_e$, for all $e \in \{e \in \bar{\mathcal{E}} \mid C_{e}-p_a>C_{\emptyset}\}$, then $BR(\tilde{\rho})=\Delta(\mathcal{E}^{\tilde{\rho}} \cup \{\emptyset\})$, where:
\begin{align}\label{Erho}
\mathcal{E}^{\tilde{\rho}} \triangleq \left\{e \in \bar{\mathcal{E}} \,\middle|\, C_{e}-p_a>C_{\emptyset}, \quad \tilde{\rho}_e= \widehat{\rho}_e\right\}.
\end{align}
Otherwise, $BR(\tilde{\rho})=\Delta(\mathcal{E}^{\max})$, where:
\begin{align}\label{Emax}
\mathcal{E}^{\max}\triangleq\underset{e \in \{e \in \bar{\mathcal{E}} \mid C_{e}-p_a>C_{\emptyset}\}}{\mathrm{argmax}} \left\{\tilde{\rho}_e C_{\emptyset}+ (1-\tilde{\rho}_e)C_{e} \right\}.
\end{align}
\end{lemma}
In words, if each vulnerable facility $e$ is secured with an effort higher or equal to the threshold effort $\widehat{\rho}_e$ in \eqref{pebar}, then the attacker's best response is to choose a mixed strategy with support comprised of all vulnerable facilities that are secured with the threshold level of effort (i.e., $\mathcal{E}^{\tilde{\rho}}$ as defined in \eqref{Erho}) and the no attack action. Otherwise, the support of the attacker's strategy is comprised of all vulnerable facilities (pure actions) that maximize the expected usage cost (see \eqref{Emax}). In particular, the no attack action is not chosen in the attacker's best response.
Now recall that any SPE $(\tilde{\rho}^{*}, \widetilde{\sigma}_a^{*}(\tilde{\rho}^{*}))$ must satisfy both \eqref{SPE:def} and \eqref{SPE:subgame}. Thus, for an equilibrium security effort $\tilde{\rho}^{*}$, an attacker's best response $\widetilde{\sigma}_a(\tilde{\rho}^{*}) \in BR(\tilde{\rho}^{*})$ is an equilibrium strategy only if both these constraints are satisfied. The next lemma shows that depending on whether the defender secures each vulnerable facility $e$ with the threshold effort $\widehat{\rho}_e$ or not, the total attack probability in equilibrium is either 0 or 1. Thus, the defender being the first mover determines
whether the attacker is fully deterred from conducting an attack or not. Additionally, in SPE, the security effort on each vulnerable facility $e$ is no higher than the threshold effort $\widehat{\rho}_e$, and the security effort on any other facility is 0.
\begin{lemma}\label{zero_or_one}
Any SPE $(\tilde{\rho}^{*}, \widetilde{\sigma}_a^{*}(\tilde{\rho}^{*}))$ of the game $\widetilde{\Gamma}$ satisfies the following property:
\begin{align*}
\sum_{e \in \bar{\mathcal{E}}}\widetilde{\sigma}_a^{*}(e, \tilde{\rho}^{*})= \left\{
\begin{array}{ll}
0, & \quad \text{if $\tilde{\rho}^{*}_e\geq \widehat{\rho}_e, \quad \forall e \in \{e \in \bar{\mathcal{E}} \mid C_{e}-p_a>C_{\emptyset}\}$},\\
1, & \quad \text{otherwise.}
\end{array}
\right.
\end{align*}
Additionally, for any $e \in \{e \in \bar{\mathcal{E}} \mid C_{e}-p_a>C_{\emptyset}\}$, $\tilde{\rho}^{*}_e \leq \widehat{\rho}_e$. For any $e \in \mathcal{E} \setminus \{e \in \bar{\mathcal{E}} \mid C_{e}-p_a>C_{\emptyset}\}$, $\tilde{\rho}^{*}_e =0$.
\end{lemma}
The proof of this result is based on the analysis of the following three cases:
\noindent\underline{Case 1}: There exists at least one facility $e \in \{e \in \bar{\mathcal{E}} \mid C_{e}-p_a>C_{\emptyset}\}$ such that $\tilde{\rho}^{*}_e<\widehat{\rho}_e$. In this case, by applying Lemma~\ref{best_response_sequential}, we know that $\widetilde{\sigma}_a^{*}(\tilde{\rho}^{*}) \in BR(\tilde{\rho}^{*}) = \Delta(\mathcal{E}^{\max})$, where $\mathcal{E}^{\max}$ is defined in \eqref{Emax}. Hence, the total attack probability is 1.
\noindent\underline{Case 2}: For any $e \in \{e \in \bar{\mathcal{E}} \mid C_{e}-p_a>C_{\emptyset}\}$, $\tilde{\rho}^{*}_e>\widehat{\rho}_e$. In this case, the set $\mathcal{E}^{\tilde{\rho}}$ defined in \eqref{Erho} is empty. Hence, Lemma~\ref{best_response_sequential} shows that the total attack probability is 0.
\noindent\underline{Case 3}: For any $e \in \{e \in \bar{\mathcal{E}} \mid C_{e}-p_a>C_{\emptyset}\}$, $\tilde{\rho}^{*}_e \geq \widehat{\rho}_e$, and the set $\mathcal{E}^{\tilde{\rho}}$ in \eqref{Erho} is non-empty. Again from Lemma~\ref{best_response_sequential}, we know that $\widetilde{\sigma}_a^{*}(\tilde{\rho}^{*}) \in BR(\tilde{\rho}^{*}) = \Delta(\mathcal{E}^{\tilde{\rho}} \cup \{\emptyset\})$. Now assume that the attacker chooses to target at least one facility $e \in \mathcal{E}^{\tilde{\rho}}$ with a positive probability in equilibrium. Then, the defender can deviate by slightly increasing the security effort on each facility in $\mathcal{E}^{\tilde{\rho}}$. By introducing such a deviation, the defender's security effort satisfies the condition of Case 2, where the total attack probability is 0. Hence, this results in a higher utility for the defender. Therefore, in any SPE $\left(\tilde{\rho}^{*}, \widetilde{\sigma}_a^{*}(\tilde{\rho}^{*})\right)$, one cannot have a second-stage outcome in which the attacker targets facilities in $\mathcal{E}^{\tilde{\rho}}$. We can thus conclude that the total attack probability must be 0 in this case.
In both Cases 2 and 3, we say that the attacker is \emph{fully deterred}.
Clearly, these three cases are exhaustive in that they cover all feasible security effort vectors, and hence we can conclude that the total attack probability in equilibrium is either 0 or 1. Additionally, since the attacker is fully deterred when each vulnerable facility is secured with the threshold effort, the defender will not further increase the security effort beyond the threshold effort on any vulnerable facility. That is, only Cases 1 and 3 are possible in equilibrium.
\subsection{Characterization of SPE}\label{spe_subsec}
Recall that in Sec.~\ref{sec:generic_case}, type I and type II regimes for the game $\Gamma$ can be distinguished based on a threshold defense cost $\bar{p}_d$. It turns out that in $\widetilde{\Gamma}$, there are still $2K+1$ regimes. Again, each regime denotes distinct ranges of cost parameters, and can be categorized either as type $\widetilde{\mathrm{I}}$ or type $\widetilde{\mathrm{II}}$. However, in contrast to $\Gamma$, the regime boundaries in this case are more complicated; in particular, they are non-linear in the cost parameters $p_a$ and $p_d$.
To introduce the boundary $\tilde{p}_d(p_a)$, we need to define the function $p_d^{ij}(p_a)$ for each $i=1, \dots, K$ and $j=1, \dots, i$ as follows:
\begin{align}\label{cdij}
p_d^{ij}(p_a) = \left\{
\begin{array}{ll}
\frac{C_{(1)}-C_{\emptyset}}{\sum_{k=1}^{i} E_{(k)}-\sum_{k=1}^i\frac{p_a E_{(k)}}{C_{(k)}-C_{\emptyset}}}, & \quad \text{if } j=1,\\
&\\
\frac{C_{(j)}-C_{\emptyset}}{\left(C_{(j)}-C_{\emptyset}\right) \cdot \left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right) + \sum_{k=j}^{i} E_{(k)}-\sum_{k=1}^{i} \frac{p_a E_{(k)}}{C_{(k)}-C_{\emptyset}}}, & \quad \text{if } j=2, \dots, i.
\end{array}
\right.
\end{align}
For any $i=1, \dots, K$, and any attack cost $C_{(i+1)}-C_{\emptyset} \leq p_a< C_{(i)}-C_{\emptyset}$, but $0<p_a<C_{(K)}-C_{\emptyset}$ if $i=K$, the threshold $\tilde{p}_d(p_a)$ is defined as follows:
\begin{align}\label{cdtil}
\tilde{p}_d(p_a)=\left\{
\begin{array}{ll}
p_d^{ij}(p_a), & \quad \text{if $\frac{\sum_{k=j+1}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}\leq p_a<\frac{\sum_{k=j}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}$, and $j=1, \dots, i-1$,}\\
p_d^{ii}(p_a), & \quad \text{if $0\leq p_a<\frac{E_{(i)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}$}.
\end{array}
\right.
\end{align}
\begin{lemma}\label{comparison_lemma}
Given any attack cost $0\leq p_a<C_{(1)}-C_{\emptyset}$, the threshold $\tilde{p}_d(p_a)$ is a strictly increasing and continuous function of $p_a$.
Furthermore, for any $0<p_a<C_{(1)}-C_{\emptyset}$, $\tilde{p}_d(p_a)>\bar{p}_d$. If $p_a=0$, then $\tilde{p}_d(0)=\bar{p}_d(0)$. If $p_a \to C_{(1)}-C_{\emptyset}$, then $\tilde{p}_d(p_a) \to +\infty$.
\end{lemma}
Since $\tilde{p}_d(p_a)$ is a strictly increasing and continuous function of $p_a$, the inverse function $\tilde{p}_d^{-1}(p_d)$ is well-defined. Now we are ready to formally define the regimes for the game $\widetilde{\Gamma}$:
\begin{enumerate}
\item Type $\widetilde{\mathrm{I}}$ regimes $\widetilde{\Lambda}^i$, $i=0, \dots, K$:
\begin{itemize}
\item If $i=0$:
\begin{align}\label{regimesi_first}
p_a>C_{(1)}-C_{\emptyset}, \text{ and } \quad p_d>0.
\end{align}
\item If $i=1, \dots, K-1$:
\begin{align}\label{regimesi_middle}
C_{(i+1)}-C_{\emptyset} <p_a< C_{(i)}-C_{\emptyset}, \text{ and } \quad 0<p_d< \tilde{p}_d(p_a).
\end{align}
\item If $i=K$:
\begin{align}\label{regimesi_last}
0<p_a< C_{(K)}-C_{\emptyset}, \text{ and } \quad 0<p_d< \tilde{p}_d(p_a).
\end{align}
\end{itemize}
\item Type $\widetilde{\mathrm{II}}$ regimes $\widetilde{\Lambda}_j$, $j=1, \dots, K$:
\begin{itemize}
\item If $j=1$:
\begin{align}\label{regimej_constraint_1}
0< p_a< \tilde{p}_d^{-1}(p_d), \text{ and } \quad p_d> \left(\frac{E_{(1)}}{C_{(1)}-C_{\emptyset}}\right)^{-1}.
\end{align}
\item If $j=2, \dots, K$:
\begin{align}\label{regimej_constraint}
0< p_a< \tilde{p}_d^{-1}(p_d), \text{ and } \quad \left(\sum_{k=1}^{j} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}<p_d<\left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}.
\end{align}
\end{itemize}
\end{enumerate}
Analogous to the discussion in Section ref{in_regime}, we say $p_d$ is ``relatively low'' in type $\widetilde{\mathrm{I}}$ regimes, and ``relatively high'' in type $\widetilde{\mathrm{II}}$ regimes. We now provide full characterization of SPE in each regime.
\begin{theorem}\label{theorem:SPE}
The defender's equilibrium security effort vector $\tilde{\rho}^{*}=\left(\tilde{\rho}^{*}_e\right)_{e \in \mathcal{E}}$ is unique in each regime. Specifically, the SPE in each regime is as follows:
\begin{enumerate}
\item Type $\widetilde{\mathrm{I}}$ regimes $\widetilde{\Lambda}^i$:
\begin{itemize}
\item If $i=0$,
\begin{subequations}\label{SPE_i_0}
\begin{alignat}{2}
\tilde{\rho}^{*}_e&=0, &&\quad \forall e \in \mathcal{E}, \\
\widetilde{\sigma}_a^{*}(\emptyset, \rho)&=1, &&\quad \forall \rho \in [0, 1]^{|\mathcal{E}|}.
\end{alignat}
\end{subequations}
\item If $i=1, \dots, \bar{\mathcal{E}}_p$,
\begin{subequations}\label{SPE_i}
\begin{alignat}{2}
\tilde{\rho}^{*}_e&=\frac{C_{(k)}-p_a-C_{\emptyset}}{C_{(k)}-C_{\emptyset}}, && \quad \forall e \in \bar{\mathcal{E}}_{(k)}, \quad \forall k=1, \dots, i,\label{SPE_i_defender_positive} \\
\tilde{\rho}^{*}_e&=0, &&\quad \forall e \in \mathcal{E}\setminus \left(\cup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}\right),\label{SPE_i_defender_zero}\\
\widetilde{\sigma}_a^{*}(\emptyset, \tilde{\rho}^{*})&=1,&&\\
\widetilde{\sigma}_a^{*}(\rho) &\in BR(\rho), &&\quad \forall \rho \in [0, 1]^{|\mathcal{E}|}\setminus \{\tilde{\rho}^{*}\}. \label{SPE_i_attacker}
\end{alignat}
\end{subequations}
\end{itemize}
\item Type $\widetilde{\mathrm{II}}$ regimes $\widetilde{\Lambda}_j$:
\begin{itemize}
\item If $j=1$,
\begin{subequations}\label{SPE_j_1}
\begin{alignat}{2}
\tilde{\rho}^{*}_e&=0, &&\quad \forall e \in \mathcal{E},\\
\widetilde{\sigma}_a^{*}(\tilde{\rho}^{*})&\in\Delta(\bar{\mathcal{E}}_{(1)}), &&\\
\widetilde{\sigma}_a^{*}(\rho)&\in BR(\rho),&& \quad \forall \rho \in [0, 1]^{|\mathcal{E}|}\setminus \{\tilde{\rho}^{*}\}. \label{SPE_j_1_attacker}
\end{alignat}
\end{subequations}
\item If $j=2, \dots, \bar{\mathcal{E}}_p$,
\begin{subequations}\label{SPE_j}
\begin{alignat}{2}
\tilde{\rho}^{*}_e&=\frac{C_{(k)}-C_{(j)}}{C_{(k)}-C_{\emptyset}}, &&\quad \forall e \in \bar{\mathcal{E}}_{(k)},\quad \forall k=1, \dots, j-1,\\
\tilde{\rho}^{*}_e&=0, &&\quad \forall e \in \mathcal{E} \setminus \left(\cup_{k=1}^{j-1} \bar{\mathcal{E}}_{(k)}\right),\\
\widetilde{\sigma}_a^{*}(\tilde{\rho}^{*})&\in\Delta\left(\cup_{k=1}^{j}\bar{\mathcal{E}}_{(k)}\right), &&\\
\widetilde{\sigma}_a^{*}(\rho)&\in BR(\rho), &&\quad \forall \rho \in [0, 1]^{|\mathcal{E}|}\setminus \{\tilde{\rho}^{*}\}.\label{SPE_j_attacker}
\end{alignat}
\end{subequations}
\end{itemize}
\end{enumerate}
\end{theorem}
In our proof of Theorem \ref{theorem:SPE} (see Appendix \ref{proof_sequential}), we take the approach of first constructing a partition of the space $(p_a, p_d) \in \mathbb{R}_{>0}^2$ defined in \eqref{partition}, and then characterizing the SPE for cost parameters in each set in the partition (Lemmas \ref{sequential_type1}--\ref{type_2_sequential}). Theorem \ref{theorem:SPE} follows directly by regrouping/combining the elements of this partition such that each element of the new partition has qualitatively identical equilibrium strategies.
From the discussion of Lemma \ref{zero_or_one}, we know that only Cases 1 and 3 are possible in equilibrium, and that in any SPE, the security effort on each vulnerable facility $e$ is no higher than the threshold effort $\widehat{\rho}_e$. It turns out that for any attack cost, depending on whether the defense cost is lower or higher than the threshold cost $\tilde{p}_d(p_a)$, the defender either secures each vulnerable facility with the threshold effort given by \eqref{cdtil} (type $\widetilde{\mathrm{I}}$ regimes), or there is at least one vulnerable facility that is secured with effort strictly less than the threshold (type $\widetilde{\mathrm{II}}$ regimes):
\begin{itemize}
\item In type $\widetilde{\mathrm{I}}$ regimes, the defense cost $p_d<\tilde{p}_d(p_a)$. The defender secures each vulnerable facility with the threshold effort $\widehat{\rho}_e$. The attacker is fully deterred.
\item In type $\widetilde{\mathrm{II}}$ regimes, the defense cost $p_d>\tilde{p}_d(p_a)$. The defender's equilibrium security effort is identical to that in the NE of the normal form game $\Gamma$. The total attack probability is 1.
\end{itemize}
\section{Comparison of $\Gamma$ and $\widetilde{\Gamma}$}\label{outcomes}
Sec. \ref{utility_comparison} deals with the comparison of players' equilibrium utilities in the two games. In Sec. \ref{NE_SPE}, we compare the equilibrium regimes and discuss the distinctions in equilibrium properties of the two games. This leads us to an understanding of the effect of the timing of play, i.e., we can identify situations in which the defender gains by proactively investing in securing all of the vulnerable facilities at an appropriate level of effort.
\subsection{Comparison of Equilibrium Utilities}\label{utility_comparison}
The equilibrium utilities in both games are unique, and can be directly derived using Theorems \ref{attacker_strategy} and \ref{theorem:SPE}. We denote the equilibrium utilities of the defender and attacker in regime $\Lambda^i$ (resp. $\Lambda_j$) as $U_d^{\Lambda^i}$ and $U_a^{\Lambda^i}$ (resp. $U_d^{\Lambda_j}$ and $U_a^{\Lambda_j}$) in $\Gamma$, and as $\widetilde{U}_d^{\widetilde{\Lambda}^i}$ and $\widetilde{U}_a^{\widetilde{\Lambda}^i}$ (resp. $\widetilde{U}_d^{\widetilde{\Lambda}_j}$ and $\widetilde{U}_a^{\widetilde{\Lambda}_j}$) in regime $\widetilde{\Lambda}^i$ (resp. $\widetilde{\Lambda}_j$) in $\widetilde{\Gamma}$.
\begin{proposition}\label{utility}
In both $\Gamma$ and $\widetilde{\Gamma}$, the equilibrium utilities are unique in each regime. Specifically,
\begin{enumerate}[label=(\alph*)]
\item Type I $(\widetilde{\mathrm{I}})$ regimes $\Lambda^i$ $(\widetilde{\Lambda}^i)$:
\begin{itemize}
\item If $i=0$:
\begin{align*}
U_d^{\Lambda^0}&=\widetilde{U}_d^{\widetilde{\Lambda}^{0}}=-C_{\emptyset}, \quad \text{and} \quad
U_a^{\Lambda^0}= \widetilde{U}_a^{\widetilde{\Lambda}^0}=C_{\emptyset}.
\end{align*}
\item If $i=1, \dots, \bar{\mathcal{E}}_p$:
\begin{alignat*}{2}
U_d^{\Lambda^i}&=-C_{\emptyset}-\left(\sum_{k=1}^{i} E_{(k)}\right)p_d, \quad &&\text{and} \quad U_a^{\Lambda^i}=C_{\emptyset}, \\
\widetilde{U}_d^{\widetilde{\Lambda}^i}&=-C_{\emptyset}-\left(\sum_{k=1}^{i} \frac{\left(C_{(k)}-p_a-C_{\emptyset}\right)E_{(k)}}{C_{(k)}-C_{\emptyset}}\right) p_d, \quad &&\text{and} \quad \widetilde{U}_a^{\widetilde{\Lambda}^i}=C_{\emptyset}.
\end{alignat*}
\end{itemize}
\item Type II ($\widetilde{\mathrm{II}}$) regimes $\Lambda_j$ ($\widetilde{\Lambda}_j$):
\begin{itemize}
\item If $j=1$:
\begin{align*}
U_d^{\Lambda_1}&=\widetilde{U}_d^{\widetilde{\Lambda}_{1}}=-C_{(1)}, \quad \text{and} \quad U_a^{\Lambda_1}=\widetilde{U}_a^{\widetilde{\Lambda}_{1}}=C_{(1)}-p_a.
\end{align*}
\item If $j=2, \dots, \bar{\mathcal{E}}_p$:
\begin{align*}
U_d^{\Lambda_j}&=\widetilde{U}_d^{\widetilde{\Lambda}_j}=-C_{(j)}-\sum_{k=1}^{j-1} \frac{\left(C_{(k)}-C_{(j)}\right) p_d E_{(k)}}{C_{(k)}-C_{\emptyset}}, \quad \text{and} \quad U_a^{\Lambda_j}=\widetilde{U}_a^{\widetilde{\Lambda}_j}=C_{(j)}-p_a.
\end{align*}
\end{itemize}
\end{enumerate}
\end{proposition}
From our results so far, we can summarize the similarities between the equilibrium outcomes in $\Gamma$ and $\widetilde{\Gamma}$. While most of these conclusions are fairly intuitive, the fact that they are common to both game-theoretic models suggests that the timing of defense investments does not play a role as far as these insights are concerned.
Firstly, the support of both players' equilibrium strategies tends to contain the facilities whose compromise results in a high usage cost. The defender secures these facilities with a high level of effort in order to reduce the probability with which they are targeted by the attacker.
Secondly, the attack and defense costs jointly determine the set of facilities that are targeted or secured in equilibrium. On one hand, the set of vulnerable facilities increases as the cost of attack decreases. On the other hand, when the cost of defense is sufficiently high, the attacker tends to conduct an attack with probability 1. However, as the defense cost decreases, the attacker randomizes the attack on a larger set of facilities. Consequently, the defender secures a larger set of facilities with positive effort, and when the cost of defense is sufficiently small, all vulnerable facilities are secured by the defender. Thirdly, each player's equilibrium payoff is non-decreasing in the opponent's cost, and non-increasing in her own cost. Therefore, to increase her equilibrium payoff, each player is better off as her own cost decreases and the opponent's cost increases.
\subsection{First Mover Advantage}\label{NE_SPE}
We now focus on identifying parameter ranges in which the defender has the first mover advantage, i.e., the defender in SPE has a strictly higher payoff than in NE. To identify the first mover advantage, let us recall the expressions of type I regimes for $\Gamma$ in \eqref{regimei_first}--\eqref{regimei_last} and type $\widetilde{\mathrm{I}}$ regimes for $\widetilde{\Gamma}$ in \eqref{regimesi_first}--\eqref{regimesi_last}. Also recall that, for any given cost parameters $p_a$ and $p_d$, the threshold $\bar{p}_d$ (resp. $\tilde{p}_d(p_a)$) determines whether the equilibrium outcome is of type I or type II regime (resp. type $\widetilde{\mathrm{I}}$ or $\widetilde{\mathrm{II}}$ regime) in the game $\Gamma$ (resp. $\widetilde{\Gamma}$). Furthermore, from Lemma \ref{comparison_lemma}, we know that the cost threshold $\bar{p}_d$ in $\Gamma$ is smaller than the threshold $\tilde{p}_d(p_a)$ in $\widetilde{\Gamma}$. Thus, for all $i=1, \dots, \bar{\mathcal{E}}_p$, the type I regime $\Lambda^i$ in $\Gamma$ is a proper subset of the type $\widetilde{\mathrm{I}}$ regime $\widetilde{\Lambda}^i$ in $\widetilde{\Gamma}$. Consequently, for any $\left(p_a, p_d\right) \in \mathbb{R}_{>0}^2$, we can have one of the following three cases:
\begin{itemize}
\item $0<p_d< \bar{p}_d$: The defense cost is relatively low in both $\Gamma$ and $\widetilde{\Gamma}$. We denote the set of $\left(p_a, p_d\right)$ that satisfy this condition as $L$ (\emph{low} cost). That is,
\begin{align}\label{L_set}
L \triangleq \left\{\left(p_a, p_d\right) \,\middle|\, 0<p_d< \bar{p}_d \right\}=\cup_{i=0}^{\bar{\mathcal{E}}_p} \Lambda^i.
\end{align}
\item $\bar{p}_d<p_d< \tilde{p}_d(p_a)$: The defense cost is relatively high in $\Gamma$, but relatively low in $\widetilde{\Gamma}$. We denote the set of $\left(p_a, p_d\right)$ that satisfy this condition as $M$ (\emph{medium} cost). That is,
\begin{align}\label{M_set}
M \triangleq \left\{\left(p_a, p_d\right) \,\middle|\, \bar{p}_d<p_d< \tilde{p}_d(p_a) \right\}=\cup_{i=1}^{\bar{\mathcal{E}}_p} \left(\widetilde{\Lambda}^i \setminus \Lambda^i\right).
\end{align}
\item $p_d>\tilde{p}_d(p_a)$: The defense cost is relatively high in both $\Gamma$ and $\widetilde{\Gamma}$. We denote the set of $\left(p_a, p_d\right)$ that satisfy this condition as $H$ (\emph{high} cost). That is,
\begin{align*}
H \triangleq \left\{\left(p_a, p_d\right) \,\middle|\, p_d>\tilde{p}_d(p_a)\right\}=\cup_{j=1}^{\bar{\mathcal{E}}_p} \widetilde{\Lambda}_j.
\end{align*}
\end{itemize}
We next compare the properties of NE and SPE for cost parameters in each set based on Theorems \ref{attacker_strategy} and \ref{theorem:SPE}, and Proposition \ref{utility}.
\begin{itemize}
\item Set $L$:
\noindent\emph{Attacker}: In $\Gamma$, the total attack probability is nonzero but smaller than 1, whereas in $\widetilde{\Gamma}$, the attacker is fully deterred. The attacker's equilibrium utility is identical in both games, i.e., $U_a=\widetilde{U}_a$.
\noindent\emph{Defender}: The defender chooses identical equilibrium security effort in both games, i.e., $\rho^{*}=\tilde{\rho}^{*}$, but obtains a higher utility in $\widetilde{\Gamma}$ in comparison to that in $\Gamma$, i.e., $U_d<\widetilde{U}_d$.
\item Set $M$:
\noindent\emph{Attacker}: In $\Gamma$, the attacker conducts an attack with probability 1, whereas in $\widetilde{\Gamma}$ the attacker is fully deterred. The attacker's equilibrium utility is lower in $\widetilde{\Gamma}$ in comparison to that in $\Gamma$, i.e., $U_a>\widetilde{U}_a$.
\noindent\emph{Defender}: The defender secures each vulnerable facility with a strictly higher level of effort in $\widetilde{\Gamma}$ than in $\Gamma$, i.e., $\tilde{\rho}^{*}_e>\rho_e^{*}$ for each vulnerable facility $e \in \{e \in \mathcal{E} \mid C_{e}-p_a>C_{\emptyset}\}$. The defender's equilibrium utility is higher in $\widetilde{\Gamma}$ in comparison to that in $\Gamma$, i.e., $U_d<\widetilde{U}_d$.
\item Set $H$:
\noindent\emph{Attacker}: In both games, the attacker conducts an attack with probability 1, and obtains identical utilities, i.e., $U_a=\widetilde{U}_a$.
\noindent\emph{Defender}: The defender chooses identical equilibrium security effort in both games, i.e., $\rho^{*}=\tilde{\rho}^{*}$, and obtains identical utilities, i.e., $U_d=\widetilde{U}_d$.
\end{itemize}
Importantly, the key difference between NE and SPE comes from the fact that in $\widetilde{\Gamma}$, the defender as the leading player is able to influence the attacker's strategy in her favor.
Hence, when the defense cost is relatively medium or low (both sets $M$ and $L$), the defender can proactively secure all vulnerable facilities with the threshold effort to fully deter the attack, which results in a higher defender utility in $\widetilde{\Gamma}$ than in $\Gamma$. Thus, we say the defender has the first-mover advantage when the cost parameters lie in the set $M$ or $L$. However, the reason behind the first-mover advantage differs in each set:
\begin{itemize}
item In set $M$, the defender needs to proactively secure all vulnerable facilities with strictly higher effort in $\widetilde{\Gamma}$ than that in $\Gamma$ to fully deter the attacker.
item In set $L$, the defender secures facilities in $\widetilde{\Gamma}$ with the same level of effort as that in $\Gamma$, and the attacker is still deterred with probability 1.
end{itemize}
On the other hand, in set $H$, the defense cost is so high that the defender is not able to secure all targeted facilities with an adequately high level of security effort. Thus, the attacker conducts an attack with probability 1 in both games, and the defender no longer has first-mover advantage.
Finally, for the sake of illustration, we compute the parameter sets $L$, $M$, and $H$ for a transportation network with three facilities (edges); see Fig. \ref{three_facility}. If an edge $e \in \mathcal{E}$ is not damaged, then the cost function is $\ell_e(w_e)$, which increases in the edge load $w_e$. If edge $e$ is successfully compromised by the attacker, then the cost function changes to $\ell_e^{\otimes}(w_e)$, which is higher than $\ell_e(w_e)$ for any edge load $w_e>0$.
\begin{figure}[htp]
\centering
\includegraphics[width=0.5\textwidth]{three_route_no_function.PNG}
\caption{Three edge network}
\label{three_facility}
\end{figure}
The network faces a set of non-atomic travelers with total demand $D=10$. We define the usage cost in this case as the average cost of travelers in Wardrop equilibrium \cite{correa2011wardrop}. Therefore, the usage costs corresponding to attacks on different edges are $C_1=20$, $C_2=19$, $C_3=18$, and the pre-attack usage cost is $C_{\emptyset}=17$. From \eqref{order}, $\bar{\mathcal{E}}_p=3$, and $\bar{\mathcal{E}}_{(1)}=\{e_1\}$, $\bar{\mathcal{E}}_{(2)}=\{e_2\}$, and $\bar{\mathcal{E}}_{(3)}=\{e_3\}$.
In Fig. \ref{ten_regime}, we illustrate the regimes of both $\Gamma$ and $\widetilde{\Gamma}$, and the three sets $H$, $M$, and $L$ distinguished by the thresholds $\bar{p}_d$ and $\tilde{p}_d(p_a)$.
\begin{figure}[H]
\centering
\begin{subfigure}{0.32\textwidth}
\includegraphics[width=\textwidth]{regime.PNG}
\caption{}
\end{subfigure}
\begin{subfigure}{0.32\textwidth}
\includegraphics[width=\textwidth]{spe_regime_ppt_noshade.png}
\caption{}
\end{subfigure}
\begin{subfigure}{0.32\textwidth}
\includegraphics[width=\textwidth]{H_M_L.png}
\caption{}
\end{subfigure}
\caption{(a) Regimes of NE in $\Gamma$, (b) Regimes of SPE in $\widetilde{\Gamma}$, (c) Comparison of NE and SPE.}
\label{ten_regime}
\end{figure}
\section{Model Extensions and Dynamic Aspects}\label{example_sec}
In this section, we first discuss how relaxing our modeling assumptions influences our main results. Next, we introduce a dynamic setup in which the users of the infrastructure system face uncertainty about the outcome of the attacker-defender interaction (i.e., the identity of the compromised facility), and follow a repeated learning procedure to make their usage decisions.
\subsection{Relaxing Model Assumptions}\label{extension}
Our discussion centers around extending our results when the following modeling aspects are included: facility-dependent cost parameters, less than perfect defense, and the attacker's ability to target multiple facilities.
\begin{enumerate}
\item \emph{Facility-dependent attack and defense costs.}
Our techniques for equilibrium characterization of the games $\Gamma$ and $\widetilde{\Gamma}$ --- as presented in Sections \ref{sec:generic_case} and \ref{sequential_section}, respectively --- can be generalized to the case when attack/defense costs are non-homogeneous across facilities. We denote the attack (resp. defense) cost for facility $e\in\mathcal{E}$ as $p_{a,e}$ (resp. $p_{d,e}$). However, an explicit characterization of equilibrium regimes in each game can be quite complicated due to the multidimensional nature of the cost parameters.
In the normal form game $\Gamma$, it is easy to show that the attacker's best response correspondence in Lemma \ref{only_attacked} holds except that the threshold attack probability for any facility $e \in \bar{\mathcal{E}}$ now becomes $p_{d,e}/(C_{e}-C_{\emptyset})$. The set of vulnerable facilities is given by $\{e \in \mathcal{E} \mid C_{e}-p_{a,e}>C_{\emptyset}\}$. The attacker's equilibrium strategy is to order the facilities in decreasing order of $C_{e}-p_{a,e}$, and target the facilities in this order each with the threshold probability until either all vulnerable facilities are targeted or the total probability of attack reaches 1. As in Theorem \ref{attacker_strategy}, the former case happens when the cost parameters lie in a type I regime, and the latter case happens for type II regimes, although the regime boundaries are more complicated to describe. In equilibrium, the defender chooses the security effort vector to ensure that the attacker is indifferent among choosing any of the pure actions that are in the support of the equilibrium attack strategy.
In the sequential game $\widetilde{\Gamma}$, Lemmas \ref{best_response_sequential} and \ref{zero_or_one} can be extended in a straightforward manner except that the threshold security effort for any vulnerable facility $e \in \{e \in \mathcal{E} \mid C_{e}-C_{\emptyset}>p_{a,e}\}$ is given by $\widehat{\rho}_e=(C_{e}-p_{a,e}-C_{\emptyset})/(C_{e}-C_{\emptyset})$. The SPE for this general case can be obtained analogously to Theorem \ref{theorem:SPE}, i.e., by comparing the defender's utility of either securing all vulnerable facilities with the threshold effort to fully deter the attack, or choosing a strategy that is identical to that in $\Gamma$. These cases happen when the cost parameters lie in (suitably defined) type $\widetilde{\mathrm{I}}$ and type $\widetilde{\mathrm{II}}$ regimes, respectively. The main conclusion of our analysis also holds: the defender obtains a higher utility by proactively defending all vulnerable facilities when the facility-dependent cost parameters lie in type $\widetilde{\mathrm{I}}$ regimes.
\item \emph{Less than perfect defense in addition to facility-dependent cost parameters.}
Now consider that the defense on each facility is only successful with probability $\gamma \in (0,1)$, which is an exogenous technological parameter. For any security effort vector $\rho$, the actual probability that a facility $e$ is not compromised when targeted by the attacker is $\gamma \rho_e$. Again, our results on NE and SPE in Sec. \ref{sec:generic_case} -- Sec. \ref{sequential_section} can be readily extended to this case. However, the expressions for the thresholds for attack probability and security effort level need to be modified.
In particular, for $\Gamma$, in Lemma \ref{only_attacked}, the threshold attack probability on any facility $e \in \bar{\mathcal{E}}$ is $p_{d,e}/\gamma(C_{e}-C_{\emptyset})$. For $\widetilde{\Gamma}$, the threshold security effort $\widehat{\rho}_e$ for any vulnerable facility $e \in \{e \in \mathcal{E} \mid C_{e}-C_{\emptyset}>p_{a,e}\}$ is $(C_{e}-p_{a,e}-C_{\emptyset})/\gamma(C_{e}-C_{\emptyset})$. If this threshold is higher than 1 for a particular facility, then the defender is not able to deter the attack from targeting it.
\item \emph{Attacker's ability to target multiple facilities.}
If the attacker is not constrained to targeting a single facility, his pure strategy set would be $S_a=2^{\mathcal{E}}$. Then, for a pure strategy profile $\left(s_d, s_a\right)$, the set of compromised facilities is given by $s_a \setminus s_d$, and the usage cost is $C_{s_a \setminus s_d}$.
Unfortunately, our approach cannot be straightforwardly applied to this case. This is because the mixed strategies cannot be equivalently represented as probability vectors with elements representing the probability of each facility being targeted or secured. In fact, for a given attacker's strategy, one can find two feasible defender's mixed strategies that induce an identical security effort vector, but result in different players' utilities. Hence, the problem of characterizing the defender's equilibrium strategies cannot be reduced to characterizing the equilibrium security effort on each facility.
Instead, one would need to account for the attack/defense probabilities on all the subsets of facilities in $\mathcal{E}$. This problem is beyond the scope of our paper, although a related work \cite{dahan2015network} has made some progress in this regard.
\end{enumerate}
Finally, we briefly comment on the model where all three aspects are included. So long as players' strategy sets are comprised of mixed strategies, the defender's equilibrium utility in $\widetilde{\Gamma}$ must be higher than or equal to that in $\Gamma$. This is because in $\widetilde{\Gamma}$, the defender can always choose the same strategy as that in NE to achieve a utility that is no less than that in $\Gamma$. Moreover, one can show the existence of cost parameters such that the defender has a strictly higher equilibrium utility in SPE than in NE. In particular, consider that the attacker's cost parameters $\left(p_{a,e}\right)_{e\in\mathcal{E}}$ in this game are such that there is only one vulnerable facility $\bar{e} \in \mathcal{E}$ with $C_{\bar{e}}-C_{\emptyset}>p_{a, \bar{e}}$, and the threshold effort on that facility satisfies $\widehat{\rho}_{\bar{e}}=\left(C_{\bar{e}}-p_{a,\bar{e}}-C_{\emptyset}\right)/\gamma(C_{\bar{e}}-C_{\emptyset})<1$. In this case, if the defense cost $p_{d, \bar{e}}$ is sufficiently low, then by proactively securing the facility $\bar{e}$ with the threshold effort $\widehat{\rho}_{\bar{e}}$, the defender can deter the attack completely and obtain a strictly higher utility in $\widetilde{\Gamma}$ than that in $\Gamma$. Thus, for such cost parameters, the defender gets the first mover advantage in equilibrium.
\subsection{Rational Learning Dynamics}\label{example_dynamic}
We now discuss an approach for analyzing the dynamics of usage cost after a security attack. Recall that the attacker-defender model enables us to evaluate the vulnerability of individual facilities to a strategic attack for the purpose of prioritizing defense investments. One can view this model as a way to determine the set of possible post-attack states, denoted $s \in S \triangleq \mathcal{E}\cup \{\emptyset\}$. In particular, we consider situations in which the distribution of the system state, denoted $\theta \in \Delta(S)$, is determined by an equilibrium of the attacker-defender game ($\Gamma$ or $\widetilde{\Gamma}$). In $\Gamma$, for each $s \in S$, the probability $\theta(s)$ is given as follows:
\begin{align}\label{ps}
\theta(s)=\left\{
\begin{array}{ll}
\sigma_a^{*}(e) \cdot (1-\rho_e^{*}), \quad & \quad \text{if $s=e$,}\\
1-\sum_{e \in \mathcal{E}} \theta(e), \quad & \quad \text{if $s=\emptyset$.}
\end{array}
\right.
\end{align}
For $\widetilde{\Gamma}$, the probability distribution $\theta$ can be analogously defined in terms of $\widetilde{\sigma}_a^{*}$ and $\tilde{\rho}^{*}$.
Let the realized state be $s=e$, i.e., the facility $e \in\mathcal{E}$ is compromised by the attacker. If this information is known perfectly to all the users immediately after the attack, they can shift their usage choices in accordance with the new state. Then, the cost resulting from the users' choices indeed corresponds to the usage cost $C_s=C_{e}$, which governs the realized payoffs of both the attacker and the defender. However, from our results (Theorems \ref{attacker_strategy} and \ref{theorem:SPE}), it is apparent that the support of equilibrium player strategies (and hence the support of $\theta$) can be quite large. Due to inherent limitations in perfectly diagnosing the location of attack, in some situations, the users may not have full knowledge of the realized state. Then, the issues of how users with imperfect information make their decisions in a repeated learning setup, and whether or not the long-run usage cost converges to the actual cost $C_{e}$, become relevant.
To contextualize the above issues, consider the situation in which a transportation system is targeted by an external hacker, and that the operation of a single facility is compromised. Furthermore, the nature of attack is such that travelers are not able to immediately know the identity of this facility. This situation can arise when the diagnosis of attack and/or dissemination of information about the attack is imperfect. Examples include cyber-security attacks to transportation facilities that can result in hard-to-detect effects such as compromised traffic signals of a major intersection, or tampering of controllers governing the access to a busy freeway corridor. Then, one can study the problem of learning by rational but imperfectly informed travelers using a repeated routing game model. We now discuss the basic ideas behind the study of this problem. A more rigorous treatment is part of our ongoing work, and will be detailed in a subsequent paper.
Let the stages of our repeated routing game be denoted as $t \in T =\{1, 2, \dots\}$. In this game, travelers are imperfectly informed about the network state. In particular, in each stage $t\in T$, they maintain a belief about the state $\theta^t$. The initial belief $\theta^0$ can be different from the prior state distribution $\theta$. However, we require that $\theta^0$ is absolutely continuous with respect to $\theta$ (\cite{kalai1993subjective}):
\[\forall s \in S, \quad \theta(s)>0 \quad \Rightarrow \quad \theta^0(s)>0. \]
That is, the initial belief of travelers does not rule out any possible state.
The solution concept we use for this repeated game is \emph{Markov-perfect equilibrium} (see \cite{maskin2001markov}), in which travelers use routes with the smallest expected cost based on the belief in each stage. Equivalently, the equilibrium routing strategy in stage $t$ is a Wardrop equilibrium of the stage game with belief $\theta^t$ (\cite{correa2011wardrop}). We also consider that at the end of each stage, travelers receive noisy information about the realized costs on routes that are taken. However, no information is available for routes that are not chosen by any traveler. Based on the received information, travelers update their belief of the state using Bayes' rule.
We note that numerous learning schemes have been studied in the literature; for e.g. fictitious play (\cite{brown1951iterative}, \cite{fudenberg1995consistency}, and \cite{hofbauer2002global}); reinforcement learning (\cite{beggs2005convergence}, \cite{cominetti2012adaptive} and \cite{cominetti2010payoff}), and regret minimizations (\cite{blum2006routing} and \cite{marden2007regret}). These learning schemes typically assume that strategies in each stage are determined by a certain function of the history payoff or actions. To explain the learning dynamics
in our set-up we consider that in each stage travelers are rational, and they aim to maximize the payoff myopically based on their current belief about other travelers' strategies. The players update their beliefs based on observed actions on the play-path. This so-called rational learning dynamics has been investigated by \cite{battigalli1992learning}, \cite{fudenberg1995learning}, \cite{kalai1993subjective}, and \cite{kalai2015learning}. Our model is different from the ones in literature in that travelers are uncertain about the payoff functions, but correctly anticipate the opponents' strategies. Additionally, the information of the payoff in each stage is noisy and limited (only the realized costs on the taken routes are known).
The game can be understood easily via an example of the transportation network in Fig. \ref{three_facility}. In each stage $t$, travelers with inelastic demand $D$ choose route $r_1$ ($e_2-e_1$) or route $r_2$ ($e_3-e_1$). We denote the equilibrium routing strategy in stage $t$ as $q^{t*}(\theta^t)=\left(q^{t*}_r(\theta^t)\right)_{r \in \{r_1, r_2\}}$, where $q^{t*}_r(\theta^t)$ is the demand of travelers using route $r$ given the belief $\theta^t$. Hence, the aggregate flow on edge $e_2$ (resp. $e_3$) is $w_2^{t*}(\theta^t)=q_1^{t*}(\theta^t)$ (resp. $w_3^{t*}(\theta^t)=q_2^{t*}(\theta^t)$), and the aggregate flow on edge $e_1$ is $w_1^{t*}(\theta^t)=D$. Each stage game is a congestion game, and hence admits a potential function. The equilibrium routing strategy $q^{t*}(\theta^t)$ can be computed efficiently for this game. Moreover, in each stage, the equilibrium is essentially unique in that the equilibrium edge load is unique for a given belief (\cite{sandholm2001potential}).
The realized cost on each edge $e \in \mathcal{E}$, denoted $c_e^s(w_e^{t*}(\theta^t))$, equals the cost (shown in Fig. \ref{three_facility} for the example network) plus a random variable $\epsilon_e$:
\begin{align}\label{epe}
c_e^s(w_e^{t*}(\theta^t))=\left\{
\begin{array}{ll}
\ell_e^\otimes(w_e^{t*}(\theta^t))+\epsilon_e, & \quad \text{if $s=e$,}\\
\ell_e(w_e^{t*}(\theta^t))+\epsilon_e, & \quad \text{otherwise.}
\end{array}
\right.
\end{align}
We illustrate two cases that can arise in rational learning:
\begin{itemize}
\item \emph{Long-run usage cost equals $C_s$ for any $s \in \{e_1, e_2, e_3, \emptyset\}$.}
Consider the case where the initial belief is $\theta^0(e_1)=1/12$, $\theta^0(e_2)=1/3$, $\theta^0(e_3)=1/12$, $\theta^0(\emptyset)=1/2$ (the initial belief can be any probability vector which satisfies the absolute continuity assumption). For any $e \in \mathcal{E}$, the random variable $\epsilon_e$ in \eqref{epe} is distributed as $U[-3, 3]$. The total demand is $D=10$. Figs. \ref{theta_1}--\ref{theta_4} show how the belief of each state evolves. We see that eventually travelers learn the true state, and hence the long-run usage cost converges to the actual post-attack usage cost $C_s$, even though initially all travelers are imperfectly informed about the state.
\begin{figure}[htp]
\centering
\begin{subfigure}[b]{0.40\textwidth}
\includegraphics[width=\textwidth]{theta_1.pdf}
\caption{$s=e_1$.}
\label{theta_1}
\end{subfigure}
~
\begin{subfigure}[b]{0.40\textwidth}
\includegraphics[width=\textwidth]{theta_2.pdf}
\caption{$s=e_2$.}
\label{theta_2}
\end{subfigure}\\
\begin{subfigure}[b]{0.40\textwidth}
\includegraphics[width=\textwidth]{theta_3.pdf}
\caption{$s=e_3$.}
\label{theta_3}
\end{subfigure}
~
\begin{subfigure}[b]{0.40\textwidth}
\includegraphics[width=\textwidth]{theta_4.pdf}
\caption{$s=\emptyset$.}
\label{theta_4}
\end{subfigure}
\caption{Rational learning leads to the usage cost of the true state.}
\label{fig:correct_prior_converge}
\end{figure}
\item \emph{Long-run usage cost is higher than $C_s$.}
Consider the case when, as a result of an attack on edge $e_2$, the cost function on $e_2$ changes to $\ell_2^\otimes(w_2)=(7/3)w_2+50$. The total demand is $D=5$, and the initial belief is $\theta^0(e_1)=1/12$, $\theta^0(e_2)=1/3$, $\theta^0(e_3)=1/12$, $\theta^0(\emptyset)=1/2$. Starting from this initial belief, travelers exclusively take route $r_2$, and hence they do not obtain any information about $e_2$. Even when the realized state is $s=\emptyset$, travelers end up repeatedly taking $r_2$ as if $e_2$ were compromised. Thus, the long-run average cost is $C_{e_2}$, which is higher than the cost corresponding to the true state $C_{\emptyset}$. Therefore, rational learning dynamics can lead to long-run inefficiency. We illustrate the equilibrium routing strategies and beliefs in each stage in Fig. \ref{q_self_confirm} and Fig. \ref{theta_self_confirm}, respectively.
\begin{figure}[htp]
\centering
\begin{subfigure}[b]{0.40\textwidth}
\includegraphics[width=\textwidth]{q_self_confirm.pdf}
\caption{}
\label{q_self_confirm}
\end{subfigure}
~
\begin{subfigure}[b]{0.40\textwidth}
\includegraphics[width=\textwidth]{theta_self_confirm.pdf}
\caption{}
\label{theta_self_confirm}
\end{subfigure}
\caption{Learning leads to long-run inefficiency ($\mathbf{s}=\emptyset$): (a) Equilibrium routing strategies; (b) Beliefs.}
\label{fig:self_confirm}
\end{figure}
\end{itemize}
These cases illustrate that if the post-attack state is not perfectly known by the users of the system, then the cost experienced by the users depends on the learning dynamics induced by the repeated play of rational users. In particular, the learning dynamics can induce a higher usage cost in the long run in comparison to the cost corresponding to the true state. Following previously known results \cite{fudenberg1993steady}, one can argue that if a sufficient amount of ``off-equilibrium'' experiments are conducted by travelers, then the learning will converge to a Wardrop equilibrium with the true state. However, such experiments are in general not costless.
As a final remark, we note another implication of the proactive defense strategy in the ranges of attack/defense cost parameters where the first-mover advantage holds. In particular, when the cost parameters are in the sets $L$ and $M$ as given in \eqref{L_set}--\eqref{M_set}, the attack is completely deterred in the sequential game $\widetilde{\Gamma}$ and there is no uncertainty in the realized state. In such a situation,
one does not need to consider uncertainty in the travelers' belief about the true state, and the issue of long-run inefficiency due to learning behavior does not arise.
\section*{Acknowledgments}
We are sincerely thankful to Prof. Georges Zaccour and two anonymous referees whose constructive comments helped us to improve our initial manuscript. We thank seminar participants at MIT, HEC Montreal, University of Pennsylvania, and NYU Abu Dhabi for helpful comments. The authors are grateful to Professors Alexandre Bayen, Patrick Jaillet, Karl Johansson, Patrick Loiseau, Samer Madanat, Hani Mahmassani, Asu Ozdaglar, Galina Schwartz, Demos Teneketzis, Rakesh Vohra, Dan Work, Georges Zaccour for insightful comments and discussions in the early phase of this research. This work was supported in part by Singapore-MIT Alliance for Research and Technology (SMART) Center for Future Mobility (FM), NSF grant CNS 1239054, NSF CAREER award CNS 1453126.
\newpage
\begin{appendix}
\section{Proofs of Section~\ref{Sec:attack-defend}}\label{appendix_three}
\noindent\emph{Proof of Lemma~\ref{lemma:stra_construct}.}
We first show that the strategy in \eqref{stra_construct} is feasible. Since $\rho_{(1)} \leq 1$, and for any $i=1, \dots, m-1$, $\rho_{(i)}-\rho_{(i+1)}>0$, $\sigma_d(s_d)$ is non-negative for any $s_d \in S_d$. Additionally,
\begin{align*}
\sum_{s_d \in S_d}\sigma_d(s_d)&=\sigma_d\left(\emptyset\right)+\sum_{i=1}^{m-1} \sigma_d\left(\left\{e \in \mathcal{E}\,|\, \rho_e \geq \rho_{(i)}\right\}\right)+\sigma_d\left(\left\{e \in \mathcal{E}\,|\, \rho_e \geq \rho_{(m)} \right\}\right)\\
&=\left(1-\rho_{(1)}\right)+\sum_{i=1}^{m-1}\left(\rho_{(i)}-\rho_{(i+1)}\right)+\rho_{(m)}\\
&=1-\rho_{(1)}+\rho_{(1)}-\rho_{(m)}+\rho_{(m)}\\
&=1.
\end{align*}
Thus, $\sigma_d$ in \eqref{stra_construct} is a feasible strategy of the defender. Now we check that $\sigma_d$ in \eqref{stra_construct} indeed induces $\rho$. Consider any $e \in \mathcal{E}$ such that $\rho_e=0$. Then, since $e \notin \left\{e \in \mathcal{E}\,|\,\rho_e \geq \rho_{(i)} \right\}$ for any $i=1, \dots, m$, and $e \notin \emptyset$, for any $s_d \ni e$, we must have $\sigma_d(s_d)=0$. Thus, $\sum_{s_d \ni e} \sigma_d(s_d)=0=\rho_e$. Finally, for any $j=1, \dots, m$, consider any $e \in \mathcal{E}$, where $\rho_e=\rho_{(j)}$:
\begin{align*}
\sum_{s_d \ni e} \sigma_d(s_d)=\sum_{i=j}^{m} \sigma_d\left(\left\{e \in \mathcal{E}\,|\, \rho_e \geq \rho_{(i)}\right\}\right)=\rho_{(j)}.
\end{align*}
Therefore, $\sigma_d$ in \eqref{stra_construct} induces $\rho$.
\qed
\noindent\emph{Proof of Proposition~\ref{strict_dominated}.}
We prove the result by the principle of iterated dominance. We first show that any $s_d$ such that $s_d \nsubseteq \bar{\mathcal{E}}$ is strictly dominated by the strategy $s_d' =s_d \cap \bar{\mathcal{E}}$. Consider any pure strategy of the attacker, $s_a \in \mathcal{E}$; the utilities of the defender with strategies $s_d$ and $s_d'$ are as follows:
\begin{align*}
u_d(s_d, s_a)&=-C(s_d, s_a)-|s_d| p_d=-C(s_d, s_a)-(|s_d'|+|s_d \setminus \bar{\mathcal{E}}|) p_d, \\
u_d(s_d', s_a)&=-C(s_d', s_a)-|s_d'| p_d.
\end{align*}
If $s_a \in \bar{\mathcal{E}}$ or $s_a \notin s_d$ or $s_a=\emptyset$, then $C(s_d, s_a)=C(s_d', s_a)$, and thus $U_d(s_d, s_a)<U_d(s_d', s_a)$. If $s_a=e \in s_d \setminus \bar{\mathcal{E}}$, then $e \notin \bar{\mathcal{E}}$, and $C_{e} \leq C_{\emptyset}$. We have $C(s_d, s_a)=C_{\emptyset} \geq C_{e}=C(s_d', s_a)$, and thus $U_d(s_d', s_a) \geq -C(s_d, s_a)-|s_d'| p_d> U_d(s_d, s_a)$. Therefore, any $s_d$ such that $s_d \nsubseteq \bar{\mathcal{E}}$ is a strictly dominated strategy. Hence, in $\Gamma$, any equilibrium strategy of the defender satisfies $\sigma_d^{*}(s_d)=0$. From \eqref{eq:ped}, we know that $\rho_e^{*}=0$ for any $e \in \mathcal{E}\setminus \bar{\mathcal{E}}$.
We denote the set of defender's pure strategies that are not strictly dominated as $\bar{S}_d=\{s_d \,|\, s_d \subseteq \bar{\mathcal{E}}\}$. Consider any $s_d \in \bar{S}_d$; we show that any $s_a \in \mathcal{E}\setminus \bar{\mathcal{E}}$ is strictly dominated by the strategy $\emptyset$. The utility functions of the attacker with strategies $s_a$ and $\emptyset$ are as follows:
\begin{align*}
u_a(s_d, s_a)&=C(s_d, s_a)-p_a,\\
u_a(s_d, \emptyset)&=C(s_d, \emptyset).
\end{align*}
Since $s_d \subseteq \bar{\mathcal{E}}$ and $s_a \in \mathcal{E}\setminus \bar{\mathcal{E}}$, $s_a \notin s_d$; thus $C(s_d, s_a)=C_{s_a}\leq C_{\emptyset}$. However, $C(s_d, \emptyset) = C_{\emptyset}$ and $p_a >0$. Therefore, $U_a(s_d, \emptyset)> U_a(s_d, s_a)$. Hence, any $s_a \in \mathcal{E}\setminus \bar{\mathcal{E}}$ is strictly dominated. Hence, in equilibrium, the probability of the attacker choosing facility $e \in \mathcal{E}\setminus \bar{\mathcal{E}}$ is 0 in $\Gamma$.
We can analogously argue that in $\widetilde{\Gamma}$, $\tilde{\rho}^{*}_e=0$ and $\widetilde{\sigma}_a^{*}(e, \tilde{\rho})=0$ for any $e \in \mathcal{E}\setminus \bar{\mathcal{E}}$.
\qed
\section{Proofs of Section~\ref{sec:generic_case}}\label{appendix_NE_proof}
\noindent\emph{Proof of Lemma~\ref{zero_sum}.}
The utility functions of the attacker with strategy $\sigma_a$ in $\Gamma^0$ and $\Gamma$ are related as follows:
\begin{align*}
U_a^{0}(\sigma_d, \sigma_a) =U_a(\sigma_d, \sigma_a)+\mathbb{E}_{\sigma_d}\left[|s_d|\right]\cdot p_d.
\end{align*}
Thus, for a given $\sigma_d$, any $\sigma_a$ that maximizes $U_a^{0}(\sigma_d, \sigma_a)$ also maximizes $U_a(\sigma_d, \sigma_a)$. So the set of best response strategies of the attacker in $\Gamma^0$ is identical to that in $\Gamma$. Analogously, given any $\sigma_a$, the set of best response strategies of the defender in $\Gamma$ is identical to that in $\Gamma^0$. Thus, $\Gamma^0$ and $\Gamma$ are strategically equivalent, i.e.\ they have the same set of equilibrium strategy profiles. Using the interchangeability property of equilibria in zero-sum games, we directly
obtain that for any $\sigma_d^{*} \in \Sigma^{*}_d$ and any $\sigma_a^{*} \in \Sigma^{*}_a$, $(\sigma_d^{*}, \sigma_a^{*})$ is an equilibrium strategy profile. \qed
\noindent\emph{Proof of Proposition~\ref{opt_eq}.}
From Lemma~\ref{zero_sum}, the set of attacker's equilibrium strategies $\Sigma^{*}_a$ is the optimal solution of the following maximin problem:
\begin{subequations}\label{maxmin}
\begin{align}
\max_{\sigma_a} \quad &\min_{s_d \in S_d} \left\{\sum_{e \in \bar{\mathcal{E}}} \left(C(s_d, e)+|s_d| p_d-p_a\right)\cdot \sigma_a(e)+ \left(C(s_d, \emptyset) +|s_d| p_d\right) \cdot \sigma_a(\emptyset) \right\}\notag \\
\text{s.t.} \quad &\sum_{e \in \bar{\mathcal{E}}} \sigma_a(e)+ \sigma_a(\emptyset)=1, \label{sum_sig}\\
&\sigma_a(\emptyset) \geq 0, \quad \sigma_a(e) \geq 0, \quad \forall e \in \bar{\mathcal{E}}. \label{eq:non-negative}
\end{align}
\end{subequations}
Given any $s_d \in S_d$, we can express the objective function in \eqref{maxmin} as follows:
\begin{align*}
&\sum_{e \in \bar{\mathcal{E}}} \left(C(s_d, e)+|s_d| p_d-p_a\right)\cdot \sigma_a(e)+ \left(C(s_d, \emptyset) +|s_d| p_d\right) \cdot \sigma_a(\emptyset) \\
=&\sum_{e \in \bar{\mathcal{E}}} \left(C(s_d, e)-p_a\right)\cdot \sigma_a(e)+ C(s_d, \emptyset) \sigma_a(\emptyset) + |s_d| p_d \cdot \left(\sum_{e \in \mathcal{E}}\sigma_a(e)+\sigma_a(\emptyset)\right)\\
\stackrel{\eqref{sum_sig}}{=}&\sum_{e \in \bar{\mathcal{E}}}\sigma_a(e)\cdot \left( C(s_d, e)-p_a\right)+ |s_d| p_d+ \sigma_a(\emptyset) \cdot C_{\emptyset}\\
=&\sum_{e \in \bar{\mathcal{E}}}\sigma_a(e) \cdot \left(C(s_d, e)-p_a\right)+p_d \cdot \left(\sum_{e \in \bar{\mathcal{E}}} \mathbbm{1}\{s_d \ni e\}\right)+ \sigma_a(\emptyset) \cdot C_{\emptyset} \\
=&\sum_{e \in \bar{\mathcal{E}}}\left(\sigma_a(e) \cdot \left(C(s_d, e)-p_a\right)+p_d \cdot \mathbbm{1}\{s_d \ni e\}\right)+\sigma_a(\emptyset) \cdot C_{\emptyset} \\
\stackrel{\eqref{Ceq}}{=}&\sum_{e \in s_d} \left(\sigma_a(e)\cdot \left(C_{\emptyset}-p_a\right)+p_d\right)+\sum_{e \in \bar{\mathcal{E}} \setminus s_d} \sigma_a(e) \cdot \left(C_{e}-p_a\right)+ \sigma_a(\emptyset) \cdot C_{\emptyset}.
\end{align*}
Therefore, we can write:
\begin{align*}
&\min_{s_d \in S_d}\left\{\sum_{e \in \bar{\mathcal{E}}} \left(C(s_d, e)+|s_d| p_d-p_a\right)\cdot \sigma_a(e)+ \left(C(s_d, \emptyset) +|s_d| p_d\right) \cdot \sigma_a(\emptyset) \right\} \\
=&\min_{s_d \in S_d}\left\{ \sum_{e \in s_d} \left(\sigma_a(e)\cdot \left(C_{\emptyset}-p_a\right)+p_d\right)+\sum_{e \in \bar{\mathcal{E}} \setminus s_d} \sigma_a(e) \cdot \left(C_{e}-p_a\right)+ \sigma_a(\emptyset) \cdot C_{\emptyset} \right\}\\
=&\sum_{e \in \bar{\mathcal{E}}} \min \left\{\sigma_a(e)\cdot \left( C_{\emptyset}-p_a\right)+p_d,~ \sigma_a(e) \cdot \left(C_{e}-p_a\right)\right\}+\sigma_a(\emptyset) \cdot C_{\emptyset} \\
=&V(\sigma_a).
\end{align*}
Thus \eqref{maxmin} is equivalent to \eqref{maxmin_min}, and $\Sigma^{*}_a$ is the optimal solution set of \eqref{maxmin_min}.
By introducing an $|\bar{\mathcal{E}}|$-dimensional variable $v=\left(v_e\right)_{e \in \bar{\mathcal{E}}}$, \eqref{maxmin_min} can be changed to the linear optimization program \eqref{linear_maxmin}, and $\Sigma^{*}_a$ is the optimal solution set of \eqref{linear_maxmin}. \qed
\noindent\emph{Proof of Lemma~\ref{only_attacked}.}
We first argue that the defender's best response is in \eqref{best_response_normal}. For an edge $e \in \mathcal{E}$ such that $\sigma_a(e)<\frac{p_d}{C_{e}-C_{\emptyset}}$, we have $\left(C_{\emptyset}-C_{e}\right)\sigma_a(e)+p_d>0$. Since $\rho \in BR(\sigma_a)$ maximizes $U_d(\sigma_d, \sigma_a)$ as given in \eqref{Ud_rewrite}, $\rho_e$ must be 0. Additionally, Proposition~\ref{strict_dominated} ensures that for any $e \in \mathcal{E} \setminus \bar{\mathcal{E}}$, $\rho_e$ is 0.
Analogously, if $\sigma_a(e)>\frac{p_d}{C_{e}-C_{\emptyset}}$, then $\left(C_{\emptyset}-C_{e}\right)\sigma_a(e)+p_d<0$, and the best response is $\rho_e=1$. Finally, if $\sigma_a(e)=\frac{p_d}{C_{e}-C_{\emptyset}}$, any $\rho_e \in [0, 1]$ can be a best response.
We next prove \eqref{upper_bound}.
We show that if a feasible $\sigma_a$ violates \eqref{sub:upper_bound}, i.e., there exists a facility, denoted $\hat{e} \in \bar{\mathcal{E}}$, such that $\sigma_a(\hat{e}) > \frac{p_d}{C_{\hat{e}}-C_{\emptyset}}$, then $\sigma_a$ cannot be an equilibrium strategy. There are two cases:
\begin{enumerate}[label=(\alph*)]
\item There exists another facility $\bar{e}' \in \bar{\mathcal{E}}$ such that $\sigma_a(\bar{e}') <\frac{p_d}{C_{\bar{e}'}-C_{\emptyset}}$. Consider an attacker's strategy $\sigma_a'$ defined as follows:
\begin{align*}
\sigma_a'(e)&=\sigma_a(e), \quad \forall e \in \bar{\mathcal{E}} \setminus \{\hat{e}, \bar{e}'\}, \quad \sigma_a'(\emptyset)=\sigma_a(\emptyset),\\
\sigma_a'(\hat{e})&=\sigma_a(\hat{e})-\epsilon, \\
\sigma_a'(\bar{e}')&=\sigma_a(\bar{e}')+\epsilon,
\end{align*}
where $\epsilon$ is a sufficiently small positive number so that $\sigma_a'(\hat{e}) > \frac{p_d}{C_{\hat{e}}-C_{\emptyset}}$ and $\sigma_a'(\bar{e}') <\frac{p_d}{C_{\bar{e}'}-C_{\emptyset}}$. We obtain:
\begin{align*}
V(\sigma_a')-V(\sigma_a)=\epsilon \left(C_{\bar{e}'}-C_{\emptyset}\right)>0.
\end{align*}
The last inequality holds from \eqref{Ebar} and $\bar{e}' \in \bar{\mathcal{E}}$. Therefore, $\sigma_a$ cannot be an attacker's equilibrium strategy.
\item If there does not exist a facility $\bar{e}'$ as defined in case (a), then for any $e \in \bar{\mathcal{E}}$, we have $\sigma_a(e) \geq \frac{p_d}{C_{e}-C_{\emptyset}}$. Now consider $\sigma_a'$ as follows:
\begin{align*}
\sigma_a'(e)&=\sigma_a(e), \quad \forall e \in \mathcal{E} \setminus \{\hat{e}\}, \\
\sigma_a'(\hat{e})&=\sigma_a(\hat{e})-\epsilon, \\
\sigma_a'(\emptyset)&=\sigma_a(\emptyset)+\epsilon,
\end{align*}
where $\epsilon$ is a sufficiently small positive number so that $\sigma_a'(\hat{e}) > \frac{p_d}{C_{\hat{e}}-C_{\emptyset}}$. We obtain:
\begin{align*}
V(\sigma_a')-V(\sigma_a)=\epsilon \left(C_{\emptyset}-\left(C_{\emptyset}-p_a\right)\right)= \epsilon p_a>0.
\end{align*}
Therefore, $\sigma_a$ also cannot be an attacker's equilibrium strategy.
\end{enumerate}
Thus, we can conclude from cases (a) and (b) that in equilibrium $\sigma_a^{*}$ must satisfy \eqref{sub:upper_bound}. Additionally, from Proposition~\ref{strict_dominated}, \eqref{zero_out} is also satisfied. \qed
\noindent\emph{Proof of Theorem~\ref{attacker_strategy}.}
We first prove the attacker's equilibrium strategies in each regime. From Proposition~\ref{opt_eq} and Lemma~\ref{only_attacked}, we know that $\sigma_a^{*}$ maximizes $V(\sigma_a)$, which can be equivalently re-written as in \eqref{re-express-V}. We analyze the attacker's equilibrium strategy set in each regime subsequently:
\begin{enumerate}[label=(\alph*)]
\item Type I regimes $\Lambda^i$:
\begin{itemize}
\item $i=0$:\\
Since $p_a > C_{(1)}-C_{\emptyset}$, we must have $C_{\emptyset} > C_{e}-p_a$ for any $e \in \bar{\mathcal{E}}$. There is no vulnerable facility, and thus $\sigma_a^{*}(\emptyset)=1$.
\item $i=1, \dots, \bar{K}$:\\
Since $p_d$ satisfies \eqref{regimei_notlast} or \eqref{regimei_last}, we obtain:
\begin{align}\label{sum_smaller_1}
\sum_{e \in \cup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}}\frac{p_d}{C_{e}-C_{\emptyset}}=\sum_{k=1}^{i} \frac{p_d \cdot E_{(k)}}{C_{(k)}-C_{\emptyset}} < 1.
\end{align}
Therefore, the set of feasible attack strategies satisfying \eqref{multiple_bound}--\eqref{6d} is a non-empty set. We also know from Lemma~\ref{only_attacked} that $\sigma_a^{*}$ satisfies \eqref{sub:upper_bound}. Again from \eqref{regimei_notlast} or \eqref{regimei_last}, for any $k=1, \dots, i$, we have $C_{(k)}-p_a > C_{\emptyset}$, and for any $k=i+1, \dots, \bar{K}$, we have $C_{(k)}-p_a < C_{\emptyset}$. Since $\{C_{(k)}\}_{k=1}^K$ satisfy \eqref{order}, to maximize $V(\sigma_a)$ in \eqref{re-express-V}, the optimal solution must satisfy \eqref{multiple_bound}--\eqref{6d}.
\end{itemize}
\item Type II regimes $\Lambda_j$:
\begin{itemize}
\item $j=1$: From \eqref{regime_j_1}, we know that:
\begin{align}\label{larger_one}
1=\sum_{e \in \bar{\mathcal{E}}_{(1)}} \sigma_a^{*}(e)< \frac{p_d E_{(1)}}{C_{(1)}-C_{\emptyset}}.
\end{align}
Thus, the set of feasible attack strategies satisfying \eqref{sub:upper_one}--\eqref{sum_j_one} is a non-empty set. Additionally, from Lemma~\ref{only_attacked}, we know that $\sigma_a^{*}$ satisfies \eqref{sub:upper_one}. Since $C_{(1)}>C_{(k)}$ for any $k=2, \dots, \bar{K}$, and $C_{(1)}-p_a>C_{\emptyset}$, from \eqref{re-express-V} and \eqref{larger_one} we know that in equilibrium the attacker targets facilities in $\bar{\mathcal{E}}_{(1)}$ with probability 1. The set of strategies satisfying \eqref{sub:upper_one}--\eqref{sum_j_one} maximizes \eqref{re-express-V}, and thus is the set of attacker's equilibrium strategies.
\item $j=2, \dots, \bar{K}$: From \eqref{regime_j_rest}, we know that:
\begin{align*}
0 < 1-\sum_{k=1}^{j-1}\frac{p_d \cdot E_{(k)}}{C_{(k)}-C_{\emptyset}} < \frac{p_d \cdot E_{(j)}}{C_{(j)}-C_{\emptyset}}.
\end{align*}
Thus, the set of feasible attack strategies satisfying \eqref{regime_k_1}--\eqref{regime_k_3} is a non-empty set. From Lemma~\ref{only_attacked}, we know that $\sigma_a^{*}$ satisfies \eqref{regime_k_2}. Since $\{C_{(k)}\}_{k=1, \dots, j}$ satisfies the ordering in \eqref{order}, in order to maximize $V(\sigma_a)$ in \eqref{re-express-V}, $\sigma_a^{*}$ must also satisfy \eqref{regime_k_1} and \eqref{regime_k_3}, and the remaining facilities are not targeted.
\end{itemize}
\end{enumerate}
We next prove the defender's equilibrium security effort. By definition of Nash equilibrium, the probability vector $\rho^{*}$ is induced by an equilibrium strategy if and only if it satisfies the following two conditions:
\begin{enumerate}
\item $\rho^{*}$ is a best response to any $\sigma_a^{*} \in \Sigma^{*}_a$.
\item Any attacker's equilibrium strategy is a best response to $\rho^{*}$, i.e.\ the attacker has identical utilities for choosing any pure strategy in his equilibrium support set, and the utility is no less than that of any other pure strategy.
\end{enumerate}
Note that in both conditions, we require $\rho^{*}$ to be a best response to \emph{any} attacker's equilibrium strategy. This is because given any $\sigma_a^{*} \in \Sigma^{*}_a$, $\left(\rho^{*}, \sigma_a^{*}\right)$ is an equilibrium strategy profile (Lemma~\ref{zero_sum}).
We now check these conditions in each regime:
\begin{enumerate}[label=(\alph*)]
\item Type I regimes $\Lambda^i$:
\begin{itemize}
\item If $i=0$:\\
Since $\sigma_a^{*}(e)=0$ for any $e \in \mathcal{E}$, from Lemma~\ref{only_attacked}, the best response of the defender is $\rho_e^{*}=0$ for any $e \in \mathcal{E}$.
\item If $i=1, \dots, \bar{K}$:\\
From Lemma~\ref{only_attacked}, we know that $\rho_e^{*}=0$ for any $e \in \mathcal{E} \setminus \left(\cup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}\right)$. Since $\sigma_a^{*}(\emptyset)>0$, $\rho_e^{*}$ must ensure that the attacker's utility of choosing any facility $e \in \cup_{k=1}^{i}\bar{\mathcal{E}}_{(k)}$ is identical to that of choosing no attack $\emptyset$. Consider any $e \in \cup_{k=1}^{i}\bar{\mathcal{E}}_{(k)}$:
\begin{alignat*}{2}
&&U_a(\rho^{*}, e)&=U_a(\rho^{*}, \emptyset),\\
\stackrel{\eqref{Ua_rewrite}}{\Rightarrow} \quad &&\rho_e^{*} \left(C_{\emptyset}-p_a\right)+(1-\rho_e^{*}) \left(C_{e}-p_a\right)&=C_{\emptyset},\\
\Rightarrow \quad &&\rho_e^{*}&=\frac{C_{e}-p_a-C_{\emptyset}}{C_{e}-C_{\emptyset}}, \quad \forall e \in \cup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}.
\end{alignat*}
For any $\hat{e} \in \mathcal{E} \setminus \left(\cup_{k=1}^{i}\bar{\mathcal{E}}_{(k)}\right)$, since $\rho^{*}_{\hat{e}}=0$, the attacker receives utility $C_{\hat{e}}-p_a$ by targeting $\hat{e}$, which is lower than $C_{\emptyset}$. Therefore, $\rho^{*}$ in \eqref{regime_last_sub}--\eqref{regime_last_zero} satisfies both conditions (1) and (2), and $\rho^{*}$ is the unique equilibrium strategy.
\end{itemize}
\item Type II regimes $\Lambda_j$:
\begin{itemize}
\item If $j=0$:\\
Consider an attacker's strategy $\sigma_a$ such that:
\begin{align*}
\sigma_a(e)&=\frac{1}{E_{(1)}}, \quad \forall e \in \bar{\mathcal{E}}_{(1)}, \\
\sigma_a(e)&=0, \quad \forall e \in \mathcal{E} \setminus \bar{\mathcal{E}}_{(1)}.
\end{align*}
Since $p_d$ satisfies \eqref{regime_j_1}, we know that $\frac{1}{E_{(1)}}< \frac{p_d}{C_{(1)}-C_{\emptyset}}$. One can check that $\sigma_a$ satisfies \eqref{sub:upper_one}--\eqref{sum_j_one}, and thus $\sigma_a \in \Sigma^{*}_a$. Therefore, we know from Lemma~\ref{only_attacked} that $\rho_e^{*}=0$ for any $e \in \mathcal{E}$.
\item If $j=1, \dots, \bar{K}$:\\
Analogous to our discussion for $j=0$, the following is an equilibrium strategy of the attacker:
\begin{align*}
\sigma_a^{*}(e)&=\frac{p_d}{C_{e}-C_{\emptyset}}, \quad \forall e \in \cup_{k=1}^{j-1}\bar{\mathcal{E}}_{(k)}, \\
\sigma_a^{*}(e)&=\frac{1}{E_{(j)}} \left(1- \sum_{k=1}^{j-1} \frac{p_d E_{(k)}}{C_{(k)}-C_{\emptyset}}\right), \quad \forall e \in \bar{\mathcal{E}}_{(j)}, \\
\sigma_a^{*}(e)&=0, \quad \forall e \in \mathcal{E} \setminus \left(\cup_{k=1}^{j} \bar{\mathcal{E}}_{(k)}\right).
\end{align*}
From Lemma~\ref{only_attacked}, we immediately obtain that $\rho_e^{*}=0$ for any $e \in \mathcal{E} \setminus \left(\cup_{k=1}^{j-1} \bar{\mathcal{E}}_{(k)}\right)$.
Furthermore, for any $e \in \cup_{k=1}^{j-1}\bar{\mathcal{E}}_{(k)}$, the utility of the attacker in choosing $e$ must be the same as the utility for choosing any facility in $\bar{\mathcal{E}}_{(j)}$, which is $C_{(j)}-p_a$. Therefore, for any $e \in \cup_{k=1}^{j-1}\bar{\mathcal{E}}_{(k)}$, $\rho^{*}$ satisfies:
\begin{alignat*}{3}
&& &&U_a(\rho^{*}, e)&=C_{(j)}-p_a, \\
&&\stackrel{\eqref{Ua_rewrite}}{\Rightarrow} \quad &&\rho_e^{*} \left(C_{\emptyset}-p_a\right)+(1-\rho_e^{*}) \left(C_{(k)}-p_a\right)&=C_{(j)}-p_a, \\
&&\Rightarrow \quad &&\rho_e^{*}&=\frac{C_{(k)}-C_{(j)}}{C_{(k)}-C_{\emptyset}}.
\end{alignat*}
Additionally, for any $e \in \mathcal{E} \setminus \left(\cup_{k=1}^{j} \bar{\mathcal{E}}_{(k)}\right)$, the utility for the attacker targeting $e$ is $C_{e}-p_a$, which is smaller than $C_{(j)}-p_a$. Thus, both conditions (1) and (2) are satisfied, and $\rho^{*}$ is the unique equilibrium security effort.
\end{itemize}
\end{enumerate}
\section{Proofs of Section~\ref{sequential_section}}\label{proof_sequential}
\noindent\emph{Proof of Lemma~\ref{best_response_sequential}.}
For any non-vulnerable facility $e$, the best response strategy $\widetilde{\sigma}_a$ must be such that $\widetilde{\sigma}_a(e, \tilde{\rho})=0$ for any $\tilde{\rho}$.
Now consider any $e \in \{\mathcal{E}\,|\, C_{e}-p_a>C_{\emptyset}\}$. If $\tilde{\rho}_e > \widehat{\rho}_e$, then we can write:
\begin{align}\label{emptyset_dominated}
U_a(\tilde{\rho}, e)=\tilde{\rho}_e C_{\emptyset}+(1-\tilde{\rho}_e) C_{e}- p_a < C_{\emptyset}=U_a(\tilde{\rho}, \emptyset).
\end{align}
That is, the attacker's expected utility of targeting the facility $e$ is less than the expected utility of no attack. Thus, in any attacker's best response, $\widetilde{\sigma}_a(e, \tilde{\rho})=0$ for any such facility $e$. Additionally, if $\tilde{\rho}_e = \widehat{\rho}_e$, then $U_a(e, \tilde{\rho})=U_a(\emptyset, \tilde{\rho})$, i.e.\ the utility of targeting such a facility is identical with the utility of choosing no attack, and is higher than that of any other pure strategy. Hence, the set of best response strategies of the attacker is $\Delta(\mathcal{E}^{\rho} \cup\{\emptyset\})$, where $\mathcal{E}^{\rho}$ is the set defined in \eqref{Erho}.
Otherwise, if there exists a facility $e \in \{\mathcal{E}\,|\, C_{e}-p_a>C_{\emptyset}\}$ such that $\tilde{\rho}_e <\widehat{\rho}_e$, then we obtain:
\[U_a(\tilde{\rho}, e)=\tilde{\rho}_e C_{\emptyset}+(1-\tilde{\rho}_e) C_{e}- p_a > C_{\emptyset}=U_a(\tilde{\rho}, \emptyset).\]
Thus, no attack cannot be chosen in any best response strategy, which implies that the attacker chooses to attack with probability 1. Finally, $\mathcal{E}^{\max}$ is the set of facilities which incur the highest expected utility for the attacker given $\tilde{\rho}$; thus $BR(\tilde{\rho}) = \Delta(\mathcal{E}^{\max})$. \qed
\noindent\emph{Proof of Lemma~\ref{zero_or_one}.}
We first prove that the total attack probability is either 0 or 1 in any SPE. We discuss the following three cases separately:
\begin{itemize}
\item There exists at least one facility $e \in \{\bar{\mathcal{E}} \,|\, C_{e}-p_a>C_{\emptyset}\}$ such that $\tilde{\rho}^{*}_e<\widehat{\rho}_e$.\\
Since $\widetilde{\sigma}_a^{*}(\tilde{\rho}^{*}) \in BR(\tilde{\rho}^{*})$, from Lemma~\ref{best_response_sequential}, we know that $\sum_{e \in \bar{\mathcal{E}}} \widetilde{\sigma}_a^{*}(e, \tilde{\rho}^{*})=1$.
\item For all $e \in \{\bar{\mathcal{E}} \,|\, C_{e}-p_a>C_{\emptyset}\}$, $\tilde{\rho}^{*}_e> \widehat{\rho}_e$, i.e.\ the set $\mathcal{E}^{\rho}$ in \eqref{Erho} is empty. \\
Since $\widetilde{\sigma}_a^{*}(\tilde{\rho}^{*}) \in BR(\tilde{\rho}^{*})$, from Lemma~\ref{best_response_sequential}, we know that no edge is targeted in SPE, i.e.\ $\sum_{e \in \bar{\mathcal{E}}} \widetilde{\sigma}_a^{*}(e, \tilde{\rho}^{*})=0$.
\item For all $e \in \{\bar{\mathcal{E}} \,|\, C_{e}-p_a>C_{\emptyset}\}$, $\tilde{\rho}^{*}_e\geq \widehat{\rho}_e$, and the set $\mathcal{E}^{\rho}$ in \eqref{Erho} is non-empty. \\
For the sake of contradiction, we assume that in SPE, there exists a facility $e \in \mathcal{E}^{\rho}$ such that $\widetilde{\sigma}_a^{*}(e, \tilde{\rho}^{*})>0$, i.e.\ $\widetilde{\sigma}_a^{*}(\emptyset, \tilde{\rho}^{*})<1$. Then, we can write $U_d(\tilde{\rho}^{*}, \widetilde{\sigma}_a^{*}(\tilde{\rho}^{*}))$ as follows:
\begin{align}\label{Ud_indifferent}
U_d(\tilde{\rho}^{*}, \widetilde{\sigma}_a^{*}(\tilde{\rho}^{*}))=-C_{\emptyset}-(1-\widetilde{\sigma}_a^{*}(\emptyset, \tilde{\rho}^{*})) p_a -\left(\sum_{e \in \bar{\mathcal{E}}} \tilde{\rho}^{*}_e\right) p_d.
\end{align}
Now, consider $\tilde{\rho}'$ as follows:
\begin{alignat*}{2}
\tilde{\rho}_e^{'}&=\tilde{\rho}^{*}_e+\epsilon> \widehat{\rho}_e, &&\quad \forall e \in \mathcal{E}^{\rho}, \\
\tilde{\rho}_e^{'}&=\tilde{\rho}^{*}_e=0, &&\quad \forall e \in \mathcal{E} \setminus \mathcal{E}^{\rho},
\end{alignat*}
where $\epsilon$ is a sufficiently small positive number. Given such a $\tilde{\rho}'$, we know from Lemma~\ref{best_response_sequential} that the unique best response is $\widetilde{\sigma}_a(\emptyset, \tilde{\rho}')=1$. Therefore, the defender's utility is given by:
\begin{align*}
U_d(\tilde{\rho}', \widetilde{\sigma}_a(\tilde{\rho}'))&=-C_{\emptyset} - \left(\sum_{e \in \mathcal{E}} \tilde{\rho}_e^{'}\right) p_d.
\end{align*}
Additionally,
\begin{align*}
U_d(\tilde{\rho}', \widetilde{\sigma}_a(\tilde{\rho}'))-U_d(\tilde{\rho}^{*}, \widetilde{\sigma}_a(\tilde{\rho}^{*})) = (1-\widetilde{\sigma}_a(\emptyset, \tilde{\rho}^{*})) p_a -\epsilon p_d |\mathcal{E}^{\rho}|.
\end{align*}
Since $\epsilon$ is sufficiently small and $\widetilde{\sigma}_a(\emptyset, \tilde{\rho}^{*})<1$, we obtain that $U_d(\tilde{\rho}', \widetilde{\sigma}_a(\tilde{\rho}'))> U_d(\tilde{\rho}^{*}, \widetilde{\sigma}_a(\tilde{\rho}^{*}))$. Therefore, $\tilde{\rho}^{*}$ cannot be a SPE. We can conclude that in this case, the attacker chooses not to attack with probability 1.
\end{itemize}
We next show that in any SPE, the defender's security effort on each vulnerable facility $e$ is no higher than the threshold $\widehat{\rho}_e$ defined in \eqref{pebar}. Assume for the sake of contradiction that there exists a facility $\bar{e} \in \{\bar{\mathcal{E}}\,|\,C_{e}-p_a>C_{\emptyset}\}$ such that $\tilde{\rho}_{\bar{e}} >\widehat{\rho}_{\bar{e}}$.
We discuss the following two cases separately:\\
\begin{itemize}
\item The set $\{\bar{\mathcal{E}}\,|\,C_{e}-p_a>C_{\emptyset}, \tilde{\rho}_e<\widehat{\rho}_e\}$ is non-empty. We know from Lemma~\ref{best_response_sequential} that $BR(\tilde{\rho})=\Delta(\mathcal{E}^{\max})$, where the set $\mathcal{E}^{\max}$ in \eqref{Emax} is the set of facilities which incur the highest utility for the attacker. Clearly, $\mathcal{E}^{\max} \subseteq \{\bar{\mathcal{E}}\,|\,C_{e}-p_a>C_{\emptyset}, \tilde{\rho}_e<\widehat{\rho}_e\}$, and hence $\bar{e} \notin \mathcal{E}^{\max}$.
We consider $\tilde{\rho}'$ such that $\tilde{\rho}'_{\bar{e}}=\tilde{\rho}_{\bar{e}}-\epsilon$, where $\epsilon$ is a sufficiently small positive number, and $\tilde{\rho}_e^{'}=\tilde{\rho}_e$ for all other facilities.
Then $\tilde{\rho}'_{\bar{e}}> \widehat{\rho}_{\bar{e}}$ still holds, and the set $\mathcal{E}^{\max}$ does not change. The attacker's best response strategy remains $BR(\tilde{\rho}')=\Delta(\mathcal{E}^{\max})$. Hence, the utility of the defender given $\tilde{\rho}'$ increases by $\epsilon p_d$ compared to that given $\tilde{\rho}$, because the expected usage cost $\mathbb{E}_{\sigma}[C]$ does not change, but the expected defense cost decreases by $\epsilon p_d$.
Thus, such a $\tilde{\rho}$ cannot be the defender's equilibrium effort.
\item For all $e \in \{\bar{\mathcal{E}}\,|\,C_{e}-p_a>C_{\emptyset}\}$, $\tilde{\rho}_e \geq \widehat{\rho}_e$. We have already argued that $\widetilde{\sigma}_a^{*}(\emptyset, \tilde{\rho})=1$ in this case. Since the defense cost $p_d>0$, if there exists any $e$ such that $\tilde{\rho}_e>\widehat{\rho}_e$, then by decreasing the security effort on $e$, the utility of the defender increases. Therefore, such a $\tilde{\rho}$ cannot be an equilibrium strategy of the defender.
\end{itemize}
From both cases, we can conclude that for any $e \in \{\bar{\mathcal{E}}\,|\,C_{e}-p_a>C_{\emptyset}\}$, $\tilde{\rho}^{*}_e \leq \widehat{\rho}_e$.
Finally, any non-vulnerable facility $e \in \mathcal{E} \setminus \{\mathcal{E}\,|\,C_{e}-p_a>C_{\emptyset}\}$ will not be targeted; hence we must have $\tilde{\rho}^{*}_e=0$.
\qed
noindentemph{Proof of Lemma ref{comparison_lemma}.}
We first show that the threshold $p_dtil(p_a)$ as given in eqref{cdtil} is a well-defined function of $p_a$. Given any $0l_eq p_a < C_{(1)}-C_{\emptyset}$, there is a unique $i in \{1, \dots, K\}$ such that $C_{(i+1)}-C_{\emptyset} l_eq p_a< C_{(i)}-C_{\emptyset}$. Now, we need to show that there is a unique $j in \{1, \dots, i\}$ such that $\frac{sum_{k=j+1}^{i}E_{(k)}}{sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}l_eq p_a<\frac{sum_{k=j}^{i}E_{(k)}}{sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}$ (or $0l_eq p_a<\frac{E_{(i)}}{sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}$ if $j=i$). Note that functions $\{p_dij\}_{j=1}^{i}$ are defined on the range $l_eft[0, ~\frac{sum_{k=1}^{i}E_{(k)}}{sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}right]$. Since $\{C_{(k)}\}_{k=1}^{i}$ satisfies eqref{order}, we have:
\begin{align*}
\frac{sum_{k=1}^{i}E_{(k)}}{sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}} \geq \frac{sum_{k=1}^{i}E_{(k)}}{\frac{1}{C_{(i)}-C_{\emptyset}} sum_{k=1}^{i} E_{(k)}}=C_{(i)}-C_{\emptyset}.
end{align*}
Hence, for any $C_{(i+1)}-C_{\emptyset} l_eq p_a< C_{(i)}-C_{\emptyset}$, the value $p_dtil(p_a)$ is defined as $p_dij(p_a)$ for a unique $j in \{1, \dots, i\}$. Therefore, we can conclude that for any $0 l_eq p_a< C_{(1)}-C_{\emptyset}$, $p_dtil(p_a)$ is a well-defined function.
We next show that $p_dtil(p_a)$ is continuous and strictly increasing in $p_a$. Since for any $i=1, \dots, K$, and any $j=1, \dots, i$, the function $p_dij(p_a)$ is continuous and strictly increasing in $p_a$, $p_dtil(p_a)$ must be piecewise continuous and strictly increasing in $p_a$. It remains to be shown that $p_dtil(p_a)$ is continuous at $p_a in l_eft\{C_{(i)}-C_{\emptyset}right\}_{i=2}^{K} \cup l_eft\{\frac{sum_{k=j}^{i}E_{(k)}}{sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}right\}_{j=1, \dots, i, i=1, \dots K}$.
We now show that for any $i=2, \dots, K$, $p_dtil(p_a)$ is continuous at $C_{(i)}-C_{\emptyset}$. Consider $p_a=C_{(i)}-C_{\emptyset}-epsilon$ where $epsilon$ is a sufficiently small positive number. There is a unique $\hat{j} in \{1, \dots, i\}$ such that $p_dtil(p_a)=p_d^{i\hat{j}}(p_a)$. We want to argue that $\hat{j} neq i$:
\begin{alignat*}{2}
&&p_a p_dot l_eft(sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}right)&=l_eft(C_{(i)}-C_{\emptyset}-epsilonright) p_dot l_eft(sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}right)\\
&& &= E_{(i)}+sum_{k=1}^{i-1}\frac{l_eft(C_{(i)}-C_{\emptyset}right) E_{(k)}}{C_{(k)}-C_{\emptyset}}-epsilon l_eft(sum_{k=1}^{i}\frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}right)> E_{(i)}, \\
Rightarrow &&quad p_a&=C_{(i)}-C_{\emptyset}-epsilon > \frac{E_{(i)}}{sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}
end{alignat*}
Thus, $\hat{j} in \{1, \dots, i-1\}$, and from eqref{cdtil}, $\frac{sum_{k=\hat{j}+1}^{i}E_{(k)}}{sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}l_eq C_{(i)}-C_{\emptyset}-epsilon <\frac{sum_{k=\hat{j}}^{i}E_{(k)}}{sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}$. Since $epsilon$ is a sufficiently small positive number, we have:
\begin{alignat*}{2}
&&sum_{k=\hat{j}+1}^{i}E_{(k)} &l_eq l_eft(sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}right) p_dot l_eft(C_{(i)}-C_{\emptyset}-epsilonright)\\
&& &=E_{(i)}+sum_{k=1}^{i-1}\frac{l_eft(C_{(i)}-C_{\emptyset}right) E_{(k)}}{C_{(k)}-C_{\emptyset}}-epsilon l_eft(sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}right)\\
Rightarrow && quad sum_{k=\hat{j}+1}^{i-1}E_{(k)} &l_eq sum_{k=1}^{i-1}\frac{l_eft(C_{(i)}-C_{\emptyset}right) E_{(k)}}{C_{(k)}-C_{\emptyset}}+ epsilon l_eft(sum_{k=1}^{i-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}right)\\
Rightarrow && quad \frac{sum_{k=\hat{j}+1}^{i-1}E_{(k)}}{sum_{k=1}^{i-1}\frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}} &l_eq C_{(i)}-C_{\emptyset}+epsilon.
end{alignat*}
Analogously, we can check that $ C_{(i)}-C_{\emptyset}+epsilon<\frac{sum_{k=\hat{j}}^{i-1}E_{(k)}}{sum_{k=1}^{i-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}$. Hence, from eqref{cdtil}, when $p_a=C_{(i)}-C_{\emptyset}+epsilon$, we have $p_dtil(p_a)=p_d^{i-1\hat{j}}(p_a)$. Then,
\begin{align*}
\lim_{p_a \to \left(C_{(i)}-C_{\emptyset}\right)^{-}}\tilde{p}_d(p_a)&=\lim_{\epsilon \to 0} p_d^{i\hat{j}}(C_{(i)}-C_{\emptyset}-\epsilon)\\
&\stackrel{\eqref{cdij}}{=}\frac{C_{(\hat{j})}-C_{\emptyset}}{\left(C_{(\hat{j})}-C_{\emptyset}\right) \cdot \left(\sum_{k=1}^{\hat{j}-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right) + \sum_{k=\hat{j}}^{i-1} E_{(k)}-\sum_{k=1}^{i-1} \frac{p_a E_{(k)}}{C_{(k)}-C_{\emptyset}}}\\
&=\lim_{\epsilon \to 0} p_d^{(i-1)\hat{j}}(C_{(i)}-C_{\emptyset}+\epsilon)=\lim_{p_a \to \left(C_{(i)}-C_{\emptyset}\right)^{+}}\tilde{p}_d(p_a).
\end{align*}
Thus, $\tilde{p}_d(p_a)$ is continuous at $C_{(i)}-C_{\emptyset}$ for any $i=2, \dots, K$.
For any $i=1, \dots, K$, we next show that $\tilde{p}_d(p_a)$ is continuous
at $p_a=\frac{\sum_{k=j}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}$ for $j=1, \dots, i$:
\begin{align*}
\lim_{p_a \to \left(\frac{\sum_{k=j}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}\right)^{-}}\tilde{p}_d(p_a)&=p_d^{ij}\left(\frac{\sum_{k=j}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}\right)=\left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}\\
&=p_d^{i(j-1)}\left(\frac{\sum_{k=j}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}\right)=\lim_{p_a \to \left(\frac{\sum_{k=j}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}\right)^{+}}\tilde{p}_d(p_a).
\end{align*}
Hence, we can conclude that $\tilde{p}_d(p_a)$ is continuous and strictly increasing in $p_a$.
Additionally, for any $i=1, \dots, K$, consider any $p_a$ such that $C_{(i+1)}-C_{\emptyset}<p_a\leq C_{(i)}-C_{\emptyset}$ (or $0<p_a\leq C_{(K)}-C_{\emptyset}$ if $i=K$), then for any $j=1, \dots, i$, we have:
\begin{align*}
p_d^{ij}(p_a)& \stackrel{\eqref{cdij}}{=}\frac{C_{(j)}-C_{\emptyset}}{\left(C_{(j)}-p_a-C_{\emptyset}\right) \cdot \left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right) + \sum_{k=j}^{i} \frac{\left(C_{(k)}-p_a-C_{\emptyset}\right) E_{(k)}}{C_{(k)}-C_{\emptyset}}}\\
&>\frac{C_{(j)}-C_{\emptyset}}{\left(C_{(j)}-C_{(i+1)}\right) \cdot \left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right) + \sum_{k=j}^{i} \frac{\left(C_{(k)}-C_{(i+1)}\right) E_{(k)}}{C_{(k)}-C_{\emptyset}}}\\
&= \frac{C_{(j)}-C_{\emptyset}}{\left(C_{(j)}-C_{(i+1)}\right) \cdot \left(\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)} \\
&\stackrel{\eqref{order}}{>} \left(\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}\\
&\stackrel{\eqref{cd_accurate}}{=}\bar{p}_d.
\end{align*}
Therefore, for any $0<p_a<C_{(1)}-C_{\emptyset}$, we have:
\begin{align}\label{comparison}
\tilde{p}_d(p_a)\stackrel{\eqref{cdtil}}{\geq} \min_{j=1, \dots, i}p_d^{ij}(p_a)>\bar{p}_d.
\end{align}
Finally, if $p_a=0$, then we know that $\tilde{p}_d(0)=p_d^{KK}(0)$. From \eqref{cdij}, we can check that $p_d^{KK}(0)=\left(\sum_{k=1}^{K} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}=\bar{p}_d(0)$. If $p_a$ approaches $C_{(1)}-C_{\emptyset}$, then $\tilde{p}_d(p_a)=p_d^{11}(p_a)$, and we have:
\begin{align*}
\lim_{p_a \to C_{(1)}-C_{\emptyset}}\tilde{p}_d(p_a)\stackrel{\eqref{cdij}}{=}\lim_{p_a \to C_{(1)}-C_{\emptyset}} \frac{C_{(1)}-C_{\emptyset}}{ E_{(1)}-\frac{p_a E_{(1)}}{C_{(1)}-C_{\emptyset}}} = +\infty.
\end{align*}
\qed
We define the partition as:
\begin{align}\label{partitionP}
\mathcal{P}\coloneqq \left\{\left\{ \Lambda^i\right\}_{i=0}^{K}, \left\{\Lambda_i^j\right\}_{j=1, \dots, i,\; i=1, \dots, K} \right\},
\end{align}
where $\left\{\Lambda^i\right\}_{i=0}^{K}$ are type I regimes in the normal form game defined in \eqref{regimei_first}--\eqref{regimei_last}, and $\Lambda_i^j$ is the set of $\left(p_d, p_a\right)$ which satisfy:
\begin{subequations}\label{partition}
\begin{align}
p_d &\in \left\{
\begin{array}{ll}
\left(\left(\frac{E_{(1)}}{C_{(1)}-C_{\emptyset}}\right)^{-1}, +\infty\right), & \quad \text{if $j=1$,} \\
\left(\left(\sum_{k=1}^{j} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}, \left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}\right), & \quad \text{if $j=2, \dots, K$,}
\end{array}
\right.\label{cd_partition}
\\
p_a &\in \left\{
\begin{array}{ll}
\left(C_{(i+1)}-C_{\emptyset}, C_{(i)}-C_{\emptyset}\right), & \quad \text{if $i=1, \dots, K-1$,} \\
\left(0, C_{(K)}-C_{\emptyset}\right), & \quad \text{if $i=K$.}
\end{array}
\right.\label{ca_partition}
\end{align}
\end{subequations}
We can check that sets in $\mathcal{P}$ are disjoint, and cover the whole space of $\left(p_d, p_a\right)$. Lemma~\ref{sequential_type1} characterizes SPE in sets $\left\{ \Lambda^i\right\}_{i=0}^{K}$, and Lemma~\ref{type_2_sequential} characterizes SPE in sets $\left\{\Lambda_i^j\right\}_{i=1, j=1}^{i=K, j=i}$.
\begin{lemma}\label{sequential_type1}
In $\widetilde{\Gamma}$, for any $\left(p_a, p_d\right)$ in the set $\Lambda^i$, where $i=0, \dots, K$:
\begin{itemize}
\item If $i=0$, then SPE is as given in \eqref{SPE_i_0}.
\item If $i=1, \dots, K$, then SPE is as given in \eqref{SPE_i}.
\end{itemize}
\end{lemma}
\emph{Proof of Lemma \ref{sequential_type1}.}
\begin{itemize}
\item If $i=0$:\\
The set of vulnerable facilities $\{e \in \mathcal{E} \mid C_e-p_a>C_{\emptyset}\}$ is empty. Thus, $\widetilde{\sigma}_a^{*}(\emptyset, \tilde{\rho})=1$, and $\tilde{\rho}^{*}_e=0$ for all $e \in \mathcal{E}$.
\item For any $i=1, \dots, K$:\\
The set of vulnerable facilities is $\bigcup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}$. From Lemma \ref{zero_or_one}, we have already known that for any $e \in \bigcup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}$, $\tilde{\rho}^{*}_e \leq \widehat{\rho}_e$. Assume for the sake of contradiction that there exists a facility $\bar{e} \in \bigcup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}$ such that $\tilde{\rho}_{\bar{e}} <\widehat{\rho}_{\bar{e}}$. From Lemma \ref{best_response_sequential}, we know that $\widetilde{\sigma}_a^{*}(\emptyset, \tilde{\rho})=0$, and $BR(\tilde{\rho})=\Delta(\mathcal{E}^{\max})$, where $\mathcal{E}^{\max}$ is in \eqref{Emax}. Clearly, $\mathcal{E}^{\max} \subseteq \bigcup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}$. We define $\lambda$ as follows:
\begin{align*}
\lambda &=\max_{e \in \bigcup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}} \left\{\tilde{\rho}_e C_{\emptyset}+ (1-\tilde{\rho}_e) C_e\right\}=\tilde{\rho}_e C_{\emptyset}+ (1-\tilde{\rho}_e) C_e, \quad \forall e \in \mathcal{E}^{\max}.
\end{align*}
The utility of the defender can be written as:
\begin{align*}
U_d(\tilde{\rho}, \widetilde{\sigma}_a^{*}(\tilde{\rho}))=-\lambda-\left(\sum_{e \in \mathcal{E}} \tilde{\rho}_e \right)\cdot p_d.
\end{align*}
We now consider $\tilde{\rho}'$ as follows:
\begin{alignat*}{2}
\tilde{\rho}_e^{'}&=\tilde{\rho}_e+\frac{\epsilon}{C_e-C_{\emptyset}}, &&\quad \forall e \in \mathcal{E}^{\max}, \\
\tilde{\rho}_e^{'}&=\tilde{\rho}_e, &&\quad \forall e \in \mathcal{E} \setminus\mathcal{E}^{\max},
\end{alignat*}
where $\epsilon$ is a sufficiently small positive number. Under this deviation, we can check that the set $\mathcal{E}^{\max}$ does not change, but $\lambda$ changes to $\lambda-\epsilon$. Therefore, the defender's utility can be written as:
\begin{align*}
&U_d(\tilde{\rho}', \widetilde{\sigma}_a(\tilde{\rho}'))=-\lambda+\epsilon -\left(\sum_{e \in \mathcal{E}} \tilde{\rho}'_e \right)\cdot p_d=-\lambda+\epsilon-\left(\sum_{e \in \mathcal{E}} \tilde{\rho}_e \right)\cdot p_d -\sum_{e \in \mathcal{E}^{\max}}\frac{\epsilon p_d}{C_e-C_{\emptyset}}\\
=&U_d(\tilde{\rho}, \widetilde{\sigma}_a(\tilde{\rho}))+\epsilon \left(1-\sum_{e \in \mathcal{E}^{\max}}\frac{ p_d}{C_e-C_{\emptyset}}\right)\geq U_d(\tilde{\rho}, \widetilde{\sigma}_a(\tilde{\rho}))+\epsilon \left(1-\sum_{e \in \bigcup_{k=1}^{i}\bar{\mathcal{E}}_{(k)}}\frac{ p_d}{C_e-C_{\emptyset}}\right)\\
=& U_d(\tilde{\rho}, \widetilde{\sigma}_a(\tilde{\rho}))+\epsilon \left(1-\sum_{k=1}^{i}\frac{ p_d E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)\stackrel{\eqref{regimei_notlast}}{>}U_d(\tilde{\rho}, \widetilde{\sigma}_a(\tilde{\rho})).
\end{align*}
Therefore, such $\tilde{\rho}$ cannot be an equilibrium strategy profile. We thus know that $\tilde{\rho}^{*}$ is as given in \eqref{SPE_i}. The attacker's equilibrium strategy can be derived from Lemmas \ref{best_response_sequential} and \ref{zero_or_one} directly. \qed
\end{itemize}
\begin{lemma}\label{type_2_sequential}
For $\left(p_a, p_d\right)$ in $\Lambda_i^j$, where $i=1, \dots, K$, and $j=1, \dots, i$, there are two cases of SPE:
\begin{itemize}
\item If $p_d>p_d^{ij}$, where $p_d^{ij}$ is as given in \eqref{cdij}:
\begin{itemize}
\item If $j=1$, then SPE is as given in \eqref{SPE_j_1}.
\item If $j=2, \dots, i$, then SPE is as given in \eqref{SPE_j}.
\end{itemize}
\item If $p_d<p_d^{ij}$, then the SPE is as given in \eqref{SPE_i}.
\end{itemize}
\end{lemma}
\emph{Proof of Lemma \ref{type_2_sequential}.}
Consider cost parameters in the set $\Lambda_i^j$ defined in \eqref{partition}, where $i=1, \dots, K$ and $j=1, \dots, i$. The set of vulnerable facilities is $\bigcup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}$. From Lemma \ref{zero_or_one}, we know that the defender can either secure all vulnerable facilities $e \in \bigcup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}$ with the threshold effort $\widehat{\rho}_e$ defined in \eqref{pebar}, or leave at least one vulnerable facility secured less than the threshold effort. We discuss the two cases separately:
\begin{enumerate}
\item[(1)] If any $e \in \bigcup_{k=1}^{i} \bar{\mathcal{E}}_{(k)}$ is secured with the threshold effort $\widehat{\rho}_e$, then from Lemma \ref{zero_or_one}, we know that the total probability of attack is 0. The defender's utility can be written as:
\begin{align}
U_d(\widehat{\rho}, \widetilde{\sigma}_a^{*}(\widehat{\rho}))=-C_{\emptyset}-\left(\sum_{k=1}^{i} \frac{\left(C_{(k)}-p_a-C_{\emptyset}\right) \cdot E_{(k)}}{C_{(k)}-C_{\emptyset}}\right) \cdot p_d. \label{ud_case2}
\end{align}
\item[(2)] If the set $\{e \in \mathcal{E} \mid C_e-p_a>C_{\emptyset},\ \tilde{\rho}_e < \widehat{\rho}_e\}$ is non-empty, then we define $\widetilde{P}$ as the set of feasible $\tilde{\rho}$ in this case. We denote $\tilde{\rho}^\dagger$ as the secure effort vector that incurs the highest utility for the defender among all $\tilde{\rho} \in \widetilde{P}$. Then, $\tilde{\rho}^\dagger$ can be written as:
\begin{align}
&\tilde{\rho}^\dagger \in \underset{\tilde{\rho} \in \widetilde{P}}{\mathrm{argmax}}~ U_d(\tilde{\rho}, \widetilde{\sigma}_a^{*}(\tilde{\rho}))=\underset{\tilde{\rho} \in \widetilde{P}}{\mathrm{argmax}} \left(-\mathbb{E}_{\left(\tilde{\rho}, \widetilde{\sigma}_a^{*}(\tilde{\rho})\right)}[C]-\left(\sum_{e \in \mathcal{E}} \tilde{\rho}_e\right)\cdot p_d\right) \notag \\
=&\underset{\tilde{\rho} \in \widetilde{P}}{\mathrm{argmax}} \left(-\mathbb{E}_{\left(\tilde{\rho}, \widetilde{\sigma}_a^{*}(\tilde{\rho})\right)}[C]-\left(\sum_{e \in \mathcal{E}} \tilde{\rho}_e\right)\cdot p_d + \left(\sum_{e \in \mathcal{E}} \widetilde{\sigma}_a^{*}(e, \tilde{\rho})\right)\cdot p_a- \left(\sum_{e \in \mathcal{E}} \widetilde{\sigma}_a^{*}(e, \tilde{\rho})\right)\cdot p_a \right). \label{psssone}
\end{align}
We know from Lemma \ref{best_response_sequential} that $\widetilde{\sigma}_a^{*}(\emptyset, \tilde{\rho})=0$. Therefore, $\sum_{e \in \mathcal{E}} \widetilde{\sigma}_a^{*}(e, \tilde{\rho})=1$, and \eqref{psssone} can be re-expressed as:
\begin{align*}
\tilde{\rho}^\dagger &\in \underset{\tilde{\rho} \in \widetilde{P}}{\mathrm{argmax}} \left(-\mathbb{E}_{\left(\tilde{\rho}, \widetilde{\sigma}_a^{*}(\tilde{\rho})\right)}[C]-\left(\sum_{e \in \mathcal{E}} \tilde{\rho}_e\right)\cdot p_d + \left(\sum_{e \in \mathcal{E}} \widetilde{\sigma}_a^{*}(e, \tilde{\rho})\right)\cdot p_a- p_a \right)\\
&=\underset{\tilde{\rho} \in \widetilde{P}}{\mathrm{argmax}} \left(-\mathbb{E}_{\left(\tilde{\rho}, \widetilde{\sigma}_a^{*}(\tilde{\rho})\right)}[C]-\left(\sum_{e \in \mathcal{E}} \tilde{\rho}_e\right)\cdot p_d + \left(\sum_{e \in \mathcal{E}} \widetilde{\sigma}_a^{*}(e, \tilde{\rho})\right)\cdot p_a \right).
\end{align*}
Since in equilibrium, the attacker chooses the best response strategy, we have:
\begin{align}\label{zero_sum_again}
\mathbb{E}_{\left(\tilde{\rho}, \widetilde{\sigma}_a^{*}(\tilde{\rho})\right)}[C]- \left(\sum_{e \in \mathcal{E}} \widetilde{\sigma}_a^{*}(e, \tilde{\rho})\right)\cdot p_a =\max_{\widetilde{\sigma}_a \in \Delta(S_a)} \left(\mathbb{E}_{\left(\tilde{\rho}, \widetilde{\sigma}_a\right)}[C]- \left(\sum_{e \in \mathcal{E}} \widetilde{\sigma}_a(e)\right) \cdot p_a\right).
\end{align}
Hence, $\tilde{\rho}^\dagger$ can be re-expressed as:
\begin{align*}
\tilde{\rho}^\dagger&\stackrel{\eqref{zero_sum_again}}{=} \underset{\tilde{\rho} \in \widetilde{P}}{\mathrm{argmax}} \left(-\max_{\widetilde{\sigma}_a \in \Delta(S_a)} \left(\mathbb{E}_{\left(\tilde{\rho}, \widetilde{\sigma}_a\right)}[C]- \left(\sum_{e \in \mathcal{E}} \widetilde{\sigma}_a(e)\right) \cdot p_a\right)-\left(\sum_{e \in \mathcal{E}} \tilde{\rho}_e\right)\cdot p_d \right)\\
&=\underset{\tilde{\rho} \in \widetilde{P}}{\mathrm{argmax}} \left(-\max_{\widetilde{\sigma}_a \in \Delta(S_a)} \left(\mathbb{E}_{\left(\tilde{\rho}, \widetilde{\sigma}_a\right)}[C]-\left(\sum_{e \in \mathcal{E}} \widetilde{\sigma}_a(e)\right) \cdot p_a +\left(\sum_{e \in \mathcal{E}} \tilde{\rho}_e\right)\cdot p_d \right)\right)\\
&=\underset{\tilde{\rho} \in \widetilde{P}}{\mathrm{argmax}}\min_{\widetilde{\sigma}_a \in \Delta(S_a)} \left(-\mathbb{E}_{\left(\tilde{\rho}, \widetilde{\sigma}_a\right)}[C]+\left(\sum_{e \in \mathcal{E}} \widetilde{\sigma}_a(e)\right) \cdot p_a -\left(\sum_{e \in \mathcal{E}} \tilde{\rho}_e\right)\cdot p_d \right)\\
&\stackrel{\eqref{zero_utility_defend}}{=} \underset{\tilde{\rho} \in \widetilde{P}}{\mathrm{argmax}}\min_{\widetilde{\sigma}_a \in \Delta(S_a)} ~ U^0_d(\tilde{\rho}, \widetilde{\sigma}_a).
\end{align*}
Therefore, $\tilde{\rho}^\dagger$ is the defender's equilibrium strategy in the zero sum game, which is identical to the equilibrium strategy in the normal form game (recall Lemma \ref{zero_sum}). From Theorem \ref{attacker_strategy}, when $p_a$ and $p_d$ are in $\Lambda_i^j$, $\tilde{\rho}^\dagger$ is in \eqref{SPE_j} (or \eqref{SPE_j_1} if $j=1$).
The defender's utility in this case is:
\begin{align}
U_d(\tilde{\rho}^\dagger, \widetilde{\sigma}_a^{*}(\tilde{\rho}^\dagger))=-C_{(j)}-\left(\sum_{k=1}^{j-1} \frac{\left(C_{(k)}-C_{(j)}\right) \cdot E_{(k)}}{C_{(k)}-C_{\emptyset}}\right) \cdot p_d. \label{ud_case1}
\end{align}
\end{enumerate}
Finally, by comparing $U_d$ in \eqref{ud_case1} and \eqref{ud_case2}, we can check that if $p_d>p_d^{ij}$, then $U_d(\tilde{\rho}^\dagger, \widetilde{\sigma}_a^{*}(\tilde{\rho}^\dagger))> U_d(\widehat{\rho}, \widetilde{\sigma}_a^{*}(\widehat{\rho}))$. Thus, SPE is in \eqref{SPE_j} (or \eqref{SPE_j_1} if $j=1$). If $p_d<p_d^{ij}$, then $U_d(\tilde{\rho}^\dagger, \widetilde{\sigma}_a^{*}(\tilde{\rho}^\dagger))< U_d(\widehat{\rho}, \widetilde{\sigma}_a^{*}(\widehat{\rho}))$, and SPE is in \eqref{SPE_i}. \qed
\noindent\emph{Proof of Theorem \ref{theorem:SPE}.}
\begin{itemize}
\item[(a)] Type $\widetilde{\mathrm{I}}$ regimes $\widetilde{\Lambda}^i$:
\begin{itemize}
\item If $i=0$:\\
There is no vulnerable facility. Therefore, the attacker chooses not to attack with probability 1, and the defender does not secure any facility. SPE is as given in \eqref{SPE_i_0}.
\item If $i=1, \dots, K$:\\
Consider any $C_{(i+1)}-C_{\emptyset} < p_a < C_{(i)}-C_{\emptyset}$. From Lemma~\ref{comparison_lemma}, we know that $\tilde{p}_d(p_a)> \bar{p}_d$, where $\tilde{p}_d(p_a)$ is defined in \eqref{cdtil} and $\bar{p}_d$ is as defined in \eqref{cdbar}. From Lemma \ref{sequential_type1}, we know that SPE is as given in \eqref{SPE_i} for any $p_d< \bar{p}_d$.
It remains to be shown that for any $\bar{p}_d \leq p_d < \tilde{p}_d(p_a)$, SPE is also as given in \eqref{SPE_i}. For any $C_{(i+1)}-C_{\emptyset} \leq p_a <C_{(i)}-C_{\emptyset}$, there is a unique $\hat{j} \in \{1, \dots, i\}$ such that $\frac{\sum_{k=\hat{j}+1}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}\leq p_a<\frac{\sum_{k=\hat{j}}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}$, and from \eqref{cdtil}, we have:
\begin{align*}
\tilde{p}_d(p_a)&=p_d^{i\hat{j}}(p_a)\geq p_d^{i\hat{j}}\left(\frac{\sum_{k=\hat{j}+1}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}\right)\stackrel{\eqref{cdij}}{=}\left(\sum_{k=1}^{\hat{j}} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}, \\
\tilde{p}_d(p_a)&=p_d^{i\hat{j}}(p_a)<p_d^{i\hat{j}}\left(\frac{\sum_{k=\hat{j}}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}\right)=\left(\sum_{k=1}^{\hat{j}-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}.
\end{align*}
Consider any $j=\hat{j}+1, \dots, i$, and any $\left(\sum_{k=1}^{j} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1} \leq p_d <\left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}$; the cost parameters $(p_a, p_d)$ are in the set $\Lambda_i^j$ as defined in \eqref{partition}. Additionally, from our definition of $\hat{j}$, we know that $p_a>\frac{\sum_{k=j}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}$. We now show that in $\Lambda_i^j$, $p_d<p_d^{ij}(p_a)$:
\begin{align*}
p_d^{ij}(p_a)\stackrel{\eqref{cdij}}{>}p_d^{ij}\left(\frac{\sum_{k=j}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}\right)=\left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}\stackrel{\eqref{partition}}{>}p_d.
\end{align*}
Hence, from Lemma \ref{type_2_sequential}, we know that for any $\left(\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}\leq p_d \leq \left(\sum_{k=1}^{\hat{j}} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}$, SPE is as given in \eqref{SPE_i}. For any $\left(\sum_{k=1}^{\hat{j}} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}<p_d< \tilde{p}_d(p_a)$, the cost parameters $(p_a, p_d)$ are in the set $\Lambda_i^{\hat{j}}$, and $p_d<\tilde{p}_d(p_a)=p_d^{i\hat{j}}(p_a)$. Again from Lemma \ref{type_2_sequential}, SPE is in \eqref{SPE_i}.
Therefore, we can conclude that in regime $\widetilde{\Lambda}^i$, SPE is in \eqref{SPE_i}.
\end{itemize}
\item[(b)] Type $\widetilde{\mathrm{II}}$ regimes $\widetilde{\Lambda}_j$, where $j=1, \dots, K$:\\
Since $\tilde{p}_d(p_a)$ is strictly increasing in $p_a$ and $\lim_{p_a \to C_{(1)}-C_{\emptyset}}\tilde{p}_d(p_a)=+\infty$, we know that for any $p_d>0$, $p_a<\tilde{p}_d^{-1}(p_d)<C_{(1)}-C_{\emptyset}$. Therefore, we can re-express $\widetilde{\Lambda}^1$ as follows:
\begin{align}
\widetilde{\Lambda}^1&\stackrel{\eqref{regimej_constraint_1}}{=} \left\{\left(p_a, p_d\right) \left\vert\; p_a< \tilde{p}_d^{-1}(p_d), ~ p_d > \left(\frac{E_{(1)}}{C_{(1)}-C_{\emptyset}}\right)^{-1} \right.\right\}\notag\\
&=\left\{\left(p_a, p_d\right) \left\vert\; p_d> \tilde{p}_d(p_a), ~ p_d > \left( \frac{E_{(1)}}{C_{(1)}-C_{\emptyset}}\right)^{-1}, 0 \leq p_a \leq C_{(1)}-C_{\emptyset} \right.\right\}\notag\\
& \stackrel{\eqref{partition}}{=}\bigcup_{i=1}^K \left(\Lambda_i^1 \bigcap \left\{\left(p_a, p_d\right)\mid p_d> \tilde{p}_d(p_a)\right\}\right).\label{re_express_regimesone}
\end{align}
For any $j=2, \dots, K$, if $p_a>C_{(j)}-C_{\emptyset}$, then from Lemma \ref{comparison_lemma}, we have:
\begin{align}\label{empty_set}
\tilde{p}_d(p_a)> \bar{p}_d \stackrel{\eqref{cd_accurate}}{\geq} \left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}.
\end{align}
Therefore, for any $p_d<\left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}$, we know that $p_a<\tilde{p}_d^{-1}(p_d)<C_{(j)}-C_{\emptyset}$. Analogous to \eqref{re_express_regimesone}, we re-express the set $\widetilde{\Lambda}_j$ as follows:
\begin{align*}
\widetilde{\Lambda}_j &\stackrel{\eqref{regimej_constraint}}{=} \left\{\left(p_a, p_d\right) \left\vert\; p_a< \tilde{p}_d^{-1}(p_d), ~ \left(\sum_{k=1}^{j} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}\leq p_d < \left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1} \right.\right\}\\
&\stackrel{\eqref{empty_set}}{=}\left\{\left(p_a, p_d\right) \left\vert\; \begin{array}{l}
p_d> \tilde{p}_d(p_a), ~ \left(\sum_{k=1}^{j} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}\leq p_d < \left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}, \\
0 \leq p_a \leq C_{(j)}-C_{\emptyset}\end{array} \right.\right\}\\
&\stackrel{\eqref{partition}}{=}\bigcup_{i=j}^K \left(\Lambda_i^j \bigcap \{\left(p_a, p_d\right)\mid p_d> \tilde{p}_d(p_a)\}\right).
\end{align*}
We next show that for any $j=1, \dots, K$, and any $i=j, \dots, K$, the set $\Lambda_i^j \cap \{\left(p_a, p_d\right)\mid p_d> \tilde{p}_d(p_a)\} \subseteq \Lambda_i^j \cap \{\left(p_a, p_d\right)\mid p_d>p_d^{ij}(p_a)\}$. Consider any cost parameters $\left(p_a, p_d\right)$ in the set $\Lambda_i^j\cap \{\left(p_a, p_d\right)\mid p_d> \tilde{p}_d(p_a)\}$; from \eqref{cdtil}, we can find $\hat{j}$ such that $\frac{\sum_{k=\hat{j}+1}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}\leq p_a<\frac{\sum_{k=\hat{j}}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}$, and $\tilde{p}_d(p_a)=p_d^{i\hat{j}}(p_a)$. We discuss the following three cases separately:
\begin{itemize}
\item If $\hat{j} > j$, then we must have $p_a<\frac{\sum_{k=\hat{j}}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}\leq\frac{\sum_{k=j+1}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}$. Hence, from \eqref{cdij}, $p_d^{ij}(p_a)<\left(\sum_{k=1}^{j} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}$. From the definition of the set $\Lambda_i^j$ in \eqref{partition}, we know that $p_d>p_d^{ij}(p_a)$ in this set, and thus $\left(p_a, p_d\right) \in \Lambda_i^j\cap \{\left(p_a, p_d\right)\mid p_d> p_d^{ij}(p_a)\}$.
\item If $\hat{j}=j$, then we directly obtain that $\left(p_a, p_d\right) \in \Lambda_i^j\cap \{\left(p_a, p_d\right)\mid p_d> p_d^{ij}(p_a)\}$.
\item If $\hat{j}<j$, then since $p_a \geq \frac{\sum_{k=\hat{j}+1}^{i}E_{(k)}}{\sum_{k=1}^{i} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}}$, from \eqref{cdij}, we have $\tilde{p}_d(p_a)=p_d^{i\hat{j}}(p_a)\geq \left(\sum_{k=1}^{\hat{j}} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1} \geq \left(\sum_{k=1}^{j-1} \frac{E_{(k)}}{C_{(k)}-C_{\emptyset}}\right)^{-1}$. From the definition of the set $\Lambda_i^j$ in \eqref{partition}, the set $\Lambda_i^j\cap \{\left(p_a, p_d\right)\mid p_d> \tilde{p}_d(p_a)\}$ is empty, and thus can be omitted.
\end{itemize}
We can conclude from all three cases that $\Lambda_i^j \cap \{\left(p_a, p_d\right)\mid p_d> \tilde{p}_d(p_a)\} \subseteq \Lambda_i^j \cap \{\left(p_a, p_d\right)\mid p_d>p_d^{ij}(p_a)\}$. Therefore, from Lemma \ref{type_2_sequential}, SPE is in \eqref{SPE_j} (or \eqref{SPE_j_1} if $j=1$) in the regime $\widetilde{\Lambda}_j$.
\end{itemize}
\qed
\end{appendix}
\end{document}
\begin{document}
\title[Entanglement of two atoms using Rydberg blockade]{Analysis of the entanglement between two individual atoms using global Raman rotations}
\author{A. Ga\"{e}tan, C. Evellin, J. Wolters, P. Grangier, T. Wilk and A. Browaeys}
\address{Laboratoire Charles Fabry, Institut d'Optique, CNRS, Univ Paris-Sud,
Campus Polytechnique, RD 128,
91127 Palaiseau cedex, France}
\date{\today}
\begin{abstract}
Making use of the Rydberg blockade, we generate entanglement between two atoms
individually trapped in two optical tweezers. In this paper we detail the analysis of the
data and show that we can determine the amount of entanglement between the atoms
in the presence of atom losses during the entangling sequence. Our model takes into account states outside the qubit basis and allows us to perform a partial reconstruction of the density matrix describing the two atom state. With this method we extract the amount of entanglement between pairs of atoms still trapped after the entangling sequence and measure the fidelity with respect to the expected Bell state. We find a fidelity $F_{\rm pairs} =0.74(7)$ for the 62\% of atom pairs remaining in the traps at the end of the entangling sequence.
\end{abstract}
\pacs{32.80.Rm, 03.67.Bg, 32.80.Pj, 42.50.Ct, 42.50.Dv}
\maketitle
\section{Introduction}
Entanglement between two particles can be generated by designing and manipulating
interactions between them. For example, the entanglement in ion systems relies on the Coulomb interaction between the ions~\cite{BlattWinelandNat08}.
Entanglement is therefore difficult to produce in neutral atom systems, due to their weaker interactions. One solution, implemented in the first demonstration of entanglement between
neutral atoms, makes use of a high-Q cavity to mediate the interaction
between transient atoms~\cite{Hagley97}.
Another more recent approach uses ultra-cold atoms in optical lattices and the short-range s-wave interaction that occurs when their wavepackets overlap. This leads to the preparation of entangled states of a chain of atoms~\cite{Mandel03} or
pairs of atoms~\cite{Anderlini07}. This approach requires ground state cooling of atoms in their trapping potential and the ability to overlap their wavepackets during a controllable amount of time.
Furthermore, although there has been tremendous progress in this direction recently~\cite{Bakr09}, it is not easy to address atoms in optical lattices with a spacing between the wells of less than a micrometer.
An alternative approach is to store atoms in traps that are separated by
several micrometers in order to have addressability using standard optical techniques, and to avoid motional control of the atoms~\cite{Bergamini04,Nelson07}. One then needs an interaction which can act at long distance. Atoms in Rydberg states do provide such a long range interaction, which can reach several MHz at a distance of 10 micrometers. Moreover, this interaction can be switched on and off at will by placing the atoms in a Rydberg state for a controllable amount of time.
This approach using Rydberg interaction has been proposed theoretically as a
way to perform fast quantum gates~\cite{Jaksch00, Lukin01, Saffman05} and
is intrinsically deterministic and scalable to more than two atoms.
Recent proposals extend this idea to the generation of various
entangled states~\cite{Moller08, Mueller09}.
Recently, two experiments implemented Rydberg interactions to demonstrate a cNOT
gate~\cite{Isenhower09} and to generate entanglement between two atoms trapped in optical tweezers~\cite{Wilk09}. In the present paper, we analyze in detail the experiment of reference~\cite{Wilk09}. We explain how we extract the amount of entanglement with
a method based on global rotations of the state of the atoms.
The paper is organized as follows. In section~\ref{sectionblockade} we present the principle of the experiment. In section~\ref{sectionsetup} we detail the setup as well as the experimental sequence. We show that some atoms are lost during the sequence. In section~\ref{sectiontheory} we present the model used to extract the amount of entanglement, which takes into account the loss of atoms. In the following sections we present the experimental results:
in section~\ref{sectionlosses} we quantify the atom losses and in section~\ref{sectionfidelity} we describe the partial tomography of the density matrix, extract the value of the fidelity and discuss the factors limiting this value.
\section{Rydberg blockade and entanglement}\label{sectionblockade}
The principle of the experiment relies on the Rydberg blockade effect demonstrated recently with two single atoms~\cite{Urban09, Gaetan09}. Due to their large electric dipole when they are in a Rydberg state $|r\rangle$, two atoms $a$ and $b$ interact strongly if they are close enough. This interaction leads to a shift $\Delta E$ of the doubly excited state $|r,r\rangle$.
As a consequence, a laser field coupling a ground state $|\!\uparrow\rangle$ and a Rydberg state $|r\rangle$ (with Rabi frequency $\Omega_{\uparrow r}$) cannot excite both atoms at the same time, provided that the linewidth of the excitation is smaller than $\Delta E$. In this blockade regime, the two-atom system behaves like an effective two-level system~\cite{Gaetan09}:
the ground state $|\!\uparrow,\uparrow\rangle$ is coupled to the excited state
\begin{equation}\label{eqno1}
|\Psi_{\rm r}\rangle = \frac{1} {\sqrt{2}}
(e^{i\mathbf{k}\cdot\mathbf{r}_a}|r,\uparrow\rangle +
e^{i\mathbf{k}\cdot\mathbf{r}_{b}}|\!\uparrow,r\rangle),
\end{equation}
where $\mathbf{k}=\mathbf{k}_{\rm R}+\mathbf{k}_{\rm B} $ is the sum of the wave vectors of the red (R) and blue (B)
lasers used for the two-photon excitation (see section~\ref{sectionsetup} and figure~\ref{figure1}b)
and $\mathbf{r}_{a/b}$ are the positions of the atoms.
The coupling strength between these states is enhanced by a factor $\sqrt{2}$ with respect to the one between $|\!\uparrow\rangle$ and $|r\rangle$ for a single atom~\cite{Gaetan09}. Thus, starting from $|\!\uparrow,\uparrow\rangle$, a pulse of duration $\pi/(\sqrt{2}\,\Omega_{\uparrow r})$ prepares the state $|\Psi_{\rm r}\rangle$.
To produce entanglement between the atoms in two ground states, the Rydberg state
$|r\rangle$ is mapped onto another ground state $|\!\downarrow\rangle$ using the same blue laser
and an additional red laser (wave vector $\mathbf{k'}_{\rm R}$)
with a pulse of duration $\pi/\Omega_{r \downarrow}$ ($\Omega_{r \downarrow}$ is the two-photon Rabi frequency). This sequence results in the entangled state
\begin{equation}\label{eqno2}
|\Psi\rangle = \frac{1} {\sqrt{2}} (|\!\downarrow,\uparrow\rangle + e^{i \phi}|\!\uparrow,\downarrow\rangle),
\end{equation}
with $\phi = (\mathbf{k}_{\rm R} -\mathbf{k'}_{\rm R})\cdot (\mathbf{r}_{b} - \mathbf{r}_{a}) $,
assuming that the positions of the atoms are frozen~\footnote{For a
discussion of the case where atoms move, see reference~\cite{Wilk09}.}.
As the light fields are propagating in the same direction and the energy
difference between the two ground states is small, $\mathbf{k}_{\rm R}\simeq \mathbf{k'}_{\rm R}$.
This procedure therefore generates in a deterministic way the well defined entangled state with $\phi =0$, which is the $|\Psi^+\rangle$ Bell state.
\section{Experimental setup and procedure}\label{sectionsetup}
Our experimental setup is depicted in Fig.~\ref{figure1}(a).
\begin{figure}
\caption{
(a) Experimental setup. Two atoms are held at a distance of 4~$\mu$m in two optical tweezers formed by focused laser beams at 810~nm (not shown). The fluorescence of each atom is directed onto separate avalanche photodiodes (APDs). The $\sigma^+$-polarized 475~nm laser has a waist of 25~$\mu$m and is directed along the z-axis, the two 795~nm lasers have waists of 130~$\mu$m, copropagate along the x-axis and have both linear polarization, one along the quantization axis, the other perpendicular. The 475~nm and 795~nm lasers have powers of 30~mW and 15~mW, respectively, which correspond to Rabi frequencies $\Omega_B/(2\pi) \sim 25$~MHz and $\Omega_R/(2\pi) \sim 300$~MHz.
(b) Atomic level structure and lasers used for the excitation towards the Rydberg state. The 475~nm laser and the two 795~nm lasers are tuned to the two photon transitions from $|\!\uparrow\rangle$ to $|r\rangle$ and from $|r\rangle$ to $|\!\downarrow\rangle$.}
\label{figure1}
\end{figure}
Two $^{87}$Rb atoms are held in two optical tweezers separated by 4~$\mu$m. The interatomic axis is aligned with a magnetic field ($B$=9~G), which defines the quantization axis and lifts the degeneracy of the Zeeman sublevels. The tweezers are formed by two laser beams at 810~nm which are sent at a small angle through a microscope objective focusing the beams to a waist of 0.9~$\mu$m. Atoms are captured from an optical molasses and, due to the small trapping volume, either one or no atom is captured in each trap~\cite{Schlosser01}. The same objective collects the fluorescence light of the atoms induced by the molasses beams at 780~nm. The light coming from each trapped atom is directed onto separate avalanche photodiodes (APDs) which allows us to discriminate for each trap whether an atom is present or not.
The relevant levels of $^{87}$Rb are shown in Fig.~\ref{figure1}(b). We have chosen the Rydberg state $|r\rangle=|58d_{3/2},F=3,M=3\rangle$. The interaction energy between two atoms in this state is enhanced by a F\"{o}rster resonance~\cite{Walker08} which leads to a calculated interaction energy $\Delta E/h\approx 50$~MHz for a distance between the atoms of 4~$\mu$m~\cite{Gaetan09}. The qubit ground states considered for the entanglement are $|\!\downarrow\rangle= |F=1,M=1\rangle$ and $|\!\uparrow\rangle=|F=2,M=2\rangle$ of the $5s_{1/2}$ manifold, separated in frequency by 6.8~GHz. To excite one atom from $|\!\uparrow\rangle$ to $|r\rangle$, we use a two-photon transition with a $\pi$-polarized laser at 795~nm and a $\sigma^+$-polarized laser at 475~nm. The frequency of the 795~nm laser is blue-detuned by $\delta$=600~MHz from the transition from $|\!\uparrow\rangle$ to $(5p_{1/2},F=2)$ in order to reduce spontaneous emission. The measured Rabi frequency of the two-photon transition from $|\!\uparrow\rangle$ to $|r\rangle$ is $\Omega_{\uparrow r}/2\pi\approx 6$~MHz for a single atom. We use the same 475~nm laser for the transition from $|r\rangle$ to $|\!\downarrow\rangle$, but a second 795~nm laser, linearly polarized perpendicular to the quantization axis, with a frequency 6.8~GHz higher to address state $|\!\downarrow\rangle$. The measured Rabi frequency for this second two-photon transition is $\Omega_{r\downarrow}/2\pi\approx 5$~MHz. The two 795~nm lasers are phase-locked to each other using a beat-note technique and fast electronic correction.
The two lasers are also used to drive Raman rotations between the qubit states $|\!\uparrow\rangle$ and $|\!\downarrow\rangle$. We observe Rabi oscillations between $|\!\uparrow\rangle$ and $|\!\downarrow\rangle$ with an amplitude of 0.95, which includes the fidelity of state initialization and state detection. We set the Rabi frequency of the Raman transition to $\Omega_{\uparrow\downarrow}= 2\pi\times250$~kHz.
We read out the atomic state by applying a push-out laser beam resonant to the $F$=2 to $F$=3 transition of the D2-line~\cite{Jones07}, which ejects atoms that are in state $|\!\uparrow\rangle$ (or in other $M$-states of the $F=2$ ground level) from the trap. Only atoms that are in $|\!\downarrow\rangle$ (or in other $M$-states of the $F=1$ level) will stay in the trap and will be detected.
\begin{figure}
\caption{Experimental sequence used to entangle two atoms and analyze the entanglement.
The state preparation is done by optical pumping. For clarity the horizontal time axis is not on scale.}
\label{expsequence}
\end{figure}
The experimental sequence is shown in figure~\ref{expsequence}.
An experiment starts upon detection of an atom in each trap (trap depth 3.5~mK). After turning off the cooling beams, we ramp adiabatically the trap depth down to 0.5 mK and optically pump the atoms in $|\!\uparrow\rangle$~\footnote{This reduction of the dipole trap depth decreases the temperature of the atoms and we have found that it also
leads to a better optical pumping.}.
This is done by a 600~$\mu$s optical pumping phase with a $\sigma^+$-polarized laser coupling the levels $(5s_{1/2}, F=2)$ and $(5p_{3/2}, F=2)$ and a repumping laser from $(5s_{1/2}, F=1)$ to $(5p_{3/2}, F=2)$. Afterwards we switch off the dipole trap while we apply the excitation and mapping pulses towards the Rydberg state and back. The excitation pulse has a duration of $\pi/ (\sqrt{2}\,\Omega_{\uparrow r}) \approx 70$~ns to excite state $|\Psi_{\rm r}\rangle$. The mapping pulse has a duration $\pi/\Omega_{r\downarrow}\approx 110$~ns.
The trap is then turned on again. In order to analyze the produced two-atom state, we drive global Raman rotations on the two atoms (see below) and the push-out laser is applied. Subsequently, we ramp up the depth of the dipole trap to its initial value
and record for each trap whether the atom is present or not.
We repeat the experiment 100 times for each Raman rotation angle $\theta = \Omega_{\uparrow\downarrow} \tau$ ($\tau$ is the duration of the Raman pulse). We then extract the probabilities $P_{a}(\theta)$ and $P_{b}(\theta)$ to recapture an atom in trap $a$ or $b$, the joint probabilities $P_{01}(\theta)$ and $P_{10}(\theta)$ to lose atom $a$ and recapture atom $b$ or vice versa, as well as probabilities $P_{11}(\theta)$ and $P_{00}(\theta)$ to recapture or lose both atoms, respectively, assigning $0$ to a loss and $1$ to a recapture.
Our state-detection scheme, based on the push out technique, identifies any atom $a$ or $ b$ when it is in state $|\!\downarrow\rangle$~\footnote{This statement assumes that there is no other Zeeman state of the $(5s_{1/2},F=1)$ manifold populated than $|\!\downarrow\rangle = |5s_{1/2}, F=1, M=1\rangle $. This is indeed the case in our experiment, as explained in section~\ref{sectionlosses}.}.
However, it does not discriminate between atoms in state $|\!\uparrow\rangle$ and atoms that could be lost during the sequence. As a consequence we have to evaluate the amount of these additional losses. We have measured in a separate experiment the probability $p_{\rm recap}$ to recapture a pair of atoms after the excitation and mapping pulses, without applying the push-out laser. We have found $p_{\rm recap}=0.62(3)$, which shows that
the losses of one or both atoms cannot be neglected. We have incorporated these losses in the analysis of our measurement results, using a model that is detailed in the next section.
\section{Theoretical model}\label{sectiontheory}
To take into account the loss of atoms, we introduce a set of additional states $\{|x\rangle\}$,
extending the basis of each atom to
($|\!\uparrow\rangle, |\!\downarrow\rangle, \{|x\rangle\}$)
and we describe the two-atom system by the density matrix $\hat{\rho}$ in this extended
basis. We assume that these additional states $\{|x\rangle\}$,
corresponding to an atom leaving the qubit basis,
cannot be distinguished from state $|\!\uparrow\rangle$ by the state detection.
The exact nature of states $\{|x\rangle\}$ will be detailed in section~\ref{sectionlosses},
but we already note that in our case they can come either from an atom leaving its trap (physical loss)
or from an atom still trapped but ending up in an unwanted state,
outside the qubit basis $\{|\!\uparrow\rangle, |\!\downarrow\rangle\}$.
The losses of one or two atoms
are given by the sum of the diagonal elements
$L_{\rm total}=\sum_x (P_{\uparrow x} + P_{\downarrow x} + P_{x \uparrow}+P_{x \downarrow}) +\sum_{x,x'}P_{xx'} $.
We assume that the states $\{|x\rangle\}$ are not coupled to $|\!\downarrow\rangle$ or $|\!\uparrow\rangle$ by the Raman lasers, and that they are not coupled between each other.
The Raman rotation for the two atoms can then be described by the operator
$R_{a\otimes b}(\theta,\varphi)= R_a(\theta,\varphi)\otimes R_b(\theta,\varphi)$
where $R_{a/b}(\theta, \varphi)$ is given by the matrix
\begin{equation}
R_{a/b}(\theta, \varphi) = \left( \begin{array}{ccc} \cos{\frac{\theta}{2}} & i
e^{i\varphi} \sin{\frac{\theta}{2}} & 0 \\ i e^{-i\varphi}
\sin{\frac{\theta}{2}} & \cos{\frac{\theta}{2}} & 0 \\ 0 & 0 & \hat{1} \\
\end{array} \right)_{|\!\uparrow\rangle,|\!\downarrow\rangle, \{|x\rangle\}}\ , \label{matrice_rotation}
\end{equation}
where $\hat{1}$ stands for the identity matrix, $\theta = \Omega_{\uparrow\downarrow}\tau$ and
$\varphi$ is the phase difference between the two Raman lasers.
The two atoms are exposed to the same laser field and undergo a
rotation with the same $\theta$ and $\varphi$.
After the rotation the density matrix of the produced state is
$\hat\rho_{\rm rot}(\theta, \varphi)= R_{a\otimes b}(\theta,\varphi)
\hat\rho R_{a\otimes b}^{\dagger}(\theta,\varphi)$.
The idea behind this approach is to transform the coherences (off-diagonal matrix element) into populations that can be directly measured.
In our experiment, we do not control the phase $\varphi$ of the Raman lasers with respect to the phase of the atomic states. This comes ultimately from the fact that the atoms are loaded in the dipole traps at random, so that there is no phase relation with respect to the microwave used to generate the Raman transition.
This phase $\varphi$ varies randomly from shot-to-shot over $2\pi$. Our measurement results are therefore averaged over $\varphi$.
When averaging $\langle\hat\rho_{\rm rot}(\theta, \varphi)\rangle_\varphi$, all
coherences of $\hat\rho_{\rm rot}$ average out, apart from the off-diagonal
element $\rho_{\downarrow\uparrow,\uparrow\downarrow}$ relevant to characterize
state $|\Psi^+\rangle$.
We then calculate the expressions averaged over $\varphi$ of the probabilities $P_{a/b}(\theta)$ as well as $P_{11}(\theta)$ and $\Pi(\theta)=P_{11}(\theta)+P_{00}(\theta)-P_{01}(\theta)-P_{10}(\theta)$
as a function of the matrix elements of $\hat{\rho}$.
As our state detection identifies a recapture (1) with the atom being in state $|\!\downarrow\rangle$, we get for the probability to recapture atom $a$ independently of the state of atom $b$:
\begin{eqnarray}\label{Pa}
P_{a}(\theta) &=& P_{\downarrow \downarrow}(\theta)+P_{\downarrow\uparrow}(\theta)+
\sum_{x}P_{\downarrow x}(\theta)\\\nonumber
&=&\frac{1}{2}\left[P_{\uparrow\downarrow}+ P_{\downarrow\uparrow}+ P_{\uparrow\uparrow}+P_{\downarrow\downarrow} +\sum_{x} (P_{\uparrow x}+P_{\downarrow x})\right]\\\nonumber
&+&\frac{1}{2}\left[P_{\downarrow\downarrow}- P_{\uparrow\uparrow}+P_{\downarrow\uparrow}-
P_{\uparrow\downarrow} +\sum_{x} (P_{\downarrow x}-P_{\uparrow x})\right]\cos{\theta}\ .
\end{eqnarray}
In this formula and in the following ones,
$P_{n,m}(\theta)= \langle n,m| \hat{\rho}_{\rm rot}|n,m\rangle$, and $P_{n,m}=P_{n,m}(0)$ with $\{n, m\} \in\{\downarrow,\uparrow,x\}$.
Similarly, the probability to recapture atom $b$ independently of the state of atom $a$ is \begin{eqnarray}\label{Pb}
P_{b}(\theta)&=& P_{\downarrow \downarrow}(\theta)+P_{\uparrow\downarrow}(\theta)+
\sum_{x}P_{x\downarrow }(\theta)\\\nonumber
&=&\frac{1}{2}\left[P_{\uparrow\downarrow}+ P_{\downarrow\uparrow}+ P_{\uparrow\uparrow}+P_{\downarrow\downarrow} +
\sum_{x} (P_{x\uparrow}+ P_{x\downarrow})\right]\\\nonumber
&+&\frac{1}{2}\left[P_{\downarrow\downarrow}- P_{\uparrow\uparrow}+ P_{\uparrow\downarrow}-
P_{\downarrow\uparrow} +\sum_{x} (P_{x\downarrow }- P_{x\uparrow })\right]\cos{\theta}
\end{eqnarray}
We also introduce the probability $L_{a}$ that atom $a$ lies outside the qubit basis
$\{|\!\uparrow\rangle,|\!\downarrow\rangle\}$, given by $L_{a}=\sum_{x} (P_{x\uparrow}+P_{x\downarrow})+\sum_{x,x'}P_{x,x'}$ and similarly for atom $b$,
$L_{b}=\sum_{x} (P_{\uparrow x}+P_{\downarrow x})+\sum_{x,x'}P_{x,x'}$.
From expression~(\ref{Pa}) and~(\ref{Pb}) the probabilities $L_{a}$ and $L_{b}$ are related to the mean value of $P_{a/b}(\theta)$ by the expression
\begin{equation}\label{formuleLa}
\langle P_{a/b}(\theta)\rangle = \frac{1}{2} (1-L_{a/b})\ .
\end{equation}
This expression is intuitive: the mean value of the probability for an atom to be recaptured, i.e. the atom is in state $|\!\downarrow\rangle$, is 1/2 when there is no additional loss during the entangling sequence. When we take into account the probability to lose the atom, we simply multiply the probability in the absence of additional loss, 1/2, with the probability to stay in the qubit basis $1-L_{a/b}$.
The calculation gives the joint probability to recapture both atoms at the end
of the Raman rotation:
\begin{eqnarray}\label{P11}
P_{11}(\theta) &=& P_{\downarrow \downarrow}(\theta)\\\nonumber
&=&\frac{1}{8}\left[P_{\uparrow\downarrow}+
P_{\downarrow\uparrow}+
2\Re(\rho_{\downarrow\uparrow,\uparrow\downarrow})+
3(P_{\uparrow\uparrow}+P_{\downarrow\downarrow})\right]\\\nonumber
&+&\frac{1}{2}(P_{\downarrow\downarrow}-P_{\uparrow\uparrow})\cos{\theta}\\\nonumber
&+&\frac{1}{8}\left[P_{\downarrow\downarrow}+
P_{\uparrow\uparrow}-P_{\uparrow\downarrow}-P_{\downarrow\uparrow}-2\Re(\rho_{\downarrow\uparrow,\uparrow\downarrow})\right]\cos{2\theta}\ .
\end{eqnarray}
Here, ${\Re }$ denotes the real part. This expression exhibits terms oscillating at frequencies $\Omega_{\uparrow\downarrow}$
and $2\Omega_{\uparrow\downarrow}$. The term at $\Omega_{\uparrow\downarrow}$
reflects the imbalance between the states $|\!\uparrow,\uparrow\rangle$ and $|\!\downarrow,\downarrow\rangle$. We note also that this expression of $P_{11}$ does not involve any loss terms, as it characterizes situations where both atoms are present at the end of the sequence. That is why we focus on this quantity for extracting the amount of entanglement between the two atoms.
Finally, we calculate the signal $\Pi (\theta )$, which is equal to the parity~\cite{Turchette98} when there are no losses from the qubit basis. We find the expression:
\begin{eqnarray}\label{Parite}
\Pi({\theta} )&=& \frac{1}{2}\left[P_{\downarrow\downarrow}+P_{\uparrow\uparrow}
-P_{\uparrow\downarrow}-P_{\downarrow\uparrow}
+2\Re(\rho_{\downarrow\uparrow,\uparrow\downarrow})+2\sum_{x,x'}P_{xx'}\right]\\\nonumber
&+&\sum_{x} (P_{x\uparrow}+P_{\uparrow x}-P_{x\downarrow}-P_{\downarrow x})\cos{\theta}\\\nonumber
&+&\frac{1}{2}\left[P_{\downarrow\downarrow}+P_{\uparrow\uparrow}
-P_{\uparrow\downarrow}-P_{\downarrow\uparrow}-
2\Re(\rho_{\downarrow\uparrow,\uparrow\downarrow})\right]\cos{2\theta}\ .
\end{eqnarray}
This formula also presents oscillations at two frequencies, the one at $\Omega_{\uparrow\downarrow}$ being related this time to events where only one of the two atoms is present.
As a final remark on this model we point out that a global rotation with no control over
the phase $\varphi$ would not be suitable to analyze the Bell states
$|\Psi^{-}\rangle=( |\!\uparrow,\downarrow\rangle - |\!\downarrow,\uparrow\rangle)/\sqrt{2}$
and $|\Phi^{\pm}\rangle=( |\!\uparrow,\uparrow\rangle \pm |\!\downarrow,\downarrow\rangle)/\sqrt{2}$.
As an example, the antisymmetric state $|\Psi^{-}\rangle$ does not change under the rotation~\cite{Turchette98}, whatever the
phase $\varphi$. For the states $|\Phi^{\pm}\rangle$, the coherence
$\rho_{\downarrow\downarrow,\uparrow\uparrow}$ acquires under the rotation a phase
factor $e^{-i2\varphi}$. On a single realization of the experiment, the phase
$\varphi$ is fixed, but averaging over many realizations cancels this coherence. The robustness of $|\Psi^+\rangle$
under fluctuations of $\varphi$ is reminiscent of the fact that this state lies in a decoherence free subspace~\cite{Haffner05}.
In the remaining part of the paper, we will use this model to extract from a single set of data the probability to lose one and two atoms, as well as the amount of entanglement.
\section{Analysis of the losses}\label{sectionlosses}
\begin{figure}
\caption{Measured probabilities $P_{a}
\label{figPab}
\end{figure}
Figure~\ref{figPab} shows the recapture probabilities $P_{a/b}(\theta)$ for each atom for different values of the Raman rotation angle. From equation~(\ref{formuleLa}) and the mean value of $P_{a/b}(\theta)$ deduced from the data we find $L_a=L_b = 0.22(1)$, confirming that the loss probability is the same for both atoms.
Assuming independent losses for atoms $a$ and $b$ we find the probability to lose at least one of the two atoms $L_{\rm total}=L_a(1-L_{b}) + L_b(1-L_{a}) + L_a L_b = 0.39(2)$.
The probability for a pair of atoms to remain in the qubit basis $\{|\!\uparrow\rangle,|\!\downarrow\rangle\}$ is then ${\rm tr}\,\hat{\rho}=1-L_{\rm total}$, restricting the trace to pairs of atoms still present at the end of the entangling sequence in the states $|\!\uparrow\rangle$ and $|\!\downarrow\rangle$.
The loss channels can be separated into three classes.
In the first category, independent of the Rydberg excitation and Raman rotation, we measured losses during the time the trap is switched off ($\sim 3\%$) as well as
errors in the detection of the presence of the atom ($\sim 3\%$). For this first category, the loss channels $\{|x\rangle\}$ correspond to an atom in any internal state but which is lost from the tweezers.
In the second category, the losses are also physical and
occur during the entangling and mapping pulses. These losses
correspond to situations where one or two atoms have left the dipole traps,
and are therefore absent when the Raman rotation and the measurement take place.
These losses are independent of the state detection and are mostly related to the fact that an
atom left in the Rydberg state is lost, since it is not trapped in the dipole trap.
Using a model based on Bloch equations including the 5 relevant states
($|\!\uparrow\rangle$, $|\!\downarrow\rangle$, $|5s_{1/2}, F=2,M=1\rangle$, $|5p_{1/2}, F=2,M=2\rangle$
and $|r\rangle$), we identify the following scenarios.
Firstly, spontaneous emission from the $5p_{1/2 }$ state populates the state $|\!\downarrow\rangle$ from which $\sim 7~\%$ of the atoms get excited to the Rydberg state by the mapping pulse.
Secondly, intensity fluctuations ($5~\%$) and frequency fluctuations ($3$~MHz) of
the excitation lasers reduce the efficiency of the mapping pulse so that $\sim 7~\%$ of the atoms will not be transferred back from the Rydberg state to $|\!\downarrow\rangle$.
For this second class, the loss channel $|x\rangle$ is any Rydberg state
which can be coupled by the two-photon transition, including those resulting,
e.g., from an imperfect polarization of the lasers.
The third class of losses corresponds to atoms that are still present at the end of the entangling
and mapping sequence, but which are in states different from $|\!\uparrow\rangle$
and $|\!\downarrow\rangle$, that is outside the qubit basis when the state
measurement is performed. Because of the selection rules, the main possibility in our case is the spontaneous emission from the $5p_{1/2}$ state to the state $|x\rangle=|5s_{1/2}, F=2,\,M=1\rangle$ during the entangling and mapping pulses which is calculated to be only
$\sim 2 \%$, due to a small branching ratio.
This third contribution is therefore small in our case.
By adding the contributions of the three categories of losses we find
a loss probability for each atom of 0.22, in agreement with the measured values
of $L_{a/b}$.
Finally, we compare the probability $1-L_{\rm total}=0.61(2)$ for a pair of atoms to be
in states $|\!\uparrow\rangle$ or $|\!\downarrow\rangle$ with
the probability $p_{\rm recap}=0.62(3)$ to recapture both atoms,
irrespective of their internal states.
Both values are almost identical, confirming that the dominant mechanism is a
physical loss of the atoms before the state measurement.
\section{Partial state reconstruction and fidelity}\label{sectionfidelity}
\begin{figure}
\caption{Measured probability $P_{11}
\label{figP11}
\end{figure}
In order to analyse the two-atom state
we focus on the joint recapture probability for atom pairs $P_{11}(\theta)$ shown in figure~\ref{figP11}, since it incorporates no loss terms, as shown in equation~(\ref{P11}).
For the maximally entangled state $|\Psi^{+}\rangle$, $P_{11}(\theta)$
should
oscillate between 0 and $1/2$ at a frequency $2\Omega_{\uparrow\downarrow}$,
while here the data show oscillations at two frequencies $\Omega_{\uparrow\downarrow}$ and
$2\Omega_{\uparrow\downarrow}$, with a reduced amplitude.
From the measurement of $P_{11}(\theta)$ and the expression~(\ref{P11}), we extract $P_{\downarrow\downarrow} = P_{11}(0)$ and $P_{\uparrow\uparrow} = P_{11}(\pi)$. Combining the value of the total losses $L_{\rm total}$ and the normalization condition $P_{\uparrow\downarrow} +P_{\downarrow\uparrow}+P_{\uparrow\uparrow} +P_{\downarrow\downarrow}+ L_{\rm total}=1$,
we get $P_{\uparrow\downarrow} +P_{\downarrow\uparrow}$.
The mean value $\langle P_{11}(\theta)\rangle = [P_{\downarrow\uparrow} + P_{\uparrow\downarrow} + 3 P_{\downarrow\downarrow} + 3P_{\uparrow\uparrow} +2{\Re} (\rho_{\downarrow\uparrow,\uparrow\downarrow})] / 8$ yields $\Re(\rho_{\downarrow\uparrow,\uparrow\downarrow})$.
Table~\ref{summary} summarizes the complete information about the density
matrix $\hat\rho$ one can extract from global Raman rotations without control of $\varphi$.
\begin{table}
\begin{center}
\begin{tabular}{c c c}
\hline
\hline
Matrix elements & &Experimental values\\
\hline
$\rho_{\downarrow\downarrow,\downarrow\downarrow}=P_{\downarrow\downarrow}$ & &$0.06\pm 0.02$\\
$\rho_{\uparrow\uparrow,\uparrow\uparrow}=P_{\uparrow\uparrow}$ & &$0.09\pm 0.02$\\
$\rho_{\downarrow\uparrow,\downarrow\uparrow}+\rho_{\uparrow\downarrow,\uparrow\downarrow} =P_{\downarrow\uparrow}+P_{\uparrow\downarrow}$& &$0.46\pm 0.03$\\
${\Re}(\rho_{\downarrow\uparrow,\uparrow\downarrow}) $& &$0.23\pm 0.04$\\
\hline
\hline
\end{tabular}
\caption{Measured values of the density matrix elements characterizing the state prepared in the experiment extracted from $P_{11}(\theta)$. The error bars are statistical. Note that the restriction to the states $|\!\uparrow\rangle$ and $|\!\downarrow\rangle$ leads to ${\rm tr}(\hat\rho) = 0.61$
because of the loss $L_{\rm total} = 0.39(2)$ from the qubit basis.}\label{summary}
\end{center}
\end{table}
As a cross-check of our data analysis, we look at the signal $\Pi (\theta )$ which is shown in figure~\ref{figparity}. For the maximally entangled state $|\Psi^{+}\rangle$,
the parity should
oscillate between $-1$ and $+1$ with a frequency of $2\Omega_{\uparrow \downarrow}$,
while here the observed $\Pi (\theta )$ oscillates at two frequencies,
$\Omega_{\uparrow\downarrow}$ and $2\Omega_{\uparrow\downarrow}$ with reduced amplitude.
From equation~(\ref{Parite}) we calculate $\Pi (\pi/2) = 2{\Re} (\rho_{\downarrow\uparrow,\uparrow\downarrow}) + \sum_{x,x'}P_{xx'}$.
Under the assumption that losses are independent for atoms $a$ and $b$, as mentioned in section~\ref{sectionlosses},
$L_{\rm total}=L_a + L_b - L_a L_b$.
Combining this formula with the expressions of $L_{\rm total}$, $L_{a}$ and $L_{b}$ given in section~\ref{sectiontheory}, we get $\sum_{x,x'}P_{xx'}=L_{a} L_{b}$.
We then deduce the coherence ${\Re}(\rho_{\downarrow\uparrow,\uparrow\downarrow}) = 0.22(4)$, which is in good agreement with the value deduced from the analysis of $P_{11}(\theta)$ described above.
\begin{figure}
\caption{Measured signal $\Pi(\theta)$ for different durations of the analysing Raman pulse. The data are fitted by a function of the form $y_{0}
\label{figparity}
\end{figure}
Our analysis allows us to calculate the fidelity of the
entangling operation. This fidelity $F$ is defined by $F=\langle\Psi^+ |\hat\rho| \Psi^+\rangle =
(P_{\downarrow\uparrow} + P_{\uparrow\downarrow})/2 +
{\Re} (\rho_{\downarrow\uparrow,\uparrow\downarrow})$
with respect to the expected $|\Psi^+\rangle$ Bell
state~\cite{Sackett00}.
From the values in table~\ref{summary},
we get $F=0.46(4)$.
This fidelity $F$ is defined with respect to the initial number
of atom pairs and includes events for which one or two atoms have been
lost physically during the entangling sequence.
That means $F$ characterizes the whole entangling
operation which is mainly limited by atom losses.
As $F<0.5$, this value does not prove entanglement between the atoms.
The quantum nature of the correlations between the two atoms is revealed
if we calculate the fidelity $F_{\rm pairs} = F/p_{\rm recap}$
which characterizes the pairs of atoms effectively present at the end of the entangling sequence
before state detection.
From $p_{\rm recap}=0.62(3)$, we calculate $F_{\rm pairs} = 0.74(7)$.
This approach to taking atom losses into account is very similar to the one used
in Bell inequality tests with photons based on {\it one-way polarizers}~\cite{Freedman72, Aspect81}.
In these experiments,
the absence of a photon detection after the polarizer can be due to a
photon with orthogonal polarization, or a photon that has been
lost before reaching the polarizer.
Therefore, the total number of
detected photon pairs is first measured by removing the polarizers, then the measurement
of the polarization correlation is performed and the results are renormalized by the
total number of photon pairs.
Our analysis gives also access to the fidelity $F_{\uparrow\downarrow} = F/{\rm tr}\hat{\rho}$ which characterizes the entanglement of atom pairs which are still in the qubit basis $\{|\!\uparrow\rangle,|\!\downarrow\rangle\}$. We find $F_{\uparrow\downarrow}=0.75(7)$, which is very close to $F_{\rm pairs}$ since the main mechanism for atom losses is the physical loss of one or two atoms from their traps. The fact that $F_{\rm pairs}>0.5$ and $F_{\uparrow\downarrow} > 0.5$
proves that the two atoms are entangled.
We can identify two effects lowering the fidelity with respect to the ideal case. Firstly, an imperfect Rydberg blockade leads to the excitation of both atoms (probability $\sim 10\%$~\cite{Gaetan09}) and their subsequent mapping to the state $|\!\downarrow,\!\downarrow\rangle$, resulting in a non-zero component of $P_{\downarrow\downarrow}$. Secondly, the excess value of $P_{\uparrow\uparrow}$ is explained by spontaneous emission from the state $5p_{1/2}$ as well as imperfect Rydberg excitation from the two atom state $|\!\uparrow,\!\uparrow\rangle$. We note that in the present status of the experiment, the influence of the residual motion of the atoms
in their traps is negligible on the fidelity (for more details, see~\cite{Wilk09}).
\section{Conclusion}
In conclusion, we have used global Raman rotations to analyze the entanglement of two atoms which is created using
the Rydberg blockade. Our analysis is based on a model taking into account losses of atoms. We have found that the 62\% of atom pairs remaining at the end of the sequence are in a state with a fidelity 0.74(7) with respect to the expected $|\Psi^+\rangle$, showing the non-classical origin of the correlations. Future work will be devoted to the measurement of the coherence time of the entangled state, as well as to the improvement of the fidelity and the state detection scheme.
\begin{acknowledgments}
We thank M. Barbieri, M. M\"uller, R. Blatt, D. Kielpinski and P. Maunz for
discussions. We acknowledge support from the European Union through the Integrated Project SCALA, IARPA and the Institut Francilien de Recherche sur les Atomes Froids (IFRAF). A. Ga\"{e}tan and C. Evellin are
supported by a DGA fellowship. T. Wilk is supported by IFRAF.
\end{acknowledgments}
\end{document} |
\begin{document}
\title{Projective background of the infinitesimal rigidity\newline of frameworks}
\begin{abstract}
We present proofs of two classical theorems. The first one, due to Darboux and Sauer, states that infinitesimal rigidity is a projective invariant; the other one establishes relations (infinitesimal Pogorelov maps) between the infinitesimal motions of a Euclidean framework and of its hyperbolic and spherical images.
The arguments use the static formulation of infinitesimal rigidity. The duality between statics and kinematics is established through the principles of virtual work. A geometric approach to statics, due essentially to Grassmann, makes both theorems straightforward. Besides, it provides a simple derivation of the formulas both for the Darboux-Sauer correspondence and for the infinitesimal Pogorelov maps.
\end{abstract}
\section{Introduction}
\label{sec:Intro}
\subsection{Infinitesimal rigidity}
A \emph{framework} is a collection of bars joined together at their ends by universal joints. A framework is called \emph{rigid}, if it cannot be flexed at the joints without deforming the bars; or, equivalently, if it can be moved only as a rigid body. The mathematical formalization of this is straightforward: a framework is a collection of points with distances between some pairs of them fixed; rigidity means that the points cannot be moved without changing one of those distances.
A framework is \emph{infinitesimally rigid} if its nodes cannot be moved in such a manner that the lengths of the bars remain constant in the first order. In practice, an infinitesimally flexible framework allows a certain amount of movement, even if it is rigid in the above sense.
A classical result on infinitesimal rigidity is the Legendre-Cauchy-Dehn theorem, \cite{Leg94}, \cite{Cau13}, \cite{Dehn16}: \emph{Every convex 3-dimensional polyhedron is infinitesimally rigid.} The theorem can be restated in the language of frameworks: The framework consisting of the vertices, edges and all face diagonals of a convex polyhedron is infinitesimally rigid. In fact, ``all face diagonals'' is redundant: it suffices to triangulate the faces arbitrarily, without adding new vertices. See \cite{Whi84a}, where this is generalized to higher dimensions.
For more information on different concepts of rigidity and an overview of results in this area, see the survey article \cite{Con93}.
Another classical but undeservedly little known result is the projective invariance of infinitesimal rigidity. For discrete structures it was first noticed by Rankine in 1863; it was proved by Darboux for smooth surfaces and by Liebmann and Sauer for frameworks, see Section \ref{subsec:rem}. Closely related to the projective invariance is the fact, discovered by Pogorelov, that a Euclidean framework can be turned into a hyperbolic or spherical one, respecting the infinitesimal rigidity.
The present paper contains proofs of these two properties of infinitesimal rigidity. The idea behind the proofs is not new, but we hope that a modern exposition might be useful. Our interest was stimulated by new applications that the projective properties of infinitesimal rigidity found in recent years, \cite{Scl05}, \cite{Scl06}, \cite{Fil08}.
There are further manifestations of the projective nature of the infinitesimal rigidity, such as its relations with polarity \cite{Whi87, Whi89} and Maxwell's theorem on projected polyhedra \cite{Whi82}.
Now let us state the two theorems in a precise way.
\subsection{Darboux-Sauer correspondence}
A framework $P$ in the Euclidean space ${\mathbb E}^d$ can be mapped by a projective map $\Phi:{\mathbb R}P^d \to {\mathbb R}P^d$ to another framework $\Phi(P)$. Here we assume that an affine embedding of ${\mathbb E}^d$ into ${\mathbb R}P^d$ is fixed and that $\Phi$ maps no vertex of $P$ to infinity. Frameworks $P$ and $\Phi(P)$ are called \emph{projectively equivalent}.
\begin{thm}[Darboux-Sauer correspondence]
\label{thm:DS}
Let $P$ and $P'$ be two projectively equivalent frameworks in ${\mathbb E}^d$. Then $P'$ is infinitesimally rigid iff $P$ is infinitesimally rigid. Moreover, the number of degrees of freedom of $P$ and $P'$ coincide.
\end{thm}
By the number of degrees of freedom of a framework we mean the dimension of the space of its infinitesimal motions modulo trivial ones. An infinitesimal motion is called trivial if it can be extended to an infinitesimal motion of ${\mathbb E}^d$ (equivalently, if it moves $P$ as a rigid body).
More specifically, let $\Phi$ be a projective map such that $P' = \Phi(P)$. Then $\Phi$ induces a bijection $\Phi^{\mathrm{kin}}$ between the space of infinitesimal motions of $P$ and the space of infinitesimal motions of $P'$ that maps trivial motions to trivial ones. We call the map $\Phi^{\mathrm{kin}}$ the (kinematic) \emph{Darboux-Sauer correspondence}.
\subsection{Infinitesimal Pogorelov maps}
Here is a simple way to describe the infinitesimal Pogorelov map. Consider a framework $P$ that is contained in a disk ${\mathbb D}^d \subset {\mathbb E}^d$. When the interior of ${\mathbb D}^d$ is viewed as Klein model of the hyperbolic space ${\mathbb H}^d$, the Euclidean framework $P$ turns into a hyperbolic framework $P^{\mathbb H}$. Pogorelov proved that $P^{\mathbb H}$ is infinitesimally rigid iff $P$ is; moreover, there is a canonical way to associate to every infinitesimal motion of $P$ an infinitesimal motion of $P^{\mathbb H}$ (with trivial motions going to trivial ones). This association is called the infinitesimal Pogorelov map.
Now let's be formal. Make the following identifications:
\begin{eqnarray}
{\mathbb E}^d & = & \{x \in {\mathbb R}^{d+1}|\; x^0 = 1\};\\
{\mathbb H}^d & = & \{x \in {\mathbb R}^{d+1}|\; x^0 > 0,\, \|x\|_{1,d}=1\}; \label{eqn:H^d}\\
{\mathbb S}^d & = & \{x \in {\mathbb R}^{d+1}|\; \|x\|=1\}, \label{eqn:S^d}
\end{eqnarray}
where $\|\cdot\|$ denotes the Euclidean norm, and $\|\cdot\|_{1,d}$ denotes the Minkowski norm of signature $(+,-,\ldots,-)$ in ${\mathbb R}^{d+1}$.
The projection from the origin of ${\mathbb R}^{d+1}$ defines the maps
\begin{eqnarray}
\Pi_{\mathbb H}: {\mathbb D}^d & \to & {\mathbb H}^d;\\
\Pi_{\mathbb S}: {\mathbb E}^d & \to & {\mathbb S}^d,
\end{eqnarray}
where ${\mathbb D}^d$ is the open unit disk in ${\mathbb E}^d \subset {\mathbb R}^{d+1}$ centered at $(1,0,\ldots,0)$.
To a framework $P$ in ${\mathbb E}^d$ there correspond frameworks $P^{\mathbb H} = \Pi_{\mathbb H}(P)$ and $P^{\mathbb S} = \Pi_{\mathbb S}(P)$ in ${\mathbb H}^d$ and ${\mathbb S}^d$. Note that $P^{\mathbb H}$ is defined iff $P \subset {\mathbb D}^d$.
\begin{thm}[Infinitesimal Pogorelov maps]
\label{thm:PogMaps}
Let $P$ be a framework in~${\mathbb E}^d$. Then the following are equivalent:
\begin{itemize}
\item the Euclidean framework $P$ is infinitesimally rigid;
\item (for $P \subset {\mathbb D}^d$) the hyperbolic framework $P^{\mathbb H}$ is infinitesimally rigid;
\item the spherical framework $P^{\mathbb S}$ is infinitesimally rigid.
\end{itemize}
Moreover, frameworks $P$, $P^{\mathbb H}$, and $P^{\mathbb S}$ have the same number of degrees of freedom.
\end{thm}
Again, both statements of the theorem follow from the fact that there exist bijections between infinitesimal motions of frameworks $P$, $P^{\mathbb H}$, and $P^{\mathbb S}$ that map trivial motions to trivial ones. These bijections are called the \emph{infinitesimal Pogorelov maps}.
\subsection{Plan of the paper}
Section \ref{sec:Classical} contains preliminary material. The focus here is on the equivalence between infinitesimal rigidity and static rigidity expressed in Theorem \ref{thm:InfStat}. This theorem is a direct consequence of \emph{principles of virtual work} (Lemma~\ref{lem:Dual}).
Section \ref{sec:Proj} develops ``projective statics'' and ``projective kinematics''. The goal is to define motions and loads within projective geometry, which makes the projective invariance of infinitesimal rigidity straightforward. Geometric description of Darboux-Sauer correspondence is derived.
With infinitesimal rigidity defined in projective terms, it is not hard to relate the kinematics of frameworks $P$, $P^{\mathbb H}$, and $P^{\mathbb S}$. This is done in Section~\ref{sec:InfPog}, where formulas for the infinitesimal Pogorelov maps are also derived.
\subsection{Examples}
\label{subsec:Exls}
Let us illustrate Theorem \ref{thm:DS} with some examples. Among the frameworks with a given combinatorics, the infinitesimally flexible ones sometimes have a nice geometric description. By Theorem \ref{thm:DS}, the description can always be made in projective terms.
\begin{exl}
Blaschke \cite{Bla20} and Liebmann \cite{Lie20} proved the following:
\begin{quote}
Let $P$ be a framework combinatorially equivalent to the skeleton of the octahedron. Color the triangles spanned by the edges of~$P$ black and white so that neighbors have different colors. The framework $P$ is infinitesimally flexible iff the planes of the four black triangles intersect, maybe at infinity. As a corollary, the planes of four white triangles intersect iff the planes of the four black ones do.
\end{quote}
\begin{figure}
\caption{Examples of infinitesimally flexible octahedra. Left: antiprism twisted by $90^\circ$. Right: the points $A$, $B$, $C$, $D$ lie in one plane.}
\label{fig:Blaschke}
\end{figure}
Figure \ref{fig:Blaschke} shows two configurations satisfying this condition. At the left is an example from \cite{Wun65}. It is obtained from a straight antiprism over a regular triangle by rotating one of the bases by $90^\circ$. It is easy to see that the horizontal shaded triangle is cut by the planes of the other three shaded triangles along its medians. Hence the four shaded planes intersect at a point. The right-hand example is due to Liebmann and is also depicted in \cite{Glu75}. Here the points $A$, $B$, $C$, and $D$ are assumed to lie in one plane. Since each of the four shaded planes contains one of the lines $AB$ or $CD$, they all pass through the intersection point of these lines.
\end{exl}
\begin{exl}
Consider the planar framework at the left of Figure \ref{fig:Desargues}. The lines matching the vertices of the two triangles are parallel. This implies that the velocity field represented by arrows is an infinitesimal motion. Hence any framework in which the three matching lines are concurrent is infinitesimally flexible. In fact, this is a necessary and sufficient condition:
\begin{quote}
The planar framework on the right hand side of Figure \ref{fig:Desargues} is infinitesimally flexible iff the three lines $a$, $b$, $c$ intersect.
\end{quote}
Note that this condition is equivalent to the framework being a projection of a skeleton of a 3-polytope, so that the statement is a special case of Maxwell's theorem, \cite{Whi82}.
\end{exl}
\begin{figure}
\caption{The framework at the left is infinitesimally flexible. The framework at the right is infinitesimally flexible iff the lines $a$, $b$, $c$ intersect.}
\label{fig:Desargues}
\end{figure}
\begin{exl}
Walter Whiteley \cite{Whi84c} shows how to derive from Theorem~\ref{thm:DS} the following statement:
\begin{quote}
Let $P$ be a framework in the Euclidean space ${\mathbb E}^d$ with combinatorics of a bipartite graph. If all of the vertices of $P$ lie on a non-degenerate quadric, then $P$ is infinitesimally flexible.
\end{quote}
Assume that all of the vertices of $P$ lie on the sphere. Move all the white vertices towards the center of the sphere, and all the black vertices in the opposite direction, see Figure \ref{fig:Bipartite}. It is easy to see that the distances between white and black vertices don't change in the first order. Thus $P$ is infinitesimally flexible. Since any non-degenerate quadric is a projective image of the sphere, $P$ is also infinitesimally flexible when it is inscribed in a quadric.
\begin{figure}
\caption{The framework at the left is infinitesimally flexible. Due to the projective invariance of infinitesimal rigidity, the framework at the right is also infinitesimally flexible.}
\label{fig:Bipartite}
\end{figure}
The question about rigidity of bipartite frameworks was studied in \cite{BolRot80}. A characterization of infinitesimally flexible complete bipartite frameworks is given in \cite{Whi84b}.
\end{exl}
\section{Infinitesimal and static rigidity}
\label{sec:Classical}
\subsection{Frameworks}
\label{subsec:Frameworks}
Let $(\mathcal{V}, \mathcal{E})$ be a graph with vertex set $\mathcal{V}$ and edge set $\mathcal{E}$. We denote the vertices by letters $i, j, \ldots$, and an edge joining the vertices $i$ and $j$ by $ij$.
\begin{dfn}
\label{dfn:Framework}
A \emph{framework} in ${\mathbb E}^d$ with graph $(\mathcal{V}, \mathcal{E})$ is a map
$$
\begin{array}{rrcl}
P: & \mathcal{V} & \to & {\mathbb E}^d,\\
& i & \mapsto & p_i
\end{array}
$$
such that $p_i \ne p_j$ whenever $ij \in \mathcal{E}$.
\end{dfn}
In other words, a framework is a straight-line drawing of a graph in ${\mathbb E}^d$, with self-intersections (even non-transverse ones) allowed. The motivation for studying frameworks comes from mechanical linkages; namely, the edges of a framework should be considered as rigid bars, and the vertices as universal joints.
Throughout the paper we assume that the vertices $(p_i)_{i \in \mathcal{V}}$ of the framework span the space ${\mathbb E}^d$ affinely. This is not a crucial restriction: if the framework lies in an affine subspace of ${\mathbb E}^d$, then its infinitesimal motions can be decomposed into the direct sum of infinitesimal motions inside $\mathrm{span} \{p_i\}$ and arbitrary displacements in directions orthogonal to $\mathrm{span} \{p_i\}$.
\begin{rem}
We use different notations for the Euclidean space ${\mathbb E}^d$ and for the vector space ${\mathbb R}^d$. Informally speaking, ${\mathbb E}^d$ consists of points, ${\mathbb R}^d$ consists of vectors. We obtain ${\mathbb E}^d$ from ${\mathbb R}^d$ by ``forgetting'' the origin. The tangent space at every point of ${\mathbb E}^d$ is ${\mathbb R}^d$ with the standard scalar product. Also, every pair of points $p$, $p'$ in ${\mathbb E}^d$ determines a vector $p'-p \in {\mathbb R}^d$.
\end{rem}
\subsection{Infinitesimal rigidity}
\label{subsec:ClassInf}
A continuous motion of the framework $P$ is a family $P(t)$ of frameworks ($t$ ranges over a neighborhood of $0$) such that $P(0) = P$ and the length of every bar does not depend on $t$:
\begin{equation}
\label{eqn:BarLengths}
\|p_i(t) - p_j(t)\| = \mathrm{const}_{ij} \mbox{ for every }ij \in \mathcal{E}.
\end{equation}
If $P(t)$ is differentiable, then differentiating \eqref{eqn:BarLengths} at $t=0$ yields
$$
\langle p_i - p_j, \dot p_i - \dot p_j \rangle = 0 \mbox{ for every }ij \in \mathcal{E}.
$$
This motivates the following definition.
\begin{dfn}
\label{dfn:InfMot}
A \emph{velocity field} on the framework $P$ is a map
$$
\begin{array}{rrcl}
Q: & \mathcal{V} & \to & {\mathbb R}^d,\\
& i & \mapsto & q_i.
\end{array}
$$
A velocity field on $P$ is called an \emph{infinitesimal motion} of $P$ iff
\begin{equation}
\label{eqn:InfMot}
\langle p_i - p_j, q_i - q_j \rangle = 0 \mbox{ for every }ij \in \mathcal{E}.
\end{equation}
\end{dfn}
Since the conditions \eqref{eqn:InfMot} are linear in $Q$, infinitesimal motions of the framework $P$ form a vector space. Denote this vector space by $\mathcal{Q}_{\mathrm{mot}}$.
Let $\{\Phi_t\}$ be a differentiable family of isometries of ${\mathbb E}^d$ such that $\Phi_0 = \mathrm{id}$. The vector field on ${\mathbb E}^d$ given by
$$
Q(x) = \left.\frac{d\Phi_t(x)}{dt}\right|_{t=0}
$$
is called an \emph{infinitesimal isometry} of ${\mathbb E}^d$. An infinitesimal motion of $P$ that is the restriction of an infinitesimal isometry of ${\mathbb E}^d$ is called \emph{trivial}. The space of trivial infinitesimal motions of $P$ is denoted by $\mathcal{Q}_{\mathrm{triv}}$.
\begin{dfn}
\label{dfn:InfRig}
The framework $P$ is called \emph{infinitesimally rigid} iff all its infinitesimal motions are trivial.
The dimension of the quotient space $\mathcal{Q}_{\mathrm{mot}}/\mathcal{Q}_{\mathrm{triv}}$ is called the number of \emph{kinematic degrees of freedom} of the framework $P$.
\end{dfn}
The framework $P$ is called \emph{rigid} iff every continuous motion $P(t)$ has the form $\Phi_t \circ P$ with $\Phi_t$ a continuous family of isometries of ${\mathbb E}^d$. Intuition suggests that an infinitesimally rigid framework should be rigid. This is true \cite{Con80, RotWhi81}, but not straightforward since not every continuous motion can be reparametrized into a smooth one.
In the opposite direction, rigidity does not imply infinitesimal rigidity. Any of the frameworks in Figures \ref{fig:Blaschke}--\ref{fig:Bipartite} can serve as an example.
\subsection{Static rigidity}
\label{subsec:ClassStat}
In the statics of a rigid body, a force is represented as a line-bound vector. A collection of forces does not necessarily reduce to a single force.
\begin{dfn}
\label{dfn:Force}
A \emph{force} is a pair $(p,f)$ with $p \in {\mathbb E}^d$, $f \in {\mathbb R}^d$. A \emph{system of forces} is a formal sum $\sum_i (p_i,f_i)$ that may be transformed according to the following rules:
\begin{enumerate}
\setcounter{enumi}{-1}
\item a force with a zero vector is a zero force:
$$
(p,0) \sim 0;
$$
\item forces at the same point can be added and scaled as usual:
$$
\lambda_1(p,f_1) + \lambda_2(p,f_2) \sim (p, \lambda_1f_1+\lambda_2f_2);
$$
\item a force may be moved along its line of action:
$$
(p,f) \sim (p + \lambda f, f).
$$
\end{enumerate}
\end{dfn}
In ${\mathbb E}^2$, any system of forces is equivalent either to a single force or to a so-called ``couple'' $(p_1, f) + (p_2, -f)$ with $p_1-p_2 \nparallel f$.
\begin{dfn}
\label{dfn:Load}
A \emph{load} on the framework $P$ is a map
$$
\begin{array}{rrcl}
F: & \mathcal{V} & \to & {\mathbb R}^d,\\
& i & \mapsto & f_i.
\end{array}
$$
A load is called an \emph{equilibrium load} iff the system of forces $\sum_{i \in \mathcal{V}} (p_i, f_i)$ is equivalent to a zero force.
\end{dfn}
A rigid body responds to an equilibrium load by interior stresses that cancel the forces of the load.
\begin{dfn}
\label{dfn:Stress}
A \emph{stress} on the framework $P$ is a map
$$
\begin{array}{rrcl}
\Omega: & \mathcal{E} & \to & {\mathbb R},\\
& ij & \mapsto & \omega_{ij}.
\end{array}
$$
The stress $\Omega$ is said to \emph{resolve} the load $F$ iff
\begin{equation}
\label{eqn:LoadResolv}
f_i + \sum_{j \in \mathcal{V}} \omega_{ij} (p_j - p_i) = 0 \mbox{ for all }i \in \mathcal{V},
\end{equation}
where we assume $\omega_{ij} = 0$ for all $ij \notin \mathcal{E}$.
\end{dfn}
We denote the vector space of equilibrium loads by $\mathcal{F}_{\mathrm{eq}}$, and the vector space of resolvable loads by $\mathcal{F}_{\mathrm{res}}$. It is easy to see that only an equilibrium load can be resolved: $\mathcal{F}_{\mathrm{res}} \subset \mathcal{F}_{\mathrm{eq}}$.
\begin{dfn}
The framework $P$ is called \emph{statically rigid} iff every equilibrium load on $P$ can be resolved.
The dimension of the quotient space $\mathcal{F}_{\mathrm{eq}}/\mathcal{F}_{\mathrm{res}}$ is called the number of \emph{static degrees of freedom} of the framework $P$.
\end{dfn}
\subsection{Relation between infinitesimal and static rigidity}
\label{subsec:InfStat}
Define a pairing between velocity fields and loads on the framework~$P$:
\begin{equation}
\label{eqn:Pairing}
\langle Q, F \rangle = \sum_{i \in \mathcal{V}} \langle q_i, f_i \rangle.
\end{equation}
Clearly, this pairing is non-degenerate, thus it induces a duality between the space of velocity fields and the space of loads.
The following theorem provides a link between statics and kinematics of frameworks.
\begin{thm}
\label{thm:InfStat}
The pairing \eqref{eqn:Pairing} induces a duality
$$
\mathcal{Q}_{\mathrm{mot}}/\mathcal{Q}_{\mathrm{triv}} \cong \left( \mathcal{F}_{\mathrm{eq}}/\mathcal{F}_{\mathrm{res}} \right)^*
$$
between the space of non-trivial infinitesimal motions and the space of non-resolvable equilibrium loads.
In particular, a framework is infinitesimally rigid iff it is statically rigid.
For an infinitesimally flexible framework, the number of kinematic degrees of freedom is equal to the number of static degrees of freedom.
\end{thm}
\begin{proof}
This follows from Lemma \ref{lem:Dual} and from the canonical isomorphism $(V_1/V_2)^* \cong V_2^\perp/V_1^\perp$ for any pair of vector subspaces $V_1 \supset V_2$ of a space~$V$.
\end{proof}
\begin{lem}[Principles of virtual work]
\label{lem:Dual}
Under the pairing \eqref{eqn:Pairing},
\begin{enumerate}
\item
the space of infinitesimal motions is the orthogonal complement of the space of resolvable loads:
$$
\mathcal{Q}_{\mathrm{mot}} = (\mathcal{F}_{\mathrm{res}})^\perp;
$$
\item
the space of trivial infinitesimal motions is the orthogonal complement of the space of equilibrium loads:
$$
\mathcal{Q}_{\mathrm{triv}} = (\mathcal{F}_{\mathrm{eq}})^\perp.
$$
\end{enumerate}
\end{lem}
\begin{proof}
The space of resolvable loads is spanned by the loads $(F^{ij})_{ij \in \mathcal{E}}$ with components
$$
\begin{array}{rcl}
f^{ij}_i & = & p_i - p_j\\
f^{ij}_j & = & p_j - p_i\\
f^{ij}_k & = & 0 \quad \mbox{ for }k \ne i, j.
\end{array}
$$
The orthogonality condition $\langle Q, F^{ij} \rangle = 0$ is equivalent to $\langle q_i-q_j, p_i - p_j \rangle = 0$. Thus $Q \in (\mathcal{F}_{\mathrm{res}})^\perp$ iff $Q$ is an infinitesimal motion, and the first principle is proved.
Let us prove that $\mathcal{Q}_{\mathrm{triv}} \supset (\mathcal{F}_{\mathrm{eq}})^\perp$. Let $Q$ be a velocity field that annihilates every equilibrium load. The load $F^{ij}$ defined in the previous paragraph is an equilibrium load for every $i,j \in \mathcal{V}$ (with $ij$ not necessarily in $\mathcal{E}$). The equations $\langle Q, F^{ij} \rangle = 0$ imply that $Q$ infinitesimally preserves pairwise distances between the points $(p_i)_{i \in \mathcal{V}}$. Therefore $Q$ can be extended to an infinitesimal isometry of ${\mathbb E}^d$, that is $Q \in \mathcal{Q}_{\mathrm{triv}}$.
Let us prove $\mathcal{Q}_{\mathrm{triv}} \subset (\mathcal{F}_{\mathrm{eq}})^\perp$. Let $Q$ be the restriction of an infinitesimal isometry of ${\mathbb E}^d$. We have to show that $\langle Q, F \rangle = 0$ for every equilibrium load $F$. Since the system of forces $\sum_{i \in \mathcal{V}} (p_i, f_i)$ corresponding to $F$ is equivalent to zero, there is a sequence of transformations as in Definition \ref{dfn:Force} that leads from $\sum_i (p_i, f_i)$ to $0$. It is not hard to show that the number $\langle Q, F \rangle$ remains unchanged after each transformation (if a force $(p',f')$ with a new application point $p'$ appears, then we substitute for $q'$ in the expression $\langle q', f' \rangle$ the velocity vector of our global infinitesimal isometry). Since $F$ vanishes at the end, we have $\langle Q, F \rangle = 0$ also at the beginning.
\end{proof}
\begin{cor}
$$
\dim \mathcal{F}_{\mathrm{eq}} = d\,|\mathcal{V}| - \binom{d+1}2.
$$
\end{cor}
\begin{proof}
Due to Lemma \ref{lem:Dual}, $\dim \mathcal{F}_{\mathrm{eq}} = d\,|\mathcal{V}| - \dim \mathcal{Q}_{\mathrm{triv}} = d\,|\mathcal{V}| - \binom{d+1}2$.
\end{proof}
Let $\Phi: {\mathbb E}^d \to {\mathbb E}^d$ be an affine isomorphism. The framework $P' = \Phi \circ P$ is called \emph{affinely equivalent} to $P$.
\begin{cor}
\label{cor:InfAff}
Infinitesimal rigidity is an affine invariant. Moreover, for any two affinely equivalent frameworks there is a canonical bijection between their infinitesimal motions that restricts to a bijection between trivial infinitesimal motions.
Explicitly, let $\Phi: x \mapsto Ax + b$ be an affine isomorphism of ${\mathbb E}^d$, written in an orthonormal coordinate system. Then the map that relates infinitesimal motions of $P$ with infinitesimal motions of $\Phi \circ P$ is $(A^*)^{-1}$.
\end{cor}
\begin{proof}
Static rigidity is affinely invariant in a straightforward way. Definitions in Section \ref{subsec:ClassStat} use only the affine structure of ${\mathbb E}^d$, and not the metric structure. Given an affine isomorphism $\Phi: x \mapsto Ax + b$, the transformation of forces $\Phi^{\mathrm{stat}}: f \mapsto Af$ maps equilibrium loads to equilibrium ones and resolvable to resolvable.
In order to obtain a transformation $\Phi^{\mathrm{kin}}$ of velocity fields, it suffices to require that $\langle \Phi^{\mathrm{kin}}(q), \Phi^{\mathrm{stat}}(f) \rangle = \langle q, f \rangle$ for any $q,f$. This implies the formula $(\Phi^{\mathrm{kin}})^{-1}: q \mapsto A^*q$.
\end{proof}
An alternative proof of the affine invariance of infinitesimal rigidity can be found in \cite{Bla20}.
\subsection{Rigidity matrix}
\label{subsec:RigMat}
The rigidity matrix is a standard tool for computing infinitesimal motions and the number of degrees of freedom of a framework.
\begin{dfn}
\label{dfn:RigMat}
The \emph{rigidity matrix} of a framework $P$ is an $\mathcal{E} \times \mathcal{V}$ matrix with vector entries:
$$
\mathcal{R} = \; \scriptstyle{ij}
\stackrel{\scriptstyle{i}}{\left(\begin{array}{ccc}
& \vdots & \\
\cdots & p_i - p_j & \cdots \\
& \vdots &
\end{array}\right)}.
$$
It has the pattern of the edge-vertex incidence matrix of the graph $(\mathcal{V}, \mathcal{E})$, with $p_i - p_j$ on the intersection of the row $ij$ and the column $i$.
\end{dfn}
Note that the rows of $\mathcal{R}$ are exactly the loads $(F^{ij})_{ij \in \mathcal{E}}$ that span the space $\mathcal{F}_{\mathrm{res}}$, see the proof of Lemma \ref{lem:Dual}. The following proposition is just a reformulation of the first principle of virtual work (Lemma \ref{lem:Dual}, first part), together with its proof.
\begin{prp}
\label{prp:RigMat}
Consider $\mathcal{R}$ as the matrix of a map $({\mathbb R}^d)^\mathcal{V} \to {\mathbb R}^\mathcal{E}$. Then the following holds:
$$
\begin{array}{lcl}
\ker \mathcal{R} & = & \mathcal{Q}_{\mathrm{mot}};\\
\operatorname{im} \mathcal{R}^\top & = & \mathcal{F}_{\mathrm{res}}.
\end{array}
$$
\end{prp}
\begin{cor}
The framework is infinitesimally rigid iff
$$
\operatorname{rk} \mathcal{R} = d\, |\mathcal{V}| - \binom{d+1}2.
$$
\end{cor}
\begin{proof}
Indeed, $\operatorname{rk} \mathcal{R} = d\, |\mathcal{V}| - \dim \ker \mathcal{R}$ which is equal to $d\, |\mathcal{V}| - \dim \mathcal{Q}_{\mathrm{mot}}$ by Proposition \ref{prp:RigMat}. By definition, the framework is infinitesimally rigid iff $\mathcal{Q}_{\mathrm{mot}} = \mathcal{Q}_{\mathrm{triv}}$. Since $\mathcal{Q}_{\mathrm{triv}} \subset \mathcal{Q}_{\mathrm{mot}}$ and $\dim \mathcal{Q}_{\mathrm{triv}} = \binom{d+1}2$, the corollary follows.
\end{proof}
\section{Projective interpretation of rigidity}
\label{sec:Proj}
In this section we prove Theorem \ref{thm:DS}. For that, the static formulation of the infinitesimal rigidity turns out to be the most suitable. The proof amounts to redefining a force in projective terms, compatibly with Definition \ref{dfn:Force}. This is done in Section \ref{subsec:ProjStat}. In the same section we obtain formulas describing the correspondence between the loads in two projectively equivalent frameworks. In Section \ref{subsec:ProjKin} we derive from these formulas of static correspondence formulas of kinematic correspondence, using the duality from Section \ref{subsec:InfStat}.
Finally, we introduce projective analogs of notions of kinematics from Section \ref{subsec:ClassInf}.
Recall that we identify the Euclidean space ${\mathbb E}^d$ with the affine hyperplane $\{x^0 = 1\}$ of ${\mathbb R}^{d+1}$. This induces an affine embedding of ${\mathbb E}^d$ into ${\mathbb R}P^d$. We write points of ${\mathbb R}P^d$ as equivalence classes $[x]$ of points of ${\mathbb R}^{d+1} \setminus \{0\}$.
\subsection{Projective statics}
\label{subsec:ProjStat}
\begin{dfn}
\label{dfn:ProjFram}
A \emph{projective framework} with graph $(\mathcal{V},\mathcal{E})$ is a map
$$
\begin{array}{rrcl}
X: & \mathcal{V} & \to & {\mathbb R}P^d,\\
& i & \mapsto & [x_i]
\end{array}
$$
such that $[x_i] \ne [x_j]$ whenever $ij \in \mathcal{E}$.
\end{dfn}
An affine embedding of ${\mathbb E}^d$ into ${\mathbb R}P^d$ associates a projective framework to every Euclidean framework.
\begin{dfn}
A force applied at a point $[x] \in {\mathbb R}P^d$ is a decomposable bivector divisible by $x$.
\end{dfn}
Thus every force at $[x]$ can be written as $x \wedge y \in \Lambda^2 {\mathbb R}^{d+1}$.
Let $(p,f)$ be a force in the sense of Definition \ref{dfn:Force}, i.e. $p \in {\mathbb E}^d \subset {\mathbb R}^{d+1}$, $f \in T_p {\mathbb E}^d \cong {\mathbb R}^d = \{x^0 = 0\}$. Associate with $(p,f)$ the bivector $p \wedge f$.
\begin{prp}
\label{prp:ProjStatCorr}
The extension of the map
\begin{equation}
\label{eqn:StatMap}
(p,f) \mapsto p \wedge f
\end{equation}
by linearity is well-defined and establishes an isomorphism of systems of forces on ${\mathbb E}^d$ with $\Lambda^2 {\mathbb R}^{d+1}$.
\end{prp}
\begin{proof}
The extension is well-defined since the equivalence relations from Definition \ref{dfn:Force} are respected by the map \eqref{eqn:StatMap}.
Let us prove that the map is surjective. It suffices to show that any decomposable bivector $x \wedge y \in \Lambda^2 {\mathbb R}^{d+1}$ is an image of a system of forces. If the plane spanned by $x$ and $y$ is not contained in ${\mathbb R}^d$, then there is a point $p \in \operatorname{span}\{x,y\} \cap {\mathbb E}^d$, and hence $x \wedge y = p \wedge f$ for an appropriate $f \in {\mathbb R}^d$. Otherwise, $x,y \in {\mathbb R}^d$. In this case represent $x$ as $x_1+x_2$ with $x_1,x_2 \notin {\mathbb R}^d$. Then the sum $x_1 \wedge y +x_2 \wedge y$ corresponds to a force couple.
To prove the injectivity, it suffices to show that the space of systems of forces on ${\mathbb E}^d$ has dimension at most $\binom{d+1}{2} = \dim \Lambda^2 {\mathbb R}^{d+1}$. This follows from an easy fact that any force can be written as a linear combination of forces from the set $\{(p_i, p_i - p_j)|\: i < j\}$, where $p_0, \ldots, p_d$ is a set of affinely independent points in ${\mathbb E}^d$.
\end{proof}
Due to Proposition \ref{prp:ProjStatCorr}, the following definitions are compatible with definitions of Section \ref{subsec:ClassStat}.
\begin{dfn}
A load on a projective framework $X$ is a map
$$
\begin{array}{rrcl}
G: & \mathcal{V} & \to & \Lambda^2 {\mathbb R}^{d+1},\\
& i & \mapsto & g_i,
\end{array}
$$
where $g_i$ is a force at $[x_i]$. A load is called an equilibrium load iff $\sum_{i \in \mathcal{V}} g_i = 0$.
\end{dfn}
\begin{dfn}
Let $X$ be a projective framework with graph $(\mathcal{V}, \mathcal{E})$. Denote by $\mathcal{E}_{\mathrm{or}}$ the set of oriented edges: $\mathcal{E}_{\mathrm{or}} = \{(i,j)|\: ij \in \mathcal{E}\}.$ A stress on $X$ is a map
$$
\begin{array}{rrcl}
W: & \mathcal{E}_{\mathrm{or}} & \to & \Lambda^2 {\mathbb R}^{d+1},\\
& (i,j) & \mapsto & w_{ij}
\end{array}
$$
such that $w_{ij} \in \Lambda^2 \operatorname{span}\{x_i,x_j\}$ and $w_{ij} = -w_{ji}$.
The stress $W$ is said to resolve the load $G$ iff for all $i \in \mathcal{V}$ we have
$$
g_i = \sum_j w_{ij}.
$$
\end{dfn}
\begin{prp}
\label{prp:DS_Stat}
Let $P$ and $P'$ be two frameworks in ${\mathbb E}^d \subset {\mathbb R}P^d$ such that $P' = \Phi \circ P$, where $\Phi: {\mathbb R}P^d \to {\mathbb R}P^d$ is a projective map. Then there is an isomorphism between the spaces of equilibrium loads on $P$ and $P'$ that maps resolvable loads to resolvable ones.
\end{prp}
\begin{proof}
Choose a representative $M \in GL({\mathbb R}^{d+1})$ for~$\Phi$ and denote by $X$ and $X'$ the projective frameworks associated to $P$ and $P'$.
The map $M$ induces a map $M_*: \Lambda^2 {\mathbb R}^{d+1} \to \Lambda^2 {\mathbb R}^{d+1}$. Being a linear isomorphism, $M_*$ maps equilibrium loads on $X$ to equilibrium loads on $X'$, and resolvable ones to resolvable ones. Due to Proposition \ref{prp:ProjStatCorr}, the (projective) loads on $X$, respectively $X'$, nicely correspond to (Euclidean) loads on $P$, respectively $P'$. This yields the desired isomorphism between the spaces of loads on $P$ and $P'$.
\end{proof}
Denote by $\Phi^{\mathrm{stat}}$ the isomorphism between the spaces of loads on $P$ and $P'$ constructed in the proof of Proposition \ref{prp:DS_Stat}. It consists of a family of isomorphisms
$$
\Phi^{\mathrm{stat}}_p: T_p {\mathbb E}^d \to T_{\Phi(p)} {\mathbb E}^d, \quad p \in {\mathbb E}^d.
$$
Since the construction involves the choice of a representative $M$, the isomorphism $\Phi^{\mathrm{stat}}$ is determined only up to scaling. The next two propositions describe $\Phi^{\mathrm{stat}}$ explicitly.
\begin{prp}
\label{prp:A_p1}
Let $L \subset {\mathbb E}^d$ be the hyperplane that is sent to infinity by the projective map $\Phi$. Then
\begin{equation}
\label{eqn:A_p1}
\Phi^{\mathrm{stat}}_p(f) = h_L(p) h_L(p+f) \cdot \left(\Phi(p+f) - \Phi(p)\right), \quad \mbox{if } p+f \notin L,
\end{equation}
where $h_L$ denotes the signed distance to the hyperplane $L$.
In other words: to obtain $\Phi^{\mathrm{stat}}_p(f)$, map the application point and the endpoint of the vector $(p,f)$ by the map $\Phi$, and scale the resulting vector by the product of distances of these points from the hyperplane $L$.
\end{prp}
If $(p,f)$ is such that $p+f \in L$, then \eqref{eqn:A_p1} contains an indeterminacy. In this case the map $\Phi^{\mathrm{stat}}_p$ can be extended to $f$ by continuity or by linearity.
\begin{proof}
Consider $P$ as a projective framework. Choose a representative $M \in GL({\mathbb R}^{d+1})$ of $\Phi$. The vector $\Phi^{\mathrm{stat}}_p(f)$ is uniquely determined by the equation
\begin{equation}
\label{eqn:CondA_p}
M_*(p \wedge f) = \Phi(p) \wedge \Phi^{\mathrm{stat}}_p(f)
\end{equation}
and the condition $\Phi^{\mathrm{stat}}_p(f) \in {\mathbb R}^d$.
Denote $x': = M(p)$. Then we have
$$
x' = \lambda \cdot \Phi(p).
$$
It is not hard to see that
$$
\lambda = c \cdot h_L(p)
$$
for some constant $c$ independent of $p$. Thus we have
\begin{eqnarray*}
M_*(p \wedge f) & = & M_*(p \wedge (p+f)) = c^2 h_L(p) h_L(p+f) \cdot \Phi(p) \wedge \Phi(p+f)\\
& = & c^2 h_L(p) h_L(p+f) \cdot \Phi(p) \wedge \left(\Phi(p+f) - \Phi(p)\right).
\end{eqnarray*}
Choosing $M$ so that $c=1$, we obtain \eqref{eqn:A_p1} from \eqref{eqn:CondA_p}.
\end{proof}
\begin{prp}
\begin{equation}
\label{eqn:A_p2}
\Phi^{\mathrm{stat}}_p = h_L^2(p) \cdot \mathrm{d}\Phi_p,
\end{equation}
where $\mathrm{d}\Phi_p$ is the differential of the map $\Phi$ at the point $p$.
\end{prp}
\begin{proof}
This follows from \eqref{eqn:A_p1} by replacing $f$ with $tf$ and taking the limit as $t \to 0$.
There is also a simple direct proof. From the proof of Proposition \ref{prp:DS_Stat}, it is immediate that the vectors $\Phi^{\mathrm{stat}}_p(f)$ and $\mathrm{d}\Phi_p(f)$ are collinear for every~$f$. Since $\Phi^{\mathrm{stat}}_p$ and $\mathrm{d}\Phi_p$ are linear maps, this implies
\begin{equation}
\label{eqn:Prop}
\Phi^{\mathrm{stat}}_p = \lambda(p) \cdot \mathrm{d}\Phi_p,
\end{equation}
and it remains to determine the function $\lambda(p)$. Consider two arbitrary points $p_1$ and $p_2$ that are not mapped to infinity by $\Phi$ and the forces $p_2-p_1$ at $p_1$ and $p_1-p_2$ at $p_2$. Since these forces are in equilibrium, so must be their images. Thus we have
\begin{equation}
\label{eqn:condA_p2}
\frac{\lambda(p_1)}{\lambda(p_2)} = \frac{\|d\Phi_{p_2}(p_1-p_2)\|}{\|d\Phi_{p_1}(p_1-p_2)\|}.
\end{equation}
To compute the right hand side, restrict the map $\Phi$ to the line $p_1p_2$. In a coordinate system with the origin at the intersection point of $p_1p_2$ with the hyperplane $L$, this restriction takes the form $x \mapsto c/x$. Since the derivative of $c/x$ is proportional to $x^{-2}$, and $x$ is proportional to $h_L$, \eqref{eqn:Prop} and \eqref{eqn:condA_p2} imply \eqref{eqn:A_p2} (we can forget about $c$ because $\Phi^{\mathrm{stat}}$ is defined up to scaling).
\end{proof}
\subsection{Projective kinematics}
\label{subsec:ProjKin}
\begin{prp}
\label{prp:DS_Kin}
Let $P$ and $P'$ be two frameworks in ${\mathbb E}^d \subset {\mathbb R}P^d$ such that $P' = \Phi \circ P$, where $\Phi: {\mathbb R}P^d \to {\mathbb R}P^d$ is a projective map. Then there is an isomorphism $\Phi^{\mathrm{kin}}$ between the infinitesimal motions of $P$ and $P'$ that maps trivial infinitesimal motions to trivial ones.
The map $\Phi^{\mathrm{kin}}$ consists of a family of isomorphisms $\Phi^{\mathrm{kin}}_p: T_p{\mathbb E}^d \to T_{\Phi(p)}{\mathbb E}^d$ given by
\begin{equation}
\label{eqn:Phi^kin}
\Phi^{\mathrm{kin}}_p = h_L^{-2}(p) \cdot (\mathrm{d}\Phi_p^{-1})^*,
\end{equation}
where $h_L$ denotes the signed distance to the hyperplane $L$ sent to infinity by~$\Phi$.
\end{prp}
\begin{proof}
This is a direct consequence of Theorem \ref{thm:InfStat}, Proposition \ref{prp:DS_Stat} and formula \eqref{eqn:A_p2}.
\end{proof}
For the sake of completeness, and out of curiosity, let us find the projective counterparts of the notions of kinematics.
Let $X$ be a framework in ${\mathbb R}P^d$ with graph $(\mathcal{V}, \mathcal{E})$.
\begin{dfn}
A velocity vector at a point $[x] \in {\mathbb R}P^d$ is an element of the vector space $(\Lambda^2 {\mathbb R}^{d+1})^*/\Lambda^2 x^\perp$, where $x^\perp \subset ({\mathbb R}^{d+1})^*$ denotes the orthogonal complement of $x$.
\end{dfn}
Here is the motivation for this definition.
\begin{lem}
For a projective framework, the vector space of velocities at $[x]$ is dual to the vector space of forces at $[x]$.
\end{lem}
\begin{proof}
Indeed, the space of forces at $x$ is $x \wedge {\mathbb R}^{d+1} \subset \Lambda^2 {\mathbb R}^{d+1}$. For a subspace $W$ of a vector space $V$, there is a canonical isomorphism $W^* \cong V^*/W^\perp$. Since $(x \wedge {\mathbb R}^{d+1})^\perp = \Lambda^2 x^\perp$, the proposition follows.
\end{proof}
\begin{dfn}
A velocity field $(\tau_i)_{i \in \mathcal{V}}$ on a projective framework $X$ is called an infinitesimal motion iff
\begin{equation}
\label{eqn:ProjInfMot}
\langle x_i \wedge x_j, \tau_i - \tau_j \rangle = 0 \mbox{ for every }ij \in \mathcal{E}.
\end{equation}
An infinitesimal motion is called trivial iff there exists $\tau \in (\Lambda^2 {\mathbb R}^{d+1})^*$ such that $\tau_i = \tau + \Lambda^2 x_i^\perp$ for all $i \in \mathcal{V}$.
\end{dfn}
Note that the difference $\tau_i - \tau_j \in (\Lambda^2 {\mathbb R}^{d+1})^*/(x_i \wedge x_j)^\perp$ is well-defined since $\Lambda^2 x_i^\perp \subset (x_i \wedge x_j)^\perp \supset \Lambda^2 x_j^\perp$.
Let us establish a correspondence with the notions of Section~\ref{subsec:ClassInf}. Recall that the Euclidean space ${\mathbb E}^d$ is identified with the hyperplane $\{x^0 = 1\} \subset {\mathbb R}^{d+1}$. Consider a framework $P$ in ${\mathbb E}^d$ as a projective framework $X$ with $x_i = p_i$. To any classical velocity vector $q \in T_p {\mathbb E}^d$ associate a projective velocity vector $\tau \in (p \wedge {\mathbb R}^{d+1})^*$ given by
\begin{equation}
\label{eqn:q_to_tau}
\langle p \wedge y, \tau \rangle := \langle y, q \rangle \mbox{ for every }y \in T_p{\mathbb E}^d.
\end{equation}
(The angle brackets at the right hand side mean the scalar product in $T_p {\mathbb E}^d$.) Conversely, for every $\tau \in (\Lambda^2 {\mathbb R}^{d+1})^*/\Lambda^2 p^\perp$ consider the (well-defined) covector $p \lrcorner \tau \in p^\perp \subset ({\mathbb R}^{d+1})^*$. Restrict $p \lrcorner \tau$ to ${\mathbb R}^d$, identify ${\mathbb R}^d$ with $T_p {\mathbb E}^d$ by parallel translation, and identify $T_p {\mathbb E}^d$ with $T^*_p {\mathbb E}^d$ using the scalar product. Denote the result by~$q$:
\begin{equation}
\label{eqn:tau_to_q}
q := (p \lrcorner \tau|_{{\mathbb R}^d})^*.
\end{equation}
It is not hard to see that \eqref{eqn:q_to_tau} and \eqref{eqn:tau_to_q} define an isomorphism from $T_p {\mathbb E}^d$ to $(\Lambda^2 {\mathbb R}^{d+1})^*/\Lambda^2 p^\perp$ and its inverse.
\begin{lem} \label{lem:EucProj}
Let $P$ be a framework on ${\mathbb E}^d$, and let $X$ be the corresponding projective framework on ${\mathbb R}P^d$. Let $Q$ be a velocity field on $P$, and let $T$ be the velocity field on $X$ associated to $Q$ via \eqref{eqn:q_to_tau}. Then $T$ is an infinitesimal motion of $X$ if and only if $Q$ is an infinitesimal motion of $P$, and $T$ is trivial if and only if $Q$ is trivial.
\end{lem}
\begin{proof}
Equation \eqref{eqn:q_to_tau} implies
$$
\langle p_i - p_j, q_i - q_j \rangle = \langle p_i - p_j, q_i \rangle - \langle p_i - p_j, q_j \rangle = -\langle p_i \wedge p_j, \tau_i - \tau_j \rangle.
$$
Therefore $T$ satisfies \eqref{eqn:ProjInfMot} iff $Q$ satisfies \eqref{eqn:InfMot}.
Assume that $T$ is trivial: $\tau_i = \tau + \Lambda^2 x_i^\perp$ for some $\tau$. Then for every $i,j \in \mathcal{V}$ we have
$$
\langle q_i - q_j, p_i - p_j \rangle = \langle p_i \lrcorner \tau - p_j \lrcorner \tau, p_i - p_j \rangle = 0,
$$
which implies that $Q$ can be extended to an infinitesimal isometry of ${\mathbb E}^d$. Thus if $T$ is trivial, so is $Q$. To prove the converse implication, compute the dimensions of the spaces of trivial motions. For the Euclidean framework $P$ it is equal to $\binom{d+1}2$ (recall that $(p_i)_{i \in \mathcal{V}}$ affinely span ${\mathbb E}^d$ by assumption). For the corresponding projective framework it is equal to the rank of the map
$$
(\Lambda^2 {\mathbb R}^{d+1})^* \to \bigoplus_{i \in \mathcal{V}} (\Lambda^2 {\mathbb R}^{d+1})^*/\Lambda^2 p_i^\perp.
$$
Since the vectors $(p_i)_{i \in \mathcal{V}}$ span ${\mathbb R}^{d+1}$, this map is injective, so its rank is equal to $\dim (\Lambda^2 {\mathbb R}^{d+1})^* = \binom{d+1}2$.
\end{proof}
\subsection{Remarks}
\label{subsec:rem}
Grassmann introduced in his book of 1844 ``Die lineale Ausdehn\-ungs\-lehre'' the bivector representation of forces acting on a rigid body (in terms of what we now call Grassmann coordinates). A good account on that is given in \cite{Kle09}. As Klein remarks, ``This book... is written in a style that is extraordinarily obscure, so that for decades it was not considered nor understood. Only when similar trains of thought came from other sources were they recognized belatedly in Grassmann's book.''
Once spelled out, the bivector representation of forces readily implies the projective invariance of static rigidity. Apparently, this was observed by Rankine in \cite{Ran63}, where he writes ``...theorems discovered by Mr. Sylvester ... obviously give at once the solution of the question''. Unfortunately, we don't know which theorems are meant; probably this is something similar to Proposition \ref{prp:ProjStatCorr}.
An exposition of these elegant but unfortunately little known ideas, along with additional references, can be found in \cite{CW82}, \cite{Whi84c}.
It seems that the observation of Rankine wasn't given much attention, because the next mention of the projective invariance of static rigidity I am aware of is 1920 in the paper \cite{Lie20} of Liebmann. Liebmann proves it only for frameworks with $|\mathcal{E}| = d\, |\mathcal{V}| - \binom{d+1}2$ that contain $d$ pairwise connected joints. In this case the rigidity matrix can be reduced to a square matrix by fixing the positions of these $d$ joints. Infinitesimal or static rigidity is then equivalent to vanishing of the determinant of this square matrix. Liebmann shows that the determinant is multiplied with a non-zero factor when the framework undergoes a projective transformation. This argument can probably be extended to the general case, but doesn't seem to produce a correspondence between the loads or velocity fields of two projectively equivalent frameworks.
Sauer in \cite{Sau35a} gives a proof of the projective invariance of static rigidity using Grassmann coordinates of forces and finds formula \eqref{eqn:A_p1}. In \cite{Sau35b}, Sauer proves the projective invariance of infinitesimal rigidity in an independent way.
For smooth surfaces in ${\mathbb R}^3$, the projective invariance of infinitesimal rigidity is proved by Darboux \cite{Dar96}.
Other proofs can be found in Wunderlich \cite{Wun82} for frameworks, and Volkov \cite{Vol74} for smooth manifolds.
The association $\Phi \mapsto \Phi^{\mathrm{stat}}$ as described by formulas \eqref{eqn:A_p1} and \eqref{eqn:A_p2} fails to be a functor. Namely, the equation
$$
(\Phi \circ \Psi)^{\mathrm{stat}}_p = \Phi^{\mathrm{stat}}_{\Psi(p)} \circ \Psi^{\mathrm{stat}}_p
$$
holds only up to a constant factor, because the definition of $\Phi^{\mathrm{stat}}$ involved choosing a representative $M \in GL({\mathbb R}^{d+1})$ of $\Phi \in PGL({\mathbb R}^{d+1})$. If one would like to have functoriality, one should choose the matrix $M$ in $SL_\pm({\mathbb R}^{d+1}) = \{M \in GL({\mathbb R}^{d+1})|\; \det M = \pm 1\}$. When $d$ is even, there are two possibilities, but they lead to the same map $\Phi^{\mathrm{stat}}$ due to $(-x) \wedge (-y) = x \wedge y$. Choosing $M$ in $SL_\pm({\mathbb R}^{d+1})$ changes the formulas \eqref{eqn:A_p1} and \eqref{eqn:A_p2} by a constant factor that depends on $\Phi$. It would be interesting to know whether this factor has a geometric meaning.
Notions of statics clearly have a homological flavor: equilibrium loads are kind of cycles, resolvable loads are kind of boundaries. This is easy to formalize; in the projective interpretation of Section \ref{subsec:ProjStat} we have a chain complex
$$
\bigoplus_\mathcal{E} \Lambda^2 \operatorname{span}\{x_i,x_j\} \stackrel{\delta}\longrightarrow \bigoplus_\mathcal{V} x_i \wedge {\mathbb R}^{d+1} \stackrel{\epsilon}\longrightarrow \Lambda^2 {\mathbb R}^{d+1}
$$
with appropriately defined maps, so that $\ker \epsilon$ consists of equilibrium loads, and $\operatorname{im} \delta$ of resolvable loads on framework $X$. For kinematics, there is a dual cochain complex
$$
\bigoplus_\mathcal{E} (\Lambda^2 {\mathbb R}^{d+1})^*/(x_i \wedge x_j)^\perp \stackrel{d}\longleftarrow \bigoplus_\mathcal{V} (\Lambda^2 {\mathbb R}^{d+1})^*/\Lambda^2 x_i^\perp \stackrel{\iota}\longleftarrow (\Lambda^2 {\mathbb R}^{d+1})^*
$$
with $d = \delta^*, \iota = \epsilon^*$. The maps $\delta$ and $d$ can be expressed through the rigidity matrix defined in Section \ref{subsec:RigMat}.
There exist higher-dimensional generalizations of statics, see \cite{TWW95}, \cite{TW00}, \cite{Lee96}. By duality they are related to the algebra of weights \cite{McM93}, \cite{McM96}, and to the combinatorial intersection cohomology \cite{Bra06}. Algebraic properties of the arising chain complexes can be used to prove deep theorems on the combinatorics of simplicial polytopes \cite{Sta80}, \cite{Kal87}, \cite{McM93}.
\section{Infinitesimal Pogorelov maps}
\label{sec:InfPog}
\subsection{Proof of Theorem \ref{thm:PogMaps}}
The definitions of frameworks in ${\mathbb H}^d$ and ${\mathbb S}^d$ repeat Definition \ref{dfn:Framework}. Let us define infinitesimal motions.
\begin{dfn}
\label{dfn:HInfMot}
Let $P$ be a framework in ${\mathbb H}^d$ or ${\mathbb S}^d$ with graph $(\mathcal{V}, \mathcal{E})$. A velocity field is a collection $(q_i)_{i \in \mathcal{V}}$ of tangent vectors at the vertices of the framework: $q_i \in T_{p_i}{\mathbb H}^d$, respectively $q_i \in T_{p_i}{\mathbb S}^d$.
A velocity field $(q_i)$ is called an infinitesimal motion of $P$ iff
$$
\left.\frac{d}{dt}\right|_{t=0} \operatorname{dist}(p_i(t), p_j(t)) = 0
$$
for every family $(p_i(t))$ such that $p_i(0) = p_i$, $\dot p_i(0) = q_i$.
An infinitesimal motion is called trivial iff it is generated by a differentiable family of isometries of ${\mathbb H}^d$, respectively ${\mathbb S}^d$.
\end{dfn}
Recall that we identify ${\mathbb H}^d$ and ${\mathbb S}^d$ with subsets of ${\mathbb R}^{d+1}$ according to \eqref{eqn:H^d} and \eqref{eqn:S^d}. The following lemma is straightforward.
\begin{lem}
\label{lem:ScalProd}
A velocity field $(q_i)_{i \in \mathcal{V}}$ is an infinitesimal motion of $P$ iff ${\langle p_i-p_j, q_i-q_j \rangle = 0}$ for every $ij \in \mathcal{E}$. Here $\langle \cdot\, , \cdot \rangle$ denotes the Minkowski or the Euclidean scalar product in ${\mathbb R}^{d+1}$, according to whether $P$ is a hyperbolic or a spherical framework.
The tangent space at $p$ is identified with a vector subspace of ${\mathbb R}^{d+1}$.
\end{lem}
Embeddings \eqref{eqn:H^d} and \eqref{eqn:S^d} allow us to associate with every framework $P$ in ${\mathbb H}^d$ or ${\mathbb S}^d$ a projective framework $X$. Exactly as in the Euclidean case, formulas \eqref{eqn:q_to_tau} and \eqref{eqn:tau_to_q} define a natural bijection between velocity fields on frameworks $P$ and $X$.
\begin{lem}
\label{lem:HypProj}
Let $P$ be a framework in ${\mathbb H}^d$ or ${\mathbb S}^d$, and let $X$ be the corresponding projective framework. Let $Q$ be a velocity field on $P$, and let $T$ be the velocity field on $X$ associated with $Q$. Then $T$ is an infinitesimal motion of $X$ if and only if $Q$ is an infinitesimal motion of $P$, and $T$ is trivial if and only if $Q$ is trivial.
\end{lem}
\begin{proof}
Due to Lemma \ref{lem:ScalProd}, the arguments from the proof of Lemma \ref{lem:EucProj} can be applied.
\end{proof}
Theorem \ref{thm:PogMaps} now follows from Lemmas \ref{lem:EucProj} and \ref{lem:HypProj}.
\subsection{Computing Pogorelov maps}
Let $P$ be a Euclidean framework with associated hyperbolic and spherical frameworks $P^{\mathbb H}$ and $P^{\mathbb S}$. Our proof of Theorem \ref{thm:PogMaps} shows that there are natural bijections between velocity fields on $P$, $P^{\mathbb H}$, and $P^{\mathbb S}$ that map infinitesimal motions to infinitesimal motions and respect the triviality property. Let us denote the velocity fields associated with $Q = (q_i)$ by $Q^{\mathbb H} = (q^{\mathbb H}_i)$ and $Q^{\mathbb S} = (q^{\mathbb S}_i)$.
\begin{prp}
\label{prp:PogForm}
The velocity fields $Q$, $Q^{\mathbb H}$, and $Q^{\mathbb S}$ are related by the equations
\begin{eqnarray*}
q_i & = & \operatorname{pr}(\sqrt{1-\|p_i-c\|^2} \cdot q_i^{\mathbb H});\\
q_i & = & \operatorname{pr}(\sqrt{1+\|p_i-c\|^2} \cdot q_i^{\mathbb S}),
\end{eqnarray*}
with $\|\cdot\|$ denoting the Euclidean norm. Here $c \in {\mathbb E}^d \subset {\mathbb R}^{d+1}$ is the point with coordinates $(1,0,\ldots,0)$ (the ``tangent point'' of ${\mathbb E}^d$, ${\mathbb H}^d$, and ${\mathbb S}^d$), and $\operatorname{pr}: {\mathbb R}^{d+1} \to {\mathbb R}^d$ is the projection $(x^0, x^1, \ldots, x^d) \mapsto (x^1,\ldots,x^d)$.
\end{prp}
\begin{proof}
For brevity, let us omit the index $i$ and denote by $p^{\mathbb H}$ the vertex of framework $P^{\mathbb H}$ corresponding to the vertex $p$ of $P$. Let $\tau$ be a velocity vector at the vertex $[p]$ in the underlying projective framework. By definition, we have
\begin{eqnarray*}
q & = & (p \lrcorner \tau|_{T_p{\mathbb E}^d})^*;\\
q^{\mathbb H} & = & (p^{\mathbb H} \lrcorner \tau|_{T_{p^{\mathbb H}}{\mathbb H}^d})^*.
\end{eqnarray*}
Since $p^{\mathbb H} \lrcorner \tau \in p^\perp$, we have $q^{\mathbb H} = (p^{\mathbb H} \lrcorner \tau)^*$, where this time $\alpha \mapsto \alpha^*$ denotes the isomorphism ${\mathbb R}^{d+1} \to ({\mathbb R}^{d+1})^*$ induced by the Minkowski scalar product. Also, it is not hard to show that $q = \operatorname{pr}((p \lrcorner \tau)^*)$. From
$$
p^{\mathbb H} = \sqrt{1-\|p-c\|^2} \cdot p
$$
we obtain the first formula of the proposition. The formula connecting $q$ with $q^{\mathbb S}$ is proved similarly, replacing the Minkowski scalar product in ${\mathbb R}^{d+1}$ with the Euclidean one.
\end{proof}
\subsection{Remarks}
A different derivation of the formulas of Proposition \ref{prp:PogForm} can be found in~\cite{SW07}.
In addition to infinitesimal Pogorelov maps there are finite Pogorelov maps, \cite{Pog73}. They associate with a pair of isometric hypersurfaces $P_1, P_2$ in ${\mathbb E}^d$ pairs $P_1^{\mathbb H}, P_2^{\mathbb H}$ and $P_1^{\mathbb S}, P_2^{\mathbb S}$ of isometric hypersurfaces in the hyperbolic and in the spherical space, respectively.
Liebmann in \cite{Lie20} proves the projective invariance of static rigidity for a certain class of frameworks, see Section \ref{subsec:rem}. After developing statics and kinematics in an arbitrary Cayley metric (which was also done by Lindemann \cite{Lin74}), he proves that the static rigidity of a framework doesn't depend on the choice of the metric.
In the smooth case, Volkov \cite{Vol74} proves that a map between Riemannian manifolds that sends geodesics to geodesics maps infinitesimally flexible hypersurfaces to infinitesimally flexible ones. Since projective maps of ${\mathbb E}^d$ to itself and gnomonic projections of the Euclidean space to the spherical and hyperbolic spaces send geodesics to geodesics, Volkov's theorem includes Darboux' and Pogorelov's as special cases.
There also exist infinitesimal Pogorelov maps to frameworks in the de Sitter space $\mathrm{d}\mathbb{S}^d$ (the one-sheeted hyperboloid $\{\|x\|_{1,d} = -1\}$ with the metric induced by the Minkowski metric). The metric on $\mathrm{d}\mathbb{S}^d$ is Lorentzian of constant curvature 1. The polar dual to a hyperbolic polyhedron is a de Sitter polyhedron. Thus, the Pogorelov map from ${\mathbb H}^d$ to $\mathrm{d}\mathbb{S}^d$ can be given a more geometric meaning, when the relations between polarity and infinitesimal rigidity are taken into account.
\end{document} |
\begin{document}
\title{Twisting quasi-alternating links}
\author{Abhijit Champanerkar}
\address{Department of Mathematics, College of Staten Island, City University of New York}
\email{[email protected]}
\thanks{The first author is supported by NSF grant DMS-0844485.}
\author{Ilya Kofman}
\address{Department of Mathematics, College of Staten Island, City University of New York}
\email{[email protected]}
\thanks{The second author is supported by NSF grant DMS-0456227 and a PSC-CUNY grant.}
\subjclass[2000]{Primary 57M25}
\keywords{Khovanov homology, knot Floer homology, pretzel link}
\date{\today}
\begin{abstract}
\noindent
Quasi-alternating links are homologically thin for both Khovanov
homology and knot Floer homology. We show that every
quasi-alternating link gives rise to an infinite family of
quasi-alternating links obtained by replacing a crossing with an
alternating rational tangle. Consequently, we show that many pretzel
links are quasi-alternating, and we determine the thickness of
Khovanov homology for ``most'' pretzel links with arbitrarily many
strands.
\end{abstract}
\commby{Daniel Ruberman}
\maketitle
\section{Introduction}
In \cite{CM_PO}, quasi-alternating links were shown to be homologically
thin for both Khovanov homology and knot Floer homology. It is the
most inclusive known description of homologically thin links. However, the
recursive definition makes it difficult to decide whether a given knot
or link is quasi-alternating.
\begin{definition}\label{qadef}(\cite{OzSz_double_covers})
The set $\mathcal{Q}$ of quasi-alternating links is the smallest set of links
satisfying the following properties:
\begin{itemize}
\item The unknot is in $\mathcal{Q}$.
\item If the link $\mathcal L$ has a diagram $L$ with a crossing $c$ such that
\begin{enumerate}
\item both smoothings of $c$, $L_0$ and $L_{\infty}$ as in Figure \ref{skeinfig}, are in $\mathcal{Q}$;
\item $\det(L)=\det(L_0)+\det(L_{\infty})$;
\end{enumerate}
$\!$then $\mathcal L$ is in $\mathcal{Q}$.
\end{itemize}
We will say that $c$ as above is a {\em quasi-alternating crossing} of
$L$, and that $L$ is {\em quasi-alternating at} $c$.
\end{definition}
An oriented $3$--manifold $Y$ is called an {\em {\rm L}--space} if $b_1(Y)=0$
and $|H_1(Y;\,\mathbb Z)|=rk\widehat{HF}(Y)$, where $\widehat{HF}$ denotes the
Heegaard-Floer homology. For a link $\mathcal L$ in $S^3\!$, let $\Sigma(\mathcal L)$ denote its branched double cover.
The following property is the reason for interest in quasi-alternating links:
\begin{prop}[Proposition 3.3 \cite{OzSz_double_covers}]\label{Lprop}
If $\mathcal L$ is a quasi-alternating link, then $\Sigma(\mathcal L)$ is an {\rm L}--space.
\end{prop}
In this note, we show that a quasi-alternating crossing can be
replaced by an alternating rational tangle to obtain another
quasi-alternating link (Theorem \ref{maintheorem}). Thus, the set of
quasi-alternating links includes many non-trivial infinite families.
For pretzel links, their Khovanov homology and knot Floer homology
have been computed only for $3$--strand pretzel links (see
\cite{OSz_2004, Eftekhary, Suzuki}). By repeatedly applying Theorem
\ref{maintheorem}, we show that many pretzel links with arbitrarily many
strands are quasi-alternating (Theorem \ref{pretzeltheorem}(1)).
Therefore, their respective homologies can be computed from the
signature and the Jones and Alexander polynomials\footnote{A closed formula
for the Kauffman bracket of arbitrary pretzel links is given in
\cite{cjp}; for the Alexander polynomial, see \cite{Hironaka} and references
therein.}. Most other pretzel links are not quasi-alternating since
their Khovanov homology lies on exactly three diagonals
(Theorem \ref{pretzeltheorem}(2)). Thus,
Theorem \ref{pretzeltheorem} gives the thickness of Khovanov
homology for ``most'' pretzel links with arbitrarily many strands.
In Section \ref{10crossqa}, we apply Theorem \ref{maintheorem} to complete
the classification of quasi-alternating knots up to 10 crossings.
Finally, the Turaev genus plus two bounds the homological thickness
for both Khovanov homology and knot Floer homology \cite{Manturov, dkh,
Lowrance}. The Turaev genus is preserved after inserting a rational
tangle as in Theorem \ref{maintheorem}.
We do not know of any quasi-alternating links with Turaev genus greater than one.
\begin{question}
Do there exist quasi-alternating links with arbitrary Turaev genus?
\end{question}
\section{Twisting quasi-alternating links}
Let $c$ be a quasi-alternating crossing of a link diagram $L$, as in Definition \ref{qadef}.
We consider $c$ as a $2$-tangle with marked endpoints. Using Conway's notation for
rational tangles, let $\varepsilon(c)=\pm 1$, according to whether the
overstrand has positive or negative slope. We will say that a
rational $2$-tangle $\tau=C(a_1,\ldots,a_m)$ {\em extends} $c$ if
$\tau$ contains $c$, and for all $i,\ \varepsilon(c)\cdot a_i \geq 1$.
In particular, $\tau$ is an alternating rational tangle.
\begin{theorem}\label{maintheorem}
If $L$ is a quasi-alternating link diagram, let $L'$ be obtained by
replacing any quasi-alternating crossing $c$ with an alternating
rational tangle that extends $c$. Then $L'$ is quasi-alternating.
\end{theorem}
We start with some background needed for the proof. For any connected
link diagram $L$, we can associate a connected graph $G(L)$, called the
Tait graph of $L$, by checkerboard coloring complementary regions of
$L$, assigning a vertex to every shaded region, an edge to every
crossing and a $\pm$ sign to every edge as follows:
$$ \includegraphics[height=1cm]{qaedge}.$$
The signs are all equal if and only if $L$ is alternating.
\begin{lemma}\label{det_st}
For any spanning tree $T$ of $G(L)$, let $v(T)$ be the number
of positive edges in $T$. Let $s_v(L)=\#\{$spanning trees $T$ of $G(L)\ |\ v(T)=v\}$. Then
$$ \det(L) = \left|\sum_v (-1)^v\, s_v(L) \right|. $$
\end{lemma}
\begin{proof}
Thistlethwaite \cite{thistlethwaite} gave an expansion of the
Jones polynomial $V_L(t)$ in terms of spanning trees $T$ of $G(L)$.
Using this expansion, in \cite{KHshort} for any spanning tree $T$, we defined a grading $u(T)$, such that
$$V_L(t) = (-1)^w\, t^{(3w+k)/4}\sum_{T \subset G} (-1)^{u(T)}\, t^{u(T) -v(T)}$$
where $w$ is the writhe of $L$, and $k$ is a constant that depends on $G$
(see the proof of Proposition 2 \cite{KHshort}).
Thus,
$$ \det(L) = |V_L(-1)| = \left| \sum_{T \subset G} (-1)^{u(T)} (-1)^{u(T) -v(T)}\right| = \left|\sum_{v\geq 0} (-1)^v\, s_v(L) \right|.$$
\end{proof}
\begin{proof}{\em (Theorem \ref{maintheorem})\ } Let $L$ be a
quasi-alternating diagram at crossing $c$. We may assume that
$\varepsilon(c)=1$ by rotating $L$ as needed. For $n\in\mathbb Z$, let $L^n$ denote
the link diagram with $|n|$ additional crossings inserted at $c$,
which are vertical positive half-twists if $n>0$, and horizontal
negative half-twists if $n<0$. The cases $n=\pm 2$ are shown in
Figure \ref{skeinfig}.
\begin{figure}
\caption{Crossing $c$ of $L$, its smoothings $L_0$ and $L_{\infty}$, and the twisted diagrams $L^{2}$ and $L^{-2}$.}
\label{skeinfig}
\end{figure}
We first show that $L^n$ is quasi-alternating at any inserted
crossing.
Suppose $n\geq 0$. We checkerboard color $L$ such that the edge $e$
in $G(L)$ that corresponds to $c$ is positive. With the induced
checkerboard coloring, the Tait graph $G(L_0)$ has the edge $e$
contracted; $G(L_{\infty})$ has the edge $e$ deleted; and $G(L^n)$ has
$n$ additional vertices on $e$, dividing it into $n+1$ positive edges
$\{e_0,\ldots,e_n\}$.
For every spanning tree $T$ of $G(L)$ such that $e\in T$,
there is a unique spanning tree $T'$ of $G(L_0)$, and a unique spanning
tree $T''$ of $G(L^n)$ such that $\{e_0,\ldots,e_n\}\subset T''$, then
$v(T'')=v(T)+n=v(T')+1+n$.
For every spanning tree $T$ of $G(L)$ such that $e\notin T$,
there is a unique spanning tree $T'$ of $G(L_{\infty})$, and
there are spanning trees $T_0,\ldots,T_n$ of $G(L^n)$ such that
$e_i\notin T_i$, then for $0\leq i\leq n$, $v(T_i)=v(T)+n=v(T')+n$.
For each $v$,
\begin{equation}\label{eq1}
s_v(L^n) = s_{v-n-1}(L_0) + (n+1)s_{v-n}(L_{\infty}).
\end{equation}
Therefore,
\begin{equation}\label{eq2}
\sum_v (-1)^v\, s_v(L^n) = \sum_v (-1)^v\, s_{v-n-1}(L_0) + (n+1)\sum_v (-1)^v\,
s_{v-n}(L_{\infty}).
\end{equation}
By (\ref{eq1}) with $n=0$, $s_v(L) = s_{v-1}(L_0) + s_{v}(L_{\infty})$. Hence
\begin{equation}\label{eq3}
\sum_v (-1)^v\, s_v(L)= \sum_v (-1)^v\, s_{v-1}(L_0) +
\sum_v (-1)^v\, s_v(L_{\infty}).
\end{equation}
Let $x=\sum_v (-1)^v\, s_{v-1}(L_0)$ and $y=\sum_v (-1)^v\, s_v(L_{\infty})$, so $\det(L_0)=|x|$ and $\det(L_{\infty})=|y|$.
Since $L$ is quasi-alternating at $c$, $\det(L)=\det(L_0) + \det(L_{\infty})$, that is $|x+y|=|x|+|y|$.
Therefore, $x\cdot y\geq 0$.
It now follows from (\ref{eq2}) for $n\geq 0$,
\begin{eqnarray}
\nonumber
\left|\sum_v (-1)^v\, s_v(L^n)\right| &=& |(-1)^nx+(n+1)(-1)^ny|=|x|+(n+1)|y| \\
\label{eq5}
\det(L^n) &=& \det(L_0) + (n+1)\det(L_{\infty}).
\end{eqnarray}
Let $c$ denote any crossing in $L^n$ added to $L$ as above.
Let $L^n_0$ and $L^n_{\infty}$ denote the corresponding resolutions of
$L^n$ at $c$. We have $L^n_{\infty}=L_{\infty}$ and $L^n_0=L^{n-1}$ as links.
For $n\geq 1$, (\ref{eq5}) implies
\begin{eqnarray*}
\det(L^n) &=& \det(L_0) + (n+1)\det(L_{\infty})\\
&=& \big(\det(L_0) + n\det(L_{\infty})\big) + \det(L_{\infty})\\
&=& \det(L^{n-1}) + \det(L^n_{\infty})\\
&=& \det(L^n_0) + \det(L^n_{\infty}).
\end{eqnarray*}
We have that $L^0=L$ and $L^n_{\infty}=L_{\infty}$ as links, and hence are
quasi-alternating. If $L^{n-1}=L^n_0$ is quasi-alternating, then
$L^n$ is quasi-alternating by the equations above. By induction, $L^n$
is quasi-alternating at $c$, so $L^n$ is quasi-alternating at every
inserted crossing.
Suppose $n\leq 0$. If $L$ is quasi-alternating at $c$ then the mirror
image $\tilde{L}$ is also quasi-alternating at $c$ since $\det(\tilde{L})=\det(L)$.
Applying the argument above to $\tilde{L}$, and then reflecting proves
this case for $L$.
Since every inserted crossing is quasi-alternating in $L^n$, we can
iterate the construction above. Let $\tau=C(a_1,\ldots,a_m)$ be an
alternating rational tangle that extends $c$. Since $\varepsilon(c)\cdot a_i
\geq 1$ for all $i$, $L^{a_i}$ is a vertical positive twist operation,
and $L^{-a_i}$ is a horizontal negative twist operation. We now
construct:
$$L'= \left(\left((L^{-a_m})^{a_{m-1}}\right)^{-a_{m-2}}\cdots\right)^{(-1)^{m} (a_1-\varepsilon(c))}.$$
For example, see Figure \ref{iteration}. The resulting link $L'$ is
quasi-alternating, and is obtained from $L$ by replacing $c$ with
$C(a_1,a_2,\ldots,a_m)$. \end{proof}
\begin{figure}
\caption{Crossing $c$ with $\varepsilon(c)=1$ is replaced by $C(5,3,2)$. Steps shown are $L \to L^{-2} \to (L^{-2})^{3} \to ((L^{-2})^{3})^{-4}$.}
\label{iteration}
\end{figure}
\
The following lemma will be used in the next section.
\begin{lemma} \label{connectsum}
If $K$ and $L$ are any quasi-alternating knot diagrams, then $K\# L$ is quasi-alternating.
\end{lemma}
\begin{proof} The proof is by induction on $\det(K)$. For quasi-alternating $K$, if $\det(K)=1$ then $K$
is the unknot, so the result follows. Otherwise, $K$ is
quasi-alternating at a crossing $c$, so the two smoothings at $c$,
$K_0$ and $K_{\infty}$, are quasi-alternating.
Since $\det(K)=\det(K_0)+\det(K_{\infty})$, both $\det(K_0)< \det(K)$ and $\det(K_{\infty}) < \det(K)$.
By induction, both $K_0\# L$ and $K_{\infty}\# L$ are quasi-alternating.
Moreover,
\begin{eqnarray*}
\det(K\# L) &=& \det(K)\det(L) = \left(\det(K_0)+\det(K_{\infty})\right)\det(L) \\
&=& \det(K_0)\det(L) + \det(K_{\infty})\det(L) \\
&=& \det(K_0\# L) + \det(K_{\infty}\# L).
\end{eqnarray*}
Therefore, $K\# L$ is quasi-alternating at $c$.
\end{proof}
\section{Pretzel links}
\begin{minipage}{2.4 in}
\begin{center}
\includegraphics[width=1.75 in]{pretzelontorus}
\end{center}
\end{minipage}
\begin{minipage}{2.4in}
Alternating links are quasi-alternating, so we only consider
non-alternating pretzel links. For all $p_i,\, q_j \geq 1$, let
$P(p_1,\ldots,p_n, -q_1,\ldots, -q_m)$ denote the $(m+n)$--strand
pretzel link. As shown in figure on the left, the standard diagram
of a non-alternating pretzel link can be made alternating on the
torus and so has Turaev genus one.
\end{minipage}
\\
\begin{prop}\label{pretzelKH}
If $m,n \geq 2$ and all $p_i,\, q_j \geq 2$, then the Khovanov
homology of $P(p_1,\ldots,p_n,-q_1,\ldots,-q_m)$ has thickness exactly $3$.
More generally, any non-alternating Montesinos link, obtained
by replacing $p_i>1$ (resp. $q_j>1$) half-twists with
a rational tangle that extends at least two of these crossings, also
has thickness exactly $3$.
\end{prop}
\begin{proof} It is easy to check that for $n,\, m \geq 2$ and $p_i,\, q_j \geq
2$, the standard diagram for $P(p_1,\ldots,p_n,-q_1,\ldots,-q_m)$ is
adequate. By Proposition 5.1 of \cite{KhPatterns}, it has thick Khovanov
homology. Since the Turaev genus of non-alternating pretzel links is
one, the thickness bounds for Khovanov homology given in \cite{dkh,
Manturov} are achieved by pretzel links whose Khovanov homology is
thick. Since adequacy (and Turaev genus) is preserved after replacing
any half-twists with a rational tangle that extends at least two of these crossings,
the same argument applies to these Montesinos links.
\end{proof}
Similarly, it follows that every non-alternating Montesinos link (see
\cite{BZ} for their classification) has Turaev genus one. However, some
of these have thin Khovanov homology.
\begin{theorem}\label{pretzeltheorem} \
\begin{enumerate}
\item For $n\geq 1$, $p_i \geq 1$ for all $i$, and $q > \min(p_1,\ldots,p_n)$,
the pretzel link $P(p_1,\ldots,p_n, -q)$ is quasi-alternating.
\item For $m,n \geq 2$ and all $p_i,\, q_j \geq 2$, the pretzel link $P(p_1,\ldots,p_n,-q_1,\ldots,-q_m)$ is not quasi-alternating.
\end{enumerate}
Both statements are true for all permutations of $p_i$'s and $q_j$'s, and reflections of all these pretzel links.
\end{theorem}
\begin{proof}
We first prove part (1) by induction on $n$. For
$n=1$, $P(p_1,-q) = T(2,p_1-q)$, which is quasi-alternating since
$p_1-q\neq 0$.
Let $n\geq 1$ and $p_i \geq 1$ for all $i$. Suppose for $q >
\min(p_1,\ldots,p_n),\ P(p_1,\ldots,p_n, -q)$ is quasi-alternating.
Consider $P(p_1,\ldots,p_n,p_{n+1}, -q)$. Without loss of generality,
$q > p_{i_0}$ for some $i_0$, with $1\leq i_0 \leq n$.
Let $L=P(p_1,\ldots,p_n,1,-q)$, and let $c$ be the crossing on the
$(n+1)$st strand. Let $T(2,k)$ denote the alternating torus link.
Then $L_0=T(2,p_1)\# \ldots \# T(2,p_n)\#T(2,-q)$
is quasi-alternating by Lemma \ref{connectsum}, and
$L_{\infty}=P(p_1,\ldots,p_n,-q)$ is quasi-alternating by the
induction hypothesis.
By Lemma 4.3 of \cite{DFKLS2},
$${\rm det}(P(p_1,\ldots, p_n,-q_1,\ldots,-q_m)) =
\left|\prod_{i=1}^n p_i \prod_{j=1}^m q_j \left(\sum_{i=1}^n \frac{1}{p_i} -
\sum_{j=1}^m \frac{1}{q_j}\right)\right|.$$
Now, $q>p_{i_0}$ implies $\displaystyle{\frac{1}{p_{i_0}}-\frac{1}{q}>0}$, so
$$q \prod_{i=1}^n p_i \left(1+\sum_{i\neq i_0}\frac{1}{p_i}+\left(\frac{1}{p_{i_0}}-\frac{1}{q}\right)\right)
=q \prod_{i=1}^n p_i \left(\sum_{i\neq i_0}\frac{1}{p_i}+\left(\frac{1}{p_{i_0}}-\frac{1}{q}\right)\right)
+ q\prod_{i=1}^n p_i.$$
Therefore, $\det(L)=\det(L_0)+\det(L_{\infty})$, which proves that
$L=P(p_1,\ldots,p_n,1,-q)$ is quasi-alternating at $c$. By Theorem
\ref{maintheorem}, $L=P(p_1,\ldots,p_n,p_{n+1},-q)$ is quasi-alternating
for all $p_{n+1} \geq 1$. This completes the proof of part (1) by induction.
By Proposition \ref{pretzelKH} the pretzel links in part (2) have thick Khovanov homology, so they are not quasi-alternating.
The arguments above remain essentially unchanged for all permutations of $p_i$'s
and $q_j$'s, and for all reflections, given by negating every $p_i$
and $q_j$.
\end{proof}
\begin{remark}\rm
Widmer \cite{Widmer} extended Theorem \ref{pretzeltheorem} to certain Montesinos links.
\end{remark}
\subsection{$3$--strand pretzel links}\label{3strands}
Theorem \ref{pretzeltheorem} still leaves open the quasi-alternating
status of many pretzel links. For certain $3$--strand pretzel links,
this can be deduced from previous results. As above, the statements
below for $P(p_1,p_2,-q)$ are true for all permutations of $p_i$'s and
$q$, and reflections of all these pretzel links.
According to \cite{Josh_Greene, Lisca_Stipsicz}, for the pretzel link $L=P(p_1,p_2,-q)$
with $p_1, p_2, q\geq 2$, $\Sigma(L)$ is an {\rm L}--space if and only if
\begin{enumerate}
\item $q \geq \min\{p_1,p_2\}$; or
\item $q = \min\{p_1,p_2\}-1\ $ and $\ \max\{p_1,p_2\}\leq 2q+1$.
\end{enumerate}
By Proposition \ref{Lprop}, $3$--strand pretzel links that satisfy
neither (1) nor (2) above are not quasi-alternating.
Together with Theorem \ref{pretzeltheorem}(1), this leaves open
the quasi-alternating status for the following $3$--strand pretzel links:
\begin{question}\
\begin{enumerate}
\item For $p \geq q \geq 2$, is $P(p,q,-q)$ quasi-alternating?
\item For $ 2q +1 \geq p \geq q+1 \geq 3$, is $P(p, q+1, -q)$ quasi-alternating?
\end{enumerate}
\end{question}
Note that $P(3,3,-3)= 9_{46}$ and $P(4,3,-3)=10_{140}$. In
addition, thick Khovanov homology or knot Floer homology precludes
some of these links from being quasi-alternating (see \cite{OSz_2004,
Eftekhary, Suzuki}). For example, $P(k,2,-2)$ has thick Khovanov
homology for $2 \leq k \leq 5$ according to KhoHo \cite{KhoHo}.
\section{Quasi-alternating knots up to 10 crossings}
\label{10crossqa}
Manolescu \cite{Manolescu08} showed that all KH-thin knots up to 9 crossings,
except $9_{46}$ are quasi-alternating. Among 42 non-alternating
10-crossing knots, 32 are KH-thin. Baldwin \cite{Baldwin08} showed
that 10 knots among these, which are closed $3$--braids, are
quasi-alternating. Greene \cite{Josh_Greene} showed that eight more
knots $10_{150}$,$10_{151}$,$10_{156}$, $10_{158}$, $10_{163}$,
$10_{164}$, $10_{165}$, $10_{166}$ are quasi-alternating. We show
that except for $10_{140}$, the remaining 13 knots are
quasi-alternating. In the table below, we give the knot and its
Conway notation (see \cite{Kawauchi}). For our computations, we replaced
the rational tangle indicated in bold with a crossing of same sign,
and checked that this crossing is quasi-alternating in the new
diagram. It follows from Theorem \ref{maintheorem} that the knot is
quasi-alternating.
\begin{center}
\begin{tabular}{|c|c||c|c||c|c|}
\hline
$10_{129}$ & $\textbf{32}, 21, 2-$ & $10_{135}$ & $221, \textbf{21}, 2-$ &
$10_{146}$ & $22, \textbf{21}, 21-$ \\
\hline
$10_{130}$ & $311, \textbf{3}, 2-$ & $10_{137}$ & $\textbf{22}, 211, 2-$ &
$10_{147}$ & $211, \textbf{3}, 21-$ \\
\hline
$10_{131}$ & $311, \textbf{21}, 2-$ & $10_{138}$ & $\textbf{211}, 211, 2-$ &
$10_{160}$ & $-30: \textbf{20}: 20$ \\
\hline
$10_{133}$ & $\textbf{23}, 21, 2-$ & $10_{142}$ & $31, \textbf{3}, 3-$ & & \\
\hline
$10_{134}$ & $221, \textbf{3}, 2-$ & $10_{144}$ & $31, \textbf{21}, 21-$ & & \\
\hline
\end{tabular}
\end{center}
As a result, all KH-thin knots up to 10 crossings, except $9_{46}$ and
$10_{140}$, are quasi-alternating. Shumakovitch \cite{Shumakovitch}
informed us that $9_{46}$ and $10_{140}$ have thick odd Khovanov
homology, so they are not quasi-alternating.
\end{document} |
\begin{document}
\title{Can Dehn surgery yield three connected summands?}
\author{James Howie}
\address{ James Howie\\
Department of Mathematics and Maxwell Institute for Mathematical Sciences\\
Heriot--Watt University\\
Edinburgh EH14 4AS }
\email{ [email protected]}
\subjclass{Primary 57M25}
\keywords{Dehn surgery, Cabling Conjecture}
\maketitle
\begin{abstract}
A consequence of the Cabling Conjecture of Gonzalez-Acu\~{n}a and Short
is that Dehn surgery on a knot in $S^3$ cannot produce a manifold with more
than two connected summands. In the event that some Dehn surgery produces
a manifold with three or more connected summands, then the surgery parameter
is bounded in terms of the bridge number by a result of Sayari. Here this
bound is sharpened, providing further evidence in favour of the Cabling Conjecture.
\end{abstract}
\section{Introduction}
The Cabling Conjecture of Gonzalez-Acu\~{n}a and Short \cite{GAS}
asserts that Dehn surgery on a knot in $S^3$ can produce a reducible
$3$-manifold only if the knot is a cable knot and the surgery slope is
that of the cabling annulus.
The Cabling Conjecture is known to hold in many special cases
\cite{HM,HS,LZ,MT,Mu,Sch,Wu}.
If $k$ is the $(p,q)$-cable on a knot $K$, then the cabling annulus
on $k$ has slope $pq$, and the corresponding surgery manifold
$M(k,pq)$ splits as a connected sum $$M(K,p/q)\# L(p,q)$$
\cite{GL}. (Here $L(p,q)$ is a lens space.) In particular both connected
summands are prime \cite{GL}. Thus the Cabling Conjecture implies the
weaker conjecture below:
\begin{conjecture}[Two summands conjecture]
Let $k$ be a knot in $S^3$ and $r\in\mathbb{Q}\cup\{\infty\}$ a slope.
Then the Dehn surgery manifold $M(k,r)$ cannot be expressed as a
connected sum of three non-trivial manifolds.
\end{conjecture}
Since any knot group has {\em weight} $1$
(in other words, is the normal closure of a single element),
the same is true for any homomorphic image of a knot group.
Thus the two summands conjecture would follow from the group-theoretic
conjecture below, which remains an open problem.
\begin{conjecture}
A free product of three non-trivial groups has weight
at least $2$.
\end{conjecture}
The best known upper bound for the number of connected summands in $M(k,r)$
is $3$, obtained by combining results of Sayari \cite{Sa1},
Valdez S\'{a}nchez \cite{V} and the author \cite{H}. These results also show
that, should some $M(k,r)$ have
three connected summands, then two of these must be lens spaces
(necessarily with fundamental groups of coprime orders) and the
third must be a $\mathbb{Z}$-homology sphere. (See \cite{H} for details.)
Suppose that $k$ is a knot in $S^3$ with bridge number $b$,
and that the $3$-manifold $M$ obtained by performing Dehn surgery
on $k$ with surgery parameter $r$ has more than two connected
summands. It is known from the work of Gordon and Luecke \cite{GL}
that $r$ must be an integer.
If $\ell_1,\ell_2$ are the orders of the fundamental groups
of the lens spaces, then Sayari \cite{Sa2} has proved that $|r|=\ell_1\ell_2\le (b-1)(b-2)$.
In this paper we shall prove the following inequality.
\begin{thmA}\label{main}
Let $k$ be a knot in $S^3$ with bridge-number $b$. Suppose that $r$ is a slope
on $k$ such that $M=M(k,r)=M_1\# M_2\# M_3$ where $M_1,M_2$ are
lens spaces and $M_3$ is a homology sphere but not a homotopy sphere.
Then $$|\pi_1(M_1)|+|\pi_1(M_2)|\le b+1.$$
\end{thmA}
As an immediate consequence, we obtain a sharpening of Sayari's inequality.
\begin{corA}\label{maincor}
Under the hypotheses of Theorem \ref{main} we have
$$|r|=|\pi_1(M_1)|\cdot |\pi_1(M_2)|\le b(b+2)/4.$$
\end{corA}
We use the standard techniques of intersection graphs
developed by Scharlemann \cite{Sch1} and by Gordon and Luecke \cite{CGLS,GL2,GL3}.
In \S \ref{graphs} below, we recall the construction of the intersection
graphs in the particular context of this problem. A key feature
of these is the existence of {\em Scharlemann cycles}, which correspond in
a well-understood way to the lens space summands. In \S \ref{schar} we
show that, should the inequality $\ell_1+\ell_2\le b+1$ fail, then
we can find, trapped between two Scharlemann cycles, a {\em sandwiched disk}
(see Definition \ref{sand}). We then show in \S \ref{sd} that sandwiched disks
are impossible, which completes our proof.
\section{The graphs}\label{graphs}
Throughout the remainder of the paper, we assume that the manifold
$M=M(k,r)$ obtained by $r$-Dehn surgery on $k\subset S^3$
is a connected sum of three factors $M_1,M_2,M_3$,
where $M_1$ and $M_2$ are lens spaces while $M_3$
is a (prime) integer homology sphere. Note that, since $\pi_1(M)$
has weight $1$, the orders $\ell_1,\ell_2$ of
$\pi_1(M_1)$ and $\pi_1(M_2)$ are necessarily coprime.
It follows that the factors
$M_1,M_2,M_3$ are pairwise non-homeomorphic.
An essential embedded sphere $\Sigma\subset M$ necessarily separates,
with one component of $M\smallsetminus\Sigma$ homeomorphic to a punctured $M_s$ and the other
to a punctured $M_t\# M_u$, where $\{s,t,u\}=\{1,2,3\}$. We will
say that such a $\Sigma$ {\em separates} $M_s$ and $M_t$ (and also
separates $M_s$ and $M_u$).
Let $P_1,P_2$ be disjoint planar surfaces in the exterior $X(k)$
of $k$ (the complement of an open regular neighbourhood of $k$ in $S^3$) that extend
to essential spheres $\widehat{P_1},\widehat{P_2}\subset M$ such that
$\widehat{P_i}$ separates $M_i$ and $M_3$. Assume also
that $P_1,P_2$ have the smallest possible number of boundary components
amongst all such planar surfaces.
A standard argument ensures that we may also choose
$P_1,P_2$ to be disjoint (without increasing the number of boundary
components of either).
Following Gabai \cite[Section 4(A)]{G}, we put $k$ in thin position,
find a level surface $Q$ for $k$ and isotope $P:=P_1\cup P_2$
such that $P$ meets $Q$ transversely, and such that no component of $Q\cap P$ is an
arc that is boundary-parallel in $P$.
(The minimality condition in the definition of $P_1$ and $P_2$ ensures also that
no component of $Q\cap P$ is a boundary-parallel arc in $Q$.)
The number $q$ of boundary components of $Q$ is necessarily even, and is bounded
above by twice the bridge number, $q\le 2b$. We can complete $Q$ to
a sphere $\widehat{Q}\subset S^3$ by attaching $q$ meridional disks.
We denote the intersection graph of $P_i$ and $Q$ in $\widehat{Q}$
by $G_i$ for $i=1,2$. The ({\em fat}) vertices of $G_i$ are the meridional disks
$\widehat{Q}\smallsetminus Q$, and the edges are the components of $P_i\cap Q$
(some of which may be closed curves rather than arcs).
Each fat vertex contains precisely one point of intersection of $k$ with
$\widehat{Q}$, so a choice of orientation for $k$ and for $Q$ induces an
orientation on the collection of fat vertices -- that is, a partition of
fat vertices into two types, which we call {\em positive} and
{\em negative}. There are precisely $q/2$ vertices of each type.
Note that the graphs $G_1$ and $G_2$ have the same vertex set
but disjoint edge sets. Let $G_Q$ denote their union: $G_Q:=G_1\cup G_2$.
Similarly, we denote the intersection graph of $P$ and $Q$ in
$\widehat{P}=\widehat{P_1}\cup\widehat{P_2}$ by $G_P$ (noting that this graph
is the union of two disjoint non-empty subgraphs $G_{P_i}:=G_P\cap\widehat{P_i}$, $i=1,2$,
and hence is not connected).
The edges incident at a vertex $v$ of $G_Q$ are labelled by the boundary
components of $P$. These labels always occur in the same cyclic order around
$v$ (subject to change of orientation). We choose a numbering $1,\dots,p$
of $\pi_0(\partial P)$ in such a way that the labels $1,\dots,p$ always occur
in that cyclic order around each vertex of $G_Q$ (without loss of generality,
clockwise for positive vertices and anti-clockwise for negative vertices).
The corner at a vertex $v$ between the edges labelled $x$ and $x+1$ (modulo $p$)
is also given a label: $g_x$ if $v$ is positively oriented, and $g_x^{-1}$ if
$v$ is negatively oriented. Note that corners are arcs in $\partial X(k)$
with endpoints in $P$. In the usual set-up for intersection disks, $P$
is connected, and one can interpret the labels $g_x^{\pm 1}$
as elements of $\pi_1(M)$ (relative to a base-point on $P$). In our context
it is more natural to interpret $g_x^{\pm 1}$ as an element of the path-groupoid
$\Pi=\pi(M,P)$, whose elements are (free) homotopy classes of maps of pairs
from $([0,1],\{0,1\})$ to $(M,P)$. Thus $\Pi$ is a connected $2$-vertex
groupoid whose vertex groups are isomorphic to $\pi_1(M)$.
Let $T\subset M$ denote the Dehn-filling solid torus, and $k'\subset T$
its core (a knot in $M$).
A {\em Scharlemann cycle} in
$G_i$ is a cycle $C$ bounding a disk-component $\Delta$ of $\widehat{Q}\smallsetminus G_i$
(which we call a {\em Scharlemann disk}), such that each edge of $C$, regarded as an
arc in $P_i$, joins two fixed components of $\partial P_i$ ($x$ and $y$, say).
Thus each edge of $C$ has label $x$ at one end, and $y$ at the other.
Since $x,y$ are consecutive edges of $G_i$ at each vertex of $C$, the edges
of $G_Q\cap\Delta$ between $x$ and $y$ at $v$ belong to $G_{3-i}$ and correspond to
intersection points of $k'$ with $P_{3-i}$. Since $P_{3-i}$ is separating,
it follows that
$x-y$ is odd, and hence from the {\em parity rule} (see for example \cite[page 386]{GL2})
that all vertices of $C$ have the same orientation.
It is well-known (see for example \cite{CGLS,GL2}) that any Scharlemann cycle
in $G_i$ corresponds to a lens-space summand of $M$.
We have set things up in such a way that this summand is necessarily
isotopic to $M_i$, which leads to the following observation.
(Compare also \cite[Lemma 2.1]{Hoff}, which states a similar
conclusion under slightly different hypotheses.)
\begin{lemma}\label{samelength}
Any Scharlemann cycle in $G_i$ has length $\ell_i:=|\pi_1(M_i)|$.
\end{lemma}
\begin{proof}
Without loss of generality, we may assume that $i=1$.
Let $C$ be a Scharlemann cycle in $G_1$, and $\Delta$ the corresponding
Scharlemann disk. Assume that $x,y$ are the labels on the edges of $C$.
Following \cite{CGLS,GL2}, we construct a twice punctured
lens space in $M$ as follows.
The fat vertices of $G_{P_1}$ can be regarded as
meridional slices of the filling solid torus $T$.
The fat vertices $x$ and $y$ divide $T$ into two $1$-handles,
one of which -- $H$, say -- satisfies
$\partial\Delta\subset P_1\cup \partial H$.
Then a regular neighbourhood $L$ of $\widehat{P_1}\cup H\cup\Delta$ is a twice-punctured
lens space, with $\pi_1(L)\cong\mathbb{Z}_\ell$, where $\ell$ is the length of $C$.
One component of $\partial L$ is $\widehat{P_1}$. The second component $\Sigma$ has
precisely two fewer points of intersection with $k'$ than $\widehat{P_1}$.
By the uniqueness of the prime decomposition $M=M_1\# M_2\# M_3$,
$L$ is homeomorphic to a twice-punctured
copy of $M_1$ or of $M_2$. In the latter case,
$\Sigma$ also separates $M_1$ from $M_3$, which contradicts the minimality
hypothesis on $P_1$.
Hence $L$ is homeomorphic to a twice-punctured copy of $M_1$,
whence $\ell=\ell_1$ as claimed.
\end{proof}
More generally,
we have the following essentially well-known result, which is an important tool
in our proof.
Define the $2$-complex $K$ as follows. $K$ has two vertices, labelled
$1$ and $2$, and $p$ edges, labelled $g_1,\dots,g_p$. The initial (resp. terminal)
vertex of $g_i$ is $1$ or $2$ depending on whether the vertex $i$ (resp. $i+1$)
of $G_P$ is contained in $P_1$ or in $P_2$. The $2$-cells of $K$ are in one-to-one
correspondence with the disk-regions of $G_Q$; the attaching map for a $2$-cell
being read off from the corner-labels of the corresponding region of $G_Q$.
\begin{lemma}\label{pres}
Let $K_0$ be a subcomplex of $K$
with $H^1(K_0,\mathbb{Z})=\{0\}$.
If $K_0$ is connected then $M$ has a connected summand with fundamental group
isomorphic to $\pi_1(K_0)$. If $K_0$ is disconnected, then $M$ has a connected
summand with fundamental group
isomorphic to $\pi_1(K_0,1)*\pi_1(K_0,2)$.
\end{lemma}
\begin{proof}
The intersection of $\widehat{P}$ with the filling solid torus $T$
is precisely the set of fat vertices of $G_P$, each of which
is a meridional disk in $T$.
These disks divide $T$ into $1$-handles
$H_1,\dots, H_p$, where $H_i$ is the section of $T$ between the fat vertices $i$ and $i+1$
(modulo $p$).
Suppose first that $K_0$ is connected. Define $K'$ to be the union of the following
subsets of $M$:
\begin{enumerate}
\item $P_1$ if $K_0$ contains the vertex $1$ of $K$;
\item $P_2$ if $K_0$ contains the vertex $2$ of $K$;
\item the one-handle $H_i$ for each edge $g_i\in K_0$;
\item the disk-region of $G_Q$ corresponding to each $2$-cell of
$K_0$.
\end{enumerate}
It is easy to check that $K'$ is connected, and that $\pi_1(K')\cong\pi_1(K_0)$.
Let $N$ be a regular neighbourhood of $K'$ in $M$.
Then $N$ is a compact, connected, orientable $3$-manifold with
$\pi_1(N)\cong \pi_1(K_0)$ and hence $H^1(N,\mathbb{Z})=\{0\}$. It follows that
$\partial N$ consists entirely of spheres, by Poincar\'{e} duality.
Capping off each boundary component of $N$ by a ball yields a closed manifold $\widehat{N}$
with $\pi_1(\widehat{N})\cong\pi_1(N)\cong \pi_1(K_0)$, and $\widehat{N}$ is a connected summand of $M$ since
$N\subset M$.
Next suppose that $K_0$ is disconnected. Then $K_0$ contains both vertices $1,2$
of $K$, but no edge from $1$ to $2$. Choose an edge $g_z$ of $K$ joining $1$ to $2$, and define
$K_1=K_0\cup\{g_z\}$. Then $K_1$ is connected and $\pi_1(K_1)\cong\pi_1(K_0,1)*\pi_1(K_0,2)$.
Replacing $K_0$ by $K_1$ in the above gives the result.
\end{proof}
\begin{corollary}\label{3lens}
No subcomplex of $K$ has fundamental group which is a free product of three or more
finite cyclic groups.
\end{corollary}
\begin{proof}
Suppose that $K$ has such a subcomplex. Then by Lemma \ref{pres} $M$ has a connected summand
which is the connected sum of three lens spaces. This contradicts \cite[Corollary 5.3]{H}.
\end{proof}
Finally, the element $R=g_1g_2\dots g_p\in\pi_1(M)$ is a {\em weight element} -- that is,
its normal closure is the whole of $\pi_1(M)$ --
since it is represented by a meridian in $S^3\smallsetminus k$. This leads to the following
observation, which will be useful later.
\begin{lemma}\label{bigon}
Let $x\in\{1,\dots,p\}$. Then there is at least one integer $i\in\{1,\dots,(p-2)/2\}$ such
that no $2$-gonal region of $G_Q$ has corners $g_{x+i}$ and $g_{x-i}$ (or $g_{x+i}^{-1}$ and
$g_{x-i}^{-1}$).
\end{lemma}
\begin{proof}
Otherwise we have $g_{x+i}=g_{x-i}^{-1}$ in $\pi_1(M)$ for each $i=1,\dots,(p-2)/2$,
and hence the weight element $W=g_1\dots g_p$ is conjugate to a word of the
form $g_xUg_yU^{-1}$ (where $U=g_{x+1}\cdots g_{x+(p-2)/2}$ and $y=x+\frac{p}{2}$ modulo $p$).
Moreover, $g_x$ is conjugate in $\pi_1(M)$ to an element
of $\pi_1(M_i)$ for $i\in\{1,2,3\}$,
and a similar statement holds for $g_y$. Hence $W$ belongs to the normal closure in
$\pi_1(M)=\pi_1(M_1)*\pi_1(M_2)*\pi_1(M_3)$ of the free factors containing
conjugates of $g_x$ and $g_y$. Since all three free factors are non-trivial, this normal
subgroup is proper, which contradicts
the fact that $W$ is a weight element.
\end{proof}
\section{Analysis of Scharlemann cycles}\label{schar}
By \cite[Proposition 2.8.1]{GL2} there are Scharlemann cycles in $G_1$ and
in $G_2$. In this section we show that, if $\ell_1+\ell_2$ is big enough, then
these form a configuration we call a {\em sandwiched disk} (which we will show
in the next section to be impossible). Our next two results should be
compared to \cite[Lemmas 3.2 and 5.3]{Sa2} and \cite[Theorem 2.4]{GL3} respectively,
where the conclusions are similar but the hypotheses slightly different.
\begin{lemma}\label{disjoint}
If $\Delta$ is a Scharlemann disk bounded by a Scharlemann cycle in $G_1$
(resp. $G_2$) then $\Delta$ contains no edges of $G_2$ (resp. $G_1$).
\end{lemma}
\begin{proof}
Suppose that $\Delta$ is bounded by a Scharlemann cycle $C$ in $G_1$,
and that it contains edges of $G_2$. By \cite[Proposition 2.8.1]{GL2}
we know that there exists a Scharlemann cycle in $G_2\cap\Delta$.
We will find such a Scharlemann cycle explicitly, and use it to obtain a contradiction.
Recall that $C$ has length $\ell_1$, by Lemma \ref{samelength}.
Let $v_1,\dots,v_{\ell_1}$ denote the vertices of $C$ in cyclic
order. Each edge of $C$ has labels $x$ and $x+2t+1$, say, which correspond to vertices
in $G_{P_1}$, and the intermediate labels $x+1,\dots,x+2t$ correspond to vertices
of $G_{P_2}$. (Necessarily, these are even in number and alternating in orientation,
since they correspond to consecutive intersection points of $k'$ with $\widehat{P_2}$
between two consecutive intersection points of $k'$ with $\widehat{P_1}$.)
The graph $Y:=G_2\cap\Delta$ has $\ell_1$ vertices, each of valence $2t$
and each of the same orientation (which we assume to be positive).
If $\ell_1=2$, then every edge of $Y$ joins $v_1$ to $v_2$. Such an edge has
labels $x+j$ at one end and $x+2t+1-j$ at the other, for some $j$. The two edges
whose labels are $x+t$ and $x+t+1$ bound a $2$-gonal region, and hence form a Scharlemann
cycle of length $2$. But then $\ell_1=\ell_2=2$, contradicting the fact that
$\ell_1,\ell_2$ are coprime.
Suppose then that $\ell_1>2$. There must be a vertex $v_j$ in $C$ that is joined
only to $v_{j-1}$ and $v_{j+1}$ (subscripts modulo $\ell_1$)
by edges of $Y$. In particular
there are two consecutive vertices of $C$ that are joined by $s\ge t$ edges
of $Y$. The resulting $s$ $2$-gonal regions of $G_Q\cap\Delta$
give rise to relations $g_{x+j}g_{x+2t-j}=1$ for $0\le j\le s-1$ in the
path-groupoid $\Pi=\pi(M,P)$. But all the corners of the Scharlemann disk $\Delta$
have label $h:=g_xg_{x+1}\cdots g_{x+2t}$,
so $h$ has order $\ell_1$ in $\Pi$. Hence $g_{x+t}$ also has order $\ell_1>2$.
Hence also $s=t$ in the above, for otherwise $g_{x+t}^2=1$ in $\Pi$.
Choose a pair $v_i,v_j$ of vertices of $C$ with $i<j-1$ with $j-i$ minimal
subject to the condition that $v_i,v_j$ are joined by an edge of
$Y$.
Then each pair $(v_i,v_{i+1}),\dots,(v_{j-1},v_j)$ is joined by {\em precisely}
$t$ edges of $Y$, so there is an edge joining $v_i$ and $v_j$
that has labels $x+t$ and $x+t+1$, and this forms part of a Scharlemann cycle
of length $j+1-i$ in $G_2$. (See Figure 1.)
\begin{center}
\scalebox{0.6}[0.6]{\includegraphics{sch4}}
\\
Figure 1
\end{center}
Since $g_{x+t}$ has order
$\ell_1$ in $\Pi$, we deduce that $\ell_1=\ell_2$, which again contradicts
the fact that $\ell_1,\ell_2$ are coprime.
\end{proof}
In particular, if $C$ is a Scharlemann cycle in $G_1$ or $G_2$, then the two
labels appearing on the edges of $C$ are consecutive (modulo $p$): say $x,x+1$.
We call $x$ the {\em label} of $C$. Note that all the corners of
the corresponding Scharlemann disk have the same label $g_x$ or $g_x^{-1}$.
\begin{corollary}\label{samelabel}
Any two Scharlemann cycles in $G_1$ (respectively, in $G_2$)
have the same label.
\end{corollary}
\begin{proof}
Let $C,C'$ be Scharlemann cycles in $G_1$, bounding Scharlemann disks
$\Delta,\Delta'$ respectively. By Lemma \ref{disjoint}, $\Delta$ and
$\Delta'$ contain no edges of $G_2$, so are Scharlemann disks of $G_Q$.
By Lemma \ref{samelength} each of $C,C'$ has length $\ell_1$. Suppose
that $C$ has label $x$ and $C'$ has label $y\ne x$. Then $K$
has a subcomplex $K_0$ with one vertex $1$, two edges $g_x,g_y$
and two $2$-cells $\Delta,\Delta'$, so that
$$\pi_1(K_0)=\langle g_x,g_y\mid g_x^{\ell_1}=g_y^{\ell_1}=1\rangle\cong\mathbb{Z}_{\ell_1}*\mathbb{Z}_{\ell_1}.$$
In particular, $\pi_1(K_0)$ has weight $2$, so cannot be isomorphic
to a free factor of $\pi_1(M)$, which contradicts Lemma \ref{pres}.
\end{proof}
\begin{definition}\label{sand}
A {\em sandwiched disk} in $\widehat{Q}$ is a disk $D\subset\widehat{Q}$
such that:
\begin{enumerate}
\item[(a)] $\partial D$ is the union of a subpath $a_1$ of a Scharlemann cycle
$C_1\subset G_1$ and a subpath $a_2$ of a Scharlemann cycle
$C_2\subset G_2$, with $a_1\cap a_2=\partial a_1=\partial a_2$;
\item[(b)] there are no vertices of $G_Q$ in the interior of $D$.
\end{enumerate}
\end{definition}
\begin{lemma}\label{sandw}
If $|\pi_1(M_1)|+|\pi_1(M_2)|>(q+2)/2$, then there exists
a sandwiched disk $D\subset \widehat{Q}$.
\end{lemma}
\begin{proof}
As observed in \cite[p. 551]{Hoff} and \cite[Lemma 6.1]{Sa2}, we know that there are at least
two Scharlemann cycles in $G_1$ -- necessarily
with disjoint sets of vertices, since they have the same label (Corollary \ref{samelabel}). Similarly there
are at least two Scharlemann cycles in $G_2$ -- again with the same label and hence
with disjoint sets of vertices.
By hypothesis, at least one of $\ell_1=|\pi_1(M_1)|$, $\ell_2:=|\pi_1(M_2)|$
is greater than $q/4$. Without loss of generality, assume that $\ell_1>q/4$.
Then $G_1$ must contain precisely two Scharlemann cycles, one of each
possible orientation. Let us call them $C_1^+$ and $C_1^-$,
and let $\Delta_1^\pm$ denote the Scharlemann disks bounded by $C_1^\pm$.
Now let $C_2,C_2'$ denote two disjoint Scharlemann cycles in $G_2$,
and $\Delta_2,\Delta_2'$ the corresponding Scharlemann disks.
Since $\ell_1+\ell_2>q/2$, $C_2$ must intersect $C_1^+$ (if the vertices
of $C_2$ are positive) or $C_1^-$ (if the vertices of $C_2$ are negative).
On the other hand, consideration of vertex orientations shows that
$C_2$ cannot intersect both $C_1^+$ and $C_1^-$. Similar remarks apply to
$C_2'$.
Now $(C_1^+\cup C_1^-)\cap(C_2\cup C_2')$ consists only of some number
($t$, say) of vertices.
Then $\Delta:=\Delta_1^+\cup \Delta_1^-\cup \Delta_2\cup \Delta_2'$ has precisely two components,
$2\ell_1+2\ell_2-t$ vertices, $2\ell_1+2\ell_2$ edges, and four $2$-cells.
The complement of $\Delta$ in $\widehat{Q}$ thus contains $t-1$ components,
one of which is an annulus and $t-2$ are disks.
But $\widehat{Q}\smallsetminus\Delta$ also contains precisely
$q-2\ell_1-2\ell_2+t$ vertices. Since $2\ell_1+2\ell_2\ge q+4$, this
number is at most $t-4$. Hence there are at least two
disk-components of $\widehat{Q}\smallsetminus\Delta$ that contain no vertices of
$G_Q$.
Moreover, each vertex of $(C_1^+\cup C_1^-)\cap(C_2\cup C_2')$ appears
twice in $\partial(\widehat{Q}\smallsetminus\Delta)$. Each occurrence separates an arc of
$C_1^+\cup C_1^-$ from an arc of $C_2\cup C_2'$ in $\partial(\widehat{Q}\smallsetminus\Delta)$,
so each component of $\partial(\widehat{Q}\smallsetminus\Delta)$ contains an even number of vertices
of $(C_1^+\cup C_1^-)\cap(C_2\cup C_2')$.
The number of boundary components of $\widehat{Q}\smallsetminus\Delta$
is precisely $t$. If the vertices in $C_2$ and those in $C_2'$ have the same
orientation, then one of $C_1^+,C_1^-$ is a boundary component of $\widehat{Q}\smallsetminus\Delta$
containing no vertices of $(C_1^+\cup C_1^-)\cap(C_2\cup C_2')$.
With that exception, each boundary component of $\widehat{Q}\smallsetminus\Delta$
contains at least $2$ vertices of $(C_1^+\cup C_1^-)\cap(C_2\cup C_2')$.
It follows that there is at
least one disk component $D$ of
$\widehat{Q}\smallsetminus\Delta$ whose boundary contains precisely two vertices of
$(C_1^+\cup C_1^-)\cap(C_2\cup C_2')$ and whose interior contains no vertices of $G_Q$.
Any such $D$ is, by definition, a sandwiched disk.
\end{proof}
\section{Analysis of sandwiched disks}\label{sd}
In this section we complete the proof of our upper bound on $|r|$
by showing that sandwiched disks do not exist. This result holds with
no assumptions on $\ell_1$ or $\ell_2$, so may have wider applications.
We assume throughout that $G_1$, $G_2$ contain Scharlemann cycles of length
$\ell_1,\ell_2$ respectively, with labels $x_1,x_2$ respectively.
\begin{lemma}\label{pover2}
Let $D$ be a sandwiched disk with $\partial D=a_1\cup a_2$,
where $a_1,a_2$ are sub-paths of Scharlemann cycles in $G_1,G_2$ respectively.
Then no two consecutive vertices of $a_1$ (or of $a_2$)
are joined by $p/2$ edges
in $G_Q$.
\end{lemma}
\begin{proof}
Suppose that two vertices of (say) $a_1$ are joined by
$p/2$ edges. Then there are $2$-gonal regions $D_i$ in $G_Q\cap D$
such that the corner labels of $D_i$ are $g_{x_1+i}$ and $g_{x_1-i}$.
This contradicts Lemma \ref{bigon}.
\end{proof}
\begin{corollary}\label{cor1}
Let $D,a_1,a_2$ be as in Lemma \ref{pover2}. If two vertices
of $a_1$ (or of $a_2$) are connected by an edge in $G_Q$, then they are
consecutive vertices of $a_1$ (respectively, of $a_2$).
\end{corollary}
\begin{proof}
Let $w_0,\dots,w_t$ be the vertices of $a_1$, in order. Suppose that
$w_i,w_j$ are joined by an edge in $G_Q$, where $j>i+1$, and that $j-i$
is minimal for such pairs of vertices. Then $w_{i+1}$ has precisely two
neighbours in $G_Q$: $w_i$ and $w_{i+2}$. By Lemma \ref{pover2} it is connected
to each by fewer than $p/2$ edges, contradicting the fact that it has
valence $p$.
\end{proof}
\begin{corollary}\label{cor2}
Let $D,a_1,a_2$ be as in Lemma \ref{pover2}.
Each of $a_1,a_2$ has length greater than $1$, and each interior
vertex of $a_1$ (respectively $a_2$) is joined to an interior
vertex of $a_2$ (respectively $a_1$) by an edge of $G_Q\cap D$.
\end{corollary}
\begin{proof}
If $a_1,a_2$ both have length $1$, then every edge of $G_Q\cap D$
joins the two common endpoints $u,v$ of $a_1$ and $a_2$.
Without loss of generality, the edges of $G_Q\cap D$ incident at $u$
have labels $x_1+1,x_1+2,\dots,x_2$, while those incident at
$v$ have labels $x_2+1,x_2+2,\dots,x_1$.
Hence $|x_1-x_2|=p/2$, and
$D$ contains precisely $p/2$
arcs joining $u$ to $v$.
But this contradicts Lemma \ref{pover2}.
If $w$ is an interior vertex of (say) $a_1$, then $w$ has two
neighbours in $\partial D$. It is joined to each of these by strictly
fewer than $p/2$ arcs, by Lemma \ref{pover2}, and hence is also joined to a third
vertex in $G_Q$. Since all the edges of $G_Q$ incident at $w$ are
contained in $D$, this third vertex is also in $\partial D$.
By Corollary \ref{cor1} it cannot be a vertex of $a_1$,
so it must be an interior vertex of $a_2$.
\end{proof}
\begin{lemma}\label{ycycles}
Let $D$ be a sandwiched disk in $G_Q$.
Then there are no Scharlemann cycles in $G_Q\cap D$.
\end{lemma}
\begin{proof}
Any Scharlemann cycle $C$ in $G_Q\cap D$ is a Scharlemann cycle in $G_1$ or in
$G_2$, so has label $x_1$ or $x_2$ by Corollary \ref{samelabel}. Assume without loss of generality that
$C$ has label $x_2$. For any vertex $v$ of $a_2$, the corner labelled $g_{x_2}$
does not lie in $D$, so the vertices of $C$ are interior vertices of $a_1$.
By Corollary \ref{cor1}, the vertices of $C$ must be pairwise consecutive
vertices of $a_1$, and hence $C$ has length $2$. Moreover, if $v_1,v_2$ are the
vertices of $C$, then $v_1,v_2$ are connected by edges labelled $x_1+1,\dots,x_2$
at one end (say the $v_1$ end), and by edges labelled $x_2+1,\dots,x_1$ at the
other ($v_2$) end. In particular, they are joined by at least $p/2$ edges, contradicting
Lemma \ref{pover2}.
\end{proof}
\begin{corollary}\label{cor3}
If there is a sandwiched disk $D$ in $G_Q$ such that $\partial D=a_1\cup a_2$
where $a_i$ is a subpath of a Scharlemann cycle with label $x_i$, then
$|x_1-x_2|=p/2$.
\end{corollary}
\begin{proof}
Let $a_1\cap a_2=\{u,v\}$.
Without loss of generality, $x_1=p$ and the edges of $G_Q\cap D$ meeting
$u$ are labelled $1,\dots,x_2$ at $u$, while those meeting
$v$ are labelled $x_2+1,\dots,p$ at $v$. If (say) $x_2<p/2$,
then there is a label $y$ with $x_2<y\le p$ such that $y$
does not appear as either label of any edge meeting $u$ that
is contained in $D$. Consider the subgraph $\Gamma$ of
$G_Q\cap D$ that is obtained by removing $u$ and its incident
edges. At each vertex of $\Gamma$, the edge labelled $y$
leads to another vertex of $\Gamma$. Since all vertices of
$\Gamma$ are positive, it follows that $\Gamma$ contains a
great $y$-cycle, and hence a Scharlemann cycle by \cite[Lemma 2.6.2]{CGLS}.
This contradicts Lemma \ref{ycycles}.
\end{proof}
\begin{theorem}\label{nosand}
There are no sandwiched disks in $G_Q$.
\end{theorem}
\begin{proof}
We assume that there is a sandwiched disk $D$ in $G_Q$, and derive
a contradiction.
Suppose that $\partial D=a_1\cup a_2$, where $a_i$ is a subpath of
a Scharlemann cycle $C_i$.
Let $x_i$ be the label of $C_i$.
By Corollary \ref{cor3}, it follows that $|x_1-x_2|=p/2$.
Let $u,v$ denote the common vertices of $a_1,a_2$.
By Corollary \ref{cor2} each of $a_1,a_2$ has length greater than $1$.
Let $s_1,s_2$ be the vertices of $a_1,a_2$ respectively which are
adjacent to $u$,
and let $t_1,t_2$ be the vertices of $a_1,a_2$ respectively which are
adjacent to $v$. (Note that neither of the possibilities $s_1=t_1$, $s_2=t_2$
is excluded at this stage.)
By Corollary \ref{cor2}, $s_1$ is connected to a vertex of $a_2$ other than $u,v$
by an edge contained in $D$. Similarly, $s_2$ is connected to a vertex of $a_1$ other than $u,v$
by an edge contained in $D$. These edges cannot cross; hence $s_1$ and $s_2$ are
joined by an edge. Similarly $t_1$ and $t_2$ are joined by an edge. Hence each of $u,v$
is incident at a triangular region of $G_Q\cap D$: call them $\Delta_u$ and $\Delta_v$.
Suppose that the edges of $G_Q\cap D$ that are incident at $u$ have labels
$x_1+1,\dots,x_2$ at $u$, and suppose that $i$ of these edges
(namely those with labels $x_1+1,\dots,x_1+i$) are connected to $s_1$.
Then these edges have labels $x_1,x_1-1,\dots,x_1-i+1$ at $s_1$, and together they
bound $i-1$ $2$-gonal faces of $G_Q$, of which the $j$'th has corner labels
$g_{x_1+j}$ and $g_{x_1-j}$.
The remaining $(p-2i)/2$ edges of $G_Q\cap D$ incident at $u$ join $u$ to $s_2$.
They have labels
$x_1+i+1,\dots,x_2$ at $u$, and $x_1-i,\dots,x_2+1$ at $s_2$. Together they
bound $(p-2i-2)/2$ $2$-gonal regions of $G_Q$, the $j$'th of which has corner
labels $g_{x_2-j}$ and $g_{x_2+j}$. Thus the triangular region $\Delta_u$ of
$D\cap G_Q$ that is incident at $u$ has corner labels $g_y$ at $u$ and $g_z$
at each of $s_1$ and $s_2$, where $y=x_1+i$ and $z=x_1-i$ (modulo $p$).
We can now perform a similar analysis on the edges of $G_Q\cap D$ that are incident
at $v$. Note, however, that for all $j\in\{1,\dots,(p-2)/2\}\smallsetminus\{i\}$
there is a $2$-gonal region of $G_Q\cap D$ with corner labels $g_{x_1-j}$ and $g_{x_1+j}$.
By Lemma \ref{bigon} there cannot be a $2$-gonal region of $G_Q\cap D$ with corner
labels $g_{x_1-i}$ and $g_{x_1+i}$. It follows that there are also precisely $i$
edges joining $v$ to $t_1$, and $(p-2i)/2$ joining $v$ to $t_2$. The triangular
region $\Delta_v$ of $D\cap G_Q$ that is incident at $v$ then has corner labels
$g_z$ at $v$ and $g_y$ at each of $t_1,t_2$, where $y=x_1+i$ and $z=x_1-i$
as above (see Figure 2).
\begin{center}
\scalebox{0.8}[0.6]{\includegraphics{sch3}}
\\
Figure 2
\end{center}
Finally, let $K_0$ denote the (disconnected) subcomplex of $K$ with
vertices $\{1,2\}$, edges $\{g_{x_1},g_{x_2},g_y,g_z\}$
and $2$-cells $\{\Delta_1,\Delta_2,\Delta_u,\Delta_v\}$.
Then by Lemma \ref{pres}, $M$ has a connected summand with fundamental group
$$\pi_1(K_0,1)*\pi_1(K_0,2)\cong\langle g_{x_1},g_{x_2},g_y,g_z\mid g_{x_1}^{\ell_1}=g_{x_2}^{\ell_2}=g_yg_z^2=g_zg_y^2=1\rangle
\cong \mathbb{Z}_{\ell_1}*\mathbb{Z}_{\ell_2}*\mathbb{Z}_3.$$
But this contradicts Corollary \ref{3lens}, which completes the proof.
\end{proof}
\begin{theorem}[= Theorem \ref{main}]\label{inequality}
Let $k$ be a knot in $S^3$ with bridge-number $b$. Suppose that $r$ is a slope
on $k$ such that $M=M(k,r)=M_1\# M_2\# M_3$ where $M_1,M_2$ are
lens spaces and $M_3$ is a homology sphere but not a homotopy sphere.
Then $$|\pi_1(M_1)|+|\pi_1(M_2)|\le b+1.$$
\end{theorem}
\begin{proof}
As discussed in \S \ref{graphs}, we put $k$ in thin position, and choose a level surface
$Q$ and disjoint planar surfaces $P_1,P_2$ such that
\begin{itemize}
\item $P_i$ extends to a sphere in $M$ separating $M_i$ from $M_3$, and has
fewest boundary components among all such;
\item no component of $Q\cap P_i$ is a boundary-parallel arc in $Q$ or $P_i$.
\end{itemize}
By Gordon and Luecke \cite{GL}, there are Scharlemann cycles $C_i$ in $G_i$
for $i=1,2$. Moreover, the Scharlemann cycle $C_i$ has length $\ell_i:=|\pi_1(M_i)|$
and bounds a disk-region $\Delta_i$ of $G_Q$.
If $\ell_1+\ell_2>b+1\ge (q+2)/2$, then
by Lemma \ref{sandw}
there is at least one sandwiched disk $D$ in $G_Q$.
But this contradicts Theorem \ref{nosand}.
Hence $\ell_1+\ell_2\le b+1$ as claimed.
\end{proof}
\begin{corollary}[= Corollary \ref{maincor}]
With the hypotheses and notation of Theorem \ref{inequality}, we have
$$|r|=|\pi_1(M_1)|\cdot |\pi_1(M_2)| \le \frac{b(b+2)}{4}.$$
\end{corollary}
\begin{proof}
Let $\ell_1=|\pi_1(M_1)|$ and $\ell_2=|\pi_1(M_2)|$.
The equation $|r|=\ell_1\cdot\ell_2$ comes from computing
$|H_1(M,\mathbb{Z})|$ in two different ways.
Given that $\ell_1,\ell_2$ are distinct positive integers,
the inequality $\ell_1\cdot\ell_2\le b(b+2)/4$ follows easily
from Theorem \ref{inequality}.
\end{proof}
\end{document} |
\begin{document}
\title{So What is Class Number 2?}
\markright{Class Number 2}
\author{Scott T. Chapman}
\maketitle
\begin{abstract} Using factorization properties, we give several characterizations for a ring of algebraic integers to have class number at most 2.
\end{abstract}
\section{Introduction.}
I was recently at an algebra colloquium when some questions involving small class numbers of algebraic number rings arose. Of the 30 or so participants, almost everyone in the room recognized that an algebraic number
ring is a unique factorization domain (or UFD) if and only if its class number is one (i.e., the ideal class group of the ring is trivial). Almost no one in the room was aware of the following theorem of Carlitz, which is well known among mathematicians
who work in the theory of nonunique factorizations (see \cite{GHKb} for a general reference to this area).
\begin{CarlitzThm}\cite{Ca}\label{Carlitz}
Let $R$ be an algebraic number ring. $R$ has class number at most 2 if and only if whenever $\alpha_1,\ldots, \alpha_n,\beta_1, \ldots, \beta_m$ are irreducible elements of $R$
with
\begin{equation}\label{CarlitzEq}\tag{$\dagger$}
\alpha_1\cdots \alpha_n=\beta_1\cdots \beta_m
\end{equation}
then $n=m$.
\end{CarlitzThm}
An integral domain $D$ in which each nonzero nonunit can be factored as a product of irreducible elements is known as an \textit{atomic domain}. An atomic domain $D$ that satisfies the condition in Carlitz's theorem (i.e., satisfies \eqref{CarlitzEq}) is
known as a \textit{half-factorial domain} (or \textit{HFD}). Notice that a UFD is an HFD and hence, if $R$ has class number exactly 2, it is an example of an HFD that is not a UFD (the classic such example is $\mathbb{Z}[\sqrt{-5}]$ and the nonunique factorization $6=2\cdot 3 = (1+\sqrt{-5})(1-\sqrt{-5})$). Thus, the Carlitz theorem can be restated as follows.
\begin{CarlitzThmR}\label{CarlitzR}
Let $R$ be an algebraic number ring. $R$ has class number at most 2 if and only if $R$ is a half-factorial domain.
\end{CarlitzThmR}
Carlitz's theorem was the beginning of quantitative and qualitative research into nonunique factorizations in integral domains and monoids. This research began with papers concerning HFD's (see \cite{Sk}, \cite{Z1}, \cite{Z2}, and a comprehensive survey article \cite{ChapCoy}) and has expanded into the study of a host of combinatorial constants that measure deviation of factorizations from the UFD condition. The purpose of this article is not to deeply explore the general topic of factorization, but to give a series of factorization-inspired characterizations of class number 2. We do this solely in terms of algebraic number fields and thus avoid the abstraction and generality that more difficult factorization problems entail. Our characterizations will involve constants of increasing complexity,
and in light of this, we will offer the various needed definitions directly before each result. We hope that our work gives the reader a better appreciation of Carlitz's theorem and its related substantive factorization problems. For those who want a more in-depth treatment of nonunique factorizations, several recent papers on this topic can be found in this \textsc{Monthly} (\cite{ChapBag}, \cite{Ger1}, \cite{OnPel}).
Throughout we assume an understanding of abstract algebra at the level of \cite{Ga} and a basic familiarity with algebraic number theory at the level of \cite{Ma}. (An approach that might be more friendly to a novice can be found in \cite{PD}.)
For clarity, we review the basic definitions necessary for the remainder of this work. If $\mathbb{Q}$ represents the field of rational numbers, then an algebraic number field $K$ is any finite extension of $\mathbb{Q}$. An element $\alpha \in K$ is an algebraic integer if it is a root of a monic polynomial in $\mathbb{Z}[X]$. By \cite[Theorem 6.2]{PD}, the set $R$ of algebraic integers in $K$ is an integral domain, which we refer to as an algebraic number ring.
When dealing with an algebraic number ring $R$, we use the usual notions of divisibility from the theory of integral domains. Let $\mathcal{A}(R)$ represent the set of irreducible elements (or atoms) of $R$, $\mathcal{U}(R)$ the set of units of $R$, and $R^\bullet$ the set of nonzero nonunits of $R$. Recall that $x$ and $y$ in $R$ are associates if there is a unit $u\in R$ with $x=uy$. If $x, y$, and $z$ are in $R$ with $y=xz$, then we say that $x$ divides $y$ and denote this by $x\, |\, y$.
Let $\mathcal{I}(R)$ denote the set of ideals of $R$. If $x\in R$, then let $(x)$ represent the principal ideal generated by $x$ and $\mathcal{P}(R)$ the subset of $\mathcal{I}(R)$ consisting of principal ideals of $R$. For $I$ and $J$ in $\mathcal{I}(R)$, set
\[
IJ = \left\{\sum_{i=1}^n a_ib_i \,\mid\, a_i\in I \mbox{ and }b_i\in J\right\}.
\]
Using \cite[Theorem 8.1]{PD}, it is easy to argue that $IJ$ is another ideal of $R$ which is known as the product of
$I$ and $J$. If $I, J$, and $K$ are ideals
of $R$ with $J=IK$, then we borrow the notation used above for elements and say that $I \, |\, J$.
Define an equivalence relation on $\mathcal{I}(R)$ by $I\sim J$ if and only if there exist $\alpha$ and $\beta$ in $R$ with $(\alpha)I=(\beta)J$. If $[I]$ represents the equivalence class of the ideal $I$ under $\sim$, then by \cite[Lemma 10.1]{PD} the operation
\[
[I]+[J] = [IJ]
\]
is well-defined. By \cite[Theorem 8.13]{PD}, the set $\mathcal{C}(R)=\mathcal{I}(R)/\sim$ forms an abelian group under $+$ called the class group of $R$. By \cite[Theorem 10.3]{PD}, $\lvert \mathcal{C}(R) \rvert$
is finite and is known as the class number of $R$. As previously mentioned, classical algebraic number theory (\cite[Theorem 9.4]{PD}) asserts that $R$ is a unique factorization domain if and only if its class number is one.
Throughout the rest of our work we will use freely the fact asserted in \cite{Ca} that every ideal class of $\mathcal{C}(R)$ contains infinitely many prime ideals.
To completely understand how elements factor in an algebraic number ring $R$, we will need this fundamental result concerning the factorizations of ideals in $R$.
\begin{Fundamental} \cite[Theorem 8.27]{PD}
Let $R$ be an algebraic number ring. If $I$ is an ideal of $R$, then there exists a unique (up to order) list of not necessarily distinct prime ideals
$\mathfrak{p}_1$, $\mathfrak{p}_2, \ldots , \mathfrak{p}_k$ of $R$ such that
\begin{equation}\label{fund}\tag{$\star$}
I=\mathfrak{p}_1\mathfrak{p}_2\cdots \mathfrak{p}_k.
\end{equation}
\end{Fundamental}
\noindent The key to comprehending factorizations in $R$ lies in understanding products of the form \eqref{fund} where $\sum_{i=1}^k [\mathfrak{p}_i]=0$ in $\mathcal{C}(R)$ (see Lemma \ref{irreducibles} below).
\section{More on the Carlitz Characterization.}
We open with a few simple lemmas which will prove useful, especially in our later work. The first will characterize the irreducible elements of $R$ in terms of the class group.
\begin{lemma}\label{irreducibles}
Let $R$ be an algebraic number ring and $x$ a nonzero nonunit of $R$ with
\[
(x)=\mathfrak{p}_1\cdots \mathfrak{p}_n,
\]
where $n\geq 1$ and the $\mathfrak{p}_i$'s are not necessarily distinct prime ideals of $R$. The element $x$ is irreducible in $R$ if and only if
\begin{enumerate}
\item $\sum [\mathfrak{p}_i] =0$, and
\item if $S\subsetneq \{1,\ldots, n\}$ is a nonempty subset, then $\sum_{i \in S}[\mathfrak{p}_i]\neq 0$.
\end{enumerate}
\end{lemma}
\begin{proof} ($\Rightarrow$) That $\sum [\mathfrak{p}_i] =0$ follows from the definition of the class group. Suppose there is a nonempty proper subset $S$ of $\{1,\ldots, n\}$ with $\sum_{i \in S}[\mathfrak{p}_i]= 0$. Let $S'=\{1,\ldots ,n\}-S$. Then both
\[
\sum_{i \in S}[\mathfrak{p}_i]=0\mbox{ and } \sum_{i \in S'}[\mathfrak{p}_i]=0
\]
and hence there are nonunits $y$ and $z$ in $R$ with
\[
(y)=\prod_{i\in S}\mathfrak{p}_i \mbox{ and } (z)= \prod_{i\in S'}\mathfrak{p}_i.
\]
Thus, there is a unit $u\in R$ with $x=uyz$ and $x$ is not irreducible.
($\Leftarrow$) Suppose that $x=yz$ in $R$. By the fundamental theorem of ideal theory in $R$, there are nonempty subsets $S, S'$ of $\{1, \ldots ,n\}$ so that
\[
(y)=\prod_{i\in S}\mathfrak{p}_i\mbox{ and } (z)=\prod_{i\in S'} \mathfrak{p}_i.
\]
Then $\sum_{i\in S} [\mathfrak{p}_i] =0$ contradicting condition (2). This completes the proof.
\end{proof}
\begin{example}\label{ex1} We illustrate the results of the lemma with some examples. Let $\mathfrak{p}$ be a nonprincipal prime ideal of $R$ with $|[\mathfrak{p}]|=n$ (where $|[\mathfrak{p}]|$ represents the order of $[\mathfrak{p}]$ in $\mathcal{C}(R)$). Then
\[
\mathfrak{p}^n=(x),
\]
where $x$ is irreducible in $R$. Moreover, if $\mathfrak{q}$ is any prime ideal taken from class $-[\mathfrak{p}]$, then
\[
\mathfrak{p}\mathfrak{q}=(y),
\]
where $y$ is irreducible in $R$. Hence, in the case where $|\mathcal{C}(R)|=2$, an irreducible element takes one of three forms:
\begin{enumerate}
\item[(i)] $\alpha$ where $(\alpha)=\mathfrak{p}$ for a principal prime ideal $\mathfrak{p}$ of $R$;
\item[(ii)] $\alpha$ where $(\alpha)=\mathfrak{p}^2$ for a nonprincipal prime ideal $\mathfrak{p}$ of $R$;
\item[(iii)] $\alpha$ where $(\alpha)=\mathfrak{p}\mathfrak{q}$ where $\mathfrak{p}$ and $\mathfrak{q}$ are distinct nonprincipal prime ideals of $R$.
\end{enumerate}
In case (i), the irreducible $\alpha$ is actually a prime element; in case (ii), $\alpha$ is called \textit{ramified}; and in case (iii), $\alpha$ is called \textit{split}.
\end{example}
Lemma \ref{irreducibles} implies some important finiteness conditions. A sequence of elements $g_1, \ldots ,g_n$ from an abelian group $G$ that satisfies the sum condition in the lemma (i.e., $g_1+\cdots + g_n=0$ and no proper subsum of this sum is zero)
is known as a \textit{minimal zero-sequence}. A good reference on the interplay between factorizations in an algebraic number ring and minimal zero-sequences is \cite{GRu}. An elementary exercise (see for example \cite{Chap}) shows that the number of minimal zero-sequences in a finite abelian group is finite. Since there are finitely many, there is a finite constant $D(G)$ that bounds above the number of elements in any minimal zero-sequence in $G$. The computation of $D(G)$, known as the Davenport constant of $G$, is elusive and better left to our references (\cite{Chap} is a good source). These facts imply the following corollary.
\begin{corollary}\label{thecor}
Let $x\in R^\bullet$ where $R$ is an algebraic number ring.
\begin{enumerate}
\item[(1)] The element $x$ has finitely many nonassociated irreducible factorizations.
\item[(2)] If $x$ is irreducible and $(x) = \mathfrak{p}_1\cdots \mathfrak{p}_k$, then $k\leq D(\mathcal{C}(R))$.
\end{enumerate}
\end{corollary}
Having established that irreducible factorizations are essentially finite in number, we produce one below which will be of particular interest.
\begin{lemma}\label{carlitzlemma}
Let $R$ be a ring of algebraic integers of class number greater than 2. Then there are not necessarily distinct irreducible elements $\alpha_1, \alpha_2, \beta_1, \beta_2$, and $\beta_3$ such that
\begin{equation}\label{basic}\tag{$\ddagger$}
\alpha_1\alpha_2 = \beta_1\beta_2\beta_3.
\end{equation}
\end{lemma}
\begin{proof}
Suppose that $\mathcal{C}(R)$ contains an element $g$ with $|g|=n>2$. Let $\mathfrak{p}_1$ be a prime ideal of $R$ taken from class $g$, $\mathfrak{p}_2$ a prime ideal taken from class $2g$, $\mathfrak{p}_3$ a prime ideal taken
from class $(n-2)g$, and $\mathfrak{p}_4$ a prime ideal taken from class $(n-1)g$. (In the cases $n=3$ or $4$, you can pick these ideals distinctly.) Define the irreducible elements $\alpha$, $\beta$, $\gamma$, and $\delta$ of $R$ by
\begin{enumerate}
\item $(\alpha) = \mathfrak{p}_1\mathfrak{p}_4$,
\item $(\beta) = \mathfrak{p}_1^2\mathfrak{p}_3$,
\item $(\gamma) = \mathfrak{p}_2\mathfrak{p}_3$,
\item $(\delta) = \mathfrak{p}_2\mathfrak{p}_4^2$.
\end{enumerate}
The ideal equation $(\mathfrak{p}_1^2\mathfrak{p}_3)(\mathfrak{p}_4^2\mathfrak{p}_2) = (\mathfrak{p}_1\mathfrak{p}_4)^2(\mathfrak{p}_2\mathfrak{p}_3)$ yields that
\[
\beta\delta = u\alpha^2\gamma
\]
for some $u\in \mathcal{U}(R)$.
If all the nonidentity elements of $\mathcal{C}(R)$ are of order 2, then let $g_1$ and $g_2$ be such elements with $g_1\neq g_2$. Suppose further that $g_3=g_1+g_2$. Thus, $g_1, g_2$, and $g_3$ are all
distinct elements of $\mathcal{C}(R)$ of order 2. If $\mathfrak{p}_1$, $\mathfrak{p}_2$, and $\mathfrak{p}_3$ are prime ideals of $R$ taken from the classes $g_1$, $g_2$, and $g_3$ respectively, then
\[
\mathfrak{p}_1^2 = (\beta_1),\; \mathfrak{p}_2^2 = (\beta_2),\; \mathfrak{p}_3^2=(\beta_3),\; \mbox{ and } \mathfrak{p}_1\mathfrak{p}_2\mathfrak{p}_3 = (\alpha)
\]
with $\beta_1$, $\beta_2$, $\beta_3$, and $\alpha$ irreducible elements of $R$. Thus in $R$ we have
\[
\alpha^2 = u\beta_1\beta_2\beta_3
\]
for some unit $u$ of $R$. This completes the proof.
\end{proof}
We are now in a position to offer a very short proof of Carlitz's theorem.
\begin{proof}[Proof of Carlitz's Theorem] ($\Leftarrow$) If $R$ is half-factorial, then Lemma \ref{carlitzlemma} implies, via \eqref{basic}, that $|\mathcal{C}(R)|\leq 2$.
($\Rightarrow$) Let $x\in R^\bullet$ with
\[
(x)=\mathfrak{q}_1\cdots \mathfrak{q}_n\mathfrak{p}_1\cdots \mathfrak{p}_m,
\]
where the prime ideals $\mathfrak{q}_i$ are principal and the prime ideals $\mathfrak{p}_j$ are not principal. By our remarks in Example \ref{ex1}, $m$ is even and any factorization of $x$ into irreducibles has length $n + \frac{m}{2}$. Thus $R$ is half-factorial and the proof
is complete.
\end{proof}
\section{Characterizations Involving the Length Set.} If $R$ is an algebraic number ring and $x$ a nonzero nonunit of $R$, then set
\[
\mathcal{L}(x)=\{ k\, |\, \exists \mbox{ irreducibles } \alpha_1, \ldots , \alpha_k \in R \mbox{ with } x=\alpha_1\cdots \alpha_k\}.
\]
The set $\mathcal{L}(x)$ is known as the set of lengths of $x$ and a general \textsc{Monthly} survey on this topic can be found in \cite{Ger1}. Corollary \ref{thecor} implies
that $|\mathcal{L}(x)|<\infty$ for any $x\in R^\bullet$. By Carlitz's theorem, if $R$ has class number 2, then $\mathcal{L}(x)=\{k\}$ for some $k\in\mathbb{N}_0$, and if $|\mathcal{C}(R)|>2$, then Lemma \ref{carlitzlemma} implies that
there is an $x\in R$ with $|\mathcal{L}(x)|>1$. Set
\[
L(x)=\max\, \mathcal{L}(x), \; \ell(x)=\min\, \mathcal{L}(x),
\]
and
\[
\rho(x)=\frac{L(x)}{\ell (x)}.
\]
Since $L(x)<\infty$, $\rho(x)$ is a rational $q\geq 1$ which is known as the \textit{elasticity} of $x$ in $R$. We can turn this combinatorial constant into a global descriptor by setting
\[
\rho(R) = \sup\{\rho(x)\, |\, x\in R\}.
\]
Hence, $R$ is half-factorial if and only if $\rho(R)=1$ and by Lemma \ref{carlitzlemma}, if $R$ has class number greater than 2, then $\rho(R)\geq \frac{3}{2}$. A
detailed study of elasticity in number rings can be found in \cite{V} and a more general survey on the subject in \cite{And}. In \cite{V} it is established that
\[
\rho(R)= \frac{D(\mathcal{C}(R))}{2},
\]
where again $D(\mathcal{C}(R))$ represents Davenport's constant.
A more precise version of the elasticity has recently become popular in the literature. Let $k\in \mathbb{N}$ and set
\[
\rho_k(R) =\sup\{\sup\, \mathcal{L}(x)\, |\, \min\, \mathcal{L}(x) \leq k\mbox{ for } x\in R^\bullet\}.
\]
Using Corollary \ref{thecor} along with \cite[Proposition 1.4.2]{GHKb}, the fact that $R$ is an algebraic number ring yields a slightly simpler version of this definition:
\[
\rho_k(R) =\sup\{\max\, \mathcal{L}(x)\, |\, k \in \mathcal{L}(x) \mbox{ for } x\in R^\bullet\}.
\]
We prove a few convenient facts concerning the $\rho_k(R)$'s.
\begin{lemma}\label{rhok}
If $R$ is an algebraic number ring, then the following assertions hold.
\begin{enumerate}
\item[(1)] $\rho_1(R)=1$.
\item[(2)] $\rho_k(R) \geq k$ for all $k\in \mathbb{N}$.
\item[(3)] For each $k\in \mathbb{N}$, $\rho_k(R)<\infty$.
\item[(4)] For each $k\in \mathbb{N}$, $\rho_k(R)<\rho_{k+1}(R)$.
\end{enumerate}
\end{lemma}
\begin{proof}
The proof of (1) follows directly from the definition of an irreducible element. For (2), if $x$ is a prime element of $R$, then $\mathcal{L}(x^k)=\{k\}$ so $k\in\mathcal{L}(x^k)$ and $k=\max \mathcal{L}(x^k)$. That $\rho_k(R)\geq k$ now follows.
For (3), suppose that $k\in \mathcal{L}(x)$ for some $x\in R^\bullet$. Thus $x=\alpha_1\cdots \alpha_k$ where each $\alpha_i\in \mathcal{A}(R)$. Write each $(\alpha_i)=\mathfrak{p}_{i,1}\cdots \mathfrak{p}_{i,t_i}$ where each $\mathfrak{p}_{i,j}$ is a prime
ideal of $R$. By our previous comment, each $t_i\leq D(\mathcal{C}(R))$. Thus, $(x)$ factors into at most $k\cdot D(\mathcal{C}(R))$ prime ideals, which also bounds the length of a factorization of $x$ into irreducibles. Hence $\max \mathcal{L}(x)\leq
k\cdot D(\mathcal{C}(R))$ for each $x\in R^\bullet$ and thus $\rho_k(R)\leq k\cdot D(\mathcal{C}(R))$.
For (4), suppose $m=\rho_k(R)$. Then there are irreducible elements $\alpha_1,\ldots, \alpha_k$, $\beta_1,\ldots, \beta_m$ of $R$ with $\alpha_1\cdots \alpha_k=\beta_1\cdots \beta_m$. If $x$ is any irreducible element of $R$, then
$x\alpha_1\cdots \alpha_k=x\beta_1\cdots \beta_m$ and hence $\rho_{k+1}(R)\geq m+1 > \rho_k(R)$.
\end{proof}
The true relationship between $\rho(R)$ and the $\rho_k(R)$'s can be found in \cite[Proposition 6.3.1]{GHKb}:
\[
\rho(R) = \sup \left\{ \frac{\rho_k(R)}{k}\, |\, k\in \mathbb{N}\right\} = \lim_{k\rightarrow \infty} \frac{\rho_k(R)}{k}.
\]
Lemma \ref{carlitzlemma} again allows us to make an immediate deduction. (Part of this result can be found prior to \cite[Proposition 1.4.2]{GHKb}.)
\begin{theorem}
Let $R$ be an algebraic number ring. The following statements are equivalent.
\begin{enumerate}
\item[(1)] $R$ has class number less than or equal to 2.
\item[(2)] $\rho(R)=1$.
\item[(3)] $\rho_2(R) = 2$.
\item[(4)] $\rho_k(R) = k$ for some $k\geq 2$.
\item[(5)] For all irreducibles $x$ and $y$ in $R$, $\mathcal{L}(xy) = \{2\}$.
\item[(6)] For all irreducibles $x$ and $y$ in $R$, $|\mathcal{L}(xy)|=1$.
\end{enumerate}
\end{theorem}
\begin{proof} Assertions (1) and (2) are equivalent by the Carlitz theorem. If (2) holds, then every $\mathcal{L}(x)$ with $2\in \mathcal{L}(x)$ is of the form $\{2\}$. Thus $\max \mathcal{L}(x)=2$ which yields $\rho_2(R)=2$ and (3) holds.
Clearly (3) implies (4). Assume (4) holds. If $R$ has class number greater than 2, then Lemma \ref{carlitzlemma} implies that $\rho_2(R)\geq 3$.
It easily follows from Lemma \ref{rhok} item (4) and induction that $\rho_k(R)>k$ for all $k\geq 2$, a contradiction. Thus $R$ has class number at most 2 and (1) holds. Hence (1), (2), (3), and (4) are equivalent.
If (3) holds, then $2\in \mathcal{L}(x)$ implies that $2=\max\mathcal{L}(x)$ and $\mathcal{L}(x)=\{2\}$, which yields (5). Statements (5) and (6) are equivalent by the definition of the length set. If (6) holds, then $|\mathcal{L}(xy)|=1$ implies
that $\max \mathcal{L}(xy)=2$, which in turn yields (3). This completes the proof.
\end{proof}
Let's take a slightly different look at the length set. Given an algebraic number ring $R$ and $x$ a nonzero nonunit, suppose that
\[
\mathcal{L}(x)=\{n_1, \ldots ,n_k\}
\]
where $n_1<n_2 < \cdots < n_k$. The delta set of $x$ is defined as
\[
\Delta(x)=\{n_i - n_{i-1}\, |\, 2\leq i \leq k\}
\]
with $\Delta(x)=\emptyset$ if $k=1$. We can convert this local descriptor into a global one by setting
\[
\Delta(R) =\bigcup_{x\in R^\bullet} \Delta(x).
\]
When $R$ is a Krull domain (a more general structure than an algebraic number ring) a great deal is known about the
structure of $\Delta(R)$ (see \cite[Section 6.7]{GHKb} and \cite{Sch}).
We show how the notion of the $\Delta$-set fits in with class number 2.
\begin{theorem}
Let $R$ be an algebraic number ring. Then $R$ has class number at most 2 if and only if $\Delta (R)=\emptyset$.
\end{theorem}
\begin{proof} The implication ($\Rightarrow$) clearly holds by Carlitz's Theorem. For ($\Leftarrow$), if $\Delta(R)=\emptyset$ and $R$ has class number greater than 2,
then Lemma \ref{carlitzlemma} yields a contradiction. This completes the proof.
\end{proof}
\section{Beyond the Length Set.} Our characterizations to this point have been solely dependent
on the length set. We now consider an invariant that relies on individual factorizations as much as or
more than the set $\mathcal{L}(x)$. It offers a numeric measure of how far an
element is from being prime.
\begin{definition} \label{firstdef} Let $R$ be an algebraic number ring.
For $x\in R^\bullet$, we define
$\omega(x) = n$ if $n$ is the smallest positive integer with the
property that whenever $x\mid a_1\cdots a_t$, where each
$a_i\in \mathcal{A}(R)$, there is a $T\subseteq \{1,2,\dots, t\}$ with
$|T|\leq n$ such that $x\mid \prod_{k\in T}a_k$. If no such $n$
exists, then $\omega(x)=\infty$. For $x\in \mathcal{U}(R)$, we define
$\omega(x)=0$. Finally, set
\[
\omega(R)=\sup\{\omega(\alpha)\, |\, \alpha\in \mathcal{A}(R)\}.
\]
\end{definition}
\noindent The definition above is taken from \cite{CSP}, but there are several other equivalent versions that can be found in the literature (see \cite{AndChap}). It follows directly from the definition that an element $x\in R$ is prime if and only if $\omega(x)=1$. The survey paper \cite{OnPel} is a good general reference on the $\omega$-function and we illustrate Definition \ref{firstdef} by appealing directly to the class number 2 case.
\begin{example}\label{omegaex}
Suppose that $R$ is an algebraic number ring of class number 2. We use the classification of irreducible elements of $R$ given in Example \ref{ex1} to determine the $\omega$-values of the irreducibles of $R$. If $\alpha$ is a prime element, then $\omega(\alpha)=1$. So, let $\alpha$ be a nonprime element of $\mathcal{A}(R)$ where $(\alpha)=\mathfrak{p}^2$ for a nonprincipal prime ideal $\mathfrak{p}$ of $R$. Thus $\omega(\alpha)>1$, so suppose that $\alpha \, |\, \beta_1\cdots \beta_r$ where each $\beta_i$ is irreducible in $R$ and $r\geq 2$. Hence, either one of the $\beta_i$'s is of the form $(\beta_i)=\mathfrak{p}^2$, or there are irreducibles $\beta_i$ and $\beta_j$ (with $i\neq j$) so that $(\beta_i)=\mathfrak{p}\mathfrak{q}_1$ and $(\beta_j)=\mathfrak{p}\mathfrak{q}_2$ where $\mathfrak{q}_1$ and $\mathfrak{q}_2$ are nonprincipal prime ideals of $R$ distinct from $\mathfrak{p}$. In the first case, $\alpha$ is an associate of $\beta_i$ and in the second, $\alpha \, |\, \beta_i\beta_j$ and hence $\omega(\alpha)=2$. A similar argument shows that $\omega(\alpha)=2$ if $(\alpha)=\mathfrak{p}\mathfrak{q}$ where $\mathfrak{p}$ and $\mathfrak{q}$ are distinct nonprincipal prime ideals of $R$.
\end{example}
We introduce an aid
which will simplify the computation of $\omega(x)$.
\begin{definition}
Let $x\in R^\bullet$ where $R$ is an algebraic number ring.
A \textit{bullet} for $x$ is a product $\beta_1\cdots \beta_r$ of irreducible elements $\beta_1, \ldots ,\beta_r$ of $R$ such that
\begin{enumerate}
\item[(i)] $x$ divides the product $\beta_1\cdots \beta_r$, and
\item[(ii)] for each $1\leq i\leq r$, $x$ does not divide $\beta_1\cdots \beta_r/\beta_i$.
\end{enumerate}
The set of bullets of $x$ is denoted $\mathrm{bul}(x)$.
\end{definition}
The notion of bullet gives us a nice tool to compute $\omega(x)$. To see this, if $\beta_1\cdots \beta_r$ is a bullet for $x\in R^\bullet$, then $x$ divides no product of the form $\beta_1\cdots \beta_r/\beta_i$ for any $i$, and by definition
$\omega(x) \geq r$. On the other hand, if $\alpha_1\cdots \alpha_t$ is a product of $t$ irreducibles of $R$ with
$x\, |\, \alpha_1\cdots \alpha_t$ and $\alpha_1\cdots \alpha_t$ is not a bullet of $x$, then some subproduct of
$\alpha_1\cdots \alpha_t$ must be a bullet. We have essentially shown the following (a complete proof can be found in \cite[Proposition 2.10]{OnPel}).
\begin{proposition}\label{hilo} If $R$ is an algebraic number ring and $x\in R^\bullet$, then
\[
\omega(x) = \sup\{r\, |\, \beta_1\cdots \beta_r\in \mathrm{bul}(x)\mbox{ where each }\beta_i\in \mathcal{A}(R)\}.
\]
\end{proposition}
Hence, for $R$ with class number 2, Example \ref{omegaex} shows that $\omega(R)=2$. Proposition \ref{hilo} implies another nice
finiteness condition.
\begin{corollary}\label{almosthilo} Let $R$ be an algebraic number ring and $x\in\mathcal{A}(R)$. Then
\[
\omega(x)\leq D(\mathcal{C}(R))<\infty
\]
and hence $\omega(R) \leq D(\mathcal{C}(R))<\infty$.
\end{corollary}
\noindent In fact, the interested reader can find a proof that $\omega(R)=D(\mathcal{C}(R))$ in \cite[Corollary 3.3]{AndChap}.
\begin{proof}[Proof of Corollary \ref{almosthilo}] We prove only the first assertion, as the second follows directly from it.
Let $x\in \mathcal{A}(R)$.
Write $(x)=\mathfrak{p}_1^{t_1}\cdots \mathfrak{p}_k^{t_k}$ for distinct prime ideals $\mathfrak{p}_1, \ldots, \mathfrak{p}_k$ in $R$.
Let $\alpha_1, \ldots ,\alpha_n$ be irreducibles of $R$ such that $x\, |\, \alpha_1\cdots \alpha_n$. For each $\mathfrak{p}_i$ choose a minimal subset $T_i\subseteq \{1, \ldots, n\}$ so that $\mathfrak{p}_i^{t_i}\, |\, (\prod_{j\in T_i}\alpha_j )$.
Set $A_i=\{\alpha_j\, |\, j\in T_i\}$. By the minimality of $T_i$, each $(\alpha_j)$, with $\alpha_j$ in $A_i$, is divisible by $\mathfrak{p}_i$ and hence $|A_i|\leq t_i$. If $A=\bigcup_{i=1}^k A_i$, then by Corollary \ref{thecor}, $|A|\leq t_1+\cdots +t_k \leq D(\mathcal{C}(R))$.
By using the multiplicative properties
of prime ideals, we obtain that $x\, |\, \prod_{\alpha_i\in A} \alpha_i$,
which completes the proof.
\end{proof}
A slight adjustment in the proof of Corollary \ref{almosthilo} yields a class number 2 characterization (see \cite[Theorem 3.4]{AndChap}).
\begin{theorem}
Let $R$ be an algebraic number ring. Then $R$ has class number at most 2 if and only if $\omega(R)\leq 2$.
\end{theorem}
\begin{proof}
While the argument is trivial using the remark directly following Corollary \ref{almosthilo}, for completeness we offer a proof.
Our work in Example \ref{omegaex}, along with the fact that class number 1 trivially implies $\omega(R)=1$, yields ($\Rightarrow$).
For ($\Leftarrow$), assume $\omega(R)\leq 2$ and that $R$ has class number greater than 2. We pivot in a manner similar to Lemma \ref{carlitzlemma}. Suppose $\mathcal{C}(R)$ has an element $g$ with $|g|=n>2$. Let
$\mathfrak{p}_1, \ldots ,\mathfrak{p}_n$ be distinct prime ideals of $R$ with $[\mathfrak{p}_i]=g$. Let $x\in \mathcal{A}(R)$ be such
that $(x)=\mathfrak{p}_1\cdots \mathfrak{p}_n$. If for each $1\leq i\leq n$ the irreducible $\alpha_i$ is such that
$(\alpha_i)=\mathfrak{p}_i^n$, then it is clear that $\alpha_1\cdots \alpha_n$ is a bullet for $x$ and $\omega(x)\geq n>2$.
If $\mathcal{C}(R)$ only has nontrivial elements of order 2, then let $\alpha, \beta_1, \beta_2$, and $\beta_3$ be the irreducibles constructed in the second part of the proof of Lemma \ref{carlitzlemma}.
As in the previous case, $\beta_1\beta_2\beta_3$ is a bullet for $\alpha$ and $\omega(\alpha)\geq 3>2$.
In either case, $\omega(R) >2$, which completes the proof.
\end{proof}
\section{The Grand Finale!} In these pages we have accomplished a lot. To demonstrate, we tie it all together
in one last tribute to class number 2.
\begin{Nutshell}
Let $R$ be an algebraic number ring. The following statements are equivalent.
\begin{enumerate}
\item[(1)] $R$ has class number at most 2.
\item[(2)] $R$ is a half-factorial domain.
\item[(3)] $\rho(R)=1$.
\item[(4)] $\rho_2(R) = 2$.
\item[(5)] $\rho_k(R) = k$ for some $k\geq 2$.
\item[(6)] For all irreducibles $x$ and $y$ in $R$, $\mathcal{L}(xy) = \{2\}$.
\item[(7)] For all irreducibles $x$ and $y$ in $R$, $|\mathcal{L}(xy)|=1$.
\item[(8)] $\Delta (R)=\emptyset$.
\item[(9)] $\omega(R)\leq 2$.
\end{enumerate}
\end{Nutshell}
We note that our work has not endeavored to determine exactly how many irreducible factorizations there are of an element $x$ in a class number 2 algebraic number ring $R$. If $R$ has class number 2, then a formula
for this computation is contained in \cite{CHR}.
A detailed study of the asymptotic behavior of factorizations in rings with class number 2 can be found in \cite{HK}.
A more general approach to counting irreducible factorizations (with no restrictions on the class number) can be found in \cite{mystery}.
\begin{acknowledgment}{Acknowledgments.} The author gratefully acknowledges support under an Academic Leave during the fall of 2017 funded by Sam Houston State University.
He would also like to thank the referees and editor Susan Colley
for comments that greatly improved the exposition of this paper.
\end{acknowledgment}
\begin{biog}
\item[Scott Chapman] is Scholar in Residence and Distinguished Professor of Mathematics
at Sam Houston State University in Huntsville, Texas. In December of 2016 he finished
a five year appointment as Editor of the American Mathematical Monthly. His editorial
work, numerous publications in the area of non-unique factorizations, and years of
directing REU Programs, led to his designation in 2017 as a Fellow of the American
Mathematical Society.
\begin{affil}
Department of Mathematics and Statistics, Sam Houston State University, Box 2206, Huntsville, TX 77341\\
[email protected]
\end{affil}
\end{biog}
\eject
\end{document} |
\begin{document}
\title{Dynamics of the Tippe Top -- properties of numerical solutions versus the dynamical equations}
{\abstract We study the relationship between numerical solutions for inverting Tippe Top and the structure of the dynamical equations. The numerical solutions confirm oscillatory behaviour of the inclination angle $\theta(t)$ for the symmetry axis of the Tippe Top. They also reveal further fine features of the dynamics of inverting solutions defining the time of inversion. These features are partially understood on the basis of the underlying dynamical equations.}
\noindent{\it Key words: tippe top; rigid body; nonholonomic mechanics; numerical solutions}
\section{Introduction}
A Tippe Top (TT) is modeled by an axially symmetric sphere of mass $m$ and radius $R$ with center of mass ($CM$) shifted w.r.t. the geometrical center $O$ by $R\alpha$, $0<\alpha<1$ (see Fig. \ref{TT_diagram}). In a toy TT it is achieved by cutting off a slice of a sphere and by substituting it with a small peg that is used for spinning the TT. The sphere is rolling and gliding on a flat surface and subjected to the gravitational force $-mg\hat{z}$.
When spun slowly on the spherical part the TT spins wobbly for some time and comes to a standstill due to loss of energy caused by spinning friction. However when the initial spin is sufficiently fast the TT displays a counterintuitive behaviour of flipping upside down to spin on the peg with $CM$ above the geometrical center $O$. It continues spinning for some time until it falls down due to frictional loss of energy.
This flipping behaviour of TT we call {\it inversion}.
\noindent To describe motion of the TT we choose (as in \cite{Nisse2,Nisse3}) a fixed inertial reference frame $(\widehat{X},\widehat{Y},\widehat{Z})$ with $\widehat{X}$ and $\widehat{Y}$ parallel to the supporting plane and with vertical $\widehat{Z}$. We place the origin of this system in the supporting plane. Let $(\hat{x},\hat{y},\hat{z})$ be a frame defined through rotation around $\widehat{Z}$ by an angle $\varphi$, where $\varphi$ is the angle between the plane spanned by $\widehat{X}$ and $\widehat{Z}$ and the plane spanned by the points $CM$, $O$ and $A$.
The third reference frame $(\mathbf{\hat{1}},\mathbf{\hat{2}},\mathbf{\hat{3}})$, with origin at $CM$, is defined by rotating $(\hat{x},\hat{y},\hat{z})$ by an angle $\theta$ around $\hat{y}$. Thus $\mathbf{\hat{3}}$ is parallel to the symmetry axis, and $\theta$ is the angle between $\hat{z}$ and $\mathbf{\hat{3}}$. This frame is not fully fixed in the body. The axis $\mathbf{\hat{2}}$ points behind the plane of the picture of Fig.~\ref{TT_diagram}.
\begin{figure}
\caption{Diagram of the TT. Note that $\mathbf{a}=R(\alpha\mathbf{\hat{3}}-\hat{z})$ is the vector from the center of mass $CM$ to the point of support $A$.}
\label{TT_diagram}
\end{figure}
We let $\mathbf{s}$ denote the position of $CM$ w.r.t. the origin of the frame $(\widehat{X},\widehat{Y},\widehat{Z})$ and the vector from $CM$ to $A$ is $\mathbf{a}=R(\alpha\mathbf{\hat{3}}-\hat{z})$.
The orientation of the body w.r.t. the inertial reference frame $(\widehat{X},\widehat{Y},\widehat{Z})$ is described by the Euler angles $(\theta,\varphi,\psi)$, where $\psi$ is the rotation angle of the sphere about the symmetry axis. With this notation, the angular velocity of the TT is $\boldsymbol{\omega}=-\dot{\varphi}\sin\theta\mathbf{\hat{1}}+\dot{\theta}\mathbf{\hat{2}}+(\dot{\psi}+\dot{\varphi}\cos\theta)\mathbf{\hat{3}}$, and we denote $\omega_3 :=\dot{\psi}+\dot{\varphi}\cos\theta$.
The principal moments of inertia along the axes $(\mathbf{\hat{1}},\mathbf{\hat{2}},\mathbf{\hat{3}})$ are denoted by $I_1=I_2$ and $I_3$, so the inertia tensor $\mathbb{I}$ will have components $(I_1,I_1,I_3)$ with respect to the $(\mathbf{\hat{1}},\mathbf{\hat{2}},\mathbf{\hat{3}})$-frame. The axes $\mathbf{\hat{1}}$ and $\mathbf{\hat{2}}$ will be principal axes due to the axial symmetry of TT.
Motion of TT is described by the standard Newton equations for a rolling and gliding rigid body. They have the vector form
\begin{equation}
\label{TTequ} m\mathbf{\ddot{s}}=\mathbf{F}-mg\hat{z},\quad \mathbf{\dot{L}}=\mathbf{a}\times\mathbf{F},\quad\mathbf{\dot{\hat{3}}}=\boldsymbol{\omega}\times\mathbf{\hat{3}}=\frac{1}{I_1}\left(\mathbf{L}\times\mathbf{\hat{3}}\right),
\end{equation}
where $\mathbf{F}=\mathbf{F}_{R}+\mathbf{F}_{f}=g_n\hat{z}-\mu g_n\mathbf{v}_A$ is the external force acting on the TT at the point of support $A$. In this model $\mathbf{F}=\mathbf{F}_{R}+\mathbf{F}_{f}$ consists of a vertical normal force $g_n\geq 0$ and a viscous-type friction force $-\mu g_{n}\mathbf{v}_A$, acting against the gliding velocity $\mathbf{v}_A$, with $\mu\geq 0$ the friction coefficient.
Equations \eqref{TTequ} admit Jellett's integral of motion $\lambda=-\mathbf{L\cdot a}$, $\dot{\lambda}=-\mathbf{\dot{L}}\cdot\mathbf{a}-\mathbf{L}\cdot\mathbf{\dot{a}}=-(\mathbf{a\times F})\cdot\mathbf{a}-\mathbf{L}\cdot(\frac{R\alpha}{I_1}(\mathbf{L}\times\mathbf{\hat{3}}))=0$. The total energy $E=\frac{1}{2}m\mathbf{\dot{s}}^2+\frac{1}{2}\boldsymbol{\omega}\cdot\mathbf{L}+mg\mathbf{s}\cdot\hat{z}$ is monotonically decreasing since $\dot{E}=\mathbf{v}_{A}\cdot\mathbf{F}=-\mu g_n|\mathbf{v}_A|^2$. The rolling and gliding solutions satisfy the one-sided constraint $(\mathbf{s+a})\cdot\hat{z}=0$ and its derivative satisfies $\mathbf{v}_{A}\cdot\hat{z}=0$. When Eqs. \eqref{TTequ} are expressed in terms of the Euler angles we get \cite{Nisse2,Nisse4}:
\begin{align}
\label{ddth}\ddot{\theta}=&\frac{\sin\theta}{I_1}\left(I_1\dot{\varphi}^2\cos\theta-I_3\omega_3\dot{\varphi}-R\alpha g_n\right)+\frac{R\mu g_n\nu_x}{I_1}(1-\alpha\cos\theta),&\\
\label{ddph}\ddot{\varphi}=&\frac{I_3\dot{\theta}\omega_3-2I_1\dot{\theta}\dot{\varphi}\cos\theta-\mu g_n\nu_y R(\alpha-\cos\theta)}{I_1\sin\theta},&\\
\label{dot_om}\dot{\omega}_3=&-\frac{\mu g_n\nu_y R\sin\theta}{I_3},&\\
\label{nu_x}\dot{\nu}_x=&\frac{R\sin\theta}{I_1}\left(\dot{\varphi}\omega_3\left(I_3(1-\alpha\cos\theta)-I_1\right)+g_nR\alpha(1-\alpha\cos\theta)-I_1\alpha(\dot{\theta}^2+\dot{\varphi}^2\sin^2\theta)\right)\nonumber\\
&\qquad-\frac{\mu g_n\nu_x}{mI_1}\left( I_1+mR^2(1-\alpha\cos\theta)^2\right)+\dot{\varphi}\nu_y,\\
\label{nu_y}\dot{\nu}_y=&-\frac{\mu g_n\nu_y}{mI_1 I_3}\left(I_1I_3+mR^2 I_3(\alpha-\cos\theta)^2+mR^2I_1\sin^2\theta\right)\nonumber\\
&+\frac{\omega_3\dot{\theta} R}{I_1}\left(I_3(\alpha-\cos\theta)+I_1\cos\theta\right)-\dot{\varphi}\nu_x.
\end{align}
These equations are a complicated nonlinear dynamical system for 6 unknowns. The value of the normal force $g_n$ in the dynamical equations is determined by the second derivative $\frac{d^2}{dt^2}(\mathbf{s+a})\cdot\hat{z}=0$ of the contact constraint:
\begin{equation}
\label{equ_for_g_n}g_n=\frac{mgI_1+mR\alpha(\cos\theta(I_1\dot{\varphi}^2\sin^2\theta+I_1\dot{\theta}^2)-I_3\dot{\varphi}\omega_3\sin^2\theta)}{I_1+mR^2\alpha^2\sin^2\theta-mR^2\alpha\sin\theta(1-\alpha\cos\theta)\mu\nu_x}.
\end{equation}
Equations \eqref{ddth}--\eqref{nu_y} admit the same Jellett integral which, expressed through the Euler angles, reads $\lambda=RI_1\dot{\varphi}\sin^2\theta-RI_3\omega_3(\alpha-\cos\theta)$.
\noindent An asymptotic analysis of TT inversion \cite{Eben,Mars,RSG} provides sufficient conditions for physical parameters that the TT has to satisfy and for initial conditions so that the TT is inverting.
\begin{theorem}\label{inversion_cond}
For a Tippe Top with parameters satisfying $1-\alpha<\gamma=\frac{I_1}{I_3}<1+\alpha$, an inverted spinning solution is the only Lyapunov stable state in the asymptotic set of nongliding solutions $M=\{(\mathbf{L},\mathbf{\hat{3}},\mathbf{v}_A):\dot{E}=-\mu g_n|\mathbf{v}_A|^2=0\}$ provided that $\lambda>\max\left\{\lambda_{\text{thres}}=\frac{\sqrt{mgR^3\alpha I_3}(1+\alpha)^2}{\sqrt{1+\alpha-\gamma}},\lambda_{\text{up}}=\frac{\sqrt{mgR^3\alpha I_3}(1-\alpha)^2}{\sqrt{\alpha+\gamma-1}}\right\}$.
\end{theorem}
\begin{remark}
When $1-\alpha<1-\alpha^2<\gamma<1+\alpha$, then $\lambda_{\text{thres}}>\lambda_{\text{up}}$.
\end{remark}
\begin{remark}
Inversion of TT for $\lambda>\max\{\lambda_{\text{thres}},\lambda_{\text{up}}\}$ is a consequence of the LaSalle theorem applied to each solution having positive value $g_{n}(t)>0$ of the normal force \cite{RSG}. A direct application of the LaSalle theorem would require specification of an initial compact invariant set, which is difficult to define due to the one-sided constraint $(\mathbf{s+a})\cdot\hat{z}=0$ that does not exclude existence of solutions having negative $g_n(t)<0$.
\end{remark}
\begin{corollary}
When $\lambda>\max\{\lambda_{\text{thres}},\lambda_{\text{up}}\}$ then the asymptotic set $M$ contains only one solution and every solution with $g_n(t)>0$ is asymptotically approaching the inverted spinning solution $(\mathbf{L},\mathbf{\hat{3}},\mathbf{v}_A)=\left(\frac{\lambda}{R(1+\alpha)}\hat{z},\hat{z},0\right)$.
\end{corollary}
Application of the LaSalle theorem to TT inversion provides only an existential result. It states when a TT inverts but it says nothing about dynamical behaviour of solutions during the inversion.
There have been attempts to study dynamics of inversion by applying a gyroscopic balance condition \cite{Moff,Ued} where the quantity $\xi=I_3\omega_3-I_1\dot{\varphi}\cos\theta$ is assumed to be close to zero. As has been pointed out in these articles, this condition is useful for explaining rising of a rotating egg. For TT it is approximately satisfied in a certain neighborhood of the angle $\theta=\frac{\pi}{2}$ when the equator of the TT is in contact with the supporting plane. The use of TT equations simplified through the condition $\xi\approx0$ leads to an oversimplified equation of the form $\dot{\theta}=\frac{\mu g_n R^2(1-\alpha\cos\theta)^2}{\lambda}\nu_y$ providing a monotonously increasing $\theta(t)$ as a solution \cite{Moff}. It is intuitively comprehensible that such a solution may reflect correctly some type of averaged behaviour of the inclination angle $\theta(t)$ during inversion. The problem is however that no suitable definition of an averaged angle $\theta(t)$ is available. As is well known from numerical simulations $\theta(t)$ oscillates (meaning that $\dot{\theta}(t)$ changes sign many times) about a logistic type curve during inversion.
\noindent A rigorous approach showing oscillatory behaviour of TT inverting solutions has been proposed in \cite{Rau,Nisse2,Nisse4}. It is based on an integrated form of TT equations that are equivalent to the Euler angle equations and to the dynamical Eqs. \eqref{TTequ}. We get the integrated form of TT equations by considering functions which are integrals of motion for the rolling axisymmetric sphere, the modified energy (with $\mathbf{\dot{s}}=-\boldsymbol{\omega}\times\mathbf{a}$):
\begin{align}
\label{Etilde_def}\tilde{E}=&\frac{1}{2}m\mathbf{\dot{s}}^2+\frac{1}{2}\boldsymbol{\omega}\cdot\mathbf{L}+mg\mathbf{s}\cdot\hat{z}=\frac{1}{2}\left(I_1\dot{\varphi}^2\sin^2\theta+I_1\dot{\theta}^2+I_3\omega_{3}^2\right)+mgR(1-\alpha\cos\theta)\nonumber\\
&+\frac{1}{2}mR^2\bigg[(\alpha-\cos\theta)^2(\dot{\theta}^2+\dot{\varphi}^2\sin^2\theta)+\sin^2\theta(\dot{\theta}^2+\omega_{3}^2+2\omega_3\dot{\varphi}(\alpha-\cos\theta))\bigg].
\end{align}
and the Routh function:
\begin{equation}
\label{Routh_def}D(\theta,\omega_3)=\omega_3\sqrt{I_1I_3+mR^2I_3(\alpha-\cos\theta)^2+mR^2I_1\sin^2\theta}=I_3\omega_3\sqrt{d(\cos\theta)},
\end{equation}
where $d(\cos\theta)=\gamma+\sigma(\alpha-\cos\theta)^2+\sigma\gamma(1-\cos^2\theta)$, $\sigma=\frac{mR^2}{I_3}$.
For the rolling and gliding TT, equations of motion \eqref{ddth}--\eqref{nu_y} are equivalent to the following equations:
\begin{align}
&\frac{d}{dt}\lambda(\theta,\dot{\theta},\dot{\varphi},\omega_3)=0,\\
&\frac{d}{dt}D(\theta,\omega_3)=\frac{\gamma m}{\alpha\sqrt{d(\hat{z}\cdot\mathbf{\hat{3}})}}(\hat{z}\times\mathbf{a})\cdot\mathbf{\dot{v}}_A,\\
&\frac{d}{dt}\tilde{E}(\theta,\dot{\theta},\dot{\varphi},\omega_3)=m(\boldsymbol{\omega}\times\mathbf{a})\cdot\mathbf{\dot{v}}_A,\\
&\frac{d}{dt}m\mathbf{\dot{r}}=-\mu g_n\mathbf{v}_A,
\end{align}
where $\mathbf{r}=\mathbf{s}-s_{\hat{z}}\hat{z}$. From these equations it follows that for any given solution $(\theta(t),\dot{\theta}(t),\dot{\varphi}(t),\omega_3(t),\nu_x(t),\nu_y(t))$ the functions $D(t)$, $\tilde{E}(t)$ depend on time as $D(t)=D(\theta(t),\omega_3(t))$, $\tilde{E}(t)=\tilde{E}(\theta(t),\dot{\theta}(t),\dot{\varphi}(t),\omega_3(t))$ and then {\it this} solution satisfies the three equations
\begin{align}
\label{lambda_equ}\lambda &=RI_1\dot{\varphi}\sin^2\theta-RI_3\omega_3(\alpha-\cos\theta),\\
\label{Routh_equ}D(t)&=I_3\omega_3\sqrt{d(\cos\theta)},\\
\label{E_equ}\tilde{E}(t)&=\frac{1}{2}\left(I_1\dot{\varphi}^2\sin^2\theta+I_1\dot{\theta}^2+I_3\omega_{3}^2\right)+mgR(1-\alpha\cos\theta)\nonumber\\
&+\frac{1}{2}mR^2\bigg[(\alpha-\cos\theta)^2(\dot{\theta}^2+\dot{\varphi}^2\sin^2\theta)+\sin^2\theta(\dot{\theta}^2+\omega_{3}^2+2\omega_3\dot{\varphi}(\alpha-\cos\theta))\bigg],
\end{align}
and the equation $m\mathbf{\ddot{r}}=-\mu g_n\mathbf{v}_A$.
The three first equations are decoupled from the last one and on their own they recall the problem of a purely rolling axisymmetric sphere solved by Chaplygin \cite{Chap}. By eliminating $\dot{\varphi}(t)$, $\omega_3(t)$ from \eqref{lambda_equ} and \eqref{Routh_equ} and substituting into \eqref{E_equ} one obtains a single first order differential equation for $\theta(t)$:
\begin{equation}
\label{METT}\tilde{E}(t)=g(\cos\theta)\dot{\theta}^2+V(\cos\theta,D(t),\lambda),
\end{equation}
where $g(\cos\theta)=\frac{1}{2}I_3\left(\sigma((\alpha-\cos\theta)^2+1-\cos^2\theta)+\gamma\right)$ and
\begin{equation}
V(z,D(t),\lambda)=mgR(1-\alpha z)+\frac{(\lambda\sqrt{d(z)}+RD(t)(\alpha-z))^2}{2I_3R^2\gamma^2(1-z^2)}+\frac{(R^2D(t)^2-\sigma\lambda^2)}{2R^2I_1}.
\end{equation}
The equation \eqref{METT} has the same algebraic form as the separation equation for a rolling sphere but here $D(t)$, $\tilde{E}(t)$ are time-dependent, so the equation is not separable or explicitly solvable. It is called the {\it Main Equation for the Tippe Top}.
Each solution of the TT equations satisfies its own main equation with suitable functions $D(t)$, $\tilde{E}(t)$ which are {\it a priori unknown}. But all these equations have the same algebraic form and they can be understood as describing a particle with variable mass $2g(\cos\theta)$ moving in a potential well $V(\cos\theta,D(t),\lambda)$ that is deforming in time. In a generic situation the particle reflects many times between the walls of $V$ and, due to energy dissipation, its position $\theta(t)$ goes toward the minimum $\theta_{\min}(t)$ of the potential.
By the LaSalle theorem every Tippe Top satisfying theorem \ref{inversion_cond} {\it has} to invert. For inverting solutions $\lim_{t\to-\infty}\theta(t)=0$, $\lim_{t\to\infty}\theta(t)=\pi$, $\lim_{t\to-\infty}\mathbf{L}(t)=L_0\hat{z}$ and $\lim_{t\to\infty}\mathbf{L}(t)=L_1\hat{z}$ and all pairs $D(t)$, $\tilde{E}(t)$ satisfy the same asymptotic conditions that can be found from the Jellett integral: $\lambda=R(1-\alpha)L_0=R(1+\alpha)L_1$. So $\lim_{t\to-\infty}D(t)=D_0=\frac{\lambda\sqrt{d(1)}}{R(1-\alpha)}$, $\lim_{t\to\infty}D(t)=D_1=-\frac{\lambda\sqrt{d(-1)}}{R(1+\alpha)}$ and $\lim_{t\to-\infty}\tilde{E}(t)=\tilde{E}_0=\frac{\lambda^2}{2R^2I_3(1-\alpha)^2}+mgR(1-\alpha)$, $\lim_{t\to\infty}\tilde{E}(t)=\tilde{E}_1=\frac{\lambda^2}{2R^2I_3(1+\alpha)^2}+mgR(1+\alpha)$.
This means that in the plane $(D,\tilde{E})$ each inverting solution draws a curve $(D(t),\tilde{E}(t))$ that is moving in finite time from a small neighborhood of $(D_0,\tilde{E}_0)$ to a small neighborhood of $(D_1,\tilde{E}_1)$, as the effective potential $V(\cos\theta,D(t),\lambda)$ is deformed by $D(t)$.
\noindent In \cite{Nisse4} the deformation is considered in the special case when the effective potential $V(z,D(t),\lambda)$ is a rational function of $\cos\theta$. This occurs when the physical parameters satisfy $1-\alpha^2<\gamma<1$, $\sigma=\frac{1-\alpha}{\gamma+\alpha^2-1}$, conditions that may be realized by parameters of a real toy TT. The analysis of the potential is then simplified, and it has been proved that
\begin{proposition}\label{nbhds_of_poles}
Assume that $\lambda>\lambda_{\text{thres}}=\frac{\sqrt{mgR^3I_3\alpha}(1+\alpha)^2}{\sqrt{1+\alpha-\gamma}}$.
\begin{itemize}
\item[i)] For any (small) $\epsilon>0$ there is a $\delta_{-}(\epsilon,\lambda)>0$ such that for every positive $\delta<\delta_{-}(\epsilon,\lambda)$ the potential $V(z,D,\lambda)$ has a minimum $z_{\min}$ in the interval $[-1,-1+\epsilon]$ for $D=D_1+\frac{\delta}{R(1+\alpha)\sqrt{\gamma+\alpha^2-1}}$ .
\item[ii)] For any (small) $\epsilon>0$ there is a $\delta_{+}(\epsilon,\lambda)>0$ such that for every positive $\delta<\delta_{+}(\epsilon,\lambda)$ the potential $V(z,D,\lambda)$ has a minimum $z_{\min}$ in the interval $[1-\epsilon,1]$ for $D=D_0-\frac{\delta}{R(1-\alpha)\sqrt{\gamma+\alpha^2-1}}$.
\end{itemize}
\end{proposition}
This proposition states that if $\lambda>\lambda_{\text{thres}}$, so that TT is inverting, then the minimum $z_{\min}$ of $V(z,D(t),\lambda)$ is moving from an $\epsilon$-neighborhood of $z=1$ to an $\epsilon$-neighborhood of $z=-1$. In \cite{Nisse4} it has also been shown that the time of passage for $\theta(t)$ between two turning points has a uniform bound $T_{\text{upp}}=21.95\left(\frac{RI_3\gamma(\alpha+1-\gamma)}{\alpha\lambda+\alpha RD\sqrt{\gamma+\alpha^2-1}}\right)$. If the time of inversion is an order of magnitude larger $T_{\text{inv}}>10 T_{\text{upp}}$, then the angle $\theta(t)$ has to perform several oscillations during the inversion.
The motion of the symmetry axis $\mathbf{\hat{3}}(t)$ can be seen as nutational motion within a belt that is moving from the neighborhood of the north pole of the unit sphere $S^2$ to the neighborhood of the south pole. As an inverting solution is approaching the inverted asymptotic state $(\mathbf{L},\mathbf{\hat{3}},\mathbf{v}_A)=\left(\frac{\lambda}{R(1+\alpha)}\hat{z},\hat{z},0\right)$ the velocity $\dot{\theta}(t)\to 0$ and the total energy $E(t)\to E_1=\lim_{t\to\infty}V(z_{\min}(t),D(t),\lambda)$.
This qualitative analysis of inverting solutions shows oscillatory character of inverting solutions and no more. The dynamical equations make it difficult to derive rigorous statements about further properties of solutions.
\noindent As we shall show below these properties of inverting solutions are well confirmed by numerical simulations made with the use of the Python 2.7 open source library SciPy \cite{Scipy} for realistic values of parameters corresponding to a typical toy TT available commercially. These simulations also show that generic dynamics of inverting TT has other distinctive features that do not follow from the theoretical analysis presented above. They have yet to be found from the underlying dynamical equations and such derivation is a daunting challenge.
In order to understand better these fine features of inverting solutions we adopt a converse approach of analysing how the results of simulations {\it agree} with the dynamical equations.
It should be stated clearly that such analysis does not provide proofs of existence of these new features but it improves understanding of relationships that hold between dynamical variables of an inverting TT. This enables formulation of some specific hypotheses that may have a chance of being rigorously proved from the dynamical equations.
\section{Simulations of a toy TT}
\subsection{Behaviour of a reference inverting solution}
We have taken realistic values of the physical parameters for a typical toy TT: $m=0.02$ kg, $R=0.02$ m, $\alpha=0.3$, $I_3=\frac{2}{5}mR^2$, $I_1=\frac{131}{350}mR^2$ and $g=9.82\text{ m/s}^2$. They satisfy the rationality condition $\frac{mR^2}{I_3}=\frac{1-\alpha}{\gamma+\alpha^2-1}$. We also choose $\mu=0.3$. These values are close to the parameter values used in simulations by other authors \cite{Coh,Ued}.
We study initial conditions (IC) with $\lambda\approx 2\lambda_{\text{thres}}$ and with small $\theta(0)=0.1$ rad that give inversion of the TT. As reference initial conditions we take (similarly as in \cite{Coh,Ued})
\begin{equation}
\label{IC_ref}IC_{\text{ref}}(0)=\{\theta(0)=0.1 \text{ rad},\;\dot{\theta}(0)=0,\;\dot{\varphi}(0)=0,\;\omega_3(0)=155 \text{ rad}/s,\;\nu_x(0)=0,\;\nu_y(0)=0\}
\end{equation}
The use of IC with $\dot{\varphi}(0)\approx0$ is justified by a typical demonstration of a toy TT that is initially started with strong spin (here $\dot{\psi}(0)=155$ rad/s) about the axis $\mathbf{\hat{3}}$ before it hits the supporting plane.
The consistency of calculated values of the dynamical variables has been checked by finding that the value of Jellett's integral $\lambda=RI_1\dot{\varphi}\sin^2\theta-RI_3\omega_3(\alpha-\cos\theta)$ is constant with precision of order $10^{-6}\lambda$. The condition $\lambda=\text{constant}$ essentially connects $\dot{\varphi}$ and $\omega_3=\dot{\psi}+\dot{\varphi}\cos\theta$, and initially $\omega_3(0)\approx\dot{\varphi}(0)+\dot{\psi}(0)$ since $\theta(0)=0.1$ and $\cos\theta\approx 1$.
Quite remarkably inverting solutions with $\lambda\approx 2\lambda_{\text{thres}}$ and small $\dot{\varphi}(0)\approx0$ display the same universal behaviour (see Fig. \ref{total_plots1}) consisting of three main phases separated by two moments $t_{\theta\approx0}$, $t_{\theta\approx\pi}$ of singular behaviour of $\dot{\varphi}(t)$ when this angular velocity changes sign from large negative values to large positive values. At the same moments the angle $\theta(t)$ takes correspondingly locally minimal value close to 0 at $t_{\theta\approx0}$ and then locally maximal value close to $\pi$ at $t_{\theta\approx\pi}$.
\noindent The three phases are thus:
\begin{itemize}
\item[1)] Initial synchronisation of dynamical variables (that ends with singular values for angular velocities $\dot{\varphi}$, $\dot{\psi}$).
\item[2)] Climbing phase, when $\theta(t)$ climbs in an oscillatory way from $\theta\approx 0$ towards values of $\theta$ close to $\theta\approx \pi$. It ends with singular values of angular velocities $\dot{\varphi}$, $\dot{\psi}$.
\item[3)] Asymptotic stabilisation phase when $\theta(t)$ slowly approaches $\theta\approx\pi$.
\end{itemize}
The existence of singular behaviour is not affected by small perturbations of IC. Thus $t_{\theta\approx0}$ may be called an {\it initiation time} and $t_{\theta\approx\pi}$ an {\it ending time} for the climbing of $\theta(t)$. A {\it climbing time} is the quantity $T_{\text{inv}}=t_{\theta\approx \pi}-t_{\theta\approx0}$ from the first definite change of sign of $\dot{\varphi}(t)$ at $\theta(t)\approx0$ to the second change of sign of $\dot{\varphi}(t)$ at $\theta(t)\approx\pi$.
The initial synchronisation phase is the time required to accelerate the angular velocity $\dot{\varphi}$ to about $80$ rad/s needed for the climbing phase to start.
When values of dynamical variables attained close to the first time of singularity are taken as initial conditions
\begin{equation}
IC(0)=IC(t_{\theta\approx0})=\{\theta(t_{\theta\approx 0}),\;\dot{\theta}(t_{\theta\approx0}),\;\dot{\varphi}(t_{\theta\approx0}),\;\omega_3(t_{\theta\approx0}),\;\nu_x(t_{\theta\approx0}),\;\nu_y(t_{\theta\approx0})\},
\end{equation}
the solution $\theta(t)$ climbs immediately.
The graph for the inclination angle $\theta(t)$ in Fig. \ref{total_plots1}a displays the main features of an inverting TT. It has a general form of a logistic type curve superposed with small amplitude oscillations. These oscillations are visible in the graph of $\dot{\theta}(t)$ that changes sign frequently. Zeros of $\dot{\theta}(t)$ correspond to subsequent local maximum and minimum points of the $\theta(t)$-curve (see Fig. \ref{total_plots1}a). These properties confirm the picture implied by the Main Equation for the TT saying that the symmetry axis $\mathbf{\hat{3}}(t)$ performs nutational motion within a narrow band that, during the inversion, is shifting on the unit sphere $S^2$ from a neighborhood of the north pole to a neighborhood of the south pole. Large values of $\dot{\theta}(t)$ during the climbing phase reflect widening of the nutational band when $\theta(t)$ is crossing $\frac{\pi}{2}$. The band is narrowing (and $\dot{\theta}(t)\to 0$) when $\mathbf{\hat{3}}(t)$ is approaching the south pole of $S^2$.
In the graph \ref{total_plots1}b for $\dot{\varphi}(t)$ and $\dot{\psi}(t)$ we can discern an unexpected phenomenon (also visible in corresponding graphs published in \cite{Coh,Ued}) of sudden increase, by orders of magnitude, of oscillation amplitudes in vicinity of the initiation time $t_{\theta\approx0}\approx 3.2$ s and in vicinity of the ending time $t_{\theta\approx\pi}\approx 7.87$ s. Both times are clearly distinguished by a change of sign of high amplitude oscillations.
The strong oscillations of $\dot{\varphi}(t)$, $\dot{\psi}(t)$ in vicinity of $t_{\theta\approx0}$ and of $t_{\theta\approx\pi}$, visible in Fig. \ref{total_plots1}b, compensate each other because the variable $\omega_3(t)=\dot{\psi}(t)+\dot{\varphi}(t)\cos\theta(t)$ (Fig. \ref{total_plots2}b) is (unexpectedly) an almost monotonously decreasing function from the initial value of $155$ rad/s to about $-85$ rad/s.
The decreasing behaviour of $\omega_3$ reflects inversion of direction of axis $\mathbf{\hat{3}}$ (with respect to the direction of $\mathbf{L}$ and $\boldsymbol{\omega}$) and also reflects frictional loss of rotational energy. The curve $\omega_3(t)$ is in the beginning and at the end almost horizontal. During the climbing phase of TT between 3.2 and 7.86 seconds the symmetry axis $\mathbf{\hat{3}}$ turns upside down, so the projection $\boldsymbol{\omega}\cdot\mathbf{\hat{3}}$ of the predominantly vertical angular velocity $\boldsymbol{\omega}$ on $\mathbf{\hat{3}}$ changes sign.
The function $\omega_3(t)$ is not, however, everywhere monotonously decreasing because its derivative $\dot{\omega}_3=-\frac{R\mu g_n}{I_3}\nu_y\sin\theta$ may take positive values when $\nu_y$ becomes negative in a neighborhood of $t_{\theta\approx 0}$. Indeed, the graph for $\dot{\omega}_3(t)$ in Fig. \ref{total_plots2}b acquires positive spikes in a neighborhood of $t_{\theta\approx 0}$.
The value $g_n(t)$ of the reaction force $g_n(t)\hat{z}$ stays positive all time during the inversion (Fig. \ref{total_plots2}c) but it oscillates strongly during the climbing phase. So these solutions fulfill the assumption needed for validity of the LaSalle type theorem \ref{inversion_cond}. Values of $g_n(t)$ are oscillating about $mg$ -- the value of the reaction force when a static TT is standing on its bottom.
\noindent Further information about behaviour of $\dot{\varphi}(t)$ can be deduced from the expression for Jellett's integral \eqref{lambda_equ} rewritten as
\begin{align}
\label{phi_dot_equ}\dot{\varphi}(t)\sin^2\theta(t)&=\frac{\lambda}{RI_3}+\omega_3(t)(\alpha-\cos\theta(t))=-\omega_3(0)(\alpha-\cos\theta(0))+\omega_3(t)(\alpha-\cos\theta(t))\nonumber\\
&=\omega_3(0)(\alpha-\cos\theta(0))\left(1-\frac{\omega_3(t)(\alpha-\cos\theta(t))}{\omega_3(0)(\alpha-\cos\theta(0))}\right),
\end{align}
since $\lambda=-RI_3\omega_3(0)(\alpha-\cos\theta(0))$. In the vicinity of $t_{\theta\approx 0}$ the r.h.s. of \eqref{phi_dot_equ} is a small quantity because in the quotient $\omega_3(t)$ ($\approx 154$ rad/s) is close to $\omega_3(0)=155$ rad/s and both angles $\theta(t)$, $\theta(0)$ are small.
The formula \eqref{phi_dot_equ} makes it easier to understand the interplay between $\theta(t)$ and $\dot{\varphi}(t)$ during the inversion. Since $\sin^2\theta\geq 0$ the sign of $\dot{\varphi}(t)$ is the same as the sign of the r.h.s. of \eqref{phi_dot_equ}. The part of the $\dot{\varphi}(t)$-graph left of $t_{\theta\approx 0}\approx 3.2$s shows that the quantity $\left(1-\frac{\omega_3(t)(\alpha-\cos\theta(t))}{\omega_3(0)(\alpha-\cos\theta(0))}\right)$ changes sign many times. As the r.h.s. of \eqref{phi_dot_equ} is small, because the quotient is close to 1, the amplitude of $\dot{\varphi}(t)$ can increase on approaching $t_{\theta\approx 0}$ only if $\sin^2\theta(t)$ becomes very small, and the axis $\mathbf{\hat{3}}$ almost hits the north pole of $S^2$. At time $t_{\theta\approx 0}$ the term $\left(1-\frac{\omega_3(t)(\alpha-\cos\theta(t))}{\omega_3(0)(\alpha-\cos\theta(0))}\right)$ acquires a positive sign and the amplitude of $\dot{\varphi}(t)$ decreases with rising angle $\theta(t)$. This is the beginning of inversion.
The moment of initiation of inversion of TT at $3.2$s noticed at graphs for $\dot{\varphi}(t)$ and $\dot{\psi}(t)$ is also visible in the graph \ref{total_plots2}a for the gliding velocities $\nu_x(t)$, $\nu_{y}(t)$. Both velocities are oscillatory but initially $\nu_{x}$ (blue) is positive and has larger value than $\nu_y$ (green) that oscillates close to $0$.
Remarkably the graph of $\nu_y(t)$ crosses the graph of $\nu_x(t)$ at $t_{\theta\approx 0}\approx 3.2$s and from this moment the minima of $\nu_y(t)$ are consistently higher than the minima of $\nu_x(t)$. During the climbing phase the oscillatory function $\nu_y(t)$ is positive and its values increase by about two orders of magnitude in comparison with values during the synchronisation phase. Oscillations of $\nu_x(t)$ also increase and the mean value of $\nu_x$ becomes slightly negative. When the climbing is finished at $t_{\theta\approx\pi}\approx 7.86$s the graph of $\nu_y$ again crosses the graph of $\nu_x$ from above and minima of $\nu_y(t)$ become consistently lower than minima of $\nu_x(t)$.
\subsection{Testing initial conditions for inverting solutions}
The reference solution with IC \eqref{IC_ref} corresponds to a TT spun rapidly about the $\mathbf{\hat{3}}$ axis with $\omega_3(0)=\dot{\psi}(0)=155$ rad/s and launched upon a table with a small inclination angle $\theta(0)=0.1$ rad. We vary IC while keeping the value of Jellett's integral $\lambda\approx 2\lambda_{\text{thres}}$ above the threshold value, which is sufficient for inversion of TT.
As $\lambda=RI_1\dot{\varphi}\sin^2\theta-RI_3\omega_3(\alpha-\cos\theta)$ (with $\omega_3=\dot{\psi}+\dot{\varphi}\cos\theta$) does not depend on $\dot{\theta}$, $\nu_x$ or $\nu_y$ it is natural to consider first how, for fixed $\theta(0)=0.1$ rad, the initial distribution of the angular velocity between $\omega_3(0)$ and $\dot{\varphi}(0)$ affects the character of inverting solutions and then to study additional influence of nonvanishing IC for $\nu_x(0)$, $\nu_y(0)$ and $\dot{\theta}(0)$. Changes of the initial inclination angle $\theta(0)$ are also tested.
For testing influence of nonzero initial angular velocity $\dot{\varphi}(0)$ we consider $\dot{\varphi}(0)$ being maximally of the same order of magnitude as $\omega_3(0)$ and belonging to the range of $\pm 200$ rad/s while keeping $\nu_x(0)=\nu_y(0)=\dot{\theta}(0)=0$. As $\theta=0.1$ is small, $\cos\theta=0.995\approx 1$ and $\sin^2\theta\approx 0.01$, so $\dot{\varphi}(0)\in [-200,200]$ affects the value of $\lambda$ as little as $1\%$, so that $\lambda\approx 2\lambda_{\text{thres}}$.
An increase of $\dot{\varphi}(0)$ from 0 to 83.1 shortens the length of the initial synchronisation phase so that $t_{\theta\approx 0}$ goes to zero when $\dot{\varphi}(0)\approx 83.1$. With further increase of $\dot{\varphi}(0)$ the synchronisation phase disappears and the climbing time is shortened.
The initiation time $t_{\theta\approx0}$ also decreases as $\dot{\varphi}(0)$ becomes negative. Remarkably the IC with $\dot{\varphi}(0)\approx 0$ have the longest synchronisation phase for the whole range $\dot{\varphi}(0)\in[-200,200]$. It may be related to the fact that the total energy is close to its minimal value when $\lambda=2\lambda_{\text{thres}}$ is kept fixed.
\noindent Another distinguished range of angular velocities is $\dot{\varphi}(0)\in[195,200]$ (with $\theta(0)=0.1$) that gives rise to solutions with very small amplitude oscillations for all dynamical variables (see Fig. 8). The increase of $\theta(t)$ becomes monotonous starting from $\theta=0.8$. Solutions with $\dot{\varphi}(0)\in[195,200]$ become even more smooth when $\theta(0)=0.01$, and the increase of $\theta(t)$ is monotonous starting from time $0.3$s and angle $\theta(t)=0.02$. Their initiation time is zero and their ending time is about $t_{\theta\approx\pi}=7.77$.
For testing stability of qualitative behaviour of the reference solution and solutions with $\dot{\varphi}(0)\in[-200,200]$ we have varied the additional initial conditions for $\nu_x(0)$, $\nu_y(0)$ and $\dot{\theta}(0)$ within the maximal range of variability of $\nu_x(t)\in[-0.3,0.3]$, $\nu_y(t)\in[-0.8,0.8]$ and $\dot{\theta}(t)\in[-15,15]$ displayed by the reference solution \eqref{IC_ref}.
The general pattern is that with adding large nonzero $\nu_x(0)$, $\nu_y(0)$ or $\dot{\theta}(0)$ the synchronisation phase may reappear, the climbing time remains below 7-8 s, and the main qualitative features of inverting remain intact.
The price for taking large values of $\nu_x(0)$, $\nu_y(0)$ or $\dot{\theta}(0)$ is that the dynamical variables $\nu_x(t)$, $\nu_y(t)$ and $\dot{\theta}(t)$ oscillate strongly with amplitudes staying within the same maximal range of variability $\nu_x(t)\in[-0.3,0.3]$, $\nu_y(t)\in[-0.8,0.8]$ and $\dot{\theta}(t)\in[-15,15]$.
Small perturbations not exceeding $10\%$ of the range of variability of $\nu_x(t)$, $\nu_y(t)$ and $\dot{\theta}(t)$ do not change much the parameters and behaviour of inverting solutions.
Changing of the initial inclination angle $\theta(0)$ affects the amplitude of oscillation for the dynamical variables $\nu_x(t)$, $\nu_y(t)$ and $\dot{\theta}(t)$ and the climbing time. A general rule is that decreasing $\theta(0)$ below $0.1$ reduces oscillations, smooths out solution curves and increases the climbing time. Increase of $\theta(0)$ makes amplitudes of oscillations larger and decreases the climbing time. Beyond the angle $\theta(0)=0.3$ the term $\alpha-\cos\theta$ starts to play a role and $\omega_3(0)$ has to be increased to keep $\lambda\approx 2\lambda_{\text{thres}}$.
\subsection{Transfer of energy between modes and illustration of the Main equation for the Tippe Top approach}
The total energy consists of three components:
\begin{align}
\label{energy_split}E&=E_{\text{trans}}+E_{\text{rot}}+E_{\text{pot}}=\frac{1}{2}m\mathbf{\dot{s}}^2+\frac{1}{2}\boldsymbol{\omega}\cdot\mathbf{L}+mg\mathbf{s}\cdot\hat{z}\nonumber\\
&=\frac{1}{2}m\left[(\nu_x\cos\theta-R\dot{\theta}(\alpha-\cos\theta))^2+(\nu_y-R\sin\theta(\omega_3+\dot{\varphi}(\alpha-\cos\theta)))^2+(\nu_x\sin\theta+R\dot{\theta}\sin\theta)^2\right]\nonumber\\
&+\frac{1}{2}\left[I_1(\dot{\theta}^2+\dot{\varphi}^2\sin^2\theta)+I_3\omega_{3}^2\right]+mgR(1-\alpha\cos\theta),
\end{align}
since $\mathbf{\dot{s}}=\mathbf{v}_A-\boldsymbol{\omega}\times\mathbf{a}$. Their time-dependence is shown in Fig. \ref{energy_plot} where we see that $E_{\text{trans}}$, $E_{\text{rot}}$ and $E_{\text{pot}}$ add up to the total energy $E(t)$ that is monotonically decreasing.
The components $E_{\text{trans}}$, $E_{\text{rot}}$ and $E_{\text{pot}}$ behave in an oscillatory way since their time derivatives (see below) change sign many times. These oscillations are not directly seen in the main graph because the relative variations of the energy components are small. They become visible in the magnification of the curve (see inset in Fig. \ref{energy_plot}).
In Fig. \ref{energy_plot} the potential energy $E_{\text{pot}}$ (light blue line) increases and the numerical ratio of energies seen in the figure $E_{\text{pot}}(t=8)/E_{\text{pot}}(t=0)=\frac{0.005106}{0.002755}=1.8534$ differs from the theoretical value $\frac{mgR(1+\alpha)}{mgR(1-\alpha)}=\frac{1.3}{0.7}=1.8571$ only by $0.2\%$.
The translational energy $E_{\text{trans}}$ for the chosen IC \eqref{IC_ref} is initially small, about $0.34E_{\text{pot}}(t=0)$ and it goes to zero as TT inverts and approaches asymptotically the inverted spinning state having fixed center of mass. During the inversion the greatest part of the energy is contained in the rotational energy mode $E_{\text{rot}}(t)$.
The transfer of energy between energy modes becomes visible when we calculate the derivative
\begin{align}
\dot{E}&=\frac{d}{dt}\left(\frac{1}{2}m\mathbf{\dot{s}}^2+\frac{1}{2}\boldsymbol{\omega}\cdot\mathbf{L}+mg\mathbf{s}\cdot\hat{z}\right)\nonumber\\
&=\mathbf{\dot{s}}\cdot(\mathbf{F}_{R}+\mathbf{F}_{f}-mg\hat{z})+\boldsymbol{\omega}\cdot\left[\mathbf{a}\times(\mathbf{F}_{R}+\mathbf{F}_{f})\right]+mg\mathbf{\dot{s}}\cdot\hat{z}\nonumber\\
&=(\mathbf{v}_{A}-\boldsymbol{\omega}\times\mathbf{a})\cdot(\mathbf{F}_{R}+\mathbf{F}_{f})-mg\mathbf{\dot{s}}\cdot\hat{z}+\boldsymbol{\omega}\cdot\left[\mathbf{a}\times(\mathbf{F}_{R}+\mathbf{F}_{f})\right]+mg\mathbf{\dot{s}}\cdot\hat{z}\nonumber\\
&=\mathbf{v_A}\cdot\mathbf{F}_f-(\boldsymbol{\omega}\times\mathbf{a})\cdot(\mathbf{F}_{R}+\mathbf{F}_{f})+\boldsymbol{\omega}\cdot\left[\mathbf{a}\times(\mathbf{F}_{R}+\mathbf{F}_{f})\right]=\mathbf{v_A}\cdot\mathbf{F}_f=-\mu g_n\mathbf{v}_{A}^2,
\end{align}
by using the dynamical Eqs. \eqref{TTequ}, the equality $\dot{\boldsymbol{\omega}}\cdot\mathbf{L}=\boldsymbol{\omega}\cdot\mathbf{\dot{L}}$ valid due to axial symmetry of TT, and $\mathbf{v}_{A}\cdot\hat{z}=0$. In this calculation $\mathbf{v_A}\cdot\mathbf{F}_f=-\mu g_n\mathbf{v}_{A}^2$ is the rate of frictional loss of energy, $mg\mathbf{\dot{s}}\cdot\hat{z}$ is the rate of energy transfer from the translational part into potential energy $E_{\text{pot}}$. The term $\boldsymbol{\omega}\cdot\left[\mathbf{a}\times(\mathbf{F}_{R}+\mathbf{F}_{f})\right]$ is the work performed in unit time by the torque $\mathbf{a}\times(\mathbf{F}_{R}+\mathbf{F}_{f})$ and it transfers energy between the translational and rotational components.
The graphs for $(\dot{E},\dot{E}_{\text{pot}},\dot{E}_{\text{trans}})$ (Fig. \ref{energy_diff_plot}), which describe the rate of energy transfer between modes, are oscillatory. The derivative for the total energy $\dot{E}(t)=-\mu g_n\mathbf{v}_{A}^2\leq 0$ is non-positive but it becomes close to zero at some instants of time when both $\nu_x(t)$ and $\nu_y(t)$ are close to zero in a neighborhood of the initiation time $t_{\theta\approx0}=3.2$ s. The graph of $\dot{E}_{\text{trans}}$ is predominantly negative and has a general shape similar to $\dot{E}(t)$. This reflects the fact that most of the energy lost to friction comes from the rotational and the translational components.
\noindent Figure \ref{torque_plot} shows time-dependence of components of the torque vector calculated w.r.t. the center of mass
\begin{equation}
\boldsymbol{\tau}=\mathbf{a}\times(\mathbf{F}_R+\mathbf{F}_f)=\mathbf{a}\times(g_n\hat{z}-\mu g_n\mathbf{v}_A).
\end{equation}
These components are all predominantly negative w.r.t. the chosen moving reference frame $(\hat{x},\hat{y},\hat{z})$. The components $\boldsymbol{\tau}_x=-R(1-\alpha\cos\theta)\mu g_n\nu_y$ and $\boldsymbol{\tau}_z=-R\alpha\mu g_n\nu_y\sin\theta$ are negative whenever $\nu_y>0$, and $\boldsymbol{\tau}_y=-R\alpha g_n\sin\theta+R\mu g_n\nu_x(1-\alpha\cos\theta)$ is negative whenever $\nu_x<0$ or $\nu_x$ is sufficiently small.
As explained in section 1 each inverting solution of TT equations satisfies its own Main Equation for the Tippe Top $\tilde{E}(t)=\frac{1}{2}(2g(\cos\theta)\dot{\theta}^2)+V(\cos\theta,D(t),\lambda)$ with $\tilde{E}(t)$, $D(t)$ calculated from \eqref{Etilde_def} and \eqref{Routh_def}. Remember that $\tilde{E}(t)$ is the part of the whole energy \eqref{energy_split} not depending on the gliding velocity $\mathbf{v}_{A}$. The function $D(t)$ decreases from the value $D_0\approx\frac{\lambda\sqrt{d(1)}}{R(1-\alpha)}=9.381\cdot10^{-4}$ to $D_1\approx -\frac{\lambda\sqrt{d(-1)}}{R(1+\alpha)}=-6.667\cdot 10^{-4}$ and in diagram \ref{total_plots3}a the curve $(D(t),\tilde{E}(t))$ goes from the initial boundary value ($D_0\approx 9.381\cdot10^{-4},\tilde{E}_0\approx 4.912\cdot 10^{-2}$) to the final boundary value $(D_1\approx-6.667\cdot 10^{-4},\tilde{E}_1\approx 1.734\cdot10^{-2})$. The shape of the curve reflects the oscillatory behaviour of $\tilde{E}(t)$ while $D(t)$ (in Fig. \ref{total_plots3}b) is almost monotonously decreasing since $D(t)=I_3\omega_3(t)\sqrt{d(\cos\theta(t))}$.
The blue curve in Fig. \ref{total_plots3}c shows oscillatory behaviour of the modified rotational energy $\tilde{E}(t)$ in relation to the green curve $E(t)$ representing total energy. The modified rotational energy $\tilde{E}(t)$ is consistently larger than $E(t)$ during inversion. This does not contradict the conservation of energy when we look closer at $\tilde{E}=E-\frac{1}{2}m\mathbf{v}_{A}^2+m\mathbf{v}_{A}\cdot(\boldsymbol{\omega}\times\mathbf{a})$. For an inverting TT the angular velocity $\boldsymbol{\omega}$ remains nearly parallel to the $\hat{z}$ axis and the product $\boldsymbol{\omega}\times\mathbf{a}$ points behind the plane of the picture in Fig. \ref{TT_diagram}. The direction of rotation of TT (with $\boldsymbol{\omega}$ almost parallel to $\hat{z}$) causes $\nu_y=\mathbf{v}_{A}\cdot\hat{y}$ to be positive and to point also behind the plane of Fig. \ref{TT_diagram}.
Thus the term
\begin{align}
m\mathbf{v}_{A}\cdot(\boldsymbol{\omega}\times\mathbf{a})-\frac{1}{2}m\mathbf{v}_{A}^2&\approx\frac{1}{2}m(\nu_x\hat{x}+\nu_y\hat{y})\cdot\left(2|\boldsymbol{\omega}\times\mathbf{a}|\hat{y}-(\nu_x\hat{x}+\nu_y\hat{y})\right)\nonumber\\
\label{est_v_A}&\approx-\frac{1}{2}m\nu_{x}^2+\frac{1}{2}m\nu_y(2|\boldsymbol{\omega}\times\mathbf{a}|-\nu_y)
\end{align}
may be positive if $\nu_{y}>\nu_x$ and $2|\boldsymbol{\omega}\times\mathbf{a}|-\nu_y=2|\boldsymbol{\omega}||\mathbf{a}|\sin\theta-\nu_y>0$ is sufficiently large.
In Fig. \ref{total_plots2}b we cannot estimate the size of each term, but as we see in Fig.~\ref{TT_diagram}, $|\mathbf{a}|$ is growing during inversion, $|\boldsymbol{\omega}|$ is decreasing and $\sin\theta$ is growing until the axis $\mathbf{\hat{3}}$ passes $\theta=\frac{\pi}{2}$. This may altogether keep the term $(2|\boldsymbol{\omega}\times\mathbf{a}|-\nu_y)$ positive and sufficiently large to make \eqref{est_v_A} positive. This is consistent with the graph \ref{total_plots2}c where the difference $\tilde{E}-E$ becomes largest in the middle of inversion when $\theta\approx\frac{\pi}{2}$.
\section{Summary and conclusions}
Application of the LaSalle principle and stability analysis of asymptotic solutions provides necessary conditions for physical parameters and for initial conditions (IC) so that the Tippe Top inverts. Analysis of dynamical behaviour of inverting solutions is indeed difficult since one deals with a nonlinear, nonintegrable dynamical system of 6 degrees of freedom.
In this paper we have numerically studied properties of solutions starting at small initial inclination angle $\theta(0)=0.1$ rad and with $\lambda\approx 2\lambda_{\text{thres}}$ to learn how such solutions depend on the choice of the remaining IC. Numerical simulations confirm that all such sample solutions invert. Closer analysis of numerical solutions have also shown new interesting features of dynamical behaviour and how they depend on changes of the remaining IC while keeping $\theta(0)=0.1$ and $\lambda\approx 2\lambda_{\text{thres}}$.
\noindent For solutions with $\nu_x(0)=\nu_y(0)=\dot{\theta}(0)=0$ and $\omega_3(0)=155$ rad/s, a study of dependence on $\dot{\varphi}(0)\in[-200,200]$ shows that solutions with $\dot{\varphi}(0)>83$ rad/s start to invert directly and solutions with $\dot{\varphi}(0)\leq 83$ require a synchronisation time-interval for $\dot{\varphi}(t)$, $\nu_x(t)$ and $\nu_y(t)$ before $\theta(t)$ starts to climb. The climbing {\it initiation time} $t_{\theta\approx0}$ is distinguished:
\begin{itemize}
\item[a)] by the inclination angle being close to zero $\theta(t_{\theta\approx0})\approx0$,
\item[b)] by high amplitude oscillations of $\dot{\varphi}(t)$ in the vicinity of $t_{\theta\approx 0}$,
\item[c)] by reversal of amplitude of oscillations of $\dot{\varphi}(t)$ from large negative values to large positive values of $\dot{\varphi}(t)$,
\item[d)] by the fact that at time $t_{\theta\approx0}$ the graph of $\nu_{y}(t)$ crosses the graph of $\nu_x(t)$ from below and increases by 1-2 orders of magnitude.
\end{itemize}
For all tested IC the climbing time-interval ends at the {\it ending time} $t_{\theta\approx\pi}$ distinguished by another reversal of high amplitude oscillations of $\dot{\varphi}(t)$. These oscillations are well visible in all graphs of a fully inverting Tippe Top. At $t_{\theta\approx\pi}$ the graph of $\nu_y(t)$ crosses the graph of $\nu_x(t)$ from above. So for the climbing solutions there appears to exist a naturally defined {\it climbing time} interval $[0,t_{\theta\approx\pi}-t_{\theta\approx 0}]$ or $[0,t_{\theta\approx\pi}]$ (if $\dot{\varphi}(0)>83$ and the climbing starts immediately) during which the angle $\theta$ increases to $\pi$. Perturbations of the reference IC \eqref{IC_ref} with nonzero values of $\nu_x(0)$, $\nu_y(0)$ and $\dot{\theta}(0)$, belonging to the range of variability of $\nu_x(t)$, $\nu_y(t)$ and $\dot{\theta}(t)$ for the reference solution, preserve the same main features of inverting solutions but amplitudes of oscillations for all dynamical variables usually increase. This may be related to the fact that the total energy for the reference solution is close to the minimal value of total energy among solutions having $\lambda\approx 2\lambda_{\text{thres}}$.
\noindent We have discussed how the observed dynamical behaviour of solutions is related to the structure of the dynamical equations (2)--(6). Such analysis does not provide a proof of the observed dynamical features but it sheds light on the relationships between the main dynamical variables during the inversion. This may serve as a good starting point for further numerical experiments studying dynamics of inverting solutions of a Tippe Top.
We have also illustrated how energy is transferred during the inversion between three energy modes $E_{\text{trans}}(t)$, $E_{\text{rot}}(t)$ and $E_{\text{pot}}(t)$ that together add up to the monotonically decreasing total energy $E(t)$. This transfer is due to the torque that changes the components of angular velocity and performs work needed for energy transfer.
The description of TT inversion through the Main Equation of the Tippe Top has been illustrated by the graph of the modified rotational energy $\tilde{E}(t)$ versus total energy $E(t)$ and by a picture of the curve $(D(t),\tilde{E}(t))$ that controls deformation of the effective potential $V(\cos\theta,D(t),\lambda)$ and motion of $\theta(t)$ inside the potential.
\noindent The numerical study of inverting solutions confirms the predictions about inversion of TT known from analysis of stability of asymptotic solutions. Additionally it confirms oscillatory behaviour of the angle $\theta(t)$ as predicted by the Main Equation for the Tippe Top approach. It also reveals further intricate features of Tippe Top behaviour that are only partially understood on the basis of dynamical equations. They ask for further research in this direction.
\begin{figure}
\caption{Plots obtained by numerically integrating Eqs. \eqref{ddth}.}
\label{total_plots1}
\end{figure}
\begin{figure}
\caption{Plots obtained as in figure \ref{total_plots1}.}
\label{total_plots2}
\end{figure}
\begin{figure}
\caption{Evolution of the energy $E(t)$ and its components $E_{\text{rot}}(t)$, $E_{\text{trans}}(t)$ and $E_{\text{pot}}(t)$.}
\label{energy_plot}
\end{figure}
\begin{figure}
\caption{Evolution of the derivative of the energy $\dot{E}(t)$ and of its components $\dot{E}_{\text{pot}}(t)$ and $\dot{E}_{\text{trans}}(t)$.}
\label{energy_diff_plot}
\end{figure}
\begin{figure}
\caption{The components of the torque $\boldsymbol{\tau}=\mathbf{a}\times(\mathbf{F}_R+\mathbf{F}_f)$.}
\label{torque_plot}
\end{figure}
\begin{figure}
\caption{Plot $a$ shows the curve $(D(t),\tilde{E}(t))$.}
\label{total_plots3}
\end{figure}
\begin{figure}
\caption{Plot for adjusted initial conditions \eqref{IC_ref}.}
\label{phi_plot}
\end{figure}
\end{document} |
\begin{document}
\title{Unbounded discrepancy in Frobenius numbers}
\author{Jeffrey Shallit\\
School of Computer Science \\
University of Waterloo\\
Waterloo, ON N2L 3G1 \\
Canada\\
{\tt [email protected] }\\
\and
James Stankewicz\footnote{Partially supported by NSF VIGRE grant
DMS-0738586}\\
Department of Mathematics \\
University of Georgia \\
Athens, GA 30602 \\
USA \\
{\tt [email protected] }}
\maketitle
\begin{abstract}
Let $g_j$ denote the largest integer that is represented exactly $j$
times as a non-negative integer linear combination of $\lbrace x_1,
\ldots, x_n\rbrace$. We show that for any $k > 0$, and
$n = 5$, the quantity $g_0 - g_k$ is unbounded. Furthermore, we
provide examples with $g_0 > g_k$ for $n \geq 6$ and $g_0 > g_1$
for $n \geq 4$.
\end{abstract}
\section{Introduction}
Let $X = \lbrace x_1, x_2, \ldots, x_n \rbrace$ be a set of distinct positive
integers such that $\gcd(x_1, x_2, \ldots, x_n) = 1$. The
{\sl Frobenius number} $g(x_1, x_2, \ldots, x_n)$ is defined to be the
largest integer that cannot be expressed as a non-negative integer
linear combination of the
elements of $X$. For example, $g(6, 9, 20) = 43$.
The Frobenius number --- the name comes from the fact that Frobenius
mentioned it in his lectures, although he apparently never wrote about it ---
is the subject of a huge literature, which is admirably summarized in the
book of Ram\'{\i}rez Alfons\'{\i}n \cite{ramirez}.
Recently, Brown et al.\ \cite{brown} considered a generalization of
the Frobenius number, defined as follows: $g_j(x_1, x_2, \ldots, x_n)$ is
the largest integer
having exactly $j$ representations as a non-negative
integer linear combination of $x_1, x_2, \ldots, x_n$.
(If no such integer exists, Brown et al.\ defined $g_j$ to be $0$, but
for our purposes, it seems more reasonable to leave it undefined.)
Thus $g_0$ is just
$g$, the ordinary Frobenius number. They observed that, for a fixed
$n$-tuple $(x_1, x_2, \ldots, x_n)$, the function $g_j(x_1, x_2, \ldots, x_n)$
need not be increasing (considered as a function of $j$). For example,
they noted that
$g_{35} (4,7,19) = 181$ while $g_{36}(4,7,19) = 180$. They asked if there
are examples for which $g_1 < g_0$. Although they did not say so,
it makes sense to impose the condition that
no $x_i$ can be written as a non-negative integer linear
combination of the others, (*)
\noindent for otherwise we have trivial examples such as
$g_0(4, 5, 8, 10) = 11$ and $g_1 (4,5,8,10) = 9$. We call a tuple
satisfying (*) a {\sl reasonable} tuple.
In this note we show that the answer to the question of
Brown et al.\ is ``yes'', even for reasonable tuples.
For example, it is easy to verify that
$g_0 (8,9,11,14,15) = 21$, while
$g_1 (8,9,11,14,15) = 20$. But we prove much more:
we show that
$$g_0 (2n-2, 2n-1, 2n, 3n-3, 3n) = n^2 - O(n),$$ while for
any fixed $k \geq 1$ we have $g_k (2n-2, 2n-1, 2n, 3n-3, 3n) = O(n)$.
It follows that for this parameterized
$5$-tuple and all $k \geq 1$,
we have $g_0 - g_k \rightarrow \infty$ as $n \rightarrow \infty$.
\section{The main result}
We define $X_n = \lbrace 2n-2, 2n-1, 2n, 3n-3, 3n \rbrace$.
It is easy to see that this is a reasonable
$5$-tuple for $n \geq 5$.
If we can write $t$ as a non-negative linear combination
of the elements of $X_n$, we say $t$ has a representation or is
representable.
We define $R(j) $ to be the number of distinct representations of $j$
as a non-negative integer linear combination of the elements of $X_n$.
\begin{theorem}
\begin{enumerate}[$($a$)$]
\item $g_k (X_n) = (6k+3)n - 1$ for $n > 6k+3$, $k \geq 1$.
\item $g_0(X_n) = n^2 - 3n + 1$ for $n \geq 6$.
\end{enumerate}
\label{ref1}
\end{theorem}
Before we prove Theorem~\ref{ref1}, we need some lemmas.
\begin{lemma}
\begin{enumerate}[$($a$)$]
\item $R( (6k+3)n - 1) \geq k$ for $n \geq 4$ and $k \geq 1$.
\item $R( (6k+3)n - 1) = k$ for $n > 6k+3$ and $k \geq 1$.
\end{enumerate}
\label{lem1}
\end{lemma}
\begin{proof}
First, we note that
\begin{equation}
(6k+3)n - 1 = 1 \cdot (2n-1) + (3t-1) \cdot (2n) + (2(k-t)+1)\cdot (3n)
\label{eq1}
\end{equation}
for any integer $t$ with $1 \leq t \leq k$. This provides at least $k$ distinct
representations for $(6k+3)n - 1$ and proves (a). We call these
$k$ representations {\sl special}.
To prove (b), we need to see that the $k$ special representations given by
(\ref{eq1}) are, in fact, all representations that can occur.
Suppose that $(a,b,c,d,e)$ is a $5$-tuple of non-negative integers such that
\begin{equation}
a(2n-2) + b(2n-1) + c(2n) + d(3n-3) + e(3n) = (6k+3) n - 1 .
\label{eq2}
\end{equation}
Reducing this equation modulo $n$, we get
$ -2a -b -3d \equiv -1 $ (mod $n$). Hence there exists an integer
$m$ such that $2a + b + 3d = mn + 1$. Clearly $m$ is non-negative.
There are two cases to consider: $m = 0$ and $m \geq 1$.
If $m = 0$, then $2a + b + 3d = 1$, which, by the non-negativity of the
coefficients $a, b, d$ implies that $a=d=0$ and $b = 1$. Thus by
(\ref{eq2}) we get $ 2n-1 + 2cn + 3en = (6k+3) n - 1$, or
\begin{equation}
2c + 3e = 6k+1.
\label{eq3}
\end{equation}
Taking both sides modulo $2$, we see that
$e \equiv 1$ (mod $2$), while taking both sides modulo $3$, we see that
$c \equiv 2$ (mod $3$). Thus we can write $e = 2r+1$, $c = 3s-1$, and
substitute in (\ref{eq3}) to get $k = r + s $. Since $s \geq 1$, it follows that
$0 \leq r \leq k-1$, and this gives our set of $k$ special representations
in (\ref{eq1}).
If $m \geq 1$, then $n +1 \leq mn+1 = 2a+b + 3d$, so
$n \leq 2a+b+3d-1$. However, we know that
$(6k+3)n - 1 \geq a(2n-2) + b(2n-1) + d(3n-3) > (n-1) (2a+b+3d)$.
Hence $(6k+3)n > (n-1)(2a+b+3d) + 1 > (n-1)(2a+b+3d-1) \geq (n-1)n$.
Thus $6k+3 > n-1$. It follows that if $n > 6k+3$, then this case
cannot occur, so all the representations of $(6k+3)n - 1$ are
accounted for by the $k$ special representations given in (\ref{eq1}).
\end{proof}
We are now ready to prove Theorem~\ref{ref1} (a).
\begin{proof}
We already know from Lemma~\ref{lem1} that for $n > 6k+3$, the
number $N := (6k+3)n - 1$ has exactly $k$ representations. It now suffices
to show that if $t$ has exactly $k$ representations, for $k \geq 1$, then
$t \leq N$.
We do this by assuming $t$ has at least one representation, say
$t = a(2n-2) + b(2n-1) + c(2n) + d(3n-3) + e(3n)$, for
some $5$-tuple of non-negative integers $(a,b,c,d,e)$.
Assuming these integers are large enough (it suffices to assume
$a,b,c,d,e \geq 3$),
we may take advantage of the internal
symmetries of $X_n$ to obtain additional representations with
the following swaps.
\begin{enumerate}[$($a$)$]
\item $3(2n) = 2(3n)$;
hence
$$a(2n-2) + b(2n-1) + c(2n) + d(3n-3) + e(3n) $$
$$ = a(2n-2) + b(2n-1) + (c+3)(2n) + d(3n-3) + (e-2)(3n).$$
\item $3(2n-2) = 2(3n-3)$; hence $$ a(2n-2) + b(2n-1) + c(2n) + d(3n-3) + e(3n)$$ $$= (a+3)(2n-2) + b(2n-1) + c(2n) + (d-2)(3n-3) + e(3n).$$
\item $2n-2 + 2n = 2(2n-1)$; hence $$ a(2n-2) + b(2n-1) + c(2n) + d(3n-3) + e(3n)$$ $$= (a+1)(2n-2) + (b-2)(2n-1) + (c+1)(2n) + d(3n-3) + e(3n).$$
\item $2n-2 + 2n-1 + 2n = 3n-3 + 3n$; hence $$ a(2n-2) + b(2n-1) + c(2n) + d(3n-3) + e(3n)$$ $$= (a+1)(2n-2) + (b+1)(2n-1) + (c+1)(2n) + (d-1)(3n-3) + (e-1)(3n).$$
\end{enumerate}
We now do two things for each possible swap: first, we show that the requirement
that $t$ have exactly $k$ representations imposes upper bounds on the
size of the coefficients.
Second, we swap until we have a representation which can be conveniently bounded in terms of $k$.
\begin{enumerate}[$($a$)$]
\item If $\lfloor {e \over 2} \rfloor +
\lfloor {c \over 3} \rfloor \geq k$, we can find at least $k+1$
representations of $t$. Thus we can find a representation of $t$ with
$c \leq 2$ and $e \leq 2k-1$.
\item Similarly, if $\lfloor {d \over 2} \rfloor +
\lfloor {a \over 3} \rfloor \geq k$, we can find at least $k+1$
representations of $t$. Thus we can find a representation of $t$
with $d \leq 2k-1$ and $a \leq 2$. Combining this with (a), we can
find a representation with $a,c \leq 2$ and $d + e \leq 2k -1$.
\item If $\lfloor {b \over 2} \rfloor + \min\{ a,c \} \geq k$,
we can find at least $k+1$ representations of $t$.
Thus we can find a representation of $t$ with $| b - \min\{ a,c \} | \leq 1$.
If we start with the assumption $a, c \leq 2$, this ensures that
$\min\{a,b,c\} \leq \lfloor {{a+b+c}\over 3} \rfloor \leq
\min\{ a,b,c \} + 1$ and $\max\{a,b,c\} - \min\{a,b,c\} \leq 3$.
\item If $\min\{ a,b,c \} + \min\{ d,e \} \geq k$ we can find at least $k+1$
representations of $t$. When this swap is followed by (a) or (b) (if necessary)
we can find a representation with $d + e \leq 2k -1$, $a+b+c \leq 3$ and $a,c \leq 2$.
\end{enumerate}
Putting this all together, we see that $t \leq (2n-1) + 2 (2n) +
(2k-1)(3n) = (6k+3)n - 1$, as desired.
\end{proof}
In order to prove Theorem~\ref{ref1} (b), we need a lemma.
\begin{lemma}
The integers $k(n-1), k(n-1)+1, \ldots, kn$ are representable for
$k = 2$ and $k \geq 4$ and for $n \geq 4$.
\label{lem2}
\end{lemma}
\begin{proof}
We prove the result by induction on $k$. The base cases
are $k = 2,4$, and we have the representations given below:
\begin{eqnarray*}
4n-4 &=& 2 (2n-2) \\
4n-3 &=& (2n-2) + (2n-1) \\
4n-2 &=& 2(2n-1) \\
4n-1 &=& (2n-1) + (2n) \\
4n &=& 2 (2n).
\end{eqnarray*}
Now suppose $ln-m$ is representable for $4 \leq l < k$ and
$0 \leq m \leq l$. We want to show that $kn-t$ is representable for
$0 \leq t \leq k$. There are three cases, depending on $k$ (mod $3$).
If $k \equiv 0$ (mod 3), and $k \geq 4$, then $(k-2)n - t = kn - t - 2n$ is
representable if $t \leq k-2$; otherwise $(k-2)n - t + 2 = kn - t - (2n-2)$
is representable. By adding $2n$ or $2n+2$, respectively, we get a
representation for $kn-t$.
If $k \equiv 1$ (mod 3), and $k \geq 4$, or if
$k \equiv 2$ (mod 3), then $(k-3)n - t = kn -t - 3n$ is
representable if $t \leq k-3$; otherwise $(k-3)n -t + 3 = kn-t - (3n-3)$
is representable. By adding $3n$ or $3n+3$, respectively, we get a
representation for $kn-t$.
\end{proof}
Now we prove Theorem~\ref{ref1} (b).
\begin{proof}
First, let's show that every
integer $> n^2 - 3n+1$ is representable. Since if $t$ has a representation,
so does $t+2n-2$, it suffices to show that the $2n-2$ numbers
$n^2 -3n+2, n^2-3n+3, \ldots, n^2-n-1$ are representable.
We use Lemma~\ref{lem2} with $k = n-2$ to see that the numbers
$(n-2)(n-1) = n^2-3n+2, \ldots, (n-2)n = n^2-2n$ are all representable.
Now use Lemma~\ref{lem2} again with $k = n-1$ to see that the numbers
$(n-1)(n-1) = n^2-2n+1, \ldots, (n-1)n = n^2-n$ are all representable.
We therefore conclude that every integer $> n^2-3n+1$ has a representation.
Finally, we show that $n^2-3n+1$ does not have a representation.
Suppose, to get a contradiction, that it does:
$$ n^2 -3n+1 = a(2n-2) + b(2n-1) + c(2n) + d(3n-3) + e(3n).$$
Reducing modulo $n$ gives
$1 \equiv -2a -b -3d $ (mod $n$), so there exists an integer $m$ such
that $2a+b+3d = mn-1$. Since $a, b, d$ are non-negative, we must have
$m \geq 1$.
Now $n^2-3n+1 \geq a(2n-2)+ b(2n-1) + d(3n-3) > (n-1)(2a+b+3d)$.
Thus
\begin{equation}
n^2 -3n+1 \geq (n-1)(mn-1) = mn^2 -(m+1)n + 1.
\label{eq5}
\end{equation}
If $m = 1$,
we get $n^2 - 3n+1 \geq n^2 - 2n + 1$, a contradiction. Hence $m \geq 2$.
From (\ref{eq5}) we get $(m-1)n^2 - (m-2)n \leq 0$. Since $n \geq 1$, we get
$(m-1)n - (m-2) \leq 0$, a contradiction.
\end{proof}
\section{Additional remarks}
One might object to our examples because the numbers are not pairwise
relatively prime. But there also exist reasonable
$5$-tuples with $g_0 > g_1$ for which
all pairs are relatively prime: for example,
$g_0(9,10,11,13,17) = 25$, but $g_1(9,10,11,13,17) = 24$.
More generally one can use the techniques in this paper to show that
$g_0(10n-1, 15n-1, 20n-1, 25n, 30n-1) = 50n^2 -1$ and
$g_1(10n-1, 15n-1, 20n-1, 25n, 30n-1) = 50n^2 - 5n$ for $n \geq 1$, so that
$g_0 - g_1 \rightarrow \infty$ as $n \rightarrow \infty$.
For $k \geq 2$, let $f(k)$ be the least non-negative integer $i$
such that
there exists a reasonable $k$-tuple $X$ with $g_i(X) > g_{i+1}(X)$.
A priori $f(k)$ may not exist. For example, if $k = 2$, then we have
$g_i (x_1, x_2) = (i+1)x_1x_2 - x_1 - x_2$, so
$g_i (x_1, x_2) < g_{i+1} (x_1, x_2)$ for all $i$.
Thus $f(2)$ does not exist.
In this paper, we have shown that $f(5) = 0$.
This raises the obvious question of other values of $f$.
\begin{theorem}
We have $f(i) = 0$ for $i \geq 4$.
\end{theorem}
\begin{proof}
As mentioned in the Introduction, the example $(8,9,11,14,15)$ shows that
$f(5) = 0$.
For $i = 4$, we have the example $g_0(10,15,32,48) = 101$ and
$g_1(10,15,32,48) = 99$, so $f(4) = 0$. (This is the reasonable quadruple
with $g_0 > g_1$ that minimizes the largest element.)
We now provide a class of examples for $i \geq 6$. For $n \geq 6$
define $X_n$ as follows:
$$X_n = (n+1, n+4, n+5, [n+7..2n+1], 2n+3, 2n+4),$$
where by $[a..b]$ we mean the list $a, a+1, a+2, \ldots, b$.
For example, $X_8 = (9, 12, 13, 15, 16, 17, 19, 20)$. Note that $X_n$ is
of cardinality $n$. We make the following three claims for $n \geq 6$.
\begin{enumerate}[$($a$)$]
\item $X_n$ is reasonable.
\item $g_0 (X_n) = 2n+7$.
\item $g_1 (X_n) = 2n+6$.
\end{enumerate}
(a): To see that $X_n$ is reasonable, assume that some element $x$ is in the
${\mathbb N}$-span of the other elements. Then either $x = ky$ for some
$k \geq 2$, where $y$ is the smallest element of $X_n$, or $x \geq y+z$, where
$y, z$ are the two smallest elements of $X_n$. It is easy to see both
of these lead to contradictions.
(b) and (c): Clearly $2n+7$ is not representable, and $2n+6$ has the
single representation $(n+1) + (n+5)$. It now suffices to show that every
integer $\geq 2n+8$ has at least two representations. And to show this,
it suffices to show that all integers in the range $[2n+8..3n+8]$ have
at least two representations.
Choosing $(n+4) + [n+7..2n+1]$ and $(n+5)+[n+7..2n+1]$ gives two distinct
representations for all numbers in the interval $[2n+12..3n+5]$. So it
suffices to handle the remaining cases $2n+8, 2n+9, 2n+10, 2n+11,
3n+6, 3n+7, 3n+8$. This is done as follows:
\begin{alignat*}{2}
2n+8 &= (n+1)+(n+7) & \ = & \ 2(n+4) \\
2n+9 &= (n+4)+(n+5) & \ = & \
\begin{cases}
3(n+1), & \text{if $n = 6$}; \\
(n+1)+(n+8), & \text{if $n \geq 7$.}
\end{cases} \\
2n+10 &= 2(n+5) & \ = & \
\begin{cases}
(n+1)+(2n+3), & \text{if $n = 6$}; \\
3(n+1), & \text{if $n = 7$}; \\
(n+1)+(n+9), & \text{if $n \geq 8$.}
\end{cases} \\
2n+11 &= (n+4)+(n+7) & \ = & \
\begin{cases}
(n+1)+(2n+4), & \text{if $n = 6$}; \\
(n+1)+(2n+3), & \text{if $n = 7$}; \\
3(n+1) , & \text{if $n = 8$}; \\
(n+1)+(n+10), & \text{if $n \geq 9$.}
\end{cases} \\
3n+6 &= 2(n+1) + (n+4) & \ = & \ (n+5) + (2n+1) \\
3n+7 &= 2(n+1) + (n+5) & \ = & \ (n+4) + (2n+3) \\
3n+8 &= (n+5) + (2n+3) & \ = & \ (n+4) + (2n+4).
\end{alignat*}
\end{proof}
We do not know the value of $f(3)$.
The example
\begin{align*}
g_{14}(8,9,15) &= 172 \\
g_{15}(8,9,15) &= 169
\end{align*}
shows that $f(3) \leq 14$.
\begin{conjecture} $f(3) = 14$.
\end{conjecture}
We have checked all triples with largest element $\leq 200$, but have
not found any counterexamples.
\section{Acknowledgments}
We thank the referee for useful comments.
Thanks also go to Dino Lorenzini who sent us a list of comments after
we submitted this paper. Among them was an encouragement to make more
use of the formula of Brown et al, which led to the example that shows that
$f(4) = 0$.
\end{document} |
\begin{document}
\title{Deep and Confident Prediction for Time Series at Uber}
\author{\IEEEauthorblockN{Lingxue Zhu\IEEEauthorrefmark{1}}
\IEEEauthorblockA{
Department of Statistics,\\
Carnegie Mellon University\\
Pittsburgh, Pennsylvania 15213\\
Email: [email protected]}
\thanks{\IEEEauthorrefmark{1} This work was done during an internship at Uber Technologies.}
\and
\IEEEauthorblockN{Nikolay Laptev}
\IEEEauthorblockA{Uber Technologies\\
San Francisco, California 94103\\
Email: [email protected]}
}
\maketitle
\begin{abstract}
Reliable uncertainty estimation for time series prediction is critical in many fields, including physics, biology, and manufacturing.
At Uber,
probabilistic time series forecasting is used for robust prediction of the number of trips during special events, driver incentive allocation, as well as real-time anomaly detection across millions of metrics.
Classical time series models are often used in conjunction with a probabilistic formulation for uncertainty estimation. However, such models are hard to tune, scale, and add exogenous variables to.
Motivated by the recent resurgence of Long Short Term Memory networks, we propose a novel end-to-end Bayesian deep model that provides time series prediction along with uncertainty estimation.
We provide detailed experiments of the proposed solution on completed trips data, and successfully apply it to large-scale time series anomaly detection at Uber.
\end{abstract}
\begin{IEEEkeywords}
Bayesian neural networks, predictive uncertainty, time series, anomaly detection.
\end{IEEEkeywords}
\IEEEpeerreviewmaketitle
\section{Introduction}
Accurate time series forecasting and reliable estimation of the prediction uncertainty are critical for anomaly detection, optimal resource allocation, budget planning, and other related tasks. This problem is challenging, especially during high variance segments (e.g., holidays, sporting events), because extreme event prediction depends on numerous external factors that can include weather, city population growth, or marketing changes (e.g., driver incentives) \cite{doi:10.1177/1012690204043462} that all contribute to the uncertainty of the forecast.
These exogenous variables, however, are difficult to incorporate in many classical time series models, such as those found in the standard $R$ \textit{forecast}\cite{forecast} package. In addition, these models usually require manual tuning to set model and uncertainty parameters.
Relatively recently, time series modeling based on the Long Short Term Memory (LSTM) model \cite{Hochreiter:1997:LSM:1246443.1246450} has gained popularity due to its end-to-end modeling, ease of incorporating exogenous variables, and automatic feature extraction abilities \cite{Assaad:2008:NBA:1297420.1297576}. By providing a large amount of data across numerous dimensions, it has been shown that an LSTM network can model complex nonlinear feature interactions \cite{DBLP:journals/corr/OgunmoluGJG16}, which is critical for modeling complex extreme events. A recent paper \cite{laptev:2017:1273496} has shown that a neural network forecasting model is able to outperform classical time series methods in cases with long, interdependent time series.
However, the problem of estimating the uncertainty in time-series predictions using neural networks remains an open question. The prediction uncertainty is important for assessing how much to trust the forecast produced by the model, and has profound impact in anomaly detection.
The previous model proposed in \cite{laptev:2017:1273496} had no information regarding the uncertainty. Specifically, this resulted in a high false anomaly rate during holidays, where the model prediction has large variance.
In this paper, we propose a novel end-to-end model architecture for time series prediction, and quantify the prediction uncertainty using Bayesian Neural Network, which is further used for large-scale anomaly detection.
Recently, Bayesian neural networks (BNNs) have garnered increasing attention as a principled framework to provide uncertainty estimation for deep models.
Under this framework, the prediction uncertainty can be decomposed into three types: {\it model uncertainty}, {\it inherent noise}, and {\it model misspecification}. Model uncertainty, also referred to as epistemic uncertainty, captures our ignorance of the model parameters, and can be reduced as more samples are collected. Inherent noise, on the other hand, captures the uncertainty in the data generation process and is irreducible. These two sources have been previously recognized with successful application in computer vision \cite{kendall2017uncertainties}.
The third uncertainty from model misspecification, however, has been long-overlooked. This captures the scenario where the testing samples come from a different population than the training set, which is often the case in time series anomaly detection.
Similar ideas have gained attention in deep learning under the concept of adversarial examples in computer vision \cite{goodfellow2014explaining}, but its implication in prediction uncertainty remains unexplored. Here, we propose a principled solution to incorporate this uncertainty using an encoder-decoder framework. To the best of our knowledge, this is the first time that misspecification uncertainty has been successfully applied to prediction and anomaly detection in a principled way.
In summary, this paper makes the following contributions:
\begin{itemize}
\item Provides a generic and scalable uncertainty estimation implementation for deep prediction models.
\item Quantifies the prediction uncertainty from three sources: (i) model uncertainty, (ii) inherent noise, and (iii) model misspecification. The third uncertainty has been previously overlooked, and we propose a potential solution with an encoder-decoder.
\item Motivates a real-world anomaly detection use-case at Uber
that uses Bayesian Neural Networks with uncertainty estimation to improve performance at scale.
\end{itemize}
The rest of this paper is organized as follows: Section~\ref{sec:related} gives an overview of previous work on time series prediction for both classical and deep learning models, as well as the various approaches for uncertainty estimation in neural networks. The approach of Monte Carlo dropout (MC dropout) is used in this paper due to its simplicity, strong generalization ability, and scalability. In Section~\ref{sec:method}, we present our uncertainty estimation algorithm that accounts for the three different sources of uncertainty. Section~\ref{sec:evaluation} provides detailed experiments to evaluate the model performance on
Uber trip data,
and lays out a successful application to large-scale anomaly detection for millions of metrics at Uber.
Finally, Section~\ref{sec:conclusion} concludes the paper.
\section{Related Works}
\label{sec:related}
\subsection{Time Series Prediction}
Classical time series models, such as those found in the standard $R$ \textit{forecast}\cite{forecast} package, are popular methods to provide a univariate base-level forecast. These models usually require manual tuning to set seasonality and other parameters. Furthermore, while there are time series models that can incorporate exogenous variables \cite{wei1994time}, they suffer from the curse of dimensionality and require frequent retraining. To more effectively deal with exogenous variables, a combination of univariate modeling and a machine learning model to handle residuals was introduced in \cite{2015arXiv150702537O}. The resulting two-stage model, however, is hard to tune, requires manual feature extraction and frequent retraining, which is prohibitive for millions of time series.
Relatively recently, time series modeling based on LSTM \cite{Hochreiter:1997:LSM:1246443.1246450} technique gained popularity due to its end-to-end modeling, ease of incorporating exogenous variables, and automatic feature extraction abilities \cite{Assaad:2008:NBA:1297420.1297576}. By providing a large amount of data across numerous dimensions, it has been shown that an LSTM approach can model complex extreme events by allowing nonlinear feature interactions \cite{DBLP:journals/corr/OgunmoluGJG16, laptev:2017:1273496}.
While uncertainty estimation for classical forecasting models has been widely studied \cite{622683}, this is not the case for neural networks. Approaches such as a modified loss function or using a collection of heterogeneous networks \cite{gal2017concrete} were proposed; however, they require changes to the underlying model architecture. A more detailed review is given in the next section.
In this work, we use a simple and scalable approach for deep model uncertainty estimation that builds on \cite{gal2016dropout}. This framework provides a generic error estimator that runs in production
at Uber-scale
to mitigate against bad decisions (e.g., false anomaly alerts)
resulting from poor forecasts due to high prediction variance.
\subsection{Bayesian Neural Networks}
\label{sec:bnn}
Bayesian Neural Networks (BNNs) introduce uncertainty to deep learning models from a Bayesian perspective. By giving a prior to the network parameters $W$, the network aims to find the {\it posterior distribution} of $W$, instead of a point estimation.
This procedure is usually referred to as posterior inference in traditional Bayesian models. Unfortunately, due to the complicated non-linearity and non-conjugacy in deep models, exact posterior inference is rarely available; in addition, most traditional algorithms for approximate Bayesian inference cannot scale to the large number of parameters in most neural networks.
Recently, several approximate inference methods have been proposed for Bayesian Neural Networks. Most approaches are based on variational inference that optimizes the variational lower bound, including stochastic search \cite{paisley2012variational}, variational Bayes \cite{kingma2013auto}, probabilistic backpropagation \cite{hernandez2015probabilistic}, Bayes by BackProp \cite{blundell2015weight} and its extension \cite{fortunato2017bayesian}. Several algorithms further extend the approximation framework to $\alpha$-divergence optimization, including \cite{hernandez2016black, li2017dropout}. We refer the readers to \cite{gal2016uncertainty} for a more detailed and complete review of these methods.
All of the aforementioned algorithms require different training methods for the neural network. Specifically, the loss function must be adjusted to different optimization problems, and the training algorithm has to be modified in a usually non-trivial way. In practice, however, an out-of-the-box solution is often preferred, one that requires no change to the neural network architecture and can be directly applied to the previously trained model. In addition, most existing inference algorithms introduce extra model parameters, sometimes even doubling the parameter count, which is difficult to scale given the large number of parameters used in practice.
This paper is inspired by the Monte Carlo dropout (MC dropout) framework proposed in \cite{gal2016dropout} and \cite{Gal2015Theoretically}, which requires no change of the existing model architecture and provides uncertainty estimation almost for free. Specifically, stochastic dropouts are applied after each hidden layer, and the model output can be approximately viewed as a random sample generated from the posterior predictive distribution \cite{gal2016uncertainty}. As a result, the model uncertainty can be estimated by the sample variance of the model predictions in a few repetitions. Details of this algorithm will be reviewed in the next section.
The MC dropout framework is particularly appealing to practitioners because it is generic, easy to implement, and directly applicable to any existing neural networks.
However, the exploration of its application to real-world problems remains extremely limited. This paper takes an important step forward by successfully adapting this framework to conduct time series prediction and anomaly detection at large scale.
\section{Method}
\label{sec:method}
Given a trained neural network $f^{\hat{W}}(\cdot)$ where $\hat{W}$ represents the fitted parameters, as well as a new sample $x^*$, our goal is to evaluate the uncertainty of the model prediction, $\hat{y}^* = f^{\hat{W}}(x^*)$. Specifically, we would like to quantify the prediction standard error, $\eta$, so that an approximate $\alpha$-level prediction interval can be constructed by
\begin{equation}
[\hat{y}^* - z_{\alpha/2} \eta, ~ \hat{y}^* + z_{\alpha/2} \eta]
\end{equation}
where $z_{\alpha/2}$ is the upper $\alpha/2$ quantile of a standard Normal. This prediction interval is critical for various tasks. For example, in anomaly detection, anomaly alerts will be fired when the observed value falls outside the constructed 95\% interval. As a result, underestimating $\eta$ will lead to high false positive rates.
In the rest of this section, we will present our uncertainty estimation algorithm in Section~\ref{sec:uncertainty}, which accounts for three different sources of prediction uncertainties. This framework can be generalized to any neural network architectures. Then, in Section~\ref{sec:model-design}, we will present our neural network design for predicting time series at Uber.
\subsection{Prediction Uncertainty}
\label{sec:uncertainty}
We denote a neural network as function $f^W(\cdot)$, where $f$ captures the network architecture, and $W$ is the collection of model parameters.
In a Bayesian neural network, a prior is introduced for the weight parameters, and the model aims to fit the optimal posterior distribution. For example, a Gaussian prior is commonly assumed:
\[W \sim N(0, I)\]
We further specify the data generating distribution $p(y \,|\, f^W(x))$. In regression, we often assume
\[ y\,|\, W \sim N(f^W(x), \sigma^2) \]
with some noise level $\sigma$. In classification, the softmax likelihood is often used. For time series prediction, we will focus on the regression setting in this paper.
Given a set of $N$ observations $X=\{x_1, ..., x_N\}$ and $Y=\{y_1, ..., y_N\}$, Bayesian inference aims at finding the posterior distribution over model parameters $p(W \,|\, X, Y)$. With a new data point $x^*$, the prediction distribution is obtained by marginalizing out the posterior distribution:
\[ p(y^* \,|\, x^*) = \int_W p(y^* \,|\, f^W(x^*)) p(W \,|\, X, Y)\, dW \]
In particular, the variance of the prediction distribution quantifies the prediction uncertainty, which can be further decomposed using law of total variance:
\begin{equation}
\begin{split}
\textrm{Var}(y^* \,|\, x^*) & =
\textrm{Var}\left[ \mathbb{E}(y^* \,|\, W, x^*) \right] +
\mathbb{E}\left[\textrm{Var}(y^* \,|\, W, x^*) \right] \\
& = \textrm{Var}(f^W(x^*)) + \sigma^2
\end{split}
\label{eq:var-decompose}
\end{equation}
Immediately, we see that the variance is decomposed into two terms: (i) $\textrm{Var}(f^W(x^*))$, which reflects our ignorance over model parameter $W$, referred to as the {\it model uncertainty}; and (ii) $\sigma^2$ which is the noise level during data generating process, referred to as the {\it inherent noise}.
An underlying assumption for (\ref{eq:var-decompose}) is that $y^*$ is generated by the same procedure. However, this is not always the case in practice. In anomaly detection, in particular, it is expected that certain time series will have unusual patterns, which can be very different from the trained model. Therefore, we propose that a complete measurement of prediction uncertainty should be a combination from three sources: (i) model uncertainty, (ii) model misspecification, and (iii) inherent noise level.
The following sections provide details on how we handle these three terms.
\subsubsection{Model uncertainty}
The key to estimating model uncertainty is the posterior distribution $p(W\,|\, X, Y)$, also referred to as Bayesian inference. This is particularly challenging in neural networks because of the non-conjugacy due to nonlinearities. There have been various research efforts on approximate inference in deep learning (see Section~\ref{sec:bnn} for a review). Here, we follow the idea in \cite{gal2016dropout} and \cite{Gal2015Theoretically} to approximate model uncertainty using Monte Carlo dropout (MC dropout).
The algorithm proceeds as follows: given a new input $x^*$, we compute the neural network output with stochastic dropouts at each layer. That is, randomly dropout each hidden unit with certain probability $p$. This stochastic feedforward is repeated $B$ times, and we obtain $\{\hat{y}^*_{(1)}, ..., \hat{y}^*_{(B)}\}$. Then the model uncertainty can be approximated by the sample variance:
\begin{equation}
\widehat{\textrm{Var}}(f^W(x^*)) = \frac{1}{B} \sum_{b=1}^B \left(\hat{y}^*_{(b)} - \overline{\hat{y}}^* \right)^2
\end{equation}
where $\overline{\hat{y}}^* = \frac{1}{B} \sum_{b=1}^B \hat{y}^*_{(b)}$ \cite{gal2016dropout}.
There has been recent work done on choosing the optimal dropout probability $p$ adaptively by treating it as part of the model parameter, but this approach requires modifying the training phase \cite{gal2017concrete}. In practice, we find that the uncertainty estimation is usually robust within a reasonable range of $p$.
\subsubsection{Model misspecification}
Next, we address the problem of capturing potential model misspecification. In particular, we would like to capture the uncertainty when predicting unseen samples with very different patterns from the training data set. We propose to account for this source of uncertainty by introducing an encoder-decoder to the model framework. The idea is to train an encoder that extracts the representative features from a time series, in the sense that a decoder can reconstruct the time series from the encoded space. At test time, the quality of encoding of each sample will provide insight on how close it is to the training set. Another way to think of this approach is that we first fit a latent embedding space for all training time series using an encoder-decoder framework. Then, we measure the distance between test cases and training samples in the embedded space.
The next question is how to incorporate this uncertainty in the variance calculation. Here, we take a principled approach by connecting the encoder, $g(\cdot)$, with a prediction network, $h(\cdot)$, and treat them as one large network $f = h(g(\cdot))$ during inference. \figurename~\ref{fig:model} illustrates such an inference network, and Algorithm~\ref{algo:dropout} presents the MC dropout algorithm. Specifically, given an input time series $x = \{x_1, ..., x_T\}$, the encoder $g(\cdot)$ constructs the learned embedding $e = g(x)$, which is further concatenated with external features, and the final vector is fed to the final prediction network $h$. During this feedforward pass, MC dropout is applied to all layers in both the encoder $g$ and the prediction network $h$. As a result, the random dropout in the encoder perturbs the input intelligently in the embedding space, which accounts for potential model misspecification and gets further propagated through the prediction network. Here, variational dropout for recurrent neural networks \cite{Gal2015Theoretically} is applied to the LSTM layers in the encoder, and regular dropout \cite{gal2016dropout} is applied to the prediction network.
\begin{algorithm}[H]
\begin{algorithmic}[1]
\renewcommand{\algorithmicrequire}{\textbf{Input:}}
\renewcommand{\algorithmicensure}{\textbf{Output:}}
\REQUIRE data $x^*$, encoder $g(\cdot)$, prediction network $h(\cdot)$, dropout probability $p$, number of iterations $B$
\ENSURE prediction $\hat{y}^*_{mc}$, uncertainty $\eta_1$
\\
\FOR {$b = 1$ to $B$}
\STATE $e^*_{(b)} \leftarrow$ {\it VariationalDropout}$(g(x^*), p)$
\STATE $z^*_{(b)} \leftarrow \textrm{Concatenate}(e^*_{(b)}, \textrm{extFeatures})$
\STATE $\hat{y}^*_{(b)} \leftarrow $ {\it Dropout} $(h(z^*_{(b)}), p)$
\ENDFOR
\textit{// prediction}
\STATE $\hat{y}^*_{mc} \leftarrow \frac{1}{B} \sum_{b=1}^B \hat{y}^*_{(b)}$
\textit{// model uncertainty and misspecification}
\STATE $\eta_1^2 \leftarrow \frac{1}{B} \sum_{b=1}^B (\hat{y}^*_{(b)} - \hat{y}^*_{mc} )^2$
\RETURN $\hat{y}^*_{mc}, \, \eta_1$
\end{algorithmic}
\caption{MCdropout}
\label{algo:dropout}
\end{algorithm}
\subsubsection{Inherent noise}
Finally, we estimate the inherent noise level $\sigma^2$. In the original MC dropout algorithm \cite{gal2016dropout}, this parameter is implicitly determined by a prior over the smoothness of $W$. As a result, the model could end up with drastically different estimations of the uncertainty level depending on this pre-specified smoothness (see \cite{gal2016uncertainty}, chapter 4). This dependency is undesirable in anomaly detection, because we want the uncertainty estimation to also have robust frequentist coverage, but it is rarely the case that we would know the correct noise level {\it a priori}.
Here, we propose a simple and adaptive approach that estimates the noise level via the residual sum of squares, evaluated on an independent held-out validation set. Specifically, let $f^{\hat{W}}(\cdot)$ be the fitted model on training data, and $X'=\{x'_1, ..., x'_V\}, Y'=\{y'_1, ..., y'_V\}$ be an independent validation set, then we estimate $\sigma^2$ via
\begin{equation}
\hat{\sigma}^2 = \frac{1}{V} \sum_{v=1}^V \left( y'_v - f^{\hat{W}}(x'_v) \right)^2 \,.
\end{equation}
Note that $(X', Y')$ are independent from $f^{\hat{W}}(\cdot)$,
and if we further assume that $f^{\hat{W}}(x'_v)$ is an unbiased estimation of the true model, we have
\begin{equation}
\begin{split}
\mathbb{E}(\hat{\sigma}^2) &=
\sigma^2 + \frac{1}{V} \sum_{v=1}^V \mathbb{E} \left[ f^{\hat{W}}(x'_v) - f^{W}(x'_v) \right]^2 \\
&= \sigma^2 + \textrm{Var}_{\rm TRN}(f^{\hat{W}}(x'_v))
\end{split}
\end{equation}
where $\textrm{Var}_{\rm TRN}$ is w.r.t the training data, which decreases as the training sample size increases, and $\to 0$ as the training sample size $N \to \infty$. Therefore, $\hat{\sigma}^2$ provides an asymptotically unbiased estimation on the inherent noise level. In the finite sample scenario, it always overestimates the noise level and tends to be more conservative.
The final inference algorithm combines inherent noise estimation with MC dropout, and is presented in Algorithm~\ref{algo:inference}.
\begin{algorithm}[H]
\begin{algorithmic}[1]
\renewcommand{\algorithmicrequire}{\textbf{Input:}}
\renewcommand{\algorithmicensure}{\textbf{Output:}}
\REQUIRE data $x^*$, encoder $g(\cdot)$, prediction network $h(\cdot)$, dropout probability $p$, number of iterations $B$
\ENSURE prediction $\hat{y}^*$, predictive uncertainty $\eta$
\\
\textit{// prediction, model uncertainty and misspecification}
\STATE $\hat{y}^*, \, \eta_1 \leftarrow$ {\it MCdropout} $(x^*, g, h, p, B)$
\textit{// Inherent noise}
\FOR {$x'_v$ {\bf in} validation set $\{x'_1, ..., x'_V\}$}
\STATE $\hat{y'}_v \leftarrow h(g(x'_v))$
\ENDFOR
\STATE $\eta_2^2 \leftarrow \frac{1}{V} \sum_{v=1}^V \left( \hat{y'}_v - y'_v \right)^2$
\textit{// total prediction uncertainty}
\STATE $\eta \leftarrow \sqrt{\eta_1^2 + \eta_2^2}$
\RETURN $\hat{y}^*, \, \eta$
\end{algorithmic}
\caption{Inference}
\label{algo:inference}
\end{algorithm}
\subsection{Model Design}
\label{sec:model-design}
The complete architecture of the neural network is shown in \figurename~\ref{fig:model}. The network contains two major components: (i) an encoder-decoder framework that captures the inherent pattern in the time series, which is learned during pre-training step, and (ii) a prediction network that takes input from both the learned embedding from encoder-decoder, as well as any potential external features to guide the prediction. We discuss the two components in more details below.
\begin{figure}
\caption{Neural network architecture, with a pre-training phase using a LSTM encoder-decoder, followed by a prediction network, with input being the learned embedding concatenated with external features.}
\label{fig:model}
\end{figure}
\subsubsection{Encoder-decoder}
Prior to fitting the prediction model, we first conduct a pre-training step to fit an encoder that can extract useful and representative embeddings from a time series. The goals are to ensure that (i) the learned embedding provides useful features for prediction and (ii) unusual input can be captured in the embedded space, which will get further propagated to the prediction network in the next step.
Here, we use an encoder-decoder framework with two-layer LSTM cells.
Specifically, given a univariate time series $\{x_t\}_t$, the encoder reads in the first $T$ timestamps $\{x_1, ..., x_T\}$, and constructs a fixed-dimensional embedding state. Then, from this embedding state, the decoder constructs the following $F$ timestamps $\{x_{T+1}, ..., x_{T+F}\}$ with guidance from $\{x_{T-F+1}, ..., x_T\}$ (\figurename~\ref{fig:model}, bottom panel).
The intuition is that in order to construct the next few timestamps, the embedding state must extract representative and meaningful features from the input time series.
This design is inspired from the success of video representation learning using a similar architecture \cite{srivastava2015unsupervised}.
\subsubsection{Prediction network}
After the encoder-decoder is pre-trained, it is treated as an intelligent feature-extraction blackbox. Specifically, the last LSTM cell states of the encoder are extracted as learned embedding. Then, a prediction network is trained to forecast the next one or more timestamps using the learned embedding as features. In the scenario where external features are available, these can be concatenated to the embedding vector and passed together to the final prediction network.
Here, we use a multi-layer perceptron as the prediction network. We will show in Section~\ref{sec:trip} that the learned embedding from the encoder successfully captures interesting patterns from the input time series. In addition, including external features significantly improves the prediction accuracy during holidays and special events (see Section~\ref{sec:evaluation}).
\subsubsection{Inference}
After the full model is trained, the inference stage involves only the encoder and the prediction network (\figurename~\ref{fig:model}, left panel). The complete inference algorithm is presented in Algorithm~\ref{algo:inference}, where the prediction uncertainty, $\eta$, contains two terms: (i) the model and misspecification uncertainty, estimated by applying MC dropout to both the encoder and the prediction network, as presented in Algorithm~\ref{algo:dropout}; and (ii) the inherent noise level, estimated by the residuals on a held-out validation set. Finally, an approximate $\alpha$-level prediction interval is constructed by $[\hat{y}^* - z_{\alpha/2} \eta, ~ \hat{y}^* + z_{\alpha/2} \eta]$, where $z_{\alpha/2}$ is the upper $\alpha/2$ quantile of a standard Normal.
Two hyper-parameters need to be specified in Algorithm~\ref{algo:inference}: the dropout probability, $p$, and the number of iterations, $B$. As for the dropout probability, we find in our experiments that the uncertainty estimation is relatively stable across a range of $p$, and we choose the one that achieves the best performance on the validation set. As for the number of iterations, the standard error of the estimated prediction uncertainty is proportional to $1/\sqrt{B}$. We measure the standard error across different repetitions, and find that a few hundred iterations usually suffice to achieve a stable estimation.
\section{Evaluation}
\label{sec:evaluation}
This section contains two sets of results. We first evaluate the model performance on a moderately sized data set of daily trips
processed by the Uber platform.
We will evaluate the prediction accuracy and the quality of uncertainty estimation during both holidays and non-holidays. We will also present how the encoder recognizes the day-of-the-week pattern in the embedding space. Next, we will illustrate the application of this model to real-time large-scale anomaly detection for millions of metrics at Uber.
\subsection{Results on
Uber
Trip Data}
\label{sec:trip}
\subsubsection{Experimental settings}
In this section, we illustrate the model performance using the daily completed trips over four years across eight representative large cities in U.S. and Canada, including Atlanta, Boston, Chicago, Los Angeles, New York City, San Francisco, Toronto, and Washington D.C. We use three years of data as the training set, the following four months as the validation set, and the final eight months as the testing set. The encoder-decoder is constructed with two-layer LSTM cells, with 128 and 32 hidden states, respectively. The prediction network has three fully connected layers with {\it tanh} activation, with 128, 64, and 16 hidden units, respectively.
Samples are constructed using a sliding window with step size one, where each sliding window contains the previous 28 days as input, and aims to forecast the upcoming day. The raw data are log-transformed to alleviate exponential effects. Next, within each sliding window, the first day is subtracted from all values, so that trends are removed and the neural network is trained for the incremental value. At test time, it is straightforward to revert these transformations to obtain predictions at the original scale.
\subsubsection{Prediction performance}
We compare the prediction accuracy among four different models:
\begin{enumerate}
\item {\bf Last-Day}: A naive model that uses the last day's completed trips as the prediction for the next day.
\item {\bf QRF}: Based on the naive last-day prediction, a quantile random forest (QRF) is further trained to estimate the holiday lifts, i.e., the ratio to adjust the forecast during holidays. The final prediction is calculated from the last-day forecast multiplied by the estimated ratio.
\item {\bf LSTM}: A vanilla LSTM model with similar size as our model. Specifically, a two-layer stacked LSTM is constructed, with 128 and 32 hidden states, respectively, followed by a fully connected layer for the final output. This neural network also takes 28 days as input, and predicts the next day.
\item {\bf Our Model}: Our model that combines an encoder-decoder and a prediction network, as described in \figurename~\ref{fig:model}.
\end{enumerate}
Table~\ref{tab:prediction} reports the Symmetric Mean Absolute Percentage Error (SMAPE) of the four models, evaluated on the testing set. We see that using a QRF to adjust for holiday lifts is only slightly better than the naive prediction. On the other hand, a vanilla LSTM neural network provides an average of 26\% improvement across the eight cities. As we further incorporate the encoder-decoder framework and introduce external features for holidays to the prediction network (\figurename~\ref{fig:model}), our proposed model achieves another 36\% improvement in prediction accuracy. Note that when using LSTM and our model, only one generic model is trained, where the neural network is not tuned for any city-specific patterns; nevertheless, we still observe significant improvement on SMAPE across all cities when compared to traditional approaches.
\begin{table}[!t]
\renewcommand{\arraystretch}{1.3}
\caption{SMAPE of Four Different Prediction Models, Evaluated on the Test Data.}
\label{tab:prediction}
\centering
\begin{tabular}{| c | c | c | c | c |}
\hline
{\bf City} & {\bf Last-Day} & {\bf QRF} & {\bf LSTM} & {\bf Our Model}\\
\hline
Atlanta & 15.9 & 13.2 & 11.0 & 7.3 \\
\hline
Boston & 13.6 & 15.4 & 10.0 & 8.2 \\
\hline
Chicago & 16.0 & 12.7 & 9.5 & 6.1 \\
\hline
Los Angeles & 12.3 & 10.9 & 8.5 & 4.7 \\
\hline
New York City & 11.5 & 10.9 & 8.7 & 6.1 \\
\hline
San Francisco & 10.7 & 11.8 & 7.3 & 4.5 \\
\hline
Toronto & 15.2 & 11.7 & 10.0 & 5.3 \\
\hline
Washington D.C. & 13.0 & 13.3 & 8.2 & 5.2 \\
\hline
{\bf Average} & {\bf 13.5} & {\bf 12.5} & {\bf 9.2} & {\bf 5.9} \\
\hline
\end{tabular}
\end{table}
Finally, \figurename~\ref{fig:sf-prediction} visualizes the true values and our predictions during the testing period in San Francisco as an example. We observe that accurate predictions are achieved not only in regular days, but also during holiday seasons.
\begin{figure}
\caption{Daily completed trips in San Francisco during eight months of the testing set. True values are shown with the orange solid line, and predictions are shown with the blue dashed line, where the 95\% prediction band is shown as the grey area. Exact values are anonymized. }
\label{fig:sf-prediction}
\end{figure}
\subsubsection{Uncertainty estimation}
Next, we evaluate the quality of our uncertainty estimation by calibrating the empirical coverage of the prediction intervals. Here, the dropout probability is set to be 5\% at each layer, and Table~\ref{tab:coverage} reports the empirical coverage of the 95\% predictive intervals under three different scenarios:
\begin{enumerate}
\item {\bf PredNet}: Use only model uncertainty estimated from MC dropout in the prediction network, with no dropout layers in the encoder.
\item {\bf Enc+Pred}: Use MC dropout in both the encoder and the prediction network, but without the inherent noise level. This is the term $\eta_1$ in Algorithm~\ref{algo:inference}.
\item {\bf Enc+Pred+Noise}: Use the full prediction uncertainty $\eta$ as presented in Algorithm~\ref{algo:inference}, including $\eta_1$ as in 2), as well as the inherent noise level $\eta_2$.
\end{enumerate}
\begin{table}[!t]
\renewcommand{\arraystretch}{1.3}
\caption{Empirical Coverage of 95\% Predictive Intervals, Evaluated on the Test Data.
}
\label{tab:coverage}
\centering
\begin{tabular}{| c | c | c | c |}
\hline
{\bf City} & {\bf PredNet} & {\bf Enc+Pred} & {\bf Enc+Pred+Noise}\\
\hline
Atlanta & 78.33\% & 91.25\% & 94.30\% \\
\hline
Boston & 85.93\% & 95.82\% & 99.24\% \\
\hline
Chicago & 71.86\% & 80.23\% & 90.49\% \\
\hline
Los Angeles & 76.43\% & 92.40\% & 94.30\% \\
\hline
New York City & 76.43\% & 85.55\% & 95.44\% \\
\hline
San Francisco & 78.33\% & 95.06\% & 96.20\% \\
\hline
Toronto & 80.23\% & 90.87\% & 94.68\% \\
\hline
Washington D.C. & 78.33\% & 93.54\% & 96.96\% \\
\hline
{\bf Average} & {\bf 78.23\%} & {\bf 90.59\%} & {\bf 95.20\%} \\
\hline
\end{tabular}
\end{table}
By comparing {\tt PredNet} with {\tt Enc+Pred}, it is clear that introducing MC dropout to the encoder network is critical, which significantly improves the empirical coverage from 78\% to 90\% by capturing potential model misspecification. In addition, by further accounting for the inherent noise level, the empirical coverage of the final uncertainty estimation, {\tt Enc+Pred+Noise}, nicely centers around 95\% as desired.
One important use-case of the uncertainty estimation is to provide insight for unusual patterns in the time series. \figurename~\ref{fig:barplot-holiday} shows the estimated predictive uncertainty on six U.S. holidays in the testing data. We see that New Year's Eve has significantly higher uncertainty than all other holidays. This pattern is consistent with our previous experience, where New Year's Eve is usually the most difficult day to predict.
\begin{figure}
\caption{Estimated prediction standard deviations on six U.S. holidays during testing period for eight cities. Exact values are anonymized. }
\label{fig:barplot-holiday}
\end{figure}
\subsubsection{Embedding features}
As illustrated previously, the encoder is critical for both improving prediction accuracy, as well as for estimating prediction uncertainty. One natural follow-up question is whether we can interpret the embedding features extracted by the encoder. This can also provide valuable insights for model selection and anomaly detection. Here, we visualize our training data, each being a 28-day time series segment, in the embedding space. We use the last LSTM cell in the encoder, and project its cell states to 2D for visualization using PCA (\figurename~\ref{fig:embedding}). The strongest pattern we observe is day of the week, where weekdays and weekends form different clusters, with Fridays usually sitting in between. We do not observe city-level clusters, which is probably due to the fact that all cities in this data set are large cities in North America, where riders and drivers tend to have similar behaviors.
\begin{figure}
\caption{Training set of time series, visualized in the embedding space. Each point represents a 28-day segment, colored by the day of the week of the last day. We evaluate the cell states of the two LSTM layers, where the first layer with dimension 128 is plotted on the left, and second layer with dimension 32 is plotted on the right. PCA is used to project into 2D space for visualization.}
\label{fig:embedding}
\end{figure}
\subsection{Application to Anomaly Detection
at Uber
}
At Uber,
we track millions of metrics each day to monitor the status of various services across the company. One important application of uncertainty estimation is to provide real-time anomaly detection and deploy alerts for potential outages and unusual behaviors. A natural approach is to trigger an alarm when the observed value falls outside of the 95\% predictive interval. There are two main challenges we need to address in this application:
\begin{itemize}
\item Scalability: In order to provide real-time anomaly detection at the current scale, each predictive interval must be calculated within a few milliseconds during inference stage.
\item Performance: With highly imbalanced data, we aim to reduce the false positive rate as much as possible to avoid unnecessary on-call duties, while making sure the false negative rate is properly controlled so that real outages will be captured.
\end{itemize}
\subsubsection{Scalability}
Our model inference is implemented in Go. Our implementation involves efficient matrix manipulation operations, as well as stochastic dropout by randomly setting hidden units to zero with pre-specified probability. A few hundred stochastic passes are executed to calculate the prediction uncertainty, which is updated every few minutes for each metric. We find that the uncertainty estimation step adds only a small amount of computation overhead and can be conducted within ten milliseconds per metric.
\subsubsection{Performance}
Here, we illustrate the precision and recall of this framework on an example data set containing 100 metrics with manual annotation available, where 17 of them are true anomalies. Note that the neural network was previously trained on a separate and much larger data set. By adding MC dropout layers in the neural network, the estimated predictive intervals achieved a 100\% recall rate and an 80.95\% precision rate. \figurename~\ref{fig:anomaly} visualizes the neural network predictive intervals on four representative metrics, where alerts are correctly fired for two of them. When applying this framework to all metrics, we observe a 4\% improvement in precision compared to the previous ad-hoc solution, which is substantial at Uber's scale.
\begin{figure}
\caption{Four example metrics during a 12-hour span, and anomaly detection is performed for the following 30 minutes. All metrics are evaluated by minutes. The neural network constructs predictive intervals for the following 30 minutes, visualized by the shaded area in each plot. {\bf (a)}}
\label{fig:normal-1}
\label{fig:normal-2}
\label{fig:anomaly-1}
\label{fig:anomaly-2}
\label{fig:anomaly}
\end{figure}
\section{Conclusion}
\label{sec:conclusion}
We have presented an end-to-end neural network architecture for uncertainty estimation used
at Uber.
Using the MC dropout technique and model misspecification distribution, we showed a simple way to provide uncertainty estimation for a neural network forecast at scale while providing a 95\% uncertainty coverage. A critical feature about our framework is its applicability to any neural network without modifying the underlying architecture.
We have used the proposed uncertainty estimate to measure special event (e.g., holiday) uncertainty and to improve anomaly detection accuracy. For special event uncertainty estimation, we found New Year's Eve to be the most uncertain time. Using the uncertainty information, we adjusted the confidence bands of an internal anomaly detection model to improve precision during high uncertainty events, resulting in a 4\% accuracy improvement, which is large given the number of metrics we track
at Uber.
Our future work will be focused on utilizing the uncertainty information for neural network debugging during high error periods.
\end{document} |
\begin{document}
\title{\bf On a generalization of Kelly's combinatorial lemma}
\author{Aymen BEN AMIRA $^1$, Jamel DAMMAK $^1$, Hamza SI KADDOUR $^{2, {\ast}}$\\
\centerline{$^{1}$ {\small Department of Mathematics, Faculty of Sciences of Sfax, B.P. 802, 3018 Sfax, Tunisia}}\\
\centerline{$^{2}$ {\small ICJ, Department of Mathematics, University of Lyon, University Claude-Bernard Lyon1,}}\\
\centerline{\small 43 Bd du 11 Novembre 1918, 69622 Villeurbanne Cedex, France}}
\maketitle
\footnotetext{$^{\ast}$ Correspondence: [email protected]}
\footnotetext{{2000 Mathematical Subject Classification:} 05C50, 05C60.}
\noindent {\bf Abstract:}
Kelly's combinatorial lemma is a basic tool in the study of Ulam's reconstruction conjecture. A generalization in terms of a family of $t$-element subsets of a $v$-element set was given by Pouzet. We consider a version of this generalization modulo a prime $p$.
We give illustrations to graphs and tournaments.\\
\noindent {\bf Key words:} {Set, matrix, graph, tournament, isomorphism}
\section{Introduction} \label{section def}
Kelly's combinatorial lemma is the assertion that the number $s(F,G)$ of induced subgraphs of a given graph $G$, isomorphic to $F$, is determined by the deck of $G$, provided that $\vert V(F)\vert < \vert V(G)\vert$, namely $s(F,G) = \frac{1}{\vert V(G)\vert - \vert V(F)\vert} \sum_{x\in V(G)} s(F,G_{-x})$ (where $G_{-x}$ is the graph induced by $G$ on $ V(G)\setminus \{x\}$).\\
In terms of a family $\mathcal F$ of $t$-element subsets of a $v$-element set $E$, it simply says that
$\vert \mathcal F \vert = \frac{1}{v-t} \sum_{x\in E} \vert \mathcal F_{-x} \vert$ where
$ \mathcal F_{-x}:= \mathcal F \cap [E\setminus \{x\}]^t$. \\
Pouzet \cite{Pm,Pm2} gave the following extension of this result.
\begin{lemma} (M. Pouzet \cite{Pm}) \label{lem po}
Let $t$ and $r$ be integers, $V$ be a set of size $v\geq t+r$ elements,
$U$ and $U'$ be sets of subsets $T$ of $t$ elements of $V$. If for every subset
$K$ of $k=t+r$ elements of $V$, the number of elements of $U$ which are contained
in $K$ is equal to the number of elements of $U'$ which are contained in $K$,
then for every finite subsets $T'$ and $K'$ of $V$, such that $T'$ is
contained in $K'$ and $K'\setminus T'$ has at least $t+r$ elements, the
number of elements of $U$ which contain $T'$ and are contained in $K'$ is equal
to the number of elements of $U'$ which contain $T'$ and are contained in $K'$.
\end{lemma}
In particular if $\vert V \vert \geq 2t+r=t+k$, we have this particular version of the combinatorial lemma of Pouzet :
\begin{lemma} (M. Pouzet \cite{Pm}) \label{particular mp}
Let $v,t$ and $k$ be integers, $V$ be a set of $v$ elements with $t\leq \min(k,v-k)$,
$U$ and $U'$ be sets of subsets $T$ of $t$ elements of $V$. If for every subset
$K$ of $k$ elements of $V$, the number of elements of $U$ which are contained
in $K$ is equal to the number of elements of $U'$ which are contained in $K$,
then $U=U'$.
\end{lemma}
We denote by $n(U,K)$ the number of elements of $U$ which are contained
in $K$, thus Lemma \ref{particular mp} says that if $n(U,K)=n(U',K)$ for every subset
$K$ of $k$ elements of $V$ then $U=U'$.
Here we consider the case where $n(U,K)\equiv n(U',K)$ modulo a prime $p$ for every subset
$K$ of $k$ elements of $V$; our main result, Theorem \ref{thm js}, is then a version, modulo a prime $p$, of the particular version of the combinatorial lemma of Pouzet.\\
Kelly's combinatorial lemma is a basic tool in the study of Ulam's reconstruction conjecture.
Pouzet's combinatorial lemma has been used several times in reconstruction problems (see for example \cite{ ABB, B, BD, BL, D1, D2}). Pouzet gave a proof of his lemma via a counting argument \cite{Pm2} and latter by using linear algebra (related to incidence matrices) \cite{Pm} (the paper was published earlier).
Let $n,p$ be positive integers, the decomposition of $n=\sum_{i=0}^{n(p)} n_i p^i$ in base $p$ is also denoted $[n_0,n_1,\dots ,n_{n(p)}]_p$ where $n_{n(p)}\neq 0$ if and only if $n\neq 0$.
\begin{theorem} \label{thm js}
Let $p$ be a prime number. Let $v,t$ and $k$ be non-negative integers,
$k=[k_0,k_1,\dots , k_{k(p)}]_p$, $t=[t_0,t_1,\dots , t_{t(p)}]_p$. Let
$V$ be a set of $v$ elements with $t\leq \min(k,v-k)$,
$U$ and $U'$ be sets of subsets $T$ of $t$ elements of $V$.
We assume that for every subset
$K$ of $k$ elements of $V$, the number of elements of $U$ which are contained
in $K$ is equal (mod $p$) to the number of elements of $U'$ which are contained in $K$.\\
1) If $k_i=t_i$ for all $i<t(p)$ and $k_{t(p)}\geq t_{t(p)}$, then $U=U'$.\\
2) If $t=t_{t(p)}p^{t(p)}$ and $k=\sum_{i={t(p)}+1}^{k(p)} k_{i}p^i$, we have $U=U'$, or one of the sets $U,U'$ is the set of all $t$-element subsets of $V$ and the other is empty, or (whenever $p=2$) for all $t$-element subsets $T$ of $V$, $T\in U$ if and only if $T\not\in U'$.
\end{theorem}
Our proof of Theorem \ref{thm js} is an application of properties of incidence matrices due to D.H. Gottlieb \cite{Go}, W. Kantor \cite{KA} and R.M. Wilson \cite{W}, we use Wilson's Theorem (Theorem \ref{thm Wilson}). \\
In a reconstruction problem of graphs up to complementation \cite{dlps1}, Wilson's Theorem yielded the following result:
\begin{theorem} (\cite{dlps1})\label{k=0[4],p=2}
Let $k$ be an integer, $2\leq k\leq v-2$, $k\equiv 0$ (mod $4$). Let $G$ and $G'$ be two graphs on the same set $V$ of $v$
vertices (possibly infinite). We assume that $e(G_{\restriction K})$ has the same parity as $e(G'_{\restriction K})$ for all $k$-element subsets $K$ of $V$. Then $G'=G$ or $G'=\overline {G}$.
\end{theorem}
Here we look for similar results whenever $e(G_{\restriction K}) \equiv e(G'_{\restriction K})$ modulo a prime $p$. As an illustration of Theorem \ref{thm js}, we obtain the following result.
\begin{theorem}\label{k=2[4]}
Let $p$ be a prime number and $k$ be an integer, $2\leq k\leq v-2$. Let $G$ and $G'$ be two graphs on the same set $V$ of $v$
vertices (possibly infinite). We assume that for all $k$-element subsets $K$ of $V$, $e(G_{\restriction K}) \equiv e(G'_{\restriction K})$ (mod $p$).\\
1) If $p\geq3$, $k\not\equiv 0,1 \ (mod \ p)$, then $G'=G$.\\
2) If $p\geq3$, $k\equiv 0$ (mod $p$),
then $G'=G$, or one of the graphs $G,G'$ is the complete graph and the other is the empty graph.\\
3) If $p=2$, $k\equiv 2$ (mod $4$), then $G'=G$.
\end{theorem}
We give other illustrations of Theorem \ref{thm js}, to graphs in section \ref{section graphs}, and to tournaments
in section \ref{section tournaments}.
\section{Incidence matrices}
We consider the matrix $W_{t\;k}$ defined as follows :
Let $V$ be a finite set, with $v$ elements. Given non-negative integers $t,k$, let $W_{t\;k}$ be the ${v \choose t}$ by ${v \choose k}$ matrix of $0$'s and $1$'s, the rows of which are indexed by the $t$-element subsets
$T$ of $V$, the columns are indexed by the $k$-element subsets $K$
of $V$, and where the entry $W_{t\;k}(T,K)$ is $1$ if
$T\subseteq K$ and is $0$ otherwise. The matrix transpose of $W_{t\;k}$ is denoted $^tW_{t\;k}$.\\
We say that a matrix $D$ is a {\it {diagonal form}} for a matrix $M$ when $D$ is diagonal and there exist unimodular matrices (square integral matrices which have integral inverses) $E$ and $F$ such that $D=EMF$. We do not require that $M$ and $D$ are square; here "diagonal" just means that the $(i,j)$ entry of $D$ is $0$ if $i\neq j$.
A fundamental result, due to R.M. Wilson \cite{W}, is the following.
\begin{theorem} (R.M. Wilson \cite{W}) \label{thm Wilson+} For $t\leq \min(k,v-k)$, $W_{t\; k}$ has as a diagonal form the ${v \choose t}\times {v \choose k}$ diagonal matrix with diagonal entries
$$ {k-i \choose t-i}\ \mbox{with multiplicity}\ {v \choose i} - {v \choose i-1}, \ \ \ i=0,1,\dots ,t.$$
\end{theorem}
Clearly from Theorem \ref{thm Wilson+}, $rank\ W_{t\; k}$ over the field $\mathbb{Q}$ is ${v \choose t}$, that is
Theorem \ref{gottlieb-kantor} due to Gottlieb \cite{Go}. On the other hand, from Theorem \ref{thm Wilson+}, follows $rank\ W_{t\; k}$ over the field $\mathbb{Z}/p\mathbb{Z}$, as given by Theorem \ref{thm Wilson}.
\begin{theorem} (R.M. Wilson \cite{W}) \label{thm Wilson} For $t\leq \min(k,v-k)$, the rank of $W_{t\; k}$ modulo a prime $p$ is
$$ \sum {v \choose i}- {v\choose
i - 1}
$$
where the sum is extended over those indices $i$, $0\leq i\leq t$, such that $p$
does not divide the binomial coefficient ${k-i \choose
t-i}$.
\end{theorem}
In the statement of the theorem, ${v \choose -1}$
should be interpreted as zero.\\
A fundamental result, due to D.H. Gottlieb \cite{Go}, and independently W. Kantor \cite {KA}, is this:
\begin{theorem} (D.H. Gottlieb \cite{Go}, W. Kantor \cite {KA}) \label{gottlieb-kantor} For $t\leq \min(k,v-k)$, $W_{t\; k}$ has full row rank over the field $\mathbb{Q}$ of rational numbers.
\end{theorem}
It is clear that $t\leq \min(k,v-k)$ implies ${v \choose t}\leq {v \choose k}$ then, from Theorem \ref{gottlieb-kantor}, we have the following result :
\begin{corollary}\label{rk-gottlieb-kantor} For $t\leq \min(k,v-k)$, the rank of
$W_{t\; k}$ over the field $\mathbb{Q}$ of rational numbers is ${v \choose t}$ and thus $Ker(^tW_{t\; k})=\{0\}$.
\end{corollary}
If $k:=v-t$ then, up to a relabelling, $W_{t\; k}$ is the adjacency matrix $A_{t,v}$
of the {\it Kneser graph} $KG(t,v)$ \cite{GoRo}, graph whose vertices are the $t$-element
subsets of $V$, two subsets forming an edge if they are disjoint.
The eigenvalues of Kneser graphs are computed in \cite{GoRo} (Theorem 9.4.3), and thus an equivalent form of Theorem \ref{gottlieb-kantor} is:
\begin{theorem} \label{Ka} $A_{t, v}$ is non-singular for $t\leq \frac{v}{2}$.
\end{theorem}
We characterize values of $t$ and $k$ so that $\dim Ker(^tW_{t\; k})\in \{0,1\}$
and give a basis of $Ker(^tW_{t\; k})$, that appears in the following result.
\begin{theorem} \label{thm js2}
Let $p$ be a prime number. Let $v,t$ and $k$ be non-negative integers,
$k=[k_0,k_1,\dots , k_{k(p)}]_p$, $t=[t_0,t_1,\dots , t_{t(p)}]_p$, $t\leq \min(k,v-k)$.
We have:\\
1) $k_j=t_j$ for all $j<t(p)$ and $k_{t(p)}\geq t_{t(p)}$ if and only if $Ker(^tW_{t\; k})=\{0\}$ \mbox{(mod $p$)}.\\
2) $t=t_{t(p)}p^{t(p)}$ and $k=\sum_{i={t(p)}+1}^{k(p)}k_ip^i$ if and only if
$\dim Ker (^tW_{t\; k})= 1$\ \mbox{(mod $p$)} and $\{(1,1,\cdots ,1)\}$ is a basis of $Ker (^tW_{t\; k})$.
\end{theorem}
The proof of Theorem \ref{thm js2} uses Lucas's Theorem.
The notation $a\mid b$ (resp. $a\nmid b$) means $a$ divides $b$ (resp. $a$ does not divide $b$).
\begin{theorem} (Lucas's Theorem \cite{Lucas}) \label{lucas}
Let $p$ be a prime number, $t,k$ be positive integers,
$t\leq k$, $t=[t_0,t_1,\dots ,t_{t(p)}]_p$ and $k=[k_0,k_1,\dots ,k_{k(p)}]_p$. Then
$${k \choose t} = \prod_{i=0}^{t(p)} {k_i \choose t_i} \ (mod \ p),\ \mbox{where} \
{k_i \choose t_i} =0\ \mbox{if} \ t_i>k_i.$$
\end{theorem}
As a consequence of Theorem \ref{lucas}, we have the following result which is very useful in this paper.
\begin{corollary}\label{cor-lucas}
Let $p$ be a prime number, $t,k$ be positive integers,
$t\leq k$, $t=[t_0,t_1,\dots ,t_{t(p)}]_p$ and $k=[k_0,k_1,\dots ,k_{k(p)}]_p$. Then
\begin{center}
$p \vert {k \choose t}$ if and only if there is $i\in \{0,1,\dots , t(p)\}$ such that $t_i>k_i$.
\end{center}
\end{corollary}
\noindent{\bf{Proof of Theorem \ref{thm js2}.}} 1) We begin by the direct implication.
We will prove $p \nmid {{k-i} \choose {t-i}}$ for all $i=[i_0,i_1,\dots , i_{t(p)}]\in \{0,\dots ,t\}$ with
$i_{t(p)}\leq t_{t(p)}$.
Since $k_j=t_j$ for all $j< t(p)$, then $(t-i)_j=(k-i)_j$ for all $j < t(p)$.
As $k_{t(p)} \geq t_{t(p)}\geq i_{t(p)}$ then $(k-i)_{t(p)} \geq (t-i)_{t(p)}$, thus, by Corollary \ref{cor-lucas}, $p \nmid {{k-i} \choose {t-i}}$ for all $i\in \{0,1, \dots , t\}$.
Now from Theorem \ref{thm Wilson},
$rank \ W_{tk} = \sum_{i=0}^{t} {v \choose i} - {v \choose {i-1}}=
{v \choose t}$. Then the kernel of $^tW_{t\; k}\ \mbox{(mod $p$)} \ \mbox{is}\ \{0\}$.\\
Now we prove the converse implication. From Theorem \ref{thm Wilson+}, $Ker(^tW_{t\; k})=\{0\}$ implies $p \nmid {k-i \choose t-i}$ for all $i\in \{0,1, \dots , t\}$, in particular
$p \nmid {k \choose t}$. Then by Corollary \ref{cor-lucas},
$k_j\geq t_j$ for all $j \leq t(p)$.
We will prove that $k_j = t_j$ for all $j \leq t(p)-1$.
By contradiction, let $s$ be the least integer in $\{0,1, \dots , t(p)-1\}$, such that $k_s>t_s$.
We have
$(t-(t_s+1)p^s)_s = p-1$, $(k-(t_s+1)p^s)_s = k_s-t_s-1$ and $p-1>k_s-t_s-1$. From Corollary \ref{cor-lucas},
$p \mid {{k-(t_s+1)p^s} \choose {t-(t_s+1)p^s}}$, that is impossible.\\
2) Set $n:=t(p)$. We begin by the direct implication. Since $0=k_n<t_n$ then, by Corollary \ref{cor-lucas}, $p \vert {{k} \choose {t}}$.
We will prove $p \nmid {{k-i} \choose {t-i}}$ for all $i=[i_0,i_1,\dots , i_{n}]\in \{1,2,\dots ,t\}$. \\
Since $k_j=t_j=0$ for all $j<n$, then $(t-i)_j=(k-i)_j$ for all $j < n$.
From $t_n\geq i_n$, we have $(t-i)_n\in \{t_n- i_n,t_n- i_n-1\}$.
Note that $(k-i)_n\in\{p- i_n-1,p- i_n\}$ and $p-i_n-1\geq t_n-i_n$; thus $(k-i)_n \geq (t-i)_n$. So for all $j\leq n$,
$(k-i)_j \geq (t-i)_j$. Then, by Corollary \ref{cor-lucas}, $p \nmid {{k-i} \choose {t-i}}$ for all $i\in \{1,2,\dots ,t\}$. Now from Theorem \ref{thm Wilson},
$rank \ W_{tk} = \sum_{i=1}^{t} {v \choose i} - {v \choose {i-1}}=
{v \choose t}-1$, and thus $\dim Ker (^tW_{t\; k})= 1$. Now
$(1,1,\cdots ,1)W_{t\; k}=({k \choose t},{k \choose t},\cdots ,{k \choose t})$.\\
Since $p \mid {k \choose t}$, then
$(1,1,\cdots ,1)W_{t\; k}\equiv0$ (mod $p$).
Then $\{(1,1,\cdots ,1)\}$ is a basis of the kernel of $^tW_{t\; k}$ (mod $p$).\\
Now we prove the converse implication. Since $\{(1,1,\cdots ,1)\}$ is a basis of the kernel of $^tW_{t\; k}$ (mod $p$) and
$(1,1,\cdots ,1)W_{t\; k}=({k \choose t},{k \choose t},\cdots ,{k \choose t})$, then $p \mid {k \choose t}$. Since
$\dim Ker(^tW_{t\; k})=1$, then from Theorem \ref{thm Wilson},
$p \nmid {k-i \choose t-i}$ for all $i\in \{1,2,\dots ,t\}$.\\
First, let us prove that $t=t_np^n$. Note that $t_n\neq 0$ since $t\neq 0$.
Since $p \vert {k \choose t}$ then, from Corollary \ref{cor-lucas}, there is an integer $j\in \{0,1,\dots ,n\}$ such that
$t_j > k_j$. Let $A:=\{ j<n\ : \ t_j\neq 0\}$. By contradiction,
assume $A\neq \emptyset$. \\
Case 1. There is $j\in A$ such that $t_j > k_j$. We have $(t-p^n)_j = t_j$, $ (k-p^n)_j=k_j$.
Then from Corollary \ref{cor-lucas}, we have $p \mid {{k-p^n} \choose {t-p^n}}$, that is impossible.\\
Case 2. For all $j\in A$, $t_j \leq k_j$. Then $t_n > k_n$.
We have $(t-p^j)_n = t_n$, $ (k-p^j)_n=k_n$.
Then, from Corollary \ref{cor-lucas}, we have $p \mid {{k-p^j} \choose {t-p^j}}$, that is impossible.\\
From the above two cases, we deduce $t=t_np^n$.\\
Secondly, since $p \vert {{k} \choose {t}}$, then by Corollary \ref{cor-lucas}, $t_n>k_n$.
Let us show that $k_n=0$.
By contradiction, if $k_n\neq 0$ then
$(t-p^n)_n=t_{n}-1> k_n-1=(k-p^n)_n$. From Corollary \ref{cor-lucas},
$p \mid {{k-p^n} \choose {t-p^n}}$, that is impossible.
Let $s\in \{0,1,\dots ,n-1\}$, let us show that $k_s=0$.
By contradiction, if $k_s\neq 0$ then, $(t-p^s)_s =p-1$, $(k-p^s)_s = k_s-1$, thus $(t-p^s)_s > (k-p^s)_s$
so, from Corollary \ref{cor-lucas},
$p \mid {{k-p^s} \choose {t-p^s}}$, that is impossible.\endproof
\section{Proof of Theorem \ref{thm js}.}
Let $T_1,T_2, \cdots ,T_{{v \choose t}}$ be an enumeration of the $t$-element subsets of $V$, let $K_1,K_2, \cdots ,K_{{v \choose k}}$ be an enumeration of the $k$-element
subsets of $V$ and $W_{t\; k}$ be the matrix of the $t$-element subsets versus the $k$-element subsets.
Let $w_U$ be the row matrix $(u_1,u_2, \cdots , u_{v \choose t})$ where $u_i=1$ if $T_i\in U$, $0$ otherwise. We have
$$w_UW_{t\; k}=(\vert \{T_i\in U : T_i \subseteq K_1\}\vert , \cdots ,\vert \{ T_i\in U : T_i \subseteq K_{{v \choose k}} \}\vert).$$
$$w_{U'}W_{t\; k}=(\vert \{T_i\in U' : T_i \subseteq K_1\}\vert , \cdots ,\vert \{ T_i\in U' : T_i \subseteq K_{{v \choose k}} \}\vert).$$
Since for all $j\in \{1,\dots ,{v \choose k}\}$, the number of elements of $U$ which are contained in $K_j$ is equal (mod $p$) to the number of elements of $U'$ which are contained in $K_j$, then $(w_U-w_{U'})W_{t\; k}=0$ \ \mbox{(mod $p$)}, so
$w_U-w_{U'}\in Ker (^tW_{t\; k})$.\\
1) Assume $k_i=t_i$ for all $i<t(p)$ and $k_{t(p)}\geq t_{t(p)}$. From 1) of Theorem \ref{thm js2}, $w_U-w_{U'}=0$, that gives $U=U'$.\\
2) Assume $t=t_{t(p)}p^{t(p)}$ and $k=\sum_{i={t(p)}+1}^{k(p)}k_ip^i$. From 2) of Theorem \ref{thm js2}, there is an integer $\lambda \in [0,p-1]$ such that $w_U-w_{U'}=\lambda (1,1,\cdots ,1)$. It is clear that $\lambda \in \{0,1,-1\}$.
If $\lambda =0$ then $U=U'$. If $\lambda =1$ and $p\geq 3$ then
$U=\{T_1,T_2, \cdots ,T_{{v \choose t}}\}$, $U'=\emptyset$. If $\lambda =1$ and $p= 2$ then
$U=\{T_1,T_2, \cdots ,T_{{v \choose t}}\}$, $U'=\emptyset$, or $T\in U$ if and only if $T\not\in U'$.
If $\lambda =-1$ and $p\geq 3$ then $U=\emptyset$,
$U'=\{T_1,T_2, \cdots ,T_{{v \choose t}}\}$.
If $\lambda =-1$ and $p= 2$ then
$U'=\{T_1,T_2, \cdots ,T_{{v \choose t}}\}$, $U=\emptyset$, or $T\in U$ if and only if $T\not\in U'$.
\endproof
\section{Illustrations to graphs} \label{section graphs}
Our notations and terminology follow \cite {Bo}.
A \textit{digraph} $G = (V, E)$ or $G=(V(G),E(G))$, is formed
by a finite set $V$ of vertices and a set $E$ of pairs of distinct vertices, called {\it arcs} of $G$. The {\it order} (or {\it cardinal}) of $G$ is the number of its vertices. If $K$ is a subset of $V$, the {\it restriction} of $G$ to $K$, also called the {\it induced subdigraph} of $G$ on $K$ is the digraph $G_{\restriction K}:= (K, K^2\cap E)$. If $K=V\setminus \{x\}$, we denote this digraph by $G_{-x}$. Let $G = (V, E)$ and $G' = (V', E')$ be two digraphs. A one-to-one correspondence $f$ from $V$ onto $V'$ is an {\it isomorphism from} $G$ {\it onto} $G'$ provided that for $x, y \in V$, $(x, y) \in E$ if and only if $(f(x), f(y)) \in E'$. The digraphs $G$ and $G'$ are then said to be {\it isomorphic}, which is denoted by $G \simeq G^{\prime}$. A subset $I$ of $V$ is an
\textit{interval} \cite{FR,ST} (or a {\it clan} \cite{ER}, or an {\it homogenous subset} \cite{TG}) of $G$ provided that for all $a, b\in
I$ and $x \in V\setminus I$, $(a, x) \in E(G)$ if and only if $(b, x)\in E(G)$, and the same for $(x,a)$ and $(x,b)$. For example $\emptyset$, $\{x\}$
where $x \in V$, and $V$ are intervals of $G$, called {\it trivial intervals}. A digraph is then said to be {\it indecomposable} \cite{ST} (or {\it primitive }\cite{ER}) if all its intervals are trivial, otherwise it is said to be {\it decomposable}.\\
We say that $G$ is a {\it graph} (resp. {\it tournament}) when for every distinct vertices $x,y$ of $V$, $(x, y) \in E$ if and only if $(y, x)\in E$ (resp $(x, y) \in E$ if and only if $(y, x)\not\in E$); we say that $\{x,y\}$ is an {\it edge} of the graph $G$ if $(x,y)\in E$, thus $E$ is identified with a subset of $[V]^2$, the set of pairs $\{x,y\}$ of distinct elements of $V$.\\
Let $G= (V, E)$ be a graph,
the {\it complement} of $G$ is the graph $\overline G:= (V, [V]^2\setminus E)$. We denote by $e(G):=\vert E(G)\vert $ the number of edges of $G$.
The {\it degree} of a vertex $x$
of $G$, denoted $d_G(x)$, is the number of edges which contain $x$.
A $3$-element subset $T$ of $V$ such that all pairs belong to $E(G)$ is a {\it triangle} of $G$.
Let $T(G)$ be the set
of {\it triangles} of $G$ and let $t(G):=\mid T(G)\mid$.
A $3$-element subset of $V$ which is a triangle of $G$ or of $\overline G$ is a $3$-{\it homogeneous} subset of $G$.
We set
$H^{(3)}(G):=T(G)\cup T(\overline G)$, the set of $3$-homogeneous subsets of $G$, and $h^{(3)}(G):=\mid H^{(3)}(G)\mid$.\\
\noindent{\bf Another proof of Theorem \ref{k=0[4],p=2} using Theorem \ref{thm js}.} Here
$p=2$, $t=2=[0,1]_p$ and $k=[0,0,k_2,\dots]_p$. From 2) of Theorem \ref{thm js}, $U=U'$, or one of the sets $U,U'$ is the set of all $2$ element-subsets of $V$ and the other is empty, or for all $2$-element subsets $T$ of $V$, $T\in U$ if and only if $T\not\in U'$. Thus $G'=G$ or $G'=\overline{G}$.\endproof
\\
\noindent{\bf Proof of Theorem \ref{k=2[4]}.}
We set $U:=E(G)$, $U':=E(G')$.
For all $K\subseteq V$ with $\vert K\vert = k$, we have:
$\{\{x,y\}\subseteq K : \{x,y\} \in U\}= E(G_{\restriction K})$ and
$\{\{x,y\}\subseteq K : \{x,y\} \in U'\}= E(G'_{\restriction K})$.
Since $e(G_{\restriction K}) \equiv e(G'_{\restriction K})$ (mod $p$), then
$\vert\{\{x,y\}\subseteq K : \{x,y\} \in U\}\vert \equiv \vert\{\{x,y\}\subseteq K : \{x,y\} \in U'\}\vert$ (mod $p$).\\
1) $p\geq3$, $t=2=[2]_p$ and $k_0\geq 2$. From 1) of Theorem \ref{thm js}, $U=U'$, thus $G=G'$.\\
2) $p\geq3$, $t=2=[2]_p$ and $k_0=0$. From 2) of Theorem \ref{thm js}, we have $U=U'$ or
one of $U,U'$ is the set of all $2$-element subsets of $V$ and the other is empty.
Then $G=G'$ or one of the graphs $G,G'$ is the complete graph and the other is the empty graph.\\
3) $p=2$, $t=2=[0,1]_p$ and $k=[0,1,k_2,\dots]_p$. From 1) of Theorem \ref{thm js}, we have $U=U'$, thus $G=G'$.
\endproof
\\
The following result concerns graphs $G$ and $G'$ such that $h^{(3)}(G_{\restriction K})\equiv h^{(3)}(G'_{\restriction K})$ modulo a prime $p$,
for all $k$-element subsets $K$ of $V$.
\begin{theorem} \label{Ka+lem+TR} Let $G$ and $G'$ be two graphs on the
same set $V$ of $v$ vertices. Let $p$ be a prime number and $k$ be an integer, $3\leq k\leq v-3$.\\
1) If $h^{(3)}(G_{\restriction K})=h^{(3)}(G'_{\restriction K})$
for all $k$-element subsets $K$ of $V$ then $G$ and $G'$ have the same $3$-element homogeneous sets.\\
2) Assume $p\geq 5$. If $k\not\equiv 1,2 \ (mod \ p)$ and $h^{(3)}(G_{\restriction K})\equiv h^{(3)}(G'_{\restriction K})$ (mod $p$)
for all $k$-element subsets $K$ of $V$, then $G$ and $G'$ have the same $3$-element homogeneous sets.\\
3) If ($p=2$ and $k\equiv 3 \ (mod \ 4)$) or ($p=3$ and $3\mid k$), and $h^{(3)}(G_{\restriction K})\equiv h^{(3)}(G'_{\restriction K})$ (mod $p$)
for all $k$-element subsets $K$ of $V$, then $G$ and $G'$ have the same $3$-element homogeneous sets.
\end{theorem}
{\parindent0pt {\bf Proof.\ }} $H^{(3)}(G)=\{\{a,b,c\}: G_{\restriction \{a,b,c\}} \ \mbox{is a $3$-element homogeneous set}\}$.
We set $U:=H^{(3)}(G)$ and $U':=H^{(3)}(G')$.
For all $K\subseteq V$ with $\vert K\vert = k$, we have:
$\{T\subseteq K : T \in U\}=H^{(3)}_{G_{\restriction K}}$ and
$\{T\subseteq K : T \in U'\}=H^{(3)}_{G'_{\restriction K}}$. Set $t:=\mid T\mid =3$.\\
1) Since $h^{(3)}(G_{\restriction K})=h^{(3)}(G'_{\restriction K})$
for all $k$-element subsets $K$ of $V$ then
$\vert\{T\subseteq K : T \in U\}\vert = \vert\{T\subseteq K : T \in U'\}\vert$. From Lemma \ref{particular mp} it follows that $U=U'$, then
$G$ and $G'$ have the same $3$-element homogeneous sets. \\
2) Since $h^{(3)}(G_{\restriction K})\equiv h^{(3)}(G'_{\restriction K})$ (mod $p$)
for all $k$-element subsets $K$ of $V$ then
$\vert\{T\subseteq K : T \in U\}\vert \equiv \vert\{T\subseteq K : T \in U'\}\vert$ (mod $p$). \\
Case 1. $p\geq5$, $t=3=[3]_p$, $k=[k_0,\dots]_p$ and $t_0=3\leq k_0$. From 1) of Theorem \ref{thm js} we have $U=U'$, thus $G$ and $G'$ have the same $3$-element homogeneous sets.\\
Case 2. $p\geq5$, $t=3=[3]_p$, $k=[0,k_1,\dots]_p$.
By Ramsey's Theorem \cite {Ra}, every graph with at least $6$ vertices contains
a $3$-element homogeneous set. Then $U$ and $U'$ are nonempty, so from 2) of Theorem \ref{thm js}, $U=U'$, thus
$G$ and $G'$ have the same $3$-element homogeneous sets.\\
3) Since $h^{(3)}(G_{\restriction K})\equiv h^{(3)}(G'_{\restriction K})$ (mod $p$)
for all $k$-element subsets $K$ of $V$ then
$\vert\{T\subseteq K : T \in U\}\vert \equiv \vert\{T\subseteq K : T \in U'\}\vert$ (mod $p$). \\
Case 1. $p=2$, $t=3=[1,1]_p$ and $k\equiv 3 \ (mod \ 4)$. In this case, $k=[1,1,k_2, \dots]_p$, then
from 1) of Theorem \ref{thm js} we have $U=U'$, thus $G$ and $G'$ have the same $3$-element homogeneous sets.\\
Case 2. $p=3$, $t=3=[0,1]_p$ and $k=[0,k_1,\dots,k_{k(p)}]_p$.\\
Case 2.1. $k_1\in\{1,2\}$, then from 1) of Theorem \ref{thm js} we have $U=U'$, thus $G$ and $G'$ have the same $3$-element homogeneous sets.\\
Case 2.2. $k_1=0$.
By Ramsey's Theorem \cite {Ra}, every graph with at least $6$ vertices contains
a $3$-element homogeneous set. Then $U$ and $U'$ are nonempty, so from 2) of Theorem \ref{thm js}, $U=U'$, thus
$G$ and $G'$ have the same $3$-element homogeneous sets.
\endproof
\\
Let $G=(V,E)$ be a graph.
From \cite{ST}, every indecomposable graph of size $4$ is isomorphic to $P_4=\left(\{0,1,2,3\},\{\{0,1\},\{1,2\},\{2,3\}\}\right)$. Let ${\mathcal P}^{(4)}(G)$ be the set of indecomposable induced subgraphs of $G$ of size $4$,
we set $p^{(4)}(G):=\vert{\mathcal P}^{(4)}(G)\vert$. The following result concerns graphs $G$ and $G'$ such that $p^{(4)}(G_{\restriction K})\equiv p^{(4)}(G'_{\restriction K})$ modulo a prime $p$,
for all $k$-element subsets $K$ of $V$.
\begin{theorem} \label{Ka+lem+P4} Let $G$ and $G'$ be two graphs on the
same set $V$ of $v$ vertices. Let $p$ be a prime number and $k$ be an integer, $4\leq k\leq v-4$.\\
1) If $p^{(4)}(G_{\restriction K})=p^{(4)}(G'_{\restriction K})$
for all $k$-element subsets $K$ of $V$ then $G$ and $G'$ have the same indecomposable sets of size $4$.\\
2) Assume $p^{(4)}(G_{\restriction K})\equiv p^{(4)}(G'_{\restriction K})$ (mod $p$)
for all $k$-element subsets $K$ of $V$.\\
a) If $p\geq 5$ and $k\not\equiv 1,2,3 \ (mod \ p)$, then $G$ and $G'$ have the same indecomposable sets of size $4$.\\
b) If ($p=2$, $4\mid k$ and $8\nmid k$) or ($p=3$, $3\mid k-1$ and $9\nmid k-1$), then $G$ and $G'$ have the same indecomposable sets of size $4$.\\
c) If $p=2$ and $8\mid k$, then $G$ and $G'$ have the same indecomposable sets of size $4$, or for all $4$-element subsets $T$ of $V$, $G_{\restriction T}$ is indecomposable if and only if $G'_{\restriction T}$ is decomposable.
\end{theorem}
{\parindent0pt {\bf Proof.\ }}
Let $U:=\{T\subseteq V : \vert T\vert = 4, \ G_{\restriction T}\simeq P_4 \}={\mathcal P}^{(4)}(G) $,
$U':=\{T\subseteq V : \vert T\vert = 4, \ G'_{\restriction T}\simeq P_4 \}={\mathcal P}^{(4)}(G') $.
For all $K\subseteq V$, we have
$\{T\subseteq K : T\in U\}= {\mathcal P}^{(4)}( G_{\restriction K})$ and
$\{T\subseteq K : T\in U'\}= {\mathcal P}^{(4)}( G'_{\restriction K})$. Set $t:= \vert T \vert =4$.\\
1) Since $p^{(4)}(G_{\restriction K})=p^{(4)}(G'_{\restriction K})$ then
$\vert\{T\subseteq K : T\in U\}\vert=\vert\{T\subseteq K : T\in U'\}\vert$.
From Lemma \ref{particular mp}, $U=U'$, then $G$ and $G'$ have the same indecomposable sets of size $4$.\\
2) We have $p^{(4)}(G_{\restriction K})\equiv p^{(4)}(G'_{\restriction K})$ (mod $p$)
for all $k$-element subsets $K$ of $V$, then $\vert\{T\subseteq K : T\in U\}\vert \equiv \vert\{T\subseteq K : T\in U'\}\vert$ (mod $p$).\\
a) Case 1. $p\geq5$, $t=4=[4]_p$, $k=[k_0,\dots]_p$ and $t_0=4\leq k_0$. From 1) of Theorem \ref{thm js} we have $U=U'$, thus $G$ and $G'$ have the same indecomposable sets of size $4$.\\
Case 2. $p\geq5$, $t=4=[4]_p$, $k=[0,k_1,\dots]_p$. Since in every graph of order $5$, there is a restriction of size $4$ not isomorphic to $P_4$ then, from 2) of Theorem \ref{thm js}, $U=U'$, thus
$G$ and $G'$ have the same indecomposable sets of size $4$.\\
b)
Case 1. $p=2$, $t=4=[0,0,1]_p$ and $k=[0,0,1,k_3,\dots,k_{k(p)}]_p$.
From 1) of Theorem \ref{thm js}, we have $U=U'$, thus $G$ and $G'$ have the same indecomposable sets of size $4$.\\
Case 2. $p=3$, $t=4=[1,1]_p$, $k=[1,k_1,\dots,k_{k(p)}]_p$ and $t_1=1\leq k_1$. From 1) of Theorem \ref{thm js}, $U=U'$, thus $G$ and $G'$ have the same indecomposable sets of size $4$.\\
c) We have $p=2$, $t=4=[0,0,1]_p$, $k=[0,0,0,k_3,\dots,k_{k(p)}]_p$. Since in every graph of order $5$, there is a restriction of size $4$ not isomorphic to $P_4$, then from 2) of Theorem \ref{thm js}, $U=U'$, or for all $4$-element subsets $T$ of $V$, $T\in U$ if and only if $T\not\in U'$. Thus
$G$ and $G'$ have the same indecomposable sets of size $4$, or for all $4$-element subsets $T$ of $V$, $G_{\restriction T}$ is indecomposable if and only if $G'_{\restriction T}$ is decomposable.
\endproof
\\
In a reconstruction problem of graphs up to complementation \cite{dlps1}, Wilson's Theorem yielded the following result:
\begin{theorem} (\cite{dlps1})\label{k=1[4]}
Let $G$ and $G'$ be two graphs on the same set $V$ of $v$
vertices (possibly infinite). Let $k$ be an integer, $5\leq k\leq v-2$,
$k\equiv 1$ (mod $4$). Then the following properties are equivalent:\\
(i) $e(G_{\restriction K})$ has the same parity as $e(G'_{\restriction K})$ for all $k$-element subsets $K$ of $V$; and $G_{\restriction K}$, $G'_{\restriction K}$ have the same $3$-homogeneous subsets;\\
(ii) $G'= G$ or $G'= \overline G$.
\end{theorem}
Here, we just want to point out that we can obtain a similar result for $k\equiv 3$ (mod $4$), namely Theorem \ref{k=3[4]}, using the same proof as that of Theorem \ref{k=1[4]}.
The {\it boolean sum} $G\dot{+} G'$
of two graphs $G=(V,E)$ and $G'=(V,E')$ is the graph $U$ on $V$ whose edges are pairs $e$ of
vertices such that $e\in E$ if and only if $e\notin E'$.
\begin{theorem}\label{k=3[4]}
Let $G$ and $G'$ be two graphs on the same set $V$ of $v$
vertices (possibly infinite). Let $k$ be an integer, $3\leq k\leq v-2$,
$k\equiv 3$ (mod $4$). Then the following properties are equivalent:\\
(i) $e(G_{\restriction K})$ has the same parity as $e(G'_{\restriction K})$ for all $k$-element subsets $K$ of $V$; and $G_{\restriction K}$, $G'_{\restriction K}$ have the same $3$-homogeneous subsets;\\
(ii) $G'= G$.
\end{theorem}
{\parindent0pt {\bf Proof.\ }}
It is exactly the same as that of Theorem \ref{k=1[4]} (see \cite{dlps1}).
The implication $(ii)\Rightarrow (i)$ is trivial. We prove $(i)\Rightarrow (ii)$.
We suppose $V$ finite, we set $U:= G\dot{+} G'$, let $T_1,T_2, \cdots , T_{{v \choose 2}}$ be an enumeration of the $2$-element subsets of $V$, let $K_1,K_2,\cdots ,K_{{v \choose k}}$ be an enumeration of the $k$-element
subsets of $V$.
Let $w_U$ be the row matrix $(u_1,u_2,\cdots , u_{v \choose 2})$ where $u_i=1$ if $T_i$ is an edge of $U$, $0$ otherwise.\\ We have
$w_UW_{2\; k}=(e(U_{\restriction K_1}),e(U_{\restriction K_2}),\cdots , e(U_{\restriction K_{v \choose k}}))$.
From the facts that $e(G_{\restriction K})$ has the same parity as $e(G'_{\restriction K})$ and $e(U_{\restriction K})=e(G_{\restriction K})+e(G'_{\restriction K})-2e(G_{\restriction K}\cap G'_{\restriction K})$ for all $k$-element subsets $K$, $w_U$ belongs to the kernel of $^tW_{2\; k}$ over the $2$-element field. According to Theorem \ref{thm Wilson}, the rank of $W_{2k}$ (mod $2$) is
${v \choose 2} -v+1$. Hence $\dim Ker(^tW_{2\; k})=v-1$.
We give a similar claim as Claim 2.8 of \cite{dlps1}, the proof is identical.
\begin{claim} \label{bipartite}Let $k$ be an integer such that $3\leq k\leq v-2$,
$k\equiv 3$ (mod $4$), then the kernel of $^t W_{2\; k}$ consists of complete bipartite graphs (including the empty graph).
\end{claim}
{\parindent0pt {\bf Proof.\ }} Let us recall that a {\it star-graph} of $v$ vertices consists of a vertex linked to all other vertices, those $v-1$ vertices forming an independent set.
First we prove that each star-graph $S$ belongs to $\mathbb{K}$, the kernel of $^t W_{2\; k}$. Let $w_S$ be the row matrix $(s_1,s_2,\cdots , s_{v \choose 2})$ where $s_i=1$ if $T_i$ is an edge of $S$, $0$ otherwise. We have
$w_SW_{2\; k}=(e(S_{\restriction K_1}),e(S_{\restriction K_2}),\cdots , e(S_{\restriction K_{v \choose k}}))$. For all $i\in \{1,\dots ,{v \choose k}\}$, $e(S_{\restriction K_i})=k-1$ if $1\in K_i$, $0$ otherwise. Since $k$ is odd, each star-graph $S$ belongs to $\mathbb{K}$.
The vector space (over the $2$-element field) generated by the star-graphs on $V$ consists of all complete bipartite graphs; since $v\geq3$, these are distinct from the complete graph (but include the empty graph). Moreover, its dimension is $v-1$ (a basis being made of star-graphs). Since $\dim Ker(^tW_{2\; k})=v-1$, then $\mathbb{K}$ consists of complete bipartite graphs as claimed.\endproof
A {\it claw} is a star-graph on four vertices, that is a graph made of a vertex joined to three other vertices, with no edges between these three vertices. A graph is {\it claw-free} if no induced subgraph is a claw.
\begin{claim} \label{clawfree} (\cite{dlps1}) Let $G$ and $G'$ be two graphs on the same set and having the same $3$-homogeneous subsets, then the boolean sum $U: =G\dot {+} G'$ is claw-free.
\end{claim}
From Claim \ref{bipartite}, $U$ is a complete bipartite graph and,
from Claim \ref{clawfree},
$U$ is claw-free. Since $v\geq 5$, it follows that $U$ is the empty graph. Hence $G'=G$ as claimed.
\endproof
\section{Illustrations to tournaments} \label{section tournaments}
Let $T=(V,E)$ be a tournament.
For two distinct vertices $x$ and $y$ of $T$, $x\longrightarrow_Ty$ (or simply $x\longrightarrow y$) means that $(x, y)\in E$ and $(y, x)\not\in E$. For $A\subseteq V$ and $y\in V$, $A\longrightarrow y$ means $x\longrightarrow y$ for all $x\in A$.
The {\it degree} of a vertex $x$
of $T$ is $d_T(x):=\vert\{ y\in V:x\longrightarrow y\}\vert$. We denote by $T^*$ {\it the dual} of $T$ that is $T^*=(V,E^*)$ with $(x,y)\in E^*$ if and only if $(y,x)\in E$.
A {\it transitive} tournament or a {\it total order } or {\it$k$-chain} (denoted $O_k$) is a tournament of cardinality $k$, such that for $x, y, z \in V$, if $x\longrightarrow y$ and $y\longrightarrow z$, then $x\longrightarrow z$. If $x$ and $y$ are two distinct vertices of a total order, the notation $x < y$ means that $x\longrightarrow y$.
The tournament $C_3 :=\{\{0,1,2\}, \{(0,1),(1,2),(2,0)\}\}$ (resp. $C_4:=(\{0,1,2,3\},\{(0,3), (0,1), (3,1), (1,2), (2,0), (2,3)\})$) is a $3$-{\it cycle} (resp. $4$-{\it cycle}).
A {\it diamond} is a
tournament on $4$ vertices admitting only one interval of cardinality $3$ which is a $3$-cycle.
Up to isomorphism, there are exactly two diamonds $\delta^{+}$ and $\delta^{-}=(\delta^{+})^{*}$, where $\delta^+$ is the tournament defined on $\{0, 1, 2, 3\}$ by $\delta^+_{\restriction\{0, 1, 2\}} = C_3$ and $\{0, 1, 2\}\rightarrow 3 $. A tournament isomorphic to $\delta^+$ (resp. isomorphic to $\delta^-$) is said to be a {\it positive diamond} (resp. {\it negative diamond}).
The {\it boolean sum} $U:=T\dot{+} T'$
of two tournaments $T=(V,E)$ and $T'=(V,E')$, is the graph $U$ on $V$ whose edges are pairs $\{x,y\}$ of
vertices such that $(x,y)\in E$ if and only if $(x,y)\notin E'$.\\
\begin{figure}
\caption{Cycle $C_3$, Cycle $C_4$, Positive Diamond, Negative Diamond.}
\label{diamond}
\end{figure}
\begin{theorem} \label{tournois}Let $T=(V,E)$ and $T'=(V,E')$ be two tournaments. Let $p$ be a prime number and $k$ be an integer, $2\leq k\leq v-2$. Let $G:=T \dot{+} T'$. We assume that for all $k$-element subsets $K$ of $V$, $e(G_{\restriction K})\equiv 0$ (mod $p$).\\
1) If $p\geq3$, $k \not\equiv 0,1$ (mod $p$), then $T'=T$.\\
2) If $p\geq 3$, $k\equiv 0$ (mod $p$), then $T'=T$ or $T'=T^*$.\\
3) If $p=2$, $k\equiv 2$ (mod $4$), then $T'=T$.\\
4) If $p=2$, $k\equiv 0$ (mod $4$), then $T'=T$ or $T'=T^*$.
\end{theorem}
{\parindent0pt {\bf Proof.\ }}
We take $G'$ to be the empty graph. Then $e(G_{\restriction K})\equiv e(G'_{\restriction K})$
(mod $p$).\\
1) From 1) of Theorem \ref{k=2[4]}, $G$ is the empty graph, then $T'=T$.\\
2) From 2) of Theorem \ref{k=2[4]}, $G$ is empty or the complete graph, then $T'=T$ or $T'=T^*$.\\
3) From 3) of Theorem \ref{k=2[4]}, $G$ is the empty graph, then $T'=T$.\\
4) From Theorem \ref{k=0[4],p=2}, $G$ is the empty graph or the complete graph, then $T'=T$ or $T'=T^*$.
\endproof
\\
Let $T$ be a tournament, we set $C^{(3)}(T):=\{\{a,b,c\} : T_{\restriction \{a,b,c\}} \ \mbox{is a $3$-cycle} \}$, and $c^{(3)}(T):=\vert C^{(3)}(T)\vert$. Let $T=(V,E)$ and $T'=(V,E')$ be two tournaments, let $k$ be a non-negative integer, $T$ and $T'$ are $k$-{\it hypomorphic} \cite{Bou-Lop,Lr} (resp. $k$-{\it hypomorphic} up to duality)
if for every $k$-element subset $K$ of $V$, the induced subtournaments $T'_{\restriction K}$ and $T_{\restriction K}$ are isomorphic (resp. $T'_{\restriction K}$ is isomorphic to
$T_{\restriction K}$ or to $T^*_{\restriction K}$). We say that $T$ and $T'$ are ($\leq k$)-{\it hypomorphic}
if $T$ and $T'$ are $h$-hypomorphic for every $h\leq k$. Similarly, we say that $T$ and $T'$ are $(\leq k)$-{\it hypomorphic up to duality} if $T$ and $T'$ are $h$-hypomorphic up to duality for every $h\leq k$.
\begin{theorem} Let $T$ and $T'$ be two tournaments on the
same set $V$ of $v$ vertices. Let $p$ be a prime number and $k$ be an integer, $3\leq k\leq v-3$.\\
1) If $c^{(3)}(T_{\restriction K})=c^{(3)}(T'_{\restriction K})$
for all $k$-element subsets $K$ of $V$ then $T$ and $T'$ are $(\leq 3)$-hypomorphic.\\
2) Assume $p\geq5$. If $k\not\equiv 1,2$ (mod $p$), and $c^{(3)}(T_{\restriction K})\equiv c^{(3)}(T'_{\restriction K})$ (mod $p$)
for all $k$-element subsets $K$ of $V$, then $T$ and $T'$ are $(\leq 3)$-hypomorphic.\\
3) If ($p=2$ and $k\equiv3$ (mod $4$)) or ($p=3$ and $3\mid k$), and $c^{(3)}(T_{\restriction K})\equiv c^{(3)}(T'_{\restriction K})$ (mod $p$)
for all $k$-element subsets $K$ of $V$, then $T$ and $T'$ are $(\leq 3)$-hypomorphic.
\end{theorem}
{\parindent0pt {\bf Proof.\ }}
Since every tournament, of cardinality $\geq 4$, has at least
a restriction of cardinality $3$ which is not a $3$-cycle, then the proof is similar to that of Theorem \ref{Ka+lem+TR}.\endproof
Let $T$ be a tournament, we set $D^+_4(T):=\{\{a,b,c,d\} : T_{\restriction \{a,b,c,d\}} \simeq \delta^+ \}$, $D^-_4(T):=\{\{a,b,c,d\} : T_{\restriction \{a,b,c,d\}} \simeq \delta^- \}$, $d^+_4(T):=\vert D^+_4(T)\vert$ and $d^-_4(T):=\vert D^-_4(T)\vert$.
It is well-known that every subtournament of order $4$ of a tournament is either a diamond, a $4$-chain, or a $4$-cycle. We have $c^{(3)}(O_4)=0$, $c^{(3)}(\delta^+)=c^{(3)}(\delta^-)=1$, $c^{(3)}(C_4)=2$ and $C_4\simeq C_4^*$.
\begin{theorem}\label{tournament}
Let $T$ and $T'$ be two $(\leq 3)$-hypomorphic tournaments on the
same set $V$ of $v$ vertices. Let $p$ be a prime number and $k$ be an integer, $4\leq k\leq v-4$.\\
1) If $d^+_4(T_{\restriction K})=d^+_4(T'_{\restriction K})$
for all $k$-element subsets $K$ of $V$ then $T'$ and $T$ are $(\leq 5)$-hypomorphic.\\
2) Assume $d^+_4(T_{\restriction K})\equiv d^+_4(T'_{\restriction K})$ (mod $p$) for all $k$-element subsets $K$ of $V$.\\
a)
If $p\geq5$ and, $k \not\equiv 1,2,3$ (mod $p$), then $T'$ and $T$ are $(\leq 5)$-hypomorphic.\\
b) If ($p=3$, $3\mid k-1$ and $9\nmid k-1$) or ($p=2$, $4\mid k$ and $8\nmid k$), then $T'$ and $T$ are $(\leq 5)$-hypomorphic.\\
c) If $p=2$ and $8\mid k$, then $T'$ and $T$ are $(\leq 5)$-hypomorphic or for all $4$-element subsets $S$ of $V$, $T_{\restriction S}$ is isomorphic to $\delta^+$ if and only if $T'_{\restriction S}$ is isomorphic to $\delta^-$.
\end{theorem}
{\parindent0pt {\bf Proof.\ }} To prove that $T'$ and $T$ are ($\leq 5$)-hypomorphic, the following lemma shows that it is sufficient to prove that $T'$ and $T$ are ($\leq 4$)-hypomorphic.
\begin{lemma} \label{hypomorphe} \cite{B}
Let $T$ and $T'$ be two $(\leq 4)$-hypomorphic tournaments
on at least $5$ vertices. Then, $T$ and $T'$ are
$(\leq 5)$-hypomorphic.
\end{lemma}
Now, let $U^+:=\{S\subseteq V, \ T_{\restriction S} \simeq \delta^+ \}=D^+_4(T) $,
$U'^+:=D^+_4(T') $, $U^-:=D^-_4(T)$ and
$U'^-:=D^-_4(T') $.
\begin{claim}\label{3hyp4hyp}
If $T$ and $T'$ are $(\leq 3)$-hypomorphic and $U^+=U'^+$, then $U^-=U'^-$; $T$ and $T'$ are $(\leq 5)$-hypomorphic.
\end{claim}
{\parindent0pt {\bf Proof.\ }}
Let $S\in U^-$, $T_{\restriction S}\simeq \delta^-$. Since $T$ and $T'$ are ($\leq 3$)-hypomorphic, then $T'_{\restriction S}\simeq \delta^+$ or $T'_{\restriction S}\simeq \delta^-$. We have $\{S\subseteq V, \ T'_{\restriction S} \simeq \delta^+ \}=\{S\subseteq V, \ T_{\restriction S} \simeq \delta^+ \}$, then $T'_{\restriction S}\simeq \delta^-$, $S\in U'^-$ and $U^-=U'^-$. So, for $X\subset V$, if $T_{\restriction X}$ is a diamond then $T'_{\restriction X} \simeq T_{\restriction X}$.\\
Now we prove that $T$ and $T'$ are $4$-hypomorphic. Let $X\subset V$ such that $|X|=4$. If $ T_{\restriction X} \simeq C_4$, then $c^{(3)}(T_{\restriction X})=2$. Since $T$ and $T'$ are ($\leq 3$)-hypomorphic then $c^{(3)}(T'_{\restriction X})=2$, thus $T'_{\restriction X} \simeq T_{\restriction X} \simeq C_4$. Similarly, if $ T_{\restriction X} \simeq O_4$ then $T'_{\restriction X} \simeq T_{\restriction X} \simeq O_4$. So, $T'$ and $T$ are ($\leq 4$)-hypomorphic. Then, from Lemma \ref{hypomorphe}, $T'$ and $T$ are ($\leq 5$)-hypomorphic.
\endproof
From Claim \ref{3hyp4hyp}, it is sufficient to prove that $U^+=U'^+$.\\
For all $K\subseteq V$ with $\vert K\vert = k$, we have
$\{S\subseteq K : S\in U^+\}= D^+_4( T_{\restriction K})$ and
$\{S\subseteq K : S\in U'^+\}= D^+_4( T'_{\restriction K})$. \\
1) Since $d^+_4(T_{\restriction K})=d^+_4(T'_{\restriction K})$ then
$\vert\{S\subseteq K : S\in U^+\}\vert=\vert\{S\subseteq K : S\in U'^+\}\vert$.
From Lemma \ref{particular mp}, we have $U^+=U'^+$.\\
2) We have $d^+_4(T_{\restriction K})\equiv d^+_4(T'_{\restriction K})$ (mod $p$)
for all $k$-element subsets $K$ of $V$, then $\vert\{S\subseteq K : S\in U^+\}\vert \equiv \vert\{S\subseteq K : S\in U'^+\}\vert$ (mod $p$).\\
a) Case 1. $p\geq5$, $t=4=[4]_p$, $k=[k_0,\dots]_p$ and $t_0=4\leq k_0$. From 1) of Theorem \ref{thm js} we have $U^+=U'^+$.\\
Case 2. $p\geq5$, $t=4=[4]_p$, $k=[0,k_1,\dots]_p$. Since every tournament of cardinality $\geq 5$ has at least one restriction of cardinality $4$ which is not a diamond, then from 2) of Theorem \ref{thm js}, $U^+=U'^+$.\\
b)
Case 1. $p=3$, $t=4=[1,1]_p$, $k=[1,k_1,\dots,k_{k(p)}]_p$ and $t_1=1\leq k_1$. From 1) of Theorem \ref{thm js} we have $U^+=U'^+$.\\
Case 2. $p=2$, $t=4=[0,0,1]_p$ and $k=[0,0,1,k_3,\dots,k_{k(p)}]_p$.\\
From 1) of Theorem \ref{thm js} we have $U^+=U'^+$.\\
c) We have
$p=2$, $t=4=[0,0,1]_p$, $k=[0,0,0,k_3,\dots,k_{k(p)}]_p$. Since every tournament of cardinality $\geq 5$ has at least one restriction of cardinality $4$ which is not a diamond, and the fact that $T$ and $T'$ are $3$-hypomorphic, then from 2) of Theorem \ref{thm js}, $U^+=U'^+$, thus $T'$ and $T$ are ($\leq 5$)-hypomorphic, or for all $4$-element subsets $S$ of $V$, $T_{\restriction S}$ is isomorphic to $\delta^+$ if and only if $T'_{\restriction S}$ is isomorphic to $\delta^-$.
\endproof
\\
Given a digraph $S=(\{0,1,\dots ,m-1\},A)$, where $m\geq 1$
is an integer, for $i\in \{0,1,\dots ,m-1\}$ we associate a
digraph $G_{i}=(V_{i},A_{i})$, with $|V_i|\geq 1$, such
that the $V_{i}$'s are mutually disjoint. The
\textit{lexicographic sum} of $S$ by the digraphs $G_i$ or
simply the S-\textit{sum} of the $G_{i}$'s, is the digraph
denoted by $S(G_{0},G_{1},\dots ,G_{m-1})$ and defined on
the union of the $V_{i}$'s as follows: given $x\in V_{i}$
and $y\in V_{j}$, where $i,j\in \{0,1,\dots ,m-1\}$, $(x,y)$
is an arc of $S(G_{0},G_{1},\dots ,G_{m-1})$ if either $i=j$
and $(x,y)\in A_{i}$ or $i\neq j$ and $(i,j)\in A$: this
digraph replaces each vertex $i$ of $S$ by $G_{i}$.
We say that the vertex $i$ of $S$ is \textit{dilated by} $G_{i}$.\\
Let $h$ be a non-negative integer. The integers below are
considered modulo $2h+1$. The {\it circular tournament} $T_{2h+1}$ (see Figure $2$) is
defined on $\{0,1, \dots , 2h\}$ by :
${T_{2h+1}}_{\restriction \{0,1, \dots , h\}}$ is the usual total order on
$\{0,1, \dots , h\}, {T_{2h+1}}_{\restriction \{h + 1,\dots, 2h\}}$ is also
the usual order on $\{h + 1,h + 2, \dots , 2h\}$, however
$\{i + 1,i + 2, \dots , h\}\longrightarrow_{T_{2h+1}}
i+h+1 \longrightarrow_{T_{2h+1}}\{0,1, \dots , i\}$ for every
$i\in \{0,1, \dots , h - 1\}$. A tournament $T$ is said to be an
element of $D(T_{2h+1})$ if $T$ is obtained by dilating each
vertex of $T_{2h+1}$ by a finite chain $p_{i}$, then
$T = T_{2h+1}(p_{0},p_{1},\dots ,p_{2h})$.
We recall that
$T_{2h+1}$ is indecomposable and $D(T_{2h+1})$ is the class
of finite tournaments without diamond \cite{Lr}.\\
We define the tournament $\beta^+_6:= T_{3}(p_{0},p_{1},p_{2})$ with $p_0=(0<1<2)$, $p_1=(3<4)$ and $|p_2|=1$ (see Figure $3$). We set $\beta^-_6:=(\beta^+_6)^*$.
For a tournament $T=(V,E)$, we set $B^+_6(T):=\{S\subseteq V : T_{\restriction S} \simeq \beta^+_6 \}$,
$B^-_6(T):=\{S\subseteq V : T_{\restriction S} \simeq \beta^-_6 \}$,
$b^+_6(T):=\vert B^+_6(T)\vert$ and $b^-_6(T):=\vert B^-_6(T)\vert$.\\
Two tournaments $T$ and $T'$ on the same vertex set $V$ are \textit{hereditarily isomorphic} if for all
$X\subseteq V$, $T_{\restriction X}$ and $T'_{\restriction X}$ are isomorphic \cite{BBN}.
\begin{figure}
\caption{Circular tournament $T_{2h+1}$.}
\end{figure}
\begin{figure}
\caption{$\beta_6^+$.}
\end{figure}
Let $G=(V,E)$ and $G'=(V,E')$ be two
$(\leq 2)$-hypomorphic digraphs. Denote $D_{G,G'}$ the
binary relation on $V$ such that: for $x\in V$,
$x D_{G,G'}x$; and for $x \neq y\in V$, $x D_{G,G'}y$ if
there exists a sequence $x_{0} =x, . . . , x_{n} =y$ of
elements of $V$ satisfying $(x_{i}, x_{i+1})\in E$ if and
only if $(x_{i}, x_{i+1})\notin E'$, for all $i$,
$0 \leq i \leq n - 1$. The relation $D_{G,G'}$ is an
equivalence relation called {\it the difference relation},
its classes are called {\it difference classes}.
Using difference classes, G. Lopez
\cite{L1,ls} showed that if $T$ and $T'$ are
($\leq 6$)-hypomorphic then $T$ and $T'$ are isomorphic.
One may deduce the next corollary.
\begin{corollary}\label{l1} (\cite{L1,ls}) Let $T$ and $T'$ be two tournaments. We have the following properties:\\
1) If $T$ and $T'$ are $(\leq 6)$-hypomorphic then $T$ and $T'$ are hereditarily
isomorphic.\\
2) If for each
equivalence class $C$ of $D_{T,T'}$, $C$ is an interval of $T$ and $T'$, and
$T'_{\restriction C}$, $T_{\restriction C}$ are $(\leq 6)$-hypomorphic, then $T$ and $T'$ are hereditarily isomorphic.
\end{corollary}
\begin{lemma}
\label{41}
\cite{L2}
Given two $(\leq 4)$-hypomorphic tournaments $T$
and $T'$, and $C$ an equivalence class of $ D_{T,T'}$, then:\\
1) $C$ is an interval of $T'$ and $T$. \\
2) Every $3$-cycle in
$T_{\restriction C}$ is reversed in $T'_{\restriction C}$.\\
3) There exists an integer $h\geq 0$ such that $T_{\restriction C}=T_{2h+1}(p_0,p_1,\dots,p_{2h})$ and $T'_{\restriction C}=T^*_{2h+1}(p'_0,p'_1,\dots,p'_{2h})$ with $p_i$, $p'_i$ are chains on the same basis, for all $i\in \{0,1,\dots ,2h\}$.
\end{lemma}
\begin{theorem}
Let $T$ and $T'$ be two $(\leq 4)$-hypomorphic tournaments on the
same set $V$ of $v$ vertices. Let $p$ be a prime number and $k=[k_0,k_1,\dots ,k_{k(p)}]_p$ be an integer, $6\leq k\leq v-6$.\\
1) If $b^+_6(T_{\restriction K})=b^+_6(T'_{\restriction K})$ for all $k$-element subsets $K$ of $V$ then $T'$ and $T$ are hereditarily isomorphic.\\
2) Assume $b^+_6(T_{\restriction K})\equiv b^+_6(T'_{\restriction K})$ (mod $p$) for all $k$-element subsets $K$ of $V$.\\
a) If $p\geq7$, and $k_0\geq 6$ or $k_0=0$, then $T'$ and $T$ are hereditarily
isomorphic.\\
b) If ($p=5$, $k_0=1$ and $k_1\neq 0$) or ($p=3$, $k_0=0$ and $k_1=2$) or ($p=3$ and $k_0=k_1=0$)
or ($p=2$, $k_0=0$ and $k_1=k_2=1$), then $T'$ and $T$ are hereditarily
isomorphic.
\end{theorem}
{\parindent0pt {\bf Proof.\ }}
Let $U^+:=\{S\subseteq V, \ T_{\restriction S} \simeq \beta^+_6 \}=B^+_6(T) $,
$U'^+:=B^+_6(T') $, $U^-:=\{S\subseteq V, \ T_{\restriction S} \simeq \beta^-_6 \}=B^-_6(T) $,
$U'^-:= B^-_6(T') $.\\
Every tournament of cardinality $\geq 7$ has at least a restriction of cardinality $6$ which is not isomorphic to $\beta^+_6$ and $\beta^-_6$.
Then for all cases,
similarly to the proof of Theorem \ref{tournament}, we have $U^+=U'^+$.\\
Let $C$ be an equivalence class of $D_{T,T'}$, $S\in U^-$, $T_{\restriction S}\simeq \beta_6^-$. Since $T$ and $T'$ are ($\leq 3$)-hypomorphic, then $T'_{\restriction S}\simeq \beta^+_6$ or $T'_{\restriction S}\simeq \beta^-_6$. We have $\{S\subseteq V, \ T'_{\restriction S} \simeq \beta^+_6 \}=\{S\subseteq V, \ T_{\restriction S} \simeq \beta^+_6 \}$, then $T'_{\restriction S}\simeq \beta^-_6$, $S\in U'^-$ and $U^-=U'^-$. Let $X\subseteq C$ such that $|X|=6$; if $T_{\restriction X}\simeq\beta^+_6$ then, from 2) of Lemma \ref{41}, $T'_{\restriction X}\simeq\beta^-_6$, which is impossible, so $T_{\restriction C}$ and $T'_{\restriction C}$ have no restriction of cardinality $6$ isomorphic to $\beta^+_6$ or $\beta^-_6$. \\
Now we will prove that $T_{\restriction C}$ and $T'_{\restriction C}$ are $(\leq 6)$-hypomorphic.\\
From 3) of Lemma \ref{41}, there exists an integer $h\geq 0$ such that
$T_{\restriction C}=T_{2h+1}(p_0,p_1,\dots,p_{2h})$, with $p_i$ a chain and $a_i\in p_i$ for all $i\in \{0,1,\dots ,2h\}$. Since $T_{\restriction C}$ has no restriction isomorphic to $\beta_6^+$, then $h\leq3$. Indeed, if $h\geq4$, then $T_{\restriction\{a_0,a_1,a_2,a_3,a_4,a_{3+h}\}}\simeq \beta_6^+$, and $\{a_0,a_1,a_2\}$, $\{a_3,a_4\}$ are two intervals of $T_{\restriction\{a_0,a_1,a_2,a_3,a_4,a_{3+h}\}}$, which is impossible.\\
a) If $h=3$, then $T_{\restriction C}=T_7$. Indeed, if $a_0,b_0\in V(p_0)$ then $T_{\restriction\{a_0,b_0,a_1,a_2,a_3,a_5\}}\simeq \beta_6^+$, and $\{a_0,b_0,a_1\}$, $\{a_2,a_3\}$ are two intervals of $T_{\restriction\{a_0,b_0,a_1,a_2,a_3,a_5\}}$, which is impossible.\\
b) If $h=2$, then $T_{\restriction C}=T_5$, or $T_{\restriction C}$ is obtained by dilating one vertex of $T_5$ by a chain of cardinality $2$. Indeed:
Case 1. $a_0,b_0,c_0\in V(p_0)$, then $T_{\restriction\{a_0,b_0,c_0,a_1,a_2,a_3\}}\simeq \beta_6^+$ and $\{a_0,b_0,c_0\}$, $\{a_1,a_2\}$ are two intervals of $T_{\restriction\{a_0,b_0,c_0,a_1,a_2,a_3\}}$, which is impossible.
Case 2. If $a_i,b_i\in V(p_i)$ for all $i\in\{0,1\}$, then $T_{\restriction\{a_0,b_0,a_1,b_1,a_3,a_4\}}\simeq \beta_6^+$ and $\{a_0,b_0,a_4\}$, $\{a_1,b_1\}$ are two intervals of $T_{\restriction\{a_0,b_0,a_1,b_1,a_3,a_4\}}$, which is impossible.
Case 3. If $a_i,b_i\in V(p_i)$ for all $i\in\{0,2\}$, then $T_{\restriction\{a_0,b_0,a_1,a_2,b_2,a_4\}}\simeq \beta_6^+$ and $\{a_0,b_0,a_1\}$, $\{a_2,b_2\}$ are two intervals of $T_{\restriction\{a_0,b_0,a_1,a_2,b_2,a_4\}}$, which is impossible.\\
c) If $h=1$, then $T_{\restriction C}$ is obtained by dilating one vertex of $C_3$ by a chain or by dilating two or three vertices of $C_3$ by a chain of cardinality $2$.\\
d) If $h=0$, then $T_{\restriction C}$ is a chain.\\
In all cases, $T_{\restriction C}$ and $T'_{\restriction C}$ are ($\leq 6$)-hypomorphic. From 1) of Lemma \ref{41}, $C$ is an interval of $T'$ and $T$. Then, from 2) of Corollary \ref{l1}, $T$ and $T'$ are hereditarily isomorphic.
\endproof
\end{document} |
\begin{document}
% NOTE(review): the following shorthand definitions were corrupted by a bad
% search-and-replace (e.g. \newcommand{\gamma}{\gammamma}); restored to the
% original shorthands, which no longer shadow standard commands.
\newcommand{\ga}{\gamma}
\newcommand{\Ga}{\Gamma}
\newcommand{\kap}{\kappa}
\newcommand{\te}{\tilde e}
\newcommand{\vp}{\varphi}
\newcommand{\Om}{\Omega}
\newcommand{\si}{\sigma}
\newcommand{\Si}{\Sigma}
\newcommand{\de}{\delta}
\newcommand{\De}{\Delta}
\newcommand{\la}{\lambda}
\newcommand{\La}{\Lambda}
\newcommand{\ep}{\epsilon}
\newcommand{\vt}{\vartheta}
\newcommand{\vtt}{{\widetilde \vartheta}}
\newcommand{\vr}{\varrho}
\newcommand{\vrt}{{\widetilde \varrho}}
\def\al{{\alpha}}
\def\RR{{\mathbb R}}
\def\D{{(D)}}
\newcount\icount
\def\DD#1#2{\icount=#1
\ifnum\icount<1
\,_{ 0}\kern -.1em D^{#2}_{\kern -.1em x}
\else
\,_{x}\kern -.2em D^{#2}_1
\fi
}
\def\DDRI#1#2{\icount=#1
\ifnum\icount<1
\,_{-\infty}^{\kern 1em R}\kern -.2em D^{#2}_{\kern -.1em x}
\else
\,_{x}^R \kern -.2em D^{#2}_\infty
\fi
}
\def\DDR#1#2{\icount=#1
\ifnum\icount<1
_{0}^{ \kern -.1em R} \kern -.2em D^{#2}_{\kern -.1em x}
\else
_{x}^{ \kern -.1em R} \kern -.2em D^{#2}_{\kern -.1em 1}
\fi
}
\def\DDCI#1#2{\icount=#1
\ifnum\icount<1
\,_{-\infty}^{\kern 1em C} \kern -.2em D^{#2}_{\kern -.1em x}
\else
\,_{x}^C \kern -.2em D^{#2}_\infty
\fi
}
\def\DDC#1#2{\icount=#1
\ifnum\icount<1
\,_{0}^C \kern -.2em D^{#2}_{\kern -.1em x}
\else
\,_{x}^C \kern -.2em D^{#2}_1
\fi
}
\def\Hd#1{\widetilde H^{#1}(D)}
\def\Hdi#1#2{\icount=#1
\ifnum\icount<1
\widetilde H_{L}^{#2}{(D)}
\else
\widetilde H_{R}^{#2}{(D)}
\fi
}
\title[Galerkin FEM for space-fractional diffusion]
{Error Analysis of Finite Element Methods for Space-Fractional Parabolic Equations}
\author {Bangti Jin \and Raytcho Lazarov \and Joseph Pasciak \and Zhi Zhou}
\address {Department of Mathematics, University of California, Riverside, University Ave. 900,
Riverside, CA 92521 (\texttt{[email protected]})}
\address {Department of Mathematics, Texas A\&M University, College Station, TX 77843-3368
({\texttt{lazarov, pasciak, [email protected]}})}
\date{started May 21, 2013; today is \today}
\maketitle
\begin{abstract}
We consider an initial/boundary value problem for one-dimensional fractional-order parabolic equations
with a space fractional derivative of Riemann-Liouville type and order $\alpha\in (1,2)$. We study
a spatial semidiscrete scheme with the standard Galerkin finite element method with piecewise linear
finite elements, as well as fully discrete schemes based on the backward Euler method and Crank-Nicolson
method. Error estimates in the $L^2{(D)}$- and $H^{\alpha/2}{(D)}$-norm are derived for the semidiscrete
scheme, and in the $L^2{(D)}$-norm for the fully discrete schemes. These estimates are for both smooth
and nonsmooth initial data, and are expressed directly in terms of the smoothness of the
initial data. Extensive numerical results are presented to illustrate the theoretical results.
\end{abstract}
\section{Introduction}\label{sec:intro}
We consider the following initial/boundary value problem for a space fractional-order parabolic
differential equation (FPDE) for $u(x,t)$:
\begin{equation}\label{eqn:fpde}
\begin{aligned}
u_t- {\DDR0 {{\alpha}}} u&= f,\quad x\in D=(0,1), \ 0<t\le T,\\
u(0,t)&=u(1,t)=0, \quad 0<t\le T,\\
u(x,0)&=v,\quad x\in D,
\end{aligned}
\end{equation}
where ${\alpha}\in(1,2)$ is the order of the derivative, $f\in L^2(0,T;L^2{(D)})$, and $\DDR0 {\alpha} u$ refers to
the Riemann-Liouville fractional derivative of order ${\alpha}$, defined in \eqref{Riemann} below, and
$T>0$ is fixed. In case of ${\alpha}=2$, the fractional derivative $\DDR0{\alpha} u$ coincides with
the usual second-order derivative $u''$ \cite{KilbasSrivastavaTrujillo:2006}, and then
model \eqref{eqn:fpde} recovers the classical diffusion equation.
The classical diffusion equation is often used to describe diffusion processes. The use of a Laplace
operator in the equation rests on a Brownian motion assumption on the random motion of individual
particles. However, over last few decades, a number of studies \cite{BensonWheatcraftMeerschaert:2000,
HatanoHatano:1998,MetzlerKlafter:2000} have shown that anomalous diffusion, in which the mean square
variances grows faster (superdiffusion) or slower (subdiffusion) than that in a
Gaussian process, offers a superior fit to experimental data observed in some
processes, e.g., viscoelastic materials, soil contamination, and underground water flow. In particular,
at a microscopic level, the particle motion might be dependent, and can frequently take very large steps,
following some heavy-tailed probability distribution. The long range correlation and large jumps can cause
the underlying stochastic process to deviate significantly from Brownian motion for the classical diffusion
process. Instead, a L\'evy process is considered to be more appropriate. The macroscopic
counterpart is space fractional diffusion equations (SpFDEs) \eqref{eqn:fpde}, and we refer to
\cite{BensonWheatcraftMeerschaert:2000} for the derivation and relevant physical explanations. Numerous
experimental studies have shown that SpFDEs can provide accurate description of the superdiffusion
process.
Because of the extraordinary modeling capability of SpFDEs, their accurate numerical solution has become
an important task. A number of numerical methods, prominently the finite difference method, have been
developed for the time-dependent superdiffusion process in the literature. The finite difference scheme is
usually based on a shifted Gr\"{u}nwald formula for the Riemann-Liouville fractional derivative in space.
In \cite{TadjeranMeerschaert:2007, TadjeranMeerschaertScheffler:2006}, the stability, consistency and
convergence were shown for the
finite difference scheme with the Crank-Nicolson scheme in time.
In these works, the convergence rates
are provided under the a priori assumption that the solution $u$ to \eqref{eqn:fpde} is sufficiently smooth,
which unfortunately is not justified in general, cf. Theorem \ref{thm:fullreg}.
In this work, we develop a finite element method for \eqref{eqn:fpde}. It is based on the variational formulation
of the space fractional boundary value problem, initiated in \cite{ErvinRoop:2006,ErvinRoop:2007} and recently
revisited in \cite{JinLazarovPasciak:2013a}. We establish $L^2{(D)}$- and $\Hd{\alpha/2}$-norm error
estimates for the space semidiscrete scheme, and $L^2{(D)}$-norm estimates for fully discrete schemes, using
analytic semigroup theory \cite{ItoKappel:2002}. Specifically,
we obtain the following results. First, in Theorem
\ref{thm:existence} we establish the
existence and uniqueness of a weak solution $u\in L^2(0,T;\Hd{\alpha/2})$ of \eqref{eqn:fpde}
(see Section \ref{sec:prelim} for
the definitions of the space $\Hd\beta$ and the operator $A$) and in Theorem \ref{thm:fullreg}
show an enhanced regularity $u\in C((0,T];\Hdi0{\alpha-1+\beta})$
with $\beta\in[0,1/2)$, for $v\in L^2{(D)}$. Second, in Theorems \ref{thm:semismooth} and \ref{thm:seminonsmooth}
we show that the semidiscrete finite element solution $u_h(t)$ with
suitable discrete initial value $u_h(0)$ satisfies the a priori error bound
\begin{equation*}
\|u_h(t)-u(t)\|_{L^2{(D)}}+h^{\frac{\alpha}{2}-1+
\beta}\|u_h(t)-u(t)\|_{\Hd{\frac{\alpha}{2}}}\leq Ch^{\alpha-2+2\beta}t^{l-1}\|A^lv\|_{L^2{(D)}}, \, \, l=0,1,
\end{equation*}
with $h$ being the mesh size and any $\beta\in[0,1/2)$. Further we derive error estimates
for the fully discrete solution $U^n$, with $\tau$ being the time step size and $t_n=n\tau$,
for the backward Euler method and Crank-Nicolson method.
For the backward Euler method, in Theorems \ref{thm:fullsmooth:euler}
and \ref{thm:fullnonsmooth:euler}, we establish the following error estimates
\begin{equation*}
\|u(t_n)-U^n\|_{L^2{(D)}}\leq C (h^{\alpha-2+2\beta} + \tau)t_n^{l-1}\| A^lv \|_{L^2{(D)}}, \quad l=0,1,
\end{equation*}
and for the Crank-Nicolson method, in Theorems \ref{thm:fullsmooth:cn} and \ref{thm:fullnonsmooth:cn}, we prove
\begin{equation*}
\|u(t_n)-U^n\|_{L^2{(D)}}\leq C (h^{\alpha-2+2\beta} + \tau^2t_n^{-1})t_n^{l-1}\| A^l v \|_{L^2{(D)}}.
\end{equation*}
These error estimates cover both smooth and nonsmooth initial data and the bounds are directly
expressed in terms of the initial data $v$. The case of nonsmooth initial data is especially
interesting in inverse problems and optimal control.
The rest of the paper is organized as follows. In Section \ref{sec:prelim}, we introduce preliminaries on
fractional derivatives and related continuous and discrete variational formulations. Then in Section
\ref{sec:weak}, we discuss the existence and uniqueness of a weak solution to \eqref{eqn:fpde} using a
Galerkin procedure, and show the regularity pickup by the semigroup theory. Further, the properties
of the discrete semigroup $E_h(t)$ are discussed. The error analysis for the semidiscrete scheme is
carried out in Section \ref{sec:semidiscrete}, and that for fully discrete schemes based on the backward
Euler method and the Crank-Nicolson method is provided in Section \ref{sec:fullydiscrete}. Numerical
results for smooth and nonsmooth initial data are presented in Section \ref{sec:numeric}. Throughout, we
use the notation $c$ and $C$, with or without a subscript, to denote a generic constant, which may change at
different occurrences, but it is always independent of the solution $u$, time $t$, mesh size $h$
and time step size $\tau$.
\section{Fractional derivatives and variational formulation}\label{sec:prelim}
In this part, we describe fundamentals of fractional calculus, the variational problem for the source
problem with a Riemann-Liouville fractional derivative, and discuss the finite element discretization.
\subsection{Fractional derivatives}
We first briefly recall the Riemann-Liouville fractional derivative. For any positive non-integer real number
$\beta$ with $n-1 < \beta < n$, $n\in \mathbb{N}$, the left-sided Riemann-Liouville fractional
derivative $\DDR0\beta u$ of order $\beta$ of the function $u\in C^n[0,1]$ is defined by \cite[pp. 70]{KilbasSrivastavaTrujillo:2006}:
\begin{equation}\label{Riemann}
\DDR0\beta u =\frac {d^n} {d x^n} \bigg({_0\hspace{-0.3mm}I^{n-\beta}_x} u\bigg) .
\end{equation}
Here $_0\hspace{-0.3mm}I^{\gamma}_x$ for $\gamma>0$
is the left-sided Riemann-Liouville fractional integral operator of order $\gamma$ defined by
\begin{equation*}
({\,_0\hspace{-0.3mm}I^\gamma_x} f) (x)= \frac 1{\Gamma(\gamma)} \int_0^x (x-t)^{\gamma-1} f(t)dt,
\end{equation*}
where $\Gamma(\cdot)$ is Euler's Gamma function defined by $\Gamma(x)=\int_0^\infty t^{x-1}e^{-t}dt$.
The right-sided versions of fractional-order integral and derivative are defined analogously, i.e.,
\begin{equation*}
({_x\hspace{-0.3mm}I^\gamma_1} f) (x)= \frac 1{\Gamma(\gamma)}\int_x^1 (t-x)^{\gamma-1}f(t)\,dt\quad\mbox{and}\quad
\DDR1\beta u =(-1)^n\frac {d^n} {d x^n} \bigg({_x\hspace{-0.3mm}I^{n-\beta}_1} u\bigg) .
\end{equation*}
Now we introduce some function spaces. For any $\beta\ge 0$, we denote $H^\beta{(D)}$ to be
the Sobolev space of order $\beta$ on the unit interval $D=(0,1)$, and $\Hd \beta $ to be the set of
functions in $H^\beta{(D)}$ whose extension by zero to ${\mathbb R}$ is in $H^\beta({\mathbb R})$. Analogously, we define
$\Hdi 0 \beta$ (respectively, $\Hdi 1 \beta$) to be the set of functions $u$ whose extension by zero
$\tilde{u}$ is in $H^\beta(-\infty,1)$ (respectively, $H^\beta(0,\infty)$). Here for $u\in \Hdi 0
\beta$, we set $\|u\|_{\Hdi 0\beta}:=\|\tilde{u}\|_{H^\beta(-\infty,1)}$ with an analogous definition
for the norm in $\Hdi 1 \beta$. The fractional derivative operator $\DDR0\beta$ is well defined for functions in
$C^n[0,1]$, and can be extended continuously from $\Hdi0{\alpha}$ to $L^2{(D)}$ (\cite[Lemma 2.6]{ErvinRoop:2006},
\cite[Theorem 2.2]{JinLazarovPasciak:2013a}).
\subsection{Variational formulation and its discretization}
Now we recall the variational formulation for the source problem
\begin{equation*}
-\DDR0\alpha u = f,
\end{equation*}
with $u(0)=u(1)=0$, and $f\in L^2{(D)}$. The proper variational formulation is given by
\cite{JinLazarovPasciak:2013a}: find $u\in U\equiv \Hd{\alpha/2}$ such that
\begin{equation}\label{eqn:varrl}
A(u,\psi) = \langle f,\psi\rangle \quad \forall \psi\in U,
\end{equation}
where the sesquilinear form $A(\cdot,\cdot)$ is given by
\begin{equation*}
A(\varphi,\psi)=-\left({\DDR0{\alpha/2}} \varphi,\ \DDR1{\alpha/2}\psi\right).
\end{equation*}
It is known (\cite[Lemma 3.1]{ErvinRoop:2006}, \cite[Lemma 4.2]{JinLazarovPasciak:2013a}) that
the sesquilinear form $A(\cdot,\cdot)$ is coercive on the space $U$, i.e., there is
a constant $c_0$ such that for all $\psi\in U$
\begin{equation}\label{A-coercive}
\Re A(\psi,\psi) \ge c_0 \| \psi\|^2_{U},
\end{equation}
where $\Re$ denotes taking the real part,
and continuous on $U$, i.e., for all $\varphi,\psi\in U$
\begin{equation}\label{continuous}
|A(\varphi,\psi)| \le C_0 \| \varphi \|_{U}\| \psi \|_{U}.
\end{equation}
Then by the Riesz representation theorem, there exists a unique bounded linear
operator $\widetilde A: \Hd {\alpha/2} \rightarrow H^{-\alpha/2}{(D)}$ such that
\begin{equation*}
A(\varphi,\psi)= \langle \widetilde A \varphi, \psi \rangle ,\quad \forall \varphi,\psi \in \Hd {\alpha/2}.
\end{equation*}
Define $D(A)=\{ \psi \in \Hd {\alpha/2}: \widetilde A \psi \in L^2{(D)}\}$
and an operator $A : D(A)\rightarrow L^2{(D)}$ by
\begin{equation}\label{eqn:A}
A(\varphi,\psi)=(A\varphi,\psi),\ \varphi \in D(A),\, \psi \in \Hd {\alpha/2}.
\end{equation}
\begin{remark}\label{rmk::singular}
The domain $D(A)$ has a complicated structure: it consists of functions of the form $I_0^\alpha
f - (I_0^\alpha f)(1)x^{\alpha-1}$, where $f\in L^2{(D)}$ \cite{JinLazarovPasciak:2013a}. The
term $x^{\alpha-1}\in\Hdi0{\alpha-1+\beta}$, $\beta\in[0,1/2)$, appears because it is in the
kernel of the operator $\DDR0\alpha$. Hence, $D(A) \subset \Hdi0{\alpha-1+\beta}\cap
\Hd {\alpha/2}$ and it is dense in $L^2{(D)}$.
\end{remark}
The next result shows that the linear operator $A$ is sectorial, which means that
\begin{enumerate}
\item the resolvent set $\varrho(A)$ contains the sector
$\Sigma_{\theta}=\left\{ z: \theta \le |\arg z| \le \pi \right\}$ for $\theta\in(0,\pi/2)$;
\item
$ \| (\lambda I-A)^{-1} \| \le M/|\lambda|$ for $\lambda \in \Sigma_{\theta}$ and some constant $M$.
\end{enumerate}
Then we have the following important lemma (cf. \cite[pp.\,94, Theorem 3.6]{ItoKappel:2002}),
for which we sketch a proof for completeness.
\begin{lemma}\label{lem:sectorial}
The linear operator $A$ defined in \eqref{eqn:A} is sectorial on $L^2{(D)}$.
\end{lemma}
\begin{proof}
For all $\varphi \in D(A)$,
we obtain by \eqref{A-coercive} and \eqref{continuous}
\begin{equation*}
\begin{split}
|(A\varphi,\varphi)|\le C_0\| \varphi \|_{\Hd{\alpha/2}}^2 \le \frac{C_0}{c_0}\Re(A\varphi,\varphi).
\end{split}
\end{equation*}
Thus $\mathcal{N}(A)$, the numerical range of $A$, which is defined by
\begin{equation*}
\mathcal{N}(A)=\left\{ (A\varphi,\varphi): \varphi \in D(A)
~~\text{and}~~\| \varphi \|_{L^2{(D)}}=1 \right\},
\end{equation*}
is included in the sector
$\Sigma_0 = \left\{z:0\le |\arg(z)|\le \delta_0\right\}$,
with $\delta_0=\arccos\left(c_0/C_0\right)$.
Now we choose $\delta_1\in(\delta_0,\frac{\pi}{2})$ and set
$\Sigma_{\delta_1}=\left\{z:\delta_1\le |\arg(z)| \le \pi\right\}$.
Then by \cite[p.\,310, Proposition C.3.1]{Hasse_book},
the resolvent set $\varrho(A)$ contains $\Sigma_{\delta_1}$
and for all $\lambda\in \Sigma_{\delta_1}$
\begin{equation*}
\| (\lambda I-A)^{-1} \|\le \frac{1}{\text{dist}(\lambda,\overline{\mathcal{N}(A)})}
\le \frac{1}{\text{dist}(\lambda,\Sigma_0)}\le \frac{1}{\sin(\delta_1-\delta_0)}\frac{1}{|\lambda|}.
\end{equation*}
That completes the proof of this lemma.
\end{proof}
The next corollary is an immediate consequence of Lemma \ref{lem:sectorial}.
\begin{corollary}\label{cor:semigroup}
The linear operator $A$ is the infinitesimal generator of an analytic semigroup
$E(t)=e^{-At}$ on $L^2{(D)}$.
\end{corollary}
\begin{proof}
It follows directly from Lemma \ref{lem:sectorial} and standard semigroup theory, cf.,
\cite[Theorem 3.4, Proposition 3.9 and Theorem 3.19]{ItoKappel:2002}.
\end{proof}
\subsection{Finite element discretization}\label{FEM}
We introduce a finite element approximation based on an equally spaced partition of the interval
$D$. We let $h=1/m $ be the mesh size with $m>1$ being a positive integer, and consider the nodes
$x_j=jh$, $j=0,\ldots,m$. We then define $U_h$ to be the set of continuous
functions in $U$ which are linear when restricted to the subintervals $[x_i,x_{i+1}]$,
$i=0,\ldots,m-1$, i.e.,
\begin{equation*}
U_h = \left\{\chi\in C_0(\overline{D}): \chi \mbox{ is linear over } [x_i,x_{i+1}], \, i=0,\ldots,m-1 \right\}.
\end{equation*}
We define the discrete operator $A_h: U_h \rightarrow U_h$ by
\begin{equation*}
(A_h \varphi, \chi) = A(\varphi,\chi),\quad \forall \varphi,\chi \in U_h.
\end{equation*}
The lemma below is a direct corollary of properties \eqref{A-coercive} and \eqref{continuous}
of the bilinear form $A(\cdot,\cdot) $:
\begin{lemma}\label{lem:Ah}
The discrete operator $A_h$ satisfies
\begin{equation*}
\begin{split}
\Re(A_h\psi,\psi) &\ge c_0\|\psi \|_{\Hd {\alpha/2}}^2,\quad \psi \in U_h,\\
|(A_h\varphi,\psi)| &\le C_0\|\varphi \|_{\Hd {\alpha/2}}\|\psi \|_{\Hd{\alpha/2}},\quad \varphi,\psi \in U_h.
\end{split}
\end{equation*}
\end{lemma}
\begin{remark}\label{rmk:Ahsectorial}
By Lemma \ref{lem:Ah} and repeating the argument in the proof of Lemma
\ref{lem:sectorial}, we can show that $A_h$ is a sectorial operator on
$U_h$ with the same constant as $A$.
\end{remark}
Next we recall the Ritz projection $R_h:\Hd{\alpha/2} \rightarrow U_h$
and the $L^2{(D)}$-projection $P_h: L^2{(D)}\rightarrow U_h$, respectively, defined by
\begin{equation}\label{eqn:Ritz}
\begin{aligned}
A(R_h\psi,\chi)&=A(\psi,\chi) \quad \forall \psi\in \Hd{\alpha/2}, \ \chi\in U_h,\\
(P_h\varphi,\chi) &= (\varphi,\chi)\quad \forall \varphi\in L^2{(D)},\ \chi\in U_h.
\end{aligned}
\end{equation}
We shall also need the adjoint problem in the error analysis.
Similar to \eqref{eqn:A}, we define the adjoint operator $A^*$ as
\begin{equation*}
A(\varphi,\psi)=(\varphi,A^*\psi),\quad \forall \varphi \in \Hd{\alpha/2},\ \psi \in D(A^*),
\end{equation*}
where the domain $D(A^*)$ of $A^*$ satisfies $D(A^*) \subset \widetilde
H_R^{\alpha-1+\beta}{(D)}\cap \Hd {\alpha/2}$ and it is dense in $L^2{(D)}$.
Further, the discrete analogue $A_h^*$ of $A^*$ is defined by
\begin{equation*}
A(\varphi,\psi)=(\varphi,A_h^*\psi),\quad \forall \varphi, \psi \in U_h.
\end{equation*}
\section{Variational formulation of fractional-order parabolic problem}\label{sec:weak}
The variational formulation of problem \eqref{eqn:fpde} is to find $u(t)\in U$ such that
\begin{equation}\label{variational}
(u_t,\varphi) + A(u,\varphi) = (f,\varphi) \quad \forall \varphi \in U,
\end{equation}
and $u(0)=v$. We shall establish the well-posedness of the variational formulation \eqref{variational}
using a Galerkin procedure, and an enhanced regularity estimate via analytic semigroup theory. Further,
the properties of the discrete semigroup are discussed.
\subsection{Existence and uniqueness of the weak solution}
First we state the existence and uniqueness of a weak solution, following a Galerkin
procedure \cite{Evans:2010}. To this end, we choose the basis $\{\omega_k(x)=
\sqrt{2}\sin k\pi x\}$, which is orthogonal in both $L^2{(D)}$ and $H_0^1{(D)}$ and orthonormal in $L^2{(D)}$. In
particular, by the construction, the $L^2{(D)}$-orthogonal projection operator $P$ into
$\mathrm{span}\{\omega_k\}$ is stable in both $L^2{(D)}$ and $H_0^1{(D)}$, and by interpolation,
it is also stable in $\Hd \beta$ for any $\beta\in[0,1]$. Now we fix a positive integer
$m$, and look for a solution $u_m(t)$ of the form
\begin{equation*}
u_m(t):= \sum_{k=1}^m c_k(t) \omega_k
\end{equation*}
such that for $k=1,2\ldots,m$
\begin{equation}\label{eqn:appr1}
c_k(0)=(v,\omega_k), \quad
(u_m',\omega_k)+A(u_m,\omega_k)=(f,\omega_k),\quad 0\leq t\leq T.
\end{equation}
The existence and uniqueness of $u_m$ follows directly from the standard theory for ordinary
differential equation systems. With the finite-dimensional approximation $u_m$ at hand,
one can deduce the following existence and uniqueness result. The proof is rather
standard, and it is given in Appendix \ref{app:existence} for completeness.
\begin{theorem}\label{thm:existence}
Let $f\in L^2(0,T;L^2{(D)})$ and $v\in L^2{(D)}$. Then there exists a unique
weak solution $u\in L^2(0,T;\Hd{\alpha/2})$ of \eqref{variational}.
\end{theorem}
% \def{\chi}{{\chi}}  % NOTE(review): garbled macro definition (mangled \def with an invalid argument); commented out pending reconstruction of the original intent.
Now we study the regularity of the solution $u$ using semigroup theory \cite{ItoKappel:2002}.
By Corollary \ref{cor:semigroup} and the classical semigroup theory, the solution $u$ to the
initial boundary value problem \eqref{eqn:fpde} with $f\equiv0$ can be represented as
\begin{equation*}
u(t) = E(t) v,
\end{equation*}
where $E(t)=e^{-tA}$ is the semigroup generated by the sectorial operator $A$, cf. Corollary
\ref{cor:semigroup}.
Then we have an improved regularity by \cite[p.\,104, Corollary 1.5]{Pazy_book}.
\begin{theorem}\label{thm:fullreg}
For every $v \in L^2{(D)}$, the homogeneous initial-boundary value problem \eqref{variational}
(with $f=0$) has a unique solution $u(x,t) \in C([0,T];L^2{(D)})\cap C((0,T];D(A))$.
\end{theorem}
Further, we have the following $L^2{(D)}$ estimate.
\begin{lemma}\label{lem:smoothing1}
There is a constant $C$ such that
\begin{equation*}
\| A^{\gamma} E(t) \psi \|_{L^2{(D)}} \le Ct^{-\gamma} \|\psi\|_{L^2{(D)}}.
\end{equation*}
\end{lemma}
\begin{proof}
The cases $\gamma=0$ and $\gamma=1$ have been proved in \cite[pp. 91, Theorem 6.4 (iii)]{Thomee:2006}.
With the contour $\Gamma=\left\{z:z=\varrho e^{\pm \mathrm{i}\delta_1}, \varrho\ge 0 \right\}$,
the case of $\gamma\in(0,1)$ follows by
\begin{equation*}
\begin{split}
\| A^{\gamma} E(t) \psi \|_{L^2{(D)}}
& = \left| \hspace{-0.5mm}\left|\frac1{2\pi \mathrm{i}} \int_{\Gamma} z^{\gamma} e^{-zt} R(z;A) \psi \, dz \right|\hspace{-0.5mm}\right|_{L^2{(D)}}\\
& \le C\|\psi \|_{L^2{(D)}} \int_0^{\infty} \varrho^{\gamma-1}e^{-\varrho t} d\varrho\le C t^{-\gamma}\|\psi \|_{L^2{(D)}}.
\end{split}
\end{equation*}
\end{proof}
\subsection{Properties of the semigroup $E_h(t)$}
Let $E_h(t)=e^{-A_ht}$ be the semigroup generated by the operator $A_h$.
Then it satisfies a discrete analogue of Lemma \ref{lem:smoothing1}.
\begin{lemma}\label{lem:Ahprop4}
There exists a constant $C>0$
such that for $\chi \in U_h$
\begin{equation*}
\| A_h^{\gamma}E_h(t)\chi \|_{L^2{(D)}} \le Ct^{-\gamma} \| \chi \|_{L^2{(D)}}.
\end{equation*}
\end{lemma}
\begin{proof}
It follows directly from Remark \ref{rmk:Ahsectorial} and Lemma \ref{lem:smoothing1}.
\end{proof}
Last we recall the Dunford-Taylor spectral representation of a rational function $r(A_h)$ of
the operator $A_h$, when $r(z)$ is bounded in a sector in the right half
plane \cite[Lemma 9.1]{Thomee:2006}.
\begin{lemma}\label{lem:semigroup}
Let $r(z)$ be a rational function that is bounded for $|\arg z|\leq \delta_1$, $|z|\geq\epsilon>0$,
and for $|z|\geq R$. Then if $\epsilon>0$ is so small that $\{z: |z|\leq\epsilon \}\subset \varrho(A_h)$, we have
\begin{equation*}
r(A_h) = r(\infty)I + \frac{1}{2\pi\mathrm{i}}\int_{\Gamma_\epsilon\cup\Gamma_\epsilon^R\cup\Gamma^R}r(z)R(z;A_h)dz,
\end{equation*}
where $R(z;A_h)=(zI-A_h)^{-1}$ is the resolvent operator, $\Gamma_\epsilon^R = \{z:
|\arg z|=\delta_1, \epsilon\leq |z|\leq R \}$, $\Gamma_\epsilon = \{z: |z|=\epsilon,\ |\arg z|\leq \delta_1\}$, and
$\Gamma^R = \{z:\ |z|=R, \delta_1\leq |\arg z| \leq \pi\}$, and with the closed
path of integration oriented in the negative sense.
\end{lemma}
\begin{remark}\label{rem:semigroup}
The representation in Lemma \ref{lem:semigroup} holds true for any function $f(z)$
which is analytic in a neighborhood of $\{z:|\arg z|\leq \delta_1, |z|\ge \epsilon \}$,
including at $z=\infty$.
\end{remark}
\section{Error estimates for semidiscrete Galerkin FEM}\label{sec:semidiscrete}
In this section, we derive $L^2{(D)}$- and $\Hd {\alpha/2}$-norm error estimates for the
semidiscrete Galerkin FEM: find $u_h(t)\in U_h$ such that
\begin{equation}\label{fem}
\begin{split}
{( u_{h,t},\varphi)}+ A(u_h,\varphi)&= {(f, \varphi)},
\quad \forall \varphi\in U_h,\ T \ge t >0,
\quad u_h(0)=v_h,
\end{split}
\end{equation}
where $v_h\in U_h$ is an approximation to the initial data $v$. We shall discuss
the case of smooth and nonsmooth initial data, i.e. $v\in D(A)$ and $v\in L^2{(D)}$,
separately.
\subsection{Error estimate for nonsmooth initial data}
First we consider nonsmooth initial data, i.e., $v\in L^2{(D)}$. We follow the approach
due to Fujita and Suzuki \cite{FujitaSuzuki:1991}. First, we have the following important
lemma. Here we shall use the constant $\delta_1$ and the contour $\Gamma=\left\{z:z
=\varrho e^{\pm \mathrm{i}\delta_1}, \varrho\ge 0 \right\}$ defined in the proof of Lemma \ref{lem:sectorial}.
\begin{lemma}\label{lem:werror}
There exists a constant $C>0$ such that for any $ \varphi\in \Hd {\alpha/2}$ and
$z\in \Gamma$
\begin{equation*}
|z| \| \varphi \|_{L^2{(D)}}^2 + \| \varphi \|_{\Hd {\alpha/2}}^2 \le C\left|z\| \varphi \|_{L^2{(D)}}^2 -A(\varphi,\varphi)\right|.
\end{equation*}
\end{lemma}
\begin{proof}
We use the notation $\delta_0$ and $\delta_1$
from the proof of Lemma \ref{lem:sectorial}.
Then we choose $\delta'$ such that $\delta'\in(\delta_0,\delta_1)$
and let $c'=C_0\cos \delta'$,
cf. Fig. \ref{fig:integral_path}(a). By setting $\gamma=c_0-c'>0$, we have
\begin{equation*}
\Re A(\varphi,\varphi)-\gamma \|\varphi\|_{\Hd {\alpha/2}}^2 \ge c'\|\varphi\|_{\Hd {\alpha/2}}^2 \ge \cos\delta'|A(\varphi,\varphi)|.
\end{equation*}
By dividing both sides by $\|\varphi\|_{L^2{(D)}}^2$, this yields
\begin{equation*}
A(\varphi,\varphi)/\| \varphi \|_{L^2{(D)}}^2 \in \Sigma_{\varphi}= \left\{ z:|\arg\left (z-\gamma
\|\varphi\|_{\Hd {\alpha/2}}^2/\| \varphi \|_{L^2{(D)}}^2\right )| \le \delta'\right\}.
\end{equation*}
Note that for $z\in \Gamma$, there holds, cf. Fig. \ref{fig:integral_path}(a)
\begin{equation*}
\text{dist}(z,\Sigma_{\varphi}) \ge |z| \sin(\delta_1-\delta')+
\gamma\|\varphi\|_{\Hd {\alpha/2}}^2/\| \varphi \|_{L^2{(D)}}^2 \sin\delta'.
\end{equation*}
Consequently, for $z\in \Gamma$ we get
\begin{equation}\label{eqn:proofcontrol}
\begin{split}
\left|z\| \varphi \|_{L^2{(D)}}^2 -A(\varphi,\varphi)\right|&\ge \|\varphi\|_{L^2{(D)}}^2 \text{dist} (z,\Sigma_{\varphi}) \\
& \ge |z| \|\varphi\|_{L^2{(D)}}^2 \sin(\delta_1-\delta')+\gamma\|\varphi\|_{\Hd {\alpha/2}}^2\sin\delta'\\
& \ge \frac1C \left( |z|\| \varphi \|_{L^2{(D)}}^2 + \| \varphi \|_{\Hd {\alpha/2}}^2\right),
\end{split}
\end{equation}
and this completes the proof.
\end{proof}
\begin{figure}
% NOTE(review): the graphics content of this figure was lost in extraction; the caption was unclosed and has been reconstructed — restore the \includegraphics commands and verify the caption against the original.
\caption{The integration paths and sectors used in the proofs: (a) the contour $\Gamma$ and the sector $\Sigma_{\varphi}$; (b) the contour $\Gamma_{\delta_1}^t$.}\label{fig:integral_path}
\end{figure}
The next result gives estimates on the resolvent $R(z;A)v$ and its discrete analogue.
\begin{lemma}\label{lem:wbound}
Let $ v\in L^2{(D)}$, $z\in \Gamma$,
$w=R(z;A)v$, and $w_h=R(z;A_h)P_h v$. Then for $\beta\in[0,1/2)$, there holds
\begin{equation}\label{eqn:wboundHa}
\| w_h-w \|_{L^2{(D)}} + h^{\alpha/2-1+\beta}\| w_h-w \|_{\Hd{\alpha/2}}
\le Ch^{\alpha-2+2\beta}\| v \|_{L^2{(D)}}.
\end{equation}
\end{lemma}
\begin{proof}
By the definition, $w$ and $w_h$ should respectively satisfy
\begin{equation*}
\begin{aligned}
z(w,\varphi)-A(w,\varphi)&=(v,\varphi),\quad \forall \varphi \in U,\\
z(w_h,\varphi)-A(w_h,\varphi)&=(v,\varphi),\quad \forall \varphi\in U_h.
\end{aligned}
\end{equation*}
Upon subtracting these two identities, it gives an orthogonality relation for $e=w-w_h$:
\begin{equation}\label{eqn:worthog}
z(e,\varphi) - A(e, \varphi) = 0, \quad \forall \varphi\in U_h.
\end{equation}
This and Lemma \ref{lem:werror} imply that for any $\chi\in U_h$
\begin{equation*}
\begin{split}
|z| \| e\|_{L^2{(D)}}^2 + \| e \|_{\Hd {\alpha/2}}^2
& \le C\left|z\| e \|_{L^2{(D)}}^2 -A(e,e)\right| \\
& = C\left|z(e,w-\chi) -A(e,w-\chi)\right|.
\end{split}
\end{equation*}
By taking $\chi=\pi_h w$, the finite element interpolant of $w$,
and the Cauchy-Schwarz inequality, we obtain
\begin{equation}\label{eqn:control2}
\begin{aligned}
|z| \| e \|_{L^2{(D)}}^2 + \| e \|_{\Hd {\alpha/2}}^2
& \le C \left(|z| h^{\alpha/2}\|e\|_{L^2{(D)}}\|w\|_{\Hd {\alpha/2}}\right.\\
&\qquad +\left. h^{\alpha/2-1+\beta}\|e\|_{\Hd {\alpha/2}}\| w \|_{H^{\alpha-1+\beta}{(D)}} \right).
\end{aligned}
\end{equation}
Appealing again to Lemma \ref{lem:werror} with the choice $\varphi=w$, we arrive at
\begin{equation*}
|z| \|w \|_{L^2{(D)}}^2 + \|w\|_{\Hd {\alpha/2}}^2 \le C|((zI-A)w,w)|\le C\| v \|_{L^2{(D)}}\| w\|_{L^2{(D)}}.
\end{equation*}
Consequently
\begin{equation}\label{eqn:wbound2}
\begin{aligned}
\|w \|_{L^2{(D)}} \le C|z|^{-1}\| v \|_{L^2{(D)}}\quad\mbox{and}\quad
\|w \|_{\Hd {\alpha/2}} \le C|z|^{-1/2}\| v \|_{L^2{(D)}}.
\end{aligned}
\end{equation}
It remains to bound $\|w\|_{H^{\alpha-1+\beta}{(D)}}$. To this end, we deduce from \eqref{eqn:wbound2} that
\begin{equation*}
\begin{split}
\| w \|_{H^{\alpha-1+\beta}{(D)}} & \le C\| Aw \|_{L^2{(D)}}= C\| (A-zI+zI)R(z;A)v \|_{L^2{(D)}}\\
&\le C\left(\| v \|_{L^2{(D)}}+|z|\|w\|_{L^2{(D)}}\right)\le C\| v\|_{L^2{(D)}}.
\end{split}
\end{equation*}
It follows from this and \eqref{eqn:control2} that
\begin{equation*}
|z| \| e\|_{L^2{(D)}}^2 + \| e\|_{\Hd {\alpha/2}}^2
\le Ch^{\alpha/2-1+\beta}\| v \|_{L^2{(D)}}\left(|z|^{1/2}\| e\|_{L^2{(D)}} + \|e\|_{\Hd {\alpha/2}} \right),
\end{equation*}
i.e.,
\begin{equation}\label{eqn:control3}
|z| \|e\|_{L^2{(D)}}^2 + \| e\|_{\Hd {\alpha/2}}^2\le Ch^{\alpha-2+2\beta}\| v \|_{L^2{(D)}}^2,
\end{equation}
from which follows directly the $\Hd{\alpha/2}$-norm bound for the error $e$. Next we deduce the $L^2{(D)}$-norm
of the error $e$ by a duality argument: given $\varphi \in L^2{(D)}$, we define $\psi$ and $\psi_h$ respectively by
\begin{equation*}
\psi=R(z;A^*)\varphi \quad\mbox{and}\quad \psi_h=R(z;A_h^*)P_h\varphi.
\end{equation*}
Then by duality
\begin{equation*}
\|e \|_{L^2{(D)}} \le \sup_{\varphi \in L^2{(D)}}\frac{|(e,\varphi)|}{\|\varphi\|_{L^2{(D)}}}
=\sup_{\varphi \in L^2{(D)}}\frac{|z(e,\psi)-A(e,\psi)|}{\|\varphi\|_{L^2{(D)}}}.
\end{equation*}
Meanwhile it follows from \eqref{eqn:worthog} and \eqref{eqn:control3} that
\begin{equation*}
\begin{split}
|z(e,\psi)-A(e,\psi)|
& = |z(e,\psi-\psi_h)-A(e,\psi-\psi_h)|\\
& \le |z|\|e\|_{L^2{(D)}}\| \psi-\psi_h \|_{L^2{(D)}}+ C\|e\|_{\Hd{\alpha/2}}\| \psi-\psi_h \|_{\Hd{\alpha/2}}\\
& \le Ch^{\alpha-2+2\beta} \| v \|_{L^2{(D)}}\| \varphi \|_{L^2{(D)}}.
\end{split}
\end{equation*}
This completes the proof of the lemma.
\end{proof}
Now we can state our first error estimate.
\begin{theorem}\label{thm:seminonsmooth}
Let $u$ and $u_h$ be solutions of problem \eqref{variational} and \eqref{fem} with $v\in L^2{(D)}$
and $v_h=P_h v$, respectively. Then for $t>0$, there holds for any $\beta\in[0,1/2)$:
\begin{equation*}
\| u(t)-u_h(t) \|_{L^2{(D)}} + h^{\alpha/2-1+\beta}\| u(t)-u_h(t) \|_{\Hd{\alpha/2}}
\le C h^{\alpha-2+2\beta} t^{-1} \| v \|_{L^2{(D)}}.
\end{equation*}
\end{theorem}
\begin{proof}
Note the error $e(t):=u(t)-u_h(t)$ can be represented as
\begin{equation*}
e(t)=\frac1{2\pi\mathrm{i}}\int_{\Gamma} e^{-zt}(w-w_h) \,dz,
\end{equation*}
where the contour $\Gamma=\left\{z:z=\varrho e^{\pm \mathrm{i}\delta_1}, \varrho\ge
0 \right\}$, and $w=R(z;A)v$ and $w_h=R(z;A_h)P_h v$. By Lemma \ref{lem:wbound}, we have
\begin{equation*}
\begin{split}
\| e(t)\|_{\Hd {\alpha/2}} &\le C \int_{\Gamma} |e^{-zt}| \|w-w_h\|_{\Hd{\alpha/2}} \,dz\\
& \le Ch^{\alpha/2-1+\beta}\| v \|_{L^2{(D)}}\int_0^{\infty} e^{-\varrho t \cos\delta_1}\,d\varrho \le Ch^{\alpha/2-1+\beta}t^{-1}\| v \|_{L^2{(D)}}.
\end{split}
\end{equation*}
A similar argument also yields the $L^2{(D)}$-estimate.
\end{proof}
\subsection{Error estimate for smooth initial data}
Next we turn to the case of smooth initial data, i.e., $v\in D(A)$. In order to obtain a
uniform bound of the error, we employ an alternative integral representation. With $v_h
=R_h v$, there holds
\begin{equation*}
\begin{split}
u(t)-u_h(t)&=\int_{\Gamma} e^{-zt}(R(z;A)v-R(z;A_h)R_hv )\,dz\\
&=\int_{\Gamma_{\delta_1}^t} e^{-zt}(R(z;A)v-R(z;A_h)R_hv )\,dz,
\end{split}
\end{equation*}
where $\Gamma_{\delta_1}^t=\Gamma_1\cup \Gamma_2 \cup \Gamma_t$,
$\Gamma_1=\left\{z:z=\varrho e^{\mathrm{i}\delta_1}, \varrho\ge t^{-1} \right\}$,
$\Gamma_2=\left\{z:z=\varrho e^{-\mathrm{i}\delta_1}, \varrho\ge t^{-1} \right\}$,
and $\Gamma_t=\left\{z:z=t^{-1} e^{\mathrm{i}\theta}, \delta_1 \le |\theta| \le \pi \right\}$,
cf. Fig. \ref{fig:integral_path}(b). Then using the identities
\begin{equation*}
R(z;A)=AA^{-1}R(z;A)=A(z^{-1}R(z;A)-z^{-1}A^{-1})=z^{-1}R(z;A)A-z^{-1}I
\end{equation*}
and $\int_{\Gamma_{\delta_1}^t} e^{-zt}z^{-1} \,dz=0$, the error $u(t)-u_h(t)$ can be represented as
\begin{equation}\label{eqn:smootherrorrep}
u(t)-u_h(t)=\int_{\Gamma_{\delta_1}^t} z^{-1}e^{-zt}(w-w_h) \,dz,
\end{equation}
where $w=R(z;A)Av$ and $w_h=R(z;A_h)A_hR_hv$.
\begin{lemma}\label{lem:werror2}
For any $\varphi\in \Hd {\alpha/2}$ and $z\in \Gamma_{\delta_1}^t$, there holds
\begin{equation*}
|z| \| \varphi \|_{L^2{(D)}}^2 + \| \varphi \|_{\Hd {\alpha/2}}^2 \le C\left|z\| \varphi \|_{L^2{(D)}}^2 -A(\varphi,\varphi)\right|.
\end{equation*}
\end{lemma}
\begin{proof}
Note that $\Gamma_1 \cup \Gamma_2 \subset \Gamma$, thus it suffices to consider $\Gamma_t$.
Set $z_t= t^{-1}e^{\mathrm{i}\delta_1}$; then it is obvious that for $z\in \Gamma_t$ and
$\varphi\in \Hd {\alpha/2}$ we have $\text{dist}(z,\Sigma_{\varphi}) \ge \text{dist}(z_t,\Sigma_{\varphi})$,
cf. Fig. \ref{fig:integral_path}(b). Thus the argument in proving \eqref{eqn:proofcontrol}
yields the desired result.
\end{proof}
\begin{remark}\label{rem:wbound2}
For $ v\in L^2{(D)}$, $ z\in \Gamma_t$, let $w=R(z;A)v$ and $w_h=R(z;A_h)P_h v$. Then
the arguments in Lemma \ref{lem:wbound} and Lemma \ref{lem:werror2} yield the
estimate \eqref{eqn:wboundHa}.
\end{remark}
\begin{theorem}\label{thm:semismooth}
Let $u$ and $u_h$ be solutions of problem \eqref{variational} and \eqref{fem} with
$v\in D(A)$ and $v_h=R_h v$, respectively. Then for any $\beta\in[0,1/2)$, there holds
\begin{equation*}
\| u(t)-u_h(t) \|_{L^2{(D)}} + h^{\alpha/2-1+\beta}\| u(t)-u_h(t) \|_{\Hd{\alpha/2}}
\le C h^{\alpha-2+2\beta} \| Av \|_{L^2{(D)}}.
\end{equation*}
\end{theorem}
\begin{proof}
Let $w=R(z;A)Av$ and $w_h=R(z;A_h)A_hR_hv$. Together with the identity $A_h R_h= P_h A$,
Remark \ref{rem:wbound2} gives
\begin{equation*}
\| w_h-w \|_{L^2{(D)}} + h^{\alpha/2-1+\beta}\| w_h-w \|_{\Hd{\alpha/2}}
\le Ch^{\alpha-2+2\beta}\| Av \|_{L^2{(D)}}.
\end{equation*}
Now it follows from this and the representation \eqref{eqn:smootherrorrep} that
\begin{equation*}
\begin{split}
\| u(t)-u_h(t) \|_{\Hd {\alpha/2}} &\le C \int_{\Gamma_{\delta_1}^t}
|z^{-1}| |e^{-zt}| \|w-w_h\|_{\Hd{\alpha/2}} \,dz\\
& \le Ch^{\alpha/2-1+\beta}\| A v \|_{L^2{(D)}} \int_{\Gamma_{\delta_1}^t}
|z^{-1}| |e^{-zt}|\,dz.
\end{split}
\end{equation*}
It suffices to bound the integral term. First we note that
\begin{equation*}
\int_{\Gamma_1} |z^{-1}| |e^{-zt}|\,dz
=\int_{t^{-1}}^{\infty} \varrho^{-1} e^{-\varrho t \cos{\delta_1}} \, d\varrho
\le \int_{\cos{\delta_1}}^{\infty} x^{-1} e^{-x} \, dx \le C,
\end{equation*}
which is also valid for the integral on the curve $\Gamma_2$. Further, we have
\begin{equation*}
\begin{split}
\int_{\Gamma_t} |z^{-1}| |e^{-zt}|\,dz
& =\int_{\delta_1}^{2\pi-\delta_1} e^{-\cos\theta} \, d\theta \le C.
\end{split}
\end{equation*}
Hence we obtain the $\Hd{\alpha/2}$-estimate. The $L^2{(D)}$-estimate follows analogously.
\end{proof}
\section{Error analysis for fully discrete scheme}\label{sec:fullydiscrete}
Now we turn to error estimates for fully discrete schemes, obtained with
either the backward Euler method or the Crank-Nicolson method in time.
\subsection{Backward Euler method}\lambdabel{subsec:backward}
We first consider the backward Euler method for approximating the first-order
time derivative: for $n=1,2,\ldots,N$
\begin{equation*}
U^n-U^{n-1}+\tau A_h U^n=0,
\end{equation*}
with $U^0=v_h$ which is an approximation of the initial data $v$. Consequently
\begin{equation}\lambdabel{eqn:backward}
U^n=(I+\tau A_h)^{-n} v_h, \quad U^0=v_h, \quad n=1,2,...,N.
\end{equation}
By the standard energy method, the backward Euler method is unconditionally stable, i.e.,
for any $n\in\mathbb{N}$, $ \|(I+\tau A_h)^{-n}\| \le 1.$
To analyze the scheme \eqref{eqn:backward}, we need
the following smoothing property
\cite{FujitaMizutan:1976}.
\begin{lemma}\lambdabel{lem:Ahcontrol}
For $n \in \mathbb{N}$, $n \ge \gamma>0$ and $s>0$,
there exists a constant $C>0$, depending on $\gamma$ only, such that
\begin{equation}\lambdabel{eqn:backeulersmoothing}
\| A_h^{\gamma}(I+sA_h)^{-n} \| \le Cn^{-\gamma}s^{-\gamma}.
\end{equation}
\end{lemma}
\begin{proof}
Let $r(z)=\frac1{1+z}$. Then by \cite[Lemma 9.2]{Thomee:2006}, for an arbitrary $R>0$
and $\theta \in (0,\frac{\pi}{2})$, there exist constants $c,\ C>0$ and $\epsilon \in (0,1)$ such that
\begin{equation}\lambdabel{eqn:rationalbound}
\begin{split}
|r(z)| \le \left\{\begin{array}{ll}
e^{C|z|}, & \forall |z| \le \epsilon,\\
e^{-c|z|}, & \forall |z|\le R,\, |\arg z|\le \theta.
\end{array}\right.
\end{split}
\end{equation}
Clearly, \eqref{eqn:backeulersmoothing} is equivalent to
$
\| (nsA_h)^{\gamma}r(sA_h)^n \| \le C.
$
The fact that $A_h$ is sectorial implies that $sA_h$, $s>0$, is also sectorial on $X_h$.
Hence it suffices to show
\begin{equation*}
\|(nA_h)^{\gamma}r(A_h)^n \| \le C.
\end{equation*}
Let $F_n(z)=(nz)^{\gamma}r(z)^n$.
Since $r(\infty)=0$, by Lemma \ref{lem:semigroup} and Remark \ref{rem:semigroup}
\begin{equation*}
F_n(A_h)=\frac{1}{2\pi\mathrm{i}} \int_{\Gamma_{\epsilon/n}\cup{\Gamma_{\epsilon/n}^{nR}}\cup{\Gamma^{nR}}}F_n(z)R(z;A_h)\,dz.
\end{equation*}
First, by \eqref{eqn:rationalbound}, we deduce that for $z \in \Gamma_{\epsilon/n}$
\begin{equation*}
|F_n(z)| \le (n|z|)^{\gamma} e^{cn|z|} = \epsilon ^ {\gamma} e^{c\epsilon} \le C .
\end{equation*}
Thus we have
\begin{equation*}
\bigg|\hspace{-0.6mm}\bigg|\frac{1}{2\pi \mathrm{i}} \int_{\Gamma_{\epsilon/n}}F_n(z)R(z;A_h)\,dz \bigg|\hspace{-0.6mm}\bigg|
\le C \frac{\epsilon}{n} \sup_{z\in\Gamma_{\epsilon/n}} \| R(z;A_h) \| \le C.
\end{equation*}
Next, we note
\begin{equation*}
\begin{split}
\bigg|\hspace{-0.6mm}\bigg|\frac{1}{2\pi\mathrm{i}} &\int_{\Gamma_{\epsilon/n}^{nR}}F_n(z)R(z;A_h)\,dz\bigg|\hspace{-0.6mm}\bigg|
\le C \int_{\epsilon/n}^{nR} (n\varrho)^\gamma e^{-cn\varrho} \varrho^{-1}\,d\varrho\\
&\le C \int_{\epsilon}^{n^2R} \varrho^{\gamma-1} e^{-c\varrho}\,d\varrho
\le C \int_{0}^{\infty} \varrho^{\gamma-1} e^{-c\varrho}\,d\varrho \leq C.
\end{split}
\end{equation*}
Last, there holds $|1+z|^{-1} \le C |z|^{-1}$ for $|z|\ge1$.
Hence for $z \in \Gamma^{nR}$,
\begin{equation*}
|F_n(z)| \le C n^{2\gamma-n}R^{\gamma-n} \le C,\quad \forall n\ge \gamma.
\end{equation*}
Thus we have the following bound for the integral on the curve $\Gamma^{nR}$:
\begin{equation*}
\bigg|\hspace{-0.6mm}\bigg|\frac{1}{2\pi\mathrm{i}} \int_{\Gamma^{nR}}F_n(z)R(z;A_h)\,dz \bigg|\hspace{-0.6mm}\bigg|
\le C nR \sup_{z\in\Gamma^{nR}} \| R(z;A_h) \| \le C.
\end{equation*}
This completes the proof of the lemma.
\end{proof}
Now we derive an error estimate for the fully discrete scheme
\eqref{eqn:backward} in case of smooth initial data, i.e., $v\in D(A)$.
\begin{theorem}\lambdabel{thm:fullsmooth:euler}
Let $u$ and $U^n$ be solutions of problem \eqref{variational} and \eqref{eqn:backward} with $v\in D(A)$
and $U^0=R_h v$, respectively. Then for $t_n=n\tau$ and any $\beta\in[0,1/2)$, there holds
\begin{equation*}
\| u(t_n)- U^n \|_{L^2{(D)}} \le C (h^{{\alpha}-2+2\beta} + \tau)\| Av \|_{L^2{(D)}}.
\end{equation*}
\end{theorem}
\begin{proof}
Note that the error $e^n=u(t_n)-U^n$ can be split into
\begin{equation*}
e^n= (u(t_n) - u_h(t_n)) + (u_h(t_n) - U^n) := {\widetilde \varrho}^n + \varthetat^n,
\end{equation*}
where $u_h$ denotes the semidiscrete Galerkin solution with $v_h=R_h v$.
By Theorem \ref{thm:semismooth}, the term ${\widetilde \varrho}^n$ satisfies the following estimate
\begin{equation*}
\|{\widetilde \varrho}^n \|_{L^2{(D)}} \le C h^{{\alpha}-2+2\beta}\| Av\|_{L^2{(D)}}.
\end{equation*}
Next we bound the term $\varthetat^n$. Note that for $n\ge1$,
\begin{equation}\lambdabel{eqn:euler-err}
\begin{split}
\varthetat^n & = E_h(n\tau)-(I+\tau A_h)^{-n} v_h \\
& = -\int_0^{\tau} \frac{d}{ds}\left(E_h(n(\tau-s))(I+sA_h)^{-n}v_h\right)\, ds\\
& = -\int_0^{\tau}nsA_h^2 E_h(n(\tau-s))(I+sA_h)^{-n-1}v_h\, ds.
\end{split}
\end{equation}
Then by Lemmas \ref{lem:Ahprop4} and \ref{lem:Ahcontrol} we have
\begin{equation*}
\begin{split}
\|\varthetat^n\|_{L^2{(D)}} & \le C n^{1/2} \int_0^{\tau} s(\tau-s)^{-1/2}\|A_h^{3/2} (I+sA_h)^{-n-1} R_h v \|_{L^2{(D)}} \, ds \\
& \le C n^{1/2} \int_0^{\tau} s^{1/2} (n+1)^{-1/2} (\tau-s)^{-1/2} \|A_h R_h v \|_{L^2{(D)}} \, ds \\
& \le C\tau\| A_h R_h v \|_{L^2{(D)}}.
\end{split}
\end{equation*}
The desired result follows from the identity $A_hR_h=P_hA$ and the $L^2{(D)}$-stability of the projection $P_h$.
\end{proof}
Next we give an error estimate for $L^2{(D)}$ initial data $v$.
\begin{theorem}\lambdabel{thm:fullnonsmooth:euler}
Let $u$ and $U^n$ be solutions of problem \eqref{variational} and \eqref{eqn:backward} with $v\in L^2{(D)}$
and $U^0=P_h v$, respectively. Then for $t_n=n\tau$ and any $\beta\in[0,1/2)$, there holds
\begin{equation*}
\| u(t_n)- U^n \|_{L^2{(D)}} \le C (h^{{\alpha}-2+2\beta} + \tau)t_n^{-1}\| v \|_{L^2{(D)}}.
\end{equation*}
\end{theorem}
\begin{proof}
Like before, we split the error $e^n=u(t_n)-U^n$ into
\begin{equation}\lambdabel{eqn:fullsplit}
e^n= (u(t_n) - u_h(t_n)) + (u_h(t_n) - U^n) := {\widetilde \varrho}^n + \varthetat^n,
\end{equation}
where $u_h$ denotes the semidiscrete Galerkin solution with $v_h=P_h v$.
In view of Theorem \ref{thm:seminonsmooth},
it remains to estimate the term $\varthetat^n$. By identity \eqref{eqn:euler-err} and Lemmas \ref{lem:Ahcontrol} and
\ref{lem:Ahprop4}, we have for $n\ge1$
\begin{equation*}
\begin{split}
\|\varthetat^n\|_{L^2{(D)}} & \le C n \int_0^{\tau} s \|A_h^{3/2} (I+sA_h)^{-n-1} A_h^{1/2} E_h(n(\tau-s)) P_h v \|_{L^2{(D)}} \, ds \\
& \le C n \int_0^{\tau} s s^{-3/2} (n+1)^{-3/2} \|A_h^{1/2} E_h(n(\tau-s)) P_h v \|_{L^2{(D)}} \, ds \\
& \le C n^{-1/2} \int_0^{\tau} s^{-1/2} n^{-1/2}(\tau-s)^{-1/2} \| P_h v \|_{L^2{(D)}} \, ds \le C\tau t_n^{-1}\| v \|_{L^2{(D)}}.\\
\end{split}
\end{equation*}
This completes the proof of the theorem.
\end{proof}
\subsection{Crank-Nicolson method}\lambdabel{ssec:fullcn}
Now we turn to the fully discrete scheme based on the Crank-Nicolson method. It reads
\begin{equation*}
U^n-U^{n-1}+\tau A_h U^{n-1/2}=0, \quad U^0=v_h, \quad n=1,2,...,N,
\end{equation*}
where $U^{n-1/2}=\frac12( U^{n} + U^{n-1})$. Therefore we have
\begin{equation}\lambdabel{eqn:Crank-Nicolson}
U^n=\left(I+\tfrac{\tau}{2} A_h\right)^{-n}\left(I-\tfrac{\tau}{2} A_h\right)^n v_h, \quad n=1,2,...,N.
\end{equation}
It can be verified by the energy method that the Crank-Nicolson method is
unconditionally stable, i.e., for any $n\in\mathbb{N}$,
$\|\left(I+\tfrac{\tau}{2} A_h\right)^{-n}\left(I-\tfrac{\tau}{2} A_h\right)^n\| \le 1$.
For the error analysis, we need a result on the rational function
\begin{equation*}
r_{cn}(z)=\frac{1-\frac{z}{2}}{1+\frac{z}{2}}.
\end{equation*}
\begin{lemma}\lambdabel{lem:cnbound}
For any arbitrary $R >0$, there exist $C>0$ and $c>0$ such that
\begin{equation*}
|e^{-nz} - r_{cn}(z)^n| \le \left\{\begin{array}{ll}
\displaystyle Ce^{-\frac{cn}{|z|}}, & \quad |\arg z |\le \deltalta_1,\, |z| \ge R,\\
C n |z|^3 e^{-cn|z|},& \quad |\arg z |\le \deltalta_1,\, |z| \le R,
\end{array}\right.
\end{equation*}
\end{lemma}
\begin{proof}
The proof of general cases can be found in \cite[Lemmas 9.2 and 9.4]{Thomee:2006}. We
briefly sketch the proof here. By setting $w=1/z$, the first inequality follows from
\begin{equation*}
r_{cn}(z)=\frac{1-\frac{z}{2}}{1+\frac{z}{2}} = - \frac{1-2w}{1+2w}=-r(4w)=-e^{-4w+O(w^{2})}, \quad w \rightarrow 0,
\end{equation*}
and that for $c \le \cos \deltalta_1$,
$$
\displaystyle |e^{-z}| = e^{-\Re z} \le e^{-c|z|} \le Ce^{-\frac{c}{|z|}}, \, |\arg z| \le \deltalta_1, \ |z|\ge R.
$$
The first estimate now follows by the triangle inequality. Meanwhile, we observe that
\begin{equation*}
\begin{aligned}
& |r_{cn}(z)-e^{-z}|\le C |z|^{3}, \quad |z|\le R,\, |\arg z| \le \deltalta_1,\\
& |r_{cn}(z)|\leq e^{-c|z|},\quad |\arg z|\le \deltalta_1,\, |z|\le R.
\end{aligned}
\end{equation*}
Consequently for $z$ under consideration
\begin{equation*}
|e^{-nz} - r_{cn}(z)^n| = |(e^{-z} - r_{cn}(z))\sum_{j=0}^{n-1}r_{cn}(z)^j e^{-(n-1-j)z}| \le C |z|^{3}ne^{-cn|z|}.
\end{equation*}
This completes the proof of the lemma.
\end{proof}
Now we can state an $L^2{(D)}$-norm estimate for \eqref{eqn:Crank-Nicolson}
in case of smooth initial data.
\begin{theorem}\lambdabel{thm:fullsmooth:cn}
Let $u$ and $U^n$ be solutions of problem \eqref{variational} and \eqref{eqn:Crank-Nicolson} with $v\in D(A)$
and $U^0=R_h v$, respectively. Then for $t_n = n\tau$ and any $\beta\in[0,1/2)$, there holds
\begin{equation*}
\| u(t_n)- U^n \|_{L^2{(D)}} \le C (h^{{\alpha}-2+2\beta} + \tau^2 t_n^{-1})\| Av \|_{L^2{(D)}}.
\end{equation*}
\end{theorem}
\begin{proof}
Like before, we split the error $e^n$ into
\begin{equation*}
e^n= (u(t_n) - u_h(t_n)) + (u_h(t_n) - U^n) := {\widetilde \varrho}^n + \varthetat^n,
\end{equation*}
where $u_h$ denotes the semidiscrete Galerkin solution with $v_h=R_h v$. Then
by Theorem \ref{thm:semismooth}, the term ${\widetilde \varrho}^n$ satisfies the following estimate
\begin{equation*}
\|{\widetilde \varrho}^n \|_{L^2{(D)}} \le C h^{{\alpha}-2+2\beta}\| Av\|_{L^2{(D)}}.
\end{equation*}
It remains to bound $\varthetat^n = E_h(n\tau)v_h - r_{cn}(\tau A_h)^nv_h$ by
\begin{equation*}
\|\varthetat^n\|_{L^2{(D)}} \le C\tau^2t_n^{-1} \| A_hv_h \|_{L^2{(D)}}.
\end{equation*}
Note that $\tau A_h$ is also sectorial with the same constant
as $A_h$, and further
$$
\| (zI-\tau A_h)^{-1} \| = \tau^{-1} \Big\| \Big(\tfrac{z}{\tau}I-A_h\Big)^{-1} \Big\| \le C \frac{1}{|z|}.
$$
With $t_n=n\tau$, it suffices to show
\begin{equation*}
\| A_h^{-1} (E_h(n) - r_{cn}(A_h)^n)\| \le Cn^{-1}.
\end{equation*}
By Lemma \ref{lem:semigroup}, there holds
\begin{equation*}
A_h^{-1} r_{cn}(A_h)^n = \frac{1}{2\pi\mathrm{i}} \int_{{\Gamma_{\epsilon}}
\cup{\Gamma_{\epsilon}^R} \cup{\Gamma^R}}r_{cn}(z)^n z^{-1} R(z;A_h) \,dz.
\end{equation*}
Since $ \| r_{cn}(z)^n z^{-1} R(z;A_h)\| =O(z^{-2})$ for large $z$, we can let $R$ tend to $\infty$.
Further, by \cite[Lemma 9.3]{Thomee:2006}, we have
\begin{equation*}
A_h^{-1} E_h(n) =\frac{1}{2\pi\mathrm{i}} \int_{{\Gamma_{\epsilon}}\cup{\Gamma_{\epsilon}^{\infty}}} e^{-nz} z^{-1} R(z;A_h) \,dz.
\end{equation*}
By Lemma \ref{lem:cnbound},
\begin{equation*}
\|(e^{-nz} - r_{cn}(z)^n) z^{-1} R(z;A_h)\| =O(z)\quad \mbox{ as } z\to 0,\ |\arg z| \leq \deltalta_1,
\end{equation*}
and consequently, by taking $\epsilon\rightarrow 0$, there holds
\begin{equation*}
\begin{split}
A_h^{-1} (E_h(n) - r_{cn}(A_h)^n)
&=\frac{1}{2\pi\mathrm{i}} \int_{\Gamma} (e^{-nz} - r_{cn}(z)^n) z^{-1} R(z;A_h) \,dz,
\end{split}
\end{equation*}
where the contour $\Gamma$ is given by $\Gamma= \left\{z:z=\varrho e^{\pm \mathrm{i}\deltalta_1}, \varrho\ge 0 \right\}$.
By applying Lemma \ref{lem:cnbound} with $R=1$, we deduce
\begin{equation} \lambdabel{eqn:fullestimate1}
\begin{aligned}
\|A_h^{-1} (E_h(n) - r_{cn}(A_h)^n) \|
&= \bigg |\hspace{-.6mm}\bigg |\frac{1}{2\pi\mathrm{i}}
\int_{\Gamma} (e^{-nz} - r_{cn}(z)^n) z^{-1} R(z;A_h) \,dz \bigg |\hspace{-.6mm}\bigg |\\
&\le C \int_0^1 \varrho n e^{-cn\varrho}\, d\varrho + C \int_1^{\infty} \varrho^{-2}e^{-cn\varrho^{-1}}\, d\varrho\\
&\le Cn^{-1} \left(\int_0^{\infty} \varrho e^{-\varrho} d\varrho + \int_0^{\infty} e^{-\varrho}\,d\varrho\right)\le Cn^{-1}.
\end{aligned}
\end{equation}
This completes the proof of the theorem.
\end{proof}
Now we turn to the case of nonsmooth initial data, i.e., $v\in L^2{(D)}$. It is known that
in case of the standard parabolic equation, the
Crank-Nicolson method fails to give an optimal error estimate for such data unconditionally
because of a lack of smoothing property \cite{LuskinRannacher:1982,Zlamal:1974}. Hence we
employ a damped Crank-Nicolson scheme, which is realized by replacing the first two time steps
by the backward Euler method. Further, we denote
\begin{equation}\lambdabel{eqn:dampedCN}
r_{dcn}(z)^n =r_{bw}(z)^2 r_{cn}(z)^{n-2}.
\end{equation}
The damped Crank-Nicolson scheme is also unconditionally stable. Further, the function
$r_{dcn}(z)$ has the following estimates \cite[Lemma 2.2]{Hansbo:1999}.
\begin{lemma}\lambdabel{lem:dcnbound}
Let $r_{dcn}$ be defined as in \eqref{eqn:dampedCN}; then there exist
positive constants $\epsilon$, $R$, $C$, $c$ such that
\begin{equation}\lambdabel{eqn:dcnbound}
\begin{split}
&|r_{dcn}(z)^n| \le \left\{\begin{array}{ll}
(1+C|z|)^n, & |z|<\epsilon;\\
e^{-cn|z|}, & \forall\, |z|\le 1,\, |\arg(z)| \le \deltalta_1;\\
C|z|^{-2}e^{-\frac{c(n-2)}{|z|}}, & \forall |z|\ge 1,\, |\arg(z)| \le \deltalta_1,\, n\ge2;\\
C|z|^{-2}, & |z| \ge R,\, n\ge2,\\
\end{array}\right. \\
&|r_{bw}(z)^2-e^{-2z}| \le C|z|^2, \quad \forall |z|\le \epsilon \quad \text{or} \quad |\arg(z)| \le \deltalta_1.
\end{split}
\end{equation}
\end{lemma}
\begin{theorem}\lambdabel{thm:fullnonsmooth:cn}
Let $u$ be the solution of problem \eqref{variational}, and $U^n= r_{dcn}(\tau A_h)^n U^0$ with $v\in L^2{(D)}$
and $U^0=P_h v$. Then for $t_n = n\tau$ and any $\beta\in[0,1/2)$, there holds
\begin{equation*}
\| u(t_n)- U^n \|_{L^2{(D)}} \le C (h^{{\alpha}-2+2\beta} t_n^{-1}+ \tau^2 t_n^{-2})\| v \|_{L^2{(D)}}.
\end{equation*}
\end{theorem}
\begin{proof}
We split the error $e^n=u(t_n)-U^n$ as \eqref{eqn:fullsplit}. Since the bound on ${\widetilde \varrho}^n$
follows from Theorem \ref{thm:seminonsmooth}, it remains to bound
$\varthetat^n = E_h(\tau n)v_h - r_{dcn}(\tau A_h)^nv_h$ for $n\ge1$ as
\begin{equation*}
\| \varthetat^n \|_{L^2{(D)}} \le C \tau^2 t_n^{-2} \| v_h \|_{L^2{(D)}}.
\end{equation*}
Let $F_n(z)= e^{-nz}-r_{dcn}(z)^n$. Then it
suffices to show for $n\ge1$
\begin{equation*}
\|F_n(A_h)\|\le C n^{-2}.
\end{equation*}
The estimate is trivial for $n=1,2$ by boundedness. For $n > 2$, we split $F_n(z)$ into
\begin{equation*}
\begin{split}
F_n(z) & = r_{bw}(z)^2(e^{-(n-2)z}-r_{cn}(z)^{n-2})+e^{-(n-2)z}(e^{-2z}-r_{bw}(z)^2) \\
&:= f_1(z)+f_2(z).
\end{split}
\end{equation*}
It follows from \cite[Lemma 9.1 and Lemma 9.3]{Thomee:2006} that
\begin{equation*}
\begin{aligned}
r_{dcn}(A_h)^n &= \frac{1}{2\pi\mathrm{i}} \int_{{\Gamma_{\epsilon}}\cup{\Gamma_{\epsilon}^R} \cup{\Gamma^R}}r_{dcn}(z)^nR(z;A_h)\,dz,\\
E_h(n) &= \frac{1}{2\pi\mathrm{i}} \int_{{\Gamma_{\epsilon}}\cup{\Gamma_{\epsilon}^{\infty}}}e^{-nz}R(z;A_h)\,dz.
\end{aligned}
\end{equation*}
Using the fact $\| r_{dcn}(z)^n R(z;A_h)\| =O(z^{-3})$ as $z\rightarrow \infty$,
we may let $R\to\infty$ to obtain
\begin{equation*}
F_n(A_h)= \frac{1}{2\pi\mathrm{i}} \int_{{\Gamma_{\epsilon}}\cup{\Gamma_{\epsilon}^{\infty}}}F_n(z)R(z;A_h)\,dz.
\end{equation*}
Further, by Lemma \ref{lem:dcnbound}, $\|F_n(z) R(z;A_h)\| =O(z)$ as $z \rightarrow 0$,
and consequently by taking $\epsilon\rightarrow 0$ and setting $\Gamma=
\left\{z:z=\varrho e^{\pm \mathrm{i}\deltalta_1}, \varrho\ge 0 \right\}$, we have
\begin{equation}\lambdabel{eqn:repret3}
\begin{split}
F_n(A_h) &= \frac{1}{2\pi\mathrm{i}} \int_{\Gamma}F_n(z)R(z;A_h)\,dz\\
&= \frac{1}{2\pi\mathrm{i}} \int_{\Gamma}(f_1(z)+f_2(z))R(z;A_h)\,dz.
\end{split}
\end{equation}
Now we estimate the two terms separately. First, by Lemmas \ref{lem:cnbound} and \ref{lem:dcnbound}, we get
\begin{equation*}
\begin{split}
&|f_1(z)| \le |r_{dcn}(z)^n| + |r_{bw}(z)^2||e^{-(n-2)z}| \le C|z|^{-2}e^{-\frac{cn}{|z|}}, \quad z\in \Gamma,\ |z|\ge1,\\
&|f_1(z)| \le |r_{bw}(z)^2||r_{cn}(z)^{n-2}-e^{-(n-2)z}| \le C|z|^{3}ne^{-cn|z|}, \quad z\in \Gamma,\ |z|\le 1.
\end{split}
\end{equation*}
Repeating the argument for \eqref{eqn:fullestimate1} gives that for $n> 2$
\begin{equation*}
\bigg|\hspace{-0.6mm}\bigg|\frac1{2\pi\mathrm{i}}\int_{\Gamma}f_1(z)R(z;A_h) \,dz \bigg|\hspace{-0.6mm}\bigg| \le Cn^{-2}.
\end{equation*}
As to the other term, we deduce from \eqref{eqn:dcnbound} that
\begin{equation*}
|f_2(z)| \le |e^{-(n-2)z}||r_{bw}(z)^2-e^{-2z}| \le C |z|^2 ,\quad \forall |z| \le \epsilon,
\end{equation*}
and thus we can change the integration path $\Gamma$ to $\Gamma_{\epsilon/n}^{\infty}\cup \Gamma_{\epsilon/n}$.
Further, we deduce from Lemma \ref{lem:dcnbound} that
\begin{equation*}
|f_2(z)|=|e^{-(n-2)z}(r_{bw}(z)^2-e^{-2z})| \le Ce^{-c(n-2)|z|}|z|^2,\quad \forall z\in\Gamma_{\epsilon/n}^{\infty}.
\end{equation*}
Thus, we derive the following bound for $n>2$
\begin{equation*}
\bigg|\hspace{-0.6mm}\bigg|\frac1{2\pi\mathrm{i}}\int_{\Gamma_{\epsilon/n}^{\infty}\cup \Gamma_{\epsilon/n}}f_2(z)R(z;A_h) \,dz \bigg|\hspace{-0.6mm}\bigg|
\le C \int_{\epsilon/n}^{\infty}e^{-c(n-2)\varrho} \varrho\, d\varrho +C\int_{\Gamma_{\epsilon/n}}\varrho \, d\varrho \le Cn^{-2}.
\end{equation*}
This completes the proof of the theorem.
\end{proof}
\section{Numerical results}\lambdabel{sec:numeric}
In this section, we present numerical experiments to verify our theoretical results. To this end,
we consider the following three examples:
\begin{enumerate}
\item[(a)] smooth initial data: $v(x)= x(x-1)$, which lies
in $\Hd{3/2-\epsilon}$.
\item[(b)] nonsmooth initial data: (b1) $ v(x)=\chi_{(1/2,1)}(x)$,
the characteristic function of the interval $(1/2,1)$;
(b2) $v(x)=x^{1/4}$. Note that in (b1) $v \in \Hd{1/2-\epsilon}$
while in (b2) $v \in \Hd {1/4-\epsilon}$, for any $\epsilon >0$.
\item[(c)] discontinuous potential $q(x)=\chi_{(0,1/2)}(x)$.
\end{enumerate}
We examine separately the spatial and temporal convergence rates at $t=1$.
For the case of nonsmooth initial data, we are especially interested in the errors
for $t$ close to zero, and thus we also present the errors at $t=0.1$, $0.01$,
$0.005$, and $0.001$. The exact solutions to these examples are not available
in closed form, and hence we compute the reference solution on a very refined mesh.
We measure the accuracy of the numerical approximation $U^n$ by the normalized errors
$\|u(t_n)-U^n\|_{L^2{(D)}}/\|v\|_{L^2{(D)}}$ and $\|u(t_n)-U^n\|_{\Hd {{\alpha}/2}}/ \|v
\|_{L^2{(D)}}$. The normalization enables us to observe the behavior of the errors with respect
to time in case of nonsmooth initial data. To study the rate of convergence in space,
we use a time step size $\tau=10^{-5}$ so that the time discretization error is negligible,
and we have the space discretization error only.
\subsection{Numerical results for example (a): smooth initial data}
In Table \ref{tab:smoothBE} we show the errors $\|u(t_n)-U^n\|_{L^2{(D)}}$ and $\|u(t_n)-U^n
\|_{\Hd {{\alpha}/2}}$ with the backward Euler method. We have set $\tau=10^{-5}$, so that the
error incurred by temporal discretization is negligible. In the table, \texttt{ratio}
refers to the ratio of the errors when the mesh size $h$ (or time step size $\tau$)
halves, and the numbers in the bracket denote theoretical convergence rates.
The numerical results show $O(h^{{\alpha}-1/2})$ and $O(h^{{\alpha}/2-1/2})$ convergence rates for
the $L^2{(D)}$- and $\Hd {{\alpha}/2}$-norms of the error, respectively. In Fig.
\ref{fig:smooth_space}, we plot the results for ${\alpha}=1.5$ at $t=1$
in a log-log scale. The $\Hd{{\alpha}/2}$-norm estimate is fully confirmed, but the $L^2
{(D)}$-norm estimate is suboptimal: the empirical convergence rate is one half order higher
than the theoretical one. The suboptimality is attributed to the low regularity of the
adjoint solution, used in Nitsche's trick. In view of the singularity of the term
$x^{{\alpha}-1}$ in the solution representation, cf. Remark \ref{rmk::singular}, the
spatial discretization error is concentrated around the origin.
\begin{table}[hbt!]
\small
\caption{$L^2$- and $ \tilde{H}^{{\alpha}/2}$-norms of the error for example (a),
smooth initial data, with ${\alpha}=1.25, 1.5, 1.75$ for backward Euler method and $\tau=10^{-5}$; in the last
column in brackets is the theoretical rate.}
\lambdabel{tab:smoothBE}
\begin{center}
\begin{tabular}{|c|c|cccccc|c|}
\hline
${\alpha}$ & $h$ & $1/16$ & $1/32$ &$1/64$ &$1/128$ & $1/256$ & 1/512&ratio \\
\hline
$1.25$ & $L^2$ &5.13e-3 &2.89e-3 &1.69e-3 &1.00e-3 &6.03e-4 &3.71e-4 &$\approx$ 0.76 ($0.25$) \\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &4.93e-2 &4.39e-2 &3.98e-2 &3.62e-2 &3.29e-2 &3.00e-2 &$\approx$ 0.14 ($0.13$)\\
\hline
$1.5$ & $L^2$ &3.62e-4 &1.70e-4 &8.37e-5 &4.17e-5 &2.09e-5 &1.06e-5 &$\approx$ 1.02 ($0.50$)\\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &7.57e-3 &6.25e-3 &5.20e-3 &4.33e-3 &3.58e-3 &2.91e-3 &$\approx$ 0.27 ($0.25$)\\
\hline
$1.75$ & $L^2$ &1.12e-5 &4.61e-6 &1.92e-6 &8.02e-7 &3.35e-7 &1.37e-7 &$\approx$ 1.26 ($0.75$)\\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &4.63e-4 &3.47e-4 &2.58e-4 &1.95e-4 &1.46e-4 &1.06e-4 &$\approx$ 0.42 ($0.38$)\\
\hline
\end{tabular}
\end{center}
\end{table}
In Table \ref{tab:smoothChecktime}, we let the spatial step size $h \rightarrow 0$
and examine the temporal convergence order, and observe an $O(\tau)$ and $O(\tau^2)$ convergence
rates for the backward Euler method and Crank-Nicolson method, respectively.
Note that for the case ${\alpha}=1.75$, the Crank-Nicolson method fails to achieve
an optimal convergence order. This is attributed to the fact that $v$ is not in the domain
of the differential operator ${_0^RD_x^{{\alpha}}}$
for ${\alpha} > 1.5$. In contrast, the damped Crank-Nicolson method yields the
desired $O(\tau^2)$ convergence rates, cf. Table \ref{tab:DampedCNsmooth}.
This confirms the discussions in Section \ref{ssec:fullcn}.
\begin{figure}
\caption{Numerical results for example (a) (smooth data) with ${\alpha}
\end{figure}
\begin{table}[htb!]
\caption{$L^2$-norm of the error for example (a),
smooth initial data, with ${\alpha}=1.25, 1.5, 1.75$,
$h=2\times 10^{-5}$ (BE - backward Euler, CN - Crank-Nicolson)}
\lambdabel{tab:smoothChecktime}
\begin{center}
\begin{tabular}{|c|c|ccccc|c|}
\hline
& $\tau$ & $1/10$ & $1/20$ &$1/40$ &$1/80$ & $1/160$ &ratio \\
\hline
BE
& ${\alpha}=1.25$ &3.01e-2 &1.41e-2 &6.63e-3 &3.10e-3 &1.41e-3 &$\approx$ 1.10 (1.00) \\
\cline{2-7}
& ${\alpha}=1.5$ &1.32e-2 &5.88e-3 &2.71e-3 &1.25e-3 &5.62e-4 &$\approx$ 1.13 (1.00) \\
\cline{2-7}
& ${\alpha}=1.75$ &4.79e-3 &1.88e-3 &7.95e-3 &3.53e-4 &1.55e-4 &$\approx$ 1.20 (1.00) \\
\hline
CN
& ${\alpha}=1.25$ &3.18e-3 &5.98e-4 &1.35e-4 &3.32e-5 &8.52e-6 &$\approx 2.10$ (2.00)\\
\cline{2-7}
& ${\alpha}=1.5$ &3.22e-3 &7.32e-4 &1.75e-4 &4.32e-5 &1.05e-5 &$\approx$ 2.06 (2.00) \\
\cline{2-7}
& ${\alpha}=1.75$ &3.67e-3 &1.09e-3 &3.33e-4 &1.08e-4 &3.09e-5 &$\approx$ 1.73 ( - - ) \\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{table}[htb!]
\caption{$L^2$-norm of the error for example (a),
smooth initial data, for damped Crank-Nicolson method
with ${\alpha}=1.75$
and $h=2\times 10^{-5}$.}
\lambdabel{tab:DampedCNsmooth}
\begin{center}
\begin{tabular}{|c|ccccc|c|}
\hline
$\tau$ & $1/10$ & $1/20$ &$1/40$ &$1/80$ & $1/160$ &ratio \\
\hline
${\alpha}=1.75$ &7.57e-4 &1.98e-4 &5.45e-5 &1.40e-5 &2.90e-6 &$\approx$ 1.98 (2.00) \\
\hline
\end{tabular}
\end{center}
\end{table}
\subsection{Numerical results for nonsmooth initial data: example (b)}
In Tables \ref{tab:nonsmooth1BE}, \ref{tab:nonsmooth1Checktime} and \ref{tab:nonsmooth1smalltime},
we present numerical results for problem (b1). Table \ref{tab:nonsmooth1BE} shows that the spatial
convergence rate is of the order $O(h^{{\alpha}-1+\beta})$ in $L^2{(D)}$-norm and $O(h^{{\alpha}/2-1+\beta})$
in $\Hd{{\alpha}/2}$, whereas Table \ref{tab:nonsmooth1Checktime} shows that the temporal convergence
order is of order $O(\tau)$ and $O(\tau^2)$ for the backward Euler method and damped Crank-Nicolson
method, respectively. For the case of nonsmooth initial data, we are interested in the errors for
$t$ close to zero, and thus we check the error at $t=0.1$, $0.01$, $0.005$ and $0.001$. From Table
\ref{tab:nonsmooth1smalltime}, we observe that both the $L^2{(D)}$-norm and $\Hd{{\alpha}/2}$-norm of
the error exhibit superconvergence, which theoretically remains to be established. Numerically,
for this example, one observes that the solution is smoother than in $\Hdi 0{{\alpha}-1+\beta}$ for
small time $t$, cf. Fig. \ref{fig:checksolution_b1}.
Similarly, the numerical results for problem (b2) are presented in Tables \ref{tab:nonsmooth2BE},
\ref{tab:nonsmooth2Checktime} and \ref{tab:nonsmooth2smalltime}; see also Fig. \ref{fig:nonsmooth2_small_time}
for a plot of the results in Table \ref{tab:nonsmooth2smalltime}. It is observed that the convergence
is slower than that for problem (b1), due to the lower solution regularity.
\begin{table}[htb!]
\small
\caption{$L^2$- and $ \tilde{H}^{{\alpha}/2}$-norms of the error for example (b1), nonsmooth initial data,
for backward Euler method with $\tau=10^{-5}$.}
\lambdabel{tab:nonsmooth1BE}
\begin{center}
\begin{tabular}{|c|c|cccccc|c|}
\hline
${\alpha}$& $h$ & $1/16$ & $1/32$ &$1/64$ &$1/128$ & $1/256$ & 1/512&ratio \\
\hline
$1.25$ & $L^2$ &6.65e-3 &3.75e-3 &2.18e-3 &1.29e-3 &7.78e-4 &4.77e-4 &$\approx$ 0.76 ($0.25$)\\
\cline{2-8}
&$\tilde{H}^{{\alpha}/2}$ &6.36e-2 &5.66e-2 &5.12e-2 &4.66e-2 &4.24e-2 &3.87e-2 &$\approx$ 0.14 ($0.13$)\\
\hline
$1.5$ & $L^2$ &3.78e-4 &1.77e-4 &8.56e-5 &4.22e-5 &2.09e-5 &1.04e-5 &$\approx$ 1.03 ($0.50$)\\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &7.31e-3 &6.01e-3 &5.00e-3 &4.16e-3 &3.43e-3 &2.79e-3 &$\approx$ 0.27 ($0.25$) \\
\hline
$1.75$ & $L^2$ &2.11e-5 &9.49e-6 &4.06e-6 &1.69e-6 &6.83e-7 &2.59e-7 &$\approx$ 1.27 ($0.75$)\\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &3.63e-4 &2.69e-4 &1.99e-4 &1.50e-4 &1.12e-4 &8.19e-5 &$\approx$ 0.43 ($0.38$) \\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{table}[htb!]
\caption{$L^2$-norm of the error for example (b1),
non-smooth initial data, with
$h=2\times 10^{-5}$ (BE - backward Euler, CN - Crank-Nicolson)
}
\lambdabel{tab:nonsmooth1Checktime}
\begin{center}
\begin{tabular}{|c|c|ccccc|c|}
\hline
& $\tau$ & $1/10$ & $1/20$ &$1/40$ &$1/80$ & $1/160$ &ratio \\
\hline
BE
& ${\alpha}=1.25$ &3.73e-2 &1.80e-2 &8.53e-3 &4.00e-3 &1.81e-3 &$\approx$ 1.09 (1.00)\\
\cline{2-7}
& ${\alpha}=1.5$ &1.26e-2 &5.64e-3 &2.59e-3 &1.20e-3 &5.40e-4 &$\approx$ 1.13 (1.00)\\
\cline{2-7}
& ${\alpha}=1.75$ &3.68e-3 &1.44e-3 &6.12e-3 &2.71e-4 &1.20e-4 &$\approx$ 1.19 (1.00)\\
\hline
CN
& ${\alpha}=1.25$ &3.52e-3 &9.10e-4 &2.39e-4 &5.90e-5 &1.30e-5 &$\approx 2.01$ (2.00)\\
\cline{2-7}
& ${\alpha}=1.5$ &8.86e-4 &2.42e-2 &6.46e-5 &1.61e-5 &3.44e-6 &$\approx$ 1.99 (2.00)\\
\cline{2-7}
& ${\alpha}=1.75$ &1.86e-4 &4.01e-5 &1.02e-5 &2.57e-6 &5.41e-7 &$\approx$ 2.09 (2.00)\\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{table}[htb!]
\small
\caption{$L^2$- and $ \tilde{H}^{{\alpha}/2}$-norms of the error for example (b1), nonsmooth initial data,
with ${\alpha}=1.5$
for backward Euler method and $\tau=10^{-5}$.}
\lambdabel{tab:nonsmooth1smalltime}
\begin{center}
\begin{tabular}{|c|c|cccccc|c|}
\hline
$t$ & $h$ & $1/16$ & $1/32$ &$1/64$ &$1/128$ & $1/256$ & 1/512&ratio \\
\hline
$0.1$ & $L^2$ &3.64e-3 &1.53e-3 &7.42e-4 &3.72e-5 &1.87e-4 &9.46e-5 &$\approx$ 1.04 ($0.50$) \\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &7.00e-2 &5.59e-2 &4.62e-2 &3.87e-2 &3.21e-2 &2.61e-2 &$\approx$ 0.28 ($ 0.25$) \\
\hline
$0.01$ & $L^2$ &2.81e-2 &7.07e-2 &1.63e-3 &3.84e-4 &9.21e-5 &2.18e-5 &$\approx$ 2.07 ($ 0.50$)\\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &4.04e-1 &1.56e-1 &6.09e-2 &2.49e-2 &1.03e-2 &4.27e-3 &$\approx$ 1.31 ($ 0.25$) \\
\hline
$0.005$ & $L^2$ &4.27e-2 &1.45e-2 &3.44e-3 &7.95e-4 &1.88e-4 &4.41e-4 &$\approx$ 2.07 ($0.50$) \\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &5.94e-1 &3.34e-1 &1.27e-1 &5.05e-2 &2.08e-2 &8.56e-3 &$\approx$ 1.26 ($0.25$) \\
\hline
$0.001$ & $L^2$ &1.41e-1 &5.22e-2 &1.64e-2 &4.47e-3 &1.02e-3 &2.32e-3 &$\approx$ 1.80 ($0.50$) \\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &2.61e0 &1.45e0 &6.63e-1 &2.81e-1 &1.08e-1 &4.34e-2 &$\approx$ 1.20 ($0.25$) \\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{figure}
\caption{Solution profile of example (b1) with ${\alpha}
\end{figure}
\begin{table}[htb!]
\small
\caption{$L^2$- and $ \tilde{H}^{{\alpha}/2}$-norms of the error for example (b2), nonsmooth initial data,
for backward Euler method with $\tau=10^{-5}$.}
\lambdabel{tab:nonsmooth2BE}
\begin{center}
\begin{tabular}{|c|c|cccccc|c|}
\hline
${\alpha}$& $h$ & $1/16$ & $1/32$ &$1/64$ &$1/128$ & $1/256$ & 1/512&ratio \\
\hline
$1.25$ & $L^2$ &6.31e-3 &3.55e-3 &2.07e-3 &1.23e-3 &7.38e-4 &4.53e-4 &$\approx$ 0.76 ($0.25$) \\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &6.03e-2 &5.37e-2 &4.86e-2 &4.42e-2 &4.02e-2 &3.67e-2 &$\approx$ 0.14 ($0.13$) \\
\hline
$1.5$ & $L^2$ &4.11e-4 &1.91e-4 &9.24e-5 &4.55e-5 &2.26e-5 &1.12e-5 &$\approx$ 1.03 ($0.50$) \\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &7.88e-3 &6.48e-3 &5.39e-3 &4.48e-3 &3.70e-3 &3.01e-3 &$\approx$ 0.27 ($0.25$) \\
\hline
$1.75$ & $L^2$ &2.75e-5 &1.21e-6 &5.09e-6 &2.11e-6 &8.48e-7 &3.20e-7 &$\approx$ 1.28 ($0.75$) \\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &4.50e-4 &3.33e-4 &2.46e-4 &1.86e-4 &1.39e-4 &1.01e-4 &$\approx$ 0.42 ($0.38$) \\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{table}[htb!]
\caption{$L^2$-norm of the error for example (b2),
non-smooth initial data, with
$h=2\times 10^{-5}$ (BE - backward Euler, CN - Crank-Nicolson).}
\lambdabel{tab:nonsmooth2Checktime}
\begin{center}
\begin{tabular}{|c|c|ccccc|c|}
\hline
& $\tau$ & $1/10$ & $1/20$ &$1/40$ &$1/80$ & $1/160$ &ratio \\
\hline
BE
& ${\alpha}=1.25$ &3.57e-2 &1.71e-2 &8.09e-3 &3.80e-3 &1.71e-3 &$\approx$ 1.09 (1.00) \\
\cline{2-7}
& ${\alpha}=1.5$ &1.36e-2 &6.82e-3 &2.80e-3 &1.30e-3 &5.81e-4 &$\approx$ 1.13 (1.00) \\
\cline{2-7}
& ${\alpha}=1.75$ &4.55e-3 &1.78e-3 &7.57e-3 &3.35e-4 &1.48e-4 &$\approx$ 1.20 (1.00) \\
\hline
CN
& ${\alpha}=1.25$ &3.32e-3 &8.59e-4 &2.26e-4 &5.60e-5 &1.24e-5 &$\approx$ 2.03 (2.00)\\
\cline{2-7}
& ${\alpha}=1.5$ &9.36e-4 &2.59e-5 &6.95e-5 &1.74e-6 &3.80e-7 &$\approx$ 1.99 (2.00) \\
\cline{2-7}
& ${\alpha}=1.75$ &1.69e-4 &4.43e-5 &1.22e-5 &3.15e-6 &6.50e-7 &$\approx$ 1.99 (2.00) \\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{table}[htb!]
\small
\caption{$L^2$- and $ \tilde{H}^{{\alpha}/2}$-norms of the error for example (b2), nonsmooth initial data,
for backward Euler method with $\tau=10^{-5}$.}
\lambdabel{tab:nonsmooth2smalltime}
\begin{center}
\begin{tabular}{|c|c|cccccc|c|}
\hline
$t$& $h$ & $1/16$ & $1/32$ &$1/64$ &$1/128$ & $1/256$ & 1/512&ratio \\
\hline
$0.1$ & $L^2$ &1.73e-2 &8.56e-3 &4.27e-3 &2.14e-3 &1.08e-3 &5.43e-4 &$\approx$ 1.00 (0.50) \\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &3.83e-1 &3.20e-1 &2.67e-1 &2.23e-1 &1.84e-1 &1.50e-1 &$\approx$ 0.26 (0.25) \\
\hline
$0.01$ & $L^2$ &3.35e-2 &1.39e-2 &6.45e-3 &3.17e-3 &1.58e-3 &7.97e-4 &$\approx$ 1.07 (0.50)\\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &6.41e-1 &4.89e-1 &3.97e-1 &3.28e-1 &2.71e-1 &2.20e-1 &$\approx$ 0.30 (0.25) \\
\hline
$0.005$ & $L^2$ &4.23e-2 &1.83e-2 &7.65e-3 &3.61e-3 &1.79e-3 &8.96e-4 &$\approx$ 1.11 (0.50) \\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &7.52e-1 &5.89e-1 &4.55e-1 &3.71e-1 &3.04e-1 &2.47e-1 &$\approx$ 0.29 (0.25) \\
\hline
$0.001$ & $L^2$ &1.07e-1 &4.12e-2 &1.54e-2 &5.89e-3 &2.49e-3 &1.19e-3 &$\approx$ 1.30 (0.50) \\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &1.98e0 &1.19e0 &7.51e-1 &5.19e-1 &4.08e-1 &3.28e-1 &$\approx$ 0.52 (0.25) \\
\hline
\end{tabular}
\end{center}
\end{table}
\begin{figure}
\caption{Numerical results for example (b2)
with ${\alpha}
\end{figure}
\subsection{Numerical results for general problems: example (c)}
Our theory can easily extend to problems with a potential function $q \in L^{\infty}{(D)}$.
G{\aa}rding's inequality holds for the bilinear form, and thus all theoretical results follow
by the same argument. The normalized $L^2{(D)}$- and $\Hd{{\alpha}/2}$-norms of the spatial error
are reported in Table \ref{tab:general} at $t=1$ for ${\alpha}=1.25$, $1.5$ and $1.75$.
The results concur with the preceding convergence rates.
\begin{table}[htb!]
\small
\caption{$L^2$-norm of the error for the general differential equation
with non-smooth initial data, example (c), and
$\tau=2\times 10^{-5}$ (BE - backward Euler, CN - Crank-Nicolson).}
\label{tab:general}
\begin{center}
\begin{tabular}{|c|c|cccccc|c|}
\hline
${\alpha}$& $h$ & $1/16$ & $1/32$ &$1/64$ &$1/128$ & $1/256$ & $1/512$ & ratio \\
\hline
$1.25$ & $L^2$ &4.80e-3 &2.71e-3 &1.58e-3 &9.40e-4 &5.66e-4 &3.48e-4 &$\approx$ 0.76 (0.25) \\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &4.62e-2 &4.12e-2 &3.73e-2 &3.39e-2 &3.09e-2 &2.82e-2 &$\approx$ 0.14 (0.13) \\
\hline
$1.5$ & $L^2$ &2.75e-4 &1.31e-4 &6.50e-5 &3.24e-5 &1.63e-5 &8.20e-6 &$\approx$ 1.00 (0.50) \\
\cline{2-8}
&$\tilde{H}^{{\alpha}/2}$ &5.90e-3 &6.86e-3 &4.05e-3 &3.37e-3 &2.79e-3 &2.26e-3 &$\approx$ 0.27 (0.25) \\
\hline
$1.75$ & $L^2$ &7.88e-6 &3.19e-6 &1.33e-6 &5.58e-7 &2.34e-7 &9.60e-8 &$\approx$ 1.27 (0.75) \\
\cline{2-8}
& $\tilde{H}^{{\alpha}/2}$ &3.24e-4 &2.42e-4 &1.80e-4 &1.36e-4 &1.02e-4 &7.43e-5 &$\approx$ 0.42 (0.38) \\
\hline
\end{tabular}
\end{center}
\end{table}
\section{Conclusion}\label{sec:conclusion}
In this paper, we have studied a finite element method for an initial boundary value problem for
the parabolic problem with a space fractional derivative of Riemann-Liouville type and order ${\alpha}\in
(1,2)$ using the analytic semigroup theory. The existence and uniqueness of a weak solution in $L^2
(0,T;\Hd{{\alpha}/2})$ were established, and an improved regularity result was also shown. Error estimates
in the $L^2{(D)}$- and $\Hd{{\alpha}/2}$-norms were established for a space semidiscrete scheme with a piecewise
linear finite element method, and $L^2{(D)}$-norm estimates for fully discrete schemes based on the backward
Euler method and the Crank-Nicolson method, for both smooth and nonsmooth initial data.
The numerical experiments fully confirmed the convergence of the numerical schemes, but the $L^2{(D)}$-norm
error estimates are suboptimal: the empirical convergence rates are one-half order higher
than the theoretical ones. This suboptimality is attributed to the inefficiency of Nitsche's trick,
as a consequence of the low regularity of the adjoint solution. Numerically, we observe that the
$\Hd{{\alpha}/2}$-norm convergence rates agree well with the theoretical ones. The optimal
convergence rates in the $L^2{(D)}$-norm and the $\Hd{{\alpha}/2}$-norm estimate for the fully
discrete schemes still await theoretical justifications.
\section{Proof of Theorem~\ref{thm:existence}}\label{app:existence}
\begin{proof}
We divide the proof into four steps.
\noindent Step (i) (energy estimates for $u_m$).
Upon taking $u_m$ as the test function and using the identity $2(u_m',u_m)=\frac{d}{dt}\|u_m\|_{L^2{(D)}}^2$ for a.e.\ $0 \le t \le T$ together with the
coercivity of $A(\cdot,\cdot)$, we deduce
\begin{equation}\label{appenergy1}
\frac{d}{dt}\|u_m(t)\|_{L^2{(D)}}^2 + c_0\|u_m(t)\|^2_{\Hd{{\alpha}/2}} \le 2\|f(t)\|_{H^{-{\alpha}/2}{(D)}}\|u_m(t)\|_{\Hd{{\alpha}/2}}.
\end{equation}
Young's inequality and integration in $t$ over $(0,t)$ give
\begin{equation*}
\max_{0\le t\le T} \|u_m(t)\|_{L^2{(D)}}^2 \le \| v \|_{L^2{(D)}}^2 +
C \| f \|_{L^2(0,T;H^{-{\alpha}/2}{(D)})}^2 .
\end{equation*}
Next we integrate \eqref{appenergy1} from $0$ to $T$, and repeat the argument to get
\begin{equation}\label{eqn:uineq}
\| u_m \|_{L^2(0,T;\Hd {{\alpha}/2})}^2
\le \| v \|_{L^2{(D)}}^2 + C\| f \|_{L^2(0,T;H^{-{\alpha}/2}{(D)})}^2 .
\end{equation}
Finally we bound $\| u_m' \|_{L^2(0,T;H^{-{\alpha}/2}{(D)})}$. For any $\varphi \in \Hd{{\alpha}/2}$
such that $\|\varphi\|_{\Hd{{\alpha}/2}} \le 1$, we decompose it into
$\varphi=P\varphi + (I-P)\varphi$ with $P\varphi \in \text{span}\{\omega_k\}_{k=1}^m$ and
$(I-P)\varphi \in \text{span}\{\omega_k \}_{k>m}$.
By the stability of the projection $P$, $\|P\varphi\|_{\Hd{{\alpha}/2}}
\le C\|\varphi\|_{\Hd{{\alpha}/2}} \le C$, it follows from
$(u_m',P\varphi)+A(u_m,P\varphi)=(f,P\varphi)$ and $(u_m', P \varphi)=(u_m',\varphi)$ that
\begin{equation*}
\begin{aligned}
|\langle u_m' (t), \varphi \rangle| = |\langle u_m' (t), P\varphi\rangle|
& \le C\left(\|f(t) \|_{H^{-{\alpha}/2}{(D)}}+\|u_m(t)\|_{\Hd {{\alpha}/2}}\right).
\end{aligned}
\end{equation*}
Consequently, by the duality argument and \eqref{eqn:uineq} we arrive at
\begin{equation}\label{eqn:utineq}
\| u_m' \|_{L^2(0,T;H^{-{\alpha}/2}{(D)})}^2 \le C \left(\| f \|_{L^2(0,T;H^{-{\alpha}/2}{(D)})}^2 + \| v \|_{L^2{(D)}}^2\right ).
\end{equation}
\noindent Step (ii) (convergent subsequence).
By \eqref{eqn:uineq} and \eqref{eqn:utineq}, there exists a subsequence, also
denoted by $\{u_m\}$, and
$u \in L^2(0,T;\Hd {{\alpha}/2})$ and $\tilde{u} \in L^2(0,T;H^{-{\alpha}/2}{(D)})$, such that
\begin{equation}\label{weakconvg}
\begin{split}
u_{m} &\rightarrow u \quad \text{weakly in } L^2(0,T;\Hd{{\alpha}/2}), \\
u_{m}' &\rightarrow \tilde{u} \quad \text{weakly in } L^2(0,T;H^{-{\alpha}/2}{(D)}).
\end{split}
\end{equation}
By choosing $\phi \in C_0^{\infty}[0,T]$ and $\psi \in \Hd {{\alpha}/2}$, we deduce
$$
\int_0^T\langle u_m',\phi\psi \rangle \,dt=-\int_0^T\langle u_m,\phi'\psi \rangle \,dt.
$$
By taking $m\rightarrow \infty$ we obtain
\begin{equation*}
\int_0^T\langle \tilde{u},\phi\psi \rangle \,dt=-\int_0^T\langle u,\phi'\psi \rangle \,dt
=\int_0^T\langle u',\phi\psi \rangle \,dt.
\end{equation*}
Thus $\tilde u=u'$ by the density of $\{ \phi(t)\psi(x) \}$ in $ L^2(0,T;\Hd{{\alpha}/2})$.
\noindent Step (iii) (weak form). Now for a fixed integer $N$, we choose a test
function $\psi \in V_N = \text{span}\{\omega_k\}_{k=1}^N$, and $\phi\in C^\infty[0,T]$.
Then for $m\ge N$, there holds
\begin{equation}\label{weak4}
\int_0^T \langle u_m',\psi\phi\rangle +A(u_m,\psi)\phi \, dt = \int_0^T \langle f,\psi\phi \rangle \, dt.
\end{equation}
Then letting $m \rightarrow \infty$, \eqref{weakconvg}
and the density of $\{ \phi(t)\psi(x) \}$ in $ L^2(0,T;\Hd{{\alpha}/2})$ give
\begin{equation}\label{weak2}
\int_0^T \langle u',\varphi\rangle +A(u,\varphi) \, dt = \int_0^T \langle f,\varphi \rangle \, dt,\quad \forall \varphi \in L^2(0,T;\Hd {{\alpha}/2}).
\end{equation}
Consequently, we arrive at
\begin{equation*}
\langle u',\varphi\rangle +A(u,\varphi) = \langle f,\varphi \rangle,\ \ \forall \varphi \in \Hd {{\alpha}/2}\quad \,\mbox{a.e. } 0\leq t \leq T.
\end{equation*}
\noindent Step (iv) (initial condition). The argument presented in \cite[Theorem 3, pp.
287]{Evans:2010} yields $u \in C([0,T];L^2{(D)})$. By taking $\phi \in C^{\infty}[0,T]$
with $\phi(T)=0$ and $\psi\in \text{span}\{\omega_k\}_{k=1}^N$, integrating \eqref{weak4} and \eqref{weak2}
by parts with respect to $t$, and a standard density argument, we arrive at the initial condition $u(0)=v$.
The uniqueness follows directly from the energy estimates.
\end{proof}
\end{document} |
\begin{document}
\title{Local approximation of multipartite quantum measurements}
\author{Scott M. Cohen}
\email{[email protected] (he/him/his)}
\affiliation{Department of Physics, Portland State University, Portland Oregon 97201, USA}
\begin{abstract}
We provide a necessary condition that a quantum measurement can be implemented by the class of protocols known as Local Operations and Classical Communication, or LOCC, including when an error is allowed but must vanish in the limit of an infinite number of rounds, a case referred to as asymptotic LOCC. Our condition unifies, extends, and provides an intuitive, geometric justification for previous results on asymptotic LOCC. We use our condition to answer a variety of long-standing, unsolved problems, including for distinguishability of certain sets of states by LOCC. These include various classes of unextendible product bases, for which we prove they cannot be distinguished by LOCC even when infinite resources are available and asymptotically vanishing error is allowed.
\end{abstract}
\date{\today}
\pacs{03.65.Ta, 03.67.Ac}
\maketitle
\section{Introduction}
Beginning in the earliest days of quantum mechanics, even the leading scientists of the time struggled to understand the notion of quantum entanglement \cite{EPR,SchrodingerAll,Bohr1,Bohr2}, an unusual correlation that has no classical counterpart. More recently, entanglement has been found to underlie many of the key advancements in quantum information processing, including but not limited to, the surprising phenomenon of teleportation \cite{BennettTele,ExptTele6,ExptTele7}, quantum computing \cite{Benioff,Deutsch,FeynmanQComp,ShorFactor,MartinisQComp}, and quantum cryptography \cite{Ekert,BB84b}. Yet, our understanding of entanglement remains far from complete. In an effort to better understand this important phenomenon, researchers developed the first quantum resource theory \cite{ChitGourResource}, that for the resource of entanglement \cite{HoroRMP}. In general, such theories identify the states and operations that are available for free as separate from those that are, in some sense, costly. For entanglement, in particular, the free operations are known as \emph{local quantum operations and classical communication}, commonly denoted as LOCC. LOCC are the operations that cannot create entanglement between two or more spatially separated subsystems, and it has therefore long been recognized that an understanding of LOCC will greatly contribute to our understanding of entanglement (see, for example, Ref.~\cite{Nielsen,BKrausStateTransf}). Certainly, the study of LOCC has by now a long and distinguished history \cite{PeresWootters,BennettTele,Bennett9,BennettPurifyTele,Walgate}, driven also by the fact that LOCC has important practical applications in its own right \cite{CiracDistComp,BennettPurifyTele,Nielsen,BKrausStateTransf}. 
Nonetheless, limited progress has been made concerning the important asymptotic case of vanishingly small error \cite{Bennett9}, although recent years have seen renewed interest in this problem \cite{KKB,WinterLeung,FuLeungMancinska,ChitambarHsiehPeresWootters,ChildsLeung,ChitambarHsiehHierarchy}.
Here, we consider the problem of determining if a measurement ${\cal M}$ on a multipartite quantum system, consisting of any number of spatially separated parties $P$, can be carried out when those parties are constrained to the use of LOCC. In particular, our aim is to address ``asymptotic LOCC'', here denoted $\overline{\textrm{LOCC}}$ (the topological closure of LOCC), for which an error is allowed in the implementation of ${\cal M}$, but that this error must become vanishingly small when the number of rounds used by the parties is allowed to grow without limit. Our main result, see Theorem~\ref{thm2} below, is a necessary condition that ${\cal M}$ can be implemented by $\overline{\textrm{LOCC}}$. The key to our result is recognition of the significance of a geometric object ${\cal Z}_{\cal M}$, uniquely associated to ${\cal M}$, followed by proof of existence of certain kinds of continuous paths lying within ${\cal Z}_{\cal M}$ when ${\cal M}$ is in $\overline{\textrm{LOCC}}$. Theorem~\ref{thm2} subsumes previous results on $\overline{\textrm{LOCC}}$, which we will show are direct consequences of our theorem, and being geometric in nature, it provides important intuition as to when a given measurement is, or is not, in $\overline{\textrm{LOCC}}$. We here use Theorem~\ref{thm2} to answer a number of longstanding unsolved problems.
When measurement ${\cal M}$ is possible by $\overline{\textrm{LOCC}}$, which we will represent as ${\cal M}\in\overline{\textrm{LOCC}}$, ${\cal M}$ can be approximated as closely as one wishes simply by including more and more rounds in the protocol. When this is not possible, then there is a non-zero gap between what can be accomplished by LOCC as compared to when ${\cal M}$ is implemented by global means. Several years ago, the authors in \cite{KKB} derived a necessary condition that ${\cal M}\in\overline{\textrm{LOCC}}$. Their condition was used to prove that when discriminating the ``double-trine'' ensemble \cite{PeresWootters} of quantum states with minimum error, the optimal LOCC probability of success is strictly smaller than by global means \cite{ChitambarHsiehPeresWootters}. Their condition was also used to prove \cite{FuLeungMancinska} that a global measurement is strictly better than LOCC for discriminating any unextendible product basis \cite{IBM_CMP} on a $3\times3$ system. In addition, a ``nonlocality constant'' $\eta$ is defined in \cite{ChildsLeung} and used to obtain a lower bound on the probability of error in LOCC discrimination of any set of bipartite states. A necessary condition for perfect discrimination by $\overline{\textrm{LOCC}}$ is then that $\eta=0$, a condition that is implied by the necessary condition of \cite{KKB}. These accomplishments notwithstanding, there remains a great deal to learn about asymptotic LOCC.
The remainder of the paper is organized as follows. We begin by reviewing how LOCC, and quantum measurements in general, can be mathematically described, and then we present our main result, Theorem~\ref{thm2}. In Section~\ref{sec3}, we provide a detailed proof for Theorem~\ref{thm2} and then in Section~\ref{sec4}, we illustrate its usefulness with several examples of local state discrimination, as well as by showing that the necessary conditions of \cite{KKB} (and therefore, \cite{ChildsLeung}), as well as \cite{ChitambarHsiehHierarchy}, all follow directly from our Theorem~\ref{thm2}. Finally, we close with a discussion of the implications of what we have found.
\section{Main Result}\label{sec2}
Any quantum measurement ${\cal M}$ may be thought of as a positive operator-valued measure, or POVM. A POVM consists of a set of operators, $E_j\ge0$, individually referred to as a POVM element. Each $E_j$ is a positive semidefinite operator, denoted as $E_j\ge0$, which means simply that the eigenvalues of $E_j$ are all non-negative. These operators satisfy a completeness relation,
\begin{align}\label{eqn100}
\sum_{j=1}^{|{\cal M}|}E_j={\cal I}_{\cal H},
\end{align}
where $|{\cal M}|$ is the number of POVM elements in ${\cal M}$, and ${\cal I}_{\cal H}$ is the identity operator on Hilbert space ${\cal H}$, of finite dimension $D$, describing states of the quantum system being measured.
An LOCC protocol implements an overall measurement ${\cal M}$ through a sequence of intermediate, local measurements by the individual parties. Such a protocol consists of one party making a measurement on their local system and then communicating the outcome of that measurement to the other parties. This is followed, according to a pre-approved plan, by the next party making a measurement and communicating the result to the others. Notice that only one party measures at a time, and they continue measuring and sharing classical information for however many rounds, possibly an infinite number, as is necessary.
As is commonly done, we represent an LOCC protocol as a tree graph, consisting of nodes connected by an edge to each of its children. Each node $n$ is associated with a positive semidefinite operator $F_n$ representing the action of all parties up to that point in the protocol; a method for obtaining operators $F_n$ from the actual protocol is described in detail in the first paragraph of Section~II of \cite{myLOCCbyFirstMeas}. Because intermediate measurements are always local, each $F_n$ is of the tensor product form ${\cal A}_n\otimes{\cal B}_n\otimes\cdots$, where ${\cal A}_n$ is an operator on the first party's Hilbert space ${\cal H}_A$ and similarly for the other operators appearing in this expression.\footnote{Any positive semidefinite operator may be written as $F_n=f_n^\dagger f_n$, for some operator, $f_n$. That is, $F_n$ is Hermitian and acts as $F_n:{\cal H}\to{\cal H}$; the input and output spaces are the same. While the input and output spaces of $f_n$ may certainly differ from each other (and from ${\cal H}$), this is not an issue of concern for us here, since we only need consider $F_n$. In any case, it is possible to show that for any protocol that involves intermediate maps $f_n$ that have input and output spaces that differ, there is an equivalent protocol with $f_n$ replaced by $f_n^\prime$, where $f_n^\prime:{\cal H}\to{\cal H}$, and then for each final outcome labeled by $l$, a single map (which will be an isometry when the output space is larger than the input) from ${\cal H}$ to ${\cal H}_l$ is implemented at the end of the protocol.} The root node represents the situation before any of the parties have done anything, and so is associated with the identity operator ${\cal I}_{\cal H}$. Every local measurement has multiple outcomes, each represented by one of the child nodes of their shared parent. Because this local measurement is complete, then in analogy to Eq.~\eqref{eqn100}, the sum of sibling child nodes is equal to their parent. 
A branch of the protocol begins at the root node and stretches from each node to one of its children, continuing without end in the case of an infinite branch, or terminating at what is known as a leaf node (leaf nodes being those that do not themselves have children). Those protocols that include infinite branches may be thought of as the limit of a sequence of finite-round protocols, and this is what is meant by asymptotic LOCC.
We will presently introduce certain \emph{convex sets}. Convex sets satisfy the condition that if $a$ and $b$ are each members of that set, then for any $0\le x\le1$, $(1-x)a+xb$ is also a member of that set. Starting with the convex set, $P$, consisting of all positive semidefinite operators, define the proper subset ${\cal Z}\subset P$ as ${\cal Z}=\left\{z\left\vert z=\sum_jc_jE_j, 0\le c_j\le1,E_j\in P\right.\right\}$. That is, ${\cal Z}$ consists of all positive linear combinations of the operators, $E_j$, with coefficients not exceeding unity. Equivalently, ${\cal Z}$ may be viewed as the Minkowski sum \cite{MinkowskiSum} of a set of line segments stretching from the zero operator to operators $E_j$. These line segments will be denoted as $[0,E_j]$, whereas the half-open line segment $(0,E_j]$ omits the zero operator. The geometric object ${\cal Z}$, which is known as a \emph{zonotope}, may therefore also be written as ${\cal Z}=\sum_j[0,E_j]$.
Consider an LOCC protocol implementing measurement ${\cal M}$ consisting of POVM elements $E_j$. Let us envision each branch of this protocol as a path through $P$. Significantly, as we will see in the next section, these paths are confined within the zonotope, ${\cal Z}\subset P$, defined above. We will refer to ``monotonic'' paths, by which we mean a path of operators $\Pi(s)$ such that $\textrm{Tr}\left({\Pi(s)}\right)$ is a monotonic function of $s$. Then, we have our main theorem.
\begin{thm2}\label{thm2}
If ${\cal M}\in\overline{\textrm{LOCC}}$, with measurement ${\cal M}$ consisting of POVM elements $E_j$, then for each $j$, there exists a continuous, monotonic path of product operators from ${\cal I}_{\cal H}$ to a point on the (half-open) line segment $(0,E_j]$, and this path lies entirely within zonotope ${\cal Z}_{\cal M}=\sum_j[0,E_j]$.
\end{thm2}
The proof is given in the following section; those readers only interested in seeing how this theorem implies previously known conditions for asymptotic LOCC, as well as how it can be used to solve long outstanding problems, may skip ahead to Section~\ref{sec4}, where among other things, we will show that Theorem~\ref{thm2} implies the necessary conditions of \cite{KKB,ChildsLeung,ChitambarHsiehHierarchy}. In fact, we will show there that our theorem is strictly stronger than the first two of these conditions (and it is trivially stronger than the third, which applies only to cases of distinguishing a \emph{pair} of quantum states).
\section{Proof of Theorem~\ref{thm2}}\label{sec3}
In this section, we prove Theorem~\ref{thm2}. We follow the description given above of LOCC trees and how each node is associated with a positive operator representing the action of all parties up to that point in the protocol; see \cite{myLOCCbyFirstMeas} for more details. We will need Lemma~$1$ from \cite{myLOCCbyFirstMeas}, which is a straightforward consequence of the completeness of each intermediate measurement.
\begin{lem4}\cite{myLOCCbyFirstMeas}\label{lem4}
Each node $n$ in a finite-round LOCC tree is equal to the sum of all leaf nodes that are descended from that node.
\end{lem4}
\noindent We will also use the following lemma in obtaining our main result.
\begin{lem1}\label{lem1}
Given any measurement ${\cal M}$ with POVM elements $\{E_j\}$, the collection of operators ${\cal Z}_{\cal M}:=\{\sum_{j}c_{j}E_j\vert 0\le c_j\le1~\forall{j}\}$ is a compact, convex set.
\end{lem1}
\proof Convexity is obvious. To see that it is compact, notice that ${\cal Z}_{\cal M}=\sum_j[0,E_j]$, where this sum of line segments is of the Minkowski type, as discussed just above Theorem~\ref{thm2}. Each line segment is closed and bounded, hence compact, and the sum of compact sets is itself compact. Therefore, ${\cal Z}_{\cal M}$ is compact.\hspace{\stretch{1}}$\blacksquare$
We choose as our metric on operator space to be the trace norm, $\left\|{X}\right\|=\textrm{Tr}\left({\sqrt{X^\dag X}}\right)$, which for positive semidefinite operators is equal to the trace. To prove our results, we also need to define a distance measure on POVMs. From a simplistic perspective, two POVMs will be identical when they share the same set of POVM elements. However, there may be cases when the number of elements in the two POVMs differ, so we need to take such a possibility into account. This will be important in studying LOCC, for which the number of outcomes grows increasingly larger with the number of rounds, $r$, even while the target measurement for that LOCC protocol may have a relatively small number of outcomes. For the LOCC and target POVMs to be identical, then, there would have to be many outcomes from the LOCC protocol that are the same, apart from a positive scalar factor. Thus, we recognize that for two POVMs to be identical, each outcome in the first POVM must be proportional to one of the outcomes in the second, and vice versa. In addition, identification of the two POVMs requires that the combined weights of the two sets of so identified elements must be equal. That is, if all proportional elements in the first POVM are added together to reduce each such subset to a single element, and the same is done for the second POVM, then there must be a one-to-one relationship between the elements of the two POVMs, each (reduced) element from the first being equal to the corresponding (reduced) element from the second. For simplicity in what follows, when we say measurement ${\cal M}$ consists of outcomes $E_j$, we will assume such a reduction has already been carried out.
These considerations are precisely captured by the zonotopes discussed above \cite{AubrunLancienZonotopes}. For example, in direct analogy to the discussion above about combining weights of proportional POVM elements, the Minkowski sum of parallel line segments is just another parallel line segment having the combined length of the original subset. A POVM ${\cal M}$ with elements $E_j$ generates zonotope ${\cal Z}_{\cal M}=\sum_j[0,E_j]$, and one can show that two zonotopes are identical if and only if their corresponding POVMs are identical in the sense discussed in the preceding paragraph. Therefore, we define our distance measure on POVMs to be the Hausdorff distance between the corresponding zonotopes. That is,
\begin{align}\label{eqn101}
d({\cal M}_1,{\cal M}_2)=d_H({\cal Z}_1,{\cal Z}_2)=\max\left\{\sup_{z_1\in {\cal Z}_1}\inf_{z_2\in {\cal Z}_2}\left\|{z_1-z_2}\right\|,\sup _{z_2\in {\cal Z}_2}\inf_{z_1\in {\cal Z}_1}\left\|{z_1-z_2}\right\|\right\}.
\end{align}
We follow \cite{WinterLeung} in drawing a distinction between two subsets of infinite-round LOCC, each of which may be discussed in terms of sequences of finite-round LOCC protocols. The first such subset involves infinite-round protocols that are the limit of sequences for which the next protocol in the sequence is the same as the previous one, except for the addition of one or more rounds at the end. The limit of such a sequence of protocols is in LOCC. By contrast, one may instead have a sequence of finite-round protocols for which the earlier rounds are allowed to change when adding rounds in going from one protocol to the next. Each of the protocols in the latter type of sequence implements an LOCC measurement, say ${\cal M}_r$, but the measurement ${\cal M}$ that is the asymptotic limit of this sequence of measurements may not itself be LOCC, instead only being in the topological closure, denoted as ${\cal M}\in\overline{\textrm{LOCC}}$.
We will show below that if measurement ${\cal M}\in\overline{\textrm{LOCC}}$, then for each $E_j\in{\cal M}$, there exists a particular set of monotonic paths of product operators lying entirely within the compact, convex set ${\cal Z}_{\cal M}$ defined in Lemma~\ref{lem1}.
We begin with the following observation, in which we introduce the concept of a ``piecewise-local path''. Similar to that of a piecewise-constant curve, by this we mean a path made up of segments that are ``local'', or in other words, segments for which one and only one of the parties' tensor parts is changing. This should be made clear by the examples given in the next paragraph.
\begin{obs1}\label{obs1}
Each branch in an LOCC protocol, finite or infinite, constitutes a continuous, piecewise-local path in the convex set of positive semidefinite operators. Significantly, each point along this path is not just a positive semidefinite operator, but also a product operator of the form ${\cal A}\otimes{\cal B}\otimes{\cal C}\otimes\cdots$.
\end{obs1}
\noindent To see this, consider the sequence of positive operators labeling the nodes along a given branch, starting from the root node. Each such positive operator other than the root node represents the outcome of a local measurement by one of the parties. Being local, these measurements only change that particular party's part of the positive operator representing each outcome. Given that every protocol starts with a product operator, that being the identity operator, $I_A\otimes I_B\otimes\cdots$, then if Alice measures first with outcome ${\cal A}$, a continuous path of product operators from the identity operator to that outcome can be parameterized by $x$ ranging from $0$ to $1$ as $[(1-x)I_A+x{\cal A}]\otimes I_B\otimes\cdots$. Given that it is only the operator on ${\cal H}_A$ that changes, this piece of the path is local. If Bob measures next with outcome ${\cal B}$, this path is similarly parametrized as ${\cal A}\otimes[(1-y)I_B+y{\cal B}]\otimes\cdots$, which is another local piece for which only the ${\cal H}_B$ part of the operator changes. The remainder of the continuous path may be generated in the same fashion for each and every branch in the protocol, and it is clear that these paths consist of pieces that are local, as claimed. In the limit of infinite-round LOCC protocols, these paths still exist, just now with an infinite number of piecewise-local segments.
As a consequence of this observation, we obtain the following theorem, which provides a necessary condition for LOCC.
\begin{thm1}\label{thm1}
If an LOCC protocol, finite or infinite, implements a measurement ${\cal M}$ consisting of outcomes $E_j$, then for each $j$, there exists at least one continuous, monotonic, piecewise-local path from $I_{\cal H}$ to $(0,E_j]$, and every point along that path is a product operator. In addition, each of these paths lies entirely within the zonotope, ${\cal Z}_{\cal M}=\sum_j[0,E_j]$.
\end{thm1}
\proof The proof was given above, apart from the claims that the path is monotonic, terminates on $(0,E_j]$, and lies in ${\cal Z}_{\cal M}$. Monotonicity follows immediately from the recognition that each child node represents one of (generally) multiple outcomes of a measurement made at the parent node. Since the sum of the children, say $F_n$, is equal to their parent, $F_p$, then $\textrm{Tr}\left({F_p}\right)=\sum_n\textrm{Tr}\left({F_n}\right)\ge\textrm{Tr}\left({F_m}\right)$, for any $F_m$ in the set of children. Monotonicity is then evident from the fact that our paths proceed from parent to child, child to grandchild, and so on.
To show for finite protocols that the path terminates on $(0,E_j]$, notice that every leaf node terminating a branch is proportional to one of the $E_j$, since otherwise the protocol does not implement ${\cal M}$. This implies that each leaf $l$ is $\hat E_l=q_lE_j$, for some $j$ and $0<q_l\le1$ ($q_l$ cannot exceed unity because to implement ${\cal M}$, the sum of all leaf nodes proportional to $E_j$ must equal $E_j$). The argument just below Observation~\ref{obs1} indicates that the path terminates at $\hat E_l$, which lies on $(0,E_j]$, as claimed. To show that this path lies in ${\cal Z}_{\cal M}$, also notice that every node in the finite LOCC tree is a sum of the leaf nodes that descend from that node, see Lemma~\ref{lem4} above. It then follows immediately that every node in the tree is an element of ${\cal Z}_{\cal M}$, and since every point on the considered path is a convex combination of a pair of nodes in the tree---those nodes being the ancestor and descendant that are nearest to the point in question, see the explanation following Observation~\ref{obs1}---these points also lie in ${\cal Z}_{\cal M}$, and this completes the proof for the finite case.
For infinite protocols in LOCC, recall the discussion above that these are the limit of sequences of protocols that arise by simply adding one or more additional rounds at the end of branches present in the previous protocol of the sequence. Therefore in the limit, each branch generates a path as described in the theorem, but in this case some of those branches become infinite in the limit. While these paths now have an infinite number of steps, all those steps continue to be piecewise-local in the limit. As just discussed, for finite protocol ${\cal P}_r$, the corresponding path lies entirely within zonotope ${\cal Z}_{{\cal M}_r}$. In addition, each leaf node in ${\cal P}_r$ that is not also a leaf node in ${\cal P}_{r+1}$ is followed (and still present, though no longer a leaf) in ${\cal P}_{r+1}$ by one more complete local measurement. Since the children produced by that one extra measurement sum to their parent, it is easy to see that ${\cal Z}_{{\cal M}_r}\subseteq {\cal Z}_{{\cal M}_{r+1}}$. Thus, the piecewise-local paths generated by ${\cal P}_r$ not only lie entirely within ${\cal Z}_{{\cal M}_r}$, but also within ${\cal Z}_{{\cal M}_{r^\prime}}$ for all $r^\prime\ge r$. Taking the limit, we have that ${\cal Z}_{\cal M}=\lim_{r\to\infty}{\cal Z}_{{\cal M}_r}$ (since by assumption, the infinite-round protocol implements ${\cal M}$), ${\cal Z}_{{\cal M}_r}\subseteq {\cal Z}_{\cal M}$ for all $r$, and it follows that the path to each of the outcomes of ${\cal P}=\lim_{r\to\infty}{\cal P}_r$ lies entirely within ${\cal Z}_{\cal M}$. This completes the proof.\hspace{\stretch{1}}$\blacksquare$
\noindent Note that monotonicity is important because it excludes the trivial path, which exists for any measurement whatsoever, along $s{\cal I}_{\cal H}$ from ${\cal I}_{\cal H}$ to $0$ and then along $sE_j$ from $0$ to a point on the line segment $(0,E_j]$.
By dropping the condition that the paths be piecewise-local, we can now prove Theorem~\ref{thm2}.
\noindent\emph{Proof of Theorem~\ref{thm2}}. The condition of this theorem, ${\cal M}\in\overline{\textrm{LOCC}}$, means there exists a sequence of finite-round LOCC measurements $\{{\cal M}_r\}$ such that $\lim_{r\to\infty}{\cal M}_r={\cal M}$. This implies that for all $\epsilon>0$ there exists $R\in\mathbb{N}$ such that for all $r>R$,
\begin{align}\label{eqn1014}
\epsilon>d\left({\cal M},{\cal M}_r\right)=d_H({\cal Z}_{\cal M},{\cal Z}_{{\cal M}_r}),
\end{align}
and therefore, ${\cal Z}_{\cal M}=\lim_{r\to\infty}{\cal Z}_{{\cal M}_r}$. We wish to show that there exists a continuous, monotonic path of product operators from ${\cal I}_{\cal H}$ to $(0,E_j]$, for each $j$, and that these paths lie in ${\cal Z}_{\cal M}$. By Theorem~\ref{thm1}, for each leaf node $\hat E_l^{(r)}$ of protocol ${\cal P}_r$ implementing ${\cal M}_r$, we know there is a path of product operators from ${\cal I}_{\cal H}$ to $\hat E_l^{(r)}$ lying entirely within ${\cal Z}_{{\cal M}_r}$.\footnote{Note, however, that ${\cal Z}_{{\cal M}_r}\subseteq {\cal Z}_{{\cal M}_{r+1}}$ need not hold here because we allow earlier rounds to change in going from one protocol in the sequence to the next. Note also that, according to the discussion following Observation~\ref{obs1} and the proof of Theorem~\ref{thm1} for the finite case, this path terminates not just along $(0,\hat E_l^{(r)}]$, but precisely at $\hat E_l^{(r)}$.} Thus, there exists a sequence of continuous, monotonic paths of product operators $\{\Pi_r(s)\}_r$ from ${\cal I}_{\cal H}$ to each outcome $\hat E_l^{(r)}$ of ${\cal P}_r$, each path lying in ${\cal Z}_{{\cal M}_r}$. The condition $\lim_{r\to\infty}{\cal M}_r={\cal M}$ means that for each $E_j\in{\cal M}$ there exists a sequence of indices, $\{l_r\}_r$, such that $\lim_{r\to\infty}\hat E_{l_r}^{(r)}=qE_j$ for some $0<q\le1$ (generally, many such sequences will exist for each $j$). This sequence of indices therefore corresponds to a sequence ${\cal S}_j$ of paths whose endpoints $\hat E_l^{(r)}$ converge to a point on $(0,E_j]$ as $r\to\infty$. Starting with ${\cal S}_j$, we can now show the existence of a continuous, monotonic path of product operators $\Pi(s)\in {\cal Z}_{\cal M}$ from ${\cal I}_{\cal H}$ to a point on $(0,E_j]$. Since $j$ is arbitrary here, this conclusion will then hold for each $j$.
Path $\Pi_r(s)$ consists of a sequence of piecewise-local segments starting from $F_0={\cal I}_{\cal H}$ at node $n=0$ and continuing with each local measurement to node $n=1,2,\cdots,r^\prime$ (we include $r^\prime\le r$ to allow for branches that terminate before round $r$). The $(n+1)$th segment of this path starts at parent node $F_n$ and follows a straight line to child node $F_{n+1}$. Let us parametrize this path in terms of the trace---with $s$ decreasing through the interval $\left[0,D\right]$ starting at $D$ and moving toward $0$. Then, each segment is
\begin{align}\label{eqn202}
\Pi_r(s)=\frac{\left[s-\textrm{Tr}\left({F_{n+1}}\right)\right]F_n+\left[\textrm{Tr}\left({F_n}\right)-s\right]F_{n+1}}{\textrm{Tr}\left({F_n}\right)-\textrm{Tr}\left({F_{n+1}}\right)},~~\textrm{Tr}\left({F_n}\right)\ge s\ge\textrm{Tr}\left({F_{n+1}}\right)
\end{align}
for each node starting at $n=0$ and continuing to $n=r^\prime-1$. We add a constant final piece to this path, $\Pi_r(s)=\hat E_l^{(r)}$ for $\textrm{Tr}\left({\hat E_l^{(r)}}\right)\ge s\ge0$, so that $\Pi_r(s)$ is defined over the full interval $s\in[0,D]$. Taking the trace of \eqref{eqn202}, we see that $s=\textrm{Tr}\left({\Pi_r(s)}\right)$ for $D\ge s\ge\textrm{Tr}\left({\hat E_l^{(r)}}\right)$.
Next, we show that for each $r$, $\Pi_r(s)$ is Lipschitz continuous, which means that $\left\|{\Pi_r(s)-\Pi_r(s^\prime)}\right\|\le K\left\vert{s-s^\prime}\right\vert$ for some constant $K$, referred to as the Lipschitz constant. For $\textrm{Tr}\left({\hat E_l^{(r)}}\right)\le s^\prime\le s$, we have
\begin{align}\label{eqn204}
s-s^\prime&=\textrm{Tr}\left({\Pi_r(s)-\Pi_r(s^\prime)}\right)\notag\\
&=\left\|{\Pi_r(s)-\Pi_r(s^\prime)}\right\|,
\end{align}
which follows for the trace norm because, as is easily seen from how we've constructed these paths, $\Pi_r(s)-\Pi_r(s^\prime)$ is positive semidefinite, and so has non-negative eigenvalues. In addition, for $s^\prime\le s\le\textrm{Tr}\left({\hat E_l^{(r)}}\right)$, $\Pi_r(s)=\Pi_r(s^\prime)$; while for $s^\prime\le\textrm{Tr}\left({\hat E_l^{(r)}}\right)\le s$, $s-s^\prime\ge\textrm{Tr}\left({\Pi_r(s)-\hat E_l^{(r)}}\right)=\textrm{Tr}\left({\Pi_r(s)-\Pi_r(s^\prime)}\right)=\left\|{\Pi_r(s)-\Pi_r(s^\prime)}\right\|$. Thus, $\Pi_r(s)$ is Lipschitz continuous over the entire interval $s\in[0,D]$, as claimed, with Lipschitz constant $K=1$, independent of $r$.
Therefore, we may apply the Arzel\`a--Ascoli theorem~\cite{ArzelaAscoli}, which tells us that for any sequence of Lipschitz continuous paths for which all instances share the same Lipschitz constant, there exists a subsequence that converges uniformly on $[0,D]$ to a continuous limiting path, $\Pi(s)$. In addition, $\Pi(s)$ is also Lipschitz continuous with the same Lipschitz constant. For each $s$, $\Pi(s)$ is thus a limit of points $\Pi_r(s)$ in this subsequence, implying that $\Pi(s)$ is arbitrarily close to a product operator, and is therefore itself a product operator.\footnote{This is easily proved using the Eckart--Young--Mirsky theorem~\cite{EckartYoungMirsky}.} That is, $\Pi(s)$ is a continuous path of product operators. Furthermore, since (1) every path in the subsequence is monotonic and lies within its corresponding zonotope ${\cal Z}_{{\cal M}_r}$; (2) $\lim_{r\to\infty}{\cal Z}_{{\cal M}_r}={\cal Z}_{\cal M}$; and (3) ${\cal Z}_{\cal M}$ is a compact set, see Lemma~\ref{lem1}; then $\Pi(s)$ is also monotonic and lies in ${\cal Z}_{\cal M}$. Finally, since we started with a sequence ${\cal S}_j$ of paths whose endpoints converge to a point on $(0,E_j]$, the chosen subsequence must also have endpoints converging to that same point on $(0,E_j]$, and this completes the proof.\hspace{\stretch{1}}$\blacksquare$
Theorem~\ref{thm2} provides a powerful method for the study of approximate implementation of quantum measurements by LOCC. Consider the local state discrimination problem \cite{Dieks,Walgate,WalgateHardy,Chefles,ChildsLeung,KKB,Hayashi,myLDUPB}, wherein a referee prepares a multipartite system in one of a known set of states and distributes the subsystems to the individual parties, who are then tasked with determining in which one of the states the system was prepared. In the following section, we use Theorem~\ref{thm2} to show for several long-standing unsolved examples of local state discrimination, including in several cases of unextendible product bases \cite{IBM_CMP}, that the parties cannot accomplish the given task with arbitrarily small error when restricted to using LOCC. For completeness, we also show this holds for a few cases that were previously known, often accomplishing our proof in a simpler, more direct way. These examples include cases of perfect discrimination, where the parties always know with certainty which state was prepared, as well as a class of unambiguous state discrimination problems, see Section~\ref{subsecG}. In addition, we use Theorem~\ref{thm2} to show that the POVM constructed in \cite{ChitambarHsiehPeresWootters} for optimal discrimination of the double-trine states, initially studied in the seminal paper of Peres and Wootters \cite{PeresWootters}, cannot be implemented in $\overline{\textrm{LOCC}}$, a result also obtained in \cite{ChitambarHsiehPeresWootters}. Often one discovers that for these measurements, $(0,{\cal I}_{\cal H}]$ is an isolated line segment in the intersection of the set of product operators with ${\cal Z}_{\cal M}$, which according to the following corollary to Theorem~\ref{thm2}, shows directly that ${\cal M}\not\in\overline{\textrm{LOCC}}$.
\begin{cor1}\label{cor1}
Given measurement ${\cal M}$, consisting of POVM elements $E_j$, then ${\cal M}\not\in\overline{\textrm{LOCC}}$ if any one or more of the following are isolated in the intersection of ${\cal Z}_{\cal M}$ with the set of (non-zero) product operators: $(a)$ $(0,E_j]$, for any $j$; or $(b)$ $(0,I_{\cal H}]$.
\end{cor1}
\proof Case $(a)$ is obvious, given Theorem~\ref{thm2}. For case $(b)$, simply note that when $(0,I_{\cal H}]$ is isolated, then the only way to get from $I_{\cal H}$ to $(0,E_j]$ along a continuous path of product operators is to go directly along a line from $I_{\cal H}$ to $0$, and then from $0$ back out along the line segment $(0,E_j]$. However, this is not monotonic, so does not satisfy the conditions of Theorem~\ref{thm2}.\hspace{\stretch{1}}$\blacksquare$
\noindent We will use this corollary in the next section to prove that certain sets of states cannot be distinguished within $\overline{\textrm{LOCC}}$.
\section{Applications}\label{sec4}
In this section, we illustrate the power of Theorem~\ref{thm2} by applying it to a series of examples.
\subsection{Understanding Proposition~$1$ of \cite{KKB}}
As our first illustration of the power of our theorem, we show below that Proposition~$1$ of \cite{KKB} is a direct consequence of Theorem~\ref{thm2}, lending insight into the meaning of Proposition~$1$. We then show that our theorem is able to answer questions their proposition is incapable of answering. Specifically, for the set of states discussed in \cite{KKB}, which they show that their Proposition $1$ \emph{cannot} determine whether there exists a measurement ${\cal M}\in\overline{\textrm{LOCC}}$ that accomplishes perfect state discrimination of the set, we will use Theorem~\ref{thm2} to answer this question in the negative. First, we restate Proposition~$1$ of \cite{KKB}.
{\bf Proposition $1$.}\cite{KKB} \textit{Let $\{\rho_\mu\}$ be a family of $N$ states, such that $\cap_\mu\ker{(\rho_\mu)}$ contains no product vector (except $0$). Then $\{\rho_\mu\}$ can be discriminated perfectly by asymptotic LOCC only if for all $\chi$ with $1/N\le\chi\le1$ there exists a product operator $R\ge0$ obeying $\sum_\mu \textrm{Tr}\left({R\rho_\mu}\right)=1$, $\max_\mu \textrm{Tr}\left({R\rho_\mu}\right)=\chi$, and $\textrm{Tr}\left({R\rho_\mu R\rho_\nu}\right)=0$ for $\mu\ne\nu$.}
\noindent We will now see that the existence of product operator $R$ of this proposition for the given continuous range of $\chi$ follows directly from the continuous paths of product operators required by our Theorem~\ref{thm2}, as is demonstrated by the proof of the following lemma.
\begin{lem5}\label{lem5}
Proposition 1 of \cite{KKB} is a direct consequence of our Theorem~\ref{thm2}.
\end{lem5}
\proof Consider measurement ${\cal M}$ consisting of outcomes $E_j$ and partition the outcomes into distinct sets $J_\mu$ such that $\textrm{Tr}\left({E_j\rho_\mu}\right)=0$ for all $j\not\in J_\mu$. Such a partition must be possible for perfect discrimination of these states by ${\cal M}$, since otherwise there will be errors. For $0\le s\le1$, define
\begin{align}\label{eqn2001}
R(s)=\frac{\Pi(s)}{\sum_\mu\textrm{Tr}\left({\Pi(s)\rho_\mu}\right)},
\end{align}
where $\Pi(s)=\sum_jc_j(s)E_j$, $0\le c_j(s)\le1$, is a continuous path of positive semidefinite product operators, guaranteed to exist by Theorem~\ref{thm2}, from ${\cal I}_{\cal H}$ to $(0,E_{\hat\jmath}]$, for the specific outcome $\hat\jmath$. As such, $R(s)$ is a continuous function of $s$. We set $c_j(0)=1$ for all $j$ so that $\Pi(0)={\cal I}_{\cal H}$ and $R(0)={\cal I}_{\cal H}/N$, and $c_j(1)=0$ for all $j\ne\hat\jmath$, so that $\Pi(1)\propto E_{\hat\jmath}$ and $R(1)=E_{\hat\jmath}/\textrm{Tr}\left({E_{\hat\jmath}\rho_{\hat\mu}}\right)$, where $\hat\jmath\in J_{\hat\mu}$. Note that the condition of Proposition $1$, that $\cap_\mu\ker{\rho_\mu}$ contains no nonzero product vector, is necessary to ensure the denominator of $R(s)$ does not vanish.
We have defined $R(s)\ge0$ such that $\sum_\mu\textrm{Tr}\left({R(s)\rho_\mu}\right)=1$ for all $s$. Since $E_j\ge0$ and $\rho_\mu\ge0$, we have that $\textrm{Tr}\left({E_j\rho_\mu}\right)=0\implies E_j\rho_\mu=0=\rho_\mu E_j$ for all $j\not\in J_\mu$, and we see immediately that $\textrm{Tr}\left({R(s)\rho_\mu R(s)\rho_\nu}\right)=0$ for all $s$ and for all $\mu\ne\nu$. Finally, we must show that there exists $s$ such that $f(s)\equiv\max_\mu{\textrm{Tr}\left({R(s)\rho_\mu}\right)}=\chi$ for all $\chi$ in the range $1/N\le\chi\le1$. First, note that $f(0)=1/N$ and $f(1)=1$, each of which are easily seen from the expressions for $R(0)$ and $R(1)$ given in the preceding paragraph. The result for all $\chi$ then follows immediately due to continuity of $f(s)$ for $0\le s\le1$, which itself follows from continuity of $R(s)$. This ends the proof.\hspace{\stretch{1}}$\blacksquare$
Next, we show that Theorem~\ref{thm2} is, in fact, strictly stronger than Proposition $1$ of \cite{KKB}, by using our theorem to demonstrate the fact (which cannot be shown by Proposition $1$ \cite{KKB}) that no measurement ${\cal M}\in\overline{\textrm{LOCC}}$ accomplishes perfect state discrimination of the set of mutually orthogonal states given in their Eq.~(15). We will see that there is only one measurement ${\cal M}$ that works for this task, and that the requisite (by Theorem~\ref{thm2}) path of product operators in ${\cal Z}_{\cal M}$ is non-existent for at least one of the outcomes in ${\cal M}$.
The (pairwise orthogonal) states in the set Eq.~(15) of \cite{KKB} are
\begin{align}\label{eqn1007}
\vert{\psi_{1}}\rangle&\propto\vert{00}\rangle\notag\\
\vert{\psi_{2}}\rangle&\propto2\vert{01}\rangle-\left(\sqrt{3}+1\right)\vert{10}\rangle-\sqrt{6}\sqrt[\leftroot{0}\uproot{0}4]{3}\vert{11}\rangle\notag\\
\vert{\psi_{3}}\rangle&\propto2\vert{01}\rangle-\left(\sqrt{3}-1\right)\vert{10}\rangle+\sqrt{2}\sqrt[\leftroot{0}\uproot{0}4]{3}\vert{11}\rangle.
\end{align}
The only state orthogonal to all of these is proportional to
\begin{align}\label{eqn1008}
\vert{\phi}\rangle&\propto2\sqrt[\leftroot{0}\uproot{0}4]{3}\vert{01}\rangle+\sqrt[\leftroot{0}\uproot{0}4]{3}\left(\sqrt{3}+1\right)\vert{10}\rangle-\sqrt{2}\vert{11}\rangle.
\end{align}
Therefore, the only (refined) measurement discriminating these states must consist of elements that project onto states that are superpositions of $\vert{\phi}\rangle$ with one (and then being orthogonal to the other two) of the states in Eq.~\eqref{eqn1007}.
There are six such states that are product, as required, including $\vert{\psi_{1}}\rangle$. They are
\begin{align}\label{eqn1009}
\vert{\psi_{11}}\rangle&\propto\vert{\psi_{1}}\rangle=\vert{0}\rangle\otimes\vert{0}\rangle\notag\\
\vert{\psi_{12}}\rangle&\propto\left(\sqrt{2}\sqrt[\leftroot{0}\uproot{0}4]{3}\vert{0}\rangle-\vert{1}\rangle\right)\otimes\left[\sqrt[\leftroot{0}\uproot{0}4]{3}\left(\sqrt{3}+1\right)\vert{0}\rangle-\sqrt{2}\vert{1}\rangle\right]\notag\\
\vert{\psi_{21}}\rangle&\propto\left(\sqrt[\leftroot{0}\uproot{0}4]{3}\vert{0}\rangle-\sqrt{2}\vert{1}\rangle\right)\otimes\vert{1}\rangle\notag\\
\vert{\psi_{22}}\rangle&\propto\vert{1}\rangle\otimes\left[\sqrt[\leftroot{0}\uproot{0}4]{3}\left(\sqrt{3}+1\right)\vert{0}\rangle+\sqrt{2}\vert{1}\rangle\right]\notag\\
\vert{\psi_{31}}\rangle&\propto\left(3^{3/4}\vert{0}\rangle+\sqrt{2}\vert{1}\rangle\right)\otimes\vert{1}\rangle\notag\\
\vert{\psi_{32}}\rangle&\propto\vert{1}\rangle\otimes\left[3^{3/4}\sqrt{2}\vert{0}\rangle-\left(\sqrt{3}+1\right)\vert{1}\rangle\right]
\end{align}
Define $\psi_{ij}=\ket{\psi_{ij}}\bra{\psi_{ij}}$. Allowing for higher-rank measurements, any outcome that identifies $\ket{\psi_i}$ with certainty must be a mixture of the form, $c_{i1}\psi_{i1}+c_{i2}\psi_{i2}$, but these are not product operators (as required for $\overline{\textrm{LOCC}}$) unless $c_{i1}c_{i2}=0$, so the measurement must be rank-$1$ with each outcome proportional to one of the $\psi_{ij}$. From the condition that the measurement is complete, \myeq{eqn100}, we find that $\psi_{12}$ must be excluded. There is one (and only one) complete measurement ${\cal M}$ consisting of elements proportional to the remaining five projectors. We will show that within the intersection of the set of product operators on ${\cal H}$ with the zonotope ${\cal Z}_{\cal M}$ generated by these five projectors, $(0,\psi_{11}]$ is isolated, so that no continuous, monotonic path of product operators exists in ${\cal Z}_{\cal M}$ from ${\cal I}_{\cal H}$ to this segment. By Theorem~\ref{thm2}, we then have that ${\cal M}\not\in\overline{\textrm{LOCC}}$, and the states of \myeq{eqn1007} cannot be discriminated within asymptotic LOCC.
Consider all product operators in ${\cal Z}_{\cal M}$, which are of the form
\begin{align}\label{eqn1010}
{\cal A}\otimes{\cal B}=c_{11}\psi_{11}+\sum_{k=2}^3\sum_{j=1}^2c_{kj}\psi_{kj}.
\end{align}
By writing the right-hand side out explicitly as a $4\times4$ matrix, and noting that in order to be a product operator the four $2\times2$ blocks must be proportional to each other, one finds that (1) if ${\cal B}$ is not diagonal then ${\cal A}={\cal A}_{11}[1]$, which is diagonal ($[j]=\vert{j}\rangle\langle{j}\vert,~j=0,1$); and (2) if ${\cal A}$ is not diagonal, then ${\cal B}={\cal B}_{11}[1]$, which is also diagonal. Significantly for our purposes, neither of these cases is anywhere near being proportional to $\psi_{11}=[0]\otimes[0]$. Therefore, if ${\cal A}\otimes{\cal B}$ is to be close to $(0,\psi_{11}]$, then ${\cal A}$ and ${\cal B}$ are both diagonal, from which one finds that $c_{2j}=\sqrt{3}c_{3j}$ for $j=1,2$, and ${\cal A}\otimes{\cal B}$ reduces to
\begin{align}\label{eqn1012}
{\cal A}\otimes{\cal B}&=(1+\sqrt{3})\{c_{11}^\prime [00]+3c_{31}[01]\notag\\
&+12c_{32}[10]+2(c_{31}+2c_{32})[11]\},
\end{align}
with $c_{11}=(1+\sqrt{3})c_{11}^\prime$. The right-hand side must be a product operator, implying
\begin{align}\label{eqn1013}
(3c_{31})(12c_{32})&=2\left(c_{31}+2c_{32}\right)c_{11}^\prime\notag\\
&\ge4c_{11}^\prime c_{32}.
\end{align}
This implies that either $c_{32}=0$ or $c_{31}\ge c_{11}^\prime/9$. A completely analogous argument shows that either $c_{31}=0$ or $c_{32}\ge c_{11}^\prime/18$. Note that if $c_{31}=0\ne c_{32}$ or $c_{31}\ne0=c_{32}$, then $c_{11}^\prime=0$, a case we may exclude because we are seeking operators near $(0,\psi_{11}]$.
Therefore, Eq.~\eqref{eqn1013} implies either (i) $c_{31}=0=c_{32}$, which leaves us with $c_{11}\psi_{11}\in(0,\psi_{11}]$; or (ii) $c_{32}\ge c_{11}^\prime/18$ and $c_{31}\ge c_{11}^\prime/9$, which is not near $(0,\psi_{11}]$. Hence, the only product operators near $(0,\psi_{11}]$ are proportional to $\psi_{11}$ itself, and we thus see that $(0,\psi_{11}]$ is an isolated line segment in the intersection of the set of product operators with ${\cal Z}_{\cal M}$ as claimed, and this concludes the proof.\footnote{Let us note that function $E_\chi$, found in the appendix of \cite{KKB}, satisfies the conditions of their Proposition $1$ and is of a form very similar to that given here in Eq.~\eqref{eqn2001}. However, while $E_\chi$ provides a path of product operators from ${\cal I}_{\cal H}$ to $(0,\psi_{31}]$ lying in ${\cal Z}_{\cal M}$ in its entirety, it is not continuous at $\chi=1/2$. In any case, it does not approach $(0,\psi_{11}]$, which we have just shown is impossible.}
\subsection{The necessary condition of \cite{ChitambarHsiehHierarchy}}\label{sec0}
Here, we use our Theorem~\ref{thm2} to derive the necessary condition of \cite{ChitambarHsiehHierarchy} that a pair of states can be perfectly discriminated by asymptotic LOCC, see their Theorem~$1$, reproduced here.
\begin{thm3}\label{thm3}\cite{ChitambarHsiehHierarchy}
If $N$-partite states $\rho$ and $\sigma$ can be perfectly distinguished by asymptotic LOCC,
then for each $x\in[1/2, 1]$ there must exist a POVM $\{\Pi_0,\Pi_\lambda\}_{\lambda=1}^D$ such that $\Pi_0$ is a separable operator, each $\Pi_\lambda$ is a product operator, and
\begin{align}
\Tr{\Pi_0\rho} &= 0,\label{eqn1101}\\
\Tr{\Pi_\lambda\rho\Pi_\lambda\sigma} &= 0, ~~~\forall{1\le\lambda\le D},\label{eqn1002}\\
\Tr{\Pi_\lambda[(1-x)\rho -x\sigma]} &= 0, ~~~\forall{1\le\lambda\le D}, \label{eqn1003}
\end{align}
where $D=\prod\limits_{k=1}^Nd_k^2+1$ and $d_k$ is the dimension of system $k$.
\end{thm3}
Let us now show that Theorem~\ref{thm3} follows from our necessary condition, Theorem~\ref{thm2}, that there exists a continuous path of product operators from ${\cal I}_{\cal H}$ to $(0,E_j]$ for each $j$. Here, $\{E_j\}$ is a POVM ${\cal M}$ that perfectly distinguishes $\{\rho,\sigma\}$, so that there exists a partition of ${\cal M}$ into those elements $j\in S_\rho$ that identify $\rho$ and those elements $j\in S_\sigma$ identifying $\sigma$. Then, $\Tr{E_j\rho}=0$ for $j\in S_\sigma$ and $\Tr{E_j\sigma}=0$ for $j\in S_\rho$. Define our continuous path of product operators lying in ${\cal Z}_{\cal M}$ from ${\cal I}_{\cal H}$ to $(0,E_{\hat\jmath}]$ for $\hat\jmath\in S_\rho$ as
\begin{align}\label{eqn1004}
R_{\hat\jmath}(s)=\sum_jc_j^{(\hat\jmath)}(s)E_j,
\end{align}
where $0\le s\le1$, $0\le c_j^{(\hat\jmath)}(s)\le1$, $c_j^{(\hat\jmath)}(0)=1$ for all $j$, and $c_j^{(\hat\jmath)}(1)=\delta_{j\hat\jmath}$. This path is guaranteed to exist by our Theorem~\ref{thm2}.
Set $\Pi_0=\sum_{j\in S_\sigma}E_j$, which is manifestly separable, and satisfies \myeq{eqn1101}, as required for Theorem~\ref{thm3}. Note that $\Tr{E_i\rho E_j\sigma}=0$ for all $i,j\in S_\rho$, because $\Tr{X\sigma}=0$ implies $X\sigma=0=\sigma X$ when $X\ge0$ (since $\sigma\ge0$). Therefore, the replacement $\Pi_\lambda=R_{\hat\jmath}(s)$ satisfies \myeq{eqn1002}. It also satisfies \myeq{eqn1003} for each $x\in[1/2,1]$, as we now demonstrate. Solving for $x$, we find
\begin{align}\label{eqn1005}
x(s)&=\frac{\Tr{R_{\hat\jmath}(s)\rho}}{\Tr{R_{\hat\jmath}(s)\rho}+\Tr{R_{\hat\jmath}(s)\sigma}}\notag\\
&=\frac{\sum_jc_j^{(\hat\jmath)}(s)\Tr{E_j\rho}}{\sum_j\left[c_j^{(\hat\jmath)}(s)\Tr{E_j\rho}+c_j^{(\hat\jmath)}(s)\Tr{E_j\sigma}\right]}
\end{align}
Each term in the sum in the denominator is non-negative and at least some terms do not vanish. Thus, the denominator is nonzero. We know that $\sum_jc_j^{(\hat\jmath)}(s)E_j$ is continuous in $s$, and therefore $x=x(s)$ is a continuous function of $s$. For $s=0$, $c_j^{(\hat\jmath)}(0)=1$ for all $j$ so that $R_{\hat\jmath}(0)={\cal I}_{\cal H}$, and we see that $x(0)=1/2$. At the other end of the range of $s$ we have that $c_j^{(\hat\jmath)}(1)=\delta_{j\hat\jmath}$ so that $R_{\hat\jmath}(1)=E_{\hat\jmath}$. Recalling the choice $\hat\jmath\in S_\rho$ so that $\Tr{E_{\hat\jmath}\sigma}=0$, we have that $x(1)=1$. By continuity, then, $x(s)$ takes on all values between $1/2$ and $1$, and the conditions of the theorem are satisfied by replacing $\Pi_\lambda\to R_{\hat\jmath}(s)$ for any $\hat\jmath$. In the case where $\vert S_\rho\vert<D$, there are not enough values of $\hat\jmath$ available, but we can simply add zero operators to make up the difference. On the other hand, when $\vert S_\rho\vert>D$, we can reduce the number by using the same argument used in \cite{ChitambarHsiehHierarchy}, by way of Carath\'eodory's Theorem. Thus, we see that our Theorem~\ref{thm2}, guaranteeing a continuous path of product operators from ${\cal I}_{\cal H}$ to each segment, $(0,E_j]$, which lies entirely within ${\cal Z}_{\cal M}$, implies Theorem~\ref{thm3}, which is what we set out to prove.
\subsection{Locally discriminating the rotated domino states}
The seminal result of \cite{Bennett9} showed that a set of states could be ``non-local" even if each of those states is a tensor product, and thus not entangled. They used a fairly involved (both technically and conceptually) argument to show that the set of nine states on ${\cal H}={\cal H}_1\otimes{\cal H}_2$ known as the domino states, with each ${\cal H}_j$ of dimension $3$, cannot be perfectly discriminated in $\overline{\textrm{LOCC}}$. The authors in \cite{ChildsLeung} then used a somewhat simplified (but perhaps not quite `simple') argument to prove the same conclusion for a generalization, the rotated domino states, given by
\begin{align}\label{eqn205}
\ket{\Psi_1}&=\ket{1}\otimes\ket{1}\notag\\
\ket{\Psi_2}&=\ket{0}\otimes(\cos{\theta_1}\ket{0}+\sin{\theta_1}\ket{1})\notag\\
\ket{\Psi_3}&=\ket{0}\otimes(\sin{\theta_1}\ket{0}-\cos{\theta_1}\ket{1})\notag\\
\ket{\Psi_4}&=(\cos{\theta_2}\ket{0}+\sin{\theta_2}\ket{1})\otimes\ket{2}\notag\\
\ket{\Psi_5}&=(\sin{\theta_2}\ket{0}-\cos{\theta_2}\ket{1})\otimes\ket{2}\notag\\
\ket{\Psi_6}&=\ket{2}\otimes(\cos{\theta_3}\ket{1}+\sin{\theta_3}\ket{2})\notag\\
\ket{\Psi_7}&=\ket{2}\otimes(\sin{\theta_3}\ket{1}-\cos{\theta_3}\ket{2})\notag\\
\ket{\Psi_8}&=(\cos{\theta_4}\ket{1}+\sin{\theta_4}\ket{2})\otimes\ket{0}\notag\\
\ket{\Psi_9}&=(\sin{\theta_4}\ket{1}-\cos{\theta_4}\ket{2})\otimes\ket{0}
\end{align}
with $0<\theta_j\le\pi/4$ for all $j$ (the original set of \cite{Bennett9} is recovered by setting $\theta_j=\pi/4$ for all $j$). We now give a very simple argument that these states in \myeq{eqn205} cannot be perfectly discriminated within $\overline{\textrm{LOCC}}$. Since these states are a complete, orthogonal basis of ${\cal H}$, the only successful measurement is one consisting of projectors onto each of these nine states, ${\cal M}_{\textrm{dom}}=\{[\Psi_j]\}$. According to Theorem~\ref{thm2}, there must be a continuous, monotonic path of product operators from $I_{\cal H}$ to each outcome of this measurement, and lying in the corresponding zonotope, which we denote as ${\cal Z}_{\textrm{dom}}$. We will show that $(0,I_{\cal H}]$ is an isolated line segment within the intersection of all product operators with ${\cal Z}_{\textrm{dom}}$, which by Corollary~\ref{cor1}, proves that ${\cal M}_\textrm{dom}\not\in\overline{\textrm{LOCC}}$.
\begin{thm5}
${\cal M}_\textrm{dom}\not\in\overline{\textrm{LOCC}}$.
\end{thm5}
\proof Consider $z\in {\cal Z}_{\textrm{dom}}$,
\begin{align}\label{eqn206}
z=\sum_{j=1}^9c_j[\Psi_j],
\end{align}
with $c_j\ge0$ for all $j$. When $z$ is a product operator of the form ${\cal A}\otimes{\cal B}$, the nine $3$-by-$3$ blocks in $z$ must all be proportional to each other. Assuming $z$ is a product operator then, we can write
\begin{align}\label{eqn207}
{\cal A}_{00}{\cal B}&=\begin{bmatrix}
c_2\cos^2{\theta_1}+c_3\sin^2{\theta_1}&\frac{1}{2}(c_2-c_3)\sin{2\theta_1}&0\\
\frac{1}{2}(c_2-c_3)\sin{2\theta_1}&c_2\sin^2{\theta_1}+c_3\cos^2{\theta_1}&0\\
0&0&c_4\cos^2{\theta_2}+c_5\sin^2{\theta_2}
\end{bmatrix}
\end{align}
\begin{align}\label{eqn208}
{\cal A}_{01}{\cal B}&=\begin{bmatrix}
0&0&0\\
0&0&0\\
0&0&\frac{1}{2}(c_4-c_5)\sin{2\theta_2}
\end{bmatrix}
\end{align}
\begin{align}\label{eqn209}
{\cal A}_{11}{\cal B}&=\begin{bmatrix}
c_8\cos^2{\theta_4}+c_9\sin^2{\theta_4}&0&0\\
0&c_1&0\\
0&0&c_4\sin^2{\theta_2}+c_5\cos^2{\theta_2}
\end{bmatrix}
\end{align}
\begin{align}\label{eqn210}
{\cal A}_{12}{\cal B}&=\begin{bmatrix}
\frac{1}{2}(c_8-c_9)\sin{2\theta_4}&0&0\\
0&0&0\\
0&0&0
\end{bmatrix}
\end{align}
\begin{align}\label{eqn211}
{\cal A}_{22}{\cal B}&=\begin{bmatrix}
c_8\sin^2{\theta_4}+c_9\cos^2{\theta_4}&0&0\\
0&c_6\cos^2{\theta_3}+c_7\sin^2{\theta_3}&\frac{1}{2}(c_6-c_7)\sin{2\theta_3}\\
0&\frac{1}{2}(c_6-c_7)\sin{2\theta_3}&c_6\sin^2{\theta_3}+c_7\cos^2{\theta_3}
\end{bmatrix}
\end{align}
One can show that all product operators in ${\cal Z}_{\textrm{dom}}$ are either proportional to $I_{\cal H}$ or have rank no greater than $2$. It is easier, however, to show that all product operators in ${\cal Z}_{\textrm{dom}}$ that are not proportional to $I_{\cal H}$ cannot have full rank equal to $9$ so therefore cannot approach the line segment $(0,I_{\cal H}]$, and this is what we will now do.
If $z={\cal A}\otimes{\cal B}$ is full rank, then ${\cal A}$ and ${\cal B}$ each have full rank equal to $3$. Notice that Eqs.~\eqref{eqn208} and \eqref{eqn210} preclude ${\cal B}$ having full rank, unless ${\cal A}_{01}=0={\cal A}_{12}$, so ${\cal A}$ is diagonal, implying that $c_4=c_5$ and $c_8=c_9$. Since ${\cal A}$ is diagonal and full rank, ${\cal A}_{11}\ne0$, so from \myeq{eqn209}, ${\cal B}$ is diagonal, and then from Eqs.~(\ref{eqn207}) and (\ref{eqn211}) we have that $c_2=c_3$ and $c_6=c_7$. Along with the fact that ${\cal A}_{jj}\ne0$ for all $j$, these conditions lead to
\begin{align}\label{eqn212}
{\cal B}=\frac{1}{{\cal A}_{00}}\begin{bmatrix}
c_2&0&0\\
0&c_2&0\\
0&0&c_4
\end{bmatrix}
=\frac{1}{{\cal A}_{11}}\begin{bmatrix}
c_8&0&0\\
0&c_1&0\\
0&0&c_4
\end{bmatrix}
=\frac{1}{{\cal A}_{22}}\begin{bmatrix}
c_8&0&0\\
0&c_6&0\\
0&0&c_6
\end{bmatrix}
\end{align}
From the first expression for ${\cal B}$ we see that ${\cal B}_{00}={\cal B}_{11}$ and from the third expression, ${\cal B}_{11}={\cal B}_{22}$, so that ${\cal B}\propto I_B$. Considering these expressions for ${\cal B}_{22}$, we have $c_4/{\cal A}_{00}=c_4/{\cal A}_{11}$ or ${\cal A}_{00}={\cal A}_{11}$. Similarly considering the expressions for ${\cal B}_{00}$, we find that ${\cal A}_{11}={\cal A}_{22}$, and therefore, ${\cal A}\propto I_A$, which implies that the only product operators in ${\cal Z}_{\textrm{dom}}$ of rank equal to $9$ are all proportional to $I_{\cal H}$. This means that no non-zero product operators are close to $(0,I_{\cal H}]$ and by Corollary~\ref{cor1}, this completes the proof.{\hspace{\stretch{1}}$\blacksquare$}
\subsection{The Unextendible Product Basis known as Tiles}
If, from the domino set of states---those given in \myeq{eqn205} with $\theta_j=\pi/4$ for all $j$---one omits the states $\ket{\Psi_1}$, $\ket{\Psi_2}$, $\ket{\Psi_4}$, $\ket{\Psi_6}$ and $\ket{\Psi_8}$, and adds one extra state given as $\ket{\Psi_{51}}$ in \myeq{eqn301} below, one obtains a set of states known as Tiles. This is an Unextendible Product Basis (UPB), a set of product states for which there is no other product state orthogonal to each of the states in the original set. UPBs have found important applications in quantum information theory \cite{IBM_PRL,IBM_CMP}. As shown in \cite{FuLeungMancinska}, any measurement, ${\cal M}_{\textrm{Tiles}}$, perfectly discriminating the Tiles UPB cannot be implemented within $\overline{\textrm{LOCC}}$. We provide an alternative proof here, as illustration of Theorem~\ref{thm2} (an argument very similar to that we give here can also be used to prove that no UPB on a $3\times3$ system can be perfectly discriminated using $\overline{\textrm{LOCC}}$, a result also obtained in \cite{FuLeungMancinska}). This proof is more difficult than that given in the preceding subsection, since here we are not working with a complete basis of ${\cal H}$, so we must consider a range of possible measurements.
The states of the Tiles UPB are
\begin{align}\label{eqn301}
\ket{\Psi_{11}}&=\frac{1}{\sqrt{2}}\ket{0}\otimes(\ket{0}-\ket{1})\notag\\
\ket{\Psi_{21}}&=\frac{1}{\sqrt{2}}(\ket{0}-\ket{1})\otimes\ket{2}\notag\\
\ket{\Psi_{31}}&=\frac{1}{\sqrt{2}}\ket{2}\otimes(\ket{1}-\ket{2})\\
\ket{\Psi_{41}}&=\frac{1}{\sqrt{2}}(\ket{1}-\ket{2})\otimes\ket{0}\notag\\
\ket{\Psi_{51}}&=\frac{1}{3}(\ket{0}+\ket{1}+\ket{2})\otimes(\ket{0}+\ket{1}+\ket{2})\notag
\end{align}
Let us characterize all measurements that perfectly discriminate this set. Each outcome of any such measurement must be orthogonal to all but one of the states. For $\overline{\textrm{LOCC}}$, those measurement outcomes must also be product operators. Consider the local parts of these states (for example, $\ket{0}$ is the first party's local part of $\ket{\Psi_{11}}$ and the second party's local part of $\ket{\Psi_{41}}$). Note that, for either party, the local parts of any three of these states span their local Hilbert space, ${\cal H}_j$. Therefore, no state on ${\cal H}_j$ is orthogonal (on that side) to more than two of the $\ket{\Psi_{i1}}$. This means that any product state orthogonal to four of the states of \myeq{eqn301} must be orthogonal on the ${\cal H}_1$ side to two of the states, and to the other two states on the ${\cal H}_2$ side. For each of the states in Tiles, we can use this observation to identify by inspection six states orthogonal to the other four states in Tiles. Defining
\begin{align}\label{eqn302}
[i\pm j]&=(\ket{i}\pm\ket{j})(\bra{i}\pm\bra{j})/2\notag\\
[i\pm j\pm k]&=(\ket{i}\pm\ket{j}\pm\ket{k})(\bra{i}\pm\bra{j}\pm\bra{k})/3\notag\\
[\phi_0]&=(2\ket{0}-\ket{1}-\ket{2})(2\bra{0}-\bra{1}-\bra{2})/6\\
[\phi_2]&=(\ket{0}+\ket{1}-2\ket{2})(\bra{0}+\bra{1}-2\bra{2})/6\notag
\end{align}
the projectors onto the six states orthogonal to all but $\ket{\Psi_{i1}}$ of the states in Tiles, for each $i$, are
\begin{align}\label{eqn303}
E_{11}&=[0]\otimes[0-1]&E_{21}&=[0-1]\otimes[2]\notag\\
E_{12}&=[0+1]\otimes[1-2]&E_{22}&=[1-2]\otimes[1+2]\notag\\
E_{13}&=[0-1]\otimes[1]&E_{23}&=[1]\otimes[1-2]\notag\\
E_{14}&=[\phi_0]\otimes[0]&E_{24}&=[0]\otimes[\phi_2]\notag\\
E_{15}&=[\phi_2]\otimes[1+2]&E_{25}&=[1+2]\otimes[\phi_0]\notag\\
E_{16}&=[0+1+2]\otimes[\phi_0]&E_{26}&=[\phi_0]\otimes[0+1+2]\notag\\\notag\\
E_{31}&=[2]\otimes[1-2]&E_{41}&=[1-2]\otimes[0]\notag\\
E_{32}&=[1+2]\otimes[0-1]&E_{42}&=[0-1]\otimes[0+1]\notag\\
E_{33}&=[1-2]\otimes[1]&E_{43}&=[1]\otimes[0-1]\notag\\
E_{34}&=[\phi_2]\otimes[2]&E_{44}&=[2]\otimes[\phi_0]\notag\\
E_{35}&=[\phi_0]\otimes[0+1]&E_{45}&=[0+1]\otimes[\phi_2]\notag\\
E_{36}&=[0+1+2]\otimes[\phi_2]&E_{46}&=[\phi_2]\otimes[0+1+2]\notag\\\notag\\
E_{51}&=[0+1+2]\otimes[0+1+2]\notag\\
E_{52}&=[0]\otimes[0+1]\notag\\
E_{53}&=[0+1]\otimes[2]\notag\\
E_{54}&=[2]\otimes[1+2]\notag\\
E_{55}&=[1+2]\otimes[0]\notag\\
E_{56}&=[1]\otimes[1]
\end{align}
Any outcome of ${\cal M}_{\textrm{Tiles}}$ identifying $\ket{\Psi_{i1}}$ must be a linear combination of the $E_{ij}$ for fixed $i$ and $j=1,\dots,6$. Noting that the local operators on one (or the other) side of all the $E_{ij}$, for fixed $i$, are linearly independent, the only such linear combinations that are product operators are the individual $E_{ij}$, themselves. Therefore, if ${\cal M}_{\textrm{Tiles}}\in\overline{\textrm{LOCC}}$, each of its outcomes must be proportional to one member in a subset of these (rank-$1$) operators $E_{ij}$. This still leaves us with a range of possible measurements to consider, but our task is simplified by the observation that for any one of these possible measurements, its corresponding zonotope lies within the zonotope defined by this entire set of $30$ projection operators. We will call the latter zonotope ${\cal Z}_{\textrm{Tiles}}$, and then we can prove ${\cal M}_{\textrm{Tiles}}\not\in\overline{\textrm{LOCC}}$ by showing there is no continuous, monotonic path of product operators, lying entirely within ${\cal Z}_{\textrm{Tiles}}$, from $I_{\cal H}$ to any one of these outcomes. We will do this with reference to Corollary~\ref{cor1}, by showing that $(0,I_{\cal H}]$ is isolated within the intersection of ${\cal Z}_{\textrm{Tiles}}$ and the set of product operators.
\begin{thm6}
The unextendible product basis known as Tiles cannot be perfectly discriminated within $\overline{\textrm{LOCC}}$.
\end{thm6}
\proof
Consider an arbitrary point in ${\cal Z}_{\textrm{Tiles}}$,
\begin{align}\label{eqn304}
R=\sum_{jk}c_{jk}E_{jk},
\end{align}
with $c_{jk}\ge0$ for all $j,k$. We seek conditions under which $R={\cal A}\otimes{\cal B}$ is a product operator. Note that each of the local projectors appearing in \myeq{eqn303} can be written as a linear combination of the six linearly independent projectors, $[0],[1],[2],[0+1],[1+2]$, and $[0+1+2]$ on either party. (For example, $[0-1]=[0]+[1]-[0+1]$, $[1-2]=[1]+[2]-[1+2]$, $[\phi_0]=[0]+[1+2]-[0+1+2]$, and $[\phi_2]=[2]+[0+1]-[0+1+2]$.) Rewriting the $E_{jk}$ in terms of these six linearly independent local operators on each side, one finds that the following six product operators do not appear anywhere in this set of $30$ operators:
\begin{align}\label{eqn305}
&[0]\otimes[1+2]&&[1]\otimes[0+1+2]\notag\\
&[2]\otimes[0+1]&&[0+1]\otimes[0]\notag\\
&[1+2]\otimes[2]&&[0+1+2]\otimes[1].
\end{align}
If we rewrite $R$ in the same way, none of these six product operators will appear. We can expand $R$ in terms of the six linearly independent projectors on the $A$-side as
\begin{align}\label{eqn306}
R=[0]\otimes{\cal B}_{0}+[1]\otimes{\cal B}_{1}+[2]\otimes{\cal B}_{2}+[0+1]\otimes{\cal B}_{3}+[1+2]\otimes{\cal B}_{4}+[0+1+2]\otimes{\cal B}_{5},
\end{align}
where ${\cal B}_{0}$ has no term with $[1+2]$ appearing in it, ${\cal B}_{1}$ has no $[0+1+2]$, etc. Now, in order for $R$ to be a product operator, the ${\cal B}_{i}$ must all be proportional to each other. This means that either ${\cal B}_{0}=0$ or none of the ${\cal B}_i$ contain a term with $[1+2]$; either ${\cal B}_1=0$ or none of the ${\cal B}_i$ contain a term with $[0+1+2]$; and so on. In other words, either $[0]$ appears nowhere on the $A$ side, or $[1+2]$ appears nowhere on the $B$ side; either $[1]$ appears nowhere on the $A$ side, or $[0+1+2]$ appears nowhere on the $B$ side; etc. That is, for each of the six operators appearing in \myeq{eqn305}, either the $A$-part is absent from ${\cal A}$ or the $B$ part is absent from ${\cal B}$.
Next, consider what these observations tell us about the presence of product operators in ${\cal Z}_{\textrm{Tiles}}$ that are close to $I_{\cal H}$. First, note that any such operator must have full rank equal to $9$, so that $\textrm{rank}({\cal A})=\textrm{rank}({\cal B})=3$. This means neither ${\cal A}$ nor ${\cal B}$ can be missing more than three of the local operators mentioned in the preceding paragraph. Since together, the two must be missing a total of six of these operators, we reach the conclusion that ${\cal A}$ and ${\cal B}$ must each be missing three of them. Considering all possible trios from the six local operators, $[0],[1],[2],[0+1],[1+2],[0+1+2]$, it is easy to see that the only trio that allows ${\cal A}$ (${\cal B}$) to be close to $I_A$ ($I_B$) is $[0],[1],[2]$. For example, the trio $[0],[1],[0+1]$ gives $\bra{2}{\cal A}\ket{2}=0$ so is not close to $I_A$; the trio
$[0],[1],[1+2]$ has $\bra{1}{\cal A}\ket{2}=\bra{2}{\cal A}\ket{2}$ so is not close to $I_A$; and so on. Therefore, ${\cal A}$, ${\cal B}$ must be diagonal in the standard basis, $[0],[1],[2]$. This condition imposes strong constraints on the coefficients $\{c_{jk}\}$ in \myeq{eqn304}, which reduce the expression for $R$ to the required diagonal form, with (nonzero) entries $[x~~x~~y~~w~~t~~y~~w~~z~~z]$. The first set of three entries is ${\cal A}_{00}{\cal B}$, so $x\propto{\cal B}_{00}={\cal B}_{11}$, and the last set of three entries is ${\cal A}_{22}{\cal B}$, so $z\propto{\cal B}_{22}={\cal B}_{11}$. That is, ${\cal B}$ must be proportional to $I_B$. This tells us that $x=y=w=t=z$, from which we see that $R=xI_{\cal H}$. Therefore, the only product operators in ${\cal Z}_{\textrm{Tiles}}$ close to $(0,I_{\cal H}]$ are proportional to $I_{\cal H}$, itself, and this completes the proof. {\hspace{\stretch{1}}$\blacksquare$}
\subsection{The Unextendible Product Basis known as Shifts}
We next turn our attention to the Shifts UPB, which consists of the set of four three-qubit states,
\begin{align}\label{eqn401}
\ket{\Psi_{11}}&=\ket{000}\notag\\
\ket{\Psi_{21}}&=\ket{+1-}\notag\\
\ket{\Psi_{31}}&=\ket{1-+}\notag\\
\ket{\Psi_{41}}&=\ket{-+1},
\end{align}
where
\begin{align}\label{eqn402}
\ket{\pm}=\left(\ket{0}\pm\ket{1}\right)/\sqrt{2}.
\end{align}
This UPB can be analyzed using the exact same approach as was used in the preceding subsection for Tiles. The local parts of any two of these states span their local Hilbert space, ${\cal H}_j$. Therefore, no state on ${\cal H}_j$ is orthogonal (on that side) to more than one of the $\ket{\Psi_{i1}}$. This means that any product state orthogonal to three of the states of \myeq{eqn401} must be orthogonal to one of those three states on ${\cal H}_1$, one on ${\cal H}_2$, and the third on ${\cal H}_3$. For each of the states in Shifts, we can use this observation to identify by inspection six states orthogonal to the other three states in Shifts. The projectors onto the six states orthogonal to all but $\ket{\Psi_{i1}}$ of the states in Shifts, for each $i$, are
\begin{align}\label{eqn403}
E_{11}&=[000]&E_{21}&=[+1-]\notag\\
E_{12}&=[-+0]&E_{22}&=[1+0]\notag\\
E_{13}&=[---]&E_{23}&=[1--]\notag\\
E_{14}&=[0-+]&E_{24}&=[0-1]\notag\\
E_{15}&=[+++]&E_{25}&=[010]\notag\\
E_{16}&=[+0-]&E_{26}&=[++1]\notag\\\notag\\
E_{31}&=[1-+]&E_{41}&=[-+1]\notag\\
E_{32}&=[100]&E_{42}&=[10-]\notag\\
E_{33}&=[-10]&E_{43}&=[1++]\notag\\
E_{34}&=[--1]&E_{44}&=[-1-]\notag\\
E_{35}&=[+1+]&E_{45}&=[001]\notag\\
E_{36}&=[+01]&E_{46}&=[01+]
\end{align}
Any outcome of a measurement that perfectly discriminates Shifts, which identifies $\ket{\Psi_{i1}}$, must be a linear combination of the $E_{ij}$ for fixed $i$ and $j=1,\dots,6$. Once again, while we have a range of possible measurements to consider, our task is simplified by the fact that for any one of these possible measurements, its corresponding zonotope lies within the zonotope defined by this entire set of $24$ projection operators given in \myeq{eqn403}. Denoting the latter zonotope as ${\cal Z}_{\textrm{Shifts}}$, we can prove Shifts is not in $\overline{\textrm{LOCC}}$ by showing there is no continuous, monotonic path of product operators, lying entirely within ${\cal Z}_{\textrm{Shifts}}$, from $I_{\cal H}$ to any one of these outcomes. Once again, we use Corollary~\ref{cor1}, and show that $(0,I_{\cal H}]$ is isolated within the intersection of ${\cal Z}_{\textrm{Shifts}}$ and the set of product operators.
\begin{thm7}
The unextendible product basis known as Shifts cannot be perfectly discriminated within $\overline{\textrm{LOCC}}$.
\end{thm7}
\proof
Consider an arbitrary point in ${\cal Z}_{\textrm{Shifts}}$,
\begin{align}\label{eqn404}
R=\sum_{jk}c_{jk}E_{jk}.
\end{align}
We seek conditions under which $R={\cal A}\otimes{\cal B}\otimes{\cal C}$ is a product operator. Note that each of the local projectors appearing in \myeq{eqn403} can be written as a linear combination of the three linearly independent projectors, $[0],[+]$, and $[-]$.\footnote{We have chosen to eliminate $[1]$ here, because $[111]$ is not one of the $E_{jk}$. If instead we had eliminated $[0]$, for example, then since $E_{11}=[000]$, all possible combinations of $[1],[+],[-]$, such as $[111],[1+-],[+0-]$, etc., would appear in $R$, and the approach of the preceding section would not work. Due to the presence of $E_{13}$ and $E_{15}$, the same conclusion would hold for eliminating $[-]$ or $[+]$.} Rewriting the $E_{jk}$ in terms of these three linearly independent local operators, one finds that the following three product operators do not appear anywhere in this set of $24$ operators:
\begin{align}\label{eqn405}
[0+-]&&[+-0]&&[-0+].
\end{align}
If we rewrite $R$ in the same way, none of these three product operators will appear. We can expand $R$ as
\begin{align}\label{eqn406}
R=[0]\otimes{\cal B}_{0}\otimes{\cal C}_{0}+[+]\otimes{\cal B}_{+}\otimes{\cal C}_{+}+[-]\otimes{\cal B}_{-}\otimes{\cal C}_{-},
\end{align}
where ${\cal B}_{0}\otimes{\cal C}_{0}$ has no term with $[+-]$ appearing in it, ${\cal B}_{+}\otimes{\cal C}_{+}$ has no $[-0]$, and ${\cal B}_{-}\otimes{\cal C}_{-}$ has no $[0+]$. Now, in order for $R$ to be a product operator, the ${\cal B}_{i}\otimes{\cal C}_{i}$ must all be proportional to each other, which also means that all the ${\cal B}_i$ are proportional to each other and all the ${\cal C}_i$ are proportional to each other. This means that either ${\cal B}_{0}\otimes{\cal C}_{0}=0$ or none of the ${\cal B}_i\otimes{\cal C}_{i}$ contain a term with $[+-]$, in turn implying none of the ${\cal B}_i$ has $[+]$ and/or none of the ${\cal C}_{i}$ has $[-]$. Similarly from the $[+]$ on the $A$ side, either ${\cal B}_+\otimes{\cal C}_+=0$ or none of the ${\cal B}_i$ contain a term with $[-]$ and/or none of the ${\cal C}_i$ have $[0]$; and from the $[-]$ on the $A$ side, either ${\cal B}_-\otimes{\cal C}_-=0$ or none of the ${\cal B}_i$ contain a term with $[0]$ and/or none of the ${\cal C}_{i}$ have $[+]$. In other words, for each of the three operators appearing in \myeq{eqn405}, the $A$-part is absent from ${\cal A}$ and/or the $B$ part is absent from ${\cal B}$ and/or the $C$ part is absent from ${\cal C}$.
Next, consider what these observations tell us about the presence of product operators in ${\cal Z}_{\textrm{Shifts}}$ that are close to $I_{\cal H}$. First, note that any such operator must have full rank equal to $8$, so that $\textrm{rank}({\cal A})=\textrm{rank}({\cal B})=\textrm{rank}({\cal C})=2$. This means each of ${\cal A}$, ${\cal B}$, and ${\cal C}$ must have contributions from at least two of the operators $[0],[+]$, and $[-]$, implying none can be missing more than one of these local operators. Now, in order to exclude all three of the operators in \myeq{eqn405}, the three parties combined must be missing a total of at least three of those local operators. Hence, we can conclude that ${\cal A}$, ${\cal B}$, and ${\cal C}$ must each be missing exactly one of them, so each must be a linear combination of exactly two of them (with non-vanishing coefficients). Considering all possible pairs from the three local operators, $[0],[+],[-]$, it is easy to see that the only pair that allows ${\cal A}$ (or ${\cal B}$ or ${\cal C}$) to be close to $I_A$ (or $I_B$ or $I_C$) is $[+],[-]$. Therefore, ${\cal A}$, ${\cal B}$, and ${\cal C}$ must each be diagonal in the $[+],[-]$ basis, which constrains the coefficients $\{c_{jk}\}$ in \myeq{eqn404} and reduces the expression for $R$ to the required diagonal form,
\begin{align}\label{eqn407}
{\cal A}\otimes{\cal B}\otimes{\cal C}=[+]\otimes\left(t[++]+x[+-]+y[-+]+x[--]\right)+[-]\otimes\left(z[++]+z[+-]+y[-+]+w[--]\right).
\end{align}
The two expressions in parentheses must be proportional to each other in order for this to be a product operator. From the $[-+]$ terms, we see that the two expressions must actually be equal. Therefore, $x=z=t=w$, which reduces this to
\begin{align}\label{eqn408}
{\cal A}\otimes{\cal B}\otimes{\cal C}=I_A\otimes\left(x[+]\otimes I_C+[-]\otimes\left(y[+]+x[-]\right)\right).
\end{align}
This is not a product operator unless $x=y$, leaving us with ${\cal A}\otimes{\cal B}\otimes{\cal C}=xI_{\cal H}$. That is, the only product operators in ${\cal Z}_{\textrm{Shifts}}$ that are close to $(0,I_{\cal H}]$ are proportional to $I_{\cal H}$, itself, and this completes the proof. {\hspace{\stretch{1}}$\blacksquare$}
\subsection{The Class of Unextendible Product Bases known as GenTiles$2$}
There are two generalizations of the Tiles UPB, GenTiles$1$ and GenTiles$2$, each of which is an infinite class of UPBs on bipartite systems that have a tiling representation reminiscent of Tiles. We first discuss GenTiles$2$, leaving GenTiles$1$ to the next subsection. GenTiles$2$ is a UPB on an $m\times n$ Hilbert space ${\cal H}$, which for all $n>3$, $m\ge3$, and $n\ge m$, consists of states
\begin{align}\label{eqn3001}
\ket{S_{j}}&=\frac{1}{\sqrt{2}}\left(\ket{j}-\ket{j+1\mmod{m}}\right)\otimes\ket{j}\notag\\
\ket{L_{jk}}&=\ket{j}\otimes\frac{1}{\sqrt{n-2}}\left(\sum_{i=0}^{m-3}\omega^{ik}\ket{i+j+1\mmod{m}}+\sum_{i=m-2}^{n-3}\omega^{ik}\ket{i+2}\right),\\
\ket{F}&=\frac{1}{\sqrt{nm}}\sum_{i=0}^{m-1}\sum_{j=0}^{n-1}\ket{i}\otimes\ket{j},\notag
\end{align}
with $0\le j\le m-1$, $1\le k\le n-3$, and $\omega=e^{2\pi i/(n-2)}$. We will also use the states $\ket{T_{j}}=\left(\ket{j}+\ket{j+1\mmod{m}}\right)\otimes\ket{j}/\sqrt{2}$ and $\ket{L_{j0}}$ in our arguments, even though they are not a part of the UPB. The states $\ket{L_{jk}}$ with fixed $j$ share the same support and all lie within one of the long tiles of length $n-2$ (and labeled as $L_0$ to $L_6$ for the $m=7$ case) in Figure~\ref{fig103}. The short tiles representing the $\ket{S_j}$, labeled $S_0$ to $S_6$ in the figure, are each of length two, and the state $\ket{F}$, which covers the entire figure, is not shown.
\begin{figure}
\caption{Tiling representation of the GenTiles$2$ UPB for the $m=7$ case, showing the long tiles $L_0$ to $L_6$, each of length $n-2$, and the short tiles $S_0$ to $S_6$, each of length two. The state $\ket{F}$, which covers the entire figure, is not shown.}
\label{fig103}
\end{figure}
Here we prove the theorem,
\begin{thm9}\label{thm7}
The unextendible product basis known as GenTiles$2$ cannot be perfectly discriminated within $\overline{\textrm{LOCC}}$.
\end{thm9}
\noindent As usual, the first step is to identify possible separable measurements that accomplish perfect discrimination. Toward this end, we prove the lemma,
\begin{lem11}\label{lem11}
The only product states in ${\cal H}$ that are orthogonal to all but one of the states in GenTiles$2$ are $\ket{F}$ and the sets of states $\{\ket{S_{j}}\}$, $\{\ket{T_j}\}$, $\{\ket{L_{jk}}\}$, $\sqrt{2}\ket{L_{j0}}-\sqrt{n-2}\left(\ket{T_{j-1}}-\ket{S_{j-1}}\right)$, and $\sqrt{2}\ket{L_{j0}}-\sqrt{n-2}\left(\ket{T_{j}}+\ket{S_{j}}\right)$, for $j=0,\cdots,m-1$ and $k=0,\cdots,n-3$, unless $n=4$. When $n=4$, states $\left(\ket{0}+\ket{1}+\ket{2}\right)\otimes\left(\sum_{i\ne j}^{3}\ket{i}-3\ket{j}\right)$ are also allowed when $m=3$; $\ket{L_{j0}}+\ket{L_{j1}}-\ket{T_{j+1}}$ are also allowed when $m=3,4$; and $\ket{L_{j0}}-\ket{L_{j1}}-\ket{T_{j+2}}$ are also allowed when $m=4$. [All indices are to be understood as $\mmod{m}$ and $j=0,\cdots,m-1$.]
\end{lem11}
\noindent The proof is given in Appendix~\ref{AppB1}. Given this result, we then prove in Appendix~\ref{AppB2}, that
\begin{lem18}\label{lem18}
The only complete separable measurement that perfectly discriminates GenTiles$2$ consists of projectors onto the states, $\ket{S_j},\ket{T_j}$, and $\ket{L_{jk}}$, for $j=0,\cdots,m-1$ and $k=0,\cdots,n-3$.
\end{lem18}
We are now in a position to prove Theorem~\ref{thm7}.
\proof From Lemma~\ref{lem18}, we have that the only separable measurement that perfectly discriminates GenTiles$2$ is ${\cal M}_{GT2}=\{[S_j],[T_j],[L_{jk}]\}$ for $j=0,\cdots,m-1$ and $k=0,\cdots,n-3$, and its associated zonotope will be denoted ${\cal Z}_{GT2}$. The proof of this theorem will use Corollary~\ref{cor1} and closely follows the approach of the preceding subsections.
Defining $\ket{L_{jk}}=\ket{j}\otimes\ket{l_{jk}}$, so that
\begin{align}\label{eqn3000}
\ket{l_{jk}}=\frac{1}{\sqrt{n-2}}\left(\sum_{i=0}^{m-3}\omega^{ik}\ket{i+j+1\mmod{m}}+\sum_{i=m-2}^{n-3}\omega^{ik}\ket{i+2}\right),
\end{align}
\noindent and $[j_\pm]=\left(\ket{j}\pm\ket{j+1\mmod{m}}\right)\left(\bra{j}\pm\bra{j+1\mmod{m}}\right)/2$, any operator $R\in{\cal Z}_{GT2}$ may be written
\begin{align}\label{eqn3002}
R&=\sum_{j=0}^{m-1}\left(a_j[S_j]+b_j[T_j]+\sum_{k=0}^{n-3}c_{jk}[L_{jk}]\right)\notag\\
&=\sum_{j=0}^{m-1}\left([j]\otimes{\cal B}_j+[j_-]\otimes{\cal B}_{m+j}\right),
\end{align}
with $a_j,b_j,c_{jk}\ge0$ for all $j,k$. We have chosen $\{[j],[j_-]\}_{j=0}^{m-1}$ because it constitutes a linearly independent set, which means that the set of operators, $\{{\cal B}_i\}_{i=0}^{2m-1}$, must all be proportional to each other in order that $R$ is a product operator, as we require for the application of Corollary~\ref{cor1}. Note also that $[j_+]=[j]+[j+1\mmod{m}]-[j_-]$.
Expanding, we have
\begin{align}\label{eqn3003}
R&=\sum_{j=0}^{m-1}\left[a_j[j_-]\otimes[j]+b_j\left([j]+[j+1\mmod{m}]-[j_-]\right)\otimes[j]+[j]\otimes\sum_kc_{jk}[l_{jk}]\right]\notag\\
&=\sum_{j=0}^{m-1}\left[[j]\otimes\left(b_j[j]+\sum_kc_{jk}[l_{jk}]\right)+b_j[j+1\mmod{m}]\otimes[j]+[j_-]\otimes\left(a_j[j]-b_j[j]\right)\right]\notag\\
&=\sum_{j=0}^{m-1}\left[[j]\otimes\left(b_j[j]+b_{j-1}[j-1\mmod{m}]+\sum_kc_{jk}[l_{jk}]\right)+\left(a_j-b_j\right)[j_-]\otimes[j]\right].
\end{align}
From this, we identify ${\cal B}_j=b_j[j]+b_{j-1}[j-1\mmod{m}]+\sum_kc_{jk}[l_{jk}]$ and ${\cal B}_{m+j}=\left(a_j-b_j\right)[j]$ for $j=0,\cdots,m-1$. Since all the ${\cal B}_i$ must be proportional to each other, they must all have rank equal to $1$, as do all the ${\cal B}_{m+j}$, unless $a_j=b_j$ for all $j$. To use Corollary~\ref{cor1}, we seek all $R\in{\cal Z}_{GT2}$ that are close to $I_{\cal H}$, so that rank($R$)$=mn$ and rank(${\cal B}_j$)$=n$ for all $j$. Thus, $a_j=b_j$, and
\begin{align}\label{eqn3004}
R&=\sum_{j=0}^{m-1}[j]\otimes\left(b_j[j]+b_{j-1}[j-1\mmod{m}]+\sum_kc_{jk}[l_{jk}]\right).
\end{align}
Noting that $\inpd{j}{l_{jk}}=0=\inpd{j-1\mmod{m}}{l_{jk}}$, compare
\begin{align}\label{eqn3005}
{\cal B}_0&=b_0[0]+b_{m-1}[m-1]+\sum_kc_{0k}[l_{0k}],\notag\\
{\cal B}_1&=b_1[1]+b_0[0]+\sum_kc_{1k}[l_{1k}].
\end{align}
These must be proportional to each other, but the coefficients of $[0]$ are equal to $b_0$ in both cases, indicating that in fact, ${\cal B}_0={\cal B}_1$. By similarly comparing each pair of operators ${\cal B}_j,{\cal B}_{j+1},j=0,\cdots,m-2$ in succession, we easily see that ${\cal B}_j={\cal B}$, independent of $j$. This indicates that $R=I_{{\cal H}_1}\otimes{\cal B}$ for $R$ a full rank product operator in ${\cal Z}_{GT2}$. Note that $\bra{i}{\cal B}_j\ket{j}=b_j\delta_{ij}$, which tells us that ${\cal B}_j$, and therefore ${\cal B}$, are diagonal in the standard basis. Therefore, $\sum_kc_{jk}[l_{jk}]$ must also be diagonal in that basis.
Expand
\begin{align}\label{eqn3006}
(n-2)\sum_kc_{jk}[l_{jk}]&=\sum_{i,i^\prime=0}^{m-3}\left(\sum_kc_{jk}\omega^{(i-i^\prime)k}\right)\ket{i+j+1\mmod{m}}\bra{{i^\prime+j+1\mmod{m}}}\notag\\
&+\sum_{i,i^\prime=m-2}^{n-3}\left(\sum_kc_{jk}\omega^{(i-i^\prime)k}\right)\ket{i+2}\bra{i^\prime+2}\notag\\
&+\left[\sum_{i=0}^{m-3}\sum_{i^\prime=m-2}^{n-3}\left(\sum_kc_{jk}\omega^{(i-i^\prime)k}\right)\ket{i+j+1\mmod{m}}\bra{i^\prime+2}+h.c.\right],
\end{align}
where $h.c.$ stands for Hermitian conjugate, and the sums over $k$ run from $k=0$ to $k=n-3$. In order that ${\cal B}_j$ is diagonal in the standard basis, \myeq{eqn3006} tells us that $\sum_kc_{jk}\omega^{pk}=0$ for $p=1,\cdots,n-3$. This is only possible if $c_{jk}=c_{j0}$, independent of $k$. Then, \myeq{eqn3006} reduces to
\begin{align}\label{eqn3007}
\sum_kc_{jk}[l_{jk}]&=c_{j0}\left(\sum_{i=0}^{m-3}[i+j+1\mmod{m}]+\sum_{i=m-2}^{n-3}[i+2]\right)=c_{j0}\left(I_{{\cal H}_2}-[j-1\mmod{m}]-[j]\right).
\end{align}
This leaves us with
\begin{align}\label{eqn3008}
{\cal B}_j&=(b_j-c_{j0})[j]+(b_{j-1}-c_{j0})[j-1\mmod{m}]+c_{j0}I_{{\cal H}_2}.
\end{align}
Comparing all the ${\cal B}_j$ and recalling they must be independent of $j$, we see that $b_j=c_{j0}=b_{j-1}$ for all $j$, implying these coefficients are all independent of $j$, as well, and we have that ${\cal B}_j\propto I_{{\cal H}_2}$. Thus, $R\propto I_{\cal H}$ are the only product operators close to $I_{\cal H}$ in ${\cal Z}_{GT2}$ and by Corollary~\ref{cor1}, this completes the proof.{\hspace{\stretch{1}}$\blacksquare$}
\subsection{The Class of Unextendible Product Bases known as GenTiles$1$}
GenTiles$1$ is an unextendible product basis (UPB) on an $n\times n$ Hilbert space ${\cal H}$, which for all even $n\ge4$ consists of states
\begin{align}\label{eqn1001}
\ket{V_{km}}&=\frac{1}{\sqrt{n}}\ket{k}\otimes\sum_{j=0}^{\frac{n}{2}-1}\omega^{jm}\ket{j+k+1\mmod{n}},\notag\\
\ket{H_{km}}&=\frac{1}{\sqrt{n}}\sum_{j=0}^{\frac{n}{2}-1}\omega^{jm}\ket{j+k\mmod{n}}\otimes\ket{k},\\
\ket{F}&=\frac{1}{n}\sum_{i,j=0}^{n-1}\ket{i}\otimes\ket{j},\notag
\end{align}
with $m=1,\cdots,n/2-1$ and $k=0,\cdots,n-1$ and $\omega=e^{4\pi i/n}$. We will also use the states $\ket{V_{k0}}$ and $\ket{H_{k0}}$ in these arguments, even though they are not a part of the UPB. The states $\ket{V_{km}}$ with fixed $k$ share the same support, lying within the $V_k$-tile shown (for the $n=6$ case) in Figure~\ref{fig101}. Similarly the $H_k$-tile shown in that figure contains all the states $\ket{H_{km}}$ for fixed $k$. These tiles are all of length $n/2$.
\begin{figure}
\caption{Tiling representation of the GenTiles$1$ UPB for the $n=6$ case, showing the $V_k$-tiles and $H_k$-tiles, each of length $n/2$. The state $\ket{F}$, which covers the entire figure, is not shown.}
\label{fig101}
\end{figure}
Here we prove the theorem,
\begin{thm9}\label{thm9}
The unextendible product basis known as GenTiles$1$ cannot be perfectly discriminated within $\overline{\textrm{LOCC}}$.
\end{thm9}
\noindent We will see that for each $n$, there is one and only one measurement that accomplishes perfect discrimination of the given set of states. For this complete measurement ${\cal M}_\textrm{GT$1$}$ and the zonotope ${\cal Z}_\textrm{GT$1$}$ that it generates, we find once again that $(0,I_{\cal H}]$ is an isolated line segment within the intersection of ${\cal Z}_\textrm{GT$1$}$ with the set of all product operators acting on ${\cal H}$, which by Corollary~\ref{cor1}, is what we need to prove this theorem. Note that the case of GenTiles$1$ with $n=4$ is identical to GenTiles$2$ on a $4\times4$ system, so we will here confine the discussion to even $n\ge6$.
We start with the following lemma telling us which product states may be included in one of these measurements.
\begin{lem8}\label{lem8}
The only product states in ${\cal H}$ that are orthogonal to all but one of the states in GenTiles$1$ are $\ket{F}$ and the sets of states $\{\ket{H_{km}}\}$ and $\{\ket{V_{km}}\}$, for $k=0,\cdots,n-1$ and $m=0,\cdots,n/2-1$ ($n\ge6$).
\end{lem8}
\noindent The next lemma tells us what the unique separable measurement is that serves our present purposes.
\begin{lem6}\label{lem6}
The sole complete separable measurement, ${\cal M}_\textrm{GT$1$}$, that perfectly distinguishes GenTiles$1$ consists of projectors onto the states $\{\ket{H_{km}}\}$ and $\{\ket{V_{km}}\}$, for $k=0,\cdots,n-1$ and $m=0,\cdots,n/2-1$.
\end{lem6}
\noindent Lemmas~\ref{lem8} and \ref{lem6} may be proven using arguments that are entirely analogous to the proofs of Lemmas~\ref{lem11} and \ref{lem18} for GenTiles$2$, respectively, so we omit the details here.
The proof that ${\cal M}_{GT1}\not\in\overline{\textrm{LOCC}}$ is more challenging than others we've given, mainly due to the difficulty in identifying an appropriate, simple, linearly independent set of operators, as was done for the sets of states considered in preceding subsections. Therefore, we leave the proof to Appendix~\ref{AppA3}, where we show that the only product operators in ${\cal Z}_\textrm{GT$1$}$ that have rank greater than $n/2$ are all proportional to $I_{\cal H}$. Therefore, by Corollary~\ref{cor1}, we are led to the conclusion in Theorem~\ref{thm9}.
\subsection{Minimum error discrimination: The double trine ensemble of Peres and Wootters}
The preceding examples have involved perfect discrimination of quantum states. Here, we consider a different case, that of achieving the minimum possible error in discriminating the double trine ensemble, originally discussed in the seminal paper of Peres and Wootters \cite{PeresWootters}. The double trine ensemble consists of states $\ket{\psi_i}\otimes\ket{\psi_i}$ with $\ket{\psi_i}=U^i\ket{0}$ and $U=\exp(-i\pi\sigma_y/3)$ with $\sigma_y$ the usual Pauli operator. It was shown in \cite{ChitambarHsiehPeresWootters} that for discriminating the double trine ensemble, the minimum error achievable using global operations can only be achieved using $\overline{\textrm{LOCC}}$ if the following orthogonal set of states can be \emph{perfectly} discriminated using $\overline{\textrm{LOCC}}$:
\begin{align}\label{eqn8001}
\ket{F_1}&=\frac{1}{\sqrt{6}}\left[\left(\sqrt{2}+1\right)\ket{00}-\left(\sqrt{2}-1\right)\ket{11}\right]\notag\\
\ket{F_2}&=\frac{1}{2\sqrt{3}}\left[\left(\sqrt{2}-1\right)\ket{00}+\sqrt{3}\left(\ket{01}+\ket{10}\right)+\left(\sqrt{2}+1\right)\ket{11}\right]\notag\\
\ket{F_3}&=\frac{1}{2\sqrt{3}}\left[\left(\sqrt{2}-1\right)\ket{00}-\sqrt{3}\left(\ket{01}+\ket{10}\right)+\left(\sqrt{2}+1\right)\ket{11}\right]
\end{align}
\noindent The singlet state, $\left(\ket{01}-\ket{10}\right)/\sqrt{2}$, is orthogonal to each of the $\ket{F_i}$. Consistent with the discussion in \cite{ChitambarHsiehPeresWootters}, we find via a straightforward argument that any (refined) separable measurement perfectly distinguishing the states of \myeq{eqn8001} must consist of rank-$1$ operators proportional to projectors onto the (unnormalized) product states,
\begin{align}\label{eqn8002}
\ket{\phi_{11}}&=\left(x\ket{0}+\ket{1}\right)\otimes\left(x\ket{0}-\ket{1}\right)\notag\\
\ket{\phi_{12}}&=\left(x\ket{0}-\ket{1}\right)\otimes\left(x\ket{0}+\ket{1}\right)\notag\\
\ket{\phi_{21}}&=\left(z\ket{0}+x\ket{1}\right)\otimes\left(y\ket{0}+x\ket{1}\right)\notag\\
\ket{\phi_{22}}&=\left(y\ket{0}+x\ket{1}\right)\otimes\left(z\ket{0}+x\ket{1}\right)\notag\\
\ket{\phi_{31}}&=\left(z\ket{0}-x\ket{1}\right)\otimes\left(y\ket{0}-x\ket{1}\right)\notag\\
\ket{\phi_{32}}&=\left(y\ket{0}-x\ket{1}\right)\otimes\left(z\ket{0}-x\ket{1}\right),
\end{align}
with $x=\sqrt{2}+1$, $y=\sqrt{3}+\sqrt{2}$, and $z=\sqrt{3}-\sqrt{2}$. We note that $I_{\cal H}$ does in fact lie within the zonotope defined by this set of operators, ${\cal Z}_{\cal M}$; the corresponding separable measurement, ${\cal M}$, perfectly distinguishes the states of \myeq{eqn8001}. We will show that there are no product operators in ${\cal Z}_{\cal M}$ that are arbitrarily close to $I_{\cal H}$, which by our Corollary~\ref{cor1} provides an alternative proof that the globally achievable minimum error for discriminating the double trine ensemble cannot be achieved using $\overline{\textrm{LOCC}}$, a result previously obtained in \cite{ChitambarHsiehPeresWootters}.
Each point in ${\cal Z}_{\cal M}$ is of the form $R=\sum_{ij}c_{ij}\phi_{ij}$ with $\phi_{ij}=\ket{\phi_{ij}}\bra{\phi_{ij}}$. We seek $R={\cal A}\otimes{\cal B}$ and close to $I_{\cal H}$. Writing ${\cal A}_{kl}{\cal B}_{mn}=\bra{km}R\ket{ln}$, $\hat c_1=c_{11}+c_{12},\hat c_2=c_{21}+c_{31},\hat c_3=c_{22}+c_{32},\hat c_4=c_{11}-c_{12},\hat c_5=c_{21}-c_{31},\hat c_6=c_{22}-c_{32}$, we have
\begin{align}\label{eqn8003}
{\cal A}_{00}{\cal B}_{00}&=x^4\hat c_1+\hat c_2+\hat c_3\notag\\
{\cal A}_{00}{\cal B}_{11}&=x^2\left(\hat c_1+z^2\hat c_2+y^2\hat c_3\right)\notag\\
{\cal A}_{01}{\cal B}_{01}&=x^2\left(-\hat c_1+\hat c_2+\hat c_3\right)\notag\\
{\cal A}_{11}{\cal B}_{00}&=x^2\left(\hat c_1+y^2\hat c_2+z^2\hat c_3\right)\notag\\
{\cal A}_{11}{\cal B}_{11}&=\hat c_1+x^4\left(\hat c_2+\hat c_3\right)\notag\\
{\cal A}_{00}{\cal B}_{01}&=-x^3\hat c_4+x\left(z\hat c_5+y\hat c_6\right)\notag\\
{\cal A}_{01}{\cal B}_{00}&=x^3\hat c_4+x\left(y\hat c_5+z\hat c_6\right)\notag\\
{\cal A}_{01}{\cal B}_{11}&=x\hat c_4+x^3\left(z\hat c_5+y\hat c_6\right)\notag\\
{\cal A}_{11}{\cal B}_{01}&=-x\hat c_4+x^3\left(y\hat c_5+z\hat c_6\right).
\end{align}
Note that ${\cal A}_{01}={\cal A}_{10}$ and ${\cal B}_{01}={\cal B}_{10}$ and these are real numbers because the $\ket{\phi_{ij}}$ are real. The expressions in \myeq{eqn8003} can be manipulated to find three independent constraints required for $R$ to be a product operator. Defining $\alpha_0={\cal A}_{01}/{\cal A}_{00},\alpha_1={\cal A}_{11}/{\cal A}_{00},\beta_0={\cal B}_{01}/{\cal B}_{00},\beta_1={\cal B}_{11}/{\cal B}_{00}$, these constraints are
\begin{align}\label{eqn8004}
x^2&=\left(1-x^4\right)\alpha_0\beta_0+x^2\alpha_1\beta_1\notag\\
\beta_1+\alpha_1&=2+4x\alpha_0\beta_0\notag\\
x^2\left(\beta_0+\alpha_0\right)&=\alpha_0\beta_1+\alpha_1\beta_0.
\end{align}
From these we find that
\begin{align}\label{eqn8005}
\alpha_0^2&=\frac{x^4+x^2\alpha_1\left[x^2\left(\alpha_1-2\right)-\left(\alpha_1-1\right)^2\right]}{x^6-2x^4+4x^3-x^2+2-\alpha_1\left(4x^5-x^4+1\right)}.
\end{align}
Note that $R=I_{\cal H}$ corresponds to $\alpha_1=1$ and $\alpha_0=0$. We now ask if $R$ as a product operator can be close to $I_{\cal H}$. To see if this is possible, set $\alpha_1=1+\epsilon$ and $\alpha_0^2=\eta>0$, with $|\epsilon|\ll1$, $\eta\ll1$. Inserting these into \myeq{eqn8005} and using $x^2-1=2x$, we find
\begin{align}\label{eqn8006}
\eta=-\frac{2\epsilon^2x^3}{56+40\sqrt{2}}+{\cal O}(\epsilon^3),
\end{align}
which is manifestly negative, a contradiction (we have inserted $x=\sqrt{2}+1$ into the denominator to simplify the expression). Thus, there are no product operators within ${\cal Z}_{\cal M}$ that are close to $I_{\cal H}$ and ${\cal M}\not\in\overline{\textrm{LOCC}}$.
\subsection{Examples involving optimal unambiguous discrimination}\label{subsecG}
In this subsection, we consider another situation different from all those previously studied here, wherein a set of states is to be unambiguously discriminated, which means that failure is allowed, but the parties must know when they have succeeded in conclusively identifying the state, and when they have failed (an inconclusive result) \cite{CheflesBarnett,CheflesLOCC}. We want to know if the global optimum success probability can be achieved using $\overline{\textrm{LOCC}}$.
The class we consider here was introduced in \cite{myUSD}. Each member of this class provides a set of states to be unambiguously discriminated, along with the unique separable measurement that achieves the global optimal (minimum) failure rate. We showed in \cite{myUSD} that this measurement cannot be implemented by finite-round LOCC. Here, we show that it also cannot be implemented in $\overline{\textrm{LOCC}}$, excluding the possibility that with an infinite number of rounds, one might come arbitrarily close to achieving the optimal failure rate. The set of states to be discriminated, $\{\ket{\Phi_i}\}$, is reciprocal to the set, $S_\Psi=\{\ket{\Psi_j}\}$, defined below. By reciprocal, we mean that
\begin{align}\label{eqn20}
\inpd{\Psi_j}{\Phi_i}=\delta_{ij}\inpd{\Psi_i}{\Phi_i}
\end{align}
for all $i,j$ in $S_\Psi$, which must be a linearly independent set. This can be any subset of $D=N-1$ members of the states we are about to describe, for any appropriately chosen local dimensions; see below.
Consider any prime number $N\ge5$ and a multipartite system having overall dimension $D=N-1$. The number of parties $P$ can be chosen in any way consistent with the prime factorization of $D$---this choice is generally not unique, but it is unimportant for our present purposes. Let ${\cal H}_\alpha$ be the Hilbert space describing party $\alpha$'s subsystem, and the overall Hilbert space is then ${\cal H}={\cal H}_1\otimes{\cal H}_2\otimes\ldots\otimes{\cal H}_P$. Define states
\begin{align}\label{eqn21}
\ket{\Psi_j}=\ket{\psi_j^{(1)}}\otimes\ldots\otimes\ket{\psi_j^{(P)}},~j=1,\ldots,N,
\end{align}
\noindent with
\begin{align}\label{eqn22}
\ket{\psi_j^{(\alpha)}}=\frac{1}{\sqrt{d_\alpha}}\sum_{m_\alpha=0}^{d_\alpha-1}e^{2\pi \textrm{i}jp_\alpha m_\alpha/N}\ket{m_\alpha},
\end{align}
\noindent where $d_\alpha$ is the dimension of ${\cal H}_\alpha$, with parties ordered such that $d_1\le d_2\le\cdots\le d_P$, and overall dimension $D=d_1d_2\cdots d_P$.
Here, $p_1=1$ and for $\alpha\ge2$, $p_\alpha=d_1d_2\cdots d_{\alpha-1}$, and $\ket{m_\alpha}$ is the standard basis for party $\alpha$. It was shown in \cite{myExtViolate1} that any proper subset of these states constitutes a linearly independent set, and that
\begin{align}\label{eqn23}
I=\frac{D}{N}\sum_{j=1}^N\Psi_j,
\end{align}
where $\Psi_j=\ket{\Psi_j}\bra{\Psi_j}$. Therefore, by choosing $D=N-1$ of these states---omitting state $J$, say---${\cal M}_\Psi=\{\Psi_j\}$ is a complete separable measurement that achieves the optimal failure rate, with $\Psi_J$ being the outcome indicating failure. In Appendix~\ref{AppC}, we prove
\begin{thm11}\label{thm11}
For any choice of $J$ and set of local dimensions, $d_\alpha$, consistent with the overall dimension $D=N-1$, where $N$ is any prime number, the set of states $\{\ket{{\Phi_i}}\}_{i\ne J}$ defined by \myeq{eqn20} cannot be optimally unambiguously discriminated within $\overline{\textrm{LOCC}}$.
\end{thm11}
\section{Conclusion}
In summary, we have presented Theorem \ref{thm2} that if measurement ${\cal M}\in\overline{\textrm{LOCC}}$, then there exists a continuous, monotonic path of positive semidefinite product operators from identity operator ${\cal I}_{\cal H}$ to $(0,E_j]$ for each of the outcomes $E_j$ of ${\cal M}$. We have used Theorem~\ref{thm2} to answer a number of long-standing unsolved problems, including several cases of unextendible product bases for which we have shown they cannot be discriminated within $\overline{\textrm{LOCC}}$. We showed that Proposition $1$ of \cite{KKB} follows from our Theorem~\ref{thm2}, and then used the example from Eq.~$(15)$ of \cite{KKB} to demonstrate that Theorem~\ref{thm2} is strictly stronger than their proposition. Note also that Theorem~\ref{thm2} can be applied to the local state discrimination problem even when $\cap_\mu\ker\rho_\mu$ contains non-vanishing product operators, a circumstance for which Proposition $1$ cannot be used. In addition, Proposition $1$ of \cite{KKB} implies the necessary condition for $\overline{\textrm{LOCC}}$ of \cite{ChildsLeung} that their nonlocality constant $\eta$ vanishes. Therefore, Theorem~\ref{thm2} implies, and is strictly stronger than, the latter necessary condition, as well. Another necessary condition---for perfect discrimination of a pair of multipartite quantum states by $\overline{\textrm{LOCC}}$---has been obtained as Theorem~$1$ in \cite{ChitambarHsiehHierarchy}, and it is also possible to derive this condition from our Theorem~\ref{thm2}. We suspect, though have no proof, that our theorem is strictly stronger than that of \cite{ChitambarHsiehHierarchy}, as well. 
In any case, since their Theorem~$1$ is restricted to discrimination of pairs of states, our result is certainly much more general.\footnote{Indeed, Theorem~\ref{thm2} applies to all measurements and is not restricted to the local discrimination problem.} Thus, we have succeeded in unifying, and going beyond, all of these previous results on $\overline{\textrm{LOCC}}$. Significantly, the geometric nature of our theorem provides a simple, intuitive way to understand $\overline{\textrm{LOCC}}$.
The question can be raised as to whether or not these necessary conditions for $\overline{\textrm{LOCC}}$ may also be sufficient. The demonstration above that our Theorem~\ref{thm2} is strictly stronger than Proposition $1$ of \cite{KKB} shows that neither Proposition $1$ nor the condition of \cite{ChildsLeung} that $\eta=0$ is sufficient. We do not believe that Theorem~\ref{thm2} is sufficient, either, though it remains a possibility. If it is not sufficient, then neither is the necessary condition of \cite{ChitambarHsiehHierarchy}. In our proof of Lemma~\ref{lem5}, we only needed to use a single path to the one outcome $E_{\hat\jmath}$ to show that the conditions of Proposition~1 of \cite{KKB} are satisfied. Our Theorem~\ref{thm2} requires there exists such a path to each of the outcomes, which is, in itself, a stronger requirement. We believe, however, that it is likely that a single path to each outcome is not sufficient for $\overline{\textrm{LOCC}}$, though such cases do exist.\footnote{A simple example of an LOCC measurement for which one, and only one, path exists in ${\cal Z}_{\cal M}$ to each of the outcomes is the measurement on two qubits consisting of outcomes $[0]\otimes[0],[0]\otimes[1],[1]\otimes[+],[1]\otimes[-]$, where $\ket{\pm}=(\ket{0}\pm\ket{1})/\sqrt{2}$. In this case, the only product operators in ${\cal Z}_{\cal M}$ are those that lie on the single, piecewise local paths to each of these outcomes.} Rather, given that LOCC trees generally involve each branch giving rise to an entire series of subsequent branches, it is likely that some kind of ``tree" of paths to the collection of outcomes would be needed in general for $\overline{\textrm{LOCC}}$, which would mean that the single path of Theorem~\ref{thm2} is not sufficient. 
Of course, it would not be surprising for the existence of a single path to generally imply the existence of many, but we do not know if there are cases where one or more paths to each outcome exist, while at the same time, the measurement is nonetheless not in $\overline{\textrm{LOCC}}$.
Finally, we note that the geometric nature of Theorem~\ref{thm2} points toward ways of extending these results. In particular, the fact that the paths of this theorem must lie within the zonotope ${\cal Z}_{\cal M}$ suggests new approaches for obtaining lower bounds on the error necessarily incurred when implementing a measurement ${\cal M}$ by LOCC. Indeed, as a preliminary result in this direction, we have devised a method of finding such a lower bound when ${\cal M}$ is used to discriminate any orthogonal set of pure states in Hilbert space ${\cal H}$. These results, and hopefully extension to more general sets of states and more general tasks, will be discussed elsewhere.
\noindent\textit{Acknowledgments} --- We are grateful to Dan Stahlke for a series of extremely helpful discussions.
\appendix
\section{Proofs for GenTiles$2$}
\subsection{All product states orthogonal to all but one of the states in GenTiles$2$}\label{AppB1}
Here, we prove Lemma~\ref{lem11} by finding all product states orthogonal to all but one of the states in GenTiles$2$. Reference will be made to Figure~\ref{fig103} of the main text, along with the indexing of the rows and columns shown there. We use the orthonormal basis of ${\cal H}$ consisting of states $\ket{L_{jk}}$, $\ket{S_{j}}$ and $\ket{T_{j}}$, with $j=0,\cdots,m-1$ and $k=0,\cdots,n-3$. Since the states we seek must be product, they are of the form
\begin{align}\label{eqn6000}
\ket{\phi}=\ket{x}\ket{y}=\sum_{j=0}^{m-1}\left(\sum_{k=0}^{n-3}c_{jk}\ket{L_{jk}}+a_{j}\ket{S_{j}}+b_j\ket{T_j}\right).
\end{align}
For orthogonality to each state $\ket{S_{j}}$ or $\ket{L_{jk}}$ of GenTiles$2$, we require $a_{j}=0$ or $c_{jk}=0$, respectively. Denote the state that we are not requiring $\ket{\phi}$ to be orthogonal to as $\ket{R}$. Then,
\begin{align}\label{eqn6001}
\ket{\phi}=\ket{x}\ket{y}=r\ket{R}+\sum_{j=0}^{m-1}\left(c_{j0}\ket{L_{j0}}+b_{j}\ket{T_{j}}\right),
\end{align}
and orthogonality to $\ket{F}$ requires $\sum_{j}\left(\sqrt{n-2}c_{j0}+\sqrt{2}b_{j}\right)=0$, as well.
Write $\ket{x}=\sum_ix_i\ket{i}$ and $\ket{y}=\sum_jy_j\ket{j}$. We begin with the following lemma.
\begin{lem14}\label{lem14}
If $\ket{R}\ne\ket{S_j}$ and $y_j\ne0$, then $x_j=x_{j+1\mmod{m}}$. Similarly, if $x_j\ne0$ and $\ket{R}\ne\ket{L_{jk}}$ for any $k$, then $y_i=y_{i^\prime}$ for all $i,i^\prime\ne j,j-1\mmod{m}$.
\end{lem14}
\proof Recall the definitions of $\ket{T_j},\ket{L_{j0}}$ given below \myeq{eqn3001} of the main text. When $\ket{R}\ne\ket{S_j}$, then from \myeq{eqn6001} we have that $x_jy_j=\inpd{jj}{\phi}=b_j/\sqrt{2}=\inpd{j+1,j}{\phi}=x_{j+1}y_j$. Therefore, if $y_j\ne0$ the first claim follows immediately. When $\ket{R}\ne\ket{L_{jk}}$, $x_jy_i=\inpd{ji}{\phi}=c_{j0}/\sqrt{n-2}=\inpd{ji^\prime}{\phi}=x_{j}y_{i^\prime}$ for all $i,i^\prime\ne j,j-1\mmod{m}$ and the second claim follows, completing the proof.{\hspace{\stretch{1}}$\blacksquare$}
\begin{lem12}\label{lem12}
Excluding the case $m=3,n=4$, if for all $i,j$, $\inpd{ij}{\phi}\ne0$, then $x_i=x_0\ne0$ for all $i$ and $y_j=y_0\ne0$ for all $j$, which gives $\ket{\phi}\propto\ket{F}$ as the only product state orthogonal to all but one of the GenTiles$2$ states. Therefore, the omitted state cannot be $\ket{S_j}$ or $\ket{L_{jk}}$ but must be $\ket{F}$ itself. In case $m=3,n=4$, then the (unnormalized) state $\left(\ket{0}+\ket{1}+\ket{2}\right)\otimes\left(\sum_{i\ne j+1}\ket{i}-3\ket{j+1}\right)$ is orthogonal to all states in GenTiles$2$ other than $\ket{L_{j1}}=\ket{j}\otimes\left(\ket{j+1\mmod{3}}-\ket{3}\right)/\sqrt{2}$.
\end{lem12}
\proof Since none of the $x_i,y_i$ vanish, we can invoke Lemma~\ref{lem14}. Even if $\ket{R}=\ket{S_J}$, then due to the circular relationship imposed by $\mmod{m}$, we have in succession, $x_{J+1}=x_{J+2},x_{J+2}=x_{J+3},\cdots,x_{m-2}=x_{m-1},x_{m-1}=x_0,\cdots, x_{J-2}=x_{J-1},x_{J-1}=x_J$, and all the $x_j$ are identical. Similarly, even if $\ket{R}=\ket{L_{JK}}$, there are always two non-adjacent columns that are not the $J^\textrm{th}$ (when $m>3$): $J-1,J+1\mmod{m}$, such that the second claim of Lemma~\ref{lem14} applies for each of these columns, from which we may conclude that all the $y_j$ are identical, which completes the proof for the general case.
For the case, $m=3$, columns $j-1,j+1\mmod{m}$ are adjacent to each other. Therefore when $\ket{R}=\ket{L_{Jk}}$ for some $k$, row $J+1$ may be different from the others, those others still being equal to each other. That is, $y_i=y_j$ for all $i,j\ne J+1\mmod{m}$, while we still have $x_j=x_0$ for all $j$. With orthogonality to $\ket{F}$, this leaves us with a state $(\ket{0}+\ket{1}+\ket{2})\otimes(\sum_{j\ne J+1}\ket{j}-(n-1)\ket{J+1})$. However, this is not orthogonal to any of the states $\ket{L_{Jk}}$ for $k\ne0$. Therefore, this state is an acceptable solution only when $n=4$ and $\ket{R}=\ket{L_{J1}}$, which is the only state of GenTiles$2$ in that long tile, and this completes the proof.{\hspace{\stretch{1}}$\blacksquare$}
To find other acceptable product states, we may thus assume that $\phi_{ij}=\inpd{ij}{\phi}=0$ for at least one pair of indices, $i,j$. We begin with,
\begin{lem13}\label{lem13}
The number of zero entries in any given tile is either $0,1$ or equal to the length of the tile (short tiles have length $2$, long tiles $n-2$). Furthermore, there can be no more than one tile that has $1$ zero entry, that being the tile containing $\ket{R}$.
\end{lem13}
\proof Consider the $J^\textrm{th}$ short tile, whose entries are determined by $a_J\ket{S_J}+b_J\ket{T_J}$. If $\ket{R}\ne\ket{S_J}$, then orthogonality to $\ket{S_J}$ requires $a_J=0$. Then, we have no zero entries when $b_J\ne0$, and $2$ zero entries when $b_J=0$. On the other hand, when $\ket{R}=\ket{S_J}$ and $b_J=\pm a_J$, we have $1$ zero entry in that tile.
For the $J^\textrm{th}$ long tile, we follow a similar argument. This tile's entries are determined by the coefficients $c_{Jk}$, where $c_{Jk}=0$ for all $k\ne0$ unless $\ket{R}=\ket{L_{JK}}$ for some $K\ne0$, in which case both $c_{J0}$ and $c_{JK}$ can be non-vanishing. If both vanish, we have $n-2$ zero entries in that tile; if one vanishes and the other does not, we have no zero entries; and finally, if both are non-zero and related as $c_{JK}=-\omega^{iK}c_{J0}$, we have a single zero entry in that tile. Clearly, the case of a single zero entry can only occur when $\ket{R}$ resides within that given tile, and this completes the proof.{\hspace{\stretch{1}}$\blacksquare$}
We will also use the following lemma.
\begin{lem15}\label{lem15}
If there is a long tile that has all its entries equal to zero but that column is not all zeros, then the only allowed non-vanishing product states are $\ket{S_j},\ket{T_j}$ (depending on which state is chosen for $\ket{R}$), unless $n=4$. When $n=4$, states $\ket{L_{j0}}+\ket{L_{j1}}-\ket{T_{j+1}}$ are also allowed when $m=3,4$; and
$\ket{L_{j0}}-\ket{L_{j1}}+\ket{T_{j+2}}$,
as well as $\ket{L_{j1}}$ and $\ket{L_{j0}}$, are also acceptable states when $m=4$. [All indices are to be understood as $\mmod{m}$.]
\end{lem15}
\proof The entries in the $J^\textrm{th}$ long tile are equal to $x_Jy_i$ for all $i\ne J-1,J$. Therefore, under the assumptions of the lemma, $y_i=0$ for all $i\ne J-1,J$, and there are no more than two non-zero rows. Given the length of each long tile is $n-2$, then when $n>5$, there are at least $n-2-2>1$ zero entries in each of the long tiles. According to Lemma~\ref{lem13}, this implies that every long tile is all zeros, and the only contributions to $\ket{\phi}$ are from the $\ket{S_j},\ket{T_j},j=J-1,J$. Then, it is easy to see that no linear combination of these is a product state, except $a_{J-1}\ket{S_{J-1}}+b_{J-1}\ket{T_{J-1}}$ and $a_J\ket{S_J}+b_J\ket{T_J}$. Given the constraint of orthogonality to all states of GenTiles$2$ but the one chosen as $\ket{R}$, we are left with the only allowed product states being $\ket{S_j},\ket{T_j},j=J-1,J$, depending on the choice of $\ket{R}$, which completes the proof for $n>5$.
When $n=5$, there can be $n-2-2=1$ zero in one of the long blocks, if that block contains $\ket{R}$. Then, the only additional product state is a linear combination of $\ket{L_{jk}}$ and $\ket{L_{j0}}$ for that $j^\textrm{th}$ block (with both coefficients necessarily non-zero in order for the block to have that one zero entry), but this is not orthogonal to $\ket{F}$, so leads to no new acceptable states under these circumstances.
When $n=4$, the long blocks have length $2$ and can either fit entirely within the two non-zero rows, or can have one zero entry outside those rows and one non-zero entry inside them. When $m=3$ (see Figure~\ref{fig602}), each long tile intersects row $3$, so we have $y_3=0$ in this case and every long tile has a zero in it. One of these tiles can have only one zero ($L_0$ or $L_2$ in Figure~\ref{fig602}), but the other must be all zeros. Then, in addition to the acceptable product states already identified, we also find $\ket{L_{J+1,1}}+\ket{L_{J+1,0}}-\ket{T_{J+2}}$ and $\ket{L_{J-1,1}}+\ket{L_{J-1,0}}-\ket{T_{J}}$ are product states orthogonal to all the states in GenTiles$2$ except $\ket{L_{J\pm1,1}}$, respectively (and all subscripts are mod $m$, so each of these product states is of the form indicated in the lemma). When $m=4$ (see Figure~\ref{fig603}), a similar kind of situation arises and here we obtain $\ket{L_{J-1,0}}+\ket{L_{J-1,1}}-\ket{T_{J}}$, $\ket{L_{J+1,0}}-\ket{L_{J+1,1}}-\ket{T_{J-1}}$, $\ket{L_{J+2,0}}-\ket{L_{J+2,1}}-\ket{T_{J}}$, $\ket{L_{J+2,0}}+\ket{L_{J+2,1}}-\ket{T_{J-1}}$ when $\ket{R}$ is the given $\ket{L_{j1}}$ (again, each of these is of the form indicated in the lemma, with mod $m$). In addition, here, a long tile can be without any zero entries, so we also have $\ket{L_{j1}}$ and $\ket{L_{j0}}$ as acceptable states, depending on the choice of $\ket{R}$. This completes the proof.{\hspace{\stretch{1}}$\blacksquare$}
\begin{figure}
\caption{\label{fig602}\label{fig602a}\label{fig602b}}% NOTE(review): caption text lost in extraction; brace closed and duplicate label removed
\end{figure}
\begin{figure}
\caption{\label{fig603}\label{fig603a}\label{fig603b}}% NOTE(review): caption text lost in extraction; brace closed and duplicate label removed
\end{figure}
We can now complete our search for allowable product states by considering the following case: for every $j$ such that the $j^\textrm{th}$ long tile is all zeros, we also have $x_j=0$; see Lemma~\ref{lem17} below. First, we have
\begin{lem16}\label{lem16}
If there exists pair $i,j$ such that $\inpd{ij}{\phi}=0$, then there also exists $s$ such that $y_s=0$.
\end{lem16}
\proof The proof is by contradiction, so assume $x_iy_j=\inpd{ij}{\phi}=0$, but for all $s$, $y_s\ne0$. Then $x_i=0$ and the $i^\textrm{th}$ and $(i-1)^\textrm{th}$ short tiles each have a zero in them. It must be that $\ket{R}\ne\ket{S_i}$ or $\ket{R}\ne\ket{S_{i-1}}$, so starting with the one that is not $\ket{R}$, apply the argument used in the proof of Lemma~\ref{lem12}. For example, suppose $\ket{R}\ne\ket{S_i}$. Then, the other entry in the $i^\textrm{th}$ short tile, which is $x_{i+1}y_i$ is also zero. Since by assumption, $y_i\ne0$, we have that $x_{i+1}=0$, implying that the $(i+1)^\textrm{th}$ short tile is all zeros, so that also, $x_{i+2}y_{i+1}=0$. Therefore, $x_{i+2}=0$, and by continuing the argument around the circle (mod $m$), we find $x_l=0$ for all $l$ and we have no non-zero state under these conditions. If one of the other $\ket{S_l}$ is the chosen $\ket{R}$, then one can go around in both directions starting at $x_{i-1},x_i$ to finish at $x_l$ coming from both sides, arriving at the same conclusion. This completes the proof.{\hspace{\stretch{1}}$\blacksquare$}
The last step is
\begin{lem17}\label{lem17}
If every long tile that is all zeros sits in a column that is also all zeros, then the only allowable product states under these circumstances are $\ket{L_{j0}}$ (if $\ket{R}=\ket{F}$) and $\sqrt{2}\ket{L_{j+1,0}}-\sqrt{n-2}\left(\ket{T_{j}}-\ket{S_{j}}\right)$ and $\sqrt{2}\ket{L_{j0}}-\sqrt{n-2}\left(\ket{T_{j}}+\ket{S_{j}}\right)$ (the latter two apply when $\ket{R}=\ket{S_j}$).
\end{lem17}
\proof Since from Lemma~\ref{lem16}, at least one row is all zeros, there are no more than two of the long tiles that do not have a zero in them. Let us first consider the case where $\ket{R}=\ket{F}$ or $\ket{S_j}$ for some $j$. In this case, for which the long tiles have only $\ket{L_{j0}}$ in them, one zero entry implies that the entire long tile is zero. Under the conditions of the present Lemma, this means there are no more than two columns that have non-zero entries in them. This situation, with $y_s=0$ leaving the $s^\textrm{th}$ and $(s+1)^\textrm{th}$ columns the only ones that aren't all zero, is depicted in Figure~\ref{fig604}. Note that $\alpha\ne0$ only if $\ket{R}=\ket{S_{s-1}}$.
\begin{figure}
\caption{\label{fig604}}% NOTE(review): caption text lost in extraction; brace closed and duplicate label removed
\end{figure}
When $\ket{R}=\ket{F}$, $\alpha=0$ in Figure~\ref{fig604} and the only contributions to $\ket{\phi}$ are from $\ket{L_{s0}}$ and $\ket{L_{s+1,0}}$. It is clear that the only linear combination of these two states that yields a product state are the individual states $\ket{L_{s0}}$ and $\ket{L_{s+1,0}}$, each by itself.
When $\ket{R}=\ket{S_j}$ with $j\ne s-1$, we still require $\alpha=0$, and we are left with the same situation as for $\ket{R}=\ket{F}$. However, we here require orthogonality to $\ket{F}$, so we find no allowable states in this case.
When $\ket{R}=\ket{S_{s-1}}$, we have $\ket{\phi}=c_{s0}\ket{L_{s0}}+c_{s+1,0}\ket{L_{s+1,0}}+b_{s-1}\ket{T_{s-1}}+a_{s-1}\ket{S_{s-1}}$. Here, the zero appearing in the short tile at position $(s+1,s+1)$ requires that either $c_{s0}=0$ (from $y_{s+1}=0$) or $c_{s+1,0}=0$ (from $x_{s+1}=0$). If $c_{s+1,0}\ne0$ here, then also $\alpha=0$ to obtain a product state, and we have $\ket{L_{s+1,0}}$, which is not allowed due to the requirement of orthogonality to $\ket{F}$. We do find an allowable state when $c_{s+1,0}=0$, that being $\sqrt{2}\ket{L_{s0}}-\sqrt{n-2}\left(\ket{T_{s-1}}-\ket{S_{s-1}}\right)$. Similarly, when $\ket{R}=\ket{S_{s+1}}$, we obtain the allowable state, $\sqrt{2}\ket{L_{s+1,0}}-\sqrt{n-2}\left(\ket{T_{s+1}}+\ket{S_{s+1}}\right)$.
We are left to consider the case where $\ket{R}=\ket{L_{jk}}$ for some $j,k$. In the case $j\ne s-1$, we must have $\alpha=0$ again in Figure~\ref{fig604}, and the only contributions are from the two long tiles depicted in the figure, along with (if also $j\ne s,s+1$) additional contributions from the $j^\textrm{th}$ long tile. Then, $\ket{\phi}=c_{jk}\ket{L_{jk}}+c_{j0}\ket{L_{j0}}+c_{s0}\ket{L_{s0}}+c_{s+1,0}\ket{L_{s+1,0}}$, and it is easy to see that no product state orthogonal to $\ket{F}$ is possible in this case (whether or not $j=s,s+1$, in which case the second appearance of $\ket{L_{s0}}$ or $\ket{L_{s+1,0}}$ should obviously be omitted).
Finally, if $j=s-1$, then in Figure~\ref{fig604}, the zero in that short tile next to $\alpha$ should be replaced by another instance of $\alpha$, which need not vanish. Now we have $\ket{\phi}=c_{s-1,0}\left(\ket{L_{s-1,0}}-\ket{L_{s-1,1}}\right)+c_{s0}\ket{L_{s0}}+c_{s+1,0}\ket{L_{s+1,0}}+b_{s-1}\ket{T_{s-1}}$ (this is the combination that makes the entry in the $s^\textrm{th}$ row within the $(s-1)^\textrm{th}$ long tile vanish, as it must since $y_s=0$). Noting in this altered version of Figure~\ref{fig604} that the $(s-1,s-2)$ entry, which is equal to $x_{s-1}y_{s-2}$, must vanish (it is part of the $(s-2)^\textrm{th}$ short tile), then if $x_{s-1}\ne0$, $y_{s-2}=0$ which requires $c_{s0}=0=c_{s+1,0}$. Then the only product states are either $\ket{T_{s-1}}$ or $\ket{L_{s-1,0}}-\ket{L_{s-1,k}}$, neither of which is orthogonal to $\ket{F}$, so are not allowed. If, on the other hand, $x_{s-1}=0$, then we have a situation already considered in preceding paragraphs. This completes the proof.{\hspace{\stretch{1}}$\blacksquare$}
Collecting all of the results in this appendix, we obtain Lemma~\ref{lem11}.
\subsection{Complete separable measurements for discriminating GenTiles$2$}\label{AppB2}
Any acceptable measurement consists of operators proportional to projectors onto the states listed in Lemma~\ref{lem11}. A complete measurement requires that a positive linear combination of these operators is equal to the identity, see \myeq{eqn100} in the main text. When $n>4$, let us list the various allowable states as
\begin{align}\label{eqn7001}
\ket{\Psi_0}&=\ket{F}\notag\\
\ket{\Psi_{j+1}}&=\ket{S_j}\notag\\
\ket{\Psi_{m+j+1}}&=\ket{T_j}\notag\\
\ket{\Psi_{(k+2)m+j+1}}&=\ket{L_{jk}}\notag\\
\ket{\Psi_{nm+j+1}}&=\sqrt{n-2}\ket{L_{j0}}-\frac{n-2}{\sqrt{2}}\left(\ket{T_j}+\ket{S_j}\right)\notag\\
&=\ket{j}\otimes\left(\sum_{i=0}^{m-3}\ket{i+j+1\mmod{m}}+\sum_{i=m}^{n-1}\ket{i}-(n-2)\ket{j}\right)\notag\\
\ket{\Psi_{(n+1)m+j+1}}&=\sqrt{n-2}\ket{L_{j0}}-\frac{n-2}{\sqrt{2}}\left(\ket{T_{j-1}}-\ket{S_{j-1}}\right)\notag\\
&=\ket{j}\otimes\left(\sum_{i=0}^{m-3}\ket{i+j+1\mmod{m}}+\sum_{i=m}^{n-1}\ket{i}-(n-2)\ket{j-1}\right).
\end{align}
The projectors onto these states are all diagonal in the standard basis on one or the other party, except for the one proportional to $[F]$. Taking matrix elements of \myeq{eqn100},
\begin{align}\label{eqn7002}
\bra{00}\left(\sum_jc_j[\Psi_j]\right)\ket{11}=\bra{00}I_{\cal H}\ket{11}
\end{align}
reduces to $c_0=0$, implying that $[F]$ must be excluded from the measurement. Given this, next take
\begin{align}\label{eqn7003}
\bra{jj}\left(\sum_jc_j[\Psi_j]\right)\ket{j,n-1}=0,
\end{align}
and
\begin{align}\label{eqn7004}
\bra{j,j-1}\left(\sum_jc_j[\Psi_j]\right)\ket{j,n-1}=0,
\end{align}
which gives $c_{nm+j+1}=0$ and $c_{(n+1)m+j+1}=0$, respectively, for $j=0,\cdots,m-1$. This leaves us with a unique complete separable measurement for discriminating GenTiles$2$ when $n>4$, that consisting of projectors onto the states, $\ket{S_j},\ket{T_j}$, and $\ket{L_{jk}}$, for $j=0,\cdots,m-1$ and $k=0,\cdots,n-3$. This completes the proof of Lemma~\ref{lem18} for $n>4$.
In the case of $m=n=4$, the allowed states are
\begin{align}\label{eqn7005}
\ket{\Psi_0}&=\ket{F}=\left(\ket{0}+\ket{1}+\ket{2}+\ket{3}\right)\otimes\left(\ket{0}+\ket{1}+\ket{2}+\ket{3}\right)\notag\\
\ket{\Psi_{j+1}}&=\ket{S_j}=\left(\ket{j}-\ket{j+1\mmod{4}}\right)\otimes\ket{j}\notag\\
\ket{\Psi_{j+5}}&=\ket{T_j}=\left(\ket{j}+\ket{j+1\mmod{4}}\right)\otimes\ket{j}\notag\\
\ket{\Psi_{3k+j+9}}&=\ket{L_{jk}}=\ket{j}\otimes\left(\ket{j+1\mmod{4}}+(-1)^k\ket{j+2\mmod{4}}\right)\notag\\ \ket{\Psi_{j+17}}&=\ket{L_{j0}}-\left(\ket{T_j}+\ket{S_j}\right)\notag\\
&=\ket{j}\otimes\left(\ket{j+1\mmod{4}}+\ket{j+2\mmod{4}}-2\ket{j}\right)\notag\\
\ket{\Psi_{j+21}}&=\ket{L_{j0}}-\left(\ket{T_{j-1}}-\ket{S_{j-1}}\right)\notag\\
&=\ket{j}\otimes\left(\ket{j+1\mmod{4}}+\ket{j+2\mmod{4}}-2\ket{j-1\mmod{4}}\right)\notag\\
\ket{\Psi_{j+25}}&=\ket{T_j}-\left(\ket{L_{j-1,0}}+\ket{L_{j-1,1}}\right)\notag\\
&=\left(\ket{j}+\ket{j+1\mmod{4}}-2\ket{j-1\mmod{4}}\right)\otimes\ket{j}\notag\\
\ket{\Psi_{j+29}}&=\ket{T_j}-\left(\ket{L_{j-2,0}}-\ket{L_{j-2,1}}\right)\notag\\
&=\left(\ket{j}+\ket{j+1\mmod{4}}-2\ket{j-2\mmod{4}}\right)\otimes\ket{j},
\end{align}
with $j=0,1,2,3$ and $k=0,1$. First note that $\ket{F}$ is the only state that is non-diagonal in the standard basis on both parties. Therefore, taking the $\bra{i_1i_2}\cdots\ket{i_1^\prime i_2^\prime}$ matrix element of \myeq{eqn100} gives that $c_0=0$ again, and $\ket{F}$ cannot be a part of the measurement. Then, taking the $\bra{jj}\cdots\ket{j,j+1}$ matrix element of \myeq{eqn100} shows that $c_{j+17}=0$ for all $j$. Taking the $\bra{jj}\cdots\ket{j,j+2}$ matrix element of \myeq{eqn100} shows that $c_{j+21}=0$ for all $j$. Similarly, the matrix elements $\bra{jj}\cdots\ket{j-1,j}$ and $\bra{jj}\cdots\ket{j-2,j}$ show that $c_{j+25}=0$ and $c_{j+29}=0$, respectively. This again leaves the only operators allowed in the complete measurement as those stated in Lemma~\ref{lem18}. This completes the proof for $m=n=4$.
The final case of $m=3,n=4$ is slightly more involved algebraically. The allowed states are
\begin{align}\label{eqn7006}
\ket{\Psi_0}&=\ket{F}=\left(\ket{0}+\ket{1}+\ket{2}\right)\otimes\left(\ket{0}+\ket{1}+\ket{2}+\ket{3}\right)\notag\\
\ket{\Psi_{j+1}}&=\ket{S_j}=\left(\ket{j}-\ket{j+1\mmod{3}}\right)\otimes\ket{j}\notag\\
\ket{\Psi_{j+4}}&=\ket{T_j}=\left(\ket{j}+\ket{j+1\mmod{3}}\right)\otimes\ket{j}\notag\\
\ket{\Psi_{3k+j+7}}&=\ket{L_{jk}}=\ket{j}\otimes\left(\ket{j+1\mmod{3}}+(-1)^k\ket{3}\right)\notag\\
\ket{\Psi_{j+13}}&=\ket{L_{j0}}-\left(\ket{T_j}+\ket{S_j}\right)\notag\\
&=\ket{j}\otimes\left(\ket{j+1\mmod{3}}+\ket{3}-2\ket{j}\right)\notag\\
\ket{\Psi_{j+16}}&=\ket{L_{j0}}-\left(\ket{T_{j-1}}-\ket{S_{j-1}}\right)\notag\\
&=\ket{j}\otimes\left(\ket{j+1\mmod{3}}+\ket{3}-2\ket{j-1\mmod{3}}\right)\notag\\
\ket{\Psi_{j+19}}&=\ket{T_j}-\left(\ket{L_{j-1,0}}+\ket{L_{j-1,1}}\right)\notag\\
&=\left(\ket{j}+\ket{j+1\mmod{3}}-2\ket{j-1\mmod{3}}\right)\otimes\ket{j}\notag\\
\ket{\Psi_{j+22}}&=\left(\ket{0}+\ket{1}+\ket{2}\right)\otimes\left(3\ket{j}-\sum_{i\ne j}^3\ket{i}\right),
\end{align}
with $j=0,1,2$ and $k=0,1$. From the $\bra{00}\cdots\ket{11}$, $\bra{00}\cdots\ket{12}$, $\bra{00}\cdots\ket{13}$, and $\bra{01}\cdots\ket{12}$ matrix elements of \myeq{eqn100}, only $\ket{F}$ and $\ket{\Psi_{j+22}}$ contribute and we obtain in turn,
\begin{align}\label{eqn7007}
0&=c_{0}-3c_{22}-3c_{23}+c_{24},\notag\\
0&=c_{0}-3c_{22}+c_{23}-3c_{24},\notag\\
0&=c_{0}-3c_{22}+c_{23}+c_{24},\notag\\
0&=c_{0}+c_{22}-3c_{23}-3c_{24}.
\end{align}
\noindent The determinant of the matrix of coefficients for this set of equations is equal to $64$, which implies that the only solution is that each of the $c_j$ appearing in these equations must vanish. Therefore, $\ket{F}$ and states $\ket{\Psi_{j+22}}$ must be omitted from the measurement. Excluding these states, consider the $\bra{0j}\cdots\ket{1j}$ matrix elements of \myeq{eqn100} for $j=1,2$. This yields $c_{20}=0$ from $j=1$, $c_{21}=0$ from $j=2$. Then, taking the $\bra{00}\cdots\ket{20}$ matrix element gives $c_{19}=0$, as well, and states $\ket{\Psi_{j+19}}$ are excluded. Finally, from matrix elements $\bra{j0}\cdots\ket{j1}$ for $j=0,2$, $\bra{j0}\cdots\ket{j2}$ for $j=1,2$, and $\bra{j1}\cdots\ket{j2}$ for $j=0,1$, we obtain in succession that $c_{13},c_{18},c_{17},c_{15},c_{16},c_{14}$ each must vanish. We are again left with only the states $\ket{S_j},\ket{T_j},\ket{L_{jk}}$ as claimed in Lemma~\ref{lem18}, and this completes the proof.{\hspace{\stretch{1}}$\blacksquare$}
\section{Non-existence of paths of product operators from $I_{\cal H}$ to the outcomes of ${\cal M}_{GT1}$ for GenTiles$1$}\label{AppA3}
We now prove that the measurement ${\cal M}_{GT1}$ of Lemma~\ref{lem6} is not in $\overline{\textrm{LOCC}}$. We will characterize all product operators that lie in the zonotope generated by the outcomes of ${\cal M}_{GT1}$, those being the projectors $\{[H_{km}]\}$, $\{[V_{km}]\}$, denoting this zonotope as ${\cal Z}_{{\cal M}_{GT1}}$. We will use the following lemma in what follows.
\begin{lem10}\label{lem10}
A positive linear combination of projectors onto the states in one of the $H$-tiles (or in one of the $V$-tiles) leads to a matrix whose entries are all zeros except for an $n/2\times n/2$ block along the diagonal that is of the Toeplitz form in the standard basis. This Toeplitz block is diagonal if and only if the positive linear combination is proportional to a simple sum of the projectors onto the given $H$-tile ($V$-tile) states, and is then itself proportional to a projector of rank $n/2$.
\end{lem10}
\proof Consider a positive linear combination of projectors onto the states in one of the $H$-tiles, say $H_k$. This has $B$-part equal to projector $[k]$, and $A$-part
\begin{align}\label{eqn1113}
\hat{\cal A}^{(k)}&=\sum_{m=0}^{\frac{n}{2}-1}\hat c_{km}\sum_{i,j=0}^{n/2-1}\omega^{m(j-i)}\ket{j+k\mmod{n}}\bra{i+k\mmod{n}}\notag\\
&=\sum_{i,j=0}^{n/2-1}\tilde c_{k,j-i}\ket{j+k\mmod{n}}\bra{i+k\mmod{n}},
\end{align}
where $\tilde c_{k,l}=\sum_{m=0}^{n/2-1}\hat c_{km}\omega^{ml}=\tilde c_{k,-l}^\ast$. Since the coefficients depend only on $j-i$, this has an $n/2\times n/2$ block along the diagonal that has the Toeplitz form,
\begin{align}\label{eqn1015}
\begin{bmatrix}
\tilde c_{k,0}&\tilde c_{k,1}&\tilde c_{k,2}&\tilde c_{k,3}&\cdots&\\
\tilde c_{k,-1}&\tilde c_{k,0}&\tilde c_{k,1}&\tilde c_{k,2}&\ddots&\\
\tilde c_{k,-2}&\tilde c_{k,-1}&\tilde c_{k,0}&\tilde c_{k,1}&\ddots&\\
\tilde c_{k,-3}&\tilde c_{k,-2}&\tilde c_{k,-1}&\tilde c_{k,0}&\ddots&\\
\vdots&\ddots&\ddots&\ddots&\ddots\\
\end{bmatrix}
\end{align}
Furthermore, $\tilde c_{k,0}=0$ if and only if $\hat c_{km}=0$ for all $m$, and then $\hat{\cal A}^{(k)}=0$. In addition, this block is diagonal if and only if $\tilde c_{k,r}=0$ for all $r\ne0$, implying that the vector of coefficients $\hat c_{km}$ is orthogonal to each of the vectors $\vec\omega_r$, with coefficients $(\vec\omega_r)_m=\omega^{mr}$, in which case $\hat c_{km}=\hat c_k$, independent of $m$. Then, $\hat{\cal A}^{(k)}$ is diagonal and proportional to a rank-$n/2$ projector, and this completes the proof.{\hspace{\stretch{1}}$\blacksquare$}
\noindent Note that as $k$ is incremented, these Toeplitz blocks shift one entry to the right and downward within the full $n\times n$ matrix ${\cal A}$.
In order to prove Theorem~\ref{thm9}, we need to show that no continuous path of product operators from $I_{\cal H}$ to at least one of the outcomes of a complete measurement distinguishing the states of GenTiles$1$ exists. We will see that no such path exists to any of the outcomes in the measurement found above in Lemma~\ref{lem6}. Referring to Lemma~\ref{lem6}, which tells us which projectors are to be included in our complete separable measurement, we will next find all product operators that are positive linear combinations of those projectors, and so must have the form
\begin{align}\label{eqn1110}
{\cal A}\otimes{\cal B}=\sum_{k=0}^{n-1}\sum_{m=0}^{\frac{n}{2}-1}\left(c_{km}[H_{km}]+c_{km}^\prime[V_{km}]\right),
\end{align}
with non-negative coefficients. We next prove the following lemma, in which we denote the standard basis on either party by `SB'.
\begin{lem2}\label{lem2}
If ${\cal A}$ is not diagonal in SB, then ${\cal B}$ is diagonal in SB and ${\cal A}\otimes{\cal B}$ has rank no greater than $n/2$. The same conclusion holds when the roles of the two parties are exchanged.
\end{lem2}
\proof Since $[V_{km}]$ is diagonal in SB on the $A$ side, then for $\kappa^\prime\ne\kappa$,
\begin{align}\label{eqn1011}
{\cal A}_{\kappa\kappa^\prime}{\cal B}&=\sum_{k=0}^{n-1}\sum_{m=0}^{\frac{n}{2}-1}c_{km}~_A\inpd{\kappa}{H_{km}}\inpd{H_{km}}{\kappa^\prime}_A.
\end{align}
Consider the case $\kappa^\prime>\kappa$; since ${\cal A}$ is Hermitian, this effectively covers all cases. Since the $H_k$ tile stretches from $k$ to $k+n/2-1\mmod{n}$ in the $A$-space, this constrains which $H$-tiles contribute in \myeq{eqn1011} for given $\kappa,\kappa^\prime$. Notice in particular, that if $\kappa^\prime-\kappa=n/2$, then ${\cal A}_{\kappa\kappa^\prime}=0$ since every term in the sum on the right-hand side of \myeq{eqn1011} vanishes. More generally, the only values of $k$ that contribute in this equation are those such that both $\kappa$ and $\kappa^\prime$ lie in the range from $k$ to $k+n/2-1\mmod{n}$. Introducing $\mu=\kappa-n/2+1\mmod{n}$ and $\mu^\prime=\kappa^\prime-n/2+1\mmod{n}$, and recalling the definition of $\tilde c_{kl}$ given below \myeq{eqn1113}, we have
\begin{align}\label{eqn1112}
{\cal A}_{\kappa\kappa^\prime}{\cal B}=
\begin{cases}
~\sum\limits_{k=\mu^\prime}^{\kappa}\tilde c_{k,\kappa-\kappa^\prime}[k]~~~\kappa^\prime-\kappa=1,2,\cdots,n/2-1
\\
~\sum\limits_{k=\mu}^{\kappa^\prime}\tilde c_{k,\kappa-\kappa^\prime}[k]~~~\kappa^\prime-\kappa=n/2+1,\cdots,n-1
\end{cases}
\end{align}
and then rank$({\cal B})\le n/2$. This tells us that if ${\cal A}$ is not diagonal in SB on the $A$-side, then ${\cal B}$ is diagonal in SB on the $B$-side. (Since the set of states is symmetric under exchange of the parties, we also have that when ${\cal B}$ is not diagonal, then ${\cal A}$ is diagonal, again in SB on each side.)
Since ${\cal B}$ is diagonal in SB, then according to Lemma~\ref{lem10}, there can be no contributions from the $V$-tiles at all, unless they are such that the corresponding Toeplitz blocks (for party $B$, here) are diagonal. This means that the only contributions from the $V$-tiles must be of the form of rank-$n/2$ projectors onto whichever $V$-tile is contributing. However, since we have seen that ${\cal B}$ has rank no greater than $n/2$, and since each of these projectors onto a $V$-tile has support strictly different than that for any other $V$-tile, this is a contradiction if there is more than one $V$-tile that contributes. Therefore, we can assume that no more than one $V$-tile contributes, and that one contributing $V$-tile must be the one that has support matching that of ${\cal B}$ shown in \myeq{eqn1112}. But this means that when ${\cal A}$ is not diagonal in SB, ${\cal A}\otimes {\cal B}$ of \myeq{eqn1110} reduces to
\begin{align}\label{eqn1021}
{\cal A}\otimes{\cal B}&=c_{\kappa0}^\prime[\kappa]\otimes\sum_{k=\kappa+1}^{\kappa+n/2}[k]+\sum_{k=0}^{n-1}\sum_{i,j=0}^{n/2-1}\tilde c_{k,j-i}\ket{j+k\mmod{n}}\bra{i+k\mmod{n}}\otimes[k],
\end{align}
where the first term on the right is from the single contributing $V_\kappa$-tile. Now, in order for this to be a product operator, the $A$-parts of the various terms must all be proportional to each other for different $k$. However, the second term with the $i,j$ sum, which is of the Toeplitz form discussed in Lemma~\ref{lem10}, differs from each $k$ to the next by a shift of the Toeplitz block down and to the right, as noted just after the proof of that lemma. The first term $c_{\kappa0}^\prime[\kappa]$ cannot correct for that shift to make these expressions proportional to each other, so this is not a product operator unless all of the Toeplitz blocks vanish except for one. Then, the remaining Toeplitz block is tensored with a single $[k]$, whereas the $c_{\kappa0}^\prime[\kappa]$ term is tensored with a sum over $n/2$ different projectors $[k]$. Therefore, we must have $c_{\kappa0}^\prime=0$, since we assume here that ${\cal A}$ is not diagonal in SB, and we are left with ${\cal B}$ having rank equal to unity, in which case ${\cal A}\otimes{\cal B}$ has rank no more than $n/2$. These cases are linear combinations of projectors onto the states $\ket{H_{km}}$ for one fixed $k$. By the symmetry under exchange of the parties, then for the case where ${\cal B}$ is not diagonal in SB, we also obtain linear combinations of projectors onto the states $\ket{V_{km}}$ for one fixed $k$, which again are of rank no greater than $n/2$. This completes the proof.{\hspace{\stretch{1}}$\blacksquare$}
We are left to consider the case that ${\cal A}$ and ${\cal B}$ are each diagonal in their respective SB. We next prove our final lemma.
\begin{lem3}\label{lem3}
The identity operator $I_{\cal H}$, along with those proportional to it, are the only product operators ${\cal A}\otimes{\cal B}$ in ${\cal Z}_{\cal M}$ that have rank greater than $n/2$, and therefore, $(0,I_{\cal H}]$ is an isolated line segment in the intersection of ${\cal Z}_{\cal M}$ with the set of product operators.
\end{lem3}
\proof We have already shown that product operators in ${\cal Z}_{\cal M}$ that are not diagonal in SB have rank no greater than $n/2$. Therefore, we need to consider those product operators that are diagonal. For $\kappa\ne\kappa^\prime$, then from \myeq{eqn1112},
\begin{align}\label{eqn1016}
0={\cal A}_{\kappa\kappa^\prime}{\cal B}_{kk}&=\tilde c_{k,\kappa-\kappa^\prime}.
\end{align}
For each $k$ and pairs $\kappa,\kappa^\prime$ within the range of the $H_k$-tile, $\lvert\kappa-\kappa^\prime\rvert$ ranges over all values from $1$ to $n/2-1$, modulo $n/2$ (using modulo $n/2$ here because $\omega^{n/2}=1$). Therefore, $\tilde c_{k,\kappa-\kappa^\prime}=0$ here for all $k$ and all $\kappa\ne\kappa^\prime$, which implies that $c_{km}=c_{k0}$ for all $m$ and all $k$. By the same argument but looking at ${\cal A}_{kk}{\cal B}_{\kappa\kappa^\prime}=0$ for $\kappa\ne\kappa^\prime$, we see also that $c_{km}^\prime=c_{k0}^\prime$ for those terms in \myeq{eqn1110} involving the $V$-tiles. This means that ${\cal A}\otimes{\cal B}$ is a (positive) linear combination of rank-$n/2$ projectors, each of which projects onto the (entire) support of one of the $H$-tiles, or onto that of one of the $V$-tiles. These projectors are each of the form
\begin{align}\label{eqn1119}
P_H(k)&=\sum_{j=0}^{n/2-1}[j+k\mmod{n}]\otimes[k]\notag\\
P_V(k)&=[k]\otimes\sum_{j=0}^{n/2-1}[j+k+1\mmod{n}].
\end{align}
We wish to find all linear combinations of these projectors that are product operators. Introducing the isomorphism, $[i]\mapsto\ket{i}$, these map as $P_H(k)\mapsto\ket{H_{k0}}$ and $P_V(k)\mapsto\ket{V_{k0}}$, and our present problem maps to the problem of finding all product states that are linear combinations of the $\ket{H_{k0}}$ and $\ket{V_{k0}}$. However, this is the same problem as finding all product states that are orthogonal to all the states of GenTiles$1$ other than $\ket{F}$, which we have already solved above. The answer found there---see the paragraph just above \myeq{eqn1119}---translates back to the problem here as $\{P_H(k)\}\mapsfrom\{\ket{H_{k0}}\}$, $\{P_V(k)\}\mapsfrom\{\ket{V_{k0}}\}$, and $I_{\cal H}\mapsfrom\ket{F}$. The first two sets consist of product operators each of which has rank equal to $n/2$, while $I_{\cal H}$ has rank of $n^2$, and the proof is complete.{\hspace{\stretch{1}}$\blacksquare$}
A direct consequence of Lemma~\ref{lem3} is that there exists no continuous path of product operators lying within ${\cal Z}_{\cal M}$ and stretching from $I_{\cal H}$ to anywhere. This completes the proof of Theorem~\ref{thm9} that GenTiles$1$ cannot be perfectly discriminated within $\overline{\textrm{LOCC}}$.
\section{Proof of Theorem~\ref{thm11}}\label{AppC}
Let $\omega=e^{2\pi i/N}$ and
\begin{align}\label{eqn24}
{\cal R} := \sum_{j=1}^N c_j \Psi_j = \frac{1}{D}\sum_{j=1}^N c_j\sum_{ m_1, m_1^\prime=0}^{d_1-1}\sum_{ m_2, m_2^\prime=0}^{d_2-1}\cdots\sum_{ m_P, m_P^\prime=0}^{d_P-1}\omega^{\left[j\sum_{\alpha=1}^Pp_\alpha\left( m_\alpha- m_\alpha^\prime\right)\right]}\ket{ m_1, m_2,\cdots, m_P}\bra{ m_1^\prime, m_2^\prime,\cdots, m_P^\prime},
\end{align}
and we wish to determine the conditions under which ${\cal R}$ is a product operator of the form ${\cal A}\otimes\bar{\cal A}$, where $\bar{\cal A}={\cal B}\otimes{\cal C}\otimes\cdots$. From here on, we replace $\bar{\cal A}$ by ${\cal B}$. We will show that ${\cal R}= {\cal A}\otimes{\cal B}$ if and only if either (1) ${\cal R}=c_i\Psi_i$ for some fixed $i$ or (2) ${\cal R}=cI_A\otimes I_B$, which occurs when $c_j=Dc/N$, independent of $j$. We begin by examining the structure of ${\cal R}$, as given in \myeq{eqn24}.
First, let $D_2=D/d_1$ and notice that we can write
\begin{align}\label{eqn25}
{\cal R} = \frac{1}{D}\sum_{j=1}^N c_j\sum_{m_1,m_2=0}^{d_1-1}\sum_{n_1,n_2=0}^{D_2-1}\omega^{j\left[m_1-m_2+d_1\left(n_1-n_2\right)\right]}\ket{m_1,n_1}\bra{m_2,n_2},
\end{align}
where $n_1= m_2 + d_2\left( m_3 + d_3\left( m_4 + \cdots + d_{P-2}\left( m_{P-1}+ d_{P-1} m_P\right)\cdots\right)\right)$ and similarly for $n_2$. Then, if ${\cal R}$ is a product operator, we have that
\begin{align}\label{eqn26}
\bra{m_1,n_1}{\cal R}\ket{m_2,n_2} = \bra{m_1}{\cal A}\ket{m_2}\bra{n_1}{\cal B}\ket{n_2} = \frac{1}{D}\sum_{j=1}^N c_j\omega^{j\left[m_1-m_2+d_1\left(n_1-n_2\right)\right]}.
\end{align}
Defining $s_r=\sum_jc_j\omega^{jr}/D$ and $\bra{m}{\cal X}\ket{n}={\cal X}_{mn}$ for general operator ${\cal X}$, we have
\begin{align}\label{eqn27}
{\cal A}_{m_1m_2}{\cal B}_{n_1n_2} = s_{m_1-m_2+d_1\left(n_1-n_2\right)},
\end{align}
for all $m_1,m_2=0,1,\cdots,d_1-1$ and $n_1,n_2=0,1,\cdots,D_2-1$. Since this depends only on $m_1-m_2$ and $n_1-n_2$, we can restrict consideration to cases where either $m_1=0$ or $m_2=0$ and $n_1=0$ or $n_2=0$. Now, considering the tautology $\left({\cal A}_{m_1m_2}{\cal B}_{n_1n_2}\right)\left({\cal A}_{m_1^\prime m_2^\prime}{\cal B}_{n_1^\prime n_2^\prime}\right)=\left({\cal A}_{m_1m_2}{\cal B}_{n_1^\prime n_2^\prime}\right)\left({\cal A}_{m_1^\prime m_2^\prime}{\cal B}_{n_1n_2}\right)$, we obtain from the restriction that ${\cal R}$ is a product operator, that
\begin{align}\label{eqn28}
s_{m_1+d_1n_1}s_{m_2+d_1n_2}=s_{m_1+d_1n_2}s_{m_2+d_1n_1},
\end{align}
for all $-d_1+1\le m_1,m_2\le d_1-1$ and $-D_2+1\le n_1,n_2\le D_2-1$.
Note that $s_0>0$ since $c_j\ge0$ for all $j$, and we can clearly assume ${\cal R}\ne0$. Note also that $s_{-r}=s_r^\ast$ and $s_{r\pm N}=s_{r}$. Let us now prove the following lemma.
\begin{lem19}\label{lem19}
If there exists $q$ such that $s_q=0$, then $s_r=0$ for all $r\ne0$.
\end{lem19}
\proof To begin with, note that ${\cal A}_{nn}\ne0$ and ${\cal B}_{nn}\ne0$ because $0\ne s_0={\cal A}_{mm}{\cal B}_{nn}$ for any $m,n$. Suppose $q=m_1+n_1d_1$ so that $0=s_{m_1+n_1d_1}={\cal A}_{m_10}{\cal B}_{n_10}$, implying that either ${\cal A}_{m_10}=0$ or ${\cal B}_{n_10}=0$. First, suppose that ${\cal A}_{m_10}=0$. Then, $0={\cal A}_{m_10}{\cal B}_{01}=s_{m_1-d_1}=\left(s_{d_1-m_1}\right)^\ast$, implying $0={\cal A}_{d_1-m_1,0}{\cal B}_{00}$. Since ${\cal B}_{00}\ne0$, this means that ${\cal A}_{d_1-m_1,0}=0$, and in turn, that $0={\cal A}_{d_1-m_1,0}{\cal B}_{D_2-1,0}=s_{d_1-m_1+\left(D_2-1\right)d_1}$. Recall that $N=d_1D_2+1$ and that $\omega^N=1$. Therefore, the preceding expression becomes $0=s_{d_1-m_1+\left(D_2-1\right)d_1}=s_{N-m_1-1}=s_{-m_1-1}=\left(s_{m_1+1}\right)^\ast=\left({\cal A}_{m_1+1,0}{\cal B}_{00}\right)^\ast$. Hence we see that ${\cal A}_{m_10}=0\rightarrow {\cal A}_{m_1+1,0}=0$, which means that ${\cal A}_{m_10}=0\rightarrow {\cal A}_{m0}=0$ for all $m\ge m_1$. In particular, we have that $d_1-1\ge m_1$ so that $0={\cal A}_{d_1-1,0}$, implying $0={\cal A}_{d_1-1,0}{\cal B}_{01}=s_{-1}=\left(s_1\right)^\ast=\left({\cal A}_{10}{\cal B}_{00}\right)^\ast$, so that in fact, ${\cal A}_{10}=0$. Hence, for any fixed $m_1$ we have that ${\cal A}_{m_10}=0$ implies ${\cal A}_{10}=0$, which in turn implies that ${\cal A}_{m0}=0$ for any $m$. Finally, this means that $s_{m+nd_1}={\cal A}_{m0}{\cal B}_{n0}=0$ for any $m,n$ not both equal to zero, which proves the lemma in the first case that there exists $m_1$ such that ${\cal A}_{m_10}=0$.
On the other hand if ${\cal A}_{m_10}\ne0$, then ${\cal B}_{n_10}=0$ when $s_{m_1+n_1d_1}=0$. This implies that $0={\cal B}_{n_10}{\cal A}_{m0}=s_{m+n_1d_1}$ for any $m$. Then, $0=s_{m-d_1+\left(n_1+1\right)d_1}={\cal A}_{0,d_1-m}{\cal B}_{n_1+1,0}$, so either (i) ${\cal A}_{d_1-m,0}=0$ for some $m$, which by the preceding paragraph implies that $s_r=0$ for any $r\ne0$; or (ii) ${\cal B}_{n_1+1,0}=0$. The latter case means that ${\cal B}_{n_10}=0\rightarrow{\cal B}_{n_1+1,0}=0\rightarrow{\cal B}_{n0}=0$ for any $n\ge n_1$. Setting $n=D_2-1$, we have that $0=s_{\left(D_2-1\right)d_1}=s_{-1-d_1}=\left(s_{1+d_1}\right)^\ast=\left({\cal A}_{10}{\cal B}_{10}\right)^\ast$, implying either (iia) ${\cal A}_{10}=0\rightarrow s_r=0$ for any $r\ne0$, by the preceding paragraph; or (iib) ${\cal B}_{10}=0$. The latter implies, by the foregoing argument, that ${\cal B}_{n0}=0$ for any $n$, so that $s_{m+nd_1}={\cal A}_{m0}{\cal B}_{n0}=0$ for any $m,n$, and the proof is complete.{\hspace{\stretch{1}}$\blacksquare$}
Note that one can view $s_r$ as an inner product between vector $\vec c$ with components $c_j$ and vectors $\vec w_r$ with components $\omega^{jr}$, where the $N$ vectors $\vec w_r$ are easily seen to be mutually orthogonal. Hence, by this lemma, we have two possibilities: Either (1) $s_r=0$ for all $r\ne0$, in which case ${\cal R}\propto I_A\otimes I_B$, which follows from \myeq{eqn23} and the fact that the only vector orthogonal to the $N-1$ vectors $\vec w_r$ with $r\ne0$ is $\vec w_0$, which has components that are independent of $j$; or (2) $s_r\ne0$ for all $r$. We now prove the following lemma.
\begin{lem20}\label{lem20}
If $s_r\ne0$ for all $r$, then $\textrm{rank}\left({\cal R}\right)=1$, implying ${\cal R}=c_j\Psi_j$ for some fixed $j$.
\end{lem20}
\proof With ${\cal R}={\cal A}\otimes{\cal B}=\sum_jc_j\Psi_j$ and $\Psi_j$ a rank-$1$ product operator, then ${\cal B}\propto\sum_jc_j\textrm{Tr}_A(\Psi_j)$ is a positive linear combination of rank-$1$ positive operators. Since rank-$1$ positive operators are extreme rays in the convex cone of positive operators, then if $\textrm{rank}\left({\cal B}\right)=1$, it must be that there is one and only one non-zero $c_j$, which implies both that $\textrm{rank}\left({\cal R}\right)=1$ and that ${\cal R}=c_j\Psi_j$ for some fixed $j$. Therefore, we need only show that $\textrm{rank}\left({\cal B}\right)=1$. This will be so if every $2\times2$ submatrix of ${\cal B}$ has determinant equal to zero, or in other words, if ${\cal B}_{n_1n_2}{\cal B}_{n_3n_4}={\cal B}_{n_1n_4}{\cal B}_{n_3n_2}$ for all $n_1,n_2,n_3,n_4$. Within each of these $2\times2$ submatrices, we may choose to call the lower-right element as ${\cal B}_{n_1n_2}$, and then without loss of generality, we have that $n_1>n_3\ge0$ and $n_2>n_4\ge0$. These conditions on ${\cal B}$ are equivalent to,
\begin{align}\label{eqn29}
s_{\left(n_1-n_2\right)d_1}s_{\left(n_3-n_4\right)d_1}=s_{\left(n_1-n_4\right)d_1}s_{\left(n_3-n_2\right)d_1},
\end{align}
for all $n_1>n_3\ge0$ and $n_2>n_4\ge0$, so these are what we need to show follow from \myeq{eqn28}.
We have
\begin{align}\label{eqn30}
s_{\left(n_1-n_2\right)d_1}s_{\left(n_3-n_4\right)d_1}&=s_{\left(n_1-n_2\right)d_1}s_{\pm1+\left(n_3-n_4\pm D_2\right)d_1}\notag\\
&=s_{\pm1+\left(n_1-n_2\right)d_1}s_{\left(n_3-n_4\pm D_2\right)d_1}\notag\\
&=s_{\mp d_1\pm1+\left(n_1-n_2\pm1\right)d_1}s_{\left(n_3-n_4\pm D_2\right)d_1}\notag\\
&=s_{\left(n_1-n_2\pm1\right)d_1}s_{\mp d_1\pm1+\left(n_3-n_4\pm D_2\right)d_1}\notag\\
&=s_{\left(n_1-n_2\pm1\right)d_1}s_{\left(n_3-n_4\mp1\right)d_1}\notag\\
\end{align}
where we have used \myeq{eqn28} along with the fact that $s_{r\pm N}=s_r$, where $N=d_1D_2+1$. The upper sign must be chosen if $n_3<n_4$, the lower sign if $n_3>n_4$. For the upper sign, we repeat this process $n_2-n_4$ times, while for the lower sign we repeat it $n_1-n_3$ times. In either case, we end up with \myeq{eqn29}, as desired. When $n_3=n_4$ we can swap the roles of the pair $n_3,n_4$ with the pair $n_1,n_2$ and obtain the desired result, except when it is also the case that $n_1=n_2$.\footnote{Note that a problem arises with this process if we ever end up with $s_{D_2d_1}$ or $s_{-D_2d_1}$, since then \myeq{eqn28} doesn't apply. However, $s_{D_2d_1}$ can only appear after repeating the process $ n=D_2-n_1+n_2$ times for the upper sign, or for $ n=D_2-n_3+n_4$ for the lower sign. For the upper sign this would only happen if $n_2-n_4\ge D_2-n_1+n_2$, or if $n_1-n_4\ge D_2$, which is a contradiction, and for the lower sign it only happens if $n_1-n_3\ge D_2-n_3+n_4$, giving the same contradiction. On the other hand, $s_{-D_2d_1}$ can only appear after repeating the process $ n=D_2+n_3-n_4$ times for the upper sign, or for $ n=D_2+n_1-n_2$ for the lower sign. For the upper sign, this requires that $n_2-n_4\ge D_2+n_3-n_4$, and for the lower sign it requires that $n_1-n_3\ge D_2+n_1-n_2$. Both of these lead again to a contradiction, in this case $n_2-n_3\ge D_2$, so this problem does not arise.} Therefore, we are done except for showing that $s_0^2=s_qs_{-q}$ for all $q$ in the range $0<q\le d_1\left(D_2-1\right)$.
From \myeq{eqn28}, we have that $s_0s_{m_2+d_1n_2}=s_{d_1n_2}s_{m_2}$. Multiplying this by $s_0$, we obtain
\begin{align}\label{eqn31}
s_0^2s_{m_2+d_1n_2}=s_0s_{d_1n_2}s_{m_2}=s_0\left[s_{-1-\left(D_2-n_2\right)d_1}s_{m_2}\right]=s_{-1}s_{-\left(D_2-n_2\right)d_1}s_{m_2}=s_{-1}\left[s_{1+n_2d_1}s_{m_2}\right]=s_{-1}s_1s_{m_2+n_2d_1}.
\end{align}
Since by assumption, $s_r\ne0$ for all $r$, this implies that
\begin{align}\label{eqn32}
s_0^2=s_1s_{-1}=s_1s_{d_1-1-d_1}=s_{d_1-1}s_{1-d_1}=s_{d_1-1}s_{2+\left(D_2-1\right)d_1}=s_2s_{-1+D_2d_1}=s_2s_{-2}=s_2s_{d_1-2-d_1}=s_{d_1-2}s_{2-d_1},
\end{align}
and so on, where we have repeatedly used \myeq{eqn28} and the fact that $s_r=s_{r\pm N}=s_{r\pm\left(D_2d_1+1\right)}$. This shows that $s_0^2=s_rs_{-r}$ for all $r=0,1,\cdots,d_1-1$. From here, we find
\begin{align}\label{eqn33}
s_0^2=s_{-1+d_1}s_{1-d_1}=s_{1+d_1}s_{-1-d_1}=s_{1+d_1}s_{\left(D_2-1\right)d_1}=s_{d_1}s_{1+\left(D_2-1\right)d_1}=s_{d_1}s_{-d_1},
\end{align}
and then starting from
\begin{align}\label{eqn34}
s_0^2=s_{1+nd_1}s_{-1-nd_1},
\end{align}
we find that
\begin{align}\label{eqn35}
s_0^2&=s_{1-d_1+\left(n+1\right)d_1}s_{-1-nd_1}=s_{-1+\left(n+1\right)d_1}s_{1-d_1-nd_1}=s_{1+\left(n+1\right)d_1}s_{-1-\left(n+1\right)d_1},
\end{align}
from which we have that $s_0^2=s_{1+nd_1}s_{-1-nd_1}$ for all $n$. From this, we find that
\begin{align}\label{eqn36}
s_0^2&=s_{1+nd_1}s_{-1-nd_1}=s_{1+nd_1}s_{\left(D_2-n\right)d_1}=s_{nd_1}s_{1+\left(D_2-n\right)d_1}=s_{nd_1}s_{-nd_1},
\end{align}
which tells us that $s_0^2=s_{nd_1}s_{-nd_1}$ for every $n$. Finally, by following the steps taken in \myeq{eqn32} but instead starting from \myeq{eqn34}, we have that
\begin{align}\label{eqn37}
s_0^2=s_{-1+nd_1}s_{1-nd_1}=s_{-1+nd_1}s_{2+\left(D_2-n\right)d_1}=s_{2+nd_1}s_{-1+\left(D_2-n\right)d_1}=s_{2+nd_1}s_{-2-nd_1}=\cdots,
\end{align}
and thus we have that $s_0^2=s_{m+nd_1}s_{-m-nd_1}$ for every $m,n$, which completes the proof.{\hspace{\stretch{1}}$\blacksquare$}
We thus have two possibilities for ${\cal R}=\sum_jc_j\Psi_j$ to be a product operator. Either (1) ${\cal R}\propto I$, the identity operator on the full Hilbert space ${\cal H}$, or (2) ${\cal R}\propto\Psi_j$ for some fixed $j$. As a consequence, there cannot exist a continuous path of product operators stretching from $I$ to any one of the $\Psi_j$ in the space of positive operators on ${\cal H}$. By Corollary~\ref{cor1}, this completes the proof of Theorem~\ref{thm11}.
\end{document} |
\begin{document}
\date{}
\title{More results on weighted independent domination\footnote{Extended abstract of this paper appeared in the proceedings of
WG 2017 -- the 43rd International Workshop on Graph-Theoretic Concepts in Computer Science \cite{WG2017}.}}
\begin{abstract}
Weighted independent domination is an NP-hard graph problem, which remains computationally intractable
in many restricted graph classes. In particular, the problem is NP-hard in the classes of sat-graphs
and chordal graphs. We strengthen these results by showing that the problem is NP-hard in a proper subclass of
the intersection of sat-graphs and chordal graphs. On the other hand, we identify two new classes of graphs
where the problem admits polynomial-time solutions.
\end{abstract}
\section{Introduction}
\textsc{Independent domination} is the problem of finding in a graph an inclusionwise maximal independent set of minimum cardinality.
This is one of the hardest problems of combinatorial optimization and it remains difficult under substantial restrictions.
In particular, it is NP-hard for so-called sat-graphs, where the problem is equivalent to {\sc satisfiability} \cite{Zverovich06}.
It is also NP-hard for planar graphs, triangle-free graphs, graphs of vertex degree at most 3 \cite{BL03}, line graphs \cite{YG80}, chordal bipartite graphs \cite{DMK90}, etc.
The weighted version of the problem (abbreviated WID) deals with vertex-weighted graphs and asks to find an inclusionwise maximal independent set of minimum total weight.
This version is provenly harder, as it remains NP-hard even for chordal graphs \cite{Chang2004}, where {\sc independent domination} can be solved in polynomial time \cite{Farber}.
In the present paper, we strengthen two NP-hardness results by showing that WID is NP-hard in a proper subclass of the intersection of sat-graphs and chordal graphs.
On the positive side, it is known that the problem is polynomial-time solvable for interval graphs, permutation graphs \cite{poly}, graphs of bounded clique-width \cite{CW}, etc.
Let us observe that all classes mentioned above are hereditary, i.e. closed under taking induced subgraphs. It is well-known
(and not difficult to see) that a class of graphs is hereditary if and only if it can be characterized in terms of minimal forbidden induced subgraphs.
Unfortunately, not much is known about efficient solutions for the WID problem on graph classes defined by {\it finitely many} forbidden induced subgraphs.
Among rare examples of this type, let us mention cographs and split graphs.
\begin{itemize}
\item A {\it cograph} is a graph in which every induced subgraph with at least two vertices is either disconnected or the complement of a disconnected
graph. The cographs are precisely $P_4$-free graphs, i.e. graphs containing no induced $P_4$. In the case of cographs, the problem can be solved efficiently by means of modular decomposition.
\item A {\it split graph} is a graph whose vertices can be partitioned into a clique and an independent set. In terms of forbidden induced subgraphs,
the split graphs are the graphs which are free of $2K_2, C_{4}$ and $C_5$.
The only available way to solve WID efficiently for a split graph is to examine all its inclusionwise maximal independent sets, of which there are polynomially many.
\end{itemize}
The class of sat-graphs, mentioned earlier, consists of graphs whose vertices can be partitioned into
a clique and a graph of vertex degree at most 1. Therefore, sat-graphs form an extension of split graphs. With this extension the complexity status of the problem
jumps from polynomial-time solvability to NP-hardness. In the present paper, we study two other extensions of split graphs and show polynomial-time solvability in both of them.
The first of them deals with the class of $(P_5,\overline{P}_5)$-free graphs, which also extends the cographs. From an algorithmic point of view,
this extension is resistant to any available technique. To crack the puzzle for $(P_5,\overline{P}_5)$-free graphs, we develop a new decomposition
scheme combining several algorithmic tools. This enables us to show that the WID problem can be solved
for $(P_5,\overline{P}_5)$-free graphs in polynomial time.
The second extension of split graphs studied in this paper deals with the class of $(P_5, \overline{P_3+P_2})$-free graphs.
To solve the problem in this case, we develop a tricky reduction allowing us to reduce the problem to the first class.
Let us emphasize that in both cases the presence of $P_5$ among the forbidden graphs is necessary,
because each of $\overline{P}_5$ and $\overline{P_3+P_2}$ contains a $C_4$ and by forbidding $C_4$ alone we obtain a class where the problem is NP-hard \cite{BL03}.
Whether the presence of $P_5$ among the forbidden graphs is sufficient for polynomial-time solvability of WID is a big open question.
For the related problem of finding a maximum weight independent set (WIS), this question was answered only recently \cite{P5} after several decades of attacking
the problem on subclasses of $P_5$-free graphs (see e.g. \cite{gem,PP,Kar}). In particular, prior to solving the problem for $P_5$-free graphs, it was solved for
$(P_5,H)$-free graphs for all graphs $H$ with at most 5 vertices, except for $H=C_5$.
WID is a more stubborn problem, as it remains NP-hard in many classes where WIS can be solved in polynomial time, such as line graphs, chordal graphs, bipartite graphs, etc.
In \cite{LozMosPur2015}, the problem was solved in polynomial time for many subclasses of $P_5$-free graphs, including $(P_5,H)$-free graphs for all graphs $H$ with at most 5 vertices,
except for $H=\overline{P}_5$, $H=\overline{P_3+P_2}$ and $H=C_5$. In the present paper, we solve the first two of them, leaving the case of $(P_5,C_5)$-free graphs open.
We believe that WID in $(P_5,C_5)$-free graphs is polynomially equivalent to WID in $P_5$-free graphs. Determining the complexity status of the problem in both classes is a challenging open question.
We discuss this and related open questions in the concluding section of the paper.
The rest of the paper is organized as follows. In the remainder of the present section, we introduce basic terminology and notation.
In Section~\ref{sec:house} we solve the problem for $(P_5,\overline{P}_5)$-free graphs,
and in Section~\ref{sec:new} we solve it for $(P_5, \overline{P_3+P_2})$-free graphs.
All graphs in this paper are finite, undirected, without loops and multiple edges.
The vertex set and the edge set of a graph $G$ are denoted by $V(G)$ and $E(G)$, respectively.
A subset $S\subseteq V(G)$ is
\begin{itemize}
\item[--] \textit{independent} if no two vertices of $S$ are adjacent,
\item[--] a \textit{clique} if every two vertices of $S$ are adjacent,
\item[--] \textit{dominating} if every vertex not in $S$ is adjacent to a vertex in $S$.
\end{itemize}
For a vertex-weighted graph $G$ with a weight function $w$, by $id_w(G)$ we denote the minimum weight of an independent dominating set in $G$.
If $v$ is a vertex of $G$, then $N(v)$ is the {\it neighbourhood} of $v$ (i.e. the set of vertices adjacent to $v$)
and $V(G) \setminus N(v)$ is the {\it antineighbourhood} of $v$.
We say that $v$ is \textit{simplicial} if its neighbourhood is a clique, and $v$ is \textit{antisimplicial} if
its antineighbourhood is an independent set.
Let $S$ be a subset of $V(G)$. We say that a vertex $v \in V(G) \setminus S$ \textit{dominates} $S$ if $S\subseteq N(v)$.
Also, $v$ \textit{distinguishes} $S$ if $v$ has both a neighbour and a non-neighbour in $S$.
By $G[S]$ we denote the subgraph of $G$ induced by $S$ and by $G - S$ the subgraph $G[V \setminus S]$.
If $S$ consists of a single element, say $S = \{ v \}$, we write $G - v$, omitting the brackets.
If $G$ is a connected graph but $G-S$ is not, then $S$ is a \textit{separator} (also known as a cut-set).
A \textit{clique separator} is a separator which is also a clique.
As usual, $P_n,C_n$ and $K_n$ denote a chordless path, a chordless cycle and a complete graph on $n$ vertices, respectively.
Given two graphs $G$ and $H$, we denote by $G+H$ the disjoint union of $G$ and $H$, and by $mG$
the disjoint union of $m$ copies of $G$.
We say that a graph $G$ contains a graph $H$ as an induced subgraph if $H$ is isomorphic to an induced subgraph of $G$.
Otherwise, $G$ is $H$-free.
A class $\mathcal{Z}$ of graphs is hereditary if it is closed under taking induced subgraphs, i.e. if
$G \in \mathcal{Z}$ implies that every induced subgraph of $G$ belongs to $\mathcal{Z}$.
It is well-known that $\mathcal{Z}$ is hereditary if and only if graphs in $\mathcal{Z}$ do not contain induced subgraphs from a set $M$,
in which case we say that $M$ is the set of forbidden induced subgraphs for $\mathcal{Z}$.
For an initial segment of natural numbers $\{ 1, 2, \ldots, n \}$ we will often use the notation $[n]$.
\section{An NP-hardness result}
\label{sec:NP}
As we mentioned in the introduction, the WID problem is NP-hard in the classes of sat-graphs and chordal graphs.
A graph is {\it chordal} if it is $(C_4,C_5,C_6,\ldots)$-free.
A graph $G$ is called a \textit{sat-graph} if there exists a partition $A \cup B = V(G)$ such
that
\begin{enumerate}
\item $A$ is a clique (possibly, $A = \emptyset$);
\item $G[B]$ is an induced matching, i.e. an induced 1-regular graph (possibly, $B = \emptyset$);
\item there are no triangles $(a,b,b')$, where $a \in A$ and $b,b' \in B$.
\end{enumerate}
We shall refer to the pair $(A,B)$ as a \textit{sat-partition} of $G$.
Below we show that WID is NP-hard in the class of $(C_4, Sun_3)$-free sat-graphs, where $Sun_3$ is the graph shown in Figure~\ref{fig:T_domino}.
Since cycles $C_k$ with $k\ge 5$ are not sat-graphs (which is easy to see), this class also is a subclass of chordal graphs. Moreover, $Sun_3$ is
both a sat-graph and a chordal graph. Therefore, $(C_4, Sun_3)$-free sat-graphs form a proper subclass of the intersection of sat-graphs and chordal graphs.
\begin{figure}
\caption{Graph $Sun_3$}
\label{fig:T_domino}
\end{figure}
Before we prove the main result of this section, let us make the following useful observation.
\begin{observation}\label{obs:domino_T}
Let $G$ be a sat-graph with a sat-partition $(A,B)$. If $G$ contains $Sun_3$
as an induced subgraph, then $1,2,3 \in A$ and $4,5,6 \in B$.
\end{observation}
\begin{theorem}
The WID problem is NP-hard in the class of $(C_4, Sun_3)$-free sat-graphs.
\end{theorem}
\begin{proof}
We prove the theorem by transforming the decision version of the {\sc minimum dominating set} problem
in $(C_3,C_4,C_5,C_6)$-free graphs to the WID problem in $(C_4, Sun_3)$-free graphs.
Since the former problem is NP-complete (see \cite{Kor90}), this will prove that the latter is
NP-hard.
For an $n$-vertex graph $G = (V,E)$ let us define the graph $G' = (V',E')$ with vertex set
$V' = \{v_1,v_2,v_3 : v \in V\}$
and edge set
$E' = \{ (v_1,v_2), (v_2,v_3): v \in V\}
\cup \{ (w_2,v_3), (w_3,v_2) : (w,v) \in E\} \cup \{(w_3, v_3) : w, v \in V, w \neq v\}$.
\begin{figure}
\caption{Graphs $P_4$ (top) and $P'_4$ (bottom)}
\label{fig:transformationP4}
\end{figure}
Figure~\ref{fig:transformationP4} illustrates the transformation of $P_4$ into $P'_4$.
It is easy to see that for every graph $G$, the graph $G'$ is a sat-graph. Moreover, it is $C_4$-free, and hence, being a sat-graph, $G'$ is a chordal graph.
Also using the fact that $Sun_3$ has the unique sat-partition (see Observation \ref{obs:domino_T}) it is not hard to check that if $G'$
contains $Sun_3$ as an induced subgraph, then $G$ has a cycle of length at most 6. Therefore, for any
$(C_3,C_4,C_5,C_6)$-free graph $G$, the graph $G'$ is a $(C_4, Sun_3)$-free sat-graph.
Further, for every $v \in V$ we assign weight 1 to vertex $v_1$, weight $2$ to vertex $v_2$,
and weight $2n$ to vertex $v_3$.
Now, we claim that $G$ has a dominating set of size at most $k$ if and only if $G'$ has an independent dominating set of total weight at most $n + k$.
First, suppose $G$ has a dominating set $D$ of size at most $k$.
Then $D' = \{v_2 : v \in D\} \cup \{v_1 : v \in V \setminus D\}$ is clearly an independent dominating set
of $G'$ with total weight at most $n + k$.
On the other hand, suppose $G'$ has an independent dominating set $D'$ of total weight at most
$n + k$.
If $k \geq n$, then $V$ is a dominating set of $G$ of size at most $k$.
If $k < n$, then $D'$ cannot contain any of the vertices of weight $2n$ and hence $D'$ is of the form $\{v_2 : v \in D\} \cup \{v_1 : v \in V \setminus D\}$ for some subset $D$ of $V$.
For any vertex $u \in V$, since $u_3$ is dominated in $G'$ by some $v_2 \in D'$, we have that in $G$ vertex $u$ is dominated by $v \in D$.
Hence, $D$ is a dominating set of $G$. Moreover, the total weight of $D'$ is $n + |D|$ implying that $D$ is of size at most $k$.
\end{proof}
\section{WID in $(P_5,\overline{P}_5)$-free graphs}
\label{sec:house}
To solve the problem for $(P_5,\overline{P}_5)$-free graphs, we first develop a new decomposition scheme in Section~\ref{sec:had}
that combines modular decomposition (Section~\ref{subsec:modular}) and antineighborhood decomposition (Section~\ref{subsec:anti}).
Then in Section~\ref{sec:P5} we apply it to $(P_5,\overline{P}_5)$-free graphs.
\subsection{Graph decompositions}
\subsubsection{Modular decomposition}
\label{subsec:modular}
Let $G=(V,E)$ be a graph. A set $M \subseteq V$ is a \textit{module} in $G$ if no vertex outside of $M$ distinguishes $M$.
Obviously, $V(G)$, $\emptyset$ and any vertex of $G$ are modules and we call them {\it trivial}.
A non-trivial module is also known as a \textit{homogeneous set}. A graph without homogeneous sets is called {\it prime}.
The notion of a prime graph plays a crucial role in {\em modular decomposition},
which allows one to reduce various algorithmic and combinatorial problems in a hereditary class $\mathcal{Z}$ to prime graphs in $\mathcal{Z}$
(see e.g. \cite{MoeRad1984/1} for more details on modular decomposition and its applications).
In particular, it was shown in \cite{BL03} that the WID problem can be solved in polynomial time in
$\mathcal{Z}$ whenever it is polynomially solvable for prime graphs in $\mathcal{Z}$.
In our solution, we will use homogeneous sets in order to reduce the problem from a graph $G$ to two proper induced subgraphs of $G$ as follows.
Let $M \subset V$ be a homogeneous set in $G$. Denote by $H$ the graph obtained from $G$ by contracting $M$ into a single vertex $m$ (or equivalently,
by removing all but one vertex $m$ from $M$). We define the weight function $w'$ on the vertices of $H$ as follows:
$w'(v) = w(v)$ for every $v \ne m$, and $w'(m) = id_w(G[M])$. Then it is not difficult to see that
\begin{equation}
id_w(G) = id_{w'}(H).
\end{equation}
In other words, to solve the problem for $G$ we first solve the problem for
the subgraph $G[M]$, construct a new weighted graph $H$, and solve the problem for the graph $H$.
\subsubsection{Antineighborhood decomposition}
\label{subsec:anti}
One of the simplest branching algorithms for the maximum weight independent set problem
is based on the following obvious fact. For any graph $G=(V,E)$ and any vertex $v \in V$,
$$
is_w(G) = \max \{ is_w(G - N(v)), is_w(G - v) \},
$$
where $w$ is a weight function on the vertices of $G$, and $is_w(G)$ stands for the maximum weight of
an independent set in $G$. We want to use a similar branching rule for the WID problem, i.e.
\begin{equation}\label{eq:anti_WID}
id_w(G) = \min \{ id_w(G - N(v)), id_w(G - v) \}.
\end{equation}
However, formula (\ref{eq:anti_WID}) is not necessarily true, because
an independent dominating set in the graph $G - v$ is not necessarily dominating in the whole graph $G$.
To overcome this difficulty, we introduce the following notion.
\begin{definition}
A vertex $v$ is {\em permissible} if formula (\ref{eq:anti_WID}) is valid for $v$.
\end{definition}
An obvious sufficient condition for a vertex to be permissible can be stated as follows:
if every independent dominating set in $G - v$ contains at least one neighbour of $v$, then $v$ is permissible.
Applying (\ref{eq:anti_WID}) to a permissible vertex $v$ of $G$, we reduce the problem from $G$ to two subgraphs $G - v$ and $G - N(v)$.
Such a branching procedure results in a decision tree.
In general, this approach does not provide a polynomial-time solution, since the decision tree may have exponentially many nodes (subproblems).
However, under some conditions this procedure may lead to a polynomial-time algorithm. In particular, this is true for graphs in hereditary classes
possessing the following property.
\begin{definition}\label{def:2}
A graph class ${\cal G}$ has the {\em antineighborhood property}
if there is a subclass ${\cal F} \subseteq {\cal G}$, and polynomial algorithms $P, Q$ and $R$, such that
\begin{enumerate}
\item[(i)] Given a graph $G$, the algorithm $P$ decides whether $G$ belongs to ${\cal F}$ or not;
\item[(ii)] $Q$ finds a permissible vertex $v$ in any input graph $G \in {\cal G} \setminus {\cal F}$
such that the graph $G-N(v)$ induced by the antineighborhood of $v$ belongs to ${\cal F}$;
we call $v$ a {\em good vertex};
\item[(iii)] $R$ solves the WID problem for (every induced subgraph of) any input graph from ${\cal F}$.
\end{enumerate}
\end{definition}
Directly from the definition we derive the following conclusion.
\begin{theorem}\label{theo: anti}
Let ${\cal G}$ be a hereditary class possessing the antineighborhood property.
Then WID can be solved in polynomial time for graphs in ${\cal G}$.
\end{theorem}
\subsubsection{Decomposition scheme}
\label{sec:had}
Let ${\cal G}$ be a hereditary class such that the class ${\cal G}_p$ of prime graphs in ${\cal G}$
has the antineighborhood property.
We define the decomposition procedure by describing the corresponding decomposition tree $T(G)$ for
a graph $G=(V,E) \in {\cal G}$. In the description, we use notions and notations introduced in Definition~\ref{def:2}.
\begin{enumerate}
\item If $G$ belongs to ${\cal F}$, then the node of $T(G)$ corresponding to $G$ is a leaf.
\item If $G \not\in {\cal F}$ and $G$ has a homogeneous set $M$,
then $G$ is decomposed into subgraphs $G_1 = G[M]$ and $G_2 = G[(V \setminus M) \cup \{m\}]$
for some vertex $m$ in $M$.
The node of $T(G)$ corresponding to $G$ is called a \textit{homogeneous node}, and it has
two children corresponding to $G_1$ and $G_2$. These children are in turn the roots of subtrees
representing possible decompositions of $G_1$ and $G_2$.
\item If $G \not\in {\cal F}$ and $G$ has no homogeneous set, then $G$ is prime and by the
antineighborhood property of ${\cal G}_p$ there exists a good vertex $v \in V$.
Then $G$ is decomposed into subgraphs $G_1 = G - N(v)$ and $G_2 = G - v$.
The node of $T(G)$ corresponding to $G$
is called an \textit{antineighborhood node}, and it has two children corresponding to
$G_1$ and $G_2$.
The graph $G_1$ belongs to ${\cal F}$ and the node corresponding to $G_1$ is a leaf. The node
corresponding to $G_2$ is the root of a subtree representing a possible decomposition of $G_2$.
\end{enumerate}
\begin{lemma}\label{tree}
Let $G$ be an $n$-vertex graph in ${\cal G}$. Then the tree $T(G)$ contains $O(n^2)$ nodes.
\end{lemma}
\begin{proof}
Since $T(G)$ is a binary tree, it is sufficient to show that the number of internal nodes is
$O(n^2)$. To this end, we prove that the internal nodes of $T(G)$ can be labeled by
pairwise different pairs $(a,b)$, where $a,b \in V(G)$.
Let $G' = (V',E')$ be an induced subgraph of $G$ that corresponds to
an internal node $X$ of $T(G)$.
If $X$ is a homogeneous node, then $G'$ is decomposed into
subgraphs $G_1 = G'[M]$ and $G_2 = G'[(V' \setminus M) \cup \{m\}]$, where $M \subset V'$ is a homogeneous
set of $G'$ and $m$ is a vertex in $M$. In this case, we label $X$ with $(a,b)$, where
$a \in M \setminus \{m\}$ and $b \in V' \setminus M$.
If $X$ is an antineighborhood node, then $G'$ is decomposed into subgraphs
$G_1 = G' - N(v)$ and $G_2 = G' - v$, where $v$ is a good vertex of $G'$. In this case, $X$ is labeled
with $(v,b)$, where $b \in N(v)$.
Suppose, to the contrary, that there are two internal nodes $A$ and $B$ in $T(G)$ with the
same label $(a,b)$. By construction, this means that $a,b$ are vertices of both $G_A$ and $G_B$,
the subgraphs of $G$ corresponding to the nodes $A$ and $B$, respectively.
Assume first that $B$ is a descendant of $A$. The choice of the labels implies that
regardless of the type of node $A$ (homogeneous or antineighborhood), the label of
$A$ has at least one vertex that is not a vertex of $G_B$, a contradiction.
Now, assume that neither $A$ is a descendant of $B$ nor $B$ is
a descendant of $A$. Let $X$ be the lowest common ancestor of
$A$ and $B$ in $T(G)$.
If $X$ is a homogeneous node, then $G_A$ and $G_B$ can have at most one vertex in
common, and thus $A$ and $B$ cannot have the same label.
If $X$ is an antineighborhood node, then one of its children is a leaf, contradicting to the
assumption that both $A$ and $B$ are internal nodes.
\end{proof}
\begin{lemma}\label{lem:construct}
Let $G$ be an $n$-vertex graph in ${\cal G}$. If time complexities of the algorithms $P$ and $Q$ are
$O(n^p)$ and $O(n^q)$, respectively, then $T(G)$ can be constructed in time
$O(n^{2 + \max\{ 2, p, q \}})$.
\end{lemma}
\begin{proof}
The time needed to construct $T(G)$ is the sum of times required to identify types of nodes of $T(G)$
and to decompose graphs corresponding to internal nodes of $T(G)$. To determine the type
of a given node $X$ of $T(G)$, we first use the algorithm $P$ to establish whether the graph
$G_X$ corresponding to $X$ belongs to ${\cal F}$ or not. In the former case $X$ is a leaf node, in the
latter case we further try to find in $G_X$ a homogeneous set, which can be performed
in $O(n+m)$ time \cite{McCSpi1999}. If $G_X$ has a homogeneous set, then $X$ is
a homogeneous node and we decompose $G_X$ into the graphs induced by the vertices in and outside
the homogeneous set, respectively. If $G_X$ does not have a homogeneous set, then $X$
is an antineighborhood node, and the decomposition of $G_X$ is equivalent to finding a
good vertex, which can be done by means of the algorithm $Q$.
Since there are $O(n^2)$ nodes in $T(G)$, the total time complexity for constructing
$T(G)$ is $O(n^{2 + \max\{ 2, p, q \}})$.
\end{proof}
Now we are ready to prove the main result of this section.
\begin{theorem}\label{theo:decomposition}
If ${\cal G}$ is a hereditary class such that the class ${\cal G}_p$ of prime graphs in ${\cal G}$
has the antineighborhood property, then the WID problem
can be solved in polynomial time for graphs in ${\cal G}$.
\end{theorem}
\begin{proof}
Let $G$ be an $n$-vertex graph in ${\cal G}$. To solve the WID problem for $G$, we construct
$T(G)$ and then traverse it bottom-up, deriving a solution for each node of $T(G)$ from the solutions
corresponding to the children of that node.
The construction of $T(G)$ requires a polynomial time by Lemma~\ref{lem:construct}.
For the instances corresponding to leaf-nodes of $T(G)$, the problem can be solved in polynomial time
by the antineighborhood property.
According to the discussion in Sections~\ref{subsec:modular} and~\ref{subsec:anti}, the solution for
an instance corresponding to an internal node can be derived from the solutions of its children
in polynomial time.
Finally, as there are $O(n^2)$ nodes in $T(G)$ (Lemma~\ref{tree}), the total running time to solve
the problem for $G$ is polynomial.
\end{proof}
\subsection{Application to $(P_5,\overline{P_5})$-free graphs}
\label{sec:P5}
In this section, we show that the WID problem can be solved efficiently for $(P_5,\overline{P_5})$-free
graphs by means of the decomposition scheme described in Section~\ref{sec:had}.
To this end, we will prove that the class of prime $(P_5,\overline{P_5})$-free graphs has the antineighborhood property.
We start with several auxiliary results. The first of them is simple and we omit its proof.
\begin{observation}\label{obs:distAdj}
Let $G=(V,E)$ be a graph, and let $W \subset V$ induce a connected
subgraph in $G$. If a vertex $v \in V \setminus W$ distinguishes $W$,
then $v$ distinguishes two adjacent vertices of $W$.
\end{observation}
\begin{proposition}\label{st:distNonadj}
Let $G=(V,E)$ be a prime graph. If a subset $W\subset V$ has at least two vertices and is not a clique,
then there exists a vertex $v \in V \setminus W$ which distinguishes two non-adjacent vertices of $W$.
\end{proposition}
\begin{proof}
Suppose, to the contrary, that none of the vertices in $V \setminus W$ distinguishes a pair of
non-adjacent vertices in $W$. If $G[W]$ has more than one connected component, then it is
easy to see that no vertex outside of $W$ distinguishes $W$. Hence,
$W$ is a homogeneous set in $G$, which contradicts the primality of $G$.
If $G[W]$ is connected, then $\overline{G[W]}$ has a connected component $C$ with at least two vertices, since
$W$ is not a clique. Then, by our assumption and Observation~\ref{obs:distAdj}, no vertex outside of $W$ distinguishes $C$.
Also, by the choice of $C$, no vertex of $W$ outside of $C$ distinguishes $C$. Therefore, $V(C)$
is a homogeneous set in $G$. This contradiction completes the proof of the proposition.
\end{proof}
\begin{lemma}\label{lem:atom}
If a $(P_5, \overline{P_5})$-free prime graph contains an induced copy of $2K_2$, then it has a
clique separator.
\end{lemma}
\begin{proof}
Let $G=(V,E)$ be a $(P_5, \overline{P_5})$-free prime graph containing an induced copy of $2K_2$.
Let $S \subseteq V$ be a minimal separator with the property that $G-S$ contains at least two non-trivial connected
components, i.e. connected components with at least two vertices. Such a separator necessarily exists, since $G$ contains an induced $2K_2$.
It follows from the choice of $S$ that
\begin{itemize}
\item $G - S$ has $k \geq 2$ connected components $C_1, \ldots, C_k$;
\item $r \geq 2$ of these components, say $C_1, \ldots, C_r$, have at least two vertices, and
all the other components $C_{r+1}, \ldots, C_k$ are trivial;
\item every vertex in $S$ has a neighbour in each of the non-trivial components
$C_1, \ldots, C_r$ (since $S$ is minimal);
\item for every $i \in \{ r+1, \ldots, k \}$, the unique vertex of the trivial component $C_i$
has a neighbour in $S$ (since $G$ is connected).
\end{itemize}
In the remaining part of the proof, we show that $G$ has a clique separator.
Let us denote
$U_i = V(C_i)$ for $i = 1, \ldots, k$. We first observe the following.
\vskip1ex
\textbf{Claim 1.} \textit{Any vertex in $S$ distinguishes at most one of the sets $U_1, \ldots, U_r$.}
\vskip1ex
\textit{Proof.} Assume $v \in S$ distinguishes $U_i$ and $U_j$ for distinct $i,j \in [r]$. Then by
Observation~\ref{obs:distAdj} $v$ distinguishes two adjacent vertices $a,b$ in $U_i$ and two adjacent
vertices $c,d$ in $U_j$. But then $a,b,v,c,d$ induce a forbidden $P_5$.
\vskip1ex
According to Claim 1, the set $S$ can be partitioned into subsets $S_0, S_1, \ldots, S_r$,
where the vertices of $S_0$
dominate every member of $\{ U_1, \ldots, U_r \}$, and for each $i \in [r]$, the vertices
of $S_i$ distinguish $U_i$ and dominate $U_j$ for all $j$ different from $i$.
Moreover, for each $i \in [r]$ the set $S_i$ is non-empty, as the graph $G$ is prime.
Now we prove two more auxiliary claims.
\vskip1ex
\textbf{Claim 2.} \textit{For $0 \leq i < j \leq r$, every vertex in $S_i$ is adjacent to every vertex in $S_j$.}
\vskip1ex
\textit{Proof.} Assume that the claim is false, i.e. there exist two non-adjacent vertices $s_i \in S_i$
and $s_j \in S_j$. By Observation \ref{obs:distAdj} there exist two adjacent vertices $a,b \in U_j$ that
are distinguished by $s_j$. But then $s_i, s_j, a, b$ and any vertex in $N(s_i) \cap U_i$ induce
a forbidden $\overline{P_5}$, a contradiction.
\vskip1ex
\textbf{Claim 3.} \textit{For $i \in [r]$, no vertex in $U_i$ distinguishes two non-adjacent
vertices in $S_i$.}
\vskip1ex
\textit{Proof.} Assume that there exists a pair of non-adjacent vertices $x,y \in S_i$ that are
distinguished by a vertex $u_i \in U_i$. Let $j \in [r] \setminus \{ i \}$, and let $s_j \in S_j$
and $u_j \in U_j \setminus N(s_j)$. Then, since $s_j$ dominates $S_i$, we have that $u_j, x, y, s_j, u_i$
induce a forbidden $\overline{P_5}$, a contradiction.
\vskip1ex
We split further analysis into two cases.
\textit{Case 1}: there is at least one trivial component in $G \setminus S$, i.e. $k > r$.
For $i \in \{ r+1, \ldots, k \}$ we denote by $u_i$ the unique vertex of $U_i$.
Let $U = \{ u_{r+1}, \ldots, u_k \}$ and let $u^*$ be a vertex in $U$ with a minimal (under inclusion)
neighbourhood. We will show that $N(u^*)$ is a clique,
and hence is a clique separator in $G$.
By Claim 2, it suffices to show that $N(u^*) \cap S_i$ is a clique for each $i \in \{0, 1, \ldots, r\}$.
Suppose that for some $i$ the set $N(u^*) \cap S_i$ is not a clique.
Then, by Proposition~\ref{st:distNonadj}, there are two nonadjacent vertices $x,y \in N(u^*) \cap S_i$
distinguished by a vertex $z \in V \setminus (N(u^*) \cap S_i)$. It follows from Claims 2 and 3
that either $z \in S_i \setminus N(u^*)$ or $z \in U$. If $z \in S_i \setminus N(u^*)$, then
$u^*, x, y, z,$ and any vertex in $U_j$, $j \in [r] \setminus \{ i \}$ induce a forbidden
$\overline{P_5}$, a contradiction.
Hence, assume that none of the vertices in $S \setminus (N(u^*) \cap S_i)$
distinguishes two nonadjacent vertices in $N(u^*) \cap S_i$.
If $z \in U$, with $z$ being nonadjacent to $x$ and adjacent to $y$, then by the minimality
of $N(u^*)$
there is a vertex $s \in N(z)$ that is not adjacent to $u^*$. Since $N(z) \subseteq S$, vertex $s$
does not distinguish $x$ and $y$. But then $x, u^*, y, z, s$ induce either a $P_5$ (if $s$ is adjacent
neither to $x$ nor to $y$) or a $\overline{P_5}$ (if $s$ is adjacent to both $x$ and $y$), a contradiction.
\vskip1ex
\textit{Case 2}: there are no trivial components in $G \setminus S$, i.e. $k = r$.
First, observe that $|S_0| \leq 1$, since $G$ is prime and no vertex outside of $S_0$ distinguishes $S_0$
(which follows from the definition of $S_0$, Claim 2 and the fact that $k = r$). Further,
Claims 2 and 3 imply that for each $i \in [r]$ no vertex in $V \setminus S_i$ distinguishes two
nonadjacent vertices in $S_i$. Therefore, applying Proposition~\ref{st:distNonadj} we conclude that
$S_i$ is a clique. Hence $S = \bigcup_{i=0}^{r} S_i$ is a clique separator in $G$.
\end{proof}
\begin{lemma}\label{lem:perm}
Let $G$ be a $(P_5, \overline{P_5})$-free prime graph containing an induced copy of $2K_2$.
Then $G$ contains a permissible antisimplicial vertex.
\end{lemma}
\begin{proof}
By Lemma \ref{lem:atom} graph $G$ has a clique separator, and therefore it also
has a minimal clique separator $S$.
Let $C_1, \ldots, C_k$, $k \geq 2$, be connected components of $G-S$, and $U_i = V(C_i)$,
$i = 1, \ldots, k$.
Since $S$ is a minimal separator, every vertex in $S$ has at least one neighbour in each of the sets
$U_1, \ldots, U_k$.
By Claim 1 in the proof of Lemma~\ref{lem:atom}, any vertex in $S$ distinguishes at most one of
the sets $U_1, \ldots, U_k$, and therefore, the set $S$
partitions into subsets $S_0, S_1, \ldots, S_k$, where the vertices of $S_0$
dominate every member of $\{ U_1, \ldots, U_k \}$, and for each $i \in [k]$ the vertices
of $S_i$ distinguish $U_i$ and dominate $U_j$ for all $j$ different from $i$.
If $S_0 \neq \emptyset$, then any vertex in $S_0$ is adjacent to all the other vertices in the graph,
and therefore it is permissible and antisimplicial. Hence, without loss of generality, assume that
$S_0 = \emptyset$ and $S_1 \neq \emptyset$.
Let $s$ be a vertex in $S_1$ with a maximal (under inclusion) neighbourhood in $U_1$.
We will show that $s$ is antisimplicial and permissible.
Suppose that the graph induced by the antineighbourhood of $s$ contains a
connected component $C$ with at least two vertices. Since $G$ is prime, by Observation~\ref{obs:distAdj} it must
contain a vertex $p$ outside of $C$ distinguishing two adjacent vertices $q$ and $t$ in $C$.
Then $p$ does not belong to $N(s) \cap U_1$, since otherwise $q, t, p, s$ together with any
vertex in $U_2$ would induce a $P_5$. Therefore, $p$ belongs to $S_1$.
Since the set $N(s) \cap U_1$ is maximal, it contains a vertex $y$ nonadjacent to $p$.
But now $t, q, p, s, y$ induce either a $P_5$ or its complement, as $y$ does not distinguish
$q$ and $t$.
This contradiction shows that every component in the graph induced by the antineighbourhood
of $s$ is trivial, i.e. $s$ is antisimplicial.
Assume now that $s$ is not permissible, i.e. there exists an independent dominating set $I$ in
$G - s$ that does not contain a neighbour of $s$. Since $s$ dominates $U_2 \cup \ldots \cup U_k$,
the set $I$ is a subset of $U_1 \setminus N(s)$. But then $I$ is not dominating, since no vertex of $U_2$ has a neighbour in $I$.
This contradiction completes the proof of the lemma.
\end{proof}
\begin{lemma}\label{lemm:anti}
The class of prime $(P_5,\overline{P_5})$-free graphs has the antineighborhood
property.
\end{lemma}
\begin{proof}
Let ${\cal F}$ be the class of $(2K_2,\overline{P_5})$-free graphs (this is a subclass of
$(P_5,\overline{P_5})$-free graphs, since $2K_2$ is an induced subgraph of $P_5$).
Clearly, graphs in ${\cal F}$ can be recognized in polynomial time.
Moreover, the WID problem can be solved in polynomial time for graphs in ${\cal F}$,
because the problem is polynomially solvable on $2K_2$-free graphs (according to \cite{BY},
these graphs have polynomially many maximal independent sets).
If a prime $(P_5,\overline{P_5})$-free graph $G=(V,E)$ does not belong to ${\cal F}$, then
by Lemma \ref{lem:perm} it contains a permissible vertex $v$ whose antineighbourhood is
an independent set, and therefore, $G - N(v) \in {\cal F}$.
It remains to check that a permissible antisimplicial vertex in $G$ can be found in polynomial time.
It follows from the proof of Lemma~\ref{lem:perm} that in a minimal clique separator of $G$
any vertex with a maximal neighbourhood is permissible and antisimplicial. A minimal
clique separator in a graph can be found in polynomial time \cite{Whitesides1981}, and therefore
the desired vertex can also be computed efficiently.
\end{proof}
Now the main result of the section follows from Theorem~\ref{theo:decomposition} and Lemma~\ref{lemm:anti}.
\begin{theorem}\label{thm:house}
The WID problem is polynomial-time solvable in the class of $(P_5,\overline{P_5})$-free graphs.
\end{theorem}
\section{WID in $(P_5, \overline{P_3+P_2})$-free graphs}
\label{sec:new}
To solve the problem for $(P_5, \overline{P_3+P_2})$-free graphs, let us introduce the following notation:
for an arbitrary graph $F$, we denote by $F^*$ the graph obtained from $F$ by adding three new vertices, say $b,c,d$,
such that $b$ dominates $F$ (i.e. is adjacent to each vertex of $F$), while $c$ is adjacent to $b$ and $d$ only (see Figure~\ref{fig:coP5star} for an illustration in the case $F=\overline{P}_5$).
The importance of this notation is due to the following result proved in \cite{LozMosPur2015}.
\begin{theorem}\label{th:P5Fstar}
Let $F$ be any connected graph. If the WID problem can be solved in polynomial time for
$(P_5,F)$-free graphs, then this problem can also be solved in polynomial time for $(P_5, F^*)$-free
graphs.
\end{theorem}
This result together with Theorem~\ref{thm:house} leads to the following conclusion.
\begin{corollary}\label{cor:star}
The WID problem is polynomial-time solvable in the class of $(P_5,\overline{P_5}^*)$-free graphs.
\end{corollary}
To solve the problem for $(P_5, \overline{P_3+P_2})$-free graphs, in this section we reduce it to $(P_5, \overline{P_3+P_2}, \compPfiveStar)$-free graphs, where the problem is solvable in polynomial time by Corollary~\ref{cor:star}.
Let $G$ be a $(P_5, \overline{P_3+P_2})$-free graph containing a copy of $\overline{P_5}^*$ induced by vertices
$a_1, a_2, a_3, a_4, a_5, b, c, d$, as shown in Figure~\ref{fig:coP5star}.
\begin{figure}
\caption{The graph $\overline{P_5}^*$}
\label{fig:coP5star}
\end{figure}
\noindent
Denote by $U$ the set of vertices in $G$ that have at
least one neighbour in $\{ a_1, a_2, a_3, a_4, a_5\}$, that is, $U = N(a_1) \cup \ldots \cup N(a_5)$.
In particular, $\{ a_1, a_2, a_3, a_4, a_5,b\}$ is a subset of $U$.
We assume that
\begin{itemize}
\item[(**)] the copy of $\overline{P_5}^*$ in $G$ is chosen in such a way that $U$ has the minimum
number of elements.
\end{itemize}
Now we prove several auxiliary results about the structure of $G$.
\begin{proposition}\label{prop:U2sep}
If a vertex $x \in U$ has a neighbour $y$ outside of $U$, then $x$ is adjacent to each of the vertices
$a_1, a_2, a_3, a_4$.
\end{proposition}
\begin{proof}
Let $A = \{ a_1, a_2, a_3, a_4 \}$.
Note that if $x$ is adjacent to $a_5$, then it must be adjacent to at least one vertex in $A$,
since otherwise a forbidden $P_5$ arises.
If $x$ is adjacent to exactly one or to exactly two adjacent vertices in $A$, then $\{ x,y \} \cup A$
induces a subgraph containing a forbidden $P_5$.
If $x$ is adjacent to exactly two non-adjacent vertices in $A$, say $a_1$ and $a_3$, then $x$
must be adjacent to $a_5$, since otherwise $y,x,a_3,a_2,a_5$ induce a $P_5$. But this is
impossible, since in this case $x,a_1,a_2,a_3,a_5$ induce a $\overline{P_3+P_2}$.
Finally, if $x$ has exactly three neighbours in $A$, then $\{x\} \cup A$ induces a forbidden
$\overline{P_3+P_2}$. Therefore, $x$ must be adjacent to every vertex in $A$.
\end{proof}
\noindent
Taking into account Proposition~\ref{prop:U2sep}, we partition the set $U$ into three subsets as follows:
\begin{itemize}
\item[$U_1$] consists of the vertices of $U$ that are adjacent to each of the vertices $a_1, a_2, a_3, a_4$,
and have at least one neighbour outside of $U$;
\item[$U_2$] consists of the vertices of $U$ that are adjacent to each of the vertices $a_1, a_2, a_3, a_4$,
but have no neighbours outside of $U$;
\item[$U_3$]$= U \setminus (U_1 \cup U_2)$.
\end{itemize}
Notice that $U_1$ is non-empty as it contains $b$. Also $\{ a_1, a_2, a_3, a_4, a_5 \} \subseteq U_3$,
and no vertex in $U_3$ has a neighbour outside of $U$.
\begin{proposition}\label{prop:U2clique}
$U_1$ is a clique in $G$.
\end{proposition}
\begin{proof}
Suppose to the contrary that $U_1$ contains two non-adjacent vertices $x_1$ and $x_2$. Also, let $y_1$ and $y_2$ be neighbours of $x_1$ and $x_2$ outside of $U$,
respectively. Vertex $y_1$ is not adjacent to $x_2$, since otherwise $x_1,x_2,a_1,a_2,y_1$
induce a $\overline{P_3+P_2}$. Similarly, $y_2$ is not adjacent to $x_1$. Hence $y_1 \neq y_2$,
and therefore, to avoid a copy of $P_5$ induced by $y_1,x_1,a_1,x_2,y_2$, vertices $y_1$ and
$y_2$ must be adjacent. For the same reason, $a_5$ should be adjacent to both $x_1$ and $x_2$.
But then $x_1,x_2,a_3,a_4,a_5$ induce a copy of the forbidden $\overline{P_3+P_2}$, a contradiction.
\end{proof}
\begin{proposition}\label{prop:coP5starFree}
The graph $G[U_2 \cup U_3]$ is $\overline{P_5}^*$-free.
\end{proposition}
\begin{proof}
Suppose to the contrary that $G[U_2 \cup U_3]$ contains vertices $a_1',a_2',a_3',a_4',a_5',b',c',d'$
inducing a $\overline{P_5}^*$ (similarly to Figure~\ref{fig:coP5star}).
Since no vertex in $U_2 \cup U_3$ has a neighbour outside of $U$ in $G$, and $c',d'$ are not adjacent
to any of the vertices $a_1',a_2',a_3',a_4',a_5'$, we conclude that
$|N(a_1') \cup \ldots \cup N(a_5')| \leq |U|-2$, which contradicts the minimality of $|U|$.
\end{proof}
Now we describe a reduction from the graph $G$ with a weight function $w$ to a graph $G'$
with a weight function $w'$, where $|V(G')| \leq |V(G)|-4$, $G'$ is $(P_5, \overline{P_3+P_2})$-free, and
$id_w(G) = id_{w'}(G')$.
First, we define $G'$ as the graph obtained from $G$ by
\begin{enumerate}
\item removing the vertices of $U_3$;
\item adding edges between any two non-adjacent vertices in $U_1 \cup U_2$;
\item adding a new vertex $u$ adjacent to every vertex in $U_1 \cup U_2$.
\end{enumerate}
Clearly, $|V(G')| \leq |V(G)|-4$, as the set $U_3$ of the removed vertices contains at least 5 elements
and we add exactly one new vertex $u$.
In the next proposition, we show that the above reduction does not produce any of the forbidden subgraphs.
\begin{proposition}\label{prop:GprimeForb}
The graph $G'$ is $(P_5, \overline{P_3+P_2})$-free.
\end{proposition}
\begin{proof}
Note that the graph $G' - (U_2 \cup \{u\})$ is isomorphic to $G - (U_2 \cup U_3)$, and therefore it contains
no $P_5$ or $\overline{P_3+P_2}$ as an induced subgraph.
Hence, if $G'$ contains a forbidden subgraph, then at least one of the vertices of this subgraph
should lie in $U_2 \cup \{ u \}$.
By construction of $G'$ and the definition of $U_2$, the set $U_2 \cup \{ u \}$ is a clique, and every vertex
in this set is simplicial in $G'$.
Therefore, no vertex of $U_2 \cup \{ u \}$ can be a part of an induced copy of $\overline{P_3+P_2}$.
Also, $U_2 \cup \{ u \}$ can contain at most one vertex of an induced copy of $P_5$,
and if $U_2 \cup \{ u \}$ contains such a vertex, it must be a degree-one vertex of the $P_5$.
Suppose to the contrary that $G'$ contains a copy of $P_5$ induced by $v_1,v_2,v_3,v_4,v_5$
with $v_1 \in U_2 \cup \{ u \}$ and $\{ v_2,v_3,v_4,v_5 \} \subseteq V(G') \setminus (U_2 \cup \{ u \})$.
But then $a_1,v_2,v_3,v_4,v_5$ induce a forbidden $P_5$ in $G$, a contradiction.
\end{proof}
\noindent
Now we define a weight function $w'$ on the vertex set of $G'$ as follows:
\begin{enumerate}
\item $w'(x) = w(x)$, for every $x \in V(G') \setminus (\{ u \} \cup U_1 \cup U_2)$;
\item $w'(u) = id_w(G[U_3])$;
\item $w'(x) = w(x) + id_w(G[U \setminus N[x]])$, for every $x \in U_1$;
\item $w'(x) = w(x) + id_w(G[U \setminus (U_1 \cup N[x])])$, for every $x \in U_2$.
\end{enumerate}
\begin{lemma}\label{lem:polyGprime}
Given a weighted graph $(G,w)$, the weighted graph $(G',w')$ can be constructed in polynomial time.
\end{lemma}
\begin{proof}
To construct $G'$ we need to find in $G$ an induced copy of $\overline{P_5}^*$ that minimizes $|U|$.
Clearly, this can be done in polynomial time.
To show that $w'$ can be computed in polynomial time we observe that each of the graphs
$G[U_3]$,
$G[U \setminus (U_1 \cup N[x])]$ for $x \in U_2$, and
$G[U \setminus N[x]]$ for $x \in U_1$ is an induced subgraph of $G[U_2 \cup U_3]$.
This observation together with Proposition~\ref{prop:coP5starFree} and Corollary~\ref{cor:star}
imply the desired conclusion and finish the proof of the lemma.
\end{proof}
Now let us show that $id_w(G) = id_{w'}(G')$.
For this, we will need two auxiliary propositions.
\begin{proposition}\label{prop:U3}
Any independent dominating set in $G[U_3]$ dominates $U_1 \cup U_2$.
\end{proposition}
\begin{proof}
Let $A = \{ a_1, a_2, a_3, a_4 \}$, and
let $I$ be an independent dominating set in $G[U_3]$.
If $I$ contains at least one of the vertices from $A$, then $I$ dominates $U_1 \cup U_2$,
so we assume that $I \subseteq U_3 \setminus A$. Note that a vertex $x \in U_3 \setminus A$
has at most two neighbours in $A$. Indeed, $x$ cannot have four neighbours by the definition of $U_3$,
and it cannot have three neighbours, since otherwise $\{x\} \cup A$ induces a forbidden
$\overline{P_3+P_2}$.
Now, if $I$ contains a vertex $x \in U_3 \setminus A$ that is adjacent to
$a_1$ and $a_3$, then $I$ dominates $U_1 \cup U_2$, since otherwise $x$ together with
$a_1, a_2, a_3$ and a non-neighbour of $x$ in $U_1 \cup U_2$ induce a forbidden
$\overline{P_3+P_2}$.
Assume that $I$ contains none of the above vertices. Then there exist vertices
$x,y \in I$ such that $x$ is adjacent to $a_1$ and non-adjacent to $a_3$, and $y$
is adjacent to $a_3$ and non-adjacent to $a_1$. If $I$ does not dominate $U_1 \cup U_2$,
then there exists a vertex $z \in U_1 \cup U_2$ that is adjacent neither to $x$ nor to $y$.
But then $x,a_1, z, a_3, y$ induce a forbidden $P_5$.
\end{proof}
\begin{proposition}\label{prop:U1}
For every vertex $x \in U_2$, any independent dominating set in the graph $G - U$ dominates
$U_1 \setminus N(x)$.
\end{proposition}
\begin{proof}
Suppose to the contrary that there exists an independent dominating set $I$ in the graph $G-U$
that does not dominate a vertex $y \in U_1 \setminus N(x)$.
By the definition of $U_1$, vertex $y$ has a neighbour $z$ in $V(G) \setminus U$.
Since $I$ is dominating in $G-U$, there exists a vertex $v \in I$ that is adjacent to $z$.
But then $v,z,y,a_1,x$ induce a forbidden $P_5$, a contradiction.
\end{proof}
\begin{lemma}\label{lem:weight}
For any weighted graph $(G,w)$, we have $id_w(G) = id_{w'}(G')$.
\end{lemma}
\begin{proof}
First, we show that $id_w(G) \geq id_{w'}(G')$.
Let $I$ be an independent dominating set of the minimum weight in $G$. We distinguish between
the following three cases:
\begin{enumerate}
\item $I \cap U_1 \neq \emptyset$. \\
By Propositions~\ref{prop:U2sep} and \ref{prop:U2clique}, the set $U_1$ is a clique separating
$V(G) \setminus U$ from $U \setminus U_1$. Therefore, $I$ has only one element in $U_1$,
say $x$, and:
$$
id_w(G) = w(x) + id_w(G[U \setminus N[x]]) + id_w(G - (U \cup N[x])).
$$
Consequently
$$
id_w(G) = w'(x) + id_{w'}(G' - N[x]) \geq id_{w'}(G').
$$
\item $I \cap U_1 = \emptyset$ and $I \cap U_2 \neq \emptyset$. \\
Let $x \in I \cap U_2$. Then using Proposition~\ref{prop:U1}
$$
id_w(G) = w(x) + id_w(G[U \setminus (U_1 \cup N[x])]) + id_w(G - U) =
w'(x) + id_{w'}(G' - N[x]) \geq id_{w'}(G').
$$
\item $I \cap (U_2 \cup U_1) = \emptyset$. \\
In this case, taking into account Proposition \ref{prop:U3}, we conclude that
$$
id_w(G) = id_w(G[U_3]) + id_w(G - U) = w'(u) + id_{w'}(G' - N[u]) \geq id_{w'}(G').
$$
\end{enumerate}
Let us now prove the reverse inequality $id_w(G) \leq id_{w'}(G')$.
Let $I$ be an independent dominating set of the minimum weight in $G'$. Since $u$ does not have
neighbours outside of $U_1 \cup U_2$, and $\{ u \} \cup U_1 \cup U_2$ is a clique in $G'$, the set $I$
has exactly one element in $\{ u \} \cup U_1 \cup U_2$, which we denote by $x$.
Similarly to the first part of the proof, we consider three cases:
\begin{enumerate}
\item $x \in U_1$. \\
In this case
$$
id_{w'}(G') = w'(x) + id_{w'}(G' - N[x]) = w(x) + id_{w}(G[U \setminus N[x]]) +
id_w(G - (U \cup N[x])) \geq id_w(G).
$$
\item $x \in U_2$.\\
In this case, by Proposition~\ref{prop:U1},
$$
id_{w'}(G') = w'(x) + id_{w'}(G'-N[x]) =
w(x) + id_{w}(G[U \setminus (U_1 \cup N[x]) ]) + id_w(G-U) \geq id_w(G).
$$
\item $x = u$.\\
In this case, by Proposition~\ref{prop:U3},
$$
id_{w'}(G') = w'(x) + id_{w'}(G'-N[x]) = id_w(G[U_3]) + id_w(G-U) \geq id_w(G).
$$
\end{enumerate}
\end{proof}
\noindent
Now we are ready to prove the main result of this section.
\begin{theorem}
The WID problem is solvable in polynomial time for $(P_5, \overline{P_3+P_2})$-free graphs.
\end{theorem}
\begin{proof}
Let $(G,w)$ be an $n$-vertex $(P_5, \overline{P_3+P_2})$-free weighted graph.
If $G$ contains an induced copy of $\overline{P_5}^*$, then by Proposition~\ref{prop:GprimeForb}, and
Lemmas~\ref{lem:polyGprime} and~\ref{lem:weight}, the graph $(G,w)$ can be transformed in polynomial
time into a $(P_5, \overline{P_3+P_2})$-free weighted graph $(G',w')$ with at most $n-4$ vertices such that
$id_w(G) = id_{w'}(G')$.
Repeating this procedure at most $\lfloor n/4 \rfloor$ times we obtain a $(P_5, \overline{P_3+P_2}, \compPfiveStar)$-free weighted
graph $(H,\sigma)$ such that $id_w(G) = id_{\sigma}(H)$.
By Corollary~\ref{cor:star} the WID problem for $(H,\sigma)$ can be solved in polynomial time.
Finally, it is not difficult to see that a polynomial-time procedure computing $id_w(G)$ can be easily transformed into
a polynomial-time algorithm finding an independent dominating set of weight $id_w(G)$.
\end{proof}
\section{Concluding remarks and open problems}
In this paper, we proved that \textsc{weighted independent domination} can be solved
in polynomial time for $(P_5,\overline{P}_5)$-free graphs and $(P_5, \overline{P_3+P_2})$-free graphs.
A natural question to ask is whether these results can be extended to a class defined by one forbidden induced subgraph.
From the results in \cite{BL03} it follows that in the case of one forbidden induced subgraph $H$ the problem is solvable
in polynomial time {\it only if} $H$ is a linear forest, i.e. a graph every connected component of which is a path.
On the other hand, it is known that this necessary condition is not sufficient, since {\sc independent domination}
is NP-hard in the class of $2P_3$-free graphs. This follows from the fact that all sat-graphs are $2P_3$-free \cite{Zverovich06}.
In the case of a {\it disconnected} forbidden graph $H$, polynomial-time algorithms to solve {\sc weighted independent domination} are known only for $mP_2$-free graphs for any fixed value of $m$.
This follows from a polynomial bound on the number of maximal independent sets in these graphs \cite{BY}.
The unweighted version of the problem can also be solved for $P_2+P_3$-free graphs \cite{LozMosPur2015}.
However, for weighted graphs in this class the complexity status of the problem is unknown.
\begin{problem}
Determine the complexity status of {\sc weighted independent domination} in the class of $P_2+P_3$-free graphs.
\end{problem}
In the case of a {\it connected} forbidden graph $H$, i.e. in the case when $H=P_k$, the complexity status is known for $k\ge 7$
(as $P_7$ contains a $2P_3$) and for $k\le 4$ (as $P_4$-free graphs are precisely the cographs). Therefore,
the only open cases are $P_5$-free and $P_6$-free graphs.
As we mentioned in the introduction, the related problem of finding a maximum weight independent set (WIS) has been recently solved for $P_5$-free graphs \cite{P5}.
This result makes the class of $P_5$-free graphs of particular interest for {\sc weighted independent domination}
and we formally state it as an open problem.
\begin{problem}
Determine the complexity status of {\sc weighted independent domination} in the class of $P_5$-free graphs.
\end{problem}
We also mentioned earlier that a polynomial-time solution for WIS
in a hereditary class $\cal X$ does not necessarily imply the same conclusion for WID in $\cal X$.
However, in the reverse direction such examples are not known. We believe that such examples do not exist and
propose this idea as a conjecture.
\begin{conjecture}
If WID admits a polynomial-time solution in a hereditary class $\cal X$, then so does WIS.
\end{conjecture}
\end{document} |
\begin{document}
\title
{Counterexamples to a conjecture of Merker on 3-connected cubic planar graphs with a large cycle spectrum gap}
\author{
{\sc Carol T. ZAMFIRESCU\footnote{Department of Applied Mathematics, Computer Science and Statistics, Ghent University, Krijgslaan 281 - S9, 9000 Ghent, Belgium and Department of Mathematics, Babe\c{s}-Bolyai University, Cluj-Napoca, Roumania; e-mail address: \emph{[email protected]}}}}
\date{}
\maketitle
\begin{center}
\begin{minipage}{125mm}
{\bf Abstract.} Merker conjectured that if $k \ge 2$ is an integer and $G$ a 3-connected cubic planar graph of circumference at least $k$, then the set of cycle lengths of $G$ must contain at least one element of the interval $[k, 2k+2]$. We here prove that for every even integer $k \ge 6$ there is an infinite family of counterexamples.
{\bf Key words.} Cycles; Cycle spectrum; 3-connected; Cubic; Planar graphs
\textbf{MSC 2020.} 05C38, 05C10
\end{minipage}
\end{center}
\section{Introduction}
For a graph $G$, we denote by ${\cal C}(G)$ the set of lengths of cycles in $G$, i.e.\ its \emph{cycle spectrum}. The \emph{circumference} of $G$ is the length of a longest cycle in $G$. Merker~\cite{Me21} recently proved that for any non-negative integer $k$ every 3-connected cubic planar graph $G$ of circumference at least $k$ satisfies ${\cal C}(G) \cap [k, 2k+9] \ne \emptyset$. He conjectured that for any integer $k \ge 2$ and any 3-connected cubic planar graph $G$ of circumference at least $k$, we have ${\cal C}(G) \cap [k, 2k+2] \ne \emptyset$. We shall abbreviate this conjecture of Merker with ($\dagger$).
By Euler's formula, every cubic plane graph contains a face of length 3, 4, or 5, so ($\dagger$) holds for $k \in \{ 2, 3 \}$. Suppose ($\dagger$) is untrue for $k = 5$. Then there exists a 3-connected cubic plane graph $G$ of circumference at least~$5$ with ${\cal C}(G) \cap [5, 12] = \emptyset$. Any 3- or 4-cycle in $G$ must be the boundary of a face of $G$, and any two faces in $G$ of size 3 or 4 are disjoint since $5 \notin {\cal C}(G)$ and $6 \notin {\cal C}(G)$. We contract every triangle and every quadrilateral of $G$ to a vertex and obtain the graph $G'$. If we exclude 3- and 4-cycles, cycles in $G$ have length at least 13, so $G'$ is a planar 3-connected graph with no cycle of length less than 7 (as on any $\ell$-cycle $C$ of $G$, $C$ shares at most $\lfloor \ell/2 \rfloor$ edges with a 3- or 4-cycle), a contradiction. The argument for $k = 4$ is very similar (and simpler). This yields that ($\dagger$) holds for $k \in \{ 2, 3, 4, 5 \}$. However, we now show that for any even integer $k \ge 6$ there is an infinite family of counterexamples to ($\dagger$).
\section{Result}
\noindent \textbf{Theorem.} \emph{For any even integer $k \ge 6$ there exists an infinite family of $3$-connected cubic planar graphs of circumference at least $k$ whose cycle spectrum contains no element of $[k,2k+2]$.}
\noindent \emph{Proof.} Consider the graph $H$ depicted in Fig.~1. Its left-most and right-most parts should be identified in the obvious way, where the boundary cycles of the two faces incident only with pentagons (top and bottom of Fig.~1) may have any length of at least $2r + 8$ (this yields the advertised infinite family). The vertices of $H$ are either black or white, as illustrated in Fig.~1.
\begin{center}
\includegraphics[height=70mm]{fig1}\\[1mm]
Figure 1: The graph $H$.
\end{center}
Consider the operations $A$ and $B$ defined in Fig.~2. We shall call a \emph{rung} any edge depicted as a horizontal line-segment in Fig.~2. In each operation, we replace a cubic vertex with the plane graph $A_r$ and $B_r$ (in which we ignore the three dangling edges), respectively, where $r$ denotes the number of rungs.
\begin{center}
\includegraphics[height=36mm]{fig2}\\[1mm]
Figure 2: Operations $A$ (left-hand side) and $B$ (right-hand side).
\end{center}
Using operations $A$ and $B$, replace in $H$ each black vertex with a copy of $A_{r+2}$ and each white vertex with a copy of $B_r$, respecting the orientations given in Fig.~1 by the numbers 1, 2, 3. We obtain a planar graph $G$ that is clearly 3-connected and cubic. The circumference of $A_{r+2}$ and $B_r$ is $2r+5$. By construction, any cycle in $G$ of length greater than $2r+5$ has length at least $4r+15$, which is the length of the cycle bounding the face $F$ and also of the cycle bounding the face $F'$. Thus, for every $\ell \in \{ 2r+6, \dots, 4r+14 \}$, the graph $G$ contains no cycle of length $\ell$. Setting $k := 2r+6$, the proof is complete, since $G$ clearly has circumference at least $k$.
$\Box$
Merker proves in~\cite{Me21} that for every integer $k \ge 4$ there exists a 3-connected cubic planar graph $G$ of circumference at least~$k$ which satisfies ${\cal C}(G) \cap [k,2k+1] = \emptyset$. In order to illustrate the construction yielding this result, Merker provides an example in~\cite[Fig.~2]{Me21}, which we will call $G$. We point out that, despite indeed explaining the construction method, this graph $G$ is not well chosen: $G$ does not satisfy the conditions Merker himself sets out and in consequence, there exists no positive integer $k \le |V(G)|$ ($G$ is hamiltonian) such that ${\cal C}(G) \cap [k, 2k+1] = \emptyset$. However, his proof is correct, only that $n$ (as defined in Merker's proof) must be chosen large enough in relation to $k$, as he himself states.
\noindent \textbf{Acknowledgements.} I thank Nico Van Cleemput for comments which improved the presentation of the above results. My research was supported by a Postdoctoral Fellowship of the Research Foundation Flanders (FWO).
\end{document} |
\begin{document}
\title[Brauer group of the Moduli spaces of ${\rm PGL}_r({\mathbb
C})$--bundles]{Unramified Brauer group of the moduli spaces of ${\rm PGL}_r({\mathbb C})$--bundles over
curves}
\author[I. Biswas]{Indranil Biswas}
\address{School of Mathematics, Tata Institute of Fundamental
Research, Homi Bhabha Road, Bombay 400005, India}
\email{[email protected]}
\author[A. Hogadi]{Amit Hogadi}
\address{School of Mathematics, Tata Institute of Fundamental
Research, Homi Bhabha Road, Bombay 400005, India}
\email{[email protected]}
\author[Y. I. Holla]{Yogish I. Holla}
\address{School of Mathematics, Tata Institute of Fundamental
Research, Homi Bhabha Road, Bombay 400005, India}
\email{[email protected]}
\subjclass[2000]{14H60, 14E08, 14F22}
\keywords{Semistable projective bundle, moduli space,
rationality, Brauer group, Weil pairing}
\date{}
\begin{abstract}
Let $X$ be an irreducible smooth complex projective curve of
genus $g$, with $g\,\geq\, 2$. Let $N$ be a connected component of the moduli space of
semistable principal ${\rm PGL}_r(\mathbb C)$--bundles over $X$; it is a normal
unirational complex projective variety. We prove that the Brauer group of a desingularization of $N$ is
trivial.
\end{abstract}
\maketitle
\section{Introduction}
Let $X$ be an irreducible smooth complex projective curve, with
$\text{genus}(X)\, =\, g\, \geq\, 2$. For a fixed line bundle
$\mathcal L$ over $X$, let $M_X(r, {\mathcal L})$
be the coarse moduli space of semistable vector bundles over $X$ of rank
$r$ and determinant $\mathcal L$. It is a normal
unirational complex projective variety, and if $\text{degree}({\mathcal
L})$ is coprime to $r$, then $M_X(r, {\mathcal L})$ is known to
be rational \cite{Ne}, \cite{KS}. Apart from this coprime case, and
the single case of $g\,=\, r\, =\,\text{degree}(\mathcal L)\,=\,2$
when $M_X(r, {\mathcal L})\,=\, {\mathbb P}^3_{{\mathbb C}}$,
the rationality of $M_X(r, {\mathcal L})$
is an open question in every other case. See \cite{Ho} for
rationality of some other types of moduli spaces associated to $X$.
We consider the coarse moduli space $N_X(r,d)$ of
semistable principal ${\rm PGL}_r(\mathbb C)$--bundles of topological type $d$ over $X$. Recall that a ${\rm PGL}_r(\mathbb C)$ bundle $P/X$ is said to be of topological type $d$ if the associated ${\mathbb P}^{r-1}$-bundle is isomorphic to ${\mathbb P}{\rm roj}(E)$ for some rank $r$ vector bundle $E$ whose degree is congruent to $d$ modulo $r$.
This $N_X(r,d)$ is an irreducible normal unirational
complex projective variety. This paper is a sequel to \cite{BHH}, where we investigate the Brauer group of
desingularizations of moduli spaces attached to curves. This Brauer group is a birational invariant of the space and its vanishing is a necessary condition for the space involved to be rational.
In this paper we prove that the Brauer group of a desingularization
of $N_X(r,d)$ is zero (see Theorem \ref{theorem}).
When $g\,=\, 2$, the moduli space $N_X(2,0)$ is a
quotient of ${\mathbb P}^3_{{\mathbb C}}$ by a faithful action of the
abelian group $({\mathbb Z}/2{\mathbb Z})^4$. In this special case it follows that the quotient is rational.
\section{Preliminaries}\label{pril}
We continue with the above set--up and notation.
Let $N_X(r,d)$ denote the coarse moduli space of S--equivalence
classes of all semistable principal
$\text{PGL}_r(\mathbb C)$--bundles of topological type $d$ over $X$. For notational
convenience, $N_X(r,d)$ will also be denoted by $N$.
Let ${M}_X(r,{\mathcal L}_X)$ be the coarse moduli space of
S-equivalence classes of semistable vector bundles over $X$ of
rank $r$ and determinant ${\mathcal L}_X$.
Let $\Gamma$ be the group of all isomorphism classes
of algebraic line bundles $\tau$ over $X$ such that
$\tau^{\otimes r}\, =\, {\mathcal O}_X$. This group $\Gamma$
has the following natural action on ${M}_X(r,{\mathcal L}_X)$:
the action of any $\tau\, \in\, \Gamma$ sends any $E\, \in\,
{M}_X(r,{\mathcal L}_X)$ to $E\, \otimes \, \tau$. The moduli
space $N$ is identified with the quotient variety
${M}_X(r,{\mathcal L}_X)/\Gamma$. Let
\begin{equation}\label{4f}
f\,:\,{M}_X(r,{\mathcal L}_X)\, \longrightarrow\,
{M}_X(r,{\mathcal L}_X)/\Gamma\,=\, N
\end{equation}
be the quotient morphism.
For notational convenience, the moduli space $M_X(r,{\mathcal
L}_X)$ will also be denoted by $M_{{\mathcal L}_X}$.
Let
\begin{equation}\label{ress}
M_{{\mathcal L}_X}^{\rm st}\,\subset\, M_{{\mathcal L}_X}
~\, ~ \text{ and }~\, ~ N^{\rm st}\,\subset\, N
\end{equation}
be the loci of stable bundles. The above action of
$\Gamma$ on $M_{{\mathcal O}_X}$
preserves $M_{{\mathcal O}_X}^{\rm st}$, and
$$
f(M_{{\mathcal O}_X}^{\rm st})\,=\, N^{\rm st}\, .
$$
\section{The action of $\Gamma$}
Consider the action of $\Gamma$ on $M_{{\mathcal O}_X}$ defined
in Section \ref{pril}.
For any primitive $\tau\, \in\, \Gamma$, i.e.\ an element of order $r$, let
\begin{equation}\label{is}
M^\tau_{{\mathcal O}_X}\, =\, \{E\, \in\, M_{{\mathcal O}_X}\,
\mid\, E\otimes \tau\, =\, E\}\, \subset\, M_{{\mathcal O}_X}
\end{equation}
be the fixed point locus.
Take any nontrivial line bundle $\tau\, \in\, \Gamma$ of order $r$.
Let
\begin{equation}\label{res-phi}
\phi\, :\, Y \, \longrightarrow\, X
\end{equation}
be the \'etale cyclic covering of degree $r$ given by $\tau$. We
recall the construction of $Y$ as the spectral cover associated to the equation $\tau ^r \cong {\mathcal O}_X$.
Let
$$
\beta\, :\, Y\, \longrightarrow\, Y
$$
be a nontrivial generator of the Galois group
$\text{Gal}(\phi)\,=\, {\mathbb Z}/r {\mathbb Z}$ of the
covering $\phi$. The homomorphism
$\xi\, \longmapsto\, \beta^*\xi$ defines an action of
$\text{Gal}(\phi)$ on $\text{Pic}^d(Y)$ for any $d$.
Let
\begin{equation}\label{pull-back}
\phi^*:\, {\rm Pic}^0(X) \, \longrightarrow \, {\rm Pic}^0(Y)
\end{equation}
be the pullback homomorphism $L\, \longmapsto\, \phi^*L$. Let
$K$ denote the kernel of $\phi^*$; it
is a group of order $r$ generated by $\tau$. Let
\begin{equation}\label{norm0}
{\rm Nm}: \, {\rm Pic}^d (Y) \, \longrightarrow\, {\rm Pic}^d(X)
\end{equation}
and
\begin{equation}\label{norm}
{\rm N}: \, {\rm Pic}^d (Y) \, \longrightarrow\, {\rm Pic}^d(X)
\end{equation}
be the norm homomorphism and the twisted norm morphism. We recall that ${\rm Nm}$ takes a line bundle $\xi$ to the descent of $\bigotimes_{i=0}^{r-1} (\beta^{*i}{\xi})$ and
${\rm N}$ sends a line bundle $\xi$ to ${\rm Nm}(\xi)\otimes \tau^{r(r-1)/2}$.
The group $\Gamma$ has a natural action on ${\rm Pic}^d (Y)$; any
$\sigma\, \in\, \Gamma$ acts as the automorphism $\xi\,
\longmapsto\, \xi\otimes \phi^*\sigma$. Therefore, $\phi^*$
in \eqref{pull-back} is $\Gamma$--equivariant, and the
kernel $K$ acts trivially on ${\rm Pic}^d(Y)$. The morphism
${\rm N}$ in \eqref{norm} factors through the quotient morphism
${\rm Pic}^d(Y)\,\longrightarrow\,{\rm Pic}^d(Y)/\Gamma$. The action of
$\Gamma$ on ${\rm Pic}^d (Y)$ clearly commutes with the action of
$\text{Gal}(\phi)$ defined earlier.
Let
\begin{equation}\label{recu}
{\mathcal U}_{{\mathcal L}_X}\, :=\,{\rm N}^{-1}({\mathcal L}_X)\setminus
({\rm N}^{-1}({\mathcal L}_X))^{{\rm Gal}(\phi)}
\, \subset \, {\rm Nm}^{-1}({\mathcal L}_X)
\end{equation}
be the complement of the fixed point locus for the
action of $\text{Gal}(\phi)$. It is a
$\Gamma$--invariant open subscheme.
Now we state a well-known result (cf. \cite[Lemma 3.4]{NR}).
\begin{lem}\label{BNR}
Take any primitive line bundle $\tau\, \in\, \Gamma$.
The reduced closed subscheme $$(M^{\rm st}_{{\mathcal
L}_X})^\tau\,:=\,M^{\rm
st}_{{\mathcal L}_X}\cap M^{\tau}_{{\mathcal L}_X}\,
\subset\, M^{\rm st}_{{\mathcal L}_X}$$
(see \eqref{is} and \eqref{ress}) is
$\Gamma$--equivariantly isomorphic to the quotient scheme
$$
{\mathcal U}_{{\mathcal L}_X}/{\rm Gal}(\phi)\, .
$$
\end{lem}
\begin{lem}\label{HP}
The norm map as defined in \eqref{norm} is surjective, and
there is a bijection of the set of connected components
$\pi_0({\rm N}^{-1}({\mathcal L}_X))$ with the Cartier dual
$K^{\vee}\, :=\, {\rm Hom}(K,\, {\mathbb C}^*)\,=\,
{\mathbb Z}/r\mathbb Z$, where $K\,:=\, {\rm kernel}(\phi^*)\,
=\, {\mathbb Z}/r{\mathbb Z}$.
\end{lem}
Lemma \ref{HP} is proved in \cite{NR} (see \cite[Proposition
3.5]{NR}).
Let $V_0$ and $V_1$ be connected components of ${\rm N}^{-1}(
{\mathcal O}_X)$, with ${\mathcal O}_Y\,\in\, V_0$. Since
${\rm Nm}^{-1}({\mathcal O}_X)$ is smooth, both $V_0$
and $V_1$ are irreducible.
\begin{lem}\label{gal}
The action of ${\rm Gal}(\phi)$ on ${\rm
N}^{-1}({\mathcal O}_X)$ preserves the connected component $V_0$.
For the action of ${\rm Gal}(\phi)$ on ${\rm
N}^{-1}({\mathcal L}_X)$ the quotient ${\rm
N}^{-1}({\mathcal L}_X)/{\rm Gal}(\phi)$ has exactly $(r,d)$ components which are smooth, here $(r,d)$ is the greatest common divisor of $r$ and $d$.
\end{lem}
\begin{proof}
The point ${\mathcal O}_Y\, \in\, \text{Pic}^0(Y)$ is
fixed by ${\rm Gal}(\phi)$; hence $V_0$ is fixed by
${\rm Gal}(\phi)$. Therefore, the other component, namely $V_1$, is
also fixed by ${\rm Gal}(\phi)$. See \cite{NR}, Proposition 3.5 for the proof of the second statement.
\end{proof}
\begin{lem}\label{nss} Let $r=2$.
The set of all points in the complement $M_{{\mathcal O}_X}\setminus
M^{\rm st}_{{\mathcal O}_X}$ (see \eqref{ress}) fixed by $\tau$ is finite.
\end{lem}
\begin{proof}
Take any point $x\,\in\, M_{{\mathcal O}_X}\setminus M^{\rm
st}_{{\mathcal O}_X}$. Let $E\,= \, L\oplus L^*$, with $L\,
\in\, \text{Pic}^0(X)$, be the unique
polystable vector bundle representing
the point $x\,\in\, M_{{\mathcal O}_X}$. The action of $\tau$
takes the point $x$ to the point represented by the polystable
vector bundle $(L\otimes
\tau) \oplus (L^*\otimes\tau)$.
Assume that $\tau\cdot x\,=\,x$. Then
the two vector bundles $L\oplus L^*$ and $(L\otimes \tau) \oplus
(L^*\otimes\tau)$ are isomorphic. This implies that
\begin{equation} \label{equal}
L\otimes\tau \, \cong \, L^*
\end{equation}
(recall that $\tau$ is nontrivial; so $L\,\not=\, L\otimes\tau$). From
\eqref{equal} it follows that $L^{\otimes 2}\,=\, (L^{\otimes 2})^*$.
Consequently, isomorphism classes of all line bundles $L\, \in\,
\text{Pic}^0(X)$ satisfying \eqref{equal}, for a given
$\tau$, is a finite subset. Therefore, there
are only finitely many points of $M_{{\mathcal O}_X}\setminus M^{\rm
st}_{{\mathcal O}_X}$ that are fixed by $\tau$.
\end{proof}
\begin{rmk}\label{g2}
{\rm When genus of $X$ equals $2$, ${\rm dim}({\rm Pic}^0(Y))=3$. It follows from Lemma \ref{BNR},
that $(M^{st}_{{\mathcal O}_X})^{\tau}$ is one dimensional and hence Lemma \ref{nss} implies that
$M_{{\mathcal O}_X}^{\tau}$ is of codimension two in $M_{{\mathcal O}_X} \cong {\mathbb P}^3_{{\mathbb C}}$.
}
\end{rmk}
Let $\sigma\, \in \Gamma$ be another primitive element such that $\sigma$ and $\tau$ are ${\mathbb Z}/r{\mathbb Z}$-linearly independent. The subgroup of $\Gamma$ generated by $\sigma$ and $\tau$ will
be denoted by $A$. So $A$ is isomorphic to
$({\mathbb Z}/r{\mathbb Z})^{\oplus 2}$.
We note that $\Gamma\subset {\rm Pic}(X)$ is identified with
$H^1(X,\, {\mathbb Z}/r{\mathbb Z})\subset H^1(X,{\mathbb G}_m)$ under the
natural inclusion. Let
\begin{equation}\label{rese}
e\, :\, \Gamma\otimes \Gamma\, \longrightarrow\, {\mathbb Z}/r\mathbb Z
\end{equation}
be the pairing given by the cup product
$$
H^1(X,\, {\mathbb Z}/r{\mathbb Z})\otimes H^1(X,\, {\mathbb Z}/r{\mathbb Z})
\, \stackrel{\cup}{\longrightarrow}\,
H^2(X,\, {\mathbb Z}/r{\mathbb Z})\,=\, {\mathbb Z}/
r\mathbb Z\, .
$$
It is known that this $e$ coincides with the Weil pairing
(see \cite[p. 183]{Mu}).
\begin{prop}\label{fixed-points}
Let $\sigma$ and $\tau$ be two primitive elements of $\Gamma$ such that they generate a subgroup $A=({\mathbb Z}/r{\mathbb Z})^{\oplus 2}$.
If the pairing $e(\sigma\,, \,\tau)\, =\, 0$ then there exists a nonempty closed irreducible $A$--invariant subset of
$M_{{\mathcal O}_X}^{\rm st}$ which is fixed pointwise by $\tau$.
\end{prop}
\begin{proof}
By \cite{BP2}, Proposition 4.5, it follows that under the condition $e(\sigma, \tau)=0$ there is a stable bundle $E$ of rank $r$ and determinant ${\mathcal O}_X$ such that
$E\otimes \sigma=E\otimes \tau=E$.
The condition $E\otimes \tau=E$ implies the existence of a line bundle $\xi \in {\rm N}^{-1}({\mathcal O}_X)$ such that $ \phi _*(\xi)=E$.
The condition $E\otimes \sigma =E$ implies that there is a $\beta \in \text{Gal}(\phi)$ such that $\xi \otimes \phi ^*\sigma= \beta ^*\xi$.
Hence $\phi^*\sigma = (\beta^*\xi) \otimes \xi^{-1}$ lies in $V_0$, because $\beta^*\xi$ and $\xi^{-1}$ lie in the
same component (see both parts of the Lemma \ref{gal}).
One observes that ${\rm N}^{-1}({\mathcal L}_X)$ is $\Gamma$--equivariantly isomorphic to the translate $L \cdot {\rm Nm}^{-1}({\mathcal O}_X)$ by any line bundle $L$ such that $N(L)={\mathcal L}_X$. This along with the above fact that $\phi^*\sigma \in V_0$ implies that the translation by $\phi^*\sigma$ preserves the connected
components of ${\rm N}^{-1}({\mathcal L}_X)$.
Hence we conclude that any connected component of the
quotient ${\mathcal U}_{{\mathcal L}_X}/{\rm Gal}(\phi)\, \subset\, (M^{\rm st}_{{\mathcal O}_X})^\tau$ is a
closed irreducible $A$-invariant subscheme of $M_{{\mathcal O}_X}^{\rm st}$
which is fixed pointwise by $\tau$.
This completes the proof of the proposition.
\end{proof}
\section{Brauer group of a desingularization of $N$}
In this section we identify the second cohomology $H^2(\Gamma,\,{\mathbb C}^*)$ with the space of alternating
bi-multiplicative maps from $\Gamma$ to ${\mathbb C}^*$ (see
\cite[p. 215, Proposition 4.3]{Ra}); the group $H^2(\Gamma,\,
{\mathbb C}^*)$ is isomorphic to the dual of the second exterior
power of $({\mathbb Z}/r{\mathbb Z})^{2g}$.
Recall that under the identification of $\Gamma$ with $H^1(X, {\mathbb Z}/r{\mathbb Z})$, the Weil pairing
coincides with the intersection pairing. Let $\{a_1,b_1,\cdots, a_g,b_g\}$ be a symplectic basis for $H^1(X, {\mathbb Z}/r{\mathbb Z})$;
in other words, we have $e(a_i,a_j)=0=e(b_i,b_j)$ for all $i$ and $j$, and $e(a_i,b_j)=\delta _{i,j}$.
Let $G\, \subset\, H^2(\Gamma,\,{\mathbb C}^*)$ be defined by
$$
G\,:= \, \{ b\, \in\, H^2(\Gamma,\,{\mathbb C}^*)\, \mid \,
e(\sigma_1\, ,\,\sigma_2)\, =\, 0\, \Rightarrow\,
b(\sigma_1\, ,\,\sigma_2)\,= \,0\}\, .
$$
Let $H$ be the cyclic subgroup of $G$ generated by the Weil pairing $e$.
\begin{lem}\label{mistake}
The group $G$ coincides with the subgroup $H$.
\end{lem}
\begin{proof}
Fix an $i$ and $j$ such that $i\neq j$ then one checks that $e(a_i+b_j,a_j-b_i)=e(a_j,b_j)-e(a_i,b_i)=0$ hence if $f \in G$
then $0=f(a_i+b_j,a_j-b_i)=f(a_j,b_j)-f(a_i,b_i)$. This implies that $f$ is a multiple of $e$.
\end{proof}
Our main theorem is the following.
\begin{thm}\label{theorem}
Let ${\widehat N}$ be a desingularization of the moduli space $N$. Then the Brauer group
${\rm Br}({\widehat N})=0$
\end{thm}
\begin{proof} We first assume that either $g\,\geq \,3$, or $g=2$ and rank $r>2$. The case of $g\,=\,2$ and rank $r=2$
will be treated separately.
It is enough to prove the theorem for some desingularization $\widehat{N}$ of
$N$ because the Brauer group is a birational invariant for
smooth projective varieties. We choose a $\Gamma$--equivariant desingularization
\begin{equation}\label{p}
p\,:\, {\widetilde M}_{{\mathcal O}_X}\,\longrightarrow
\,M_{{\mathcal O}_X}
\end{equation}
which is an isomorphism over $M^{\rm st}_{{\mathcal O}_X}$; so ${\widetilde
M}_{{\mathcal O}_X}$ is equipped with an action of $\Gamma$
given by the action of $\Gamma$ on $M_{{\mathcal O}_X}$. Define
$$
{\widetilde N}\, :=\, {\widetilde M}_{{\mathcal O}_X}/\Gamma\, .
$$
Let
$$
{\widehat N}\, \longrightarrow\, {\widetilde N}
$$
be a desingularization of ${\widetilde N}$ which is an isomorphism over
the smooth locus. So ${\widehat
N}$ is also a desingularization of $N$.
A stable principal $\text{PGL}_r({\mathbb C})$--bundle $E$ on $X$
is called \textit{regularly stable} if
$$
\text{Aut}(E)\,=\, e
$$
(by $\text{Aut}(E)$ we denote the automorphisms of the
principal bundle $E$ over the identity map of $X$).
It is known that the locus of regularly stable bundles in $N$,
which we will denote by $N^{\rm rst}$, coincides with the smooth
locus of $N$ \cite[Corollary 3.4]{BHof}. Define
$$
M^{\rm rst}\, :=\, f^{-1}(N^{\rm rst})\, ,
$$
where $f$ is the morphism in \eqref{4f}. We note that the action of
$\Gamma$ on $M_{{\mathcal L}_X}$ preserves $M^{\rm rst}$, because
$f$ is invariant under the action of $\Gamma$. The
action of $\Gamma$ on $M^{\rm rst}$ can be shown to be free.
Indeed, if $E\, =\, E\otimes\tau$, where $\tau$ is nontrivial,
any isomorphism of $E$ with $E\otimes\tau$ produces a nontrivial
automorphism of ${\mathbb P}(E)$, because ${\mathbb
P}(E\otimes\tau)\,=\, {\mathbb P}(E)$. Hence such a vector bundle $E$
cannot lie in $M^{\rm rst}$.
Consequently, the projection $f$ in \eqref{4f} defines a
principal $\Gamma$--bundle
\begin{equation}\label{gfb}
M^{\rm rst}\, \stackrel{f}{\longrightarrow}\, N^{\rm rst}\, .
\end{equation}
Since $N$ is normal, and $N^{\rm rst}$ is its smooth locus, it
follows that the codimension of the complement $N\setminus
N^{\rm rst}$ is at least two. Therefore, the codimension of the
complement of $M^{\rm rst} \,\subset \, M_{{\mathcal O}_X}$ is at
least two. Hence
$$ H^0(M^{\rm rst},{\mathbb G}_m)={\mathbb C}^* $$
The Serre spectral sequence for the above principal
$\Gamma$--bundle gives an exact sequence
$$
\text{Pic}(N^{\rm rst}) \,\stackrel{\delta}{\longrightarrow}\,
\text{Pic}(M^{\rm
rst})^{\Gamma}\, \longrightarrow\, H^2(\Gamma,\, {\mathbb C}^*)\, .
$$
We have $\text{Pic}(M^{\rm rst})^{\Gamma}/{\rm image}(\delta)
\,=\, {\mathbb Z}/l{\mathbb Z}$ \cite{BH} (see (3.5) in \cite{BH} and
lines following it) where $l=(r,d)$. Hence we get an inclusion
\begin{equation}\label{res-go2}
{\mathbb Z}/l{\mathbb Z}\, \hookrightarrow\, H^2({\mathbb G}amma,\, {\mathbb C}^*)\, ,
\end{equation}
where the generator of ${\mathbb Z}/l{\mathbb Z}$ maps to the Weil pairing $e$
(see the proof of Proposition 9.1 in \cite[p. 203]{BLS}).
For the chosen desingularization $\widehat{N} \to N$, we have
\begin{equation}\label{resem}
{\rm Br}({\widehat N}) \,\subset\, {\rm Br}(N^{\rm rst})\,=\,
{\rm Br}(M^{\rm rst}/\Gamma)
\end{equation}
using the inclusion of $N^{\rm rst}$ in ${\widehat N}$. The
Brauer group ${\rm Br}(N^{\rm rst})$ is computed in \cite{BH}.
The Serre spectral sequence for the principal
$\Gamma$--bundle in \eqref{gfb} gives the following exact
sequence:
\begin{equation}\label{rho}
H^2(\Gamma,\, {\mathbb C}^*)\,\stackrel{\rho}{\longrightarrow}
\,H^2(M^{\rm
rst}/\Gamma,\,{\mathbb G}_m)\,\longrightarrow\,H^2(M^{\rm rst},\,{\mathbb G}_m)\, .
\end{equation}
Let $\mathbb S$ be the set of all bicyclic subgroups $A\,
\subset\, \Gamma$ of the form $({\mathbb Z}/r{\mathbb Z})^{\oplus 2}$ satisfying the condition that there is some closed
irreducible subvariety $\mathcal Z$ of ${\widetilde M}_{{\mathcal O}_X}$ preserved
by the action of $A$ such that a primitive element of $A$ fixes $\mathcal Z$.
Define the subgroup
$$G'\, :=\, \bigcap_{A\in{\mathbb S}}{\rm kernel}(H^2(\Gamma,\,
{\mathbb C}^*) \,\rightarrow\,H^2(A,\,{\mathbb C}^*))\, \subset
\, H^2(\Gamma,\,{\mathbb C}^*)\, .
$$
Using a theorem of Bogomolov, \cite[p. 288, Theorem 1.3]{Bo}, we have
\begin{equation}\label{i}
\rho^{-1}(H^2({\widehat N},\,{\mathbb G}_m))\,\subset\,G'
\end{equation}
(see \eqref{resem}).
We will show that $G'$ is a subgroup of $G$ in $H^2(\Gamma,\,
{\mathbb C}^*)$. This will prove that the image $\rho( \rho^{-1}(H^2({\widehat N},\,{\mathbb G}_m)))\,=\,0$.
Let $b\, \in\, G'$. We need to check that
$b(\sigma\, ,\,\tau)\, =\, 0$ whenever $e(\sigma\, ,\,\tau)\, =\, 0$ for a pair of primitive elements generating a subgroup $A\,=\,({\mathbb Z}/r{\mathbb Z})^{\oplus 2}$.
Since $e(\sigma\, ,\,\tau)\, =\, 0$, by Proposition \ref{fixed-points},
there is an irreducible closed subscheme $Z\, \subset\, M_{{\mathcal O}_X}^{\rm st}$
which is $A$--invariant and fixed pointwise by $\tau$.
Since the $\Gamma$--equivariant desingularization $p$ in \eqref{p} is an isomorphism
over $M_{{\mathcal O}_X}^{\rm st}$, we conclude that the closure of $Z$ in
${\widetilde M}_{{\mathcal O}_X}$ is an $A$-invariant closed irreducible subscheme which is
fixed pointwise by $\tau$. Hence the action of $A$ on this closure is cyclic.
This implies that $A\, \in\, {\mathbb S}$, and hence $b(\sigma\, ,\,\tau)\, =\, 0$.
Therefore $G'\, \subset\, G$.
Consequently, we have shown that $ H^2({\widehat N},\,{\mathbb G}_m) \cap {\rm image}(\rho)=0$.
This proves that the composition
\begin{equation}\label{c1}
H^2({\widehat N},\, {\mathbb G}_m) \,\longrightarrow\, H^2(M^{\rm
rst}/\Gamma,
\, {\mathbb G}_m)\,\longrightarrow \,H^2(M^{\rm rst},\, {\mathbb G}_m)
\end{equation}
is injective. We will prove that this composition
is zero (these homomorphisms are induced by the inclusion
$M^{\rm rst}/\Gamma\, \hookrightarrow\, \widehat N$ and the
quotient map to $M^{\rm rst}/\Gamma$).
Consider the diagram
$$ \xymatrix{
{\widehat M} \ar[r]\ar[d] &
{\widetilde M}_{{\mathcal L}_X} \ar[d] \\
{\widehat N}\ar[r] & {\widetilde N}}
$$
where ${\widehat M}$ is a $\Gamma$--equivariant desingularization
of the closure of
$$
M^{\rm rst}\,=\, {\widehat N}\times _{N^{\rm rst}} M^{\rm rst}$$
in the fiber product ${\widehat N}\times_{{\widetilde
N}}{\widetilde M}_{{\mathcal L}_X}$. This gives an action of
$\Gamma$ on the smooth projective variety
${\widehat M}$ which has a $\Gamma$--invariant open
subscheme $M^{\rm rst}$
with the quotient $M^{\rm rst}/\Gamma$
being the Zariski open subset $N^{\rm rst}$ of ${\widehat N}$.
Using the commutativity of $\Gamma$--actions we obtain a commutative
diagram of homomorphisms
\begin{equation}\label{dg}
\xymatrix{
H^2({\widehat N}, {\mathbb G}_m) \ar[r]\ar[d] &
H^2(N^{\rm rst}, {\mathbb G}_m) \ar[d] \\
H^2({\widehat M}, {\mathbb G}_m)\ar[r] & H^2(M^{\rm rst}, {\mathbb G}_m)}
\end{equation}
Since ${\widehat M}$ is also a desingularization of $M$, we conclude
by \cite[p. 309, Theorem 1]{Ni} (see also \cite[Theorem 1]{BHH}) that
$$
H^2({\widehat M}, \,{\mathbb G}_m)\,=\, 0\, .
$$
Hence from \eqref{dg} it follows that the image of $H^2({\widehat
N}, \,{\mathbb G}_m)$ in $H^2(M^{\rm rst},\, {\mathbb G}_m)$ by the composition
in \eqref{c1} is zero. This completes the proof when $g\,\geq\, 3$.
Now assume that $g\,=\,2$ and rank $r=2$. So $M_{{\mathcal L}_X}$ is
already smooth. We take
${\widetilde M}_{{\mathcal L}_X}\,=\, M_{{\mathcal L}_X}$. Let $M^{\rm free}
\,\subset\, M_{{\mathcal L}_X}$ be the largest Zariski
open subset where the action of $\Gamma$ is free. It follows from Remark
\ref{g2} that the complement of $M^{\rm free}$ is of codimension two. The
entire argument above works in this case after replacing $M^{\rm rst}$ by
$M^{\rm free}$ and $N^{\rm rst}$ by $M^{\rm free}/\Gamma$.
\end{proof}
\end{document} |
\begin{document}
\global\long\def\mathbb{E}{\mathbb{E}}
\global\long\def\mathbb{P}{\mathbb{P}}
\global\long\def\mathbb{N}{\mathbb{N}}
\global\long\def\mathbb{I}{\mathbb{I}}
\title{{\normalsize\tt
\jobname.tex}\\
On convergence of 1D Markov diffusions to heavy-tailed invariant density}
\author{O.A. Manita\footnote{Moscow State University, Moscow, Russia;
email: oxana.manita @ gmail.com},
A.Yu. Veretennikov\footnote{University of Leeds, UK, \& National Research University Higher School of Economics, \& Institute for Information Transmission Problems, Moscow, Russia; email: a.veretennikov @ leeds.ac.uk. For this author the work has been funded by the Russian Academic Excellence Project '5-100' (the sections 1 -- 2, the setting in the section 3 and both Lemmata) and by the Russian Science Foundation project no. 17-11-01098 (steps 1, 3, 8 -- 9 of the proof of the Theorem 1).}
}
\maketitle
\begin{abstract}
Rate of convergence is studied for a diffusion process on the half line with a non-sticky reflection to a heavy-tailed 1D invariant distribution whose density on the half line has a polynomial decay at infinity. Starting from a standard recipe which guarantees some polynomial convergence, it is shown how to construct
a new non-degenerate diffusion process on the half line which converges to the same invariant
measure exponentially fast uniformly with respect to the initial data.
\end{abstract}
Key words: 1D diffusion; invariant distribution; heavy tails; fast convergence
\noindent
MSC codes: 60H10, 60J60.
\section{Introduction}
A topical area of Markov Chain Monte Carlo (MCMC) in theoretical statistics is around the
following problem: given a fixed ``target'' density or distribution known up to a constant multiplier --
a normalizing constant -- how to construct a (Markov) process which would have this density as a (unique)
invariant one and which would converge to this invariant one with a rate that could be theoretically
evaluated? In particular, a permanent great interest in recent decades was about dealing with
``heavy-tailed'' densities with a polynomial decay at infinity.
With this problem in mind, let us consider a polynomially decreasing probability density $\pi$ on
the line \(\mathbb R^1\); in the precise setting it will be restricted to the half-line \(\mathbb R^1_+\). The question under consideration in this paper is constructing a Markov
diffusion process with invariant measure $\pi(x)dx$ such that this measure is invariant for the constructed
process and, moreover, so that an exponential convergence in total variation to the invariant
distribution holds.
This problem has certain deep relations to ergodicity and to the Perron -- Frobenius theorem for Markov
chains with finite state space, to spectral gap for semigroup generators, to upper and
lower bounds for convergence to stationarity; yet, a spectral gap in this paper is not used. The literature in this area is huge and we only mention a few important references related to the subject of the paper more or less directly (see \cite{AitSahalia},
\cite{Cattiaux}, \cite{Fort}, \cite{Kovchegov}, \cite{kulik-leonenko}, \cite{Eva},
\cite{MenshPopov}, \cite{MenshPopov2}, et al.; also, see further references therein).
The paper consists of four sections, the first one being this Introduction. In Section 2
two known recipes for constructing an SDE with a given stationary measure are shown: one is an SDE with a unit diffusion coefficient while another one is an
SDE with an affine drift. In Section 3 we state the main result of this paper, and in Section
4 its proof is provided. The construction is based on the first one of the standard recipes from
Section 2 and on a random time change. The proof uses certain recurrence type hitting time moment bounds introduced earlier in \cite{ayv_grad_drift}.
\section{Quick review: two standard recipes on $\mathbb R^1$}
\subsection{Recipe 1: SDE with a unit diffusion coefficient}
Suppose a continuous and differentiable strictly positive probability density $\pi$
on $\mathbb{R}^{1}$ decreases at infinity polynomially,
i.e. there exist constants $c>0$ and $m>1$ such that for any $x$,
\begin{equation}
c\left(1+\left|x\right|\right)^{-m}\leq
\pi\left(x\right)\leq
c^{-1}\left(1+\left|x\right|\right)^{-m}.
\label{eq:dens}
\end{equation}
Here $m>1$ is required so that the function $\pi$ is integrable; for further claims a slightly more restrictive condition \(m>3\) will be assumed in the sequel.
On a probability space $\left(\Omega,\mathcal{F},\mathbb{P}\right)$ let us
fix a standard Wiener process $W_t$ with its natural filtration $\mathcal{F}_{t}=\mathcal{F}_{t}^W$ (as usual, $\mathbb P$ -- completed).
On this probability space
consider a Langevin diffusion $Y_t$ given by an SDE
\begin{equation}\label{eq:lang}
dY_{t}=dW_{t}+b\left(Y_{t}\right)dt, \quad Y_0=\xi,
\end{equation}
with an arbitrary nonrandom initial value \(\xi\), where
\begin{equation}\label{eqb}
b(x)=\frac{1}{2}(\ln\pi(x))'.
\end{equation}
If there is no explosion then this equation possesses a strong solution \cite{Ve81}.
Random initial values will be mentioned briefly in the section \ref{sec22} and in principle could have been allowed here, too. It is assumed that two derivatives \(\pi'\) and $\pi''$ exist and that the drift \(b\) is locally bounded;
its global boundedness is not required because, as it turns out \cite{ayv_grad_drift2, ayv_grad_drift}, no blow-up is guaranteed by the
assumption (\ref{eq:dens}) on the function \(\pi\) alone (see the details below). We emphasize that despite the assumed two derivatives, the only quantitative assumption will be just on $\pi$ itself given in (\ref{eq:dens}); also there is a hypothesis that the assumption about $\pi''$ could be dropped, and this is the reason why we refer to \cite{Ve81} instead of more standard results under a local Lipschitz condition on the drift, while talking on strong solutions in the sequel.
~
The recipe (\ref{eq:lang})--(\ref{eqb}) is, actually, a continuous time analogue of one of the standard
MCMC recipes in discrete time after a suitable limit. We do not recall it because the paper does not rely upon this limiting procedure; however, this is likely to signify a possible link to MCMC algorithms in discrete time. Obviously $\pi(x)dx$ is the (unique) invariant distribution of the process $Y_t$; this
can be shown explicitly by checking the Kolmogorov equation for the invariant distribution.
The process $Y_t$ is ergodic and has a polynomial rate of convergence to the stationary
distribution with density $\pi$
\cite[Theorem 1]{ayv_grad_drift2} (under a bit more restricted conditions
see also \cite{ayv_grad_drift}), at least, if \(m\) is not too small. Note that unlike in most
of other works on convergence or mixing rates, Lyapunov functions are not used here, as they were not used in \cite{ayv_grad_drift2, ayv_grad_drift}.
For close
results for some particular distributions and for homogeneous Markov processes under
various assumptions (usually more restrictive because of explicit assumptions about
the derivative \(\pi'\)) see also \cite{Abu-Ver09, Abu-Ver09b, kulik-leonenko}; for discrete
time examples -- that is, actually, about MCMC
algorithms -- see, e.g., \cite{MP, Fort, MenshPopov2} and further references therein. Emphasize that the
assumptions
in \cite[Theorem 1]{ayv_grad_drift2} as well as in \cite[Theorem 1]{ayv_grad_drift} are
all on $\pi$ and not on~$\pi'$ except that the latter derivative exists and that \(b\) is
locally bounded. It remains to be our goal to avoid any assumptions on $\pi'$ beyond
its existence and local boundedness of \(b\) in the sequel.
Moreover, it is known that under the {\it additional assumption} about $\pi'$,
\begin{equation}\label{binfty}
\liminf_{|x| \to \infty} x b(x) =
\liminf_{|x| \to \infty} \frac{x\pi'(x)}{2\pi(x)} = -r < - 3/2,
\end{equation}
the {\em beta-mixing rate} of $Y_{t}$ is {\em no faster than polynomial, $\ge C t^{-k}$ with any $k> r-1/2$ and some $C>0$} (see the definition
and the details in \cite{2006}). The notion of beta-mixing -- which is neither defined nor discussed here in detail -- is rather close although not identical to
the convergence in total variation. Hence, and also because of close results about lower bounds for
convergence rates in \cite{Klokov_lb, MenshPopov}, it is likely that convergence of $Y$ to the stationary
distribution of $Y_t$ under (\ref{eq:dens}) is also no faster than some polynomial. The assumption (\ref{binfty}) will not be used in the sequel but was shown just for information. Recall that our aim is a faster convergence, and that we want to avoid any conditions on the derivative $\pi'$ except for its existence and local boundedness. \\
Note that in the case when for large \(|x|\) the density equals {\em exactly} \(c(1+|x|)^{-m}\), it apparently follows that we need
\(m>3\) in order to have the inequality \(r>3/2\) in (\ref{binfty}).
Yet, we will not use conditions in terms of \(\pi'\), assuming just (\ref{eq:dens}). Also, emphasize that the requirement \(m>3\) is for the quick reference on some existing earlier results. We do not claim that for \(m\le 3\) a similar analysis and asymptotics are not possible, but just that we are not aware of such asymptotics for \(m\le 3\).
Note that for the density on a half-line $\mathbb R^1_+$ a natural analogue of (\ref{eq:lang}) is the process satisfying an SDE with a non-sticky reflection at zero,
\begin{equation}\label{eq:lang2}
dY_{t}=dW_{t}+b\left(Y_{t}\right)dt + d\phi^Y_t, \quad Y_0=\xi.
\end{equation}
For the process satisfying (\ref{eq:lang2}) similar mixing and convergence bounds follow from the bounds and from the calculus quite similar to those in \cite{ayv_grad_drift} applied to the situation of the reflected SDE, or just from a consideration of an SDE (\ref{eq:lang}) with a symmetric
(\(b(-x)=b(x)\)) drift.
\subsection{Recipe 2: SDE with an affine ``mean-reverted'' drift}\label{sec22}
There is another recipe different from (\ref{eq:lang}) offered in \cite{Bibby2005}
(see also the references therein concerning some other earlier constructions): in a slightly
simplified form it suggests to consider an SDE on the line
\begin{equation}\label{sde_bibby}
dZ_t = -(Z_t-\mu) \,dt + \sqrt{v(Z_t)}\,dW_t,
\end{equation}
with an appropriate initial distribution (e.g., stationary $\pi$ as in the reference paper),
with
\begin{equation}\label{bibby2}
v(z) = \pi(z)^{-1} \, \int_{-\infty}^z (\mu-s)\,\pi(s)\,ds, \quad
\mu = \int s\,\pi(s)\,ds.
\end{equation}
It is, of course, assumed that $\mu$ is finite, and then it is easily proved that $v\ge 0$, so that the SDE (\ref{sde_bibby}) is well-defined. (Indeed, $\int_{-\infty}^\mu (\mu-s)\,\pi(s)\,ds \ge 0$ since $\mu-s \ge 0$ for $s\le \mu$; and for $s>\mu$ the values $\mu-s$ are negative but the whole integral $\int_{-\infty}^{+\infty} (\mu-s)\,\pi(s)\,ds = 0$, so that for any $z<+\infty$, $v(z)\ge 0$ as required.)
However, of course, ``good'' ergodic properties of the solution of this equation depend on
some features of the density $\pi$. The solution locally exists due to local Lipschitz property
of $\sqrt{v}$ combined with the affine drift assumption, but no-explosion should be derived from
other conditions. Some related papers are, for example, \cite{Abu-Ver09, Abu-Ver09b, kulik-leonenko} which
tackle particular parametric families of target densities $\pi$ -- Student, reciprocal Gamma, and
Fisher-Snedekor diffusions.
In all three papers
a quadratic Lyapunov
function allows to show an exponential convergence in total variation which is non-uniform in the
initial state $Z_0$. It is interesting that in \cite{Bibby2005} an
exponential character of the stationary correlation function is established; yet,
convergence of a {\em non-}stationary process to a stationary regime was not studied.
In fact, the process under investigation in \cite{Bibby2005} is stated to be ``ergodic'',
which ergodicity is understood in the sense of being stationary without any convergence
statements. At the same time, the assumptions on the density in \cite{Bibby2005} involve the
stationary density \(\pi\) (in our notations) but not on its derivative (possibly with
some additional non-restrictive requirements in some theorems like continuity of the
target density). Recall that in the present paper $C^2$-differentiability of \(\pi\) is assumed,
but convergence rate bounds only depend on the asymptotic assumptions at infinity on the
density \(\pi\) itself. It looks plausible that, in principle, it may be possible to work with ``weak'' definitions of
the process via Dirichlet forms theory (\cite{Fuku, MaRo}),
but we prefer to have a well-defined solution trajectory; in
particular, we will be working with strong solutions due to \cite{Ve81}.
Recipe 2 naturally raises the question whether it is possible to arrange an even faster convergence, at least theoretically.
\section{The setting \& main result}
Our primary goal is a density $\pi$ on $\mathbb R^1_+ = [0,\infty)$ satisfying
\begin{equation}
c\left(1+x\right)^{-m}\leq
\pi\left(x\right)\leq
c^{-1}\left(1+x\right)^{-m}, \quad x\ge 0.
\label{eq:dens1}
\end{equation}
Recipes 1 \& 2 in the previous section can be applied to this setting if we just extend the density in a symmetric way to the whole line, with a natural normalisation. As was said in the Introduction, we aim at constructing a diffusion process on $\mathbb R^1_+$ which converges towards $\pi$ with an exponential rate uniformly with respect to the initial data. It is likely that a similar result holds true for a {\it symmetric} density $\pi$ on the whole line $\mathbb R^1$ satisfying (\ref{eq:dens}), which we mention as a remark.
In order to achieve yet a better convergence than typically guaranteed by the recipes (\ref{eq:lang})--(\ref{eqb}) or even by (\ref{sde_bibby})--(\ref{bibby2}), and for yet a more general class of densities than in \cite{Abu-Ver09, Bibby2005, kulik-leonenko} and in quite a few other works, let us consider two diffusion processes $Y_t$ and $X_{t}$ on $\mathbb R_+$ satisfying, respectively, SDEs with a non-sticky reflection at zero (\ref{eq:lang2})
and
\begin{equation}\label{feq}
dX_{t}=f\left(X_{t}\right)dW_{t}+f^{2}\left(X_{t}\right) b\left(X_{t}\right)dt + d\phi^X_t, \quad X_0 = \xi,
\end{equation}
with a local time $\phi_t^X$ at zero and
with a special auxiliary function $f$,
\begin{equation}
f(z):=
\displaystyle \left(1+\int_{0}^{z}\frac{dy}{\pi(y)}\right)^{1/2}, \quad z\ge 0.
\label{eq:accel}\end{equation}
The generator of this process is given by
\[
L=f^2 L_0,
\]
where $L_0$ is the generator of the reflected diffusion \eqref{eq:lang2}:
\[
L_0v(x) = \frac12 v''(x) + b(x) v'(x), \; \forall \, x>0, \quad \& \quad L_0v(0) = v'(0+).
\]
Recall the requirements on the non-sticky solution and on its local time: $\phi^X$ is a monotonically non-decreasing function; for any $t>0$,
\begin{align*}
\phi^X_t = \int_0^t 1(X_s=0)d\phi^X_s; \quad \int_0^t 1(X_s=0)ds = 0 \; \mbox{a.s.}
\end{align*}
~
Of course, a question about existence of a solution of this equation (\ref{feq})
on the whole half-line \(t\ge 0\) arises here, and a positive answer to this question at first sight may look doubtful given the fast increasing coefficients. However, it will be
justified with the help of a random time change and of the law of large numbers that such a (strong) solution exists on the whole half-line and does not explode.
The main result
is the following Theorem.
\begin{theorem}\label{thm1}
Assume that for a strictly positive probability density $\pi\in C^2$ with two locally bounded derivatives the bounds \eqref{eq:dens1}
hold with some $m>3$.
Then the SDE (\ref{feq}) has a strong solution $X_{t}$ for all \(t\ge 0\) which is
strongly (pathwise) unique and which possesses an exponential
rate of convergence to the stationary distribution $\pi\left(x\right)dx$,
\begin{equation}\label{eq:crate}
\|\mu_t^{\xi} - \mu\| _{TV} \le C \exp(-\lambda t), \quad t\ge 0,
\end{equation}
uniformly with respect to $\xi$, with some constants $\lambda$ and $C$ which both admit certain evaluation, where $\mu^\xi_t$ is a marginal measure of the process $X_t$ that starts from $\xi$ at $t=0$, and $\mu (dx) = \pi(x)dx$ is the (unique) invariant measure of the process.
\end{theorem}
The right hand side in (\ref{eq:crate}) does not depend on the initial value $\xi$.
Theoretical evaluations of both constants in the bound (\ref{eq:crate}) is likely to be not very efficient, yet possible which is clearly better than pure existence of such constants.
\section{Proof of Theorem \ref{thm1}}
The proof will be split into several steps.
\noindent
{\bf Step 1. Random change of time.}
Define the function $f(z)$ on
$\mathbb{R}^1_+$ by \eqref{eq:accel}.
Obviously there exists $0<a\le 1$ (namely, any $a\in (0,c^2]$ works, with $c$ from (\ref{eq:dens1})) such that
\begin{equation}
a\left(1+z\right)^{m+1}\leq f^{2}(z)\leq a^{-1}\left(1+z\right)^{m+1}, \quad \forall z \in \mathbb{R}_+^1.
\label{eq:est_f}\end{equation}
Let us define a random time change (cf. \cite{GS, McKean}) by
\begin{equation}
\chi_{t}:=\int_{0}^{t}f^{-2}\left(Y_{s}\right)ds, \quad \& \quad \beta_t:= \chi^{-1}_t \quad \mbox{(the inverse function).}
\label{eq:time-change}
\end{equation}
In other words,
\[
\beta'_t = f^2(Y_{\beta_t}),
\]
and
\[
t = \int_{0}^{\beta_t}f^{-2}\left(Y_{s}\right)ds.
\]
This time change $t\mapsto \beta_t$ is non-degenerate, that is, the following two conditions hold:
$\rm{(i)}$ there is no blow up at finite time:
\begin{equation}
\mathbb{P}(\chi_{t}|_{t\rightarrow T-0}\rightarrow+\infty) =0\quad \forall T\in(0,+\infty).
\label{eq:tc-bounds-2}\end{equation}
$\rm{(ii)}$ $\chi_t$ is unbounded as $t\rightarrow +\infty$
(i.e. when "real" time goes to infinity):
\begin{equation}
\chi_{t}\geq0,\quad\chi_{t}|_{t\rightarrow\infty}\rightarrow+\infty\quad\mathbb{P}-\mbox{a.s.}
\label{eq:tc-bounds-1}\end{equation}
To prove \eqref{eq:tc-bounds-2}, it suffices to notice that, due
to \eqref{eq:est_f}, for any \(s<t\),
\[
0\le \chi_{t}-\chi_{s}\leq a^{-1}\int_{s}^{t}\left(1+\left|Y_{r}\right|\right)^{-m-1}dr,
\]
hence
$$
\chi_{t}^{'}\leq a^{-1}\cdot\sup_{r\in\mathbb{R}^{1}}\left(1+|r|\right)^{-m-1}=a^{-1}, \quad \mathbb{P}-\mbox{a.s.}
$$
Then (\ref{eq:tc-bounds-2}) immediately follows.
~
From here we find,
\[
\inf_{t\ge 0}\beta'_t \ge a>0, \quad \mathbb{P}-\mbox{a.s.},
\]
and
\[
\mathbb P(\limsup_{t\to\infty} \beta_t < \infty) = 0.
\]
~
The assertion \eqref{eq:tc-bounds-1} follows from the following Lemma.
\begin{lemma}\label{erg} Let $m>3$, and let $g$ be a bounded continuous function
on $\mathbb{R}^{1}$. Assume that the diffusion process $Y_{t}$ satisfies
(\ref{eq:lang}), and let $\mu_{inv}$
be its unique invariant measure.
Then for any $\delta>0$ and $\varepsilon>0$ there exists $T_0>0$
such that
\[
\mathbb{P}\left(\left|\frac{1}{t}\int_{0}^{t}g\left(Y_{s}\right)ds-\int g(x)d\mu_{inv}(x)\right|
>\varepsilon\right)<\delta\qquad\mbox{for any }t\geq T_{0}.
\]
\end{lemma}
The Lemma with $g(r)=\left(1+\left|r\right|\right)^{-m-1}$ yields
the assertion (\ref{eq:tc-bounds-1}).
Indeed, denote \(a_g := \int g(x)\,d\mu_{inv}(x)\);
let us fix any $\delta\in(0,1)$ and \(\varepsilon = a_g/2\). Naturally, \(a_g > 0\).
Then with $\mathbb{P}$-probability at least $1-\delta$
one has $\chi_{t}\geq\left(a_{g}-\varepsilon\right)t=a_g \, t/2$ for {\bf any} $t$ large
enough. This means that with probability at least \(1-\delta\) the change of time mapping does not stop up to at least $a_g\, t/2$. Since \(\delta\in (0,1)\) is arbitrary, \eqref{eq:tc-bounds-1} holds.
Proof of Lemma \ref{erg}. First of all, we will refer to the mixing results for SDEs on the whole line; however, in the case of symmetric coefficients ($b(-x)=b(x)$ and similarly for the diffusion coefficient if it is not a constant) such results straightforwardly imply similar bounds and convergence rates for diffusions with a (non-sticky) reflection at zero, too.
The process \((Y_t)\) is Markov ergodic with a finite variance (and, in fact, with any moment $m'<m-2$; $g$ is bounded) with a polynomial beta-mixing rate as well as convergence in total variation \(\beta^\xi(t) + |\mu_t^\xi - \mu_\infty|_{TV} \le C_k(\xi)(1+t)^{-k}\) with some \(C_k(\xi)\) for any \(k<m-1\), to the stationary regime $\mu_\infty$, see \cite{ayv_grad_drift, ayv_grad_drift2}. Indeed, the assumptions of \cite{ayv_grad_drift} are met with \(p=m-1\), where \(p\) is the standing parameter in \cite{ayv_grad_drift}. The assumption $m>3$ implies $k>2$. Moreover, the function $g$ is bounded; hence, the process $\int_0^t g(Y_s)\,ds$ possesses all moments (including exponential ones with any constant, although this is far too much for our goal). The beta-mixing coefficient dominates the alpha-mixing one, while a certain convergence rate to zero of the alpha coefficient is the standing assumption in Theorem 18.5.4 of \cite{IbrLinnik}.
Hence, for the stationary regime, the assertion
of the Lemma -- LLN -- follows from the Central Limit Theorem \cite[Theorem 18.5.4]{IbrLinnik}. Indeed, splitting
the integral from zero to $t$ into a sum $\sum_1^{[t]}$ plus $\int_{[t]}^t$, the claim follows. For a {\it nonstationary}
regime the desired LLN follows again from the CLT for the {\it stationary} case, from the Markov property,
and from the polynomial convergence of $\mbox{Law }(Y_{s})$ to $\mu_{inv}$
in total variation, similarly to the proof of `` non-stationary CLT'' in \cite[Theorem 4]{Ve2} with the help of the results from \cite[Theorem 1]{ayv_grad_drift} with $m>3$. After mixing bounds have been found, see also \cite{Ve_LN} for LLN (formally, in \cite{Ve_LN} mixing is exponential, but obviously any polynomial would do such that the related sums or integrals converge).
This finishes the proof of the Lemma~\ref{erg}.
{}$\square$
See also \cite{Eva} for close results under slightly different assumptions. As was already mentioned, the statement of the Lemma will be used straight away for our reflected diffusion~(\ref{eq:lang2}).
~
\noindent
{\bf Step 2. Constructing the process $X_t$.}
On the probability space $\left(\Omega,\mathcal{F}, (\mathcal{F}_{t}),\mathbb{P}\right)$ with a solution $Y_t$ to the equation (\ref{eq:lang2}),
let us introduce stochastic processes
\[
X_{t}:=Y_{\beta_{t}}, \quad \phi^X_t = \phi^Y_{\beta_{t}}.
\]
Then due to the time change \cite[Theorem 3.15.5]{GS}
it follows that the process $X_{t}$ satisfies an SDE
\begin{equation}
dX_{t}=f\left(X_{t}\right)\, d\tilde W_{t}+f^{2}\left(X_{t}\right) b\left(X_{t}\right)dt + d \phi^X_t, \quad X_0 = \xi,
\label{eq:lang-ac}
\end{equation}
with a new Wiener process \(\displaystyle \tilde W_{t} = \int_0^{\beta_t} f^{-1}(X_s) \, dW_s\), and with the local time at zero \( \phi^X_t\); recall that $f(0)=1$. Indeed, outside zero the ``main part'' here
$$1(X_t>0)dX_{t}=1(X_t>0)\left[f\left(X_{t}\right)\, d\tilde W_{t}+f^{2}\left(X_{t}\right) b\left(X_{t}\right)dt\right]$$ follows straightforward from \cite[Theorem 3.15.5]{GS}, and
$$
1(X_t=0)dX_{t}=1(X_t=0)d \phi^X_t
$$
is a direct consequence of the equation
$$
1(Y_t=0)dY_{t}\,=\,1(Y_t=0)d\phi^Y_t.
$$
Also, we have,
\begin{align*}
\int_0^t 1(X_s=0)ds = 0, \;\;
\int_0^t 1(X_s=0)d\phi^X_s = \phi^X_t.
\end{align*}
Finally,
\[
X_t - \xi - \int_0^t f\left(X_{s}\right)\, d\tilde W_{s}-\int_0^t f^{2}\left(X_{s}\right) b\left(X_{s}\right)ds - \phi^X_t = 0, \quad {\mbox{a.s.}}
\]
Thus, $X$ is the solution of the equation (\ref{eq:lang-ac}) with a non-sticky reflection, as required.
~
The equation (\ref{eq:lang-ac}) can be also derived from the time change for the SDE (\ref{eq:lang}) on the whole line with a symmetric drift and symmetrically extended $f$ after the application of It\^o--Tanaka's formula to the modulus,
\begin{equation}
d\bar X_{t}=\bar f\left(\bar X_{t}\right)\, d\tilde W_{t}+\bar f^{2}\left(\bar X_{t}\right) b\left(\bar X_{t}\right)dt, \quad \bar X_0 = \xi,
\label{eq:lang-ac2}
\end{equation}
with
\[
\bar b(x) = \mbox{sign}(x)b(|x|), \;\; \bar f(x) = f(|x|), \quad \forall \, x\in \mathbb R^1.
\]
By construction, the processes $X_t$ and $ \phi^X_t$ are regular, i.e. are defined for all $t\geq 0$, and adapted to the filtration \(\tilde {\cal F}_t \equiv {\cal F}_{\beta_t}\), see \cite{GS}. Recall the well-known fact that the new filtration \({\cal F}_{\beta_t}\) is well-defined because of the fact that for any \(t\), the random variable \(\beta_t\) is a stopping time.
Emphasize that the process $X_t$ is well defined on the whole half-line $t\ge 0$, it does not explode, and it neither reaches infinity from zero, nor vice versa (zero from infinity) over a finite time,
all of these because of the construction via the time change.
\noindent
{\bf Step 3.} The solution $X_t$ is strong. Indeed, it is well-defined on \(t\ge 0\), and the diffusion coefficient is locally continuously differentiable, and locally bounded, and locally non-degenerate, while the drift coefficient is also locally bounded. Due to the results in \cite{Ve81},
this suffices for strong uniqueness via the stopping time arguments with the help of the strong Markov property -- see \cite{Krylov_selection}. This will be used in the sequel in the coupling procedure (although, probably could be done with weak solutions, too).
\noindent
{\bf Step 4. Stationary distribution for $X_t$.}
Let us prove that the process \(X_t\) has a unique invariant distribution $\pi(x)dx$.
The stationary distribution $\mu$ satisfies the stationary Kolmogorov equation $L^* (\mu)=0$ on $\mathbb R_+$ -- or, equivalently, \(L^*\pi = 0\) --
where
\begin{equation}\label{L}
L=\frac{f^2}{2} D_x^2 +(f^2 b)D_x
\end{equation}
is the generator of $X_t$ and
${}^*$ is the adjoint with respect to the Lebesgue measure.
First of all, the Kolmogorov equation $L^* (\mu)=0$ has at most one probability solution due to
\cite[Example 4.1.1]{Bo}. Next, the measure $\mu(dx)=\pi(x) dx$
satisfies this equation. Indeed, since
\begin{equation}
\frac{1}{2}\pi^{'}-\left(b\pi\right)=0,
\label{eq:pl}\end{equation}
we have
\begin{multline}
\frac{1}{2}\left((f^{2})\pi\right)^{''}-\left((f^{2})b\pi\right)^{'}=
\frac{1}{2}\left((f^{2})^{''}\pi+2(f^{2})^{'}\pi^{'}+(f^{2})\pi^{''}\right)-
(f^{2})^{'}(b\pi)-(f^{2})(b\pi)^{'}=\\
(f^{2})\left(\frac{1}{2}\pi^{''}-
\left(b\pi\right)^{'}\right)+(f^{2})^{'}\pi^{'}+
\frac{1}{2}(f^{2})^{''}\pi-(f^{2})^{'}(b\pi)\overset{\eqref{eq:pl}}{=}
\frac{1}{2}(f^{2})^{''}\pi+\frac{1}{2}(f^{2})^{'}\pi^{'}=
\frac{1}{2}\left((f^{2})^{'}\pi\right)^{'}.
\label{eq:check}\end{multline}
But
\begin{equation}\label{one}
((f^{2})^{'}\pi)(x)=
\displaystyle \left(1+\int_{0}^{x}\frac{dy}{\pi(y)}\right)^{'}\cdot\pi(x)=
\frac{\pi(x)}{\pi(x)}=1, \quad x\ge 0,
\end{equation}
where at zero the derivative is understood as the right one. Hence the expression in the right hand side of \eqref{eq:check} equals zero, i.e., $\pi$ is a stationary measure for the new process \(X\). Note that the same calculus with $f$ replaced by $1$ shows the invariance of $\pi$ for $Y_t$.
As may be expected, (\ref{one}) implies the equality
\begin{equation}\label{e-inv}
\mathbb{E}_\pi h(X_t) = \int h(y)\pi(y)\,dy,
\end{equation}
for any \(t>0\) and \(h\in C_b(\mathbb{R}^1_+)\).
By virtue of the Lebesgue dominated convergence
theorem we can take \(h\in C^\infty_0(\mathbb{R}^1_+)\) (continuous with a compact support), but more than that, it suffices to consider functions $h\in C^\infty_0(\mathbb{R}^1_+)$ with $h'(0+)=0$: indeed, the latter class -- denoted in the sequel as $C^\infty_{00}(\mathbb{R}^1_+)$ -- is clearly dense in $C^\infty_0(\mathbb{R}^1_+)$.
We have, due to $h'(0+)=0$,
\begin{align*}
dh(X_t) = Lh(X_t)\,dt + h'(X_t)f(X_t)\,dW_t + 1(X_t=0)h'(0+)d\phi^X_t
\\\\
= Lh(X_t)\,dt + h'(X_t)f(X_t)\,dW_t.
\end{align*}
Moreover, since \(h\) has a compact support, \(Lh\) and \(h'f\) are bounded. So, by rewriting
in integral form and taking expectations we get,
\[
\mathbb{E}_\pi h(X_t) - \mathbb{E}_\pi h(X_0) = \mathbb{E}_\pi \int_0^t Lh(X_s)\,ds
\equiv \int_0^t \mathbb{E}_\pi Lh(X_s)\,ds,
\]
the last equality due to Fubini's theorem.
Denote by $p_s(y,z)$ the transition density of the Markov process $X$; its existence follows, e.g., from \cite[Corollary 2.9 \& Remark 2.17]{Bogach}. In a ``good case'' with all appropriate derivatives, the standard {\it formal} calculus runs as follows:
\begin{align*}
\mathbb{E}_\pi Lh(X_s) = \iint (L_zh(z))\pi(y)p_s(y,z)\,dzdy
\\\\
= \iint \pi(y) h(z) L^*_zp_s(y,z)dzdy
= \iint \pi(y) h(z) \partial_s p_s(y,z)dzdy
\\\\
= -\iint \pi(y) h(z) L_y p_s(y,z)dzdy
= -\int h(z) \left(\int p_s(y,z) L^*_y \pi(y)dy\right)dz = 0,
\end{align*}
due to
forward and backward Kolmogorov's equations, and Fubini's theorem. Therefore, we conclude that
\begin{equation}\label{esta}
\mathbb{E}_\pi h(X_t) = \mathbb{E}_\pi h(X_0),
\end{equation}
which is equivalent to (\ref{e-inv}).
A rigorous justification without additional assumptions follows from \cite{Bogach}.
Note that uniqueness of the invariant measure will follow from the convergence bound (\ref {eq:crate}) once it is established.
\noindent
{\bf Step 5. Uniform exponential moment bound.}
\noindent
Let us take {\it any} $K>0$, and define
\begin{equation} \gamma_{X}^{\xi}\equiv\gamma_X^{\xi,K} \equiv\gamma :=\inf\left(t\geq0:\,\, X_{t}
\leq K,
\,\, X_{0}=\xi \ge 0\right).
\label{eq:moments}
\end{equation}
Let us show that $\mathbb{E}\exp(\alpha\gamma_{X}^{\xi}) <+\infty$
for $\alpha>0$ small enough, uniformly with respect to the initial state of the process.
Denote $v_q \left(\xi\right):=
\mathbb{E}_{\xi}\gamma^{q}$,
with a convention $v_{0}\equiv1$.
Obviously,
\[
\mathbb{E}_{\xi}e^{\alpha\gamma} =\sum_{q=0}^{+\infty}\frac{\alpha^{q}\mathbb{E}_{\xi}\gamma^{q}}{q!} = \sum_{q=0}^{+\infty}\frac{\alpha^{q}v_{q}\left(\xi\right)}{q!} < \infty,
\]
provided all quantities \(v_{q}(\xi)\) are finite and grow not too fast in $q$.
\noindent
{\bf Step 6. Auxiliary results for polynomial moments.}
\noindent
In order to guarantee that the values $v_q$, indeed, may not grow too fast, let us find alternative representations for them. By
virtue of the identity
\[ \left(\int_{0}^{\gamma}1dt\right)^{q}=q\int_{0}^{\gamma}\left(\int_{t}^{\gamma}1ds\right)^{q-1}dt,
\]
which holds both for finite and infinite $\gamma$, we get,
\[
v_{q}(\xi)=q\mathbb{E}_{\xi}\int_{0}^{\gamma}v_{q-1}(X_{t})dt
\]
at least, for any $q\ge 1$ such that $v_{q}\left(\xi\right)$ is finite. Indeed, due
to the Fubini's theorem (iii) and the Markov property (iv),
\begin{multline*} v_{q}\left(\xi\right)\equiv \mathbb{E}_{\xi}\gamma^{q} = q \mathbb{E}_{\xi}\int_{0}^{\gamma}
\left(\int_{t}^{\gamma}1ds \right)^{q-1}dt = q\mathbb{E}_{\xi}\int_{0}^{\infty}1 \left(\gamma > t\right) \left(\int_{t}^{\gamma}1ds\right)^{q-1}dt =
\\
\overset{(iii)}{=}q\int_{0}^{\infty}\mathbb{E}_{\xi}
1 \left(\gamma > t\right)\left(\int_{t}^{\gamma} 1ds\right)^{q-1}dt =
q\int_{0}^{\infty}\mathbb{E}_{\xi}1 \left(\gamma\geq t\right) \mathbb{E}_{\xi}\left(\left(\int_{t}^{\gamma}
1ds\right)^{q-1} | {\cal F}^X_t\right) dt=
\\
=
q\int_{0}^{\infty}\mathbb{E}_{\xi}1 \left(\gamma > t\right) \mathbb{E}_{\xi}\left((\gamma - t)^{q-1} | X_t\right) dt =q\int_{0}^{\infty}\mathbb{E}_{\xi}1 \left(\gamma > t\right)
\mathbb{E}_{X_{t}}\gamma^{q-1}dt
=
\\
\overset{(iv)}{=}q\int_{0}^{\infty}\mathbb{E}_{\xi}1 \left(\gamma > t\right)v_{q-1}(X_{t})dt\overset{(iii)}{=} q\mathbb{E}_{\xi}\int_{0}^{\gamma}v_{q-1}(X_{t})dt.
\end{multline*}
Hence, if both quantities are finite, then
\[ v_{q}\left(\xi\right)=q\mathbb{E}_{\xi}\int_{0}^{\gamma}v_{q-1}(X_{t})dt,\quad v_0=1.
\]
Note that for each $q\geq1$, if $v_q$ is finite, then it satisfies
\begin{equation}
Lv_{q}(x)=-qv_{q-1}(x), \;\; x\ge K,
\label{eq:pois}\end{equation}
by virtue of the probabilistic representation of solutions of the elliptic equation with Dirichlet boundary condition, or equivalently by Duhamel's formula.
Obviously $v_{q}(K)=0$, as well as $v_{q}(x)=0, \, 0\le x\le K$. Also,
it is known that if $v_q(\xi)<\infty$ for some $\xi$ then it is finite for any $\xi$.
However, we
are not going to use this equation directly since it lacks the ``second boundary condition'' normally required for the second order differential equation. Instead, we will find solutions to boundary problems that approximate $v_q$.
In fact, what we shall need instead is the following Lemma.
Let $\hat L:\,(\hat Lu)(x)=a(x)u^{''}(x)+c(x)u^{'}(x)$ be the generator of the diffusion process $(\zeta_t,t\geq 0)$
with locally bounded coefficients $a>0$ (the diffusion) and $c$ (the drift), and such that $a$ is locally uniformly non-degenerate, and which process is a strong solution of a corresponding SDE.
Let us fix a positive (non-negative) function $\psi$ on $\mathbb{R}^1$. Let
\[
\tau_K:=\inf\left(t\geq 0:\,\,\zeta_{t}\leq K,
\,\, \zeta_{0}=\xi\right) \quad (\xi>0),
\]
and
\[
v\left(\xi\right) =\mathbb{E}_{\xi}\int_{0}^{\tau_K} \psi(\zeta_{t})\,dt.
\]
\begin{lemma}\label{PDE_lemma}
For any
\(N>K>0\), consider the boundary problem
\begin{equation}
\hat Lv^+_{N}=-\psi,\quad v^+_{N}\left(K\right)=0,\,\,\left(v^+_{N}\right)^{'}(N)=0.
\label{eq:bvp-lem}
\end{equation}
Then the function $v^+_{N}(\xi)\uparrow v(\xi)$ as $N\uparrow\infty$, for every
$\zeta_0 = \xi$ with $\xi \ge K$.
\end{lemma}
\textbf{Proof of Lemma \ref{PDE_lemma}.}
For any
$0\le K\le \xi\le N$, let us consider a family of stochastic
processes $\zeta_{t}^{N}$, given by the SDE with reflection,
\[
d\zeta_{t}^{N}=\sqrt{2a(\zeta_t ^N)}dw_{t}+c(\zeta_{t}^{N})dt +d\phi_{t}^{N}, \quad \zeta_0^N = \xi,
\]
with values on $[0,N]$,
with a non-sticky reflection at $N$ and an absorption at zero, where $\phi_{t}^{N}$ is its local time at $N$.
Applying It\^o's formula (or, in fact, more precisely It\^o--Krylov's formula if continuity of $\psi$, $a$, and $c$ is not assumed) to $v^+_{N}(\zeta_{t}^{N})$,
we get the following representations:
\[
v^+_{N}(x)=\mathbb{E}_{x}\int_{0}^{\tau_{K,N}}\psi(\zeta_{s}^{N})ds,
\]
where
\(
\tau_{K,N} = \inf\left(t\geq0:\,\, |\zeta^N _{t}| \le K\right)
\) is the moment when the process $\zeta^N _t$ first hits the interval $[0,K]$.
Note that
$\tau_{K,N}$ monotonically increases as $N$ increases. Also note that if $\tau_K<\infty$ then, obviously, \(\tau_{K,N} \uparrow \tau_K\); and if
$\tau_K=\infty$ then still \(\tau_{K,N} \uparrow \infty =\tau_K\). These all follow from the comparison theorem for one-dimensional SDEs possessing strong solutions with the same coefficients and different initial data. This comparison theorem can be shown as follows. Consider two SDEs with the same initial value $\xi$ but with two different $N_1 < N_2$, say. Denote the corresponding solutions by $\zeta^{N_1}_t$ and $\zeta^{N_2}_t$. Assuming that $\xi\in [K,N_1]$, due to the strong uniqueness $\zeta^{N_1}_t = \zeta^{N_2}_t$ until $\hat \tau^{}:=\inf(t\ge 0: \, \zeta^{N_1}_t = K \; \mbox{or} \; N_1)$. If at this moment -- which is a stopping time --
$\zeta^{N_1}_t = \zeta^{N_2}_t = K$, then the claim is justified because $K$ is the absorption point. If, however, $\zeta^{N_1}_t = \zeta^{N_2}_t = N_1$, then the first process $\zeta^{N_1}_t$ will remain less than or equal to $N_1$ all the time, while the second will exceed this level $N_1$ with probability one on any right interval of $\hat \tau$. This follows easily from the ``reverse'' time change which makes diffusion back equal to one and from the Girsanov theorem about eliminating the drift via change of measure, because for the standard Wiener process this property is well-known (e.g., it follows from Khintchin's iterated logarithm law for WP \cite{ItMc}
along with the strong Markov property). Thus, on any small right neighbourhood of the moment $\hat\tau$ we would have $\zeta^{N_1} \le \zeta^{N_2}$, with strict inequality at least at infinitely many moments of time arbitrarily close to $\hat \tau$. Yet, both solutions are strong Markov. So, if we now start two processes with the same generator anew at two distinct initial values $\xi_1 < \xi_2$, then due to continuity the two solutions will satisfy
$1(t>\hat\tau)1(\zeta^{N_1}_t<\zeta^{N_2}_t) = 1(t>\hat\tau)$, at least, until they meet again, i.e., for all $t< \bar\tau:= \inf(s\ge \hat\tau: \zeta^{N_1}_s = \zeta^{N_2}_s)$ (here, of course, $\inf(\emptyset) = \infty$, and, in fact, they will never meet again). But then, if we assume that $\bar \tau < \infty$, they will again coincide until the next moment when they touch the level $N_1$, after which we have again $\zeta^{N_1}\le \zeta^{N_2}$, and the cycle can be repeated indefinitely. This shows that
$\zeta^{N_1}_t\le \zeta^{N_2}_t$ for all $t\ge 0$.
Hence, we have \(\tau_{K,N} \uparrow \tau_K\), $N\uparrow \infty$, and so, the monotone convergence theorem yields the assertion of the Lemma, as required.
{}$\square$
\noindent
Similar calculi in similar situations yielding various close claims can be found in \cite{Mao, ayv_grad_drift2, ayv_grad_drift}.
~
\noindent
{\bf Step 7. Bounds for polynomial moments.}
\noindent
Let us prove that
\begin{equation}
v_{q}\left(\xi\right)\leq q!\cdot C^{q}
\label{eq:major_est-1}
\end{equation}
for all $q\geq1$, with the constant
\[
C=\frac{a^{-1}}{m}\cdot A_{m},\quad A_{m}:=\int_{K}^{\infty}\left(1+w\right)^{-m}dw,
\]
which does not depend on $q$.
We argue by induction. Now our particular generator is $\hat L = L$ from~(\ref{L}).
\underbar{Base}: Let $q=1$. Fix $N>0$. Notice that $v_{0}=1$ and
consider a boundary value problem,
\begin{equation}
Lv_{N}^{1}=-1,\quad v_{N}^{1}\left(K\right)=0,\,\,\left(v_{N}^{1}\right)^{'}(N)=0.
\label{eq:BVP-1}
\end{equation}
Since $L=f^{2}L_{0}$,
where $L_{0}u=\frac{1}{2}u^{''}+\frac{1}{2}\nabla\ln\pi(x)u^{'}$, this problem
admits a unique solution
\begin{equation}
v_{N}^{1}(\xi)=2\int_{K}^{\xi}\pi^{-1}(w_{1})dw_{1}
\int_{w_{1}}^{N}\frac{\pi(w_{2})}{f^{2}\left(w_{2}\right)}dw_{2},
\label{eq:sol_pois__no_lim-1}\end{equation}
and due to \eqref{eq:dens} and \eqref{eq:est_f} we estimate replacing \(N\) by infinity in the upper limit of the integral,
\begin{multline*}
v_{N}^{1}(\xi)\leq2c\int_{K}^{\xi}(1+w_{1})^{m}dw_{1}\int_{w_{1}}^{\infty}c^{-1}\cdot
(1+w_{2})^{-m}\cdot a^{-1}\left(1+w_{2}\right)^{-m-1}dw_{2}\leq\\
\leq2a^{-1}\int_{K}^{\xi}(1+w_{1})^{m}dw_{1}\int_{w_{1}}^{\infty}(1+w_{2})^{-m}\left(1+w_{2}\right)^{-m-1}dw_{2}
=\\
=2a^{-1}\int_{K}^{\xi}(1+w_{1})^{m}\frac{(1+w_{1})^{-2m}}{2m}dw_{1}\leq
\frac{a^{-1}}{m}\int_{K}^{\infty}(1+w_{1})^{-m}dw_{1}=A_{m}\cdot\frac{a^{-1}}{m}=:C.
\end{multline*}
By virtue of Lemma \ref{PDE_lemma}, this implies
$v_{1}\left(\xi\right)=\lim_{N\rightarrow\infty}v_{N}^{1}\left(\xi\right)\leq C$.
\underbar{Induction Step}: Note that if the right hand side in the equation (\ref{eq:BVP-1})
is multiplied by a constant \(R>0\), then, given the specific boundary conditions, the bound for the solution will be also multiplied by this \(R\), so that instead of the upper bound \(C\) there will be a new upper bound \(RC\).
Suppose that for some \(q\) and for $n=q-1$ we have
\[
v_{n}(\xi)\leq C^{n}\, n!
\]
with the same constant \(C\) as above. Then, by the remark in the beginning of the induction step with \(R=C^{n}\, n! \times q \equiv C^{q-1}\,q!\), we immediately obtain
\[
v_{q}(\xi)\leq C^{q-1}\, q!\times C = C^{q}\, q!,
\]
as required. Hence, the inequality (\ref{eq:major_est-1}) follows.
Note that a similar simple argument with a reference to the induction method and without using explicitly the second barrier \(N\) can be found in \cite[Lemma 3.1]{Mao}; practically the same calculus, yet with unbounded growing in \(x\) moments was used in \cite{ayv_grad_drift}.
~
Now, take any $\alpha\in\left(0,C^{-1}\right)$. Then due to (\ref{eq:major_est-1})
one has
\begin{equation}
\mathbb{E}_{\xi}e^{\alpha\gamma}=\sum_{q=0}^{+\infty}\frac{\alpha^{q}\mathbb{E}_{\xi}\gamma^{q}}{q!}
\leq\sum_{q=0}^{+\infty}\alpha^{q}
C^{q}=\frac{1}{1-\alpha C}<\infty.
\label{eq:exp}
\end{equation}
It may be argued now that the desired ``exponential coupling'' can be arranged via the exponential moment bound (\ref{eq:exp}) and due to the elliptic Harnack inequality for divergent type equations \cite[Theorem 8.20]{GT} in the way similar to \cite{ayv_grad_drift}, see the next step. Note that it is a ``common knowledge'' that the bound (\ref{eq:major_est-1}) suffices for the Theorem claim. The reader who knows the exact reference may skip the rest of the proof.
\noindent
{\bf Step 8. Using Harnack inequality. }
The usage of the coupling method assumes that glueing or meeting of two versions of the process -- one stationary and another non-stationary -- can be arranged with a positive probability bounded away from zero on each period of this construction. Here it suffices to consider a ``symmetric'' SDE on the whole line. By ``period'' in our case any finite interval may be taken; e.g., it is convenient to use \([0,2]\) which will be split into two equal parts, \([0,1]\) and \([1,2]\) (their intersection at one single point is not important). On the first half, according to the inequality (\ref{eq:exp}) and Bienaym\'e -- Chebyshev -- Markov's inequality, both independent versions of the process will attain some (actually, any) bounded neighbourhood of zero. On the second half we want to glue them with a probability also bounded away from zero. Note that the standard 1D or finite state space idea just to wait until the two trajectories intersect here does not work as straightforwardly as we would like. Or, rather, it works but the bound obtained in such a way would use some bounds on the derivative \(\pi'\), which we want to avoid by all means.
There is a recent rather general tool based on regeneration period moments \cite{Zv}. Yet, to verify the mild condition (*) required for this tool is probably no easier than -- or, maybe, equivalent to -- what we suggest instead in the next paragraphs.
One more approach which does not involve any properties of \(\pi'\) uses classical inequalities for divergence form PDEs.
Indeed, this step justifies that it is possible by virtue of Moser's Harnack inequality for divergent type
elliptic equation (see \cite{GT}) (cf., e.g., \cite{Ve1}, \cite{Ve2} where a parabolic Harnack inequality was used for the same goal).
Here it is convenient to return to an SDE on the whole line with symmetric coefficients (\ref{eq:lang-ac2}) which solution is denoted by $\bar X_t$. Its modulus satisfies the equation (\ref{eq:lang2}) with a new Wiener process.
We argue that an elliptic Harnack inequality
\begin{equation}\label{ha}
\mathbb E_x g(\bar X_{\bar\sigma}) \le C \mathbb E_{x'} g(\bar X_{\bar\sigma})
\end{equation}
for any non-negative function $g$ and any $|x|, |x'| \le 1$ with
$\bar \sigma := \inf\,(t\ge 0:\; |\bar X_t|\ge 2)$ follows from \cite[Theorem 8.20]{GT} due to the equation $\mbox{div} (\exp(2U(x)) \nabla v(x)) = 0$,
here $v(x) = \mathbb E_x g(\bar X_{\bar\sigma})$. This reasoning should be combined with the bound $\mathbb P_x(\bar\sigma>t) \le
C t^{-1}$ for \(t>0\) with some $C>0$ depending on the sup-norms of all coefficients in the ball \(B:= \{|x|\le 2\}\). The latter bound follows from \cite[Theorem 8.16]{GT} and from the Bienaym\'e -- Chebyshev--Markov inequality \(\mathbb P_x(\bar\sigma>t) \le t^{-1}\,\mathbb E_x \bar\sigma\) since the function \(v(x) := \mathbb E_x \bar \sigma\) is a solution to the equation \(\frac12\,\exp(-2U)\mbox{div}(\exp(2U) \nabla v) + 1 = 0, \; \& \; v|_{\partial B}=0\), or, equivalently, to the (Poisson) equation
\[
\mbox{div}(\exp(2U) \nabla v) + 2\exp(2U) = 0, \quad v|_{\partial B}=0,
\]
to which the Theorem 8.16 \cite{GT} is applicable stating that solution \(v(x)\) is bounded by a constant, say, \(N\) depending only on \(\sup_{|x|\le 2}|U(x)|\) (actually, even on some integral norm of \(\exp(2U)\)). This immediately implies that by choosing \(t\ge 3N\) we have that \(\mathbb P_x(\bar\sigma\le t) \ge \frac23\). The same estimate holds true for the process $X_t$, with the stopping time $\sigma = \inf\,(t\ge 0:\; X_t\ge 2)$, and with a non-negative function $g$ on $\mathbb R^1_+$, i.e.,
$$
\mathbb E_x g(X_{\sigma}) \le C \mathbb E_{x'} g(X_{\sigma}).
$$
Along with
(\ref{eq:exp}), this suffices for a successful exponential coupling for the process $X_t$ with its stationary version. Although it will not be used here, note that the obtained bound implies a stronger exponential inequality $\mathbb P_x(\sigma>t) \le
C \exp(-\lambda t)$ with some $C,\lambda>0$ by the well-known property of homogeneous Markov processes and their exit times.
Finally,
we can change the function
$U$ outside the ball $|x|\le 3$ so that it becomes bounded, --
the latter is possible without changing the process until
$\sigma$.
\noindent
{\bf Step 9. Exponential convergence.} Let us return to the half-line $\mathbb R_+$ and to the process $X_t$, and let us fix some $K>0$. It is known that -- modulo the conclusion of the previous step -- for the proof of the desired exponential convergence in total variation, it suffices to show that $\mathbb{E}\exp(\alpha\gamma_{X}^{\xi})$ is finite
for some $\alpha>0$,
and for some -- {\it actually, for any} -- $K>0$ where $\gamma_{X}^{\xi}= \gamma_{X}^{\xi, K}$ was defined in (\ref{eq:moments}).
Let $X_{t}^{st}$ be the {\it independent} stationary version of the Markov process $X_{t}$, i.e., $X_{t}^{st}$ is the process with the same generator and initial distribution with the density \(\pi\), if necessary, on some extended probability space with another independent Wiener process. (However, we will not change our notations for $\mathbb P$ and $\mathbb E$.) Naturally, the couple $(X_{t}, X_{t}^{st})$ is considered on some extension of the original probability space. For $\xi > K$ let $\tau\equiv \tau^\xi$ be the
moment of the first intersection of $X_t$ started from \(X_0=\xi\) with the stationary version $X_t ^{st}$, i.e.,
$$
\tau :=\inf\left(t\geq0:\,\, X_{t}=X_{t}^{st}\right).
$$
As the random variable
$\tau$ is a stopping time and $X_{t}$ has strong Markov property,
we can define a new strong Markov process
\begin{equation}
\hat{X}_{t}:=X_{t}1\left(t<\tau\right) +X_{t}^{st}1\left(t\geq\tau\right),
\label{eq:coupling}\end{equation}
with the property
\[
\mbox{Law }(\hat X_{t})=\mbox{Law }\left(X_{t}\right).
\]
Obviously on $\{t>\tau\} \cap \{\tau < \infty\}$ the trajectories of $X_{t}^{st}$ and $\hat{X}_{t}$ ``after $\tau$''
coincide. Then for any Borel set $A$ one has (we drop the initial value $\xi$ since the final estimate is uniform in it)
\begin{align*}
\left|\mathbb P\left(X_{t}\in A\right)-\mathbb P\left(X_{t}^{st}\in A\right)\right|\overset{\eqref{eq:coupling}}{=}
\left|\mathbb P\left(\hat{X}_{t}\in A\right)-\mathbb P\left(X_{t}^{st}\in A\right)\right|=
\\\\
=\left|\mathbb{E}\left(\left(1 \left(\hat X_{t}\in A\right)-1 \left(X_{t}^{st}\in A\right)\right)
\times\left(1 \left(t<\tau\right)+1 \left(t\geq\tau\right)\right)\right)\right|=
\\\\
=\left|\mathbb{E}\left(\left(1 \left(\hat X_{t}\in A\right)-1 \left(X_{t}^{st}\in A\right)\right)
\times1 \left(t<\tau\right)\right)\right|
\\\\
\leq\mathbb{E}\left|\left
(1 \left(\hat X_{t}\in A\right)-1 \left(X_{t}^{st}\in A\right)\right)\right|
\times1 \left(t<\tau\right).
\end{align*}
Since $\left|\left(1 \left(\hat X_{t}\in A\right)-1 \left(X_{t}^{st}\in A\right)\right)\right|\leq1$,
taking into account the exponential version of Bienaym\'e -- Chebyshev--Markov's inequality, we conclude that
\[
\left|\mathbb{P}\left(X_{t}\in A\right)-\mathbb{P}\left(X_{t}^{st}\in A\right)\right|\leq \mathbb{E}1 \left(t<\tau\right)=\mathbb{P}\left(t<\tau\right)
\leq\exp\left(-\alpha t\right)\mathbb{E}\exp(\alpha\tau).
\]
Passing to the supremum over Borel sets $A$ we obtain due to (\ref{eq:exp}),
\[
\left\Vert \mu_{t}-\mu_{st}\right\Vert _{TV}\leq2\exp\left(-\alpha t\right)\mathbb{E}\exp(\alpha\tau) \le \frac{2}{1-\alpha C}\,\exp\left(-\alpha t\right),
\]
where $\mu_{t}=Law\left(X_{t}\right)$ and $\mu_{st}(dx)=\pi(x)dx$. This bound does not depend on the initial value $\xi$ which was dropped in the notation $\mathbb E_\xi$. Hence, the proof of the Theorem is completed.
$
\square$
~
\begin{remark}
It is likely that for the symmetric density $\pi$ on $\mathbb R^1$ and for the equation (\ref{eq:lang}) on $\mathbb R^1$ this method is applicable, too, with the function $f(y) \equiv f(|y|)$, and that it provides a similar convergence rate bound (\ref{eq:crate}) as in the Theorem \ref{thm1}. We leave it till further papers.
\end{remark}
~
\begin{remark}
The form of the process $X_t$ is a result of an educated guess. We were looking for the process $X_t$
whose generator $L$ would be the
generator of the Langevin diffusion multiplied by a positive function $F=f^2$, which is needed for applying a random time change. At the same time, we wanted the
new process $X_t$ to have the same invariant density $\pi$.
From this condition the function $F$ is
determined up to two constants $c_1, c_2>0$
\[
F(x)=
\displaystyle c_1 + \int_0 ^{x} \frac{c_2 dy}{\pi(y)}, \quad x\geq 0.
\]
It can be checked by an explicit computation that with this choice of the function \(F\), the new process
$X_t$ would still have an exponential rate of convergence to the invariant measure (we choose $c_1=c_2=1$ but in fact any
strictly positive $c_1$ and $c_2$ give the same result).
\end{remark}
\begin{remark}
Note that in the Theorem \ref{thm1} the property of continuity of the state space is important.
If the state space is discrete, the modification we consider (multiplication of the generator by a function)
typically does not affect the rate of convergence. Indeed, let us consider a birth-death process $Y_t,\,\,t\geq 0$
with
birth rates $\{\lambda_n,\, n\in \mathbb{N}_{\geq 0}\}$ and death rates
$\{\mu_n,\, n\in \mathbb{N}\}$:
\begin{equation}
\mathbb{P}\left(Y_{t+h}=m|Y_{t}=n\right)=\left\{ \begin{array}{lcl}
\lambda_{n}h+o(h), & & m=n+1\\
\mu_{n}h+o(h), & & m=n-1\\
1-\mu_{n}h-\lambda_{n}h+o(h), & & m=n\\
o(h), & & |m-n|>1\end{array}\right.
\label{eq:BDP}\end{equation}
The generator $A_0$ of the process $Y_t$ is given by
\[
A_0 \varphi (n) = \lambda_n ( \varphi (n+1) - \varphi (n) ) + \mu_n (\varphi (n-1)-\varphi (n)).
\]
If we want to apply the same transformation as in the continuous case, i.e. to consider a
birth-death process $X_t$ with the generator $A=f_n \cdot A_0$, then the new process $X_t$ will have the
birth and death rates $\{\lambda_n ^{'}=f_n \cdot \lambda_n,\, n\in \mathbb{N}_{\geq 0}\}$ and
$\{\mu_n ^{'}=f_n \cdot \mu_n,\, n\in \mathbb{N}\}$ respectively.
The invariant distribution $\pi$ of $Y_t$ can be computed
explicitly and equals
\[
\pi(\{ n\} )\equiv\pi_{n}=
\pi_{0}\frac{\lambda_{0}\dots\lambda_{n-1}}{\mu_{1}\dots\mu_{n}},
\quad\pi_{0}=\left(1+\sum_{n\geq1}
\frac{\lambda_{0}\dots\lambda_{n-1}}{\mu_{1}\dots\mu_{n}}\right)^{-1}.
\]
Hence
\[
\frac{\lambda_{n-1}}{\mu_n} = \frac{\pi_{n-1}}{\pi_n}\quad \mbox{for each}\quad n\in\mathbb{N}.
\]
The assumption that $X_t$ has the same invariant distribution as $Y_t$ yields
\[
\frac{\lambda_{n-1} ^{'}}{\mu_n ^{'}}=\frac{f_{n-1} \cdot \lambda_{n-1}}{f_n \cdot \mu_n}
= \frac{\pi_{n-1}}{\pi_n}\quad \mbox{for each}\quad n\in\mathbb{N},
\]
hence $f_n = f_0 = \mbox{const}$ for all values of $n$. So, such a transformation is just changing the time scale by multiplying it by a positive constant which doesn't influence the rate of convergence qualitatively.
\end{remark}
\end{document} |
\begin{document}
\title{\LARGE \bf
Optimal Control of Thermostatic Loads for Planning Aggregate Consumption: Characterization of Solution and Explicit Strategies
}
\thispagestyle{empty}
\pagestyle{empty}
\begin{abstract}
We consider the problem of planning the aggregate energy consumption
for a set of thermostatically controlled loads for demand response, accounting for the price forecast trajectory and thermal comfort constraints. We address this as a continuous-time optimal control problem,
and analytically characterize the structure of its solution in the general case.
In the special case, when the price forecast is monotone and the loads have equal dynamics, we show that it is possible to determine the solution in an explicit form.
Taking this fact into account, we handle the non-monotone price case
by considering several subproblems, each corresponding to a time subinterval where the price function is monotone, and then allocating to each subinterval a fraction of the total energy budget. This way, for each time subinterval, the problem reduces to a simple convex optimization problem with a scalar decision variable, for which a descent direction is also known. The price forecasts for the day-ahead energy market typically have no more than four monotone segments, so the resulting optimization problem can be solved efficiently with modest computational resources.
\end{abstract}
\section{Introduction}
\label{sec:introduction}
Thermostatically controlled loads (TCLs), such as air conditioners (ACs), are valuable as flexible resources to elicit demand response, i.e., for actively controlling the loads to offset intermittency in the generation side (e.g., due to renewables)
\cite{callaway2011achieving,bashash2011modeling,zhang2013aggregated,halder2015control,halder2017architecture}.
Utilities or load serving entities (LSEs) can dynamically exploit the \emph{thermal inertia} of the population of TCLs to strategically plan and control the aggregate consumption in a desired manner. In this paper, we consider the optimal planning problem for an LSE wherein the objective is to plan the power consumption trajectory over a time horizon to minimize the total purchase cost of energy (e.g., from a day-ahead market) while adhering to the individual thermal comfort limits and TCL dynamics constraints, given that a forecasted price trajectory is available over the planning horizon. Here, we restrict the planning problem to single horizon case, although one can envisage solving the same in a sliding time-window manner.
Since TCLs are subject to discrete ON-OFF controls, finding and implementing the solution resulting from the optimal control subject to state (here, temperature) inequality constraints is a non-trivial task, even for simple cases (e.g., monotone price forecast).
For example, physical TCLs have minimum switching period constraints which do not allow ``holding" the TCLs at a constant temperature value over an interval of time.
This suggests accounting the switching period constraint explicitly in control design, so that the solution structure for the optimal control trajectory may become well defined. On the other hand, the computational challenge in solving mixed integer control problems brings forth the question: is it possible to recover the discrete, non-convex optimal control from the simpler convexified (albeit numerical) optimal control solution?
In Section II, we outline the optimal control problem accounting switching constraints, and describe the convex relaxation. Sections III and IV characterize structure of the solution considering general and monotone price forecasts, respectively. These results motivate a decomposition strategy (Section V) allowing us to solve simpler subproblems over monotone price segments. This paper extends our earlier results \cite{halder2019optimal} to apply Pontryagin's Maximum Principle (PMP) for the planning problem accounting switching constraints. Section VI concludes the paper.
\section{Optimal Planning Problem}
For specificity, hereafter we refer TCLs as ACs. We consider an optimal consumption planning problem over time $t\in[0,T]$ for $N$ ACs with respective (indoor) temperature states $\{x_{i}(t)\}_{i=1}^{N}$, thermal coefficients $\{\alpha_{i},\beta_{i}\}_{i=1}^{N}$, ON-OFF controls $\{u_{i}(t)\}_{i=1}^{N}$, and initial conditions $\{x_{i0}\}_{i=1}^{N}$. We suppose that the ACs have upper and lower thermal comfort levels $\{L_{i},U_{i}\}_{i=1}^{N}$, the ambient temperature trajectory is $\hat{x}(t)>\max_{i}U_{i}$, and a total energy budget for the LSE is $E$.
\footnote[3]{We use the shorthand $[N]:=\{1,2,\hdots,N\}$, the abbreviation a.e. to mean ``almost everywhere",
{ the symbol $\ensuremath{{\rm supp}}\{\cdot\}$ to denote the support of a function, and}
{the notation $[t]^+ := \max\{0,t\}$, and $t_{1}\wedge t_{2} := \min\{t_{1},t_{2}\}$.}
}
Given a price forecast $\pi(t)$, assuming Newtonian thermal dynamics for indoor temperature trajectories, and that an ON AC draws power $P$, the planning problem is to minimize the energy procurement cost, i.e.,
\begin{align}
& \mbox{Minimize\ } J(\ensuremath{\mathbf{u}}) = \int_{0}^{T} \pi(t) P \sum_{i=1}^N u_i(t) \, \ensuremath{{\rm d}} t, \nonumber \\
& \mbox{subject to}\nonumber \\
& \dot{x}_i(t)=-\alpha_i( x_i(t) - \hat{x}(t)) - \beta_i u_i(t), \mbox{a.e.\ } t \in [0,T], i\in[N], \nonumber \\
& L_i \le x_i(t) \le U_i \quad \mbox{for all\ } t \in [0,T], \quad i\in[N], \nonumber \\
& \int_{0}^{T} \sum_{i=1}^{N} u_i(t) \ensuremath{{\rm d}} t = E, \label{eq:const1}\\
& u_{i}(t) \in \{0,1\} \quad \mbox{a.e.\ } t \in [0,T], i\in[N]. \label{eq:const2}
\end{align}
In order to apply standard optimal control tools to characterize the solution of this planning problem, namely necessary conditions of optimality in the form of the PMP, we consider a modification to this problem, analyze its solution, and then relate the solution of the modified problem to the solution of the original one.
The main difficulties in analyzing the planning problem in its original form are constraints (\ref{eq:const1}) and (\ref{eq:const2}).
By introducing an additional state variable $x_{N+1}$, the isoperimetric constraint (\ref{eq:const1}), can be rewritten as
$$
\dot{x}_{N+1}(t)=u_1(t)+ u_2(t) + \ldots + u_N(t), \quad \mbox{a.e.\ } t \in [0,T],
$$
with end-point conditions $x_{N+1}(0)=0$, $x_{N+1}(T)=E$. The difficulty with constraint (\ref{eq:const2}) is the fact that it makes the set of possible control values non-convex and an optimal solution to this continuous-time problem might not exist.
In fact, when the optimal solution would be to maintain the temperature constant, e.g., along
the thermal limits $U_i$ or $L_i$, the corresponding control would have to chatter between $0$ and $1$ at infinite frequency. Such a solution would not be defined when the trajectories are assumed to be measurable functions (we would have to enlarge the space of trajectories to include the so-called Young measures \cite{You69}). Also, such solution would not be practically implementable in ACs.
In order to guarantee that the optimal solution is not a chattering solution, we relax the admissible control values set to its convex hull, allowing intermediate control values,
$$
u_i(t) \in [0,1] \quad \mbox{a.e.\ } t \in [0,T], \quad i\in[N].
$$
A natural question arises: if the ACs only have ON-OFF control, how do we interpret and implement a solution that has intermediate control values? To address this, we note that physical AC units have a maximum on-off switching frequency (which prevents a hypothetical chattering solution from being implemented), or equivalently a minimum switching period.
Let $T_m$ be the minimum switching period of the AC unit and
$\hat{u}_i\in[0,1]$ be an intermediate control value.
We define an implementable equivalent control $\tilde{u}_{i}\in\{0,1\}$ to be the periodic ON-OFF control with
duty-cycle $\lambda/T_m$. In each period, we have
$$
\tilde{u}_{i}(t)=
\left\{
\begin{array}{ll}
1 & \text{for}\;t \in [0,\lambda), \\
0 & \text{for}\;t\in [\lambda, T_m).
\end{array}
\right.
$$
The entire periodic signal
over a time interval of length $K T_m$, with $K$ being some positive integer,
is given by
\begin{equation} \label{eq:util}
\tilde{u}_{i}(t)=
\left\{
\begin{array}{ll}
1 & \text{for}\;t \in [jT_m,jT_m+\lambda), \\
0 & \text{for}\;t \in [jT_m+\lambda, (j+1) T_m),
\end{array}
\right.
\end{equation}
with $j= 0,...,K-1$.
Here, the time $\lambda$ at which the control turns ON is such that the trajectories
of $x_i$ resulting from applying either $\hat{u}_{i}$ or $\tilde{u}_{i}$ coincide at the end of the switching period,
i.e., $\lambda$ satisfies
$$
\int_0^{T_m} \mathrm{e}^{-\alpha_i(T_m -s ) } \beta_i \hat{u}_{i}(s) \ensuremath{{\rm d}} s =
\int_0^{T_m} \mathrm{e}^{-\alpha_i(T_m -s ) } \beta_i \tilde{u}_{i}(s) \ensuremath{{\rm d}} s.
$$
Assuming $\hat{x}$ is constant in that period, we have that $\hat{u}_{i}$ is also constant and we obtain
$
\hat{u}_{i} \int_0^{T_m} \mathrm{e}^{-\alpha_i(T_m -s ) } \ensuremath{{\rm d}} s =
\int_0^{\lambda} \mathrm{e}^{-\alpha_i(T_m -s ) } \ensuremath{{\rm d}} s,
$
implying that $\lambda$ is given explicitly by
\begin{equation} \label{eq:lambda}
\lambda = \frac{1}{\alpha_i} \log (1+(\mathrm{e}^{\alpha_i T_m} - 1)\hat{u}_{i}).
\end{equation}
The state trajectory resulting from $\tilde{u}_{i}$ will over-approximate the trajectory
resulting from $\hat{u}_{i}$. When the state $x_{i}(t)$ is at the lower limit $L_{i}$, we should instead use a control starting with OFF segment, i.e.,
for $j= 0,...,K-1$
\begin{equation} \label{eq:util2}
\tilde{u}_{i}(t)=
\left\{
\begin{array}{ll}
0 & \text{for}\;t \in [jT_m,(j+1)T_m-\lambda), \\
1 & \text{for}\;t \in [(j+1)T_m-\lambda, (j+1)T_m),
\end{array}
\right.
\end{equation}
Omitting the scaling factor $P$ without loss of generality, the modified problem is to minimize the energy cost over $\mathcal{U}$, the set of measurable functions $u_i: [0,T] \mapsto [0,1]$, $i\in[N]$. We refer to the following problem as \textbf{(P)}.
\begin{align}
& \mbox{Minimize\ } J(\ensuremath{\mathbf{u}}) = \int_{0}^{T} \pi(t) \sum_{i=1}^N u_i(t) \, \ensuremath{{\rm d}} t, \label{eq:obj}\\
& \mbox{subject to}\nonumber \\
& \dot{x}_i(t)=-\alpha_i( x_i(t) - \hat{x}(t)) - \beta_i u_i(t), \quad \mbox{a.e.\ } t \in [0,T], \quad i\in[N], \nonumber \\
& \dot{x}_{N+1}(t)= \sum_{i=1}^N u_i(t), \quad \mbox{a.e.\ } t \in [0,T],\label{eq:isop}\\
& x_i(0)=x_{i0}, \quad i\in[N],\\
& x_{N+1}(0)=0, \quad x_{N+1}(T)=E, \\
& u_i(t) \in [0,1], \quad \quad \mbox{a.e.\ } t \in [0,T], \quad i\in[N], \\
& L_i \le x_i(t) \le U_i, \quad \mbox{for all\ } t \in [0,T], \quad i\in[N].
\label{eq:ocp}
\end{align}
In the next section, we analyze and characterize the solution to this problem.
\section{Characterization of solution: general case}
We start by defining two control values $\overline{u}_i, \underline{u}_i$, given by
\begin{align}
\overline{u}_i := \frac{\alpha_i}{\beta_i}(\hat{x}-U_i), \quad \underline{u}_i := \frac{\alpha_i}{\beta_i}(\hat{x}-L_i),
\label{uoverbarunderbar}
\end{align}
that are used in the development below. These controls lead to ``zero'' dynamics when the state is on the corresponding boundary, thereby permitting the state to slide along that boundary. Specifically, the control $\overline{u}_i$ permits the state to slide along the upper boundary $U_i$, while $\underline{u}_i$ permits the state to slide along the lower boundary $L_i$.
We note that $\overline{u}_i$ and $\underline{u}_i$ are intermediate control values and their implementation in a TCL is done using \eqref{eq:util} or \eqref{eq:util2}, respectively, together with \eqref{eq:lambda}.
The main results here require the following assumptions.
\textit{Assumption A1:}
\begin{enumerate}
\item The initial states are admissible, i.e.,
$$
L_i \le x_{i0} \le U_i, \mbox{\ for all $i\in[N]$}.
$$
\item The total energy prescribed, $E$, can be spent respecting the limits $L_i,U_i$ for all initial states, i.e., $E \in [\overline{E},\underline{E})$,
where
$
\overline{E}:=\int_0^T \sum_{i=1}^N \max\{0, \overline{u}_i(t)\} {\rm{d}} t,
$
and
$
\underline{E}:=\int_0^T \sum_{i=1}^N \min\{\underline{u}_i(t),1 \} {\rm{d}} t.
$
\item When the states are on the boundary of the admissible region, there is a control that drives the states into the interior of the admissible region, i.e., the values of $\alpha_i$ and $\beta_i$ are such that for all $i$, and for all possible $\hat{x}$, the temperature can rise from $L_i$ with control $u_i=0$ and can decrease from $U_i$ with control $u_i=1$
\begin{align}
-\alpha_i (L_i-\hat{x})>0, \quad
-\alpha_i (U_i-\hat{x})-\beta_i<0. \label{eq:A2}
\end{align}
\end{enumerate}
\textit{Assumption A2:}
The functions $\pi(\cdot)$ and $\hat{x}(\cdot)$ are differentiable.
The function $\pi(\cdot)$ does not take the specific form
$
\pi(t)=A \mathrm{e}^{\alpha_i t} + B,
$
for some index $i\in[N]$, and some constant values $A,B$ on any subinterval of $[0,T]$ of
nonzero measure.
Assumption A1 guarantees the existence of at least one admissible control-state pair satisfying the constraints.
It imposes the requirement that power of the AC unit is capable of overcoming the losses for the range of outside temperatures considered.
Assumption A2 is of a technical nature. If a very specific growth of the price is allowed, some algebraic coincidences lead to singular controls which are much more difficult to analyze. Assumption A2 rules out the singular control scenario.
Assumptions A1 and A2 are imposed throughout the paper.
In addition to these assumptions, for some results it is also useful to consider the following equal dynamics hypothesis H1, which permits
us to deduce further relevant properties for homogeneous populations of ACs.
\textit{Hypothesis H1:}
There exist constants $\alpha, \beta, L,U$ such that
for all $i\in[N]$,
$\alpha_i=\alpha$, $\beta_i=\beta$, $L_i=L$, $U_i=U$.
With these assumptions, using results from optimal control theory (see e.g. \cite{vinter_optimal_2000}), in particular applying and analyzing necessary conditions of optimality in the form of a normal maximum principle in
\cite{fontes2015normality}, we can establish the following.
\begin{theorem}\label{ThmOptimalControlGeneral}
For problem \textbf{(P)}, each component ${u^*_i}$ of the optimal control is piecewise constant, and at each time it can assume only one of the 4 values: $0$, $1$, $\overline{u}_i$, or
$\underline{u}_i$. The value $\overline{u}_i$ occurs only when the corresponding component of the state trajectory is on the upper boundary, i.e., ${x^*_i}=U_i$, and the
value $\underline{u}_i$ occurs only when the corresponding component of the state trajectory is on the lower boundary, i.e., ${x^*_i}=L_i$.
Moreover, if H1 holds then
the transitions to the values 0 or 1 occur simultaneously for all components of the control.
\end{theorem}
\subsection{Proof of the characterization result}
First, we guarantee the existence of an optimal solution. Then we guarantee that the PMP can be written in normal form.
For $i\in[N]$, let $v_i := -\alpha_i( x_i - \hat{x}(t)) - \beta_i u_i$, and $v_{N+1}:=\sum_{i=1}^{N}u_{i}$, where $u_i \in [0,1]$. We note that for each $(t,x)$, the set $\{(v, c) \in \ensuremath{\mathbb{R}}^{N+1} \times \ensuremath{\mathbb{R}} : c \ge \pi(t) v_{N+1}\}$, being Cartesian product of convex sets, is convex.
Combining this with Assumption A1, problem \textbf{(P)} satisfies the conditions for existence of an optimal solution; see \cite[Thm. 23.11]{clarke_functional_2013}.
That the PMP is satisfied in normal form, can be checked by verifying that certain inward--pointing conditions are satisfied along the trajectory when the state constraint is active (see \cite{fontes_normal_2013, fontes2015normality}).
In this case, the inequalities (\ref{eq:A2}) directly imply the inward-pointing constraint qualifications guaranteeing normality.
Therefore, we can apply a strengthened version of PMP \cite[Thm. 3.2]{fontes2015normality} and obtain the following conditions involving a scalar $\pi^*$ that can be interpreted as an intermediate price.
\begin{proposition}(\!\!\cite[Appendix A]{2019arXiv190300988F})
If $\left( \ensuremath{\mathbf{x}}^\ast, \ensuremath{\mathbf{u}}^\ast \right)$ is a local minimizer for problem \textbf{(P)},
then there exist a scalar $\pi^*$, absolutely continuous functions $p_i,q_i: \ensuremath{\left[0, T\right]} \to \ensuremath{\mathbb{R}}$, and positive Radon measures $\mu_i, \ell_i$ on $\ensuremath{\left[0, T\right]}$, for $i\in[N]$, satisfying
\begin{align}
& \dot{p}_i(t) = \alpha_i q_i(t), \quad \text{a.e.}\ t \in \ensuremath{\left[0, T\right]},\\
& q_i (t) = p_i(t) + \mu_i\{[0,t)\} - \ell_i\{[0,t)\}, t \in [0,T),\\
& q_i (T) = p_i(T) + \mu_i\{[0,T]\} - \ell_i\{[0,T]\} =0,\\
& \ensuremath{{\rm supp}} \{ \ell_i \} \subset \{ t : x_i(t) = L_i \}, \label{CS1}\\
& \ensuremath{{\rm supp}} \{ \mu_i \} \subset \{ t : x_i(t) = U_i \}, \label{CS2}\\
& \sum_{i=1}^{N} (\pi^* -\pi(t) - \beta_i q_i ) u_i^{*}(t) \ge \sum_{i=1}^{N} (\pi^* -\pi(t) - \beta_i q_i ) u_i, \nonumber
\end{align}
for $\mbox{a.e.\ }t \in \ensuremath{\left[0, T\right]}, u_i \in [0,1]$.
\end{proposition}
We now deduce a few lemmas, which combined together yield the result asserted in Theorem \ref{ThmOptimalControlGeneral}.
\begin{lemma}(\!\!\cite[Appendix E]{2019arXiv190300988F})
For $i\in[N]$, consider the control values $(\overline{u}_i,\underline{u}_i)$ as in (\ref{uoverbarunderbar}).
The optimal control for problem \textbf{(P)} satisfies
$$
u_i^*(t)=\left\{
\begin{array}{ll}
1 & \mbox{\ if\ }
\pi^* -\pi(t) - \beta_i q_i >0, \\
0 & \mbox{\ if\ } \pi^* -\pi(t) - \beta_i q_i < 0,\\
\underline{u}_i & \mbox{\ if\ } \pi^* -\pi(t) - \beta_i q_i =0,
x_i(t)=L_i,\\
\overline{u}_i & \mbox{\ if\ } \pi^* -\pi(t) - \beta_i q_i =0,
x_i(t)=U_i.\\
\end{array}
\right.
$$
\end{lemma}
It remains to analyze whether with $\pi^* -\pi(t) - \beta_i q_i =0$,
other intermediate values of control could be optimal. The next lemma establishes that no intermediate control values are attained when the state is strictly within the boundaries.
\begin{lemma}(\!\!\cite[Appendix F]{2019arXiv190300988F})
If Assumption A2 holds, then for any $t$ in an interval $I \subset [0,T]$ such that $x_i(t) \in (L_i, U_i)$ for $i\in[N]$, the control is a piecewise constant function taking values in $\{0,1\}$.
\end{lemma}
At this point, it remains to prove the last assertion of Theorem \ref{ThmOptimalControlGeneral} concerning the synchronization of the controls when H1 holds. To this end, we proceed as follows.
%\def\overline{t}_i{\overline{t}_i}  % leftover macro definitions, already expanded in the text;
%\def\overline{t}_j{\overline{t}_j}  % as written they would redefine \overline, so they are commented out
\begin{lemma}(\!\!\cite[Appendix G]{2019arXiv190300988F})
Assume that H1 holds. Consider two trajectories $x_i$ and $x_j$ ending in the interior of the admissible state set, i.e., $x_i(T),x_j(T) \in (L,U)$, with control $u_i=u_j=u_{\text{end}}$, and $u_{\text{end}}$ being either the value 0 or 1.
Let $\overline{t}_i$ and $\overline{t}_j$ be the respective initial instances of the maximum time interval ending in $T$ with control $u_{\text{end}}$, i.e.,
\begin{align*}
& \overline{t}_i:=\inf \{t \in [0,T] : u_i(s)=u_{\text{end}}, s \in [t, T]\}, \\
& \overline{t}_j:=\inf \{t \in [0,T] : u_j(s)=u_{\text{end}}, s \in [t, T]\}.
\end{align*}
We have that $\overline{t}_i=\overline{t}_j$.
\end{lemma}
The next lemma, whose proof uses standard dynamic programming arguments, enables us to generalize the last assertions.
\begin{lemma}
Consider the optimal control problem \textbf{(P)} in (\ref{eq:obj})-(\ref{eq:ocp}) with solution $(\ensuremath{\mathbf{x}}^{*},\ensuremath{\mathbf{u}}^{*})$. For some given time $T^{\oplus}$ in $(0,T)$, consider the optimal control problem \textbf{(P}$^{\oplus}$\textbf{)} of minimizing
\begin{equation*}
\int_{0}^{T^{\oplus}} \pi(t) \left(u_1(t)+ u_2(t) + \ldots + u_N(t)\right) \ensuremath{{\rm d}} t
\end{equation*}
subject to the constraints (\ref{eq:isop})-(\ref{eq:ocp}) of problem \textbf{(P)}, and
$
x_i(T^{\oplus})=x^*_i(T^{\oplus})
$
for all $i\in[N]$.
For $i\in[N]$, denote the components of the optimal state for problem \textbf{(P}$^{\oplus}$\textbf{)} as $x^{\oplus}_i(t)$. Then, $x^{\oplus}_i(t)=x^*_i(t)$ for all $t \in [0,T^{\oplus}]$, for all $i\in[N]$.
\end{lemma}
Combining Lemma 3 and Lemma 4 by placing $T^{\oplus}$ at any instant of time for which the trajectories are in the interior of the admissible state constraint set, we conclude that all transitions of the control function to 0 or to 1, are synchronized whenever H1 holds.
\section{Characterization of the solution: monotone price case}
Consider first the case when the function $\pi$ is monotonically increasing.
%\def{t_i^{L,\text{in}}}{{t_i^{L,\text{in}}}}  % leftover macro definitions, already expanded in the text;
%\def{t_i^{U,\text{in}}}{{t_i^{U,\text{in}}}}  % as written these \def lines are invalid LaTeX,
%\def{t_i^{U,\text{in}}}{{t_i^{U,\text{in}}}}  % so they are commented out
%\deft_i^{\text{out}}{t_i^{\text{out}}}
%\deft^{\text{out}}{t^{\text{out}}}
%\deft^{*}{t^{*}}
\begin{proposition}(\!\!\cite[Appendix B]{2019arXiv190300988F})\label{proptstar}
Assume that the function $\pi$ is increasing, and $\hat{x}$ is constant. Then for $i\in[N]$, there exist $t^{*}_i$ such that the optimal control for problem \textbf{(P)} is
$$
u_i^*(t)=\left\{
\begin{array}{ll}
1 & \mbox{\ if\ } t < t^{*}_i, \quad x_i(t) \in (L_i,U_i),\\
\underline{u}_i & \mbox{\ if\ } t < t^{*}_i, \quad x_i(t)= L_i, \\
0 & \mbox{\ if\ } t \ge t^{*}_i, \quad x_i(t) \in (L_i,U_i), \\
\overline{u}_i & \mbox{\ if\ } t \le t^{*}_i, \quad x_i(t)= U_i.
\end{array}
\right.
$$
Moreover, if H1 holds then for all $i,j\in[N]$ such that $i\neq j$, we have $t^{*}_i= t^{*}_{j} =: t^{*}$.
\end{proposition}
In the case in which all dynamics are equal, i.e., H1 holds, the next result gives
the optimal solution in explicit form.
\begin{theorem}(\!\!\cite[Appendix C]{2019arXiv190300988F})
Assume the homogeneous population hypothesis H1. Assume also that the function $\pi$ is increasing, and that $\hat{x}$ is constant.
Then, the entry times for the temperature states at the boundary $L$, are
\begin{equation}
\label{tilin}
{t_i^{L,\text{in}}} := \frac{1}{\alpha} \log
\frac{x_{i0} + \beta/\alpha - \hat{x}}{L+ \beta/\alpha - \hat{x}}, \quad i\in[N];
\end{equation}
the time needed to go from $L$ to $U$ with zero control is
\begin{equation}
\label{t0}
t^0 := \frac{1}{\alpha} \log
\frac{\hat{x}- L}{\hat{x} - U};
\end{equation}
and the time $t^{*}$ in Proposition \ref{proptstar} solves
\begin{align*}
\!\!\displaystyle\sum_{i=1}^{N}\!\! \bigg\{{t_i^{L,\text{in}}}\wedge t^{*} \!+\! \left[t^{*}-{t_i^{L,\text{in}}}\right]^{+}\!\!\underline{u} \!+\! \left[T - t^{*}-t^0\right]^{+} \!\overline{u}\bigg\} \!=\! E.
\end{align*}
Furthermore, let ${t_i^{U,\text{in}}}:=t^0 + t^{*}$, $i\in[N]$, denote the entry times for the temperature states at the boundary $U$. In the case when ${t_i^{L,\text{in}}} \le t^{*} < {t_i^{U,\text{in}}} \le T$ for all $i\in[N]$, the time $t^{*}$ simplifies to
$$
t^{*} = \frac{E - (1-\underline{u})\left(\sum_{i=1}^{N} {t_i^{L,\text{in}}}\right) - N \overline{u}( T- t^0)}
{N(\underline{u}-\overline{u})}.
$$
Then, the optimal controls for problem \textbf{(P)} are
$$
u_i^*(t)=\left\{
\begin{array}{ll}
1 & \mbox{\ if\ } t \in [0, {t_i^{L,\text{in}}}\wedge t^{*}),\\
\underline{u}_i & \mbox{\ if\ } t \in [{t_i^{L,\text{in}}}\wedge t^{*}, t^{*}), \\
0 & \mbox{\ if\ } t \in [t^{*}, {t_i^{U,\text{in}}}\wedge T), \\
\overline{u}_i & \mbox{\ if\ } t \in [{t_i^{U,\text{in}}}\wedge T, T].
\end{array}
\right.
$$
The corresponding optimal states are given by
\begin{align*}
&x_i^*(t)= \nonumber\\
&\left\{
\begin{array}{ll}
e^{-\alpha t} x_{i0} + (\hat{x} -\beta/\alpha)(1 - e^{-\alpha t})
& \mbox{\ if\ } t \in [0, {t_i^{L,\text{in}}}\wedge t^{*}),\\
L & \mbox{\ if\ } t \in [{t_i^{L,\text{in}}}\wedge t^{*}, t^{*}), \\
\hat{x} + (L-\hat{x})e^{-\alpha (t-t^{*})} & \mbox{\ if\ } t \in [t^{*}, {t_i^{U,\text{in}}}\wedge T), \\
U & \mbox{\ if\ } t \in [{t_i^{U,\text{in}}}\wedge T, T].
\end{array}
\right.
\end{align*}
If $L<x_i(T)<U$, then $\pi^*=\pi(t^{*})$; otherwise $\pi^*=\left(\pi(t^{*}+t^0)-\pi(t^{*})e^{\alpha t^0}\right)/\left(1 - e^{\alpha t^0}\right)$.
\end{theorem}
The case when $\pi$ is decreasing, can be analyzed likewise. In particular,
the time needed to go from $U$ to $L$ with maximum control is
\begin{equation}
\label{t1}
t^1 := \frac{1}{\alpha} \log
\frac{U- \hat{x} + \beta/\alpha}{L- \hat{x} + \beta/\alpha}.
\end{equation}
\subsection{Example}
Consider a planning problem for a homogeneous population with the following data:
$N=2;
T=24;
L=18;
U=22;
\hat{x}=30;
\alpha=0.1;
\beta=20\alpha;
P=1;
E=0.5\times N\times T=24;
\ensuremath{\mathbf{x}}_0\equiv\left(x_{10},x_{20},x_{30}\right)^{\top}=[L+1, U-1, 0];
\pi(t)=1 +t.
$ We remind the readers that the last component of the state vector is an auxiliary state with dynamics $\dot{x}_{3} = u_{1}+u_{2}$, subject to boundary conditions $x_{3}(0)=0$, $x_{3}(T)=E$.
From \eqref{uoverbarunderbar}, we have $\overline{u}=0.4$, $\underline{u}=0.6$.
From Theorem 2, we obtain
${t_i^{L,\text{in}}} = [1.1778, 3.1845]$, $t^0 =4.0547$, $t^{*} =15.7469$, ${t_i^{U,\text{in}}} =19.8016$, the minimum value of cost $J=245.9712$, and $\pi^{*} =8.6376$.
In Fig. 1, we compare the optimal states $x_{i}^{*}(t)$, $i=1,2,3$, obtained using the numerical optimal
control solver ICLOCS \cite{falugi_iclocs_2010}, and the same obtained from Theorem 2.
\begin{figure}
\caption{{\small{Optimal state trajectories $x_{i}
\label{fig:fig1}
\end{figure}
\section{The Nonmonotone Price Case: A Simple Explicit Strategy}
In the preceding section, we presented the solution for problem \textbf{(P)} in explicit form for the monotone price case.
In this section, we address the general price function case by decomposing \textbf{(P)} into several subproblems, each corresponding to a time subinterval where the price function is monotone.
Suppose that there are $M$ subintervals in which the price function has monotonic segments.
In practice, the price forecasts for
the day-ahead energy market typically have no more than four
monotone segments.
Below, we describe an algorithm to iteratively compute the optimal allocation of the total energy budget in each of these $M$ subintervals. We will argue that the energy budget allocation problem can be cast as an optimization problem in $M$ scalar decision variables. The resulting problem has several features which make this approach tractable. \emph{First}, it will turn out to be a convex problem with respect to the
$M$ scalar decision variables. \emph{Second}, the multipliers $\pi^*$, which can also be determined explicitly, define a descent direction and also an optimality criterion.
The usefulness of the multipliers in optimal allocation of common resources has long been recognized in optimization \cite{everett_iii_generalized_1963}.
Specifically for $j=1,\hdots,M$, the multiplier $\pi_{j}^*$ in the $j$-th subinterval is precisely the multiplier associated with the corresponding isoperimetric constraint, and thus acts as a marginal cost of the energy fraction in that subinterval. The optimal solution is obtained when the $\pi_{j}^{*}$'s are all equal. So, when the values of the $\pi_{j}^*$'s are different, they help define the descent direction for optimal allocation.
They also provide a stopping criterion by detecting optimality.
\subsection{The parametric problem and its convexity}
Let the price curve have $M$ monotone segments supported over $M$ disjoint subintervals of $[0,T]$. Suppose these subintervals are of lengths $T_1$, $T_2$, \ldots $T_M$, with $T_1+T_2+ \ldots +T_M=T$. Let $\mathcal{E}:=(E_1,E_2, \ldots E_M)$ be a possible allocation of the total energy $E$ among these subintervals, i.e., $E_1+E_2+ \ldots +E_M=E$.
Consider the set of admissible allocations $\mathfrak{E}$, given by
\begin{align*}
\mathfrak{E} := \{ & (E_1,E_2, \ldots, E_M) \in \mathbb{R}^M_+: E_1 + E_2 + \ldots + E_M = E, \\
& E_j \in [\overline{E}_j, \underline{E}_j]
\mbox{\ for all $j=1,2, \ldots, M$} \},
\end{align*}
{ where $\overline{E}_{j} := N\overline u[T_j-t^0]^+$, and $\underline{E}_{j} := N\{(t^1\wedge T_j)+\underline u[T_j-t^1]^+\}$}
(see \cite[Appendix D]{2019arXiv190300988F} for details on these limits).
For some $\mathcal{E}=(E_1,E_2, \ldots E_M) \in \mathfrak{E}$, consider the parametric problem $ \mathcal{P}(\mathcal{E})$ given by
\begin{equation*}
\underset{\ensuremath{\mathbf{u}}\in\mathcal{U}}{\text{Minimize}} \: J(\ensuremath{\mathbf{u}})= \!\!\int_{0}^{T}\!\!\!\!\pi(t) \left(u_1(t)+ u_2(t) + \ldots + u_N(t)\right) \ensuremath{{\rm d}} t
\end{equation*}
subject to
\begin{align}
& \dot{x}_i(t)=-\alpha_i( x_i(t) - \hat{x}(t)) - \beta_i u_i(t), \:i\in[N], \label{eq:ocp_par1}\\
& \dot{x}_{N+1}(t)= \sum_{i=1}^N u_i(t), \qquad\qquad\qquad\quad \mbox{a.e.\ } t \in [0,T],
\label{eq:isop-par}\\
& x_i(0)=x_{i0}, \qquad\qquad\qquad\qquad\qquad\;\; i\in[N],\\
& x_{N+1}(0)=0, \\
& x_{N+1}(T_1)=E_1, \\
& x_{N+1}(T_1+T_2)=E_1+E_2, \\
& \cdots \nonumber \\
& x_{N+1}(T_1+\ldots + T_M)=E_1+\ldots + E_M,\\
& u_i(t) \in [0,1], \qquad\;\, \mbox{a.e.\ } t \in [0,T], \quad i\in[N], \\
& L_i \le x_i(t) \le U_i, \quad \mbox{for all\ } t \in [0,T], \quad i\in[N].
\label{eq:ocp_par2}
\end{align}
An important property of this problem is given in the following result.
\begin{proposition}(\!\!\cite[Appendix D]{2019arXiv190300988F})
The parametric problem $ \mathcal{P}(\mathcal{E})$ is convex in $\mathcal{E}\in\mathfrak{E}$.
\end{proposition}
Next, we give an algorithm to solve problem \textbf{(P)} using the monotone segments of the price curve.
\subsection{Algorithm}
\begin{enumerate}
\item[(B.1)] Divide the price function $\pi(t)$ into $M$ monotone segments with the corresponding time subintervals having lengths $T_1$, $T_2$, \ldots, $T_M$, respectively.
\item[(B.2)] Choose a feasible energy allocation $\mathcal{E}\in\mathfrak{E}$, i.e., choose $E_{j} \in [\overline{E}_{j}, \underline{E}_{j}]$, $j \in [M]$, such that $\sum_{j=1}^{M}E_{j}=E$.
\item[(B.3)] For each $j \in [M]$, compute the optimal solution as well as $\pi_{j}^{*}$ for the $j$-th segment, using the explicit formulas in Theorem 2.
\item[(B.4)] Compare the multipliers $\pi_{j}^{*}$, $j \in [M]$, among all segments.
\textbf{If} the multipliers $\pi_{j}^{*}$ are considered equal, \textbf{then} Stop.
\textbf{Else} reduce $E_j$ in segments with higher $\pi^*_j$, and increase $E_j$ in segments with lower $\pi^*_j$, according to an update rule given next.
\item[(B.5)] Repeat from (B.3).
\end{enumerate}
For iteration index $k=1,2,\hdots$, the update rule in (B.4) can be implemented as
$$
E_{j}^{(k)}= E_{j}^{(k-1)} - \gamma \frac{ \pi^*_{j} - \widehat{\pi}^{*}}{\widehat{\pi}^{*}} {\widehat{E}}, \quad j=1,\hdots,M,
$$
where $\widehat{E}:=\sum_{j=1}^{M}E_{j}/M$, $\widehat{\pi}^{*}:=\sum_{j=1}^{M}\pi_{j}^{*}/M$, and $\gamma$ is a positive
parameter (we select $\gamma=0.5$ in the numerical example below).
{ The stopping criterion is $|\pi^*_i - \pi^*_j | < \epsilon_\pi$ for all $i,j\in[M]$, for some small positive parameter $\epsilon_\pi$}.
If $E_j^{(k)} \not \in [\overline{E}_j, \underline{E}_j]$ after the update, then select the nearest extremum in this interval and rescale the remaining non-saturated $E_j$'s so that $\sum_{j=1}^{M}E_{j}=E$.
The convergence of the algorithm is sensitive to the parameter $\gamma$. If it is too small, then the convergence is slow; if it is too large, then the components of the energy allocated to each segment overshoot the average $\widehat{E}$.
Nevertheless, being just a scalar parameter, as in the case of step-lengths in line search algorithms, it is simple to tune.
\begin{theorem}
If the multipliers resulting from the previous algorithm at any iteration satisfy $\pi_{i}^{*}=\pi_{j}^{*}$ for all $i,j=1,\hdots,M$, then that solution is optimal.
\end{theorem}
Proof. The concatenation of the controls, trajectories, and multipliers satisfies the optimality conditions of Proposition 1. Then, the convexity of the problem guarantees optimality.
\subsection{Example}
We consider an example with non-monotone price function
$
\pi(t)=5-\sin(2 \pi t/24),
$
while keeping the remaining problem data as in Example 1.
The price function is decreasing in the segment $[0,6]$, increasing in $[6,18]$ and again decreasing in $[18,24]$.
Using the optimal control solver ICLOCS, we obtain:
cost $J=
112.6562;
\pi^{*} =
5.3090.
$
From the algorithm in Section V.B, we obtain that
$
E= [
6.8054,\ 12.1189,\ 5.0757 ];
J =
112.6750.
$
The resulting optimal states $x_{i}^{*}(t)$ are illustrated in Fig. 2, both
the numerical solution obtained using the optimal control solver ICLOCS, and the solution obtained from the algorithm in Section V.B implementing the explicit strategy and Theorems 2-3.
We stress that the implementation of the explicit strategy requires solving a convex optimization problem with just $M$ decision variables (in this example, the number of monotone segments is $M=3$ and the solution is almost instantaneous).
This contrasts with the situation in discrete time formulations, such as Dynamic Programming or Mixed Integer Linear Programming (MILP), where one obtains much higher dimensional problems inviting significant computational load.
For example, with a switching period $T_m$ of 1 minute, the MILP formulation has $24 \times 60 \times 2 \times N$ decision variables (see \cite[Sect. 3.3]{halder2019optimal}).
\begin{figure}\label{fig:fig3}
\end{figure}
\section{Conclusions}
In this paper, we considered an optimal planning problem for demand response from the perspective of a utility or LSE, where the objective is to compute aggregate consumption for a population of thermostatic loads, conditioned on a forecasted price trajectory, that incurs the minimum cost of energy over the planning horizon. Solution of this problem can be used by the LSE for purchasing energy from the day-ahead market. A natural optimal control formulation is given that is non-convex in controls, and accounts for practical switching constraints for thermostatic loads. We showed that solution of a convex relaxation can be used to recover the optimal (non-convex) solutions compliant with the switching constraints. Structural results for this relaxed problem are then exploited to further decompose this problem into sub-problems over time-intervals corresponding to monotone segments of the price forecast trajectory, which are shown to be computationally much more tractable than the original mixed-integer optimal control problem.
\section*{Acknowledgment}
{\footnotesize
This work is supported in part by NSF Science and Technology Center grant CCF-0939370, the Power Systems Engineering Research Center (PSERC), ERDF/{\-}COMPETE/{\-}NORTE2020/{\-}POCI/{\-}FCT funds through grants
PTDC-{\-}EEI-{\-}AUT-{\-}2933-{\-}2014--Toccata,
and
02/{\-}SAICT/{\-}2017-{\-}31447-{\-}Upwind.
}
\appendix
\section*{Proof of intermediate results}
The proof of some intermediate results, for lack of space, is not included in the version submitted to the IEEE Control Systems Letters and to 2019 IEEE Conference on Decision and Control. Therefore, we are placing them here for completeness and for reviewing purposes.
\subsection{Proof of Proposition 1}
Noticing that $x_{N+1}$ is unconstrained and that (\ref{eq:isop})
does not depend on the state, the application of the maximum principle yields
$
q_{N+1}=p_{N+1},\quad \dot{p}_{N+1}=0.
$
That is, $q_{N+1}$ has a constant value. Denote such a value by $\pi^*$.
The remaining conditions follow from direct application of Theorem 3.2 in \cite{fontes2015normality}.
\subsection{Proof of Proposition 2}
Assume the price is monotonically increasing. Then, the optimal strategy is to consume energy as early as possible while respecting the constraints. That, combined with Theorem 1 suggests the control function described as a candidate to optimal. With the control function and the initial state defined, we can compute the trajectories and the adjoint vectors and show that the candidate solution, in fact satisfies the optimality conditions in Proposition 1.
The computations are done explicitly below, in the proof of Theorem 2, for the case H1 is satisfied.
\subsection{Proof of Theorem 2}
Assuming H1, by Thm 1 we have $t^{*}_i =t^{*}$ for all $i=1,2, ...,N$.
Let ${t_i^{L,\text{in}}}$ be the entry time on the boundary $L_i$ (the first instant $t$ for which $x_i(t)=L_i$).
The first control switch might occur at ${t_i^{L,\text{in}}}$ or $t^{*}$, depending on which occurs first. Therefore, we have
$$
u_i^*(t)=\left\{
\begin{array}{ll}
1 & \mbox{\ if\ } t \in [0, \min\{{t_i^{L,\text{in}}},t^{*}\})\\
\underline{u}_i & \mbox{\ if\ } t \in [ \min\{{t_i^{L,\text{in}}},t^{*}\}, t^{*}).
\end{array}
\right.
$$
Similarly, we define ${t_i^{U,\text{in}}}$ to be the entry time on the boundary $U_i$ (the first instant $t$ for which $x_i(t)=U_i$), and
$$
u_i^*(t)=\left\{
\begin{array}{ll}
0 & \mbox{\ if\ } t \in [t^{*}, \min\{{t_i^{U,\text{in}}},T\}) \\
\overline{u}_i & \mbox{\ if\ } t \in [\min\{{t_i^{U,\text{in}}},T\}, T].
\end{array}
\right.
$$
This defines the trajectories
\begin{align*}
&x_i^*(t)= \nonumber\\
&\left\{
\begin{array}{ll}
e^{-\alpha_i t} x_{i0} + \\
\quad (\hat{x} -\beta_i/\alpha_i)(1 - e^{-\alpha_i t})
& \mbox{\ if\ } t \in [0, \min\{{t_i^{L,\text{in}}},t^{*}\})\\
L_i & \mbox{\ if\ } t \in [ \min\{{t_i^{L,\text{in}}},t^{*}\}, t^{*}) \\
\hat{x} + (L_i-\hat{x})e^{-\alpha_i (t-t^{*})} & \mbox{\ if\ } t \in [t^{*}, \min\{{t_i^{U,\text{in}}},T\}) \\
U_i & \mbox{\ if\ } t \in [\min\{{t_i^{U,\text{in}}},T\}, T],
\end{array}
\right.
\end{align*}
and, using also the optimality conditions, the adjoint multipliers satisfy
\begin{align*}
& \dot{q}_i(t)= \alpha q_i(t) & t \in [0, \min\{{t_i^{L,\text{in}}},t^{*}\}) \\
& q_i({t_i^{L,\text{in}}})= \frac{1}{\beta_i}(\pi^* - \pi({t_i^{L,\text{in}}})) \\
& q_i(t)=\frac{1}{\beta_i}(\pi^* - \pi(t)) & t \in ( \min\{{t_i^{L,\text{in}}},t^{*}\},t^{*}] \\
& \dot{q}_i(t)= \alpha q_i(t) & t \in [t^{*},\min\{{t_i^{U,\text{in}}},T\}] \\
& q_i({t_i^{U,\text{in}}})= \frac{1}{\beta_i}(\pi^* - \pi({t_i^{U,\text{in}}})) \\
& q_i(t)= \frac{1}{\beta_i}(\pi^* - \pi(t)) & t \in [\min\{{t_i^{U,\text{in}}},T\}, T) \\
& q_i(T)=0,
\end{align*}
where $\pi^*$ also satisfies
\begin{align*}
& \pi^*> \pi(t)+ \beta_i q_i(t) & t \in [0, \min\{{t_i^{L,\text{in}}},t^{*}\}) \\
& \pi^*= \pi(t)+\beta_i q_i(t) & t \in [ \min\{{t_i^{L,\text{in}}},t^{*}\},t^{*}] \\
& \pi^* < \pi(t)+\beta_i q_i(t) & t \in (t^{*},\min\{{t_i^{U,\text{in}}},T\}) \\
& \pi^*= \pi(t)+\beta_i q_i(t) & t \in [ \min\{{t_i^{U,\text{in}}},T\},T].
In case $x_i(T) \in (L_i,U_i)$, then $\pi^*$ is given by
$$\pi^*=\pi(t^{*}),$$
else
$$
\pi^*=\frac{\pi(t^{*}+t^0)-\pi(t^{*})e^{\alpha t^0}}{1 - e^{\alpha t^0}}.
$$
The knowledge of the trajectories enables us to compute the times ${t_i^{L,\text{in}}}$ and ${t_i^{U,\text{in}}}$ explicitly.
The time to go from $x_{i0}$ to $L_i$ with control $ u_i^*(t)=1$ is
\begin{equation}
\label{tilin-proof}
{t_i^{L,\text{in}}} = \frac{1}{\alpha_i} \ln
\frac{x_{i0} + \beta_i/\alpha_i - \hat{x}}{L_i+ \beta_i/\alpha_i - \hat{x}},
\end{equation}
the time $t^0$ (time to go from $L_i$ to $U_i$ with zero control) is
\begin{equation}
\label{t0-proof}
t^0_i= \frac{1}{\alpha_i} \ln
\frac{\hat{x}- L_i}{\hat{x} - U_i},
\end{equation}
the isoperimetric constraints impose that the time $t^{*}$ solves
\begin{align}
\label{tstar}
\sum_{\ensuremath{i=1,2, \ldots, N}} \Big\{ \min\{{t_i^{L,\text{in}}},t^{*}\} &+ [t^{*}-{t_i^{L,\text{in}}}]^+ \underline{u}_i \nonumber \\
&+ [T - t^{*}-t^0]^+ \overline{u}_i \Big\} = E.
\end{align}
In the case where ${t_i^{L,\text{in}}} \le t^{*} < {t_i^{U,\text{in}}} \le T$ for all $i$, $t^{*}$ is given by
the simpler expression
$$
t^{*} = \frac{E - (1-\underline{u})\sum_{\ensuremath{i=1,2, \ldots, N}} {t_i^{L,\text{in}}} - N \overline{u}( T- t^0)}
{N(\underline{u}-\overline{u})},
$$
and
\begin{equation}
\label{tiuin}
{t_i^{U,\text{in}}}=t^0_i + t^{*}.
\end{equation}
The case when the price function $\pi$ is decreasing is analysed in an analogous way. It involves the time needed to reach $L$ from $U$ with control $u\equiv 1$, which is of use later.
Denote this time by $t^1$. Explicitly, $t^1$ is given by
\begin{align*}
t^1=\frac{1}{\alpha}\log\frac{U-\hat x+\beta/\alpha}{L-\hat x+\beta/\alpha}.
\end{align*}
\subsection{Proof of Proposition 3}
We start by computing the minimum and maximum possible energy limits in each monotone segment $m$, respectively $\overline{E}_m$ and $\underline E_m$.
Let $0=t_0<t_1<\cdots<t_M=T$ be a partition of $[0,T]$ and $T_m:=t_m-t_{m-1}$. The minimum possible energy in the subinterval $[t_{m-1},t_m]$ is given by
\begin{align*}
\overline{E}_m=& N\left \{\int_{t_{m-1}}^{t_{m-1}+t^0}\text{d}t+\int_{t_{m-1}+t^0}^{t_m}\overline u\text{d}t\right \} \\
=& N\overline u[t_m-t_{m-1}-t^0],
\end{align*}
when $t_{m-1}+t^0\leq t_{m}$, and is 0 when $t_{m-1}+t^0>t_{m}$. In both cases we get
\begin{align*}
\overline{E}_m=N\overline u[t_m-t_{m-1}-t^0]^+=N\overline u[T_m-t^0]^+.
\end{align*}
The maximum possible energy in the same interval is given by
\begin{align*}
\underline E_m &= N\left\{\int_{t_{m-1}}^{t_{m-1}+t^1}\text{d}t+\int_{t_{m-1}+t^1}^{t_m}\underline u\text{d}t\right\} \\
&= N\{t^1+\underline u[t_m-t_{m-1}-t^1]\},
\end{align*}
when $t_{m-1}+t^1\leq t_m$, and
\begin{align*}
\underline E_m=N \int_{t_{m-1}}^{t_m}\text{d}t= N (t_m-t_{m-1}),
\end{align*}
when $t_{m-1}+t^1>t_m$. Thus
\begin{align*}
\underline E_m=N\{(t^1\wedge T_m)+\underline u[T_m-t^1]^+\}.
\end{align*}
In short,
\begin{align*}
\overline{E}_m&=N\overline u[T_m-t^0]^+,\\
\underline E_m&=N\{(t^1\wedge T_m)+\underline u[T_m-t^1]^+\}.
\end{align*}
Now, note that
each state component $x_i$ can be written as an affine functional of the function $\ensuremath{\mathbf{u}}$,
since
$$
x_i(t)= \mathrm{e}^{-\alpha_i t} x_{i0} + \int_0^t
\mathrm{e}^{-\alpha_i (t-s)} (\alpha_i\hat{x}(s) - \beta_i u_i(s) ) ds.
$$
Therefore, all constraints of problem $ \mathcal{P}(\mathcal{E})$, (\ref{eq:ocp_par1}--\ref{eq:ocp_par2}), can be written in the form
\begin{align*}
g_i(\ensuremath{\mathbf{u}},\mathcal{E} ) = 0, \quad
h_i(\ensuremath{\mathbf{u}},\mathcal{E} ) \le 0,
\end{align*}
with $g_i$ and $h_i$ affine functions of $\ensuremath{\mathbf{u}}$ and $\mathcal{E}$, defining a jointly convex domain in ($\ensuremath{\mathbf{u}},\mathcal{E}$).
As a consequence, the set-valued map $R: \mathbb{R}^M \rightrightarrows \mathcal{U}$
$$
R(\mathcal{E}):=\{ \ensuremath{\mathbf{u}} \in \mathcal{U}: \mbox{\ (\ref{eq:ocp_par1}--\ref{eq:ocp_par2}) are satisfied with $\mathcal{E}$} \}
$$
is convex on $\mathfrak{E}$.
That is, the set
$$
\mathrm{Graph}(R):=\{(\mathcal{E},\ensuremath{\mathbf{u}}): \mathcal{E} \in \mathfrak{E}, \ensuremath{\mathbf{u}} \in R(\mathcal{E}) \}
$$
is convex, or equivalently \cite{fiacco_convexity_1986}, for all $\lambda \in [0,1]$, all $\mathcal{E}_1, \mathcal{E}_2 \in \mathfrak{E}$
$$
\lambda R(\mathcal{E}_1) + (1-\lambda) R(\mathcal{E}_2) \subseteq R(\lambda \mathcal{E}_1 + (1-\lambda)\mathcal{E}_2).
$$
We note also that the set $\mathfrak{E}$ is convex. Therefore, using the arguments in
Prop. 2.1. in \cite{fiacco_convexity_1986}, we can show that
for all $\lambda \in [0,1]$, all $\mathcal{E}_1, \mathcal{E}_2 \in \mathfrak{E}$,
\begin{align*}
V(\lambda \mathcal{E}_1 &+ (1-\lambda) \mathcal{E}_2)
= \min_{\ensuremath{\mathbf{u}} \in R(\lambda \mathcal{E}_1+ (1-\lambda) \mathcal{E}_2)} J(\ensuremath{\mathbf{u}}) \\
&\le \min_{\ensuremath{\mathbf{u}}_1 \in R(\mathcal{E}_1),\ensuremath{\mathbf{u}}_2 \in R(\mathcal{E}_2)} J(\lambda \ensuremath{\mathbf{u}}_1 + (1-\lambda) \ensuremath{\mathbf{u}}_2) \\
&= \lambda \min_{\ensuremath{\mathbf{u}}_1 \in R(\mathcal{E}_1) } J( \ensuremath{\mathbf{u}}_1) + (1-\lambda) \min_{\ensuremath{\mathbf{u}}_2 \in R(\mathcal{E}_2)} J(\ensuremath{\mathbf{u}}_2) \\
&= \lambda V(\mathcal{E}_1) + (1-\lambda) V(\mathcal{E}_2).
\end{align*}
That is, $\mathcal{E} \mapsto V(\mathcal{E})$ is convex on $\mathfrak{E}$.
\subsection{Proof of Lemma 1}
The maximization of the Hamiltonian condition directly yields the cases when $u_i^*(t)=0$ and when $u_i^*(t)=1$. Other intermediate values can only occur if $\pi^* -\pi(t) - \beta_i q_i =0$. When the trajectory is on the boundary $L_i$ for some interval of time, we must have $\dot{x}_i(t)=0$.
The dynamic equation equal to zero immediately yields $u_i^*(t)= \underline{u}_i= \frac{\alpha_i}{\beta_i}(\hat{x}-L_i)$. The same argument can be used on the boundary $U_i$ to get $\overline{u}_i$.
\subsection{Proof of Lemma 2}
For the function $u_i$ to assume some intermediate values not in the set $\{0,1\}$, by the maximization of the Hamiltonian we would have to have in that time interval
$$
\pi^* -\pi(t) - \beta_i q_i =0,
$$
$$
\frac{d}{dt}(\pi^* -\pi(t) - \beta_i q_i) =0.
$$
Developing this last equation and substituting $q_i$ from the previous equation, we obtain
$$
\frac{d}{dt} \pi(t)= \alpha_i \pi(t) - \alpha_i \pi^*.
$$
However, the solution of this equation is precisely of the structure that Assumption 2 rules out.
\subsection{Proof of Lemma 3}
Assume in contradiction to what we wish to prove that $\overline{t}_i<\overline{t}_j$,
and, without loss of generality, that $u_{end}=1$.
By (14)-(16), we have
\begin{align*}
& q_i(T)=q_j(T)=0, \\
& \dot{q}_i(t)= \alpha q_i(t), \quad t \in [\overline{t}_j,T],\\
& \dot{q}_j(t)= \alpha q_j(t), \quad t \in [\overline{t}_j,T].
\end{align*}
Therefore
$$
q_i(t)=q_j(t), \quad t \in [\overline{t}_j,T].
$$
By the way $\overline{t}_j$ is defined, we have
\begin{align}
& \pi^* -\pi(t) - \beta_i q_j(t) > 0 \quad t \in (\overline{t}_j,T]\\
& \pi^* -\pi(t) - \beta_i q_j(t) = 0 \quad t=\overline{t}_j,
\label{eq1}
\end{align}
but also
\begin{equation}
\pi^* -\pi(t) - \beta_i q_i(t) > 0 \quad t \in (\overline{t}_i,T].
\label{eq2}
\end{equation}
Since $\overline{t}_i<\overline{t}_j$, the last two equations are a contradiction.
Repeating the same argument when $u_{end}=0$, we prove the lemma.
\end{document} |
\begin{document}
\title{{\Large\sc All Inequalities for the Relative Entropy}}
\author{Ben Ibinson}
\email{[email protected]}
\author{Noah Linden}
\email{[email protected]}
\author{Andreas Winter}
\email{[email protected]} \affiliation{Department of
Mathematics, University of Bristol, Bristol BS8 1TW, United Kingdom}
\date{29th November 2005}
\begin{abstract}
The relative entropy of two $n$-party quantum states is an
important quantity exhibiting, for example, the extent to which
the two states are different. The relative entropy of the states
formed by reducing two $n$-party states to a smaller number $m$ of
parties is always less than or equal to the relative entropy of
the two original $n$-party states. This is the monotonicity of
relative entropy.
Using techniques from convex geometry, we prove that
monotonicity under restrictions is the only general inequality
satisfied by relative entropies. In doing so we make a
connection to secret sharing schemes with general access
structures.
A surprising outcome is that the structure of allowed relative
entropy values of subsets of multiparty states is much simpler
than the structure of allowed entropy values. And the structure
of allowed relative entropy values (unlike that of entropies) is
the same for classical probability distributions and quantum
states.
\end{abstract}
\keywords{relative entropy, inequalities, cone, secret sharing.}
\maketitle
\section{Entropy and relative entropy}
\label{sec:intro}
Entropy inequalities play a central role in information
theory~\cite{Cover:Thomas}, classical or quantum. This is so
because practically all capacity theorems are formulated in terms
of entropy, and the same, albeit to a lesser degree, holds for many
monotones, of, for example, entanglement: e.g., the
\emph{entanglement of formation}~\cite{BDSW} or \emph{squashed
entanglement}~\cite{CW:04}. It may thus come as a surprise that
until recently~\cite{LW05} essentially the only inequality known
for the von Neumann entropies in a composite system is
\emph{strong subadditivity}
\begin{equation}
\label{eq:ssa}
S(\rho^{AB}) + S(\rho^{BC}) \geq S(\rho^{ABC}) + S(\rho^B),
\end{equation}
proved by Lieb and Ruskai~\cite{LR73}. We use the notation
$\rho^{ABC}$ for the density operator representing the state of
the system $ABC$, with the notation $\rho^{BC}=\tr_A\rho^{ABC}$
etc.\ for the reduced states.
The relative entropy of two states $\rho,\sigma$ (density
operators of trace $1$) is defined as
\begin{equation*}
D(\rho\|\sigma) = \begin{cases}
\tr \rho(\log\rho-\log\sigma) & \text{ if }
\supp\rho\subset\supp\sigma, \\
+\infty & \text{ otherwise},
\end{cases}
\end{equation*}
where $\supp\rho$ is the supporting subspace of the density
operator $\rho$. Note that in this paper, $\log$ always denotes the
logarithm to base 2. Like von Neumann entropy, the relative
entropy is used extensively in quantum information and
entanglement theory to obtain capacity-like quantities and
monotones. The most prominent example may be the \emph{relative
entropy of entanglement}~\cite{VedPlen,VPRK}. Many other
applications of the relative entropy are illustrated in the
review~\cite{Vedral}.
In this paper we study the universal relations between the
relative entropies in a composite system and for general pairs of
states. For the most part we shall restrict ourselves to finite
dimensional spaces.
What are the known inequalities? First of all, the relative
entropy is always nonnegative, and indeed $0$ iff $\rho=\sigma$
(see the recent survey by Petz~\cite{Petz}). The most important,
and indeed only known inequality, for the relative entropy is the
\emph{monotonicity},
\begin{equation}
\label{eq:mono}
D(\rho^{AB}\|\sigma^{AB}) \geq D(\rho^A\|\sigma^A)
\end{equation}
for a bipartite system $AB$. This relation can be derived from
strong subadditivity, eq.~(\ref{eq:ssa}), as was shown by
Lindblad~\cite{Lindblad} in the finite dimensional case;
Uhlmann~\cite{Uhlmann} later showed it in generality. To
illustrate the connection, strong subadditivity can be easily
derived from eq.~(\ref{eq:mono}). Note that we can identify the
following relative entropy quantity with \emph{quantum mutual
information}~\cite{CA:97}:
\begin{align*}
D(\rho^{AB} \| \rho^A \otimes \rho^B)
&= \tr{\big(\rho^{AB} \log\rho^{AB}\big)}
- \tr{\big(\rho^{AB} \log (\rho^A \otimes \rho^B)\big)}\\
&=-S(\rho^{AB})- \tr{\big(\rho^{AB} \log \rho^A\big)}
- \tr{\big(\rho^{AB} \log \rho^B\big)} \\
&=-S(\rho^{AB})+S(\rho^A)+S(\rho^B) \\
&=I(A:B).
\end{align*}
Hence we can recover strong subadditivity from the monotonicity
relation
\begin{equation*}
D(\rho^{ABC}\|\rho^{AB} \otimes \rho^C) \geq D(\rho^{BC}\|\rho^B
\otimes \rho^C),
\end{equation*}
as follows:
\begin{align*}
0 &\leq D(\rho^{ABC}\|\rho^{AB} \otimes \rho^C)
- D(\rho^{BC}\|\rho^B\otimes \rho^C) \\
&=I(AB:C) - I(B:C) \\
&=S(\rho^{AB})+S(\rho^C)-S(\rho^{ABC})- S(\rho^B)-S(\rho^C)+S(\rho^{BC})\\
&=S(\rho^{AB})+S(\rho^{BC}) - S(\rho^{ABC})-S(\rho^B).
\end{align*}
Before returning to relative entropy we make a few further
observations about entropy. For an $n$-party system, there are
$2^n-1$ non-trivial reduced states, with their entropies, so we
can associate with each state a vector of $2^n-1$ real
coordinates. Pippenger~\cite{Pippenger}, following the programme
of Yeung and Zhang in the classical case~\cite{Yeung}, showed
that, after going to the topological closure, the set of all
entropy vectors is a convex cone. Hence it must be describable by
linear (entropy) inequalities, like strong subadditivity, and one
can ask if the entropy cone coincides with the cone defined by the
``known'' inequalities (strong subadditivity in the quantum case,
additionally positivity of conditional entropy classically). This
is indeed the case for $n\leq 3$: the classical result is due to
Yeung and Zhang~\cite{Yeung}, the quantum case by
Pippenger~\cite{Pippenger}. Yeung and Zhang~\cite{YZ} have however
found a new, ``non-Shannon type'' inequality for $n=4$ classical
parties, and Linden and Winter~\cite{LW05} found a new so-called
constrained inequality for $n=4$ quantum parties, providing
evidence that to describe the entropy cones of four and more
parties one needs new inequalities, too.
In~\cite{LW05} Linden and Winter describe how the putative vector
of entropies,
\begin{equation}
\label{eq:ray}
[S_A,S_B,\ldots,S_{ABCD}] = \lambda [3,3,2,2,4,3,3,3,3,4,4,4,3,3,2],
\end{equation}
for $\lambda \geq 0$, satisfies strong subadditivity for all subsets
of parties $ABCD$, but is nonetheless not achievable by any quantum
state $\rho$ [i.e.\ there is no quantum state $\rho$ such that
$S_A=S_A(\rho_A), S_B=S_B(\rho_B)$ etc.\ achieving the values in
eq.~(\ref{eq:ray})]. Here we ask (and answer in the affirmative) the
question of whether any vector
\begin{equation*}
[D_A,D_B,\ldots,D_{ABCD}]
\end{equation*}
in which the numbers $D_A,\ldots,D_{ABCD}$ satisfy the constraints
of monotonicity for all subgroups may be realised as the relative
entropy of pairs of states [i.e.\ for any such vector we show that
there are states $\rho$ and $\sigma$ such that $D_A =
D(\rho_A\|\sigma_A)$ etc.]
In this paper we prove the result that for relative entropy,
monotonicity is necessary and sufficient to describe the complete
set of realisable relative entropy vectors. This is a surprising
discovery as relative entropy is a seemingly more complex
functional than entropy. However strong subadditivity is
sufficient to define all possible relative entropy vectors (as
monotonicity is derived from it) whereas it cannot encapsulate
normal von Neumann entropy. Our approach is as follows: we show
first, by adapting the Yeung-Pippenger techniques, that the
topological closure of the set of all relative entropy vectors is
a convex cone (section~\ref{sec:relent-cone}). Then we study the
extremal rays of the Lindblad-Uhlmann cone defined by
monotonicity, in section~\ref{sec:LU-cone}: they correspond
one-to-one to so-called up-sets in $2^{[n]}$. It remains to prove
that every one of the rays is indeed populated by relative entropy
vectors, which we do in section~\ref{sec:equality}. It turns out
that the construction to show this depends heavily on secret
sharing schemes, which we explain in section~\ref{sec:equality},
to make the paper self-contained, followed by an instructive
example in section~\ref{sec:thres}, after which we conclude.
\section{The cone of relative entropy vectors}
\label{sec:relent-cone} Define the set $\Lambda_n^* \subset
\RR_{\geq 0}^{2^n -1}$ of vectors $\mathbf{v} = \bigl( v_{\cal S}
\bigr)_{\emptyset\neq{\cal S}\subset[n]}$, with $[n] =
\{1,2,\ldots,n\}$: $\mathbf{v}\in \Lambda_n^*$ iff there exist
quantum states of $n$-parties $\rho,\sigma$ such that
$D(\rho^{\cal S}\|\sigma^{\cal S})=v_{\cal S}$ for every non-empty
subset $\cal S$. Observe that there are $2^n-1$ nonempty subsets
${\cal S}$, which label the coordinates of $\RR^{2^n -1}$ in some
fixed way.
\begin{lemma}
\label{lemma:cone}
The topological closure $\overline{\Lambda_n^*}$ of $\Lambda_n^*$ is
a convex cone. To be precise, it is enough to show that~\cite{Pippenger}:
\begin{enumerate}
\item (Additivity) for $\mathbf{v},\mathbf{w}\in \Lambda_n^*$,
$\mathbf{v}+\mathbf{w}\in\Lambda_n^*$;
\item (Approximate diluability) for all $\delta>0$ there exists $\epsilon>0$ such that
for all $\mathbf{v}\in\Lambda_n^*$ and $0 \leq \lambda \leq \epsilon$
there is $\mathbf{w}\in\Lambda_n^*$ with $\| \lambda \mathbf{v} - \mathbf{w} \| \leq
\delta$.
\end{enumerate}
(We use the sup norm in the proof below, but since all norms in finite dimensions
are equivalent, the exact choice of the norm is irrelevant.)
\end{lemma}
\begin{proof}
Consider the following states $\rho , \rho', \sigma$ and
$\sigma'$ where the prime indicates that the corresponding state lives on a system
different from the unprimed states. Let us define $\mathbf{v}$ and $\mathbf{v}'$ as the
relative entropy vectors generated from taking entropy values of
$D(\rho^{\cal S}\|\sigma^{\cal S})$ and $D(\rho^{'\cal S}\|\sigma^{'\cal
S})$ respectively. Consider states $\widetilde{\rho}=\rho \otimes
\rho'$ and $\widetilde{\sigma}=\sigma \otimes
\sigma'$. To prove the first part of the Lemma, we show $\widetilde{\mathbf{v}} =
\mathbf{v}'+\mathbf{v}$ for the relative entropy vector
$\widetilde{\mathbf{v}}$ of $\widetilde{\rho}$; in detail,
for every ${\cal S} \subset [n]$,
\begin{equation*}
D(\widetilde{\rho}^{\cal S} \| \widetilde{\sigma}^{\cal S})=
D(\rho^{\cal S} \otimes \rho'^{\cal S} \| \sigma^{\cal S} \otimes \sigma'^{\cal S} ) =
D(\rho^{\cal S} \| \sigma^{\cal S}) + D(\rho'^{\cal S} \| \sigma'^{\cal S}).
\end{equation*}
Then,
\begin{align*}
D(\widetilde{\rho}^{\cal S} \| \widetilde{\sigma}^{\cal S})
=& -S(\widetilde{\rho}^{\cal S})
-\tr(\widetilde{\rho}^{\cal S} \log \widetilde{\sigma}^{\cal S}) \\
=& -S(\rho^{\cal S}) - S(\rho'^{\cal S}) - \tr\big(\rho^{\cal S} \ox
\rho'^{\cal S} \log \big(\sigma^{\cal S} \ox \sigma'^{\cal S}\big)\big).
\end{align*}
We use the fact that
$\log(\sigma\otimes\sigma')=(\log \sigma)\otimes \1 + \1\otimes(\log\sigma')$.
Therefore,
\begin{align*}
D(\widetilde{\rho}^{\cal S} \| \widetilde{\sigma}^{\cal S})
=& -S(\rho^{\cal S}) - S(\rho'^{\cal S})
-\tr(\rho^{\cal S} \log \sigma^{\cal S}) - \tr(\rho'^{\cal S} \log
\sigma'^{\cal S})\\
=& D(\rho^{\cal S}\|\sigma^{\cal S}) + D(\rho'^{\cal S}\|\sigma'^{\cal S}).
\end{align*}
Therefore we can always construct a state that will give a vector
in $\Lambda_n^*$ and is the sum of $\mathbf{v}$ and
$\mathbf{v}'$.
To prove the second part, choose $\epsilon$ such that $\epsilon
\leq 1/2$ and $H_2(\epsilon) \leq \delta$ where $H_2(\epsilon)$ is
the binary entropy of $\epsilon$,
$$H_2(\epsilon)=-\epsilon \log \epsilon -(1-\epsilon)\log
(1-\epsilon).$$
Note that we can always
choose a value of $\epsilon$ which satisfies these conditions for
any $\delta$. Let $\mathbf{v}$ be the relative entropy vector created by states $\rho,
\sigma$. Consider the following states,
$\widehat{\rho}=\lambda \rho+(1-\lambda) \sigma$ and $\widehat{\sigma}=\sigma$
with the entropy vector $\mathbf{w}$ created by states $\widehat{\rho},
\widehat{\sigma}$. Consider the following quantity that leads to the entropy vector
$\mathbf{w}$:
\begin{align}
\label{eq:fol}
D(\widehat{\rho}^{\cal S} \| \widehat{\sigma}^{\cal S})
=& D\big(\lambda \rho^{\cal S} + (1-\lambda) \sigma^{\cal S} \|
\sigma^{\cal S}\big)\nonumber\\
=& -S\big(\lambda \rho^{\cal S} + (1-\lambda) \sigma^{\cal S}\big)
-\tr[\big(\lambda \rho^{\cal S} + (1-\lambda) \sigma^{\cal S}\big)\log
\sigma^{\cal S}]\nonumber\\
=&-S\big(\lambda \rho^{\cal S} + (1-\lambda) \sigma^{\cal S}\big) - \lambda
\tr(\rho^{\cal S}
\log \sigma^{\cal S}) - (1-\lambda)\tr(\sigma^{\cal S} \log
\sigma^{\cal S}).
\end{align}
We now make use of the following inequality, see for example~\cite{Neison:Chang}.
\begin{equation*}
\sum_i p_i S(\rho_i) \leq S\bigg(\sum_i p_i \rho_i\bigg) \leq
H(p_i) + \sum_i p_i S(\rho_i),
\end{equation*}
which here specialises to
\begin{equation*}
\lambda S(\rho^{\cal S}) + (1-\lambda) S(\sigma^{\cal S}) \leq S\big(\lambda \rho^{\cal S} + (1-\lambda) \sigma^{\cal S}\big) \leq H_2(\lambda) + \lambda
S(\rho^{\cal S})+ (1-\lambda) S(\sigma^{\cal S}).
\end{equation*}
Hence we can define a quantity $\alpha$ such that $0 \leq \alpha \leq
H_2(\lambda) \leq H_2(\epsilon)\leq \delta$
\begin{equation*}
S\big(\lambda \rho^{\cal S} + (1-\lambda) \sigma^{\cal S}\big) = \lambda
S(\rho^{\cal S})+ (1-\lambda) S(\sigma^{\cal S})+\alpha.
\end{equation*}
Therefore, eq.~(\ref{eq:fol}) reads,
\begin{align*}
D(\widehat{\rho}^{\cal S} \| \widehat{\sigma}^{\cal S})
=&-S\big(\lambda \rho^{\cal S} + (1-\lambda) \sigma^{\cal S}\big) - \lambda
\tr(\rho^{\cal S}
\log \sigma^{\cal S}) - (1-\lambda)\tr(\sigma^{\cal S} \log \sigma^{\cal S})\\
=&-\lambda S(\rho^{\cal S})- (1-\lambda) S(\sigma^{\cal S}) - \alpha- \lambda
\tr(\rho^{\cal S}
\log \sigma^{\cal S}) - (1-\lambda)\tr(\sigma^{\cal S} \log \sigma^{\cal S})\\
=& \lambda D(\rho^{\cal S}\|\sigma^{\cal S}) + (1-\lambda)
D(\sigma^{\cal S}\|\sigma^{\cal S})- \alpha\\
=& \lambda D(\rho^{\cal S}\|\sigma^{\cal S})-\alpha.
\end{align*}
Thus for our given vector $\mathbf{v}$ [the vector made from
the relative entropies $D(\rho^{\cal S}\|\sigma^{\cal S})$], we
have found a $\mathbf{w}$ [the vector of the relative
entropies $D(\widehat{\rho}^{\cal S} \| \widehat{\sigma}^{\cal S})$]
such that for all $\delta>0$ (where $H_2(\epsilon) \leq \delta$),
\begin{equation*}
\| \lambda \mathbf{v}-\mathbf{w} \| = \alpha \leq \delta
\end{equation*}
for all $\lambda \leq \epsilon$ (where $H_2(\epsilon) \leq \delta$).
This completes the proof.
\end{proof}
\section{The Lindblad-Uhlmann cone}
\label{sec:LU-cone} Define the convex cone $\Lambda_n \subset
\RR_{\geq 0}^{2^n -1}$: all vectors $\mathbf{v}$ satisfying the
following inequalities, for all $[n] \supset {\cal S} \supset
{\cal S}' \neq \emptyset$:
\begin{align}
\label{eq:mon1}
v_{\cal S} &\geq v_{{\cal S}'},\\
\label{eq:mon2}
v_{\cal S} &\geq 0.
\end{align}
This defines the cone of all vectors that obey the only known
inequality between relative entropies of subsystems, the
Lindblad-Uhlmann monotonicity relation (which implies
non-negativity).
\begin{proposition}
\label{prop:Lambda-extremal}
The extremal rays of $\Lambda_n$ are spanned by vectors $\mathbf{u}$ of the form
\begin{equation*}
u_{\cal S} = \begin{cases}
1 & \text{ if }{\cal S}\in{\bf U}, \\
0 & \text{ if }{\cal S}\not\in{\bf U},
\end{cases}
\end{equation*}
for a set family $\emptyset \neq {\bf U} \subset 2^{[n]}$ and
$\emptyset \notin {\bf U}$
with the property that for all ${\cal S}\in{\bf U}$ and
${\cal S}' \supset {\cal S}$, ${\cal S}'\in{\bf U}$.
(Such a set family is called an \emph{up-set}.)
Conversely, every up-set ${\bf U}$, by the above
assignment, defines a vector $\mathbf{u} \in \Lambda_n$ spanning an extremal ray.
\end{proposition}
\begin{proof}
Every extremal ray $R$ of $\Lambda_n$ is spanned by a vector
$v\in\Lambda_n$, such that $R=\RR_{\geq 0}\,\mathbf{v}$. It has the
property that if $\lambda\mathbf{a}+\mu\mathbf{b} \in R$ for
$\lambda,\mu > 0$ and $\mathbf{a},\mathbf{b} \in \Lambda_n$, then
$\mathbf{a},\mathbf{b} \in R$. With this every point in the cone is
a positive linear combination of elements from extremal rays. In
geometric terms, $R$ is an edge of the cone $\Lambda_n$
\cite{grunbaum}. It is a standard result from convex geometry (see
\cite{grunbaum}) that an extremal ray is specified by requiring that
sufficiently many of the defining inequalities are satisfied with
equality, in the sense that the solution space of these equations is
one-dimensional. (Of course, in addition the remaining inequalities
must hold.)
In the present case, there are only two, very simple, types of
inequalities. For a spanning vector $\mathbf{v}$ of an extremal
ray $R$, the equations (i.e., inequalities satisfied with
equality) take one of the following two forms: for $\mathcal{A}
\subset \mathcal{B}$, $\mathcal{C} \subset [n]$,
\begin{align}
\label{eq:ineq}
v_{\mathcal{A}} &= v_{\mathcal{B}},\\
\label{eq:ineq2}
v_{\mathcal{C}} &= 0.
\end{align}
How can it be that $\mathbf{v}$ is specified by a set of such
equations up to a scalar multiple? Since the equations only demand
that an entry of $\mathbf{v}$ is $0$ or that two entries are
equal, it must be such that there exists a subset $\mathbf{U}
\subset 2^{[n]}$ such that for all $\mathcal{A},\mathcal{B} \in
\mathbf{U}$, the corresponding entries of $\mathbf{v}$ are equal,
$v_{\mathcal{A}}=v_{\mathcal{B}}=v$, while for
$\mathcal{C}\not\in\mathbf{U}$, it holds that $v_{\mathcal{C}}=0$.
Now, to satisfy all the monotonicity inequalities, $\mathbf{U}$
must be an up-set. (We note that $\mathbf{v}\neq\mathbf{0}$ to
span a ray, hence $v\neq 0$.)
Thus, $\mathbf{v} = v \mathbf{u}$ for the vector $\mathbf{u}$
constructed from the up-set $\mathbf{U}$ in the statement of the
Proposition. This shows that every extremal ray is determined by
an up-set.
For the other direction, we first observe that $\mathbf{u}$
constructed from an arbitrary up-set $\mathbf{U}$ as stated
satisfies all the inequalities. Furthermore, it is clear that many
inequalities will be saturated. To show that $R=\RR_{\geq
0}\,\mathbf{u}$ is extremal, we only need to find a set of $2^n -
2$ linearly independent equations of the form~(\ref{eq:ineq})
and~(\ref{eq:ineq2}) that are satisfied. This is given by
\begin{alignat*}{2}
v_{\mathcal{A}} &= v_{[n]} &\quad&\mbox{for $[n]\neq \mathcal{A} \in \mathbf{U}$}, \\
v_{\mathcal{B}} &= 0 & &\mbox{for $\mathcal{B}\not\in \mathbf{U}$}.
\end{alignat*}
Indeed, these equations leave only the freedom to choose
$v_{[n]}$, and then all entries of $\mathbf{v}$ are determined.
This concludes the proof that every up-set determines an extremal
ray.
\end{proof}
\begin{example}
The following table shows all the extremal rays and hence all
possible up-sets for three parties up to permutations of parties.
\begin{center}
\begin{tabular}[c]{|c|c|c|c|c|c|c|c|}
\hline
\ &$v_A$ & $v_B$ & $v_C$ & $v_{AB}$ & $v_{AC}$ & $v_{BC}$ & $v_{ABC}$\\
\hline
\hline
\ Ray 1&0 & 0 & 0 & 0 & 0 & 0 & 1\\
\ Ray 2&0 & 0 & 0 & 1 & 0 & 0 & 1\\
\ Ray 3&0 & 0 & 0 & 1 & 1 & 0 & 1\\
\ Ray 4&0 & 0 & 0 & 1 & 1 & 1 & 1\\
\ Ray 5&1 & 0 & 0 & 1 & 1 & 0 & 1\\
\ Ray 6&1 & 0 & 0 & 1 & 1 & 1 & 1\\
\ Ray 7&1 & 1 & 0 & 1 & 1 & 1 & 1\\
\ Ray 8&1 & 1 & 1 & 1 & 1 & 1 & 1\\
\hline
\end{tabular}
\end{center}
These up-sets are also represented in graphical form in Fig.~1.
\begin{figure}
\begin{center}
\includegraphics[scale=0.42,clip]{Fig1.eps}
\end{center}
\begin{caption}
{All possible `up-sets' for three parties, up to permutations.
Broken arrows indicate which sets have the corresponding element
as a subset, i.e.\ a broken arrow means `is a subset of'. Every element
that is inside a box or circle is defined as having relative
entropy 1. A box indicates that we have chosen the set to have
relative entropy 1, whereas a circle indicates the set is forced
to have relative entropy 1 as one of its subsets also has relative
entropy 1. This `forcing' of relative entropy via one of the
subsets is represented as a black arrow.}
\end{caption}
\end{figure}
\end{example}
Note that every extremal ray of the relative entropy cone is very
well structured and can be defined precisely with up-sets. The
standard entropy cone however shows no such structure and its
extremal rays, although realised by highly structured states, show
far less structure in the actual entropy values of the extremal
rays (see~\cite{Pippenger,Magnificent:7}).
\section{$\mathbf{ \overline{\Lambda_n^*} = \Lambda_n }$}
\label{sec:equality} Clearly $\Lambda_n^* \subset \Lambda_n$ since
all actual states obey the Lindblad-Uhlmann monotonicity
inequalities~(\ref{eq:mon1}) and (\ref{eq:mon2}). Since $\Lambda_n$ is
closed, we thus get $\overline{\Lambda_n^*} \subset \Lambda_n$.
In this section we will show the opposite inclusion,
$\overline{\Lambda_n^*} \supset \Lambda_n$, thus showing
equality between the relative entropy cone and the Lindblad-Uhlmann cone.
To show this, it will clearly be enough to show that on every
extremal ray of $\Lambda_n$ there exists a nonzero vector contained
in $\Lambda_n^*$. In other words, if we can construct a pair of
states that has a relative entropy vector on an extremal ray, for
all possible extremal rays of $\Lambda_n$, then due to approximate
dilutability we can find entropy vectors along all points of all
extremal rays. Since every point inside a cone can be made with a
positive linear combination of points from its extremal rays, we
obtain that every point inside the cone can be realised and
$\Lambda_n=\overline{\Lambda_n^*}$.
Achieving these states can be identified with classical secret
sharing schemes (see for example~\cite{Stinson}) as we will explain.
The formalism
for a secret sharing scheme can be defined as follows. Imagine a
defined secret bit that we want to share between a number of
participants. We want only certain so-called ``authorised'' groups
of participants to be able to recover the secret exactly, while
unauthorised groups of parties get no information about the
secret. It is clear that with every authorised group $\cal S$, any
group $\cal S' \supset \cal S$ will also be authorised. So, the
authorised groups will form an up-set called an \emph{access
structure}.
\begin{definition}
An $n$-party secret sharing scheme for a bit $b$ with access
structure $\emptyset \neq \mathbf{U} \subset 2^{[n]}, \emptyset
\notin \mathbf{U}$, consists of the following
\begin{description}
\item[(i)] \quad Random variables $X_1(b),X_2(b),X_3(b), \ldots ,X_n(b)$, each
one associated with a participant labelled $1,\ldots, n$ in the
secret sharing scheme. $X_i(b)$ takes values in a set ${\cal
X}_i$.
\item[(ii)] \quad For $\cal S \in \mathbf{U}$, denote $X^{\cal S}(b)
= (X_i(b):i \in \cal S)$, the collection of shares accessible to the
group $\cal S$.
\item[(iii)] \quad For each $\cal S \in \mathbf{U}$, there is a
function $f_{\cal S}:{\cal X}^{\cal S} := \prod_{ i \in \cal S} {\cal
X}_i \rightarrow \{0,1\}$ s.t. $f_{\cal S}(X^{\cal S}(b))=b$. For $\cal
S \notin \mathbf{U}$ however, $X^{\cal S}(0)$ and $X^{\cal S}(1)$ have
the same distributions.
\end{description}
\end{definition}
With this scheme the notion of an up-set is naturally included.
Since an authorised group of parties is allowed to recover the
secret, adding additional parties must also result in an authorised
group since the decoding function can be chosen only to act on the
previous authorised group. This is the defining feature of an
up-set. To relate this to a quantum information setting, we can
construct the following density matrix based on a secret sharing
scheme:
\begin{equation}
\label{eq:densmat} \rho(b) = \sum_{x_1\ldots x_n}
\Pr\{X_1(b)=x_1,\ldots,X_n(b)=x_n\}\ketbra{x_1}^1 \otimes
\ketbra{x_2}^2 \otimes \cdots \otimes \ketbra{x_n}^n.
\end{equation}
The superscript on the terms of the tensor product denotes the
label of the share. We denote a partial trace of the matrix as
\begin{equation}
\rho(b)^\mathcal{S}=\sum_{x_j : j \in {\cal S}} \Pr \{X_j(b)=x_j,
\forall j \in {\cal S}\}\bigotimes_{j \in {\cal S}} \ketbra{x_j}{x_j}^j.
\end{equation}
$\rho(b)^{\cal S}$ has the following properties:
\begin{itemize}
\item If ${\cal S} \in {\mathbf{U}}$ then the
supporting subspace of $\rho(0)^\mathcal{S}$ is orthogonal to that
of $\rho(1)^\mathcal{S}$ which allows the group ${\cal S}$ to
determine the secret bit exactly: $\rho(0)^{\cal S} \perp
\rho(1)^{\cal S}$. \item If $\mathcal{S} \notin \mathbf{U}$ then
$\rho(0)^\mathcal{S}=\rho(1)^\mathcal{S}$ and no information about
the secret can be achieved.
\end{itemize}
With this density matrix we can construct the following matrices
for use in relative entropy $D(\rho\|\sigma)$:
\begin{align}
\label{eq:den2}
\rho^{\cal S} &= \rho(0)^{\cal S}, \\
\label{eq:den3}
\sigma^{\cal S} &= \frac{1}{2}\bigl(\rho(0)^{\cal S}+\rho(1)^{\cal S}\bigr).
\end{align}
Note that if $\mathcal{S}\notin \mathbf{U}$ then $\rho^{\cal
S}=\sigma^{\cal S}$ and the relative entropy is zero. For ${\cal
S} \in \mathbf{U}$, we can calculate the relative entropy as
follows:
\begin{equation}
D(\rho^{\cal S}\|\sigma^{\cal S})=\tr \bigg[\rho(0)^{\cal S} \log
\rho(0)^{\cal S} - \rho(0)^{\cal S} \log \bigg(
\frac{\rho(0)^{\cal S}}{2} + \frac{\rho(1)^{\cal S}}{2}
\bigg)\bigg].
\end{equation}
Using $\rho(0)^{\cal S} \perp \rho(1)^{\cal S}$,
\begin{equation}
D(\rho^{\cal S}\|\sigma^{\cal S})=\tr \bigg[\rho(0)^{\cal S} \log
\rho(0)^{\cal S} - \rho(0)^{\cal S} \log \frac{\rho(0)^{\cal
S}}{2} + \rho(0)^{\cal S} \log \frac{\rho(1)^{\cal S}}{2}\bigg].
\end{equation}
Since there are no elements in $\rho(0)^{\cal S}$ that are present
in $\rho(1)^{\cal S}$ the third term is zero. Hence expanding the
second term,
\begin{align}
D(\rho^{\cal S}\|\sigma^{\cal S}) &= \tr \bigg[\rho(0)^{\cal S}
\log \rho(0)^{\cal S} - \rho(0)^{\cal S} \log
\rho(0)^{\cal S} + \rho(0)^{\cal S} (\log 2) \1 \bigg]\\
&= (\log 2) \tr [\rho(0)^{\cal S}]=1.
\end{align}
Note that the relative entropy is constant and independent of the
number of elements of ${\cal S}$. Hence we have states from which we
can produce relative entropies in the form of up-sets described in
Proposition 2 by simply realising a classical secret sharing scheme
with the required access structure. There exists a secret sharing
scheme for every up-set structure, in fact for every access
structure \cite{Shamir,ISN}. Therefore for each extremal ray of
$\Lambda_n$ there is a secret sharing scheme whose density operators
according to eqs.~(\ref{eq:densmat}), (\ref{eq:den2}) and
(\ref{eq:den3}) will produce the required relative entropy vector and
hence prove that each extremal ray is realisable. Hence we have
proved that $\overline{\Lambda_n^*}=\Lambda_n$ and thus that
monotonicity under restrictions is the only inequality satisfied by
relative entropies.
\section{Simple secret sharing: threshold schemes}
\label{sec:thres}
In this section we will describe a simple secret sharing scheme
for a specialised access structure known as a \emph{threshold}
scheme. We will then build upon this scheme showing how we can
construct schemes for any access structure. The threshold scheme
was discovered by Shamir \cite{Shamir} and allows parties to
recover a secret if and only if enough of the parties collaborate,
such that their number is beyond a predetermined threshold number
of parties. Each party is given a part of the secret which we call
a `share' of the secret. There is a total of $n$ shares, one share
for each party. A threshold value $k$ is also determined such that
if a number of parties get together and pool their shares, if the
number of shares they have is greater than or equal to $k$ then
they can recover the secret precisely. However, if the number of
shares is less than $k$, then no information can be extracted
about the secret. Accordingly, these schemes are called
$(n,k)$-threshold schemes, depending on the number of parties and the
desired threshold value. The construction of the threshold scheme
is outlined as follows. The premise for the scheme is based on
evaluations of a polynomial. Imagine the following polynomial:
\begin{equation}
y=a_0+a_1x+a_2x^2+a_3x^3+\ldots+a_{m-1}x^{m-1}
\end{equation}
We label $a_0$ as the secret value and the shares as evaluations
of this polynomial at different points. Geometry tells us that we
need exactly $m$ evaluations of this polynomial to determine the
coefficient $a_0$ and that if we have any fewer than $m$
evaluations any value of $a_0$ would fit the given points. This
means that if we have $m$ or more evaluations we know the secret
exactly and if we have fewer than $m$ evaluations we know nothing
about the secret. The evaluations of the polynomials become the
`shares' of the scheme, and in the $(n,k)$-threshold scheme we perform
the calculations over a finite field. Here is a formulation of the
scheme extracted from the original paper by Shamir~\cite{Shamir}.
\begin{itemize}
\item Choose a random $k-1$ degree polynomial
$y(x)=a_0+a_1x+a_2x^2+\ldots+a_{k-1}x^{k-1}$ and let $s$ be the secret
where $s=a_0$ i.e. $a_1,\ldots a_{k-1}$ are chosen independently
and uniformly from the field $GF(p)$ of $p$ elements (integers
modulo $p$). \item The shares are defined as $D_1=y(1),
D_2=y(2),\ldots,D_i=y(i),\ldots,D_n=y(n)$.
\item Any given subset
of $k$ of these $D_i$ values together with their indices can find
the coefficients of $y(x)$ by interpolation and hence find the
value of $s=y(0)$. \item Knowing $k-1$ or fewer shares will not
reveal the value of $s$ as there exist polynomials that will
fit the given points in the polynomial and allow $a_0=0$ or
$a_0=1$ with every polynomial equally likely.
\item We use a set
of integers modulo a prime number $p$ which forms a finite field
allowing interpolation. \item Given that the secret is an integer
we require $p$ to be larger than both max $s$ and $n$. \item If we
only have $k-1$ shares, there is one and only one polynomial that
can be constructed for each value of $s$ in $GF(p)$. Since each
polynomial is equally likely by construction, no information about
the secret can be gained.
\end{itemize}
This scheme can be easily translated to the quantum density matrix
defined in eq.~(\ref{eq:densmat}). Most of the probabilities in
the sum are zero except for the ones that are valid for a polynomial
fitting the secret value, with shares labeling that part of the sum.
This scheme has a very specific access structure, but we can expand
to more general access structures. Consider the number of parties
$p$: we can have $n>p$ so that we have more shares than parties,
allowing us to distribute multiple shares to single parties. This
allows us to have access structures not possible with the simple
access structure. Imagine that we require an access structure given
in Fig.~2. We require that B and C cannot recover the secret, however
if they pool their resources together they can. We also need A to be
able to recover the secret independently. Under the normal threshold
scheme, we need the threshold to be set at $k=1$ so that single
party A can recover the secret. However, this means B and C will
independently be also able to recover the secret so we cannot create
the required access structure.
However, if we use a scheme with more shares than parties, we can
achieve this access structure, see Example 5. Many up-sets can be
realised using this modified threshold scheme. The following
example provides the required threshold scheme and the resulting
density matrices.
\begin{example}
Imagine an $n=3$ system, each party labelled by A, B and C respectively.
Consider also the following up-set representing an extremal ray.
This is Ray 6 as used in the previous section.
\begin{center}
\begin{tabular}[c]{|c|c|c|c|c|c|c|}
\hline
\ $v_A$ & $v_B$ & $v_C$ & $v_{AB}$ & $v_{AC}$ & $v_{BC}$ & $v_{ABC}$\\
\hline
\ 1 & 0 & 0 & 1 & 1 & 1 & 1\\
\hline
\end{tabular}
\end{center}
\begin{figure}
\begin{center}
\includegraphics[scale=0.6,clip]{Fig2.eps}
\end{center}
\begin{caption}
{Diagram of up-set used in Example 5}
\end{caption}
\end{figure}
With this up-set we can now construct a secret sharing scheme to
represent it. One of the easiest constructions to understand is
the threshold scheme. The scheme required is a $(4,2)$-threshold
scheme: 4 is the total number of shares, with 2 shares or more
required to reconstruct the secret. We distribute the shares as follows:
two shares to A and only one share to B and one to C. This leads
us to the required access structure as shown below.
\begin{center}
\begin{tabular}[c]{|c|c|c|c|c|c|c|c|}
\hline
\ &$A$ & $B$ & $C$ & $AB$ & $AC$ & $BC$ & $ABC$\\
\hline
\ Shares & 2 & 1 & 1 & 3 & 3 & 2 & 4\\
\ Above threshold
&$\checkmark$&$\times$&$\times$&$\checkmark$&$\checkmark$&$\checkmark$&$\checkmark$\\
\hline
\end{tabular}
\end{center}
Since we have a total of four shares, we have to construct the
scheme over a finite field of 5 elements. In this example calculations will
be assumed to be done over this finite field. Since the threshold
is two shares, we only need consider polynomials of order one,
since only two or more values are necessary to recover the
polynomial of order 1. Therefore the possible polynomials are as
follows.
\begin{align*}
y&=s\\
y&=s+x\\
y&=s+2x\\
y&=s+3x\\
y&=s+4x
\end{align*}
We can now embed this scheme into a quantum system. Each system
has the same number of qudits as the corresponding party has
shares, with $d$ being
large enough to incorporate the finite field values (i.e. in this
case $d=5$). For example system A has two qudits whereas
system B only has one. We now construct the density matrices
$\rho(0)$ and $\rho(1)$ as follows:
\begin{align}
\rho(0) &= \frac{1}{5}\bigl(
\ketbra{0000}{0000}+\ketbra{1234}{1234}+\ketbra{2413}{2413}
+\ketbra{3142}{3142}+\ketbra{4321}{4321}\bigr) \\
\rho(1) &= \frac{1}{5}\bigl(
\ketbra{1111}{1111}+\ketbra{2340}{2340}+\ketbra{3024}{3024}
+\ketbra{4203}{4203}+\ketbra{0432}{0432}\bigr)
\end{align}
$A$ has the first two qudits, $B$ the third and $C$ the fourth.
From this we can construct the overall system described
previously. We take $\rho=\rho(0)$ and
$\sigma=\frac{\rho(0)+\rho(1)}{2}$ as in eqs.~(\ref{eq:den2})
and (\ref{eq:den3}). As examples we may compute
\begin{align}
\rho_A &=\frac{1}{5}\bigl(
\ketbra{00}{00}+\ketbra{12}{12}+\ketbra{24}{24}+\ketbra{31}{31}+\ketbra{43}{43}\bigr),\\
\sigma_A&=\frac{1}{10}\bigl(
\ketbra{00}{00}+\ketbra{12}{12}+\ketbra{24}{24}+\ketbra{31}{31}+\ketbra{43}{43}\bigr.
\nonumber\\
&\phantom{===}\bigl.
+\ketbra{11}{11}+\ketbra{23}{23}+\ketbra{30}{30}+\ketbra{42}{42}+\ketbra{04}{04}\bigr).
\end{align}
Therefore it can be verified that the relative entropy of party A
is $\log{2}$. Repeating this for party B,
\begin{equation}
\rho_B =
\frac{1}{5}\bigl(\ketbra{0}{0}+\ketbra{3}{3}+\ketbra{1}{1}+\ketbra{4}{4}
+\ketbra{2}{2}\bigr)=\sigma_B.
\end{equation}
Therefore the relative entropy for B is 0. All other relative
entropies can be verified in this way.
\end{example}
Thus giving unequal numbers of shares to the parties can achieve
more complicated access structures. However not all access
structures can be produced in this way. For example imagine that
we have 4 parties A, B, C and D with the number of shares in each
party being $a,b,c$ and $d$ respectively. We require that A and B
can recover the secret and that C and D can recover the secret but
no other two party combination. If A and B can recover the secret
then their combined total of shares must be greater than $k$ i.e.
$a+b\geq k$. Therefore either $a\geq \frac{k}{2}$ or $b\geq
\frac{k}{2}$. Similarly we can claim that $c\geq \frac{k}{2}$ or
$d\geq \frac{k}{2}$. Say that in this case $a\geq \frac{k}{2},
c\geq \frac{k}{2}$. Hence there exists another two party
combination, A and C, that has a number of shares greater than
$k$ and can recover the secret i.e. $a+c\geq k$. Therefore the
access structure is impossible to produce with this scheme.
However there are general methods for dealing with arbitrary
access structures~\cite{ISN,BenLei}. These allow us to represent any
extremal ray. One strategy is to create a hierarchy of threshold
schemes. Here we illustrate the strategy with an example.
\begin{example}
Imagine an $n=4$ system which we label $A,B,C$ and $D$ respectively.
Consider also the following up-set representing an extremal ray.
\begin{center}
\begin{tabular}[c]{|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|}
\hline
\ $v_A$ & $v_B$ & $v_C$ & $v_D$ & $v_{AB}$ & $v_{AC}$ & $v_{AD}$ & $v_{BC}$&$v_{BD}$&$v_{CD}$&$v_{ABC}$&$v_{ABD}$&$v_{ACD}$&$v_{BCD}$&$v_{ABCD}$\\
\hline
\ 0 & 0 & 0 & 0 & 1 & 0 & 0&0&0&1&1&1&1&1&1\\
\hline
\end{tabular}
\end{center}
Note that the access structure representing this ray requires that no
single party has access to the secret and that only parties A and B
collaborating, and C and D collaborating, will be authorised. Also
any greater number of parties will always contain an authorised
group and are therefore also authorised. The required access
structure can be represented by two schemes. This is illustrated
in Fig.~3.
\begin{figure}
\begin{center}
\includegraphics[scale=0.34,clip]{Fig3.eps}
\end{center}
\begin{caption}
{Diagram of up-set used in Example 6}
\end{caption}
\end{figure}
Each scheme requires a $(2,2)$-threshold scheme: 2 total number of
shares with a threshold for recovering the secret of 2 shares. We
distribute the shares as follows: in one scheme (scheme $\alpha$)
we give 1 share to A and 1 share to B. In the other scheme (scheme
$\beta$) we give 1 share to C and 1 share to D. This ensures that
the secret can be recovered by authorised parties via at least one
of the schemes reaching threshold, shown below.
\begin{center}
\begin{tabular}[c]{|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|c|}
\hline
\ & $A$ & $B$ & $C$ & $D$ & $AB$ & $AC$ & $AD$ & $BC$ & $BD$ & $CD$ & $ABC$ & $ABD$ & $ACD$ & $BCD$ & $ABCD$\\
\hline
\ Shares(scheme $\alpha$) & 1 & 1 & 0 & 0 & 2 & 1 & 1 & 1 & 1 & 0 & 2 & 2 & 1 & 1 & 2 \\
\ Shares(scheme $\beta$) & 0 & 0 & 1 & 1 & 0 & 1 & 1 & 1 & 1 & 2 & 1 & 1 & 2 & 2 & 2 \\
\ Above threshold
&$\times$&$\times$&$\times$&$\times$&$\checkmark_\alpha$&$\times$&$\times$&$\times$&$\times$&$\checkmark_\beta$&$\checkmark_\alpha$&$\checkmark_\alpha$&$\checkmark_\beta$&$\checkmark_\beta$&$\checkmark_{\alpha,\beta}$\\
\hline
\end{tabular}
\end{center}
Since we have a total of two shares for each scheme, we construct
the scheme using a finite field of 3 elements. From now on
calculations will be assumed to be done over this finite field.
Since the threshold is two shares, we only need consider
polynomials of order one, since only two or more coordinates are
necessary to recover the polynomial of order 1. Therefore the
possible polynomials are as follows.
\begin{align*}
y&=s\\
y&=s+x\\
y&=s+2x
\end{align*}
In the construction of the quantum density matrix we need to
consider all possible sets of shares the individual parties can
have. The possible combinations are presented in the following
tables.
\begin{table}[!h]
\begin{minipage}{3in}
\begin{tabular}{|c|c|cccc|}
\hline
\ Scheme $ \alpha$&Scheme $\beta$ &$A$&$B$&$C$&$D$\\
\hline
\ $y=s$&$y=s$ &$0\ast$&$0\ast$&$\ast0$&$\ast0$\\
\ $y=s$&$y=s+x$ &$0\ast$&$0\ast$&$\ast1$&$\ast2$\\
\ $y=s$&$y=s+2x$ &$0\ast$&$0\ast$&$\ast2$&$\ast1$\\
\ $y=s+x$&$y=s$ &$1\ast$&$2\ast$&$\ast0$&$\ast0$\\
\ $y=s+x$&$y=s+x$ &$1\ast$&$2\ast$&$\ast1$&$\ast2$\\
\ $y=s+x$&$y=s+2x$ &$1\ast$&$2\ast$&$\ast2$&$\ast1$\\
\ $y=s+2x$&$y=s$ &$2\ast$&$1\ast$&$\ast0$&$\ast0$\\
\ $y=s+2x$&$y=s+x$ &$2\ast$&$1\ast$&$\ast1$&$\ast2$\\
\ $y=s+2x$&$y=s+2x$ &$2\ast$&$1\ast$&$\ast2$&$\ast1$\\
\hline
\end{tabular}
\begin{caption}
{All possible shares for $s=0$}
\end{caption}
\end{minipage}
\begin{minipage}{3in}
\begin{tabular}{|c|c|cccc|}
\hline
\ Scheme $ \alpha$&Scheme $\beta$ &$A$&$B$&$C$&$D$\\
\hline
\ $y=s$&$y=s$ &$1\ast$&$1\ast$&$\ast1$&$\ast1$\\
\ $y=s$&$y=s+x$ &$1\ast$&$1\ast$&$\ast2$&$\ast0$\\
\ $y=s$&$y=s+2x$ &$1\ast$&$1\ast$&$\ast0$&$\ast2$\\
\ $y=s+x$&$y=s$ &$2\ast$&$0\ast$&$\ast1$&$\ast1$\\
\ $y=s+x$&$y=s+x$ &$2\ast$&$0\ast$&$\ast2$&$\ast0$\\
\ $y=s+x$&$y=s+2x$ &$2\ast$&$0\ast$&$\ast0$&$\ast2$\\
\ $y=s+2x$&$y=s$ &$0\ast$&$2\ast$&$\ast1$&$\ast1$\\
\ $y=s+2x$&$y=s+x$ &$0\ast$&$2\ast$&$\ast2$&$\ast0$\\
\ $y=s+2x$&$y=s+2x$&$0\ast$&$2\ast$&$\ast0$&$\ast2$\\
\hline
\end{tabular}
\begin{caption}
{All possible shares for $s=1$}
\end{caption}
\end{minipage}
\end{table}
Each party has two registers, one for each scheme. If a party has no
share then the register associated with that scheme is put into a
fixed state (here
$\ket{\ast}_A,\ket{\ast}_B,\ket{\ast}_C,\ket{\ast}_D$) which is
uncorrelated to the variables for that scheme. Thus the density
matrix $\rho(0)$ is
\begin{align*}
\rho(0)=\frac{1}{9}&
\bigl( \ \proj{0{\ast}}_A\,\proj{0{\ast}}_B\,\proj{{\ast}0}_C\,\proj{{\ast}0}_D
+\proj{0{\ast}}_A\,\proj{0{\ast}}_B\,\proj{{\ast}1}_C\,\proj{{\ast}2}_D \\
&
+\proj{0{\ast}}_A\,\proj{0{\ast}}_B\,\proj{{\ast}2}_C\,\proj{{\ast}1}_D
+\proj{1{\ast}}_A\,\proj{2{\ast}}_B\,\proj{{\ast}0}_C\,\proj{{\ast}0}_D \\
&
+\proj{1{\ast}}_A\,\proj{2{\ast}}_B\,\proj{{\ast}1}_C\,\proj{{\ast}2}_D
+\proj{1{\ast}}_A\,\proj{2{\ast}}_B\,\proj{{\ast}2}_C\,\proj{{\ast}1}_D \\
&
+\proj{2{\ast}}_A\,\proj{1{\ast}}_B\,\proj{{\ast}0}_C\,\proj{{\ast}0}_D
+\proj{2{\ast}}_A\,\proj{1{\ast}}_B\,\proj{{\ast}1}_C\,\proj{{\ast}2}_D \\
&
\phantom{+\proj{0{\ast}}_A\,\proj{2{\ast}}_B\,\proj{{\ast}0}_C\,\proj{{\ast}2}_D}
+\proj{2{\ast}}_A\,\proj{1{\ast}}_B\,\proj{{\ast}2}_C\,\proj{{\ast}1}_D \bigr).
\end{align*}
Similarly we can construct $\rho(1)$ by repeating the process but
setting the secret bit to be 1, i.e.
$\ket{1{\ast}}_A \ket{1{\ast}}_B \ket{{\ast}1}_C \ket{{\ast}1}_D$
etc., leading to the density matrix $\rho(1)$:
\begin{align*}
\rho(1)=\frac{1}{9}&
\bigl( \ \proj{1{\ast}}_A\,\proj{1{\ast}}_B\,\proj{{\ast}1}_C\,\proj{{\ast}1}_D
+\proj{1{\ast}}_A\,\proj{1{\ast}}_B\,\proj{{\ast}2}_C\,\proj{{\ast}0}_D \\
&
+\proj{1{\ast}}_A\,\proj{1{\ast}}_B\,\proj{{\ast}0}_C\,\proj{{\ast}2}_D
+\proj{2{\ast}}_A\,\proj{0{\ast}}_B\,\proj{{\ast}1}_C\,\proj{{\ast}1}_D \\
&
+\proj{2{\ast}}_A\,\proj{0{\ast}}_B\,\proj{{\ast}2}_C\,\proj{{\ast}0}_D
+\proj{2{\ast}}_A\,\proj{0{\ast}}_B\,\proj{{\ast}0}_C\,\proj{{\ast}2}_D \\
&
+\proj{0{\ast}}_A\,\proj{2{\ast}}_B\,\proj{{\ast}1}_C\,\proj{{\ast}1}_D
+\proj{0{\ast}}_A\,\proj{2{\ast}}_B\,\proj{{\ast}2}_C\,\proj{{\ast}0}_D \\
&
\phantom{+\proj{0{\ast}}_A\,\proj{2{\ast}}_B\,\proj{{\ast}0}_C\,\proj{{\ast}2}_D}
+\proj{0{\ast}}_A\,\proj{2{\ast}}_B\,\proj{{\ast}0}_C\,\proj{{\ast}2}_D \bigr).
\end{align*}
We notice that in both states $\rho(0)$ and $\rho(1)$ in this
example the state $\ket{\ast}_A\ket{\ast}_B\ket{\ast}_C\ket{\ast}_D$
factors out so that we could equally well take
\begin{align}
\rho(0) &= \frac{1}{9}\bigl(
\ketbra{0000}{0000}+\ketbra{0012}{0012}+\ketbra{0021}{0021}
+\ketbra{1200}{1200} \nonumber \\
&\phantom{==}
+\ketbra{1212}{1212}+\ketbra{1221}{1221}
+\ketbra{2100}{2100}+\ketbra{2112}{2112}+\ketbra{2121}{2121} \bigr), \\
\rho(1) &= \frac{1}{9}\bigl(
\ketbra{1111}{1111}+\ketbra{1120}{1120}+\ketbra{1102}{1102}
+\ketbra{2011}{2011} \nonumber \\
&\phantom{==}
+\ketbra{2020}{2020}+\ketbra{2002}{2002}
+\ketbra{0211}{0211}+\ketbra{0220}{0220}+\ketbra{0202}{0202} \bigr).
\end{align}
[Note however that in more complicated examples parties need shares from
more than one scheme.] From this we can construct the overall system
described previously, and for example for parties $AB$,
\begin{align}
\rho_{AB} &= \frac{1}{3}\bigl(
\ketbra{00}{00}+\ketbra{12}{12}+\ketbra{21}{21}\bigr), \\
\sigma_{AB} &= \frac{1}{6}\bigl(
\ketbra{00}{00}+\ketbra{12}{12}+\ketbra{21}{21}+
\ketbra{11}{11}+\ketbra{20}{20}+\ketbra{02}{02} \bigr).
\end{align}
Therefore it can be verified that the relative entropy of parties AB
is $\log{2}$. Repeating this for parties BC,
\begin{equation}
\rho_{BC} =
\frac{1}{9}\bigl(\ketbra{11}{11}+\ketbra{12}{12}+\ketbra{10}{10}+\ketbra{01}{01}+\ketbra{02}{02}
+\ketbra{00}{00}+\ketbra{21}{21}+\ketbra{22}{22}+\ketbra{20}{20}\bigr)=\sigma_{BC}.
\end{equation}
Therefore the relative entropy for BC is 0. All other relative
entropies can be verified in this way.
\end{example}
The idea of using a hierarchy of threshold schemes was discovered by
Ito, Saito and Nishizeki \cite{ISN} and requires an exponential
number of threshold schemes to represent an access structure. This
number of schemes required is irrelevant as long as a scheme exists
and we can create the corresponding density matrix. A simpler
construction for general access structures was found by Benaloh and Leichter
\cite{BenLei}, which does not use threshold schemes but can be
directly translated to the required density matrices in
eq.~(\ref{eq:densmat}).
\section{Conclusion}
\label{sec:coda}
In this paper, we have determined the set of all
relative entropy vectors for general states on (general) $n$-party
systems: it coincides with the convex cone defined by
non-negativity and monotonicity of the relative entropy. We have
done this by first showing that the former set is indeed a
convex cone, and then demonstrating that every extremal ray in the
latter cone is realised by a specific pair of states. These
extremal rays are characterised by up-sets in $2^{[n]}$, and the
pairs of states correspond to (classical) secret sharing schemes.
A particular consequence is that the cone of relative entropy
vectors is the same for quantum states and for classical
probability distributions. This is in marked contrast to the case
of entropy vectors, where even for $n=2$ the classical and quantum
entropy cones differ~\cite{Pippenger}.
Beyond the characterisation in terms of convex geometry, our
result also means that, apart from monotonicity, there can be no
other universal relation between the relative entropy values of the
reduced states in a composite system (except what follows
trivially from monotonicity). In this sense, quantum and classical
relative entropy are completely characterised by the monotonicity
relation.
We are now in a position to go back to our assumption of finite
dimensional systems and the demand that all relative entropies are
finite. Clearly, if some of the parties are described by infinite
dimensional quantum systems, we still have
monotonicity~\cite{Uhlmann}, so the relative entropy vectors are all
within the Lindblad-Uhlmann cone. In this case, and even in the
finite dimensional case, some entries in a relative entropy vector
may be positive infinity. However, even this does not present a
problem, once we realise that the groups where the value is infinite
form an up-set, so the vector can indeed be obtained as a limit of
finite relative entropy vectors in the Lindblad-Uhlmann cone.
Another mathematical peculiarity is the following: from the proof
of achievability of all extremal rays of the Lindblad-Uhlmann
cone, we discover that every
point in the entropy cone is achievable rather than merely
approximated, i.e. $\Lambda_n = \Lambda_n^*$. This is due to the fact that
every point on all extremal rays can be attained. To see this,
simply choose $\rho(0)$ and $\rho(1)$ in eq.~(\ref{eq:den3})
with different weights $p$ and $1-p$ ($0\leq p\leq 1$). Then the
calculation following that equation shows that
the relative entropy is either $H_2(p)$ or $0$ depending
on whether ${\cal S}$ is an authorised set or not. By additivity
in Lemma~\ref{lemma:cone} we obtain that every point on the extremal
rays is realised, hence every point in the Lindblad-Uhlmann cone.
\par\medskip
We conclude the paper by commenting briefly on possible connections
of our result to the entropy cone, and possibly to the relative
entropy of entanglement. In the above arguments we have often used
the formula $D(\rho\|\sigma) = -S(\rho)-\tr\rho\log\sigma$, which
means that if we make the restriction $\sigma = \frac{1}{d}\1$, the
maximally mixed state in $d$ dimensions, the relative entropies (now
dependent only on $\rho$) evaluate to $\log d - S(\rho)$. Going
through the proof of Lemma~\ref{lemma:cone} we see that for any number $n$ of
parties, the set of all these relative entropy vectors is also a
convex cone, and one might think that its relations would capture
all inequalities for the entropy. That this is too optimistic a
hope is indicated by the fact that the relative entropy is
expressed by the entropy and a term beyond what can be expressed by
general entropies alone (essentially the log of the rank). And it is
indeed not the case, since for example the nonnegativity of the
relative entropy translates into $S(\rho) \leq \log d$. However, the
fundamental fact that the entropy $S(\rho)$ is nonnegative is not
captured at all, since that would require an upper bound on the
relative entropy depending on the dimension. Still, there may be
some less stringent relation between the entropy and the relative
entropy cones, whose existence we would like to advertise as an open
problem.
\acknowledgments
BI was supported by the U.K. Engineering and
Physical Sciences Research Council. NL and AW acknowledge support by
the EU project RESQ and the U.K.~EPSRC's IRC QIP.
\begin{thebibliography}{99}
\bibitem{BenLei} J. Benaloh, J. Leichter, ``Generalized Secret
Sharing and Monotone Functions'', Advances in Cryptology, \textit{CRYPTO
'88}, pp. 27-35, LNCS 403, Springer Verlag, Berlin, 1990.
\bibitem{BDSW} C. H. Bennett, D. P. DiVincenzo, J. A. Smolin, W. K.
Wootters, ``Mixed State entanglement and quantum error
correction'', \textit{Phys. Rev. A}, vol. 54, pp. 3824-3851, 1996.
\bibitem{CA:97} N. J. Cerf, C. Adami, ``Negative Entropy and Information in quantum
mechanics'', \textit{Phys. Rev. Lett.}, vol. 79, pp. 5194-5197, 1997.
\bibitem{CW:04} M. Christandl, A. Winter, ``Squashed Entanglement -- An additive entanglement
measure'', \textit{J. Math. Phys.}, vol. 45, no. 3, pp. 829-840, 2004.
\bibitem{Cover:Thomas} T. M. Cover, J. A. Thomas, \emph{Elements of Information
Theory}, Wiley \&{} Sons, 1991.
\bibitem{grunbaum} B. Gr\"unbaum, \emph{Convex Polytopes}, 2nd ed.~prepared
by V.~Kaibel, V.~Klee, and G.~Ziegler, Graduate Texts in
Mathematics 221, Springer Verlag, Berlin, 2003.
\bibitem{ISN} M. Ito, A. Saito, T. Nishizeki, ``Secret Sharing
Scheme Realizing General Access Structure'',
\textit{Proc. IEEE Globecom '87}, pp. 99-102, 1987.
\bibitem{LR73} E. H. Lieb, M. B. Ruskai, ``Proof of the Strong Subadditivity of
Quantum-Mechanical Entropy'',
\textit{J.~Math. Phys.}, vol. 14, pp. 1938-1941, 1973.
\bibitem{Lindblad} G. Lindblad, ``Completely positive maps and entropy inequalities'', \textit{Commun. Math. Phys.},
vol. 40, pp. 147-151, 1975.
\bibitem{Magnificent:7} N. Linden, E. Maneva, S. Massar, S. Popescu, D. Roberts,
B. Schumacher, J. A. Smolin, A. V. Thapliyal, in preparation (2005).
\bibitem{LW05} N. Linden, A. Winter, ``A new inequality for the von Neumann
entropy'', \textit{Commun. Math. Phys.}, vol. 259, pp. 129-138, 2005.
\bibitem{Neison:Chang} M. A. Nielsen, I. L. Chuang,
\emph{Quantum Computation and Quantum Information},
Cambridge University Press, 2000.
\bibitem{Petz} D. Petz, ``Monotonicity of quantum relative
entropy revisited'', \textit{Rev. Math. Phys.}, vol. 15, no. 1, pp. 79-91, 2003.
\bibitem{Pippenger} N. Pippenger, ``The inequalities of quantum information theory'',
\textit{IEEE Trans. Inf. Theory}, vol. 49, no. 4, pp. 773-789, 2003.
\bibitem{Shamir} A. Shamir, ``How to Share a Secret'', \textit{Commun. ACM},
vol. 22, no. 11, pp. 612-613, 1979.
\bibitem{Stinson} D. R. Stinson, ``An explication of secret sharing schemes'',
\textit{Designs, Codes and Cryptography}, vol. 2, no. 4, pp.~357-390, 1992.
\bibitem{Uhlmann} A. Uhlmann, ``Relative Entropy and the
Wigner-Yanase-Dyson-Lieb Concavity in an Interpolation Theory'',
\textit{Commun. Math. Phys.}, vol. 54, pp. 21-32, 1977.
\bibitem{Vedral} V. Vedral, ``The Role of Relative Entropy in Quantum Information
Theory'', \textit{Rev. Mod. Phys.}, vol. 74, no. 1, pp. 197-234, 2002.
\bibitem{VedPlen} V. Vedral, M. B. Plenio, ``Entanglement measures and purification procedures'', \textit{Phys. Rev. A},
vol. 57, pp.~1619-1633, 1998.
\bibitem{VPRK} V. Vedral, M. B. Plenio, M. A. Rippin, P. L. Knight,
``Quantifying Entanglement'',
\textit{Phys. Rev. Lett.},
vol. 78, pp. 2275-2279, 1997.
\bibitem{Yeung} R. W. Yeung, ``A Framework for Linear Information Inequalities'', \textit{IEEE Trans. Inf. Theory},
vol. 43, no. 6, pp. 1924-1934, 1997.
\bibitem{YZ} Z. Zhang, R. W. Yeung, ``On Characterization of Entropy Function via Information Inequalities'',
\textit{IEEE Trans. on Inform. Theory}, vol. 44, no. 4, pp. 1440-1452, 1998.
\end{thebibliography}
\epsilonnd{document} |
\begin{document}
\title{Reeb Dynamics of the Link of the $A_n$ Singularity}
\begin{abstract}
The link of the $A_n$ singularity, $L_{A_n} \subset \mathbb{C}^3$ admits a natural contact structure $\xi_0$ coming from the set of complex tangencies. The canonical contact form $\alpha_0$ associated to $\xi_0$ is degenerate and thus has no isolated Reeb orbits. We show that there is a nondegenerate contact form for a contact structure equivalent to $\xi_0$ that has two isolated simple periodic Reeb orbits. We compute the Conley-Zehnder index of these simple orbits and their iterates. From these calculations we compute the positive $S^1$-equivariant symplectic homology groups for $\left(L_{A_n}, \xi_0 \right)$. In addition, we prove that $\left(L_{A_n}, \xi_0 \right)$ is contactomorphic to the Lens space $L(n+1,n)$, equipped with its canonical contact structure $\xi_{std}$.
\end{abstract}
\setcounter{tocdepth}{2}
\tableofcontents
\section{Introduction and Main results }
The classical topological theory of isolated critical points of complex polynomials relates the topology of the link of the singularity to the algebraic properties of the singularity \cite{M}. More generally, the link of an irreducible affine variety $A^n \subset \mathbb{C}^N$ with an isolated singularity at $\mathbf{0}$ is defined by $L_A = A \cap S_\delta^{2N+1}$. For sufficiently small $\delta$, the link $L_A$ is a manifold of real dimension $2n-1$, which is an invariant of the germ of $A$ at $\mathbf{0}.$ The links of Brieskorn varieties can sometimes be homeomorphic but not always diffeomorphic to spheres \cite{Br}, a preliminary result which further motivated the study of such objects. Recent developments in symplectic and contact geometry have shown that the algebraic properties of a singularity are strongly connected to the contact topology of the link and symplectic topology of (the resolution of) the variety. A wide range of results demonstrating the power of investigating the symplectic and contact perspective of singularities include \cite{K}, \cite{O}, \cite{McL}, \cite{R}, \cite{Se}, \cite{U}.
In this paper we study the contact topology of the link of the $A_n$ singularity, providing a computation of positive $S^1$-equivariant symplectic homology. This is done via our construction of an explicit nondegenerate contact form and the computation of the Conley-Zehnder indices of the associated simple Reeb orbits and their iterates. Our computations show that positive $S^1$-equivariant symplectic homology is a free $\mathbb{Q}[u]$ module of rank equal to the number of conjugacy classes of the finite subgroup $A_n$ of SL$(2;\mathbb{C})$. This provides a concrete example of the relationship between the cohomological McKay correspondence and symplectic homology, which is work in progress by McLean and Ritter \cite{MR}. As a result, the topological nature of the singularity is reflected by qualitative aspects of the Reeb dynamics associated to the link of the $A_n$ singularity.
The link of the $A_n$ singularity is defined by
\begin{equation}\label{linkeq}
L_{A_n} =f_{A_n}^{-1}(0) \cap S^5 \subset \mathbb{C}^3, \ \ \ f_{A_n}=z_0^{n+1} + 2 z_1 z_2.
\end{equation}
It admits a natural contact structure coming from the set of complex tangencies,
\[
\xi_0:=TL_{A_n} \cap J_0(TL_{A_n}).
\]
The contact structure can be expressed as the kernel of the canonically defined contact form,
\[
\alpha_0 = \frac{i}{2} \left( \sum_{j=0}^m ( z_j d\bar{z}_j -\bar{z}_jdz_j ) \right)\bigg \vert_{L_{A_n}}.
\]
The contact form $\alpha_0$ is degenerate and hence not appropriate for computing Floer theoretic invariants as the periodic orbits of the Reeb vector field defined by
\[
{\alpha_0}(R_{\alpha_0})=1, \ \ \ \iota_{R_{\alpha_0}}d\alpha_0 =0,
\]
are not isolated.
Our first result is to construct a nondegenerate contact form $\alpha_\epsilon$ such that $( L_{A_n}, \ker \alpha_0)$ and $( L_{A_n}, \ker \alpha_\epsilon)$ are contactomorphic. Define the Hamiltonian on $\mathbb{C}^3$ by
\[
\begin{array}{rlcl}
H:& \mathbb{C}^3 &\to & \mathbb{R} \\
& (z_0,z_1,z_2) &\mapsto & |z|^2 + \epsilon(|z_1|^2 - |z_2|^2), \\
\end{array}
\]
where $\epsilon$ is chosen so that $H > 0$ on $S^5$. As will be shown,
\begin{equation}\label{alphaepsilon}
\alpha_\epsilon = \frac{1}{H} \left[ \frac{(n+1)i}{8}\left(z_0d\overline{z}_0 - \overline{z}_0 dz_0\right) + \frac{i}{4} \left(z_1 d\bar{z}_1 - \bar{z}_1 dz_1 + z_2 d\bar{z}_2 - \bar{z}_2 dz_2\right)\right],
\end{equation}
is a nondegenerate contact form. We also find the simple Reeb orbits of $R_{\alpha_\epsilon}$ and compute the associated Conley-Zehnder index with respect to the canonical trivialization of $\mathbb{C}^3$ of their iterates.
\begin{thm}\label{CZcomputation}
The 1-form $\alpha_\epsilon$ is a nondegenerate contact form for $L_{A_n}$ such that $(L_{A_n}, \ker \alpha_0)$ and $( L_{A_n}, \ker \alpha_\epsilon)$ are contactomorphic. The Reeb orbits of $R_{\alpha_\epsilon}$ are defined by
\begin{align*}
\gamma_+(t) & = (0,e^{2i(1 + \epsilon)t},0) \quad \quad 0 \le t \le \frac{\pi}{1 + \epsilon}\\
\gamma_-(t) & = (0,0,e^{2i(1 - \epsilon)t}) \quad \quad 0 \le t \le \frac{\pi}{1 - \epsilon}.
\end{align*}
The Conley-Zehnder index for $\gamma = \gamma_{\pm}^N$ in $0 \le t \le \frac{N\pi}{1 \pm \epsilon}$ is
\begin{align}\label{CZeq}
\mu_{CZ}(\gamma_{\pm}^N) = 2\left( \left\lfloor \frac{2N}{(n+1)(1 \pm \epsilon)}\right \rfloor + \left\lfloor \frac{N(1 \mp \epsilon)}{1 \pm \epsilon} \right\rfloor - \left \lfloor \frac{2N}{1 \pm \epsilon} \right \rfloor \right) + 2N + 1.
\end{align}
\end{thm}
\begin{rem}
If $\epsilon$ is chosen such that $0 <\epsilon \ll \frac{1}{N}$ then (\ref{CZeq}) can be further simplified:
\begin{equation}
\begin{array}{lcl}
\mu_{CZ}(\gamma_{-}^N)& = &2 \left\lfloor \dfrac{2N}{(n+1)(1 - \epsilon)}\right \rfloor + 1;\\
&&\\
\mu_{CZ}(\gamma_{+}^N)& = &2 \left\lfloor \dfrac{2N}{(n+1)(1 + \epsilon)}\right \rfloor + 1. \\
\end{array}
\end{equation}
\end{rem}
The proof of Theorem \ref{CZcomputation} is obtained by adapting methods of Ustilovsky \cite{U} to obtain both $\alpha_\epsilon$ and to compute the Conley-Zehnder indices. The Conley-Zehnder index is a Maslov index for arcs of symplectic matrices and defined in Section \ref{CZsection}. These paths of matrices are obtained by linearizing the flow of the Reeb vector field along the Reeb orbit and restricting to $\xi_0$. To better understand the spread of the Reeb orbits and their iterates in various indices, we have the following example.
\begin{example}
Let $n=2$ and $0 <\epsilon \ll \frac{1}{10}$.
\[
\begin{array}{lc c clcc}
\mu_{CZ}(\gamma_- ) &=&1,&\ \ \ \ \ \ &\mu_{CZ} (\gamma_+ )&=&1 \\
\mu_{CZ}(\gamma_-^2 )& =&3,&\ \ \ \ \ \ &\mu_{CZ}(\gamma_+^2)& =&3 \\
\mu_{CZ}(\gamma_-^3 )&=&5, &\ \ \ \ \ \ &\mu_{CZ}(\gamma_+^3 )&=&3 \\
\mu_{CZ}(\gamma_-^4 )&=& 5,&\ \ \ \ \ \ &\mu_{CZ}(\gamma_+^4 )& =&5\\
\mu_{CZ}(\gamma_-^5 )& =& 7,&\ \ \ \ \ \ &\mu_{CZ}(\gamma_+^5 )& =&7\\
\mu_{CZ}(\gamma_-^6 )&=& 9,&\ \ \ \ \ \ &\mu_{CZ}(\gamma_+^6) &=& 7\\
\mu_{CZ}(\gamma_-^7 )&=& 9,&\ \ \ \ \ \ &\mu_{CZ}(\gamma_+^7) &=& 9 \\
\end{array}
\]
It is interesting to note that the spread of integers is not uniform between $\mu_{CZ}(\gamma_-^N)$ and $\mu_{CZ}(\gamma_+^N),$ and where these jumps in index occur. However, we see that there are $n=2$ Reeb orbits with Conley-Zehnder index 1 and $n+1=3$ orbits with Conley-Zehnder index $2k+1$ for each $k\geq1$.
\end{example}
\begin{rem}\label{freehtpy}
Extrapolating this to all values of $n$ and $N$ demonstrates that the numerology of the Conley-Zehnder index realizes the number of free homotopy classes of $L_{A_n}$. Recall that $[\Sigma L_{A_n}] = \pi_0(\Sigma L_{A_n}) = \pi_1(L_{A_n})/\{\mbox{conjugacy classes}\}$ and $H_1(L_{A_n}, \mathbb{Z}) = \mathbb{Z}_{n+1}$. The information that the $(n+1)$-th iterate of $\gamma_\pm$ is the first contractible Reeb orbit is also encoded in the above formulas. Qualitative aspects of the Reeb dynamics reflect this topological information in the following computation of a Floer-theoretic invariant of the contact structure $\xi_0$.
\end{rem}
Theorem \ref{CZcomputation} allows us to easily compute positive $S^1$-equivariant symplectic homology $SH_*^{+,S^1}$. Symplectic homology is a Floer type invariant of symplectic manifolds, with contact type boundary, see also \cite{biased}. Under additional assumptions, one can prove that the positive $S^1$-equivariant symplectic homology $SH_*^{+,S^1}$ is in fact an invariant of the contact structure; see \cite[Theorems 1.2 and 1.3]{GuSH} and \cite[Section 4.1.2]{BO}. Because of the behavior of the Conley-Zehnder index in Theorem \ref{CZcomputation}, we can directly compute $SH_*^{+,S^1}(L_{A_n}, \xi_0)$ and conclude that it is a contact invariant. As a result, the underlying topology of the manifold determines qualitative aspects of any Reeb vector field associated to a contact form defining $\xi_0$.
\begin{thm}\label{linksh}
The positive $S^1$-equivariant symplectic homology of $(L_{A_n}, \xi_0)$ is
\[
SH^{+,S^1}_*(L_{A_n}, \xi_0) = \left\{ \begin{array}{cl}
\mathbb{Q}^n & * =1 \\
\mathbb{Q}^{n+1} & * \mathfrak{g}eq 3, \mbox{ odd } \\
0 & * \ \mbox{ else } \\
\end{array} \right.
\]
\end{thm}
\begin{proof}
To obtain a contact invariant from $SH^{+,S^1}_*$ we need to show in dimension three that all contractible Reeb orbits $\gamma$ satisfy $\mu_{CZ}(\gamma)\geq3$; see \cite[Theorems 1.2 and 1.3]{GuSH} and \cite[Section 4.1.2]{BO}. The first iterate of $\gamma_\pm$ which is contractible is the $(n+1)$-th iterate, and by Theorem \ref{CZcomputation}, will always satisfy $\mu_{CZ}(\gamma_\pm)\geq3$.
If $\alpha$ is a nondegenerate contact form such that the Conley-Zehnder indices of all periodic Reeb orbits are lacunary, meaning they contain no two consecutive numbers, then we can appeal to \cite[Theorem 1.1]{GuSH}. This result of Gutt allows us to conclude that over $\mathbb{Q}$-coefficients the differential for $SH^{S^1,+}$ vanishes. In light of Theorem \ref{CZcomputation} we obtain the above result.
\end{proof}
Remark \ref{freehtpy} yields the following corollary of Theorem \ref{linksh}, indicating a Floer theoretic interpretation of the McKay correspondence \cite{IM} via the Reeb dynamics of the link of the $A_n$ singularity. The $A_n$ singularity is the singularity of $f^{-1}_{A_n}(0)$, where $f_{A_n}$ is described as (\ref{linkeq}). This is equivalent to its characterization as the absolutely isolated double point quotient singularity of $\mathbb{C}^2/A_n$, where $A_n$ is the cyclic subgroup of SL$(2;\mathbb{C})$; see Section \ref{contactgeomlens}. The cyclic group $A_n$ acts on $\mathbb{C}^2$ by $(u,v) \mapsto \left(e^\frac{2\pi i}{n+1}u, e^\frac{2\pi in}{n+1}v\right)$.
\begin{cor}
The positive $S^1$-equivariant symplectic homology $SH^{+,S^1}_*(L_{A_n}, \xi_0)$ is a free $\mathbb{Q}[u]$ module of rank equal to the number of conjugacy classes of the finite subgroup $A_n$ of $\mbox{\em SL}(2;\mathbb{C})$.
\end{cor}
\begin{rem}
Ongoing work of Nelson \cite{jocompute} and Hutchings and Nelson \cite{HN3} is needed in order to work under the assumption that a related Floer-theoretic invariant, cylindrical contact homology, is a well-defined contact invariant of $(L_{A_n},\xi_{0})$. Once this is complete, the index calculations provided in Theorem \ref{CZcomputation} show that positive $S^1$-equivariant symplectic homology and cylindrical contact homology agree up to a degree shift.
In \cite{BO} Bourgeois and Oancea prove that there are restricted classes of contact manifolds for which one can prove that cylindrical contact homology (with a degree shift) is isomorphic to the positive part of $S^1$-equivariant symplectic homology, when both are defined over $\mathbb{Q}$-coefficients. Their isomorphism relies on having transversality for a generic choice of $J,$ which is presently the case for unit cotangent bundles $DT^*L$ such that dim $L \geq 5$ or when $L$ is a Riemannian manifold which admits no contractible closed geodesics \cite{BOcorrig}. Our computations confirm that their results should hold for many more closed contact manifolds.
\end{rem}
Our final result is an explicit proof that $(L_{A_n}, \xi_0)$ and the lens space $(L(n+1,n), \xi_{std})$ are contactomorphic. The lens space
\[ L(n+1,n) = S^3/\big((u,v) \sim (e^{2\pi i/(n+1)}u,e^{2\pi ni/(n+1)}v)\big)
\]
admits a contact structure, which is induced by the one on $S^3$ and can be expressed as the kernel of the following contact form,
\[
\lambda_{std}= \frac{i}{2} ( u d\bar{u} -\bar{u}du +v d\bar{v} -\bar{v}dv).
\]
\begin{thm}\label{lenslinkcontacto}
The link of the $A_n$ singularity $(L_{A_n}, \xi_0=\ker \alpha_0)$ and the lens space $(L(n+1,n), \xi_{std}=\ker \lambda_{std})$ are contactomorphic.
\end{thm}
Theorems \ref{linksh} and \ref{lenslinkcontacto} allow us to reprove the following result of van Koert and Kwon \cite{O}. Since $(L_{A_n}, \xi_0)$ and $(L(n+1,n), \xi_{std})$ are contactomorphic and $SH_*^{S^1,+}$ is a contact invariant, $SH_*^{S^1,+}(L(n+1,n),\xi_{std}) =SH_*^{S^1,+}(L_{A_n}, \xi_0)$.
\begin{thm}[Appendix A \cite{O}]
The positive $S^1$-equivariant symplectic homology of $(L(n+1,n),\xi_{std})$ is
\[
SH^{+,S^1}_*(L(n+1,n), \xi_{std}) = \left\{ \begin{array}{cl}
\mathbb{Q}^n & * =1 \\
\mathbb{Q}^{n+1} & * \mathfrak{g}eq 3, \mbox{ odd } \\
0 & * \ \mbox{ else } \\
\end{array} \right.
\]
\end{thm}
Their proof relies on the following nondegenerate contact form on $(L(n+1,n),\xi_{std})$. If $a_1,a_2$ are any rationally independent positive real numbers then
\[ \lambda_{a_1,a_2} = \frac{i}{2} \sum_{ j = 1}^2 a_j(z_j d\overline{z}_j - \overline{z}_j dz_j)\]
is a nondegenerate contact form for $(L(n+1,n), \xi_{std})$. The simple Reeb orbits on $L(n+1,n)$ are given by
\begin{align*}
\gamma_1 & = (e^{it/a_1},0) \quad \quad 0 \le t \le \frac{ 2 a_1\pi}{n+1}, \\
\gamma_2 & = (0,e^{it/a_2}) \quad \quad 0 \le t \le \frac{2a_2\pi}{n+1},
\end{align*}
which descend from the simple isolated Reeb orbits on $S^3$. Again, the $n+1$ different free homotopy classes associated to this lens space are realized by covers of the isolated Reeb orbits $\gamma_i$ for $i=1$ or $2$. The Conley-Zehnder index for $\gamma_1^N$ is
\begin{equation}\label{CZlens} \mu_{CZ}(\gamma_1^N) = 2\left(\left\lfloor \frac{N}{n+1}\right\rfloor + \left\lfloor \frac{N a_1}{(n+1)a_2}\right\rfloor\right) + 1,
\end{equation}
with a similar formula holding for $\gamma_2^N$.
\textbf{Outline} The necessary background is given in Section \ref{background}. The construction of a nondegenerate contact form and the proof of Theorem \ref{CZcomputation} is given in Section \ref{CZcomputationsection}. The proof of Theorem \ref{lenslinkcontacto} is given in Section \ref{sectionlinklens}.
\section{Background}\label{background}
In these sections we recall all the necessary symplectic and contact background which is needed to prove Theorems \ref{CZcomputation} and \ref{lenslinkcontacto}.
\subsection{Contact Structures} \hspace{\fill} \\
First we recall some notions from contact geometry.
\begin{definition}
Let $M$ be a manifold of dimension $2n+1$. A \textbf{contact structure} is a maximally non-integrable hyperplane field $\xi=\mbox{ker }\alpha \subset TM$.
\end{definition}
\begin{rem}
The kernel of a 1-form $\alpha$ on $M^{2n+1},$ $\xi=\ker \alpha$, is a contact structure whenever
\[
\alpha \wedge (d\alpha)^n \neq 0,
\]
which is equivalent to the condition that $d\alpha$ be nondegenerate on $\xi$.
\end{rem}
Note that the contact structure is unaffected when we multiply the contact form $\alpha$ by any positive or negative function on $M$. We say that two contact structures $\xi_0=\mbox{ker } \alpha_0$ and $\xi_1=\mbox{ker }\alpha_1$ on a manifold $M$ are \textbf{contactomorphic} whenever there is a diffeomorphism $\psi:M \to M$ such that $\psi$ sends $\xi_0$ to $\xi_1$:
\[
\psi_*(\xi_0)=\xi_1.
\]
If a diffeomorphism $\psi: M\to M$ is in fact a contactomorphism then there exists a non-zero function $g:M \to \mathbb{R} $ such that $\psi^*\alpha_1=g\alpha_0$. Finding an explicit contactomorphism often proves to be a rather difficult and messy task, but an application of Moser's argument yields Gray's stability theorem, which essentially states that there are no non-trivial deformations of contact structures on a fixed closed manifold.
First we give the statement of Moser's Theorem, which says that one cannot vary a symplectic structure by perturbing it within its cohomology class. Recall that a \textbf{symplectic structure} on a smooth manifold $W^{2n}$ is a nondegenerate closed 2-form $\omega \in \Omega^2(W)$.
\begin{thm}[Moser's theorem] \cite[Thm 3.17]{MD} \label{moser}
Let $W$ be a closed manifold and suppose that $\omega_t$ is a smooth family of cohomologous symplectic forms on $W$. Then there is a family of diffeomorphisms $\Psi_t$ of $W$ such that
\[
\Psi_0=\mbox{id}, \ \ \ \Psi^*_t\omega_t=\omega_0.
\]
\end{thm}
The aforementioned contact analogue of Moser's theorem is Gray's stability theorem, stated formally below.
\begin{thm}[Gray's stability theorem] \cite[Thm 2.2.2]{G}
Let $\xi_t, \ t \in [0,1]$, be a smooth family of contact structures on a closed manifold $V$. Then there is an isotopy $(\psi_t)_{t\in [0,1]}$ of $V$ such that
\[ {\psi_t}_*(\xi_0) = \xi_t \ \mbox{ for each } t \in [0,1]. \]
\end{thm}
Next we give the most basic example of a contact structure.
\begin{example}
\em
Consider $\mathbb{R}^{2n+1}$ with coordinates $(x_1, y_1,...,x_n,y_n,z)$ and the 1-form
\[
\alpha=dz+\sum_{j=1} ^n x_jdy_j.
\]
Then $\alpha$ is a contact form for $\mathbb{R}^{2n+1}$. The contact structure $\xi=\mbox{ker }\alpha$ is called the standard contact structure on $\mathbb{R}^{2n+1}$.
\end{example}
As in symplectic geometry, a variant of Darboux's theorem holds. This states that locally all contact structures are diffeomorphic to the standard contact structure on $\mathbb{R}^{2n+1}$.
A contact form gives rise to a unique Hamiltonian-like vector field as follows.
\begin{definition}
For any contact manifold $(M, \xi=\mbox{ker }\alpha)$ the \textbf{Reeb vector field} $R_\alpha$ is defined to be the unique vector field determined by $\alpha$:
\[
\iota(R_\alpha)d\alpha=0, \ \ \ \alpha(R_\alpha)=1.
\]
We define the Reeb flow of $R_\alpha$ by $\varphi_t: M \to M$, $\dot{\varphi_t} = R_\alpha(\varphi_t)$.
\end{definition}
The first condition says that $R_\alpha$ points along the unique null direction of the form $d\alpha$ and the second condition normalizes $R_\alpha$. Because
\[
\mathcal{L}_{R_\alpha} \alpha = d \iota_{R_\alpha}\alpha + \iota_{R_\alpha} d\alpha
\]
the flow of $R_\alpha$ preserves the form $\alpha$ and hence the contact structure $\xi$. Note that if one chooses a different contact form $f \alpha$, the corresponding vector field $R_{f\alpha}$ is very different from $R_\alpha$, and its flow may have quite different properties.
A {\bf{Reeb orbit}} $\gamma$ of period $T$ associated to $R_\alpha$ is defined to be a path $\gamma: \mathbb{R}/T\mathbb{Z} \to M$ given by an integral curve of $R_\alpha$. That is,
\[
\frac{d\gamma}{dt} = R_\alpha \circ \gamma(t), \quad \gamma(0) = \gamma(T).
\]
Two Reeb orbits
\[
\gamma_1, \ \gamma_0 : \mathbb{R}/T\mathbb{Z} \to M
\]
are considered equivalent if they differ by reparametrization, i.e. precomposition with a translation of $\mathbb{R}/T\mathbb{Z}.$
The $N$-fold cover $\gamma^N$ is defined to be the composition of $\gamma$ with $\mathbb{R}/NT\mathbb{Z} \to \mathbb{R}/T\mathbb{Z}$. A
\textbf{simple Reeb orbit} is one such that $\gamma: \mathbb{R}/T\mathbb{Z} \to M$ is injective.
\begin{rem}
Since Reeb vector fields are autonomous, the terminology ``simple Reeb orbit $\gamma$'' refers to the entire equivalence class of orbits, and likewise for its iterates.
\end{rem}
A Reeb orbit $\gamma$ is said to be {\bf{nondegenerate}} whenever the linearized return map
\[ d(\varphi_T)_{\gamma(0)}: \xi_{\gamma(0)} \to \xi_{\gamma(T) = \gamma(0)}\] has no eigenvalue equal to 1. A {\bf{nondegenerate contact form}} is one whose Reeb orbits are all nondegenerate and hence isolated. Note that since the Reeb flow preserves the contact structure, the linearized return map is symplectic.
Next we briefly review the canonical contact form on $S^3$ and its Reeb dynamics.
\begin{example}[Canonical Reeb dynamics on the 3-sphere]
\label{3-sphere}
{ If we define the following function $f\colon \mathbb{R}^4 \to \mathbb{R}$
\[
f(x_1, y_1, x_2, y_2)= x_1^2+y_1^2+x_2^2+ y_2^2,
\]
then $S^3=f^{-1}(1)$. Recall that the canonical contact form on $S^3 \subset \mathbb{R}^4$ is given to be
\begin{equation}
\label{ls}
\lambda_0 := - \frac{1}{2} df \circ J = \left( x_1 dy_1 - y_1 dx_1 + x_2 dy_2 - y_2 dx_2 \right)\arrowvert_{S^3}.
\end{equation}
The Reeb vector field is given by
\begin{equation}\label{reebreal}
\begin{array}{lcl}
R_{\lambda_0}&=&\left(x_1 \dfrac{\partial}{\partial y_1} - y_1 \dfrac{\partial}{\partial x_1} + x_2 \dfrac{\partial}{\partial y_2} - y_2 \dfrac{\partial}{\partial x_2}\right) \\
&=& (-y_1,x_1,-y_2,x_2). \\
\end{array}
\end{equation}
Equivalently we may reformulate these using complex coordinates by identifying $\mathbb{R}^4$ with $\mathbb{C}^2$ via
\[
u = x_1+iy_1, \ \ \ v = x_2+iy_2.
\]
We obtain
\[
\lambda_0=\frac{i}{2}\left(u d\bar{u} - \bar{u} du + v d\bar{v} - \bar{v} dv\right)\big |_{S^3},
\]
and
\begin{equation}
\label{reeb3sphere2}
\begin{array}{ccl}
R_{\lambda_0} & =& i \left( u \dfrac{\partial}{\partial u} - \bar{u} \dfrac{\partial}{\partial \bar{u}} + v \dfrac{\partial}{\partial v} - \bar{v} \dfrac{\partial}{\partial \bar{v}}\right) \\
&=& (iu, iv) \\
\end{array}
\end{equation}
The second expression for $R_{\lambda_0}$ follows from (\ref{reebreal}) since $iu=(-y_1,x_1)$ and $iv=(-y_2,x_2)$.
To see that the orbits of $R_{\lambda_0}$ define the fibers of the Hopf fibration recall that a fiber through a point
\[
(u,v)=(x_1+iy_1, x_2+ iy_2) \in S^3 \subset \mathbb{C}^2,
\]
can be parameterized as
\begin{equation}
\label{reebflow}
\varphi(t)=(e^{it}u, e^{it}v), \ t\in \mathbb{R}.
\end{equation}
We compute the time derivative of the fiber
\[
\dot{\varphi}(0)=(iu,iv)=(i x_1 - y_1, i x_2 - y_2).
\]
Expressed as a real vector field on $\mathbb{R}^4$, which is tangent to $S^3$, this is the Reeb vector field $R_{\lambda_0}$ as it appears in (\ref{reeb3sphere2}), so the Reeb flow does indeed define the Hopf fibration.
}
\end{example}
\subsection{Hypersurfaces of contact type}\hspace{\fill}\\
Another notion that we need from symplectic and contact geometry is that of a hypersurface of contact type in a symplectic manifold. The following notion of a Liouville vector field allows us to define hypersurfaces of contact type. Liouville vector fields will be used to understand the Reeb dynamics of the nondegenerate contact form $\alpha_1$ as well as to construct the contactomorphism between $(L_{A_n},\xi_0)$ and $(L(n+1,n),\xi_{std})$.
\begin{definition}
\label{lioudef}
A \textbf{Liouville vector field} $Y$ on a symplectic manifold $(W, \omega)$ is a vector field satisfying
\[ \mathcal{L}_Y \omega = \omega \]
The flow $\psi_t$ of such a vector field is conformal symplectic, i.e. $\psi^*_t(\omega)=e^t \omega$. The flow of such a field is volume expanding, so such fields may only exist locally on compact manifolds.
\end{definition}
Whenever there exists a Liouville vector field $Y$ defined in a neighborhood of a compact hypersurface $Q$ of $(W, \omega)$, which is transverse to $Q$, we can define a contact 1-form on $Q$ by
\[
\alpha : = \iota_Y\omega.
\]
\begin{prop}[{\cite[Prop 3.58]{MD}}]\label{contacttype}
Let $(W, \omega)$ be a symplectic manifold and $Q \subset W$ a compact hypersurface. Then the following are equivalent:
\begin{itemize}
\item[{(i)}] There exists a contact form $\alpha$ on $Q$ such that $d \alpha = \omega|_Q$.
\item[{(ii)}] There exists a Liouville vector field $Y:U \to TW$ defined in a neighborhood $U$ of $Q$, which is transverse to $Q$.
\end{itemize}
If these conditions are satisfied then $Q$ is said to be of \textbf{contact type.}
\end{prop}
We will need the following application of Gray's stability theorem to hypersurfaces of contact type to prove Theorem \ref{lenslinkcontacto} in Section \ref{sectionlinklens}.
\begin{lem}\cite[Lemma 2.1.5]{G}\label{graycor}
Let $Y$ be a Liouville vector field on a symplectic manifold $(W,\omega)$. Suppose that $M_1$ and $M_2$ are hypersurfaces of contact type in $W$. Assume that there is a smooth function
\begin{equation}\label{heq}
h:W \to \mathbb{R}
\end{equation}
such that the time-1 map of the flow of $hY$ is a diffeomorphism from $M_1$ to $M_2$. Then this diffeomorphism is in fact a contactomorphism from $(M_1, \ker \iota_Y \omega|_{TM_1})$ to $(M_2, \ker \iota_Y \omega|_{TM_2})$.
\end{lem}
\subsection{Symplectization} \hfill \\
The symplectization of a contact manifold is an important notion in defining Floer theoretic theories like symplectic and contact homology. It will also be used in our calculation of the Conley-Zehnder index. Let $(M, \xi = \ker \alpha)$ be a contact manifold. The \textbf{symplectization} of $(M,\xi = \ker \alpha)$ is given by the manifold $\mathbb{R} \times M$ and symplectic form
\[
\omega = e^t(d\alpha - \alpha \wedge dt) = d (e^t\alpha).
\]
Here $t$ is the coordinate on $\mathbb{R}$, and it should be noted that $\alpha$ is interpreted as a 1-form on $\mathbb{R} \times M$, as we identify $\alpha$ with its pullback under the projection $\mathbb{R} \times M \to M$.
Any contact structure $\xi$ may be equipped with a complex structure ${J}$ such that $(\xi, {J})$ is a complex vector bundle.
This set is nonempty and contractible. There is a unique canonical extension of the almost complex structure ${J}$ on $\xi$ to an $\mathbb{R}$-invariant almost complex structure $\tilde{J}$ on $T(\mathbb{R} \times M)$, whose existence is due to the splitting,
\begin{equation}
\label{decomp}
T(\mathbb{R} \times M) = \mathbb{R} \frac{\partial}{\partial t} \oplus \mathbb{R} R_{\alpha} \oplus \xi.
\end{equation}
\begin{definition}[Canonical extension of ${J}$ to $\tilde{J}$ on $T(\mathbb{R} \times M)$]\label{complexstruc}
Let $[a,b;v]$ be a tangent vector where $a, \ b \in \mathbb{R}$ and $v \in \xi$. We can extend ${J}: \xi \to \xi$ to $\tilde{J}: T(\mathbb{R} \times M) \to T(\mathbb{R} \times M)$ by
\[
\tilde{J}[a,b;v] = [-b,a,{J}v].
\]
Thus $\tilde{J}|_\xi = {J}$ and $\tilde{J}$ acts on $\mathbb{R} \frac{\partial}{\partial t} \oplus \mathbb{R} R_{\alpha}$ in the same manner as multiplication by $i$ acts on $\mathbb{C}$, namely $\tilde{J} \frac{\partial}{\partial t} = R_{\alpha}$.
\end{definition}
\subsection{The Conley-Zehnder index}\label{CZsection}\hfill \\
The Conley-Zehnder index $\mu_{CZ}$ is a Maslov index for arcs of symplectic matrices which assigns an integer $\mu_{CZ}(\Phi)$ to every path of symplectic matrices $\Phi : [0,T] \to \mbox{Sp}(2n)$, with $\Phi(0) = \mathds{1} $. In order to ensure that the Conley-Zehnder index assigns the same integer to homotopic arcs, one must also stipulate that 1 is not an eigenvalue of the endpoint of this path of matrices, i.e. $\det(\mathds{1} - \Phi(T))\neq 0$. We define the following set of continuous paths of symplectic matrices that start at the identity and end on a symplectic matrix that does not have 1 as an eigenvalue.
\[
\Sigma^*(n) = \{ \Phi :[0,T] \to \mbox{Sp}(2n) \ | \ \Phi \mbox{ is continuous}, \ \Phi(0)=\mathds{1}, \mbox{ and } \mbox{det}(\mathds{1} - \Phi(T)) \neq 0 \}.
\]
The Conley-Zehnder index is a functor satisfying the following properties, and is uniquely determined by the homotopy, loop, and signature properties.
\begin{thm}\label{CZpropthm}{\cite[Theorem 2.3, Remark 5.4]{RS}}, {\cite[Theorem 2, Proposition 8 \& 9]{GuCZ}}\label{CZprop} \\
There exists a unique functor $\mu_{CZ}$ called the {\bf{Conley-Zehnder index}} that assigns the same integer to all homotopic paths $\Psi$ in $\Sigma^*(n)$,
\[
\mu_{CZ}: \Sigma^*(n) \to \mathbb{Z}.
\]
such that the following hold.
\begin{enumerate}[\em (1)]
\item {\bf{Homotopy}}: The Conley-Zehnder index is constant on the connected components of $\Sigma^*(n)$.
\item {\bf{Naturality}}: For any paths $\Phi, \Psi: [0,1] \to Sp(2n)$, $\mu_{CZ}(\Phi\Psi\Phi^{-1}) = \mu_{CZ}(\Psi)$.
\item {\bf{Zero}}: If $\Psi(t) \in \Sigma^*(n)$ has no eigenvalues on the unit circle for $t >0$, then $\mu_{CZ}(\Psi) = 0$.
\item {\bf{Product}}: If $n = n' + n''$, identify $Sp(2n') \oplus Sp(2n'')$ with a subgroup of $Sp(2n)$ in the obvious way. For $\Psi' \in \Sigma^*(n')$, $\Psi'' \in \Sigma^*(n'')$, then $\mu_{CZ}(\Psi' \oplus \Psi'') = \mu_{CZ}(\Psi') + \mu_{CZ}(\Psi'')$.
\item {\bf{Loop}}: If $\Phi$ is a loop at $\mathds{1}$, then $\mu_{CZ}(\Phi\Psi) = \mu_{CZ}(\Psi) + 2\mu(\Phi)$ where $\mu$ is the Maslov Index.
\item {\bf{Signature}}: If $S \in M(2n)$ is a symmetric matrix with $||S|| < 2\pi$ and $\Psi(t) = \exp(J_0St)$, then $\mu_{CZ}(\Psi) = \frac{1}{2}\sgn(S)$.
\end{enumerate}
\end{thm}
The linearized Reeb flow of $\gamma$ yields a path of symplectic matrices
\[
d(\varphi_t)_{\gamma(0)}: \xi_{\gamma(0)} \to \xi_{\gamma(t) = \gamma(0)}
\]
for $t\in[0,T],$ where $T$ is the period of $\gamma$.
Thus we can compute the Conley-Zehnder index of $d\varphi_t, \ t\in[0,T].$ This index is typically dependent on the choice of trivialization $\tau$ of $\xi$ along $\gamma$ which was used in linearizing the Reeb flow. However, if $c_1(\xi;\mathbb{Z})=0$ we can use the existence of an (almost) complex volume form on the symplectization to obtain a global means of linearizing the flow of the Reeb vector field. The choice of a complex volume form is parametrized by $H^1(\mathbb{R} \times M;\mathbb{Z})$, so an absolute integral grading is only determined up to the choice of volume form. See also \cite[\S 1.1.1]{jocompute}.
We define
\[
\mu_{CZ}^\tau(\gamma):=\mu_{CZ}\left( \left\{ d\varphi_t \right\}\arrowvert_{t\in[0,T]}\right)
\]
In the case at hand we will be able to work in the ambient space of $(\mathbb{C}^3, J_0)$, and use a canonical trivialization of $\mathbb{C}^3$.
\subsection{The canonical contact structure on Brieskorn manifolds}\hspace{\fill} \\
The $A_n$ link is an example of a Brieskorn manifold, which are defined generally by
\[
\Sigma(\mathbf{a})= \left\{ (z_0,\dots,z_m) \in \mathbb{C}^{m+1} \ \bigg| \ f:= \sum_{j = 0}^m z_j^{a_j} = 0, \ a_j \in \mathbb{Z}_{>0} \text{ and } \sum_{j = 0}^m |z_j|^2 = 1 \right\}.
\]
The link of the $A_n$ singularity after a linear change of variables is $ \Sigma(n+1,2,2)$ for $n >3$; see (\ref{coorchange}).
Brieskorn gave a necessary and sufficient condition on $\mathbf{a}$ for $\Sigma(\mathbf{a})$ to be a topological sphere, and means to show when these yield exotic differentiable structures on the topological $(2n-1)$-sphere in \cite{Br}. A standard calculus argument \cite[Lemma 7.1.1]{G} shows that $\Sigma(\mathbf{a})$ is always a smooth manifold.
In the mid 1970's, Brieskorn manifolds were found to admit a canonical contact structure, given by their set of complex tangencies,
\[
\xi_0=T\Sigma \cap J_0 (T\Sigma),
\]
where $J_0$ is the standard complex structure on $\mathbb{C}^{m+1}$. The contact structure $\xi_0$ can be expressed as $\xi_0 = \ker \alpha_0$ for the canonical 1-form
\[
\alpha_0:= (- d\rho \circ J_0)|_\Sigma = \frac{i}{4} \left( \sum_{j=0}^m ( z_j d\bar{z}_j -\bar{z}_jdz_j ) \right)\bigg \vert_\Sigma,
\]
where $\rho=(||z||^2-1)/4$. A proof of this fact may be found in {\cite[Thm 7.1.2]{G}}. The Reeb dynamics associated to $\alpha_0$ are difficult to understand. There is a more convenient contact form $\alpha_1$ constructed by Ustilovsky \cite[Lemma 4.1.2]{U} via the following family.
\begin{prop}[{\cite[Proposition 7.1.4]{G}}] The 1-form
\[
\alpha_t = \frac{i}{4}\sum_{j = 0}^m \frac{1}{1 - t + \frac{t}{a_j}} (z_j d\bar{z}_j - \bar{z}_jdz_j)
\]
is a contact form on $\Sigma(\mathbf{a})$ for each $t\in [0,1]$.
\end{prop}
Via Gray's stability theorem we obtain the following corollary.
\begin{cor}
For all $t \in (0,1]$, $(\Sigma(\mathbf{a}), \ker \alpha_0)$ is contactomorphic to $(\Sigma(\mathbf{a}), \ker \alpha_t)$.
\end{cor}
Next we compute the Reeb dynamics associated to $\alpha_1 = \frac{i}{4}\sum_{j=0}^m a_j (z_j d\bar{z}_j - \bar{z}_jdz_j) $.
\begin{rem}
While $\alpha_1$ is degenerate, one can still easily check that the Reeb vector field associated to $\alpha_1$ is given by,
\[
R_{\alpha_1} = 2i \sum_{j = 0}^m \frac{1}{a_j}\left( z_j \frac{\partial}{\partial z_j} - \bar{z}_j \frac{\partial}{\partial \bar{z}_j} \right) = 2i \left( \frac{z_0}{a_0},...,\frac{z_m}{a_m} \right).
\]
Indeed, one computes
\[
df\left(R_{\alpha_1}\right) = 2i\,f(\mathbf{z}) \mbox{ and } d\rho \left(R_{\alpha_1}\right) =0.
\]
This shows that $R_{\alpha_1}$ is tangent to $\Sigma(\mathbf{a})$. The defining equations for the Reeb vector field are satisfied since
\[
\alpha_1\left(R_{\alpha_1}\right) \equiv 1 \mbox{ and } \iota_{R_{\alpha_1}}d\alpha_1 = -d\rho,
\]
with the latter form being zero on $T_p\Sigma(\mathbf{a})$. The flow of $R_{\alpha_1}$ is given by
\[
\varphi_t(z_0,...,z_m) = \left( e^{2it/a_0}z_0,...,e^{2it/a_m}z_m \right).
\]
All the orbits of the Reeb flow are closed, and the flow defines an effective $S^1$-action on $\Sigma(\mathbf{a})$.
\end{rem}
In the next section we perturb $\alpha_1$ to a nondegenerate contact form.
\section{Proof of Theorem \ref{CZcomputation}}\label{CZcomputationsection}
\subsection{Constructing a nondegenerate contact form} \hspace{\fill} \\
In this section we adapt a method used by Ustilovsky in \cite[Section 4]{U} to obtain a nondegenerate contact form $\alpha_\epsilon$ on $L_{A_n}$ whose kernel is contactomorphic to $\xi_0$. Ustilovsky's methods yielded a nondegenerate contact form on Brieskorn manifolds of the form $\Sigma(p,2,\dots,2)$, which are diffeomorphic to $S^{4m+1}$.
We define the following change of coordinates to go from $\Sigma(n+1,2,2)$ with defining function $f=z_0^{n+1} + z_1^2+z_2^2$ to $L_{A_n}$ with defining function $f_{A_n}= w_0^{n+1} + 2w_1w_2.$
\begin{equation}\label{coorchange}
\Psi(w_0,w_1,w_2) = \left(\underbrace{w_0}_{{:=z_0}} \ , \underbrace{\tfrac{\sqrt{2}}{2}(w_1+w_2)}_{:=z_1} \ , \underbrace{\tfrac{\sqrt{2}}{2}(-iw_1+iw_2)}_{:=z_2} \right)
\end{equation}
We obtain
\begin{align}
\Psi^*f(z_0,z_1,z_2)= w_0^{n+1} + 2w_1w_2.
\end{align}
Then the pull-back of
\[
\frac{\alpha_1}{2} = \frac{i}{8}\sum_{j=0}^m a_j (z_j d\bar{z}_j - \bar{z}_jdz_j)
\]
is given by
\[
\frac{\Psi^*\alpha_1}{2} = \frac{(n+1)i}{8}(w_0 d\overline{w}_0 - \overline{w}_0 dw_0) + \frac{i}{4}(w_1 d\overline{w}_1 - \overline{w}_1dw_1 + w_2 d\overline{w}_2 - \overline{w}_2 dw_2).\]
We now construct the Hamiltonian function
\[H(w)=|w|^2+ \epsilon(|w_{1}|^2-|w_{2}|^2).\]
We choose $0<\epsilon<1$ such that $H(w)$ is positive on $S^5$, and define the contact form
\begin{equation}
\alpha_\epsilon= \frac{\Psi^*\alpha_1}{2H}
\end{equation}
\begin{rem}
The above shows that $(\Sigma(n+1,2,2), \ker \alpha_1)$ is contactomorphic to $(\Psi(\Sigma(n+1,2,2)), \ker \alpha_\epsilon)$. Moreover $L_{A_n}=\Psi(\Sigma(n+1,2,2))$, where $L_{A_n}$ was defined in $(\ref{linkeq})$.
\end{rem}
\begin{prop}\label{perturbedreebprop} The Reeb vector field for $\alpha_\epsilon$ is
\begin{align}\label{perturbedreeb}
R_{\alpha_\epsilon} & =\frac{4i}{n+1}w_0\frac{\partial}{\partial w_0}-\frac{4i}{n+1}\overline{w}_0\frac{\partial}{\partial \overline{w}_0} +
2i(1+\epsilon)\left(w_{1}\frac{\partial}{\partial w_{1}}-\overline{w}_{1}\frac{\partial}{\partial \overline{w}_{1}} \right)\notag \\
& + 2i(1 - \epsilon) \left(w_{2} \frac{\partial}{\partial w_{2}} - \overline{w}_{2}\frac{\partial}{\partial \overline{w}_{2}}\right) \notag \\
& = \left( \frac{4i}{n+1}w_0,2i(1+\epsilon)w_1,2i(1-\epsilon)w_2\right).
\end{align}
\end{prop}
\begin{rem}
The second formulation of the Reeb vector field is equivalent to the first in the above Proposition via the standard identification of $\mathbb{R}^4$ with $\mathbb{C}^2$, as explained in Example \ref{3-sphere}, equation (\ref{reeb3sphere2}).
\end{rem}
Before proving Proposition \ref{perturbedreebprop} we need the following lemma.
\begin{lem}\label{helper}
On $\mathbb{C}^3$, the vector field
\begin{align}
X(w) = \frac{1}{2}\left(\sum_{j=0}^{2} w_j\frac{\partial}{\partial w_j} + \overline{w}_j\frac{\partial}{\partial \overline{w}_j}\right)
\end{align}
is a Liouville vector field for the symplectic form
\[\omega_1=\frac{d(\Psi^*\alpha_1)}{2}=\frac{i(n+1)}{4}dw_0 \wedge d\overline{w}_0 + \frac{i}{2}\sum_{j=1}^{2} dw_j \wedge d\overline{w}_j.\]
The Hamiltonian vector field $X_H$ of $H$ with respect to $\omega_1$ is $-R_{\alpha_\epsilon}$, as in \emph{(\ref{perturbedreeb})}.
\end{lem}
\begin{proof}
Recall that the condition to be a Liouville vector field is $\mathcal{L}_X \omega_1 = \omega_1$. We show this with Cartan's formula:
\begin{align*}
\mathcal{L}_X\omega_1 & = \iota_X d\omega_1 + d(\iota_X \omega_1) \\
& = d(\iota_X \omega_1).
\end{align*}
We do the explicit calculation for the first term and the rest easily follows:
\begin{align*}
d \left( \frac{i(n+1)}{4} dw_0 \wedge d\overline{w}_0 \left( \frac{1}{2} \left( w_0 \frac{\partial}{\partial w_0} + \overline{w}_0 \frac{\partial}{\partial \overline{w}_0}\right), \cdot \right) \right) & = d \left( \frac{i(n+1)}{8} \left( w_0 d\overline{w}_0 - \overline{w}_0 dw_0 \right) \right) \\
& = \frac{i(n+1)}{8} \left( dw_0 \wedge d\overline{w}_0 - d\overline{w}_0 \wedge dw_0 \right) \\
& = \frac{i(n+1)}{4} dw_0 \wedge d\overline{w}_0,
\end{align*}
so $X(w)$ is indeed a Liouville vector field for $\omega_1$. \\
Next we prove that $\omega_1(-R_{\alpha_\epsilon},\cdot) = dH(\cdot)$. First we calculate $dH$,
\[ dH = \left(\sum_{j = 0}^{2} w_j d\overline{w}_j + \overline{w}_j dw_j\right) + \epsilon(w_{1} d\overline{w}_{1} + \overline{w}_{1} dw_1 - w_{2} d\overline{w}_{2} - \overline{w}_{2}dw_{2}).\]
Then we compare the coefficients of $dH$ to the coefficients of $\omega_1(-R_{\alpha_\epsilon},\cdot)$ associated to each term, $(dw_i \wedge d\overline{w}_i)$. The $(dw_0 \wedge d\overline{w}_0)$ term is
\begin{align*}
\frac{i(n+1)}{4} dw_0 \wedge d\overline{w}_0 \left(-\frac{4i}{n+1} w_0 \frac{\partial}{\partial w_0} + \frac{4i}{n+1} \overline{w}_0 \frac{\partial}{\partial \overline{w}_0} ,\cdot\right) & = \frac{i(n+1)}{4} \left(- \frac{4i}{n+1} w_0 d\overline{w}_0 - \frac{4i}{n+1} \overline{w}_0 dw_0 \right) \\
& = w_0 d\overline{w}_0 + \overline{w}_0 dw_0.
\end{align*}
The $(dw_{1} \wedge d\overline{w}_{1})$ term is
\begin{align*}
\frac{i}{2} dw_{1} \wedge d\overline{w}_{1} \left( -2i(1 + \epsilon) w_{1} \frac{\partial}{\partial w_{1} } + 2i(1 + \epsilon) \overline{w}_{1} \frac{\partial}{\partial \overline{w}_{1}} \right) & = \frac{i}{2} \left( - 2i(1 + \epsilon)w_{1} d\overline{w}_{1} - 2i(1 + \epsilon)\overline{w}_{1}dw_{1}\right) \\
& = (1 + \epsilon) w_{1} d\overline{w}_{1} + (1 + \epsilon) \overline{w}_{1} dw_{1}.
\end{align*}
The $(dw_2 \wedge d\overline{w}_2)$ term is obtained similarly. Summing the terms yields $\omega_1(-R_{\alpha_\epsilon},\cdot) = dH(\cdot)$.
\end{proof}
\begin{proof}[Proof of Proposition \ref{perturbedreebprop}]
First we show that $X_H =-R_{\alpha_\epsilon}$ is tangent to the link $\Psi(\Sigma(n+1,2,2) )$. We compute
\begin{align*}
(\Psi^*df)(R_{\alpha_\epsilon})
& = \left( (n+1)w_0^n dw_0 + 2w_{1}dw_{2} + 2w_{2} dw_1 \right) (R_{\alpha_\epsilon}) \\
& = 4i w_0^{n+1} + 4i(1 - \epsilon) w_{1}w_{2} + 4i(1 + \epsilon) w_{1}w_{2} \\
& = 4i (\Psi^*f) \\
& = 0
\end{align*}
where the last equality holds because $\Psi^*f$ vanishes along $\Psi(\Sigma(n+1,2,2) )$. Now we have to show that $\dfrac{\Psi^*\alpha_1}{2}(X_H) = -H$. We have
\begin{align*}
\dfrac{\Psi^*\alpha_1}{2}\left(\cdot\right) & = \iota_X\omega_1(\cdot) = \omega_1(X(w),\cdot) = - \omega(\cdot,X(w)) \\
\dfrac{\Psi^*\alpha_1}{2}(X_H) & = -\omega(X_H,X(w)) = - dH(X(w)) \\
& = - |w|^2 - \epsilon (|w_{1}|^2 - |w_{2}|^2) \\
& = -H.
\end{align*}
From these, we conclude
\begin{align*}
\alpha_\epsilon(X_H) & = -\frac{1}{H}H = -1 \\
d\alpha_\epsilon(X_H,\cdot) & = - \frac{1}{2H^2} (dH \wedge \Psi^* \alpha_1)(X_H,\cdot) + \frac{1}{2H} d\Psi^*\alpha_1(X_H,\cdot) \\
& = - \frac{1}{2H^2} dH(X_H) \Psi^*\alpha_1(\cdot) + \frac{1}{2H^2} \Psi^*\alpha_1(X_H) dH(\cdot) + \frac{1}{H} \omega (X_H,\cdot) \\
& = - \frac{1}{2H^2}\omega_1(X_H,X_H) \Psi^*\alpha_1(\cdot) - \frac{1}{H} dH(\cdot) + \frac{1}{H} dH(\cdot) \\
& = 0
\end{align*}
By Lemma \ref{helper}, we know $-X_H = R_{\alpha_\epsilon}$ so the result follows.
\end{proof}
\subsection{Isolated Reeb Orbits} \hspace{\fill} \\
In this quick section, we prove the following proposition.
\begin{prop}
The only simple periodic Reeb orbits of $R_{\alpha_\epsilon}$ are nondegenerate and defined by
\begin{align*}
\gamma_+(t) & = (0,e^{2i(1 + \epsilon)t},0), \quad \quad 0 \le t \le \frac{\pi}{1 + \epsilon} \\
\gamma_-(t) & = (0,0,e^{2i(1 - \epsilon)t}), \quad \quad 0 \le t \le \frac{\pi}{1 - \epsilon}.
\end{align*}
\end{prop}
\begin{proof}
The flow of
\[ R_{\alpha_\epsilon} = \left( \frac{4i}{n + 1}w_0,2i(1 + \epsilon)w_1,2i(1 - \epsilon)w_2\right)\]
is given by
\[\varphi_t(w_0,w_1,w_2) = \left(e^{\frac{4it}{n+1}}w_0,e^{2i(1+\epsilon)t}w_{1},e^{2i(1-\epsilon)t}w_{2}\right).\]
Since $\epsilon$ is small and irrational, the only possible periodic trajectories are
\begin{align*}
\mathfrak{g}amma_0(t) & = (e^{\frac{4i}{n+1}t},0,0) \\
\mathfrak{g}amma_+(t) & = (0,e^{2i(1 + \epsilon)t},0) \\
\mathfrak{g}amma_-(t) & = (0,0,e^{2i(1 - \epsilon)t}).
\end{align*}
It is important to note that the first trajectory does not lie in $\Psi(\Sigma(n+1,2,2))$, but rather in the ambient space $\mathbb{C}^3$. This is because the point $\gamma_0(0) = (1,0,0)$ is not a zero of $f_{A_n}=w_0^{n+1}+2w_1w_2$.
Next we need to check that the linearized return maps $d\partialhi|_\xi$ associated to $\mathfrak{g}amma_+$ and $\mathfrak{g}amma_-$ have no eigenvalues equal to 1. We consider the first orbit $\mathfrak{g}amma_+$ of period $\partiali/(1 + \epsilon)$, as a similar argument applies to the return flow associated to $\mathfrak{g}amma_-$. The differential of its total return map is:
\[ d\varphi_{T} = \left. \begin{pmatrix}
e^{\frac{4iT}{n+1}} & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & e^{2i(1 - \epsilon)T}
\end{pmatrix}\right\arrowvert_{T=\frac{\partiali}{1+\epsilon}} \]
Since $\epsilon$ is a small irrational number, the total return map has exactly one eigenvalue equal to 1. The eigenvector associated to this eigenvalue is in the direction of the Reeb orbit $\gamma_+$, but since we are restricting the return map to $\xi$, we can conclude that $\gamma_+$ is nondegenerate.
\end{proof}
\subsection{Computation of the Conley-Zehnder index}\hspace{\fill} \\
To compute the Conley-Zehnder indices of the Reeb orbits in Theorem 1.1 we use the same method as in \cite{U}, extending the Reeb flow to give rise to a symplectomorphism of $\mathbb{C}^3\setminus \{\mathbf{0} \}$. This permits us to do the computations in $\mathbb{C}^3$, equipped with the symplectic form
\[
\omega_1=\frac{d(\Psi^*\alpha_1)}{2}=\frac{i(n+1)}{4}dw_0 \wedge d\overline{w}_0 + \frac{i}{2}\sum_{j=1}^{2} dw_j \wedge d\overline{w}_j.
\]
We may equip the contact structure $\xi_0$ with the symplectic form $\omega = d\alpha_1$ instead of $d\alpha_\epsilon$ when computing the Conley-Zehnder indices. This is because $ \ker \alpha_\epsilon = \ker \alpha_1 = \xi_0$, as $\alpha_\epsilon = \frac{1}{H} \alpha_1$ with $H > 0$ and because $\omega|_\xi = Hd\alpha_\epsilon|_\xi$ and $H$ is constant along Reeb trajectories.
Our first proposition shows that we can construct a standard symplectic basis for the symplectic complement
\[
\xi^\omega = \{ v \in \mathbb{C}^3 \ | \ \omega(v,w) = 0 \text{ for all $w \in \xi$}\}
\]
of $\xi$ in $\mathbb{C}^3$. As a result, $c_1(\xi^\omega)=0$. Since $c_1(\mathbb{C}^3)=0$, we know $c_1(\xi)=0$. Thus we may compute the Conley-Zehnder indices in the ambient space $\mathbb{C}^3$ and use additivity of the Conley-Zehnder index under direct sums of symplectic paths to compute it in $\xi$.
\begin{prop}
There exists a standard symplectic basis for the symplectic complement $\xi^\omega$ with respect to $\omega = d\alpha_1$.
\end{prop}
\begin{proof}
Notice that $\xi^\omega = \mbox{span}(X_1, Y_1,X_2,Y_2)$ where
\begin{align*}
X_1 & = (\bar{w}_0^n,\bar{w}_1,\bar{w}_2) \quad Y_1 = iX_1 \\
X_2 & = R_{\alpha_\epsilon} \quad \quad \quad \quad \quad Y_2 = w.
\end{align*}
We make this into a standard symplectic basis for $\xi^\omega$ via a Gram--Schmidt process. The new basis is given by:
\[
\begin{array}{rclc rcl}
\tilde X_1 & = & \dfrac{X_1}{\sqrt{\omega(X_1,Y_1)}} & \ \ \ \ \ \ & \tilde Y_1 & =& \dfrac{Y_1}{\sqrt{\omega(X_1,Y_1)}} = i \tilde X_1 \\
&&&&&& \\
\tilde X_2 &=& X_2 &\ \ \ \ \ \ & \tilde Y_2 &= & Y_2 - \dfrac{\omega(X_1,Y_2)Y_1 - \omega(Y_1,Y_2)X_1}{\omega(X_1,Y_1)} \\
&&&&&& \\
&&&& \ \ \ \ \ \ &= &Y_2 - \dfrac{n-1}{2}\,\dfrac{w_0^{n+1}}{\omega(X_1,Y_1)}X_1. \\
\end{array}
\]
This is a standard basis for the symplectic vector space $\xi^\omega$, i.e. the form $\omega$ in this basis is given by
\[\begin{pmatrix}
\begin{pmatrix}
0 & 1 \\
-1 & 0
\end{pmatrix} & \\
& \begin{pmatrix}
0 & 1 \\
-1 & 0
\end{pmatrix}
\end{pmatrix}.\]
\end{proof}
Now we are ready to prove the Conley-Zehnder index formula in Theorem \ref{CZcomputation}.
\begin{prop}
The Conley-Zehnder index for $\mathfrak{g}amma = \mathfrak{g}amma_{\partialm}^N$ in $0 \le t \le \frac{N\partiali}{1 \partialm \epsilon}$ is
\begin{align}
\mu_{CZ}(\mathfrak{g}amma_{\partialm}^N) = 2\left( \left\lfloor \frac{2N}{(n+1)(1 \partialm \epsilon)}\right \rfloor + \left\lfloor \frac{N(1 \mp \epsilon)}{1 \partialm \epsilon} \right\rfloor - \left \lfloor \frac{2N}{1 \partialm \epsilon} \right \rfloor \right) + 2N + 1.
\end{align}
\end{prop}
\begin{proof}
The Reeb flow $\varphi$ which we introduced in the previous section can be extended to a flow on $\mathbb{C}^3 $, which we also denote by $\varphi$. The action of the extended Reeb flow on $\mathbb{C}^3$ is given by:
\[
\begin{array}{lclclcl}
d\varphi_t(w)\tilde X_1 & = & e^{4it}\tilde X_1(\varphi_t(w)) & \ \ \ &d\varphi_t(w)\tilde Y_1 & = & e^{4it}\tilde Y_1(\varphi_t(w)) \\
d\varphi_t(w)\tilde X_2 & = &\tilde X_2(\varphi_t(w)) & \ \ \ &d\varphi_t(w)\tilde Y_2 &=& \tilde Y_2(\varphi_t(w)). \\
\end{array}
\]
Define
\[
\Phi := d\varphi_t \big |_{\mathbb{C}^3} = \diag \left(e^{\frac{4i}{n+1}t},e^{2i(1+\epsilon)t},e^{2i(1 - \epsilon)t}\right)
\]
We can now use the additivity of the Conley-Zehnder index under direct sums of symplectic paths, Theorem \ref{CZprop}(4) to get
\[
\mu_{CZ}(\mathfrak{g}amma_\partialm) = \mu_{CZ}(\Phi) - \mu_{CZ}(\Phi_{\xi^\omega}),
\]
where
\begin{equation}\label{CZsum}
\Phi_{\xi^\omega} := d\varphi_t \big |_{\xi^\omega} = \diag \left(e^{4it},1\right).
\end{equation}
The right hand side of (\ref{CZsum}) is easily computed via the crossing form; see \cite[Rem 5.4]{RS}. In particular we have
\[
\mu_{CZ}\left(\{e^{it}\} \big |_{t\in [0,T]}\right) = \left\{ \begin{array}{ll}
\dfrac{T}{\partiali}, & T \in 2\partiali \mathbb{Z} \\
&\\
2 \left \lfloor \dfrac{T}{2\partiali} \right \rfloor + 1,\ \ \ & \text{otherwise.}
\end{array} \right.
\]
\noindent Thus for $\{ \Phi(t) \} = \{ e^{4it/(n+1)}\oplus e^{2it(1 + \epsilon)} \oplus e^{2it(1 - \epsilon)} \}$ with $0 \leq t \leq T$ we obtain: \\
\begin{align*}
\mu_{CZ}(\Phi) & = \left\{ \begin{array}{ll}
\dfrac{4T}{(n+1)\partiali}, & T \in \frac{(n+1)\partiali}{2}\mathbb{Z} \\
&\\
2 \left\lfloor \dfrac{2T}{(n+1)\partiali}\right \rfloor + 1,\ \ \ & T \notin \frac{(n+1)\partiali}{2}\mathbb{Z}
\end{array}\right. \ \ \ \ \ + \ \ \ \left\{ \begin{array}{ll}
\dfrac{2T(1+\epsilon)}{\partiali}, & T \in \frac{\partiali}{1 + \epsilon}\mathbb{Z} \\
&\\
2 \left\lfloor \dfrac{T(1 + \epsilon)}{\partiali} \right\rfloor + 1,\ \ \ & T \notin \frac{\partiali}{1 + \epsilon}\mathbb{Z}
\end{array}\right. \\
&\\
& + \left\{ \begin{array}{ll}
\dfrac{2T(1-\epsilon)}{\partiali}, & T \in \frac{\partiali}{1 - \epsilon}\mathbb{Z} \\
&\\
2 \left \lfloor \dfrac{T(1 - \epsilon)}{\partiali}\right \rfloor + 1,\ \ \ & T \notin \frac{\partiali}{1 - \epsilon}\mathbb{Z}.
\end{array}\right.
\end{align*}
\noindent Likewise for $\Phi_{\xi^\omega}$ with $0 \leq t \leq T$ we obtain:
\begin{align*}
\mu_{CZ}(\Phi_{\xi^\omega}) & = \left\{ \begin{array}{ll}
\dfrac{4T}{\partiali}, & T \in \frac{\partiali}{2}\mathbb{Z} \\
&\\
2 \left \lfloor \dfrac{2T}{\partiali} \right \rfloor + 1,\ \ \ & T \notin \frac{\partiali}{2}\mathbb{Z}.
\end{array}\right.
\end{align*}
Hence we get that the Conley-Zehnder index for $\mathfrak{g}amma_{\partialm}^N$ in $0 \le t \le \frac{N\partiali}{1 \partialm \epsilon}$ is given by:
\begin{equation}
\mu_{CZ}(\mathfrak{g}amma_{\partialm}^N) = 2\left( \left\lfloor \frac{2N}{(n+1)(1 \partialm \epsilon)}\right \rfloor + \left\lfloor \frac{N(1 \mp \epsilon)}{1 \partialm \epsilon} \right\rfloor - \left \lfloor \frac{2N}{1 \partialm \epsilon} \right \rfloor \right) + 2N + 1.
\end{equation}
\end{proof}
\section{Proof of Theorem \ref{lenslinkcontacto}}\label{sectionlinklens}
This section proves that $(L_{A_n},\xi_0)$ and $(L(n+1,n),\xi_{std})$ are contactomorphic. This is done by constructing a 1-parameter family of contact manifolds via a canonically defined Liouville vector field and applying Gray's stability theorem.
\subsection{Contact geometry of $(L(n+1,n),\xi_{std})$}\label{contactgeomlens} \hfill \\
The lens space $L(n+1,n)$ is obtained via the quotient of $S^3$ by the binary cyclic subgroup $A_n \subset SL(2,\mathbb{C})$. The subgroup $A_n$ is given by the action of $\mathbb{Z}_{n+1}$ on $\mathbb{C}^2$ defined by
\begin{align*} \begin{pmatrix}
u \\
v
\end{pmatrix} \mapsto \begin{pmatrix}
e^{2\partiali i/(n+1)} & 0 \\
0 & e^{2 n\partiali i/(n+1)}
\end{pmatrix}\begin{pmatrix}
u \\
v
\end{pmatrix} . \\
\end{align*}
The following construction shows that $L(n+1,n)$ is homeomorphic to $L_{A_n}$.
This construction will be needed later on in another proof, so we explain it here to set up the notation.
The origin is the only fixed point of the $A_n$ action on $\mathbb{C}^2$ and hence is an isolated quotient singularity of $\mathbb{C}^2/A_n$. We can represent $\mathbb{C}^2/A_n$ as a hypersurface of $\mathbb{C}^3$ as follows. Consider the monomials
\[
z_0 := uv, \quad z_1 := \tfrac{i}{\sqrt{2}}u^{n+1}, \quad z_2 := \tfrac{i}{\sqrt{2}}v^{n+1}
.\]
These are invariant under the action of $A_n$ and satisfy the equation $z_0^{n+1} + 2z_1z_2 = 0$. Recall that
\[
f_{A_n}(z_0,z_1,z_2) = z_0^{n+1} + 2z_1z_2,
\]
and
\[
L_{A_n}=S^5 \cap \{ f_{A_n}^{-1}(0) \}
\]
Moreover,
\begin{equation}\label{varphieq}
\begin{array}{llcl}
\tilde\varphi: &\mathbb{C}^2 &\to& \mathbb{C}^3 \\
&(u,v) &\mapsto &(uv,\tfrac{i}{\sqrt{2}}u^{n+1},\tfrac{i}{\sqrt{2}}v^{n+1})\\
\end{array}
\end{equation} descends to the map
\[
\varphi: \mathbb{C}^2/A_n \to \mathbb{C}^3,
\]
which sends $\varphi (\mathbb{C}^2/A_n)$ homeomorphically onto the hypersurface $f^{-1}_{A_n}(0)$.
Rescaling away from the origin of $\mathbb{C}^3$ yields a homeomorphism between $\varphi(S^3/A_n)$ and $L_{A_n}$. As 3-manifolds which are homeomorphic are also diffeomorphic \cite{moise} we obtain the following proposition.
\begin{prop}
$L(n+1,n)$ is diffeomorphic to $L_{A_n}$.
\end{prop}
\begin{rem}
In order to prove that two manifolds are contactomorphic, one must either construct an explicit diffeomorphism or make use of Gray's stability theorem. Sadly, $\varphi$ is not a diffeomorphism onto its image when $u=0$ or $v=0$. As the above diffeomorphism is only known to exist abstractly, we will need to appeal the latter method to prove that $(L_{A_n},\xi_0)$ and $(L(n+1,n),\xi_{std})$ are contactomorphic. As a result, this proof is rather involved. \end{rem}
Our application of Gray's stability theorem uses the flow of a Liouville vector field to construct a 1-parameter family of contactomorphisms. First we prove that $L(n+1,n)$ is a contact manifold whose contact structure descends from the quotient of $S^3$.
Consider the standard symplectic form on $\mathbb{C}^2$ given by
\begin{equation}
\begin{array}{lcl}
\omega_{\mathbb{C}^2}&=&d\lambda_{\mathbb{C}^2} \\
\lambda_{\mathbb{C}^2} &= &\dfrac{i}{2} \left(u d\bar u - \bar u du + v d\bar v - \bar v dv\right). \\
\end{array}
\end{equation}
The following proposition shows that $\lambda_{\mathbb{C}^2}$ restricts to a contact form on $L(n+1,n)$. We define $\xi_{std} := \ker\lambda_{\mathbb{C}^2}$ on $L(n+1,n)$.
\begin{prop}\label{calcliou}
The vector field
\[
Y_0 = \frac{1}{2} \left( u\frac{\partial}{\partial u} + \bar{u} \frac{\partial }{\partial \bar{u}} + v\frac{\partial}{\partial v} + \bar{v} \frac{\partial }{\partial \bar{v}} \right)
\]
is a Liouville vector field on $(\mathbb{C}^2/A_n,\omega_{\mathbb{C}^2})$ away from the origin and transverse to $L(n+1,n)$.
\end{prop}
\begin{proof}
We have that $\mathbb{C}^2/A_n$ is a smooth manifold away from the origin because $0$ is the only fixed point by the action of $A_n$. Write
\[
S^3/A_n = \{ (u, v) \in \mathbb{C}^2/A_n\ \big | \ |u|^2+|v|^2 = 1\}.
\]
Then $L(n+1,n) =S^3/A_n$ is a regular level set of $g(u,v) = |u|^2+|v|^2$. Choose a Riemannian metric on $\mathbb{C}^2/A_n$ and note that
\[
Y_0 = \frac{1}{4} \nabla g.
\]
Thus $Y_0$ is transverse to $L(n+1,n)$. Since
\[
\mathcal{L}_{Y_0} \omega_{\mathbb{C}^2} = d(i_{Y_0}d\lambda_{\mathbb{C}^2}) = \omega_{\mathbb{C}^2},
\]
we may conclude that $Y_0$ is indeed a Liouville vector field on $(\mathbb{C}^2/A_n, \omega_{\mathbb{C}^2})$ away from the origin. Thus by Proposition \ref{contacttype}, $L(n+1,n)$ is a hypersurface of contact type in $\mathbb{C}^2/A_n$.
\end{proof}
\subsection{ The proof that $(L_{A_n},\xi_0)$ and $(L(n+1,n),\xi_{std})$ are contactomorphic} \hfill \\
First we set up $L_{A_n}$ and $\varphi(L(n+1,n))$ as hypersurfaces of contact type in $\{f^{-1}_{A_n}(0) \} \setminus \{ \mathbf{0} \}$. Define $\rho : \mathbb{C}^3 \to \mathbb{R}$ by
\[ \rho(z) = \frac{|z|^2 - 1}{4} = \frac{z_0\bar{z}_0 + \cdots + z_2\bar{z}_2 - 1}{4}.
\]
The standard symplectic structure on $\mathbb{C}^3$ is given by
\[ \omega_{\mathbb{C}^3} = \frac{i}{2}( dz_0\wedge d\bar{z}_0 + \cdots + dz_2 \wedge d\bar{z}_2).\]
Moreover,
\begin{equation}\label{liouY}
Y = \nabla \rho = \frac{1}{2} \sum_{j = 0}^2 z_j \frac{\partial}{\partial z_j} + \bar{z}_j \frac{\partial}{\partial \bar{z}_j}
\end{equation}
is a Liouville vector field for $(\mathbb{C}^3,\omega_{\mathbb{C}^3}).$ We define
\[
\lambda_{\mathbb{C}^3}= \iota_Y \omega_{\mathbb{C}^3}.
\]
A standard calculation analogous to the proof of Proposition \ref{calcliou} shows that $Y$ is a Liouville vector field on $\left( \{f^{-1}_{A_n}(0) \} \setminus \{ \mathbf{0} \}, \omega_{\mathbb{C}^3} \right)$
\begin{rem}
Both $\varphi(L(n+1,n))$ and $L_{A_n}$ are hypersurfaces of contact type in
$\left( \{f^{-1}_{A_n}(0) \} \setminus \{ \mathbf{0} \}, \omega_{\mathbb{C}^3} \right)$.
Note that $\varphi(L(n+1,n))$ is in fact transverse to the Liouville vector field $Y$ because
\[
\begin{array}{ccl}
\varphi(L(n+1,n)) &=& \varphi \left( \left \{|u|^{2} + |v|^{2} = 1 \right \}/ A_n\right) \\
&=& \varphi ( \{|u|^{4} + 2|u|^2|v|^2+ |v|^{4} = 1 \}/ A_n) \\
&=&\left \{ 2|z_0|^2+4^{1/(n+1)}|z_1|^{4/(n+1)} + 4^{1/(n+1)} |z_2|^{4/(n+1)} = 1 \right \} \cap f_{A_n}^{-1}(0) \\
\end{array} \]
\end{rem}
We will want $\varphi(L(n+1,n))$ and $L_{A_n}$ to be disjoint in $\{f^{-1}_{A_n}(0) \}. $ This is easily accomplished by rescaling $r$ in the definition of the link.
\begin{definition}
Define
\[
L_{A_n}^r = f^{-1}_{A_n}(0) \cap S^5_r,
\]
with the assumption that $r$ has been chosen so that $\varphi(L(n+1,n))$ and $L_{A_n}^r$ are disjoint in $\{f^{-1}_{A_n}(0) \}$ and so that the flow of the Liouville vector field $Y$ ``hits" $\varphi(L(n+1,n))$ before $L_{A_n}^r$.
\end{definition}
The first result is the following lemma, which provides a 1-parameter family of diffeomorphic manifolds starting on $\varphi(L(n+1,n))$ and ending on $L_{A_n}^r$. First we set up some notation. Let
\[
\partialsi_t:\mathbb{R} \times X \to X
\]
be the flow of $Y$ and $\partialsi_t(z) = \mathfrak{g}amma_z(t)$ the unique integral curve passing through $z \in \varphi(L(n+1,n))$ at time $t = 0$. For any integral curve $\mathfrak{g}amma$ of $Y$ we consider the following initial value problem:
\begin{equation}
\label{ivp}
\begin{array}{ccl}
\mathfrak{g}amma'(t)&=&Y(\mathfrak{g}amma(t))\\
\mathfrak{g}amma(0)&=&z \in \varphi(L(n+1,n)) \\
\end{array}
\end{equation}
By means of the implicit function theorem and the properties of the Liouville vector field $Y$ we can prove the following claim.
\begin{lem}\label{oneparameter}
For every $\mathfrak{g}amma_z$, there exists a $\tau(z) \in \mathbb{R}_{> 0}$ such that $\mathfrak{g}amma_z(\tau(z)) \in L_{A_n}^r$. The choice of $\tau(z) $ varies smoothly for each $z \in \varphi(L(n+1,n))$.
\end{lem}
\begin{proof}
In order to apply the implicit function theorem, we must show for all $(t,z)$ with $\rho \circ \mathfrak{g}amma =0$ that
\[
\frac{\partial (\rho \circ \mathfrak{g}amma)}{\partial t} \neq 0.
\]
Note that $\rho \circ \mathfrak{g}amma$ is smooth. By the chain rule,
\[
\left. \frac{\partial (\rho \circ \mathfrak{g}amma)}{\partial t}\right|_{(s,p)} = \mbox{grad }\rho |_{\mathfrak{g}amma(s,p)} \cdot \dot{\mathfrak{g}amma}|_{(s,p)},
\]
where $\dot{\mathfrak{g}amma}|_{(s,p)} = \frac{\partial \mathfrak{g}amma}{\partial t}|_{(s,p)}$.
If $\mbox{grad } \rho \arrowvert_{\mathfrak{g}amma(s,p)} \cdot \dot{ \mathfrak{g}amma}|_{(s,p)} = 0$, then either $\mbox{grad } \rho$ is not transverse along $\{ (\rho \circ \mathfrak{g}amma) \ (s,p)=0 \}$ or $ \dot{ \mathfrak{g}amma}|_{(s,p)} = 0$, since $\mbox{grad } \rho \neq 0$. By construction grad $\rho = \nabla \rho$ is a Liouville vector field transverse to $L_{A_n}^r$ . Furthermore, the conformal symplectic nature of a Liouville vector field implies that for any integral curve $\mathfrak{g}amma$ satisfying the initial value problem given by equation (\ref{ivp}), $\dot{\mathfrak{g}amma}|_{(s,p)} \neq 0$. Thus we see that the conditions for the implicit function theorem are satisfied and our claim is proven.
\end{proof}
\begin{rem}\label{helperrem}
The time $\tau(z)$ can be normalized to 1 for each $z$, yielding a 1-parameter family of diffeomorphic contact manifolds $(M_t,\zeta_t)$ for $0 \le t \le 1$ given by
\[ M_t = \partialsi_t( \varphi(L(n+1,n))), \quad \zeta_t = TM_t \cap J_{\mathbb{C}^3} (TM_t)\] where
\[ M_0 = \partialsi_0(\varphi(L(n+1,n))) = \varphi(L(n+1,n)), \quad M_1 = \partialsi_1 (\varphi(L(n+1,n))) = L_{A_n}.\]
\end{rem}
Moreover, we can relate the standard contact structure on $L(n+1,n)$ under the image of $\varphi$. To avoid excessive parentheses, we use $S^3/A_n$ in place of $L(n+1,n)$ in this lemma.
\begin{lem}\label{technicalphi}
On $\varphi(S^3/A_n), \ \ \varphi_*\xi_{std}= T(\varphi(S^3/A_n)) \cap J_{\mathbb{C}^3} (T(\varphi(S^3/A_n))) .$
\end{lem}
\begin{proof}
Since $A_n \subset SL(2,\mathbb{C})$
we have
\[
\tilde \varphi (J_{\mathbb{C}^2}TS^3) = J_{\mathbb{C}^3}(T\tilde \varphi(S^3)).
\]
We examine $\varphi_*\big(\xi_{\text{std}}\big)$:
\begin{align*}
\varphi_*(T(S^3/A_n) \cap J_{\mathbb{C}^2} T(S^3/A_n)) &=\tilde \varphi_*(TS^3 \cap J_{\mathbb{C}^2}(TS^3)) \\
&=\tilde \varphi_*(TS^3) \cap \tilde \varphi_*(J_{\mathbb{C}^2}(TS^3)) \\
&=\tilde \varphi_*(TS^3) \cap J_{\mathbb{C}^3}\tilde \varphi_*(TS^3) \\
&= T\tilde\varphi(S^3) \cap J_{\mathbb{C}^3}(T\tilde\varphi(S^3)) \\
&= T(\varphi(S^3/A_n)) \cap J_{\mathbb{C}^3}(T\varphi(S^3/A_n)).
\end{align*}
\end{proof}
Lemmas \ref{oneparameter} and \ref{technicalphi} in conjunction with Remark \ref{helperrem} and Lemma \ref{graycor} yields the following proposition.
\begin{prop}\label{propatlast}
The image of the lens space $(\varphi(L(n+1,n)), \varphi_*\xi_{std})$ is contactomorphic to $(L_{A_n}, \xi_{0})$.
\end{prop}
It remains to show that $(\varphi(L(n+1,n)), \varphi_*\xi_{std})$ is contactomorphic to $(L(n+1,n),\xi_{std})$. To accomplish this, we use Moser's Lemma to prove the following lemma.
\begin{lem}\label{moserlem} The manifolds $(\mathbb{C}^2 \setminus \{ \mathbf{0} \},d\lambda_{\mathbb{C}^2})$ and $(\mathbb{C}^2 \setminus \{\mathbf{0} \}, d\tilde\varphi^*\lambda_{\mathbb{C}^3})$ are contactomorphic.
\end{lem}
\begin{proof}
Consider the family of 2-forms
\[ \omega_t = (1 - t)\omega_{\mathbb{C}^2} + t\tilde \varphi^*\omega_{\mathbb{C}^3}\]
for $0 \le t \le 1$. Then $\omega_t$ is exact because $Y_0$ and $Y$ are Liouville vector fields for $\mathbb{C}^2\setminus \mathbf{0}$ equipped with the symplectic forms $\omega_{\mathbb{C}^2}$ and $\omega_{\mathbb{C}^3}$ respectively, thus $d\lambda_t = \omega_t$ for
\[\lambda_t = (1 - t)\lambda_{\mathbb{C}^2} + t\tilde\varphi^*(\lambda_{\mathbb{C}^3}).\]
for $0 \le t \le 1$. We claim for each $t \in [0,1]$, $\lambda_t$ is a family of contact forms.
We compute
\begin{align*}
\frac{2}{i} \tilde\varphi^*d\lambda_{\mathbb{C}^3} & = d(uv)\wedge d(\overline{uv} ) + d(u^{n+1})\wedge d(\bar u^{n+1}) + d(v^{n+1}) \wedge d(\bar v^{n+1}) \\
& = ((n+1)^2 |u|^{2n} + |v|^2)du\wedge d\bar u + 2\,\mathrm{Re} (u\bar v dv \wedge d\bar u) + ((n+1)^2|v|^{2n} + |u|^2) dv\wedge d\bar v.
\end{align*}
Since $\omega_t$ is exact for each $t\in[0,1]$, $d(\omega_t)=0$ for each $t\in[0,1]$. Moreover, a simple calculation reveals that $\omega_t \wedge \omega_t$ is a volume form on $\mathbb{C}^2$ for each $t\in[0,1]$. Thus we may conclude that, $\omega_t$ is a symplectic form for each $t\in[0,1]$. Applying Moser's argument, Theorem \ref{moser}, yields the desired result.
\end{proof}
This yields the desired corollary.
\begin{cor}\label{atlast}
The manifolds $(L(n+1,n), \ker \lambda_{\mathbb{C}^2})$ and $(L(n+1,n), \ker \varphi^*\lambda_{\mathbb{C}^3})$ are contactomorphic.
\end{cor}
\begin{proof}
Let $\phi:(\mathbb{C}^2 \setminus \{ \mathbf{0} \},d\lambda_{\mathbb{C}^2}) \to (\mathbb{C}^2 \setminus \{ \mathbf{0} \}, d\tilde\varphi^*\lambda_{\mathbb{C}^3})$ be the symplectomorphism, which exists by Lemma \ref{moserlem}. It induces the desired contactomorphism. On $\mathbb{C}^2 \setminus \{ \mathbf{0} \}$,
\[
\phi^*d(\varphi^*\lambda_{\mathbb{C}^3}) = d\lambda_{\mathbb{C}^2},
\]
thus
\[
d\phi^*(\varphi^*\lambda_{\mathbb{C}^3}) = d\lambda_{\mathbb{C}^2}.
\]
So indeed on $L(n+1,n)$,
\[
\begin{array}{lcl}
\phi_*(\xi_{std}) &=& \phi_*(\ker \lambda_{\mathbb{C}^2}) \\
&=& \ker \varphi_* \lambda_{\mathbb{C}^3} \\
& =& \varphi_* \xi_{std}. \\
\end{array}
\]
\end{proof}
Proposition \ref{propatlast} and Corollary \ref{atlast} complete the proof of Theorem \ref{lenslinkcontacto}.
\end{document} |
\begin{document}
\title{Single Loop Gaussian Homotopy Method for\\ Non-convex Optimization}
\author{
Hidenori Iwakiri\thanks{The first two authors contributed equally.} \\
The University of Tokyo, RIKEN AIP\\
\texttt{[email protected]}\\
\And
Yuhang Wang\textcolor{ptpurple}{\footnotemark[1]} \\
The University of Tokyo\\
\texttt{[email protected]}\\
\AND
Shinji Ito\\
NEC Corporation, RIKEN AIP\\
\texttt{[email protected]}\\
\And
Akiko Takeda\\
The University of Tokyo, RIKEN AIP\\
\texttt{[email protected]}
}
\maketitle
\begin{abstract}
The Gaussian homotopy (GH) method is a popular approach to finding better stationary points for non-convex optimization problems by gradually reducing a parameter value $t$, which changes the problem to be solved from an almost convex one to the original target one. Existing GH-based methods repeatedly call an iterative optimization solver to find a stationary point every time $t$ is updated, which incurs high computational costs. We propose a novel single loop framework for GH methods (SLGH) that updates the parameter $t$ and the optimization decision variables at the same time. Computational complexity analysis is performed on the SLGH algorithm under various situations: either a gradient or gradient-free oracle of a GH function can be obtained for both deterministic and stochastic settings. The convergence rate of SLGH with a tuned hyperparameter becomes consistent with the convergence rate of gradient descent, even though the problem to be solved is gradually changed due to $t$. In numerical experiments, our SLGH algorithms show faster convergence than an existing double loop GH method while outperforming gradient descent-based methods in terms of finding a better solution.
\end{abstract}
\paragraph{Keywords}
Gaussian homotopy, Gaussian smoothing, Nonconvex optimization, Worst-case iteration complexity, Zeroth-order optimization
\section{Introduction}
Let us consider the following non-convex optimization problem:
\begin{align}
{\mathop{\rm minimize}\limits_{x\in {\mathbb{R}}^d}}\quad f(x),
\label{nonconvprob}
\end{align}
where $f:{\mathbb{R}}^d\rightarrow \mathbb{R}$ is a non-convex function.
Let us also consider the following stochastic setting:
\begin{align}
f(x) := \mathbb{E}_\xi [\bar{f}(x;\xi)],
\label{stocprob}
\end{align}
where $\xi$
is the random variable following a probability distribution $P$ from which
i.i.d.~samples can be generated.
Such optimization problems attract significant attention in machine learning,
and at the same time, the need for optimization algorithms that can find a stationary point with smaller objective value is growing. For example, though
it is often said that simple gradient methods can find global minimizers for deep learning
(parameter configurations with zero or near-zero training loss),
such beneficial behavior is not universal, as noted in \cit{li2017visualizing};
the trainability of neural nets is highly dependent on network architecture design choices,
variable initialization, etc.
There are also various other highly non-convex optimization problems in machine learning
(see e.g., \cit{jain2017nonconv}).
The Gaussian homotopy (GH) method is designed to avoid
poor stationary points by building a sequence of
successively smoother approximations of the original objective function $f$,
and it is expected to find a good stationary point with a small objective value
for a non-convex problem.
More precisely, using the GH function $F(x,t)$ with a parameter $t \geq 0$ that satisfies
$F(x,0)=f(x)$, the method starts from solving an almost convex smoothed function $F(x,t_1)$ with
some sufficiently large $t_1 \geq 0$ and gradually changes the optimization problem $F(x,t)$
to the original one $f(x)$ while decreasing the parameter $t$.
The homotopy method developed so far, then, consists of a double loop structure;
the outer loop reduces $t$, and the inner loop solves $\min_x F (x, t)$ for the fixed $t$.
\paragraph{Related research on the GH method}
The GH method is popular owing to its ease of implementation
and the quality of its obtained stationary points, i.e., their function values.
The nature of this method was first proposed in \cit{blake1987visual},
and it was then successfully applied in various fields,
including computer vision \cit{Nielsen1993,brox2010large,Zach2018},
physical sciences \cit{hemeda2012homotopy}
and computation chemistry \cit{wu1996effective}.
\cit{hazan2016graduated} introduces machine learning applications for the GH method,
and an application to tuning hyperparameters of kernel ridge regression \cit{shao2019graduated}
has recently been introduced.
Although there have been recent studies on the GH function $F(x,t)$ \cit{mobahi2015link, mobahi2015theoretical, hazan2016graduated}, all existing GH methods use the double loop approach noted above. Moreover, to the best of our knowledge, there are no existing works that give theoretical guarantee for the convergence rate except for \cit{hazan2016graduated}. It characterizes a family of non-convex functions for which a GH algorithm converges to a global optimum and derives the convergence rate to an $\epsilon$-optimal solution. However, the family covers only a small part of non-convex functions, and it is difficult to check whether the required conditions are satisfied for each function. See Appendix~\ref{sec:related_work} for more discussion on related work.
\begin{table}[tb]
\caption{\label{table:summary}Each theorem shows the iteration complexity of SLGH with respect to $\epsilon$
and the dimension of input space $d$ to reach an $\epsilon$-stationary point in
the corresponding problem setting. ``const.~$\gamma$'' shows the complexity
when we treat the decreasing parameter $\gamma$ as a constant.
``tuned $\gamma$'' shows the lowest complexity of SLGH attained by
updating $t$ appropriately, which matches the complexity of
the standard first- or zeroth-order methods (see e.g., Theorem \ref{iter_determin}).
We also consider two cases of a zeroth-order setting:
``exact $f$'', in which we can query the exact or stochastic function value,
and ``err.~$f$'', in which we can only access the function value with bounded error.}
\centering
\begin{tabular}{c|ccc}
& 1) first-order & \multicolumn{2}{c}{zeroth-order} \\\cline{3-4}
& & 2) exact $f$ & 3) err. $f$ \\ \hline
a) deterministic~& Thm.~\ref{iter_determin} & Thm.~\ref{thm:zo_determinstic} & Thm.~\ref{thm:error_deterministic}\\
const. $\gamma$ & $O\left(\frac{d^{3/2}}{\epsilon^2}\right)$ & $O\left(\frac{d^{2}}{\epsilon^2}\right)$ & $O\left(\frac{d^{3}}{\epsilon^2}\right)$ \\
tuned $\gamma$ & $O\left(\frac{1}{\epsilon^2}\right)$ & $O\left(\frac{d}{\epsilon^2}\right)$ & $O\left(\frac{d}{\epsilon^2}\right)$\\ \hline
b) stochastic~& Thm.~\ref{thm:iter_stochastic} & Thm.~\ref{thm:zo_stochastic} & Thm.~\ref{thm:error_stochastic} \\
const.~$\gamma$ & $O \left(
\frac{d}{\epsilon^4} + \frac{d^{3/2}}{\epsilon^2}
\right)$ & $O\left(\frac{d^2}{\epsilon^4}\right)$ & $O\left(\frac{d^2}{\epsilon^4}+\frac{d^3}{\epsilon^2}\right)$\\
tuned $\gamma$ & $O\left(\frac{1}{\epsilon^4}\right)$ & $O\left(\frac{d}{\epsilon^4}\right)$ & $O\left(\frac{d}{\epsilon^4}\right)$\\ \hline
\end{tabular}
\end{table}
\paragraph{Motivation for this work}
This paper proposes novel deterministic and stochastic GH methods employing
a single loop structure in which the decision variables $x$
and the smoothing parameter $t$ are updated at the same time
using individual gradient/derivative information.
Using a well-known fact in statistical physics on the relationship between
the {\it heat equation} and Gaussian convolution of $f$,
together with {\it the maximum principle} (e.g., \cit{evans2010partial})
for the {\it heat equation},
we can see that a solution $(x^\ast, t^\ast)$
minimizing the GH function $F(x,t)$ satisfies $t^\ast=0$;
thus, $x^\ast$ is also a solution for (\ref{nonconvprob}).
This observation leads us to a single loop GH method (SLGH, in short),
which updates the current point $(x_k,t_k)$ simultaneously for $\min_{x \in \mathbb{R}^d, t \geq 0} F(x,t)$.
The resulting SLGH method can be regarded as an application of
the steepest descent method to the optimization problem,
with $(x, t)$ as a variable. We are then able to investigate the convergence rate of
our SLGH method so as to achieve an $\epsilon$-stationary point of
\eqref{nonconvprob} and \eqref{stocprob} by following existing theoretical complexity analyses.
We propose two variants of the SLGH method: $\text{SLGH}_\text{d}$ and $\text{SLGH}_\text{r}$, which have different update rules for $t$. $\text{SLGH}_\text{d}$ updates $t$ using the derivative of $F(x, t)$ in terms of $t$, based on the idea of viewing $F(x, t)$ as the objective function with respect to the variable $(x, t)$. Though this approach is effective in finding good solutions (as demonstrated in Appendix \ref{subsec:toy}), it requires additional computational cost due to the calculation of $\frac{\partial F}{\partial t}$. To avoid this additional computational cost, we also consider $\text{SLGH}_\text{r}$ that uses fixed-rate update rule for $t$. We also show that both $\text{SLGH}_\text{d}$ and $\text{SLGH}_\text{r}$ have the same theoretical guarantee.
Table~\ref{table:summary} summarizes the convergence rate of our SLGH method to reach
an $\epsilon$-stationary point under a number of problem settings.
Since the convergence rate depends on the decreasing speed of $t$,
we list two kinds of complexity in the table; details are described in the caption.
We consider the three settings in which available oracles differ. In Case 1),
the full (or stochastic) gradient of $F(x,t)$ in terms of $x$ is available for
the deterministic problem \eqref{nonconvprob} (or stochastic problem \eqref{stocprob},
respectively). However, in this setting, we have to calculate Gaussian convolution
for deriving GH functions and their gradient vectors, which becomes expensive,
especially for high-dimensional applications, unless closed-form expression of Gaussian convolution is
possible. While \cit{mobahi2016closed} provides closed-form expression for some specific functions $f$, such as polynomials, Gaussian RBFs, and trigonometric functions,
such problem examples are limited.
As Case 2), we extend our deterministic and stochastic GH methods
to the zeroth-order setting, for which the convolution computation is approximated
using only the function values.
Another zeroth-order setting, Case 3), is also considered in this paper:
the inexact function values (more precisely, the function value with bounded error) can be
queried similarly as in the setting in \cit{jin2018local}. See Appendix \ref{sec:error_appendix} for more details.
Although no existing studies have analyzed the complexity of a double loop GH method to find an $\epsilon$-stationary point, we can see that its inner loop requires the same complexity as GD (gradient descent) method up to constants. Furthermore, as noted above, the complexity of the SLGH method with a tuned hyperparameter matches that of GD method. Thus, the SLGH method becomes faster than a double loop GH method by around the number of outer loops. The SLGH method is also superior to double loop GH methods from a practical perspective, because in order to ensure convergence of their inner loops, we have to set the stepsize conservatively, and furthermore a carefully tuned termination condition is required.
\textbf{Contributions} \quad We can summarize our contribution as follows:
(1) We propose novel deterministic and stochastic single loop GH (SLGH) algorithms
and analyze their convergence rates to an $\epsilon$-stationary point.
As far as we know, this is the first analysis of convergence rates of GH methods for general non-convex problems \eqref{nonconvprob} and \eqref{stocprob}. For non-convex optimization, the convergence rate of SLGH with a tuned hyperparameter becomes consistent with the convergence rate of gradient descent, even though the problem to be solved is gradually changed due to $t$.
At this time, the SLGH algorithms become faster than a double loop one by around its number of outer loops.
(2) We propose zeroth-order SLGH (ZOSLGH) algorithms based on zeroth-order estimators of
gradient and Hessian values, which are useful when Gaussian smoothing convolution is
difficult. We also consider the possibly non-smooth case in which the accessible function
contains error, and we derive the upper bound of the error level for convergence guarantee.
(3) We empirically compare our proposed algorithm and other algorithms in experiments,
including artificial highly non-convex examples and black-box adversarial attacks.
Results show that the proposed algorithm converges much faster than an existing
double loop GH method, while it is yet able to find better solutions than are
GD-based methods.
\section{Standard Gaussian homotopy methods}
\paragraph{Notation:}
For an integer $N$, let $[N]:=\{1,...,N \}$. We express $\chi_{[N]}:=\{\chi_1, \ldots, \chi_N \}$ for a set of some vectors. We also express the range of the smoothing parameter $t$ as $\mathcal{T}:= [0, t_1]$, where $t_1$ is an initial value of the smoothing parameter. Let $\|\cdot\|$ denote the Euclidean norm and $\mathcal{N}(0,\mathrm{I}_d)$ denote the $d$-dimensional standard normal distribution.
Let us first define Gaussian smoothed function.
\begin{definition}
Gaussian smoothed function $F(x,t)$ of $f(x)$ is defined as follows:
\begin{align}
F(x,t) &:=\mathbb{E}_{u\sim\mathcal{N}(0,\mathrm{I}_d)}[f(x+tu)]
= \int f(x+ty)k(y)dy,
\label{00}
\end{align}
where $k(y) = (2\pi)^{-d/2}\exp{(-{\|y\|^2}/2)}$ is referred to as the Gaussian kernel.
\end{definition}
The idea of Gaussian smoothing is to take an expectation over the function value with
a Gaussian distributed random vector $u$. For any $t>0$,
the smoothed function $F(x,t)$ is a $C^\infty$ function,
and $t$ plays the role of a smoothing parameter that controls the level of smoothing.
Here, let us show the link between Gaussian smoothing and
the {\it heat equation} \cit{widder1976heat}. The Gaussian smoothing convolution is
basically the solution of the {\it heat equation} \cit{widder1976heat}.
\begin{align}
\frac{\partial}{\partial t}\hat{u} = \Delta_x \hat{u}, \quad \hat{u}(\cdot,0) = f(\cdot),
\label{heatequation}
\end{align}
where $\Delta_x$ denotes the Laplacian. The solution of the {\it heat equation} is
$\hat{u}(x,t) = (\frac{1}{4\pi t})^{\frac{d}{2}}\int f(y) e^{-\frac{\|x-y\|^2}{4t}}dy$.
This can be made the same as the Gaussian smoothing function $F(x,t)$
by scaling its coefficient, which only changes the speed of progression.
Corollary 9 in \cit{mobahi2012gaussian} shows a sufficient condition for ensuring that
$f$ has the asymptotic strict convexity in which the smoothed function $F(x,t)$ becomes
convex if a sufficiently large smoothing parameter $t$ is chosen.
On this basis, the standard GH method, Algorithm~\ref{algo:Continuation}, starts with
an (almost) convex optimization problem $F(x,t)$ with
large parameter value $t \in \mathbb{R}$ and gradually changes the problem toward
the target non-convex $f(\cdot)=F(\cdot,0)$ by decreasing $t$ gradually.
\cit{hazan2016graduated} reduces $t$ by multiplying by a factor of $1/2$ for
each iteration $k$. \cit{mobahi2015theoretical} focuses more on
theoretical work w.r.t.~the general setting and do not discuss the update rule for $t$.
\begin{algorithm}[H]
\caption{Standard GH method (\cit{mobahi2015theoretical,hazan2016graduated})} \label{algo:Continuation}
\begin{algorithmic}
\REQUIRE Objective function $f$, iteration number $T$, sequence
$\{t_1,\ldots,t_T\}$ satisfying $t_1 > \cdots > t_T$.
Find a solution $x_1$ for minimizing $F(x, t_1)$.
\FOR {$k = 1$ to $T$}
\STATE Find a stationary point $x_{k+1}$ of $F(x, t_{k+1})$ with the initial solution $x_{k}$.
\ENDFOR
\RETURN $x_T$
\end{algorithmic}
\end{algorithm}
\section{Single loop Gaussian homotopy algorithm}
\label{sec:first-order}
A function $h(x)$ is $L_{0}$-$Lipschitz$ with a constant $L_0$
if for any $x,y \in \mathbb{R}^d$, $|h(x)-h(y)|\leq L_0 \|x-y\|$ holds.
In addition, $h(x)$ is $L_{1}$-$smooth$ with
a constant $L_1$ if for any $x,y \in \mathbb{R}^d$, $\|\nabla h(x)-\nabla h(y)\|\leq L_1 \|x-y\|$ holds.
Let us here list assumptions for developing algorithms with convergence guarantee.
\begin{assumption}{A1}$\ $\label{A1}
\renewcommand{\labelenumi}{(\roman{enumi})}
\begin{enumerate}
\item Objective function $f$ satisfies $\sup_{x\in\mathbb{R}^d}\mathbb{E}_u[|f(x+tu)|] < \infty$ (In the stochastic setting, $f$ satisfies $\sup_{x\in\mathbb{R}^d,\xi}\mathbb{E}_u[|\bar{f}(x+tu;\xi)|] < \infty$).
\item The optimization problem (\ref{nonconvprob}) has an optimal value $f^\ast$.
\item Objective function $f(x)$ is $L_{0}$-$Lipschitz$ and $L_{1}$-$smooth$ on $\mathbb{R}^d$ (In the stochastic setting, $\bar{f}(x;\xi)$ is $L_{0}$-$Lipschitz$ and $L_{1}$-$smooth$ on $\mathbb{R}^d$ in terms of $x$ for any $\xi$).
\end{enumerate}
\end{assumption}
Assumption (i), which makes $F(x,t)$ well-defined and enables exchanging the order of differentiation and integration, as well as Assumption (ii),
is mandatory for theoretical analysis with the GH method.
Assumption (iii) is often imposed for gradient-based methods.
This is a regular boundedness and smoothness assumption
in recent non-convex optimization analyses
(see e.g., \cit{NEURIPS2019_50a074e6, NEURIPS2020_0cb5ebb1, NEURIPS2019_b8002139}).
In the remainder of this section, we consider the nature of
the GH method and propose a more efficient algorithm, a SLGH algorithm.
We then provide theoretical analyses for our proposed SLGH algorithm.
\subsection{Motivation}
The standard GH algorithm needs to solve an optimization problem for
a given smoothing factor $t$ in each iteration and manually reduce $t$,
e.g., by multiplying some decreasing factor. To simplify this process,
we consider an alternative problem as follows:
\begin{align}
{\mathop{\rm minimize}\limits_{x\in \mathbb{R}^d, t \in \mathcal{T}}} \quad F(x,t),
\label{xtvar_prob}
\end{align}
where
$F(x,t)$ is the Gaussian smoothed function of $f(x)$. This single loop structure can
reduce the number of iterations by optimizing $x$ and $t$ at the same time.
The following theorem is (almost) a special case of Theorem 6 in \cit{evans2010partial},\footnote{Although the assumptions in Theorem 3.1 are stronger than those in the theorem proved by Evans, our statement is also stronger than that of his theorem, in the sense that our theorem guarantees that all optimal solutions satisfy $t=0$.}
which is studied in statistical physics but may not be well-known in machine learning and
optimization communities. This theorem shows that
the optimal solution of (\ref{xtvar_prob}) $(x^\ast, t^\ast)$ satisfies $t^\ast=0$, and thus $x^\ast$ is also a solution for (\ref{nonconvprob}).
Therefore, we can regard $F(x,t)$ as an objective function in the SLGH method.
\begin{theorem}\label{thm: main}
Suppose that Assumptions \ref{A1} (i) and (ii) are satisfied. Unless $f$ is constant a.e., the minimum of
the GH function $F(x,t)$ will always be found at $t=0$, and the corresponding $x$ will be
an optimal solution for \eqref{nonconvprob}.
\end{theorem}
We present a proof of this theorem in Appendix \ref{subsec:proof_optimality}.
The proof becomes much easier than
that in \cit{evans2010partial} due to its considering a specific case.
Let us next introduce an update rule for $t$ utilizing the derivative information.
When we solve the problem \eqref{xtvar_prob} using a gradient descent method,
the update rule for $t$ becomes $t_{k+1}=t_k-\eta\frac{\partial F}{\partial t}$,
where $\eta$ is a step size. The formula \eqref{heatequation} in the {\it heat equation}
implies that the derivative $\frac{\partial F}{\partial t}$ is equal to the Laplacian $\Delta_x F$,
i.e., $\frac{\partial F}{\partial t} = \mathrm{tr}(\mathrm{H}_F(x))$,
where $\mathrm{H}_F(x)$ is the Hessian of $F$ in terms of $x$.
Since $\mathrm{tr}(\mathrm{H}_F(x))$ represents the sharpness of minima \cit{Dinh2017sharp},
this update rule can sometimes decrease $t$ quickly around a minimum and
find a better solution. See Appendix \ref{subsec:toy} for an example of such a problem.
\subsection{SLGH algorithm}
\label{sec:GH}
Let us next introduce our proposed SLGH algorithm,
which has two variants with different update rules for $t$:
SLGH with a fixed-ratio update rule ($\text{SLGH}_\text{r}$) and
SLGH with a derivative update rule ($\text{SLGH}_\text{d}$).
$\text{SLGH}_\text{r}$ updates $t$ by multiplying a decreasing factor $\gamma$ (e.g., 0.999)
at each iteration. In contrast to this, $\text{SLGH}_\text{d}$ updates $t$
while using derivative information. Details are described in Algorithm~\ref{alg:GH}.
Algorithm~\ref{alg:GH} transforms a double loop Algorithm~\ref{algo:Continuation} into
a single loop algorithm. This single loop structure can significantly reduce
the number of iterations while ensuring the advantages of the GH method.
\begin{figure}
\caption{Deterministic/Stochastic Single Loop GH algorithm (SLGH)}
\label{alg:GH}
\end{figure}
In the stochastic setting of \eqref{stocprob}, the gradient of $F(x,t)$ in terms of $x$ is approximated by $\nabla_x \bar{F}(x,t;\xi)$ with randomly chosen $\xi$, where $\bar{F}(x,t;\xi)$ is the GH function of $\bar{f}(x;\xi)$. Likewise, the derivative of $F(x,t)$ in terms of $t$ is approximated by $\frac{\partial \bar{F}(x,t;\xi)}{\partial t}$. The stochastic algorithm in Algorithm~\ref{alg:GH} uses one sample $\xi_k$.
We can extend the stochastic approach to a minibatch one by approximating $\nabla_x F(x,t)$ by
$\frac{1}{M}\sum_{i=1}^M\nabla_x \bar{F}(x,t;\xi_i)$ with samples $\{\xi_1,\ldots,\xi_M\}$ of some batch size $M$, but for the sake of simplicity, we here assume one sample in each iteration. In this setting, the gradient complexity matches the iteration complexity; thus, we also use the term ``iteration complexity'' in the stochastic setting.
Other methods, such as the momentum-accelerated method \cit{sutskever2013importance} and Adam \cit{kingma2014adam}, can also be applied here. According to Theorem \ref{thm: main}, the final smoothing parameter needs to be zero. Thus, we multiply $t$ by $\gamma$ even in $\text{SLGH}_{\text{d}}$ when the decrease of $t$ is insufficient. We also ensure that $t$ is larger than a sufficiently small positive value $\epsilon'>0$ during an update to prevent $t$ from becoming negative.
\subsection{Convergence analysis for SLGH} \label{sec:convSLGH}
Let us next analyze the worst-case iteration complexity for both deterministic and stochastic SLGHs, but, before that, let us first show some properties for
Gaussian smoothed function $F(x,t)$ under Assumption \ref{A1} for the original function $f(x)$.
In the complexity analyses in this paper,
we always assume that $\gamma$ is bounded from above by a universal constant $\bar{\gamma} < 1$,
which implies $1/(1 - \gamma) = O(1)$.
\begin{lemma}
Let $f(x)$ be a $L_0$-$Lipschitz$ function. Then, for any $t>0$, its Gaussian smoothed function $F(x,t)$ will then also be $L_0$-$Lipschitz$ in terms of $x$.
Let $f(x)$ be a $L_1$-$smooth$ function. Then, for any $t>0$, $F(x,t)$ will also be $L_1$-$smooth$ in terms of $x$.
\label{lem:Lip}
\end{lemma}
Lemma~\ref{lem:Lip} indicates that Assumption \ref{A1} given to the function $f(x)$ also guarantees the same properties for $F(x,t)$.
Below, we give some bounds between the smoothed function $F(x,t)$ and the original function $f(x)$.
\begin{lemma}
Let $f$ be a $L_0$-$Lipschitz$ function. Then, for any $x\in\mathbb{R}^d$, $F(x, t)$ is also $L_0\sqrt{d}$-$Lipschitz$ in terms of $t$, i.e., for any $x$, smoothing parameter values $t_1, t_2>0$, we have $|F(x,t_1) - F(x,t_2)|\leq L_0\sqrt{d}|t_1-t_2|.$
\label{lem:Lip_t}
\end{lemma}
On the basis of Lemmas~\ref{lem:Lip} and \ref{lem:Lip_t}, the convergence results of our deterministic and stochastic SLGH algorithms can be given as in
Theorems~\ref{iter_determin} and \ref{thm:iter_stochastic}, respectively. Proofs of the following theorems are given in Appendix \ref{subsec:proof_first_order}.
Let us first deal with the deterministic setting.
\begin{theorem}[\textbf{Convergence of SLGH, Deterministic setting}]
Suppose Assumption \ref{A1} holds,
and let $\hat{x}:=x_{k'},\ k' = \mathop{\mathrm{argmin}}_{k\in[T]} \| \nabla f(x_k)\|$. Set the stepsize for $x$ as $\beta=1/L_1$. Then,
for any setting of the parameter $\gamma$, $\hat{x}$ satisfies
$\|\nabla f(\hat{x})\|\leq\epsilon$ with the iteration complexity of
$T = O\left(d^{3/2}/\epsilon^2\right)$.
Further,
if we choose
$\gamma \leq d^{-\Omega(\epsilon^2)}$,
the iteration complexity can be bounded as
$T = O({1}/{\epsilon^2})$.
\label{iter_determin}
\end{theorem}
This theorem indicates that
if we choose $\gamma$ close to $1$,
then the iteration complexity can be $O\left( d^{3/2}/\epsilon^2 \right)$,
which is $O(d^{3/2})$ times larger than the $O(1/\epsilon^2)$-iteration complexity by the standard gradient descent methods \cit{nesterov2004intro}.
However,
we can remove this dependency on $d$
to obtain an iteration complexity matching that of the standard gradient descent,
by choosing $\gamma \leq d^{-\Omega(\epsilon^2)}$,
as shown in Theorem~\ref{iter_determin}.
Empirically,
settings of $\gamma$ close to $1$,
e.g., $\gamma = 0.999$,
seem to work well enough,
as demonstrated in Section~\ref{sec:exp}.
An inner loop of the double loop GH method using the standard GD requires the same complexity as the standard GD method up to constants since the objective smoothed function of inner optimization problem is $L_1$-smooth function. By considering the above results, we can see that the SLGH algorithm becomes faster than the double loop one by around the number of outer loops.
To provide theoretical analyses in the stochastic setting, we need additional standard assumptions.
\begin{assumption}{A2}\label{A2}$\ $
\renewcommand{\labelenumi}{(\roman{enumi})}
\begin{enumerate}
\item The stochastic function $\bar{f}(x;\xi)$ becomes an unbiased estimator of $f(x)$. That is, for any $x\in\mathbb{R}^d$, $f(x) = \mathbb{E}_\xi [\bar{f}(x;\xi)]$ holds.
\item For any $x\in\mathbb{R}^d$, the variance of the stochastic gradient oracle is bounded as $\mathbb{E}_{\xi}[\|\nabla_x\bar{f}(x;\xi)-\nabla f(x)\|^2] \leq \sigma^2$. Here, the expectation is taken w.r.t.~random vectors $\{\xi_k\}$.
\end{enumerate}
\end{assumption}
The following theorem shows the convergence rate in the stochastic setting.
\begin{theorem}[\textbf{Convergence of SLGH, Stochastic setting}]
Suppose Assumptions \ref{A1} and \ref{A2} hold.
Take $k_1 := \Theta(1/\epsilon^4)$ and $k_2 := O\left(\log_{\gamma} \min\{ d^{-1/2}, d^{-3/2} \epsilon^{-2} \}\right)$ and define $k_0 = \min \{ k_1, k_2 \}$. Let $\hat{x}:=x_{k'}$, where $k'$ is chosen from a uniform distribution over $\{ k_0+1, k_0+2, \ldots, T \}$.
Set the stepsize for $x$ as $\beta=\min\left\{1/L_1, 1/\sqrt{T-k_0}\right\}$.
Then,
for any setting of the parameter $\gamma$,
$\hat{x}$ satisfies
$\mathbb{E}[ \|\nabla f(\hat{x})\| ] \leq\epsilon$ with the iteration complexity of
$T = O \left(
d/\epsilon^4 + d^{3/2}/\epsilon^2
\right)$
where the expectation is taken w.r.t.~random vectors $\{\xi_k\}$.
Further,
if we choose $\gamma \leq (\max\{ d^{1/2}, d^{3/2} \epsilon^2 \})^{- \Omega(\epsilon^4)}$,
the iteration complexity can be bounded as
$T = O({1}/{\epsilon^4})$.
\label{thm:iter_stochastic}
\end{theorem}
We note that the iteration complexity of $T = O({1}/{\epsilon^4})$ for sufficiently small $\gamma$
matches that for the standard stochastic gradient descent (SGD) shown,
e.g.,
by \cit{ghadimi2013stochastic}.
\section{Zeroth-order single loop Gaussian homotopy algorithm}
\label{sec:zeroth-order}
In this section, we introduce a zeroth-order version of the SLGH algorithms. This ZOSLGH algorithm is proposed for those optimization problems in which Gaussian smoothing convolution is difficult to compute, or in which only function values can be queried.
\subsection{ZOSLGH algorithm}
For cases in which only function values are accessible, approximations for the gradient in terms of $x$ and derivative in terms of $t$ are needed.
\cit{nesterov2017random} has shown that the gradient of the smoothed function $F(x,t)$ can be represented as
\begin{align}
\nabla_x F(x,t) &= \frac{1}{t}\mathbb{E}_u([f(x+tu)-f(x)]u),\ u \sim \mathcal{N}(0,\mathrm{I}_d).
\end{align}
Thus, the gradient $\nabla_x F(x,t)$ can be approximated by an unbiased estimator $\Tilde{g}_{x}(x,t;u)$ as
\begin{align}
\Tilde{g}_{x}(x,t;u) := \frac{1}{t}(f(x+tu)-f(x))u,\ u \sim \mathcal{N}(0,\mathrm{I}_d).
\label{zogradient}
\end{align}
The derivative $\frac{\partial F}{\partial t}$ is equal to the trace of the Hessian of $F(x,t)$ because the Gaussian smoothed function is the solution of the {\it heat equation} $\frac{\partial F}{\partial t} = \mathrm{tr}(\mathrm{H}_F(x))$. We can estimate $\mathrm{tr}(\mathrm{H}_F(x))$ on the basis of the second order Stein's identity \cit{stein1972bound} as follows:
\begin{align}
\mathrm{H}_F(x) \approx \frac{(vv^\top-\mathrm{I}_d)}{t^2}(f(x+tv)-f(x)),\ v \sim \mathcal{N}(0,\mathrm{I}_d).
\end{align}
Thus, the estimator for derivative can be written as:
\begin{align}
\Tilde{g}_{t}(x,t;v) := \frac{(v^\top v-d)(f(x+tv)-f(x))}{{t}^2},\ v \sim \mathcal{N}(0,\mathrm{I}_d).
\label{zoderivative}
\end{align}
As for the stochastic setting, $f(x)$ in \eqref{zogradient} and \eqref{zoderivative} is replaced by the stochastic function $\bar{f}(x;\xi)$ with some randomly chosen sample $\xi$. The gradient
$\nabla_x \bar{F}(x,t;\xi)$ of its GH function $\bar{F}(x,t;\xi)$ can then be approximated by
$\Tilde{G}_{x}(x,t;\xi,u) := \frac{\bar f(x+tu;\xi)-\bar f(x;\xi)}{t}u$,
and the derivative $\frac{\partial \bar{F}}{\partial t}$ can be approximated by $\Tilde{G}_{t}(x,t;\xi,v) := \frac{(v^\top v-d)(\bar f(x+tv;\xi)-\bar f(x;\xi))}{{t}^2}$
(see Algorithm \ref{alg:ZOGH} for more details).
\begin{figure}
\caption{Deterministic/Stochastic Zeroth-Order Single Loop GH algorithm (ZOSLGH)}
\label{alg:ZOGH}
\end{figure}
\subsection{Convergence analysis for ZOSLGH}
We can analyze the convergence results using concepts similar to those used with the first-order SLGH algorithm.
Below are the convergence results for ZOSLGH in both the deterministic and stochastic settings.
Proofs of the following theorems are given in Appendix \ref{subsec:proof_zeroth_order}, and the definitions of $\hat{x}$ are provided in the proofs.
We start from the deterministic setting, which is aimed at the deterministic problem \eqref{nonconvprob}.
\begin{theorem}[\textbf{Convergence of ZOSLGH, Deterministic setting}]\label{thm:zo_determinstic}
Suppose Assumption \ref{A1} holds.
Take $k_1 := \Theta(d/\epsilon^2)$ and $k_2 := O\left(\log_{\gamma} d^{-1/2} \right)$, and define $k_0 = \min \{ k_1, k_2 \}$. Let $\hat{x}:=x_{k'}$, where $k'$ is chosen from a uniform distribution over $\{ k_0+1, k_0+2, \ldots, T \}$. Set the stepsize for $x$ as $\beta=1/(2(d+4)L_1)$.
Then,
for any setting of the parameter $\gamma$,
$\hat{x}$ satisfies
$\mathbb{E}[ \|\nabla f(\hat{x})\| ] \leq\epsilon$ with the iteration complexity of
$T = O(d^2 / \epsilon^2 )$,
where the expectation is taken w.r.t.~random vectors $\{u_k\}$ and $\{v_k\}$.
Further,
if we choose $\gamma \leq d^{-\Omega(\epsilon^2 / d)}$,
the iteration complexity can be bounded as
$T = O(d / {\epsilon^2})$.
\end{theorem}
This complexity of $O({d}/{\epsilon^2})$ for $\gamma \leq d^{-\Omega(\epsilon^2 / d)}$ matches that of zeroth-order GD (ZOGD) \cit{nesterov2017random}.
Let us next introduce the convergence result for the stochastic setting. As shown in \cit{ghadimi2013stochastic}, if we take the expectation for our stochastic zeroth-order gradient oracle with respect to both $\xi$ and $u$, under Assumption \ref{A2} (i), we will have
\begin{align*}
\mathbb{E}_{\xi,u} [\Tilde{G}_{x}(x,t;\xi,u)] = \mathbb{E}_u [\mathbb{E}_\xi [\Tilde{G}_{x}(x,t;\xi,u)|u]] = \nabla_x F(x,t).
\end{align*}
Therefore, $\zeta_k := (\xi_k,u_k)$ behaves similarly to $u_k$ in the deterministic setting.
\begin{theorem}[\textbf{Convergence of ZOSLGH, Stochastic setting}]
\label{thm:zo_stochastic}
Suppose Assumptions \ref{A1} and \ref{A2} hold.
Take $k_1 := \Theta(d/\epsilon^4)$ and $k_2 := O\left(\log_{\gamma} d^{-1/2} \right)$, and define $k_0 = \min \{ k_1, k_2 \}$. Let $\hat{x}:=x_{k'}$, where $k'$ is chosen from a uniform distribution over $\{ k_0+1, k_0+2, \ldots, T \}$. Set the stepsize for $x$ as $\beta=\mathop{\rm min}\{\frac{1}{2(d+4)L_1}, \frac{1}{\sqrt{(T - k_0)(d+4)}}\}$.
Then,
for any setting of the parameter $\gamma$,
$\hat{x}$ satisfies
$\mathbb{E}[ \|\nabla f(\hat{x})\| ] \leq\epsilon$ with the iteration complexity of
$T = O({d^{2}}/{\epsilon^4})$,
where the expectation is taken w.r.t.~random vectors $\{u_k\}$, $\{v_k\}$, and $\{\xi_k\}$.
Further,
if we choose $\gamma \leq d^{- \Omega(\epsilon^4/d)}$,
the iteration complexity can be bounded as
$T = O({d}/{\epsilon^4})$.
\end{theorem}
This complexity of $O(d/\epsilon^4)$ for $\gamma \leq d^{- \Omega(\epsilon^4/d)}$ also matches that of ZOSGD \cite{ghadimi2013stochastic}.
\label{sec:error}
In the previous sections, we assumed that we had access to the exact function value or a gradient oracle whose variance was finite. However, in some practical cases, we will have access only to the function values containing error, and it would be impossible to obtain accurate gradient oracles of an underlying objective function. Figure \ref{fig:smooth_and_error} illustrates such a case; although the objective function $f$ (Figure \ref{fig:smooth}) is smooth, the accessible function $f'$ (Figure \ref{fig:error}) contains some error, and thus many local minima arise. In this section, we consider optimizing a smooth objective function $f$ using only the information of $f'$. We assume that the following condition holds between $f$ and $f'$.
\begin{figure}
\caption{Illustration of a smooth objective function and the accessible function that contains error.}
\label{fig:smooth}
\label{fig:error}
\label{fig:smooth_and_error}
\end{figure}
\begin{assumption}{A3}\label{A3}$\ $
\renewcommand{\labelenumi}{(\roman{enumi})}
The supremum norm of the difference between $f$ and $f'$ is uniformly bounded:
$$\sup_{x\in\mathbb{R}^d}|f(x)-f'(x)|\leq\nu.$$
In the stochastic setting, we assume $\sup_{x\in\mathbb{R}^d}|f(x;\xi)-f'(x;\xi)|\leq\nu$ for any $\xi$.
\end{assumption}
Please note that we do not impose any other assumptions on the accessible function $f'$. Thus, $f'$ can be non-Lipschitz or even discontinuous. Even in such cases, we can develop an algorithm with a convergence guarantee because its smoothed function $F'(x,t)$ is smooth as long as $t$ is sufficiently large. In the following, we denote the Lipschitz and gradient Lipschitz constants of $F'(\cdot,t)$ as $L_0(t)$ and $L_1(t)$, respectively.
The ZOSLGH algorithm in this setting is almost the same as Algorithm \ref{alg:ZOGH}. The only difference is $\sqrt{\nu}$ rather than $\epsilon$ in the update rule of $t_{k+1}$. See the Algorithm \ref{alg:ZOGH_error} in the Appendix for a more detailed description.
We provide the convergence analysis only for the deterministic setting in the following theorem.
The proof and the definition of $\hat{x}$ are given in Appendix~\ref{subsec:proof_error_deterministic}.
For the stochastic setting, see Appendix \ref{subsec:proof_error_stochastic}.
\begin{theorem}[\textbf{Convergence of ZOSLGH with error tolerance, Deterministic setting}]
\label{thm:error_deterministic}
Suppose Assumptions~\ref{A1} and \ref{A3} hold, and set the stepsize for $x$ at iteration $k$ as $\beta_k=\frac{1}{16(d+4)L_1(t_k)}\ ,k\in[T]$.
Then,
for any setting of the parameter $\gamma$,
if the error level $\nu$ satisfies $\nu=O(\epsilon^2/d^3)$,
we can find $\hat{x}$ satisfying
$\mathbb{E}[ \|\nabla f(\hat{x})\| ] \leq\epsilon$ with the iteration complexity of
$T = O({d^{3}}/{\epsilon^2})$,
where the expectation is taken w.r.t.~random vectors $\{u_k\}$ and $\{v_k\}$.
Further,
if we choose $\gamma \leq d^{-\Omega(\epsilon^2 / d)}$,
the iteration complexity can be bounded as
$T = O({d}/{\epsilon^2})$.
\end{theorem}
\section{Experiments}
\label{sec:exp}
In this section, we present our experimental results. We conducted two experiments. The first was to compare the performance of several algorithms including the proposed ones, using test functions for optimization. We were able to confirm the effectiveness and versatility of our SLGH methods for highly non-convex functions. We also created a toy problem in which $\text{ZOSLGH}_{\text{d}}$, which utilizes the derivative information $\frac{\partial F}{\partial t}$ for the update of $t$, can decrease $t$ quickly around a minimum and find a better solution than that with $\text{ZOSLGH}_{\text{r}}$. The second experiment was to generate examples for a black-box adversarial attack with different zeroth-order algorithms. The target models were well-trained DNNs for CIFAR-10 and MNIST, respectively. All experiments were conducted using Python and Tensorflow on Intel Xeon CPU and NVIDIA Tesla P100 GPU. We show the results of only the adversarial attacks due to space limitations; other results are given in Appendix \ref{sec:test_funcs}.
\textbf{Generation of per-image black-box adversarial attack examples.} Let us consider the unconstrained black-box attack optimization problem in \cite{chen2019zo}, which is given by
\begin{align}
{\mathop{\rm minimize}\limits_{x\in \mathbb{R}^d}}\ f(x) :=& \lambda \ell(0.5\text{tanh}(\text{tanh}^{-1}(2a)+x)) + \|0.5\text{tanh}(\text{tanh}^{-1}(2a)+x)-a\|^2,\nonumber
\end{align}
where
$\lambda$ is a regularization parameter, $a$ is the input image data, and $\tanh$ is the element-wise operator which helps eliminate the constraint representing the range of adversarial examples. The first term $\ell(\cdot)$ of $f(x)$ is the loss function for the untargeted attack in \cite{carlini2017towards}, and the second term is the $L_2$ distortion of the adversarial perturbation (the lower the better). The goal of this problem is to find the perturbation that makes the loss $\ell(\cdot)$ reach its minimum while keeping the $L_2$ distortion as small as possible. The initial adversarial perturbation $x_0$ was set to $0$. We say a successful attack example has been generated when the loss $\ell(\cdot)$ becomes lower than the attack confidence (e.g., $10^{-10}$).
Let us here compare our algorithms, $\text{ZOSLGH}_{\text{r}}$ and $\text{ZOSLGH}_{\text{d}}$, to three zeroth-order algorithms: ZOSGD \cite{ghadimi2013stochastic}, ZOAdaMM \cite{chen2019zo}, and
ZOGradOpt \cite{hazan2016graduated}. ZOGradOpt is a homotopy method with a double loop structure. In contrast to this, ZOSGD and ZOAdaMM are SGD-based zeroth-order methods and thus do not change the smoothing parameter during iterations.
Table \ref{table:results} and Figure \ref{fig:plots} show results for our experiment. We can see that SGD-based algorithms are able to succeed in the first attack with far fewer iterations than our GH algorithms (e.g., Figure \ref{fig2a}, Figure \ref{fig2d}). Accordingly, the value of $L_2$ distortion decreases slightly more than with GH methods. However, SGD-based algorithms have lower success rates than do our SLGH algorithms. This is because SGD-based algorithms remain around a local minimum $x=0$ when it is difficult to attack, while GH methods can escape the local minima due to sufficient smoothing (e.g., Figure \ref{fig2b}, Figure \ref{fig2e}). Thus, the SLGH algorithms are, on average, able to attain a lower total loss than SGD-based algorithms. In a comparison within GH methods, ZOGradOpt requires more than 6500 iterations to succeed in the first attack due to its double loop structure (e.g., Figure \ref{fig2c}, Figure \ref{fig2f}). In contrast to this, our SLGH algorithms achieve a high success rate with far fewer iterations. Please note that $\text{SLGH}_{\text{d}}$ takes approximately twice the computational time per iteration compared with the other algorithms because it needs additional queries for the computation of the derivative in terms of $t$. See Appendix \ref{sec:black_box} for a more detailed presentation of the experimental setup and results.
\begin{table}[H]
\centering
\caption[]{Performance of a per-image attack over $100$ images of CIFAR-10 under $T = 10000$ iterations. ``Succ. rate'' indicates the ratio of successful attacks, ``Avg. iters to 1st succ.'' is the average number of iterations to reach the first successful attack, ``Avg. $L_2$ (succ.)'' is the average of $L_2$ distortion taken among successful attacks, and ``Avg. total loss'' is the average of total loss $f(x)$ over 100 samples. Please note that the standard deviations are large since the attack difficulty varies considerably from sample to sample.}
\begin{tabular}{cc|c|c|c|c}
\toprule
&Methods & \begin{tabular}{c}Succ. rate\end{tabular} & \begin{tabular}{c} Avg. iters \\ to 1st succ.\end{tabular} & \begin{tabular}{c} Avg. $L_2$\ \\ (succ.)\end{tabular} & \begin{tabular}{c} Avg. total loss\end{tabular}\\ \hline
SGD algo. &ZOSGD & $88\%$ & $\textbf{835} \pm 1238$ & $0.076 \pm 0.085$ & $27.70 \pm 74.80$\\
&ZOAdaMM& $85\%$ & $3335 \pm 2634$ & $\textbf{0.050} \pm 0.055$ & $20.24 \pm 62.48$ \\\hline
GH algo. &ZOGradOpt & $65\%$ & $6789 \pm 1901$ & $0.249 \pm 0.159$ & $41.45 \pm 76.04$ \\
&$\text{ZOSLGH}_{\text{r}}\ (\gamma=0.999)$ & $\textbf{93\%}$ & $4979 \pm 756$ & $0.246 \pm 0.178$ & $\textbf{14.26} \pm 54.61$ \\
&$\text{ZOSLGH}_{\text{d}}\ (\gamma=0.999)$ & $\textbf{92\%}$ & $4436 \pm 805$ & $0.150 \pm 0.084$ & $\textbf{16.49} \pm 58.69$\\
\bottomrule
\end{tabular}
\label{table:results}
\end{table}
\begin{figure}
\caption{Attack total loss versus the number of iterations for representative images.}
\label{fig2a}
\label{fig2b}
\label{fig2c}
\label{fig2d}
\label{fig2e}
\label{fig2f}
\label{fig:plots}
\end{figure}
\section{Summary and future work}
\label{sec:conclu}
We have presented here the deterministic/stochastic SLGH and ZOSLGH algorithms as well as their convergence results. They have been designed for the purpose of finding better solutions with fewer iterations by simplifying the homotopy process into a single loop. We consider this work to be a first attempt to improve the standard GH method.
Although this study has considered the case in which the accessible function contains some error and is possibly non-smooth, we assume the underlying objective function to be smooth. Further work should be carried out to investigate the case in which the objective function itself is non-smooth.
\paragraph{Acknowledgements} This work was supported by JSPS KAKENHI Grant Number 19H04069, JST ACT-I Grant Number JPMJPR18U5, and JST ERATO Grant Number JPMJER1903.
\appendix
\onecolumn
\section{Related work}
\label{sec:related_work}
\paragraph{Iteration complexity analysis for GH methods} To the best of our knowledge, there are no existing works that give a theoretical guarantee for the convergence rate except for \cite{hazan2016graduated}.\footnote{Their method is not exactly a GH method because it smooths the objective function using random variables sampled from the unit ball (or the unit sphere in a zeroth-order setting) rather than Gaussian random variables. However, for the sake of simplicity, we treat it as a GH method in this paper.} It characterized a parameterized family of non-convex functions referred to as ``$\sigma$-nice'', for which a GH algorithm converges to a global optimum. Moreover, it derived the convergence rate to an $\epsilon$-optimal solution for the $\sigma$-nice function. The framework of $\sigma$-nice imposes two conditions: (i) the solution obtained in each inner loop is located sufficiently close to an optimal solution of the optimization problem in the next inner loop; (ii) the optimization problem in each inner loop is strongly convex around its optimal solutions. Unfortunately, it is not obvious whether we can efficiently judge whether a function is ``$\sigma$-nice'', and we cannot apply the analysis results to general non-convex functions. On the other hand, this work tackles a problem of a different nature from \cite{hazan2016graduated} since it analyzes the convergence rate to an $\epsilon$-stationary point for general non-convex functions.
\paragraph{Guarantee for the value of the objective function}
\cite{mobahi2015theoretical} provided an upper bound on the objective value attained by a homotopy method.
The bound was characterized by a quantity that they referred to as ``optimization complexity'', which can be analytically computed when the objective function is expressed in some suitable basis functions such as Gaussian RBFs.
\paragraph{Other smoothing methods} Smoothing methods other than Gaussian smoothing include \cite{chen1993non, chen2012smoothing}. The smoothing kernel in those works is simpler but restricted to specific problem settings. For example, \cite{chen2012smoothing} constructs smoothing approximations for optimization problems that can be reformulated by using the plus function $(t)_{+}:=\max\{0, t\}$.
\paragraph{Zeroth-order techniques} In problem settings in which the explicit gradient of the objective function cannot be calculated but the exact function values can be queried,
zeroth-order optimization has become increasingly popular due to its potential for wide application. Such a class of applications appears in black-box adversarial attacks on deep neural networks \cite{chen2019zo}, structured prediction \cite{sokolov2016stochastic}, and reinforcement learning \cite{xu2020zeroth}. Various zeroth-order methods (ZOSGD \cite{ghadimi2013stochastic}, ZOAdaMM \cite{chen2019zo}, ZOSVRG \cite{liu2018zeroth}) have been proposed for such black-box situations. All of them have been developed from ZOGD in \cite{nesterov2017random}, which introduces random gradient-free oracles based on Gaussian smoothing with fixed $t$. This trend also applies to research on the GH method.
\cite{hazan2016graduated} developed a GH method in the zeroth-order setting for which the objective is only accessible through a noisy value oracle. \cite{shao2019graduated} proposed a GH method for hyperparameter tuning based on
\cite{hazan2016graduated} using two-point zeroth-order estimators \cite{nesterov2017random}.
\section{Proofs for theorems and lemmas in Sections \ref{sec:first-order} and \ref{sec:zeroth-order}}
\paragraph{Notation:} We sometimes denote the expectation with respect to random variables $\chi_{S+1}, \ldots, \chi_T\ (S, T\in\mathbb{N}, T>S)$ as $\mathbb{E}_\chi[\cdot]$ for the sake of simplicity.
\subsection{Theorem \ref{thm: main}}
\label{subsec:proof_optimality}
\textbf{Proof for Theorem \ref{thm: main}:}
Since the optimization problem \eqref{nonconvprob} has an optimal value $f^\ast$ by Assumption \ref{A1} (ii), for any $t\in\mathcal{T}$ and for any $x\in\mathbb{R}^d$, we have
\begin{align*}
F(x,t)-f^\ast = \mathbb{E}_u[f(x+tu)-f^\ast]\geq 0.
\end{align*}
Together with the relationship $F(x,0)=f(x)$, for any $x\in\mathbb{R}^d$, for any $t\in\mathcal{T}$ and for any optimal solution $x^\ast\in\mathbb{R}^d$ of the optimization problem \eqref{nonconvprob}, we have $F(x,t)-F(x^\ast, 0)\geq 0$. Furthermore, if we exclude cases where $f(x)$ is constant (a.e.), for any $(x,t)\in\mathbb{R}^d\times \mathcal{T}\setminus\{(x,0)\mid f(x)=f(x^\ast)\}$, we obtain
\begin{align*}
F(x,t)-f^\ast = \mathbb{E}_u[f(x+tu)-f^\ast]> 0.
\end{align*}
Therefore, a minimum of the optimization problem of the GH function ${\mathop{\rm minimize}\limits_{x\in \mathbb{R}^d, t \in \mathcal{T}}}\ F(x,t)$ is attained only at $t=0$, and the corresponding $x$ becomes an optimal solution of the original optimization problem ${\mathop{\rm minimize}\limits_{x\in {\mathbb{R}}^d}}\ f(x)$.
$\Box$
\subsection{First-order SLGH algorithm}
\label{subsec:proof_first_order}
At the beginning of the subsection, we introduce a lemma that gives upper bounds for moments of Gaussian random variables, and then prove the two lemmas which appeared in the main paper.
\begin{lemma}[\textbf{Lemma 1 in \cite{nesterov2017random}}]\label{lem:gauss_norm}
Let $u\in\mathbb{R}^d$ be a standard normal random variable. For $p\in[0,2]$, we have $\mathbb{E}_u[\|u\|^p]\leq d^{p/2}$. If $p\geq 2$, $\mathbb{E}_u[\|u\|^p]\leq (d+p)^{p/2}$ holds.
\end{lemma}
\textbf{Proof for Lemma \ref{lem:Lip}:}
According to the definition of Gaussian smoothing in the main paper, we have
\begin{align}
|F(x,t)-F(y,t)| &= \left|\int (f(x+tz)k(z)-f(y+tz)k(z)) dz\right| \nonumber\\
&\leq \int \left|f(x+tz)-f(y+tz)\right|k(z) dz \nonumber\\
&\leq \int L_0\|x-y\|k(z)dz\nonumber\\
&\leq L_0\|x-y\|\nonumber.
\end{align}
The proof of $L_1$-smoothness is similar to that of $L_0$-Lipschitzness:
\begin{align}
\|\nabla_xF(x,t)-\nabla_xF(y,t)\| &\leq \int \|\nabla f(x+tz)-\nabla f(y+tz)\|k(z) dz\nonumber\\
&\leq \int L_1\|x-y\|k(z)dz\nonumber\\
&\leq L_1\|x-y\|\nonumber.
\end{align}
$\Box$
The lemma shows that the Lipschitz constants of $F(x,t)$ and $\nabla_x F(x,t)$ in terms of $x$ are no larger than
those of $f(x)$ and $\nabla f(x)$, respectively. Therefore, we can use the Lipschitz constants $L_0$ and $L_1$ of $f(x)$ and $\nabla f(x)$
for $F(x,t)$ and $\nabla_x F(x,t)$.
\textbf{Proof for Lemma \ref{lem:Lip_t}:}
\begin{align}
|F(x,t_1)-F(x,t_2)| &= |\mathbb{E}_{u}[f(x+t_1u)-f(x+t_2u)]| \nonumber\\
&\leq \mathbb{E}_u[|f(x+t_1u)-f(x+t_2u)|]\nonumber\\
&\leq \mathbb{E}_u[L_0|t_1-t_2| \|u\|]\nonumber\\
&\leq L_0|t_1-t_2|\sqrt{d}\nonumber,
\end{align}
where the last inequality holds due to Lemma \ref{lem:gauss_norm}.
$\Box$
Before going to the convergence theorems, we introduce an additional useful lemma to estimate the gap between the gradient of the smoothed function and the true gradient.\\
\begin{lemma}\label{lem:grad_square_diff}
Let $f$ be a $L_1$-$smooth$ function.\\
\textbf{(i) (\textbf{Lemma 4 in \cite{nesterov2017random}})}
For any $x\in \mathbb{R}^d$ and $t>0$, we have
\begin{align*}
\|\nabla f(x)\|^2 \leq 2\|\nabla_x F(x,t)\|^2 + \frac{t^2}{2}L_1^2(d+6)^3.
\end{align*}
\textbf{(ii)} Further, if $f$ is $L_0$-Lipschitz, for any $x\in \mathbb{R}^d$ and $t>0$, we have
\begin{align*}
\|\nabla f(x)\|^2 \leq \|\nabla_x F(x,t)\|^2 + tL_0L_1(d+3)^{3/2}.
\end{align*}
\end{lemma}
\textbf{Proof for (ii):}
We have
\begin{align*}
\|\nabla f(x)\|^2-\|\nabla_x F(x,t)\|^2 &= (\|\nabla f(x)\|+\|\nabla_x F(x,t)\|)(\|\nabla f(x)\|-\|\nabla_x F(x,t)\|)\\
&\leq 2L_0(\|\nabla f(x)\|-\|\nabla_x F(x,t)\|)\\
&\leq 2L_0\|\nabla_x F(x,t) - \nabla f(x)\|.
\end{align*}
The term $\|\nabla_x F(x,t) - \nabla f(x)\|$ can be upper bounded as follows:
\begin{align*}
\|\nabla_x F(x,t) - \nabla f(x)\| &\leq \left\|\mathbb{E}_u\left[\left(\frac{f(x+tu)-f(x)}{t}-\langle \nabla f(x), u\rangle\right) u\right]\right\|\\
&\leq \mathbb{E}_u\left[\left|\frac{1}{t}\left(f(x+tu)-f(x)-t\langle \nabla f(x), u\rangle\right)\right|\|u\|\right]\\
&\leq \mathbb{E}_u\left[\frac{tL_1}{2}\|u\|^3\right]\\
&\leq \frac{tL_1}{2}(d+3)^{3/2},
\end{align*}
where the second-to-last inequality follows from a property of $L_1$-smooth functions ($\forall x,y\in\mathbb{R}^d,\ |f(y)-f(x)-\langle \nabla f(x), y-x\rangle|\leq \frac{L_1}{2}\|y-x\|^2$), and the last inequality holds due to Lemma \ref{lem:gauss_norm}. Therefore, we obtain
\begin{align*}
\|\nabla f(x)\|^2 \leq \|\nabla_x F(x,t)\|^2 + tL_0L_1(d+3)^{3/2}.
\end{align*}
Now, we are ready to prove Theorem \ref{iter_determin}.
\textbf{Proof for Theorem \ref{iter_determin}:}
We follow the convergence analysis of gradient descent. According to Assumption \ref{A1} and Lemma \ref{lem:Lip}, $F(x,t)$ is $L_0$-$Lipschitz$ and $L_1$-$smooth$ in terms of $x$. Therefore, we have
\begin{align}
F(x_{k+1},t_k) &\leq F(x_k,t_k) + \left<\nabla_x F(x_k,t_k),(x_{k+1}-x_k)\right> + \frac{L_1}{2}\|x_{k+1}-x_k\|^2 \nonumber\\
&= F(x_k,t_k) - \left(\beta-\frac{L_1}{2}\beta^2\right)\|\nabla_x F(x_k,t_k)\|^2\nonumber,
\end{align}
where the last equation holds due to the updating rule of the gradient descent: $x_{k+1} - x_k = -\beta\nabla_x F(x_k,t_k)$. Then, we can get the upper bound for $\|\nabla_x F(x,t)\|^2$:
\begin{align}
\left(\beta-\frac{L_1}{2}\beta^2\right)\|\nabla_x F(x_k,t_k)\|^2 &\leq F(x_k,t_k) - F(x_{k+1},t_k)\nonumber\\
&= F(x_k,t_k) - F(x_{k+1},t_{k+1}) + F(x_{k+1},t_{k+1}) - F(x_{k+1},t_k)\nonumber\\
&\leq F(x_k,t_k) - F(x_{k+1},t_{k+1}) + L_0|t_{k+1}-t_k|\sqrt{d},\nonumber
\end{align}
where the last inequality follows from Lemma \ref{lem:Lip_t}.\\
Now, sum up the above inequality for all iterations $k_0+1\leq k\leq T\ (T>k_0\in\mathbb{N})$, and denote the minimum of $f$ as $f^*$, then we have
\begin{align}
\left(\beta-\frac{L_1}{2}\beta^2\right)\sum_{k=k_0+1}^{T}\|\nabla_x F(x_k,t_k)\|^2 &\leq F(x_{k_0+1},t_{k_0+1}) - F(x_{T+1},t_{T+1}) + L_0\sqrt{d}\sum_{k=k_0+1}^T|t_{k+1}-t_k|\nonumber\\
&\leq F(x_{k_0+1},t_{k_0+1}) - f^* + L_0\sqrt{d}\sum_{k={k_0+1}}^T|t_{k+1}-t_k|\nonumber\\
&\leq f(x_{k_0+1}) - f^* + L_0\sqrt{d}\left(t_{k_0+1} + \sum_{k=k_0+1}^T|t_{k+1}-t_k|\right)
\label{normGradF},
\end{align}
where the last inequality holds due to Lemma \ref{lem:Lip_t}.
Then,
we can get the upper bound for $\|\nabla f(\hat{x})\|^2$ as
\begin{align*}
& \| \nabla f(\hat{x}) \|^2
=
{\mathop{\rm min}\limits_{k\in [T]}}\|\nabla f(x_k)\|^2
\\
&
\leq
{\mathop{\rm min}\limits_{k = k_0+1, \ldots, T}}\|\nabla f(x_k)\|^2
\\
&\leq \frac{1}{T-k_0}\sum_{k=k_0+1}^{T}\|\nabla f(x_k)\|^2\nonumber
\\
&\leq \frac{1}{T-k_0}\sum_{k=k_0+1}^{T}\|\nabla_x F(x_k,t_k)\|^2 + \frac{1}{T-k_0}L_0L_1(d+3)^{3/2}\sum_{k=k_0+1}^Tt_k\nonumber\\
&\leq \frac{2\left(f(x_{k_0+1})-f^*+L_0\sqrt{d}\left(t_{k_0+1} + \sum_{k=k_0+1}^T|t_{k+1}-t_k|\right)\right)}{(T-k_0)(2\beta-L_1\beta^2)} + \frac{1}{T-k_0}L_0L_1(d+3)^{3/2}\sum_{k=k_0+1}^Tt_k,
\end{align*}
where the third inequality holds due to Lemma \ref{lem:grad_square_diff} (ii) and the last inequality follows from \eqref{normGradF}.
If we choose the step size $\beta$ as $\frac{1}{L_1}$, we have
\begin{align}
\nonumber
& \|\nabla f(\hat{x})\|^2\\
\nonumber &\leq
\frac{2L_1\left(f(x_{k_0+1})-f^*+L_0\sqrt{d}\left(t_{k_0+1} + \sum_{k=k_0+1}^T|t_{k+1}-t_k|\right)\right)}{T-k_0} + \frac{1}{T-k_0}L_0L_1(d+3)^{3/2}\sum_{k=k_0+1}^Tt_k
\\
&= O\left(\frac{1}{T - k_0}\left(1+ d^{3/2}\sum_{k= k_0 + 1}^Tt_k\right)\right),
\label{eq:boundsquarednorm_iter_determ}
\end{align}
where the last equality holds since $\sum_{k= k_0 + 1}^T|t_{k+1}-t_k|=O\left(\sum_{k= k_0 + 1}^Tt_k\right)$ is satisfied.
If we update $t_k$ as in Algorithm~\ref{alg:GH},
we have
$
\sum_{k= k_0 + 1}^T t_{k}
\leq
\sum_{k= k_0 + 1}^T \max\{ t_1 \gamma^{k-1}, \epsilon'\}
\leq
\sum_{k= k_0 + 1}^T \left( t_1 \gamma^{k-1} + \epsilon' \right)
\leq
\frac{t_{1}\gamma^{k_0}}{1 - \gamma} + \epsilon' (T-k_0).
$
By taking $\epsilon'$ sufficiently close to $0$, together with the assumption of $1/(1-\gamma) = O(1)$, we have $\sum_{k= k_0 + 1}^T t_k =O(\gamma^{k_0})$. This implies that
$\| \nabla f(\hat{x}) \|^2 \leq O( \frac{1 + \gamma^{k_0}d^{3/2}}{T - k_0} )$.
Hence,
we can obtain
$\| \nabla f(\hat{x}) \| \leq \epsilon$ in $T = k_0 + O\left(\frac{1 + \gamma^{k_0} d^{3/2}}{\epsilon^2}\right)$ iterations.
Now, set $k_0$ as $k_0=O\left(\frac{1}{\epsilon^2}\right)$, then, the iteration complexity can be bounded as $T=O\left(\frac{d^{3/2}}{\epsilon^2}\right)$. Furthermore, when $\gamma$ is chosen as $\gamma\leq d^{-3\epsilon^2/2}$, we can obtain
$
\gamma^{k_0} = O\left(d^{-3/2}\right)
$
for some $k_0=O\left( \frac{1}{\epsilon^2} \right)$. This yields the iteration complexity of $T=O\left(\frac{1}{\epsilon^2}\right)$.
$\Box$
Before going to the proof of Theorem \ref{thm:iter_stochastic} in the stochastic setting, we prove that the stochastic gradient of the smoothed function $\nabla_x \bar{F}(x,t;\xi)$ is unbiased and has a finite variance.
\begin{lemma}
\label{lem:grad_smoothed_and_stochastic}
Suppose that $f$ satisfies Assumption \ref{A1} (i) and Assumption \ref{A2}.\\
\textbf{(i)}
The stochastic gradient of the smoothed function $\nabla_x \bar{F}(x,t;\xi)$ becomes an unbiased estimator of $\nabla_x F(x,t)$. That is, for any $x\in\mathbb{R}^d$ and $t>0$, $\mathbb{E}_\xi[\nabla_x\bar{F}(x,t;\xi)]=\nabla_x F(x,t)$ holds.\\
\textbf{(ii)}
For any $x\in\mathbb{R}^d$ and $t>0$, the variance of $\nabla_x \bar{F}(x,t;\xi)$ is bounded as $\mathbb{E}_{\xi}[\|\nabla_x\bar{F}(x,t;\xi)-\nabla_x F(x,t)\|^2] \leq \sigma^2$.
\end{lemma}
\textbf{Proof for (i):}
From Assumption~\ref{A1}~(i), we can exchange the order of integration in terms of $\xi$ and $u$, which yields that
\begin{align*}
\mathbb{E}_\xi[\nabla_x \bar{F}(x,t;\xi)]
&= \mathbb{E}_\xi \left[ \mathbb{E}_u \left[ \frac{\bar{f}(x+tu;\xi) - \bar{f}(x;\xi)}{t}u \right]\right] \\
&= \mathbb{E}_u \left[ \mathbb{E}_\xi \left[ \frac{\bar{f}(x+tu;\xi)- \bar{f}(x;\xi)}{t}u \right]\right] \\
&= \mathbb{E}_u \left[ \frac{f(x+tu) - f(x)}{t}u \right] \\
&= \nabla_x F(x,t).
\end{align*}
\textbf{Proof for (ii):}
We have
\begin{align*}
\mathbb{E}_\xi[\|\nabla_x \bar{F}(x,t;\xi)-\nabla_xF(x,t)\|^2] &= \mathbb{E}_\xi[\|\nabla_x \mathbb{E}_u[\bar{f}(x+tu;\xi)]-\nabla_x\mathbb{E}_u[f(x+tu)]\|^2]\\
&= \mathbb{E}_\xi[\|\mathbb{E}_u[\nabla_x\bar{f}(x+tu;\xi) - \nabla f(x+tu)]\|^2]\\
&\leq \mathbb{E}_\xi[\mathbb{E}_u[\|\nabla_x\bar{f}(x+tu;\xi) - \nabla f(x+tu)\|^2]]\\
&= \mathbb{E}_u[\mathbb{E}_\xi[\|\nabla_x\bar{f}(x+tu;\xi) - \nabla f(x+tu)\|^2]]\\
&\leq \sigma^2,
\end{align*}
where the second and third equalities hold due to Assumption~\ref{A1}~(i), and the last inequality follows from Assumption~\ref{A2}~(ii).
\textbf{Proof for Theorem \ref{thm:iter_stochastic}:}
Denote $\delta_k := \nabla_x \bar{F}(x_k,t_k;\xi_k)-\nabla_x F(x_k,t_k)$.
We follow the convergence analysis of stochastic gradient descent. According to Lemma \ref{lem:Lip}, since $f(x)$ is $L_0$-$Lipschitz$ and $L_1$-$smooth$, $F(x,t)$ is also $L_0$-$Lipschitz$ and $L_1$-$smooth$ in terms of $x$.
Thus, we have
\begin{align}
F(x_{k+1},t_k) &\leq F(x_k,t_k) + \left<\nabla_x F(x_k,t_k),(x_{k+1}-x_k)\right> + \frac{L_1}{2}\|x_{k+1}-x_k\|^2\nonumber\\
&= F(x_k,t_k) - \beta \left<\nabla_x F(x_k,t_k), \nabla_x \bar{F}(x_k,t_k;\xi_k)\right> + \frac{L_1}{2}\beta^2\|\nabla_x \bar{F}(x_k,t_k;\xi_k)\|^2\nonumber\\
&= F(x_k,t_k) - \left(\beta-\frac{L_1}{2}\beta^2\right)\|\nabla_x F(x_k,t_k)\|^2 - (\beta-L_1\beta^2) \left<\nabla_x F(x_k,t_k),\delta_k\right> + \frac{L_1}{2}\beta^2\|\delta_k\|^2,
\label{4}
\end{align}
where the first equation holds due to the updating rule $x_{k+1} - x_k = -\beta \nabla_x \bar{F}(x_k,t_k;\xi_k)$, and the last equation holds due to the definition of $\delta_k$. Denote
\[
A_k := - (\beta-L_1\beta^2) \left<\nabla_x F(x_k,t_k),\delta_k\right> + \frac{L_1}{2}\beta^2\|\delta_k\|^2
\]
for simplicity. From \eqref{4}, we obtain the upper bound for $\|\nabla_x F(x,t)\|^2$ as follows:
\begin{align}
\left(\beta-\frac{L_1}{2}\beta^2\right)\|\nabla_x F(x_k,t_k)\|^2 &\leq F(x_k,t_k) - F(x_{k+1},t_k) + A_k\nonumber\\
&= F(x_k,t_k) - F(x_{k+1},t_{k+1}) + F(x_{k+1},t_{k+1}) - F(x_{k+1},t_k) + A_k\nonumber\\
&\leq F(x_k,t_k) - F(x_{k+1},t_{k+1}) + L_0|t_{k+1}-t_k|\sqrt{d} + A_k,\nonumber
\end{align}
where the last inequality follows from Lemma \ref{lem:Lip_t}.\\
Now, sum up the above inequality for all iterations $k_0 + 1\leq k\leq T\ (k_0<T)$. Then we have
\begin{align}
&\left(\beta-\frac{L_1}{2}\beta^2\right)\sum_{k=k_0 + 1}^{T}\|\nabla_x F(x_k,t_k)\|^2 \nonumber \\
&\leq F(x_{k_0 + 1},t_{k_0 + 1}) - F(x_{T+1},t_{T+1}) + L_0\sqrt{d}\sum_{k=k_0 + 1}^T |t_{k+1}-t_k| + \sum_{k=k_0 + 1}^{T} A_k\nonumber\\
&\leq F(x_{k_0 + 1},t_{k_0 + 1}) - f^* + L_0\sqrt{d}\sum_{k=k_0 + 1}^T |t_{k+1}-t_k| + \sum_{k=k_0 + 1}^{T} A_k\nonumber\\
&\leq f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T |t_{k+1}-t_k|\right) + \sum_{k=k_0 + 1}^{T} A_k\nonumber.
\end{align}
Take the expectation with respect to the random vectors $\{ \xi_{k_0+1}, \ldots, \xi_T \}$, then we have
\begin{align}
&\left(\beta-\frac{L_1}{2}\beta^2\right)\sum_{k=k_0 + 1}^{T}\mathbb{E}_{\xi}[\|\nabla_x F(x_k,t_k)\|^2] \nonumber
\\
&\leq f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T \mathbb{E}_{\xi}[|t_{k+1}-t_k|]\right) + \sum_{k=k_0 + 1}^{T} \mathbb{E}_{\xi}[A_k].
\label{5}
\end{align}
The expectation of $A_k$ is evaluated as
\begin{align}
\sum_{k=k_0 + 1}^{T} \mathbb{E}_{\xi}[A_k] &= - \sum_{k=k_0 + 1}^{T}(\beta-L_1\beta^2)\mathbb{E}_{\xi}[ \left<\nabla_x F(x_k,t_k),\delta_k\right>] + \sum_{k=k_0 + 1}^{T}\frac{L_1}{2}\beta^2\mathbb{E}_{\xi}[\|\delta_k\|^2]\nonumber\\
&\leq (T - k_0) \frac{L_1}{2}\beta^2\sigma^2,
\label{6}
\end{align}
where the last inequality holds due to Lemma~\ref{lem:grad_smoothed_and_stochastic}~(ii) ($\mathbb{E}_{\xi}[\|\delta_k\|^2]\leq \sigma^2$) and the fact that each point $x_k$ is a function of the history $\xi_{[k-1]}$ in the random process, thus $\mathbb{E}_{\xi_k}[ \left<\nabla_x F(x_k,t_k),\delta_k\right>\mid\xi_{[k-1]}] = 0$.
Then,
we can estimate the upper bound for $\mathbb{E}_{\xi,k'}[\|\nabla f(\hat{x})\|^2]$ as
\begin{align*}
&\mathbb{E}_{\xi, k'}[\|\nabla f(\hat{x})\|^2]=\frac{1}{T - k_0}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{\xi}[\|\nabla f(x_k)\|^2]
\\
&
\leq \frac{1}{T - k_0}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{\xi}[\|\nabla_x F(x_k,t_k)\|^2] +\frac{1}{T - k_0}L_0L_1(d+3)^{3/2}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{\xi}[t_k]\\
&\leq \frac{2\left(f(x_{k_0 + 1})-f^*+L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T \mathbb{E}_{\xi}[|t_{k+1}-t_k|]\right)\right)}{(T - k_0)(2\beta-L_1\beta^2)} \\
&+
\frac{1}{T - k_0}L_0L_1(d+3)^{3/2}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{\xi}[t_k]+ \frac{L_1\beta^2\sigma^2}{2\beta-L_1\beta^2},
\end{align*}
where the first inequality holds due to Lemma \ref{lem:grad_square_diff} (ii) and the last inequality follows from \eqref{5} and \eqref{6}.
If the step size $\beta$ is chosen as $\beta = \mathop{\rm min}\ \{\frac{1}{L_1}, \frac{1}{\sqrt{T - k_0}}\}$, then we have
\begin{align*}
\frac{1}{2\beta-L_1\beta^2} \leq \frac{1}{\beta},
\end{align*}
\begin{align*}
\frac{1}{\beta} \leq L_1 + \sqrt{T - k_0}.
\end{align*}
Hence, we can obtain
\begin{align*}
& \frac{2\left(f(x_{k_0 + 1})-f^*+L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T \mathbb{E}_{\xi}[|t_{k+1}-t_k|]\right)\right)}{(T - k_0)(2\beta-L_1\beta^2)}\\
& + \frac{1}{T - k_0}L_0L_1(d+3)^{3/2}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{\xi}[t_k] + \frac{L_1\beta^2\sigma^2}{2\beta-L_1\beta^2}\\
& = O \left(\frac{1+\sqrt{d}\mathbb{E}_{\xi}\left[\sum_{k=k_0 + 1}^T|t_{k+1}-t_k|\right]}{\sqrt{T - k_0}}+\frac{d^{3/2}}{T - k_0}\mathbb{E}_{\xi}\left[\sum_{k=k_0 + 1}^{T}t_k\right]\right).
\end{align*}
If $t_k$ is updated as in Algorithm~\ref{alg:GH},
we have
$
\sum_{k=k_0 + 1}^T |t_{k+1} - t_k| \leq t_1\gamma^{k_0} = O(\gamma^{k_0})
$
and
$
\sum_{k=k_0 + 1}^T t_{k} \leq \frac{t_1\gamma^{k_0}}{1-\gamma} + \epsilon'(T-k_0) = O(\gamma^{k_0})
$ by the same argument as in the proof of Theorem~\ref{iter_determin}.
Combining the above inequalities,
we obtain
\begin{align}
\mathbb{E}_{\xi, k'}[\|\nabla f(\hat{x})\|^2]
=
\frac{1}{T - k_0}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{\xi}[\|\nabla f(x_k)\|^2]
=
O \left( \frac{1 + \sqrt{d}\gamma^{k_0}}{\sqrt{T - k_0}} + \frac{d^{3/2} \gamma^{k_0}}{T - k_0} \right).
\label{eq:boundsquarednorm_iter_stoc}
\end{align}
Here, we have
$
k_0 = O \left(
\frac{1}{\epsilon^4}
\right)
$
by the definition of $k_0$.
Thus, by setting
$
T
= k_0 + O \left(
\frac{d}{\epsilon^4} + \frac{d^{3/2}}{\epsilon^2}
\right)
=
O \left(
\frac{d}{\epsilon^4} + \frac{d^{3/2}}{\epsilon^2}
\right)
$,
we can obtain
$
\mathbb{E}_{\xi, k'}[\|\nabla f(\hat{x})\|^2] \leq \epsilon^2
$.
This implies
$
\mathbb{E}_{\xi, k'}[\|\nabla f(\hat{x})\|] \leq \epsilon
$
as
$
\mathbb{E}_{\xi, k'}[\|\nabla f(\hat{x})\|]^2
\leq
\mathbb{E}_{\xi, k'}[\|\nabla f(\hat{x})\|^2]
$
follows from Jensen's inequality. Furthermore, when $\gamma$ is chosen as
$\gamma \leq (\max\{ d^{1/2}, d^{3/2} \epsilon^{2} \})^{- \epsilon^4}$
, we have
$
\log_\gamma \min\{ d^{-1/2}, d^{-3/2} \epsilon^{-2} \} = O\left(\frac{1}{\epsilon^4}\right)
$, which implies $k_0 = \Omega(\log_\gamma \min\{ d^{-1/2}, d^{-3/2} \epsilon^{-2} \}).$
Therefore, we can obtain
$
\gamma^{k_0} = O\left(\min\{ d^{-1/2}, d^{-3/2} \epsilon^{-2} \}\right)
$
, which yields the iteration complexity of $T=O\left(\frac{1}{\epsilon^4}\right)$.
$\Box$
\subsection{Zeroth-order SLGH algorithm}
\label{subsec:proof_zeroth_order}
In the zeroth-order setting, we can evaluate the gap between the zeroth-order gradient estimator and the true gradient using the following lemma.\\
\begin{lemma}
[\textbf{Theorem 4 in \cite{nesterov2017random}}]\label{lem:zo_grad_square_diff}
Let $f$ be a $L_1$-$smooth$ function, then for any $x\in \mathbb{R}^d$ and for any $t>0$, we have
\begin{align*}
\mathbb{E}_u\left[\frac{1}{t^2}(f(x+tu)-f(x))^2\|u\|^2\right] \leq \frac{t^2}{2}L_1^2(d+6)^3 + 2(d+4)\|\nabla f(x)\|^2.
\end{align*}
\end{lemma}
\textbf{Proof for Theorem \ref{thm:zo_determinstic}:}
Let $w_k:=(u_k, v_k),\ k\in[T]$, and denote $\delta_k := \Tilde{g}_x(x_k,t_k;u_k)-\nabla_x F(x_k,t_k)$, where $\Tilde{g}_x(x_k,t_k;u_k)$ is the zeroth-order estimator of gradient defined in the main paper. Utilize the updating rule of $x$ and $L_1$-smoothness of $F(x,t)$ in terms of $x$. Then we have
\begin{align}
F(x_{k+1},t_k) &\leq F(x_k,t_k) + \left<\nabla_x F(x_k,t_k),(x_{k+1}-x_k)\right> + \frac{L_1}{2}\|x_{k+1}-x_k\|^2\nonumber\\
&= F(x_k,t_k) - \beta \left<\nabla_x F(x_k,t_k),\Tilde{g}_x(x_k,t_k;u_k)\right> + \frac{L_1}{2}\beta^2\|\Tilde{g}_x(x_k,t_k;u_k)\|^2\nonumber\\
&= F(x_k,t_k) - \beta\|\nabla_x F(x_k,t_k)\|^2 - \beta \left<\nabla_x F(x_k,t_k),\delta_k\right> + \frac{L_1}{2}\beta^2\|\Tilde{g}_x(x_k,t_k;u_k)\|^2,
\label{7}
\end{align}
where the first equation holds due to the updating rule $x_{k+1} - x_k = -\beta \Tilde{g}_x(x_k,t_k;u_k)$.\\
Denote
\[B_k := - \beta \left<\nabla_x F(x_k,t_k),\delta_k\right> + \frac{L_1}{2}\beta^2\|\Tilde{g}_x(x_k,t_k;u_k)\|^2
\]
for simplicity. From Lemma \ref{lem:Lip_t} and \eqref{7}, we get the upper bound for $\|\nabla_x F(x,t)\|^2$ as
\begin{align}
\beta \|\nabla_x F(x_k,t_k)\|^2 &\leq F(x_k,t_k) - F(x_{k+1},t_k) + B_k\nonumber\\
&= F(x_k,t_k) - F(x_{k+1},t_{k+1}) + F(x_{k+1},t_{k+1}) - F(x_{k+1},t_k) + B_k\nonumber\\
&\leq F(x_k,t_k) - F(x_{k+1},t_{k+1}) + L_0|t_{k+1}-t_k|\sqrt{d} + B_k\nonumber.
\end{align}
Now, sum up the above inequality for all iterations $k_0+1\leq k\leq T\ (k_0<T)$. Then we have
\begin{align}
\sum_{k=k_0+1}^{T}\beta\|\nabla_x F(x_k,t_k)\|^2 &\leq F(x_{k_0 + 1},t_{k_0 + 1}) - F(x_{T+1},t_{T+1}) + L_0\sum_{k=k_0 + 1}^T |t_{k+1}-t_k|\sqrt{d} + \sum_{k=k_0 + 1}^{T}B_k\nonumber\\
&\leq F(x_{k_0 + 1},t_{k_0 + 1}) - f^* + L_0\sqrt{d}\sum_{k=k_0 + 1}^T |t_{k+1}-t_k| + \sum_{k=k_0 + 1}^{T}B_k\nonumber\\
&\leq f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T |t_{k+1}-t_k|\right) + \sum_{k=k_0 + 1}^{T}B_k\nonumber.
\end{align}
Next, take the expectations with respect to random vectors $\{w_{k_0+1}, \ldots, w_T \}$ on both sides. Then we can get
\begin{align}
\sum_{k=k_0 + 1}^{T}\beta \mathbb{E}_{w}[\|\nabla_x F(x_k,t_k)\|^2] &\leq f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T \mathbb{E}_w[|t_{k+1}-t_k|]\right)\nonumber \\
&+ \sum_{k=k_0 + 1}^{T} \mathbb{E}_{w} [B_k].
\label{33}
\end{align}
Observe by the definition of $\Tilde{g}_x(x_k,t_k;u_k)$ in the main paper that $\mathbb{E}_{u_k}[\Tilde{g}_x(x_k,t_k;u_k)\mid u_{[k-1]}] = \nabla_x F(x_k,t_k)$, thus $\mathbb{E}_{w_k}[\left<\nabla_x F(x_k,t_k),\delta_k\right>\mid w_{[k-1]}] = 0$ holds.
Then we have
\begin{align}\label{B_k}
\mathbb{E}_{w_k} [B_k\mid w_{[k-1]}]
&= -\beta \mathbb{E}_{w_k}[ \left<\nabla_x F(x_k,t_k),\delta_k\right>\mid w_{[k-1]}] + \frac{L_1}{2}\beta^2\mathbb{E}_{w_k}[\|\Tilde{g}_x(x_k,t_k;u_k)\|^2\mid w_{[k-1]}]\nonumber\\
&\leq \frac{L_1}{2}\beta^2\left(\frac{\mathbb{E}_{w_k} [t_k^2\mid w_{[k-1]}]}{2}L_1^2(d+6)^3+2(d+4) \mathbb{E}_{w_k}[\|\nabla f(x_k)\|^2\mid w_{[k-1]}]\right)\nonumber\\
&= \frac{\mathbb{E}_{w_k} [t_k^2\mid w_{[k-1]}]}{4}L_1^3\beta^2(d+6)^3+L_1\beta^2(d+4) \mathbb{E}_{w_k}[\|\nabla f(x_k)\|^2\mid w_{[k-1]}],
\end{align}
where the inequality holds due to Lemma \ref{lem:zo_grad_square_diff}.
Lemma \ref{lem:grad_square_diff} (ii) together with the above inequalities yields that
\begin{align}
&\sum_{k=k_0 + 1}^{T}\beta \mathbb{E}_{w}[\|\nabla f(x_k)\|^2]\nonumber \\
&\leq \sum_{k=k_0 + 1}^{T}\beta \mathbb{E}_{w}[\|\nabla_x F(x_k,t_k)\|^2] + \sum_{k=k_0 + 1}^{T}\beta L_0L_1(d+3)^{3/2}\mathbb{E}_{w}[t_k]\nonumber\\
&\leq
f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T \mathbb{E}_w[|t_{k+1}-t_k|]\right) + \sum_{k=k_0 + 1}^{T} \mathbb{E}_{w} [B_k]\nonumber\\
&+
\sum_{k=k_0 + 1}^{T}\beta L_0L_1(d+3)^{3/2}\mathbb{E}_{w}[t_k]\nonumber\\
&\leq
f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T \mathbb{E}_w[|t_{k+1}-t_k|]\right)+\sum_{k=k_0 + 1}^{T}\frac{\mathbb{E}_w[t_k^2]}{4}L_1^3\beta^2(d+6)^3\nonumber\\
&+\sum_{k=k_0 + 1}^{T}L_1\beta^2(d+4) \mathbb{E}_{w}[\|\nabla f(x_k)\|^2] +\sum_{k=k_0 + 1}^{T}\beta L_0L_1(d+3)^{3/2}\mathbb{E}_{w}[t_k],
\end{align}
where the second inequality holds due to \eqref{33}, and the last inequality follows from \eqref{B_k}.
Rearrange the terms in the above inequality. Then we can get
\begin{align}
(\beta-(d+4)L_1\beta^2)\sum_{k=k_0 + 1}^{T}\mathbb{E}_{w}[\|\nabla f(x_k)\|^2] &\leq f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T \mathbb{E}_w[|t_{k+1}-t_k|]\right)\nonumber\\&+\frac{L_1^3\beta^2(d+6)^3}{4}\sum_{k=k_0 + 1}^{T}\mathbb{E}_w[t_k^2]+ L_0L_1\beta(d+3)^{3/2}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{w}[t_k].
\end{align}
Divide both sides of the above inequality by $(T - k_0)(\beta-(d+4)L_1\beta^2)$ and set the step size $\beta$ as $\frac{1}{2(d+4)L_1}$. Since $\frac{1}{\beta-(d+4)L_1\beta^2} \leq 4(d+4)L_1$ holds, we can obtain
\begin{align}
\frac{1}{T - k_0}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{w}[\|\nabla f(x_k)\|^2]
&\leq \frac{4(d+4)L_1}{T - k_0} \left(f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T \mathbb{E}_w[|t_{k+1}-t_k|]\right)\right.\nonumber\\
&\left.+\frac{L_1(d+6)^3}{16(d+4)^2}\sum_{k=k_0 + 1}^{T}\mathbb{E}_w[t_k^2]+ \frac{L_0(d+3)^{3/2}}{2(d+4)}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{w}[t_k]\right)\nonumber\\
&= O\left(\frac{d}{T - k_0}\left(1+d \mathbb{E}_w\left[ \sum_{k=k_0 + 1}^T t_k^2 \right] + \sqrt{d} \mathbb{E}_w \left[\sum_{k=k_0 + 1}^T t_k \right]\right)\right) \nonumber
\\
&
=O\left(
\frac{d}{T - k_0}
\left(1 + d\gamma^{2k_0} + \sqrt{d}\gamma^{k_0} \right)
\right),
\end{align}
where the last equality follows from the update rule of $t_k$,
as shown in the proof of Theorem~\ref{iter_determin} as well.
Here, we have
$
k_0 = O \left(
\frac{d}{\epsilon^2}
\right)
$
by the definition of $k_0$. Thus, by setting
$
T
= k_0 + O \left(
\frac{d^2}{\epsilon^2}
\right)
=
O \left(
\frac{d^2}{\epsilon^2}
\right)
$,
we can obtain
$
\mathbb{E}_{w, k'}[\|\nabla f(\hat{x})\|^2]
=
\frac{1}{T - k_0}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{w}[\|\nabla f(x_k)\|^2]
\leq \epsilon^2
$.
This implies
$
\mathbb{E}_{w, k'}[\|\nabla f(\hat{x})\|] \leq \epsilon
$
as
$
\mathbb{E}_{w, k'}[\|\nabla f(\hat{x})\|]^2
\leq
\mathbb{E}_{w, k'}[\|\nabla f(\hat{x})\|^2]
$
follows from Jensen's inequality. Furthermore, when $\gamma$ is chosen as
$\gamma \leq d^{-\epsilon^2/(2d)}$
, we have
$
\log_\gamma d^{-1/2} = O\left( \frac{d}{\epsilon^2} \right)
$,
which implies
$
k_0 = \Omega \left( \log_\gamma d^{-1/2} \right)
$. Therefore, we can obtain
$
\gamma^{k_0}
=
O( d^{-1/2} )
$,
which yields the iteration complexity of $T=O\left(\frac{d}{\epsilon^2}\right)$.
$\Box$
\textbf{Proof for Theorem \ref{thm:zo_stochastic}:}
Let $\zeta_k := (\xi_k,u_k, v_k)$, $k\in [T]$ and denote $\delta_k := \Tilde{G}_x(x_k,t_k;\xi_k,u_k)-\nabla_x F(x_k,t_k)$.
As discussed in the main paper, we have \begin{align}
\mathbb{E}_{\xi,u} [\Tilde{G}_{x}(x,t;\xi,u)] = \mathbb{E}_u [\mathbb{E}_\xi [\Tilde{G}_{x}(x,t;\xi,u)|u]] = \nabla_x F(x,t).
\label{expuxi}
\end{align}
From the update rule for $x$, we can obtain
\begin{align}
F(x_{k+1},t_k) &\leq F(x_k,t_k) + \left<\nabla_x F(x_k,t_k),(x_{k+1}-x_k)\right> + \frac{L_1}{2}\|x_{k+1}-x_k\|^2\nonumber\\
&= F(x_k,t_k) - \beta \left<\nabla_x F(x_k,t_k),\Tilde{G}_x(x_k,t_k;\xi_k, u_k)\right> + \frac{L_1}{2}\beta^2\|\Tilde{G}_x(x_k,t_k;\xi_k, u_k)\|^2\nonumber\\
&= F(x_k,t_k) - \beta\|\nabla_x F(x_k,t_k)\|^2 - \beta \left<\nabla_x F(x_k,t_k),\delta_k\right> + \frac{L_1}{2}\beta^2\|\Tilde{G}_x(x_k,t_k;\xi_k, u_k)\|^2\nonumber.
\end{align}
Now, denote
\[
D_k := - \beta \left<\nabla_x F(x_k,t_k),\delta_k\right> + \frac{L_1}{2}\beta^2\|\Tilde{G}_x(x_k,t_k;\xi_k, u_k)\|^2
\]
for simplicity. Then, we can get the upper bound for $\|\nabla_x F(x,t)\|^2$ with $D_k$:
\begin{align}
\beta \|\nabla_x F(x_k,t_k)\|^2 &\leq F(x_k,t_k) - F(x_{k+1},t_k) + D_k\nonumber\\
&= F(x_k,t_k) - F(x_{k+1},t_{k+1}) + F(x_{k+1},t_{k+1}) - F(x_{k+1},t_k) + D_k\nonumber\\
&\leq F(x_k,t_k) - F(x_{k+1},t_{k+1}) + L_0|t_{k+1}-t_k|\sqrt{d} + D_k.\nonumber
\end{align}
Sum up the above inequality for all iterations $k_0 + 1\leq k\leq T\ (T>k_0)$. Then we have
\begin{align}
&\sum_{k=k_0 + 1}^{T}\beta\|\nabla_x F(x_k,t_k)\|^2 \nonumber \\
&\leq F(x_{k_0 + 1},t_{k_0 + 1}) - F(x_{T+1},t_{T+1}) + L_0\sqrt{d}\sum_{k=k_0 + 1}^T|t_{k+1}-t_k| + \sum_{k=k_0 + 1}^{T}D_k\nonumber\\
&\leq F(x_{k_0 + 1},t_{k_0 + 1}) - f^* + L_0\sqrt{d}\sum_{k=k_0 + 1}^T|t_{k+1}-t_k| + \sum_{k=k_0 + 1}^{T}D_k\nonumber\\
&\leq f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T|t_{k+1}-t_k|\right) + \sum_{k=k_0 + 1}^{T}D_k,
\label{45}
\end{align}
where the last inequality follows from Lemma \ref{lem:Lip_t}.
Observe from \eqref{expuxi} that
\begin{align}
\mathbb{E}_{\zeta_k} [ \left<\nabla_x F(x_k,t_k),\delta_k\right>\mid\zeta_{[k-1]}] = 0\nonumber.
\end{align}
Thus, we have
\begin{align}
\mathbb{E}_{\zeta_k} [D_k\mid \zeta_{[k-1]}] &= -\beta \mathbb{E}_{\zeta_k}[ \left<\nabla_x F(x_k,t_k),\delta_k\right>\mid \zeta_{[k-1]}] + \frac{L_1}{2}\beta^2\mathbb{E}_{\zeta_k}[\|\Tilde{G}_x(x_k,t_k;\xi_k, u_k)\|^2\mid \zeta_{[k-1]}] \nonumber\\
&= \frac{L_1}{2}\beta^2\mathbb{E}_{\zeta_k}(\|\Tilde{G}_x(x_k,t_k;\xi_k, u_k)\|^2\mid \zeta_{[k-1]})\nonumber\\
&\leq \frac{L_1}{2}\beta^2\left(\frac{\mathbb{E}_{\zeta_k}[t_k^2\mid\zeta_{[k-1]}]}{2}L_1^2(d+6)^3+2(d+4) \mathbb{E}_{\zeta_k}[\|\nabla_x \bar{f}(x_k;\xi_k)\|^2\mid\zeta_{[k-1]}]\right)\nonumber\\
&\leq \frac{L_1}{2}\beta^2\left(\frac{\mathbb{E}_{\zeta_k}[t_k^2\mid\zeta_{[k-1]}]}{2}L_1^2(d+6)^3+2(d+4) \left(\mathbb{E}_{\zeta_k}[\|\nabla f(x_k)\|^2\mid\zeta_{[k-1]}]+\sigma^2\right)\right),\label{47}
\end{align}
where the first inequality follows from Lemma~\ref{lem:zo_grad_square_diff} and the last inequality holds due to Assumption~\ref{A2}~(ii).
Take the expectation for \eqref{45} with respect to $\zeta_{k_0 + 1}, \ldots, \zeta_{T}$. Together with Lemma \ref{lem:grad_square_diff} (ii), we have
\begin{align}
&\sum_{k=k_0 + 1}^{T}\beta \mathbb{E}_{\zeta}[\|\nabla f(x_k)\|^2] \nonumber \\
&\leq \sum_{k=k_0 + 1}^{T}\beta \mathbb{E}_{\zeta}[\|\nabla_x F(x_k,t_k)\|^2] + \sum_{k=k_0 + 1}^T\beta\mathbb{E}_\zeta[t_k]L_0L_1(d+3)^{3/2} \nonumber\\
&\leq
f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1} + \sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[|t_{k+1}-t_k|]\right) + \sum_{k=k_0 + 1}^T \mathbb{E}_\zeta[D_k] \nonumber\\
&+
\sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[t_k]L_0L_1\beta(d+3)^{3/2} \nonumber\\
&\leq
f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1} + \sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[|t_{k+1}-t_k|]\right)+\sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[t_k]L_0L_1\beta(d+3)^{3/2}\nonumber\\
&+
\sum_{k=k_0 + 1}^T \frac{\mathbb{E}_\zeta[t_k^2]}{4}L_1^3\beta^2(d+6)^3+\sum_{k=k_0 + 1}^T L_1\beta^2(d+4)\mathbb{E}_\zeta[\|\nabla f(x_k)\|^2]+L_1\beta^2(d+4)\sigma^2(T-k_0),\nonumber
\end{align}
where the last inequality holds due to \eqref{47}.
Rearrange the terms in the above inequality. Then we can get
\begin{align}
(\beta-(d+4)L_1\beta^2)\sum_{k=k_0 + 1}^{T}\mathbb{E}_{\zeta}[\|\nabla f(x_k)\|^2] &\leq f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1} + \sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[|t_{k+1}-t_k|]\right)\nonumber\\
&+ \frac{L_1^3\beta^2(d+6)^3}{4}\sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[t_k^2] + L_1\beta^2(d+4)\sigma^2(T - k_0) \nonumber\\
&+\sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[t_k]L_0L_1\beta(d+3)^{3/2}.
\label{100}
\end{align}
If the step size $\beta$ is chosen as $\min \left\{\frac{1}{2(d+4)L_1}, \frac{1}{\sqrt{(T-k_0)(d+4)}}\right\}$, then we have
\begin{align*}
\frac{1}{\beta-(d+4)L_1\beta^2} \leq \frac{2}{\beta},\quad \frac{1}{\beta} \leq 2(d+4)L_1 + \sqrt{(T - k_0)(d+4)}.
\end{align*}
Hence,
by dividing both sides of \eqref{100} by $(T - k_0)(\beta-(d+4)L_1\beta^2)$, we can obtain
\begin{align}
\nonumber
&\frac{1}{T - k_0}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{\zeta}[\|\nabla f(x_k)\|^2] \\
\nonumber
&\leq
\frac{f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1} + \sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[|t_{k+1}-t_k|]\right) +L_0L_1(d+3)^{3/2}\beta\sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[t_k]}{(T - k_0)(\beta-(d+4)L_1\beta^2)}\\
\nonumber
&+\frac{\frac{L_1^3\beta^2(d+6)^3}{4}\sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[t_k^2]+L_1\beta^2(d+4)\sigma^2(T - k_0)}{{(T - k_0)(\beta-(d+4)L_1\beta^2)}}\\
\nonumber
&\leq \frac{2}{T - k_0}\left(f(x_{k_0 + 1}) - f^* + L_0\sqrt{d}\left(t_{k_0 + 1} + \sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[|t_{k+1}-t_k|]\right)\right)\left(2(d+4)L_1+\sqrt{(T - k_0)(d+4)}\right)\\
\nonumber
&+\frac{2}{T - k_0}L_0L_1(d+3)^{3/2}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{\zeta}[t_k]+\frac{L_1^3\beta(d+6)^3}{2(T - k_0)}\sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[t_k^2]
+2L_1\beta(d+4)\sigma^2\\
\nonumber
&= O\left(\frac{\sqrt{d}\left(1+\sqrt{d}\sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[|t_{k+1}-t_k|]\right)}{\sqrt{T - k_0}}+\frac{d\left(d\mathbb{E}_\zeta\left[ \sum_{k=k_0 + 1}^T t_k^2 \right]+ \sqrt{d}\mathbb{E}_\zeta\left[\sum_{k=k_0 + 1}^Tt_k \right]+1\right)}{T - k_0}\right)
\\
&= O\left(\frac{\sqrt{d}\left(1+\sqrt{d}\gamma^{k_0}\right)}{\sqrt{T - k_0}}+\frac{d\left(d\gamma^{2k_0}+\sqrt{d}\gamma^{k_0}+1\right)}{T - k_0}\right), \nonumber
\end{align}
where the last equality follows from the update rule of $t_k$,
as shown in the proof of Theorem~\ref{iter_determin} as well.
Here, we have
$
k_0 = O \left(
\frac{d}{\epsilon^4}
\right)
$
by the definition of $k_0$. Thus, by setting
$
T
= k_0 + O \left(
\frac{d^2}{\epsilon^4}
\right)
=
O \left(
\frac{d^2}{\epsilon^4}
\right)
$,
we can obtain
$
\mathbb{E}_{\zeta, k'}[\|\nabla f(\hat{x})\|^2]
=
\frac{1}{T - k_0}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{\zeta}[\|\nabla f(x_k)\|^2]
\leq \epsilon^2
$.
This implies
$
\mathbb{E}_{\zeta, k'}[\|\nabla f(\hat{x})\|] \leq \epsilon
$
as
$
\mathbb{E}_{\zeta, k'}[\|\nabla f(\hat{x})\|]^2
\leq
\mathbb{E}_{\zeta, k'}[\|\nabla f(\hat{x})\|^2]
$
follows from Jensen's inequality. Furthermore, when $\gamma$ is chosen as
$\gamma \leq d^{-\epsilon^4/(2d)}$
, we have
$
\log_\gamma d^{-1/2} = O\left( \frac{d}{\epsilon^4} \right)
$, which implies that
$
k_0 = \Omega\left( \log_\gamma d^{-1/2} \right)
$. Therefore, we can obtain
$
\gamma^{k_0}
=
O( d^{-1/2} )
$, which yields the iteration complexity of $T=O\left(\frac{d}{\epsilon^4}\right)$.
$\Box$
\section{ZOSLGH algorithm with error tolerance}
\label{sec:error_appendix}
In Sections \ref{sec:first-order} and \ref{sec:zeroth-order}, we assumed that we had access to the exact function value or a gradient oracle whose variance was finite. However, in some practical cases, we will have access only to the function values containing error, and it would be impossible to obtain accurate gradient oracles of an underlying objective function. Figure \ref{fig:smooth_and_error} illustrates such a case; although the objective function $f$ (Figure \ref{fig:smooth}) is smooth, the accessible function $f'$ (Figure \ref{fig:error}) contains some error, and thus many local minima arise. In this section, we consider optimizing a smooth objective function $f$ using only the information of $f'$. We assume that the following condition holds between $f$ and $f'$.
\begin{assumption}{A3}\label{A3}$\ $
\renewcommand{\labelenumi}{(\roman{enumi})}
The supremum norm of the difference between $f$ and $f'$ is uniformly bounded:
\[\sup_{x\in\mathbb{R}^d}|f(x)-f'(x)|\leq\nu.\]
In the stochastic setting, we assume $\sup_{x\in\mathbb{R}^d}|f(x;\xi)-f'(x;\xi)|\leq\nu$ for any $\xi$.
\end{assumption}
\begin{figure}[H]
\caption{Illustration of a smooth objective function and the accessible function that contains error.}
\label{fig:smooth}
\label{fig:error}
\label{fig:smooth_and_error}
\end{figure}
Please note that we do not impose any other assumptions on the accessible function $f'$. Thus, $f'$ can be non-Lipschitz or even discontinuous. Even in such cases, we can develop an algorithm with a convergence guarantee because its smoothed function $F'(x,t)$ is smooth as far as $t$ is sufficiently large. In the following, we denote the Lipschitz and gradient Lipschitz constant of $F'(\cdot,t)$ as $L_0(t)$ and $L_1(t)$, respectively.
The ZOSLGH algorithm in this setting is almost the same as Algorithm \ref{alg:ZOGH}. The only difference is $\sqrt{\nu}$ rather than $\epsilon$ in the update rule of $t_{k+1}$. See the Algorithm \ref{alg:ZOGH_error} for a more detailed description. Please note that $F', \tilde{g}'_x, \tilde{G}'_{x,u}, \tilde{g}'_t, \tilde{G}'_{t,v}$ are defined in the same way as the no-error setting using $f'$.
\begin{algorithm}[H]
\caption{Deterministic/Stochastic Zeroth-Order Single Loop GH algorithm (ZOSLGH) with error tolerance}
\label{alg:ZOGH_error}
\begin{algorithmic}
\REQUIRE Iteration number $T$, initial solution $x_1$, initial smoothing parameter $t_1$, sequence of step sizes $\{\beta_k\}$ for $x$, step size $\eta$ for $t$, decreasing factor $\gamma \in (0,1)$, error tolerance $\nu$
\FOR{$k=1$ to $T$}
\STATE Sample $u_k$ from $\mathcal{N}(0,\mathrm{I}_d)$
\STATE Update $x_{k}$ by
\begin{align*}
&x_{k+1} = x_k - \beta_k \bar{G}'_{x,u},\\
&\text{where } \bar{G}'_{x,u} = \left\{\begin{array}{cc}
\Tilde{g}'_{x}(x_k,t_k;u_k) & (\text{deterministic}) \\
\Tilde{G}'_{x}(x_k,t_k;\xi_k,u_k),\ \xi_k\sim P & (\text{stochastic})
\end{array}\right.
\end{align*}
\STATE Sample $v_k$ from $\mathcal{N}(0,\mathrm{I}_d)$
\STATE Update $t_k$ by
\begin{align*}
&t_{k+1} = \left\{\begin{array}{cc}
\text{max} \{\gamma t_{k}, \sqrt{\nu}\} & (\text{SLGH}_{\text{r}}) \\
\text{max}\{\text{min} \{t_{k} - \eta \bar{G}'_{t,v},
\gamma t_{k}\},
\sqrt{\nu}\} & (\text{SLGH}_{\text{d}})
\end{array},\right.\\
&\text{where } \bar{G}'_{t,v} = \left\{\begin{array}{cc}
\Tilde{g}'_{t}(x_k,t_k;v_k) & (\text{deterministic}) \\
\Tilde{G}'_{t}(x_k,t_k;\xi_k,v_k),\ \xi_k\sim P & (\text{stochastic})
\end{array}\right.
\end{align*}
\ENDFOR
\end{algorithmic}
\end{algorithm}
We provide the convergence analyses in the following theorems.
The definitions of $\hat{x}$ in the deterministic and stochastic settings are given in Appendix \ref{subsec:proof_error_deterministic} and \ref{subsec:proof_error_stochastic}, respectively.
\begin{theorem}[\textbf{Convergence of ZOSLGH with error tolerance, Deterministic setting}]
\label{thm:error_deterministic}
Suppose Assumptions~\ref{A1} and \ref{A3} hold.
Take $k_1 := \Theta(d/\epsilon^2)$ and $k_2 := O\left(\log_{\gamma} 1/d \right)$ and define $k_0 = \min \{ k_1, k_2 \}$. Let $\hat{x}:=x_{k'}$, where $k'$ is chosen from a uniform distribution over $\{ k_0+1, k_0+2, \ldots, T \}$. Set the stepsize for $x$ at iteration $k$ as $\beta_k=\frac{1}{16(d+4)L_1(t_k)}\ ,k\in[T]$.
Then,
for any setting of the parameter $\gamma$,
if the error level $\nu$ satisfies $\nu=O(\epsilon^2/d^3)$,
$\hat{x}$ satisfies
$\mathbb{E}[ \|\nabla f(\hat{x})\| ] \leq\epsilon$ with the iteration complexity of
$T = O({d^{3}}/{\epsilon^2})$,
where the expectation is taken w.r.t.~random vectors $\{u_k\}$ and $\{v_k\}$.
Further,
if we choose $\gamma \leq d^{-\Omega(\epsilon^2 / d)}$,
the iteration complexity can be bounded as
$T = O({d}/{\epsilon^2})$.
\end{theorem}
\begin{theorem}[\textbf{Convergence of ZOSLGH with error tolerance, Stochastic setting}]
\label{thm:error_stochastic}
Suppose Assumptions~\ref{A1}, \ref{A2} and \ref{A3} hold.
Take $k_1 := \Theta(d/\epsilon^4)$ and $k_2 := O\left(\log_{\gamma} 1/d \right)$ and define $k_0 = \min \{ k_1, k_2 \}$. Let $\hat{x}:=x_{k'}$, where $k'$ is chosen from a uniform distribution over $\{ k_0+1, k_0+2, \ldots, T \}$. Set the stepsize for $x$ at iteration $k$ as $\beta_k=\min\left\{\frac{1}{16(d+4)L_1(t_k)}, \frac{1}{\sqrt{(T - k_0)(d+4)}}\right\}$.
Then, for any setting of the parameter $\gamma$, if the error level $\nu$ satisfies $\nu=O(\epsilon^2/d^3)$, $\hat{x}$ satisfies $\mathbb{E}[\|\nabla f(\hat{x})\|]\leq\epsilon$ with the iteration complexity of $T=O(d^2/\epsilon^4+d^3/\epsilon^2)$, where the expectation is taken w.r.t. random vectors $\{u_k\}, \{v_k\}$ and $\{\xi_k\}$. Further, if we choose $\gamma \leq d^{-\Omega(\epsilon^4 / d)}$, the iteration complexity can be bounded as $T=O(d/\epsilon^4)$.
\end{theorem}
\subsection{Proofs for technical lemmas}
\label{subsec:tech_lemmas}
We introduce several lemmas before going to the convergence analysis. All of them describe properties of the function with error $f'$ and its Gaussian smoothing $F'$. Throughout this subsection, we assume that $f$ is an $L_0$-Lipschitz and $L_1$-smooth function. We also suppose that the function pair $(f, f')$ satisfies $\sup_{x\in\mathbb{R}^d}|f(x)-f'(x)|\leq\nu$.
\begin{lemma}
For any $x\in \mathbb{R}^d$ and $t>0$, we have
\begin{align*}
\mathbb{E}_u\left[\frac{1}{t^2}(f'(x+tu)-f'(x))^2\|u\|^2\right] \leq 4(d+4)\|\nabla f(x)\|^2 + t^2L_1^2(d+6)^3 + 8d\frac{\nu^2}{t^2}.
\end{align*}
\label{lem_error:zo_grad_square_diff}
\end{lemma}
\textbf{Proof:}
\begin{align*}
\mathbb{E}_u\left[\frac{1}{t^2}(f'(x+tu)-f'(x))^2\|u\|^2\right] &= \mathbb{E}_u\left[\frac{1}{t^2}(f(x+tu)-f(x) + (f'-f)(x+tu)-(f'-f)(x))^2\|u\|^2\right]\\
&\leq 2\mathbb{E}_u\left[\frac{1}{t^2}(f(x+tu)-f(x))^2\|u\|^2\right] + 2\mathbb{E}_u\left[\frac{1}{t^2}(2\nu)^2\|u\|^2\right]\\
&\leq 4(d+4)\|\nabla f(x)\|^2 + t^2L_1^2(d+6)^3 + 8d\frac{\nu^2}{t^2},
\end{align*}
where the last inequality holds due to Lemma~\ref{lem:gauss_norm} and Lemma~\ref{lem:zo_grad_square_diff}.
\begin{lemma}
For any $x\in \mathbb{R}^d$ and $t>0$, we have
\begin{align*}
\mathbb{E}_\zeta\left[\frac{1}{t^2}(\bar{f}'(x+tu;\xi)-\bar{f}'(x;\xi))^2\|u\|^2\right] \leq 4(d+4)(\|\nabla f(x)\|^2+\sigma^2) + t^2L_1^2(d+6)^3 + 8d\frac{\nu^2}{t^2}.
\end{align*}
\label{lem_error:zo_stochastic_grad_square_diff}
\end{lemma}
\textbf{Proof:}
\begin{align*}
&\mathbb{E}_\zeta\left[\frac{1}{t^2}(\bar{f}'(x+tu;\xi)-\bar{f}'(x;\xi))^2\|u\|^2\right]\\
&= \mathbb{E}_\xi\left[\mathbb{E}_u\left[\frac{1}{t^2}(\bar{f}(x+tu;\xi)-\bar{f}(x;\xi) + (\bar{f}'-\bar{f})(x+tu;\xi)-(\bar{f}'-\bar{f})(x;\xi))^2\|u\|^2\right]\right]\\
&\leq 2 \mathbb{E}_\xi\left[\mathbb{E}_u\left[\frac{1}{t^2}(\bar{f}(x+tu;\xi)-\bar{f}(x;\xi))^2\|u\|^2\right]\right] + \frac{2}{t^2}\mathbb{E}_\xi[\mathbb{E}_u[(2\nu)^2\|u\|^2]]\\
&\leq 2\mathbb{E}_\xi\left[\frac{t^2}{2}L_1^2(d+6)^3+2(d+4)\|\nabla \bar{f}(x;\xi)\|^2\right] + 8d\frac{\nu^2}{t^2}\\
&\leq 4(d+4)(\|\nabla f(x)\|^2+\sigma^2) + t^2L_1^2(d+6)^3 + 8d\frac{\nu^2}{t^2},
\end{align*}
where the second inequality follows from Lemma~\ref{lem:gauss_norm} and Lemma~\ref{lem:zo_grad_square_diff},
and the last inequality holds due to Assumption~\ref{A2}~(ii).
\begin{lemma}
\label{lem_error:Lip_t}
For any $x\in \mathbb{R}^d$ and for any $t_1,t_2\in\mathcal{T}$, we have
\begin{align*}
|F'(x,t_1)-F'(x,t_2)|\leq L_0|t_1-t_2|\sqrt{d}+2\nu.
\end{align*}
\end{lemma}
\textbf{Proof:}
\begin{align*}
|F'(x,t_1)-F'(x,t_2)| &= |F(x,t_1)-F(x,t_2) + (F'-F)(x,t_1)-(F'-F)(x,t_2)|\\
&\leq |F(x,t_1)-F(x,t_2)| + |\mathbb{E}_u[(f'-f)(x+t_1u)]|+|\mathbb{E}_u[(f'-f)(x+t_2u)]|\\
&\leq |F(x,t_1)-F(x,t_2)| + \mathbb{E}_u[|(f'-f)(x+t_1u)|]+\mathbb{E}_u[|(f'-f)(x+t_2u)|]\\
&\leq |F(x,t_1)-F(x,t_2)|+ 2\nu\\
&\leq L_0|t_1-t_2|\sqrt{d}+2\nu,
\end{align*}
where the last inequality holds due to Lemma \ref{lem:Lip_t}.
\begin{lemma}[\textbf{Lemma 30 in \cite{jin2018local}}]\label{lem:grad_diff} For any $x\in \mathbb{R}^d$ and for any $t\in\mathcal{T}$, we have
$$\|\nabla_x (F'-F)(x,t)\|\leq\sqrt{\frac{2}{\pi}}\frac{\nu}{t}.$$
\label{lem_error:grad_diff}
\end{lemma}
\begin{lemma}\label{lem:F'_lip}\quad\\
\textbf{(i)} $F'(x,t)$ is $L_0+\sqrt{\frac{2}{\pi}}\frac{\nu}{t}$-Lipschitz in terms of $x$.\\
\textbf{(ii) (\textbf{Lemma 20 in \cite{jin2018local}})} $F'(x,t)$ is $L_1+\frac{2\nu}{t^2}$-smooth in terms of $x$.
\label{lem_error:Lip}
\end{lemma}
\textbf{Proof for (i):}
\begin{align*}
\|\nabla_x F'(x,t)\|&\leq \|\nabla_x F(x,t)\| + \|\nabla_x (F'-F)(x,t)\|\\
&\leq L_0 + \sqrt{\frac{2}{\pi}}\frac{\nu}{t},
\end{align*}
where the last inequality holds due to Lemma \ref{lem:Lip} and Lemma \ref{lem:grad_diff}.
\begin{lemma}\label{lem_error:grad_square_diff} For any $x\in \mathbb{R}^d$ and $t>0$, we have
\begin{align*}
\|\nabla f(x)\|^2 \leq 4\|\nabla_x F'(x,t)\|^2 + \frac{t^2}{2}L_1^2(d+6)^3 + \frac{8}{\pi}\frac{\nu^2}{t^2}.
\end{align*}
\end{lemma}
\textbf{Proof:}
We have
\begin{align*}
\|\nabla f(x)\|^2 &= \|\mathbb{E}_u[\langle\nabla f(x), u\rangle u]\|^2\\
&= \left\|\frac{1}{t}\mathbb{E}_u[(f(x+tu)-f(x) - [f(x+tu)-f(x)-t\langle \nabla f(x), u\rangle])u]\right\|^2\\
&\leq \left\|\nabla_x F(x,t) -\frac{1}{t}\mathbb{E}_u[(f(x+tu)-f(x)-t\langle \nabla f(x), u\rangle)u]\right\|^2\\
&\leq 2\|\nabla_x F(x,t)\|^2 + \frac{2}{t^2}\left\|\mathbb{E}_u[(f(x+tu)-f(x)-t\langle \nabla f(x), u\rangle)u]\right\|^2\\
&\leq 2\|\nabla_x F(x,t)\|^2 + \frac{2}{t^2}\mathbb{E}_u[|f(x+tu)-f(x)-t\langle \nabla f(x), u\rangle|^2\|u\|^2]\\
&\leq 2\|\nabla_x F(x,t)\|^2 + \frac{t^2L_1^2}{2}\mathbb{E}_u[\|u\|^6]\\
&\leq 2\|\nabla_x F(x,t)\|^2 + \frac{t^2L_1^2}{2}(d+6)^3\\
&\leq 2(2\|\nabla_x (F-F')(x,t)\|^2 + 2\|\nabla_x F'(x,t)\|^2) + \frac{t^2L_1^2}{2}(d+6)^3\\
&\leq 4 \|\nabla_x F'(x,t)\|^2 + \frac{t^2L_1^2}{2}(d+6)^3 + \frac{8}{\pi}\frac{\nu^2}{t^2},
\end{align*}
where the third last inequality holds due to Lemma \ref{lem:gauss_norm}, and the last inequality holds due to Lemma \ref{lem:grad_diff}.
\subsection{Proof for the deterministic setting}
\label{subsec:proof_error_deterministic}
\textbf{Proof for Theorem \ref{thm:error_deterministic}:}
Let $w_k:=(u_k, v_k)$ and denote $\delta_k := \Tilde{g}'_x(x_k,t_k;u_k)-\nabla_x F'(x_k,t_k)$. Utilize the updating rule for $x$ and $L_1(t)$-smoothness of $F'(\cdot,t)$. Then we have
\begin{align}
F'(x_{k+1},t_k) &\leq F'(x_k,t_k) + \left<\nabla_x F'(x_k,t_k),(x_{k+1}-x_k)\right> + \frac{L_1(t_k)}{2}\|x_{k+1}-x_k\|^2\nonumber\\
&= F'(x_k,t_k) - \beta_k \left<\nabla_x F'(x_k,t_k),\Tilde{g}'_x(x_k,t_k;u_k)\right> + \frac{L_1(t_k)}{2}\beta_k^2\|\Tilde{g}'_x(x_k,t_k;u_k)\|^2\nonumber\\
&= F'(x_k,t_k) - \beta_k\|\nabla_x F'(x_k,t_k)\|^2 - \beta_k \left<\nabla_x F'(x_k,t_k),\delta_k\right> + \frac{L_1(t_k)}{2}\beta_k^2\|\Tilde{g}'_x(x_k,t_k;u_k)\|^2.
\label{error_1}
\end{align}
Denote
\[E_k := - \beta_k \left<\nabla_x F'(x_k,t_k),\delta_k\right> + \frac{L_1(t_k)}{2}\beta_k^2\|\Tilde{g}'_x(x_k,t_k;u_k)\|^2
\]
for simplicity. From Lemma \ref{lem_error:Lip_t} and \eqref{error_1}, we get the upper bound for $\|\nabla_x F'(x,t)\|^2$ as
\begin{align}
\beta_k \|\nabla_x F'(x_k,t_k)\|^2 &\leq F'(x_k,t_k) - F'(x_{k+1},t_k) + E_k\nonumber\\
&= F'(x_k,t_k) - F'(x_{k+1},t_{k+1}) + F'(x_{k+1},t_{k+1}) - F'(x_{k+1},t_k) + E_k\nonumber\\
&\leq F'(x_k,t_k) - F'(x_{k+1},t_{k+1}) + L_0|t_{k+1}-t_k|\sqrt{d} + 2\nu + E_k\nonumber.
\end{align}
Now, sum up the above inequality for all iterations $k_0+1\leq k\leq T\ (T>k_0)$. Then we have
\begin{align}
&\sum_{k=k_0 + 1}^{T}\beta_k\|\nabla_x F'(x_k,t_k)\|^2\nonumber\\
&\leq F'(x_{k_0 + 1},t_{k_0 + 1}) - F'(x_{T+1},t_{T+1}) + L_0\sum_{k=k_0 + 1}^T |t_{k+1}-t_k|\sqrt{d} + 2\nu (T - k_0) + \sum_{k=k_0 + 1}^{T}E_k\nonumber\\
&\leq F'(x_{k_0 + 1},t_{k_0 + 1}) - f^* +\nu + L_0\sqrt{d}\sum_{k=k_0 + 1}^T |t_{k+1}-t_k| + 2\nu (T - k_0) + \sum_{k=k_0 + 1}^{T}E_k\nonumber\\
&\leq f'(x_{k_0 + 1}) - f^* +3\nu + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T |t_{k+1}-t_k|\right) + 2\nu (T - k_0) + \sum_{k=k_0 + 1}^{T}E_k\nonumber\\
&\leq f(x_{k_0 + 1}) - f^* +4\nu + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T |t_{k+1}-t_k|\right) + 2\nu (T - k_0) + \sum_{k=k_0 + 1}^{T}E_k,
\label{error_9}
\end{align}
where the third inequality holds due to Lemma~\ref{lem_error:Lip_t}. We can bound the conditional expectation of $E_k$ as
\begin{align*}
&\mathbb{E}_{w_k} [E_k\mid w_{[k-1]}] \\
&= -\beta_k \mathbb{E}_{w_k}[ \left<\nabla_x F'(x_k,t_k),\delta_k\right>\mid w_{[k-1]}] + \frac{\mathbb{E}_{w_k} [L_1(t_k)\mid w_{[k-1]}]}{2}\beta_k^2\mathbb{E}_{w_k}[\|\Tilde{g}'_x(x_k,t_k;u_k)\|^2\mid w_{[k-1]}]\\
&\leq \frac{\mathbb{E}_{w_k} [L_1(t_k)\mid w_{[k-1]}]}{2}\beta_k^2\mathbb{E}_{w_k}[\|\Tilde{g}'_x(x_k,t_k;u_k)\|^2\mid w_{[k-1]}]\\
&\leq \frac{\mathbb{E}_{w_k} [L_1(t_k)\mid w_{[k-1]}]}{2}\beta_k^2\left(4(d+4)\mathbb{E}_{w_k} [\|\nabla f(x_k)\|^2\mid w_{[k-1]}]+L_1^2(d+6)^3\mathbb{E}_{w_k}[t_k^2\mid w_{[k-1]}]\right.\\
&\left.\hspace{40mm}
+ 8d\mathbb{E}_{w_k}\left[\nu^2/t_k^2\mid w_{[k-1]}\right]\right),
\end{align*}
where the first inequality holds since we have $\mathbb{E}_{w_k}[\delta_k\mid w_{[k-1]}]=\mathbb{E}_{u_k}[\delta_k\mid u_{[k-1]}]=0$, and the last inequality holds due to Lemma~\ref{lem_error:zo_grad_square_diff}. Take the expectations of \eqref{error_9} w.r.t. random vectors $\{w_{k_0+1},...,w_T\}$. Then we can get
\begin{align}
& \sum_{k=k_0 + 1}^{T}\beta_k \mathbb{E}_{w}[\|\nabla_x F'(x_k,t_k)\|^2] \nonumber\\
& \leq f(x_{k_0 + 1}) - f^* +4\nu + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T \mathbb{E}_w[|t_{k+1}-t_k|]\right) + 2\nu (T - k_0) \nonumber \\
& + \frac{1}{2}\left(4(d+4)\sum_{k=k_0 + 1}^{T}\beta_k^2\mathbb{E}_{w} [L_1(t_k)\|\nabla f(x_k)\|^2]+L_1^2(d+6)^3\sum_{k=k_0 + 1}^{T}\beta_k^2\mathbb{E}_{w}[L_1(t_k)t_k^2]\right.\nonumber\\
&\left.\hspace{8.5mm}
+ 8d\sum_{k=k_0 + 1}^{T}\beta_k^2\mathbb{E}_{w}\left[L_1(t_k)\frac{\nu^2}{t_k^2}\right]\right).
\label{300}
\end{align}
Lemma~\ref{lem_error:grad_square_diff} together with \eqref{300} yields
\begin{align*}
& \sum_{k=k_0 + 1}^{T}\beta_k \mathbb{E}_{w}[\|\nabla f(x_k)\|^2] \\
& \leq 4\sum_{k=k_0 + 1}^{T}\beta_k \mathbb{E}_{w}[\|\nabla_x F'(x_k,t_k)\|^2] + \frac{1}{2}\sum_{k=k_0 + 1}^{T}\beta_k\mathbb{E}_{w}[t_k^2]L_1^2(d+6)^3 + \frac{8}{\pi}\sum_{k=k_0 + 1}^{T}\beta_k\mathbb{E}_{w}\left[\frac{\nu^2}{t_k^2}\right]\\
&\leq 4\left(f(x_{k_0 + 1}) - f^* +4\nu + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T \mathbb{E}_w[|t_{k+1}-t_k|]\right) + 2\nu (T - k_0)\right)\\
&+2\left(4(d+4)\sum_{k=k_0 + 1}^{T}\beta_k^2\mathbb{E}_{w} [L_1(t_k)\|\nabla f(x_k)\|^2]+L_1^2(d+6)^3\sum_{k=k_0 + 1}^{T}\beta_k^2\mathbb{E}_{w}[L_1(t_k)t_k^2]\right.\\
&\left.\hspace{8.5mm}
+ 8d\sum_{k=k_0 + 1}^{T}\beta_k^2\mathbb{E}_{w}\left[L_1(t_k)\frac{\nu^2}{t_k^2}\right]\right)\\
&+ \frac{1}{2}\sum_{k=k_0 + 1}^{T}\beta_k\mathbb{E}_{w}[t_k^2]L_1^2(d+6)^3 + \frac{8}{\pi}\sum_{k=k_0 + 1}^{T}\beta_k\mathbb{E}_{w}\left[\frac{\nu^2}{t_k^2}\right].
\end{align*}
By rearranging the terms, we obtain
\begin{align}
&\sum_{k=k_0 + 1}^{T}\left(\beta_k\mathbb{E}_{w}[\|\nabla f(x_k)\|^2]-8(d+4)\beta_k^2\mathbb{E}_{w}[L_1(t_k)\|\nabla f(x_k)\|^2]\right)\nonumber\\
&\leq 4\left(f(x_{k_0 + 1}) - f^* +4\nu + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T \mathbb{E}_w[|t_{k+1}-t_k|]\right) + 2\nu (T - k_0)\right)\nonumber\\
&+ 2\left(L_1^2(d+6)^3\sum_{k=k_0 + 1}^T\beta_k^2\mathbb{E}_w[L_1(t_k)t_k^2] + 8d\sum_{k=k_0 + 1}^T\beta_k^2\mathbb{E}_w\left[L_1(t_k)\frac{\nu^2}{t_k^2}\right]\right)\nonumber\\
&+ \frac{1}{2}\sum_{k=k_0 + 1}^{T}\beta_k\mathbb{E}_{w}[t_k^2]L_1^2(d+6)^3 + \frac{8}{\pi}\sum_{k=k_0 + 1}^{T}\beta_k\mathbb{E}_{w}\left[\frac{\nu^2}{t_k^2}\right].
\label{error_5}
\end{align}
If we update $t_k\ (k\in[T])$ as in Algorithm~\ref{alg:ZOGH_error}, we have $\nu=O(t_k^2)$, which yields $L_1(t_k)=O(1)$ from Lemma \ref{lem_error:Lip}. Hence, by setting the step size $\beta_k$ as $\frac{1}{16(d+4)L_1(t_k)}\ (k\in[T])$, we can obtain
\begin{align*}
\frac{1}{T - k_0}\sum_{k=k_0 + 1}^T \mathbb{E}_{w}[\|\nabla f(x_k)\|^2]
&= O\left(\frac{d}{T - k_0}\left(1+\sqrt{d}\sum_{k=k_0 + 1}^T\mathbb{E}_w[|t_{k+1}-t_k|] + d^2\sum_{k=k_0 + 1}^T\mathbb{E}_w[t_k^2]\right)\right)
\end{align*}
in the same way as before. We can also get
$\sum_{k=k_0 + 1}^T |t_{k+1} - t_k| = \sum_{k=k_0 + 1}^T ( t_k - t_{k+1}) = t_{k_0 + 1} - t_{T+1} \leq t_{k_0 + 1} = O(\gamma^{k_0}) $.
Further,
we have
\begin{align*}
\sum_{k=k_0 + 1}^T t_k^2
\leq
\sum_{k=k_0 + 1}^T \max \{ t_1^2 \gamma^{2(k-1)}, \nu \}
\leq
\sum_{k=k_0 + 1}^T
\left(
t_1^2 \gamma^{2(k-1)} +
\nu
\right)
&\leq
\frac{t_1^2 \gamma^{2k_0}}{1 - \gamma^2}
+
\nu (T - k_0) \\
&=
O( \gamma^{2k_0} + \nu (T - k_0) ),
\end{align*}
where the first inequality follows from the update rule of $t_k$ in Algorithm~\ref{alg:ZOGH_error}.
Hence,
we obtain
\begin{align}
\frac{1}{T - k_0}\sum_{k=k_0 + 1}^T \mathbb{E}_{w}[\|\nabla f(x_k)\|^2]
&=
O\left(
\frac{d}{T - k_0}
\left( 1 + \sqrt{d}\gamma^{k_0} + d^2 ( \gamma^{2k_0} + \nu (T - k_0)) \right)
\right) \nonumber \\
&=
O\left(
\frac{d(1+d^2\gamma^{2k_0})}{T - k_0}
+
d^3 \nu
\right)
=
O\left(
\frac{d(1 + d^2\gamma^{2k_0})}{T - k_0}
+
\epsilon^2
\right) \nonumber,
\end{align}
where the last equality follows from the assumption of $\nu = O(\epsilon^2 / d^3)$.
Here, we have
$
k_0 = O \left(
\frac{d}{\epsilon^2}
\right)
$
by the definition of $k_0$. Thus, by setting
$
T
= k_0 + O \left(
\frac{d^3}{\epsilon^2}
\right)
=
O \left(
\frac{d^3}{\epsilon^2}
\right)
$,
we can obtain
$
\mathbb{E}_{w, k'}[\|\nabla f(\hat{x})\|^2]
=
\frac{1}{T - k_0}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{w}[\|\nabla f(x_k)\|^2]
\leq \epsilon^2
$.
This implies
$
\mathbb{E}_{w, k'}[\|\nabla f(\hat{x})\|] \leq \epsilon
$
as
$
\mathbb{E}_{w, k'}[\|\nabla f(\hat{x})\|]^2
\leq
\mathbb{E}_{w, k'}[\|\nabla f(\hat{x})\|^2]
$
follows from Jensen's inequality. Furthermore, when $\gamma$ is chosen as
$\gamma \leq d^{-\epsilon^2/d}$, we have
$
\log_\gamma d^{-1} = O\left( \frac{d}{\epsilon^2} \right)
$,
which implies
$
k_0 = \Omega \left( \log_\gamma d^{-1} \right)
$. Therefore, we can obtain
$
\gamma^{k_0}
=
O( d^{-1} )
$,
which yields the iteration complexity of $T=O\left(\frac{d}{\epsilon^2}\right)$.
$\Box$
\subsection{Proof for the stochastic setting}
\label{subsec:proof_error_stochastic}
\textbf{Proof for Theorem \ref{thm:error_stochastic}:}
Let $\zeta_k := (\xi_k,u_k, v_k)$, $k\in [T]$ and denote $\delta_k := \Tilde{G}'_x(x_k,t_k;\xi_k,u_k)-\nabla_x F'(x_k,t_k).$
Since $\Tilde{G}'_x(x,t;\xi, u)$ is an unbiased estimator of $\nabla_x F'(x,t)$, we have
\begin{align}
F'(x_{k+1},t_k) &\leq F'(x_k,t_k) + \left<\nabla_x F'(x_k,t_k),(x_{k+1}-x_k)\right> + \frac{L_1(t_k)}{2}\|x_{k+1}-x_k\|^2\nonumber\\
&= F'(x_k,t_k) - \beta_k \left<\nabla_x F'(x_k,t_k),\Tilde{G}'_x(x_k,t_k;\xi_k,u_k)\right> + \frac{L_1(t_k)}{2}\beta_k^2\|\Tilde{G}'_x(x_k,t_k;\xi_k,u_k)\|^2\nonumber\\
&= F'(x_k,t_k) - \beta_k\|\nabla_x F'(x_k,t_k)\|^2 - \beta_k \left<\nabla_x F'(x_k,t_k),\delta_k\right> + \frac{L_1(t_k)}{2}\beta_k^2\|\Tilde{G}'_x(x_k,t_k;\xi_k,u_k)\|^2\nonumber.
\end{align}
Now, denote
\[
I_k := - \beta_k \left<\nabla_x F'(x_k,t_k),\delta_k\right> + \frac{L_1(t_k)}{2}\beta_k^2\|\Tilde{G}'_x(x_k,t_k;\xi_k,u_k)\|^2
\]
for simplicity. Then, we can get the upper bound for $\|\nabla_x F'(x_k,t_k)\|^2$ with $I_k$:
\begin{align}
\beta_k \|\nabla_x F'(x_k,t_k)\|^2 &\leq F'(x_k,t_k) - F'(x_{k+1},t_k) + I_k\nonumber\\
&= F'(x_k,t_k) - F'(x_{k+1},t_{k+1}) + F'(x_{k+1},t_{k+1}) - F'(x_{k+1},t_k) + I_k\nonumber\\
&\leq F'(x_k,t_k) - F'(x_{k+1},t_{k+1}) + L_0|t_{k+1}-t_k|\sqrt{d} + 2\nu + I_k,\nonumber
\end{align}
where the last inequality follows from Lemma~\ref{lem_error:Lip_t}. Sum up the above inequality for all iterations $k_0 + 1\leq k\leq T$. Then we have
\begin{align}
&\sum_{k=k_0 + 1}^{T}\beta_k\|\nabla_x F'(x_k,t_k)\|^2\nonumber \\
&\leq F'(x_{k_0 + 1},t_{k_0 + 1}) - F'(x_{T+1},t_{T+1}) + L_0\sqrt{d}\sum_{k=k_0 + 1}^T|t_{k+1}-t_k| + 2\nu (T - k_0) + \sum_{k=k_0 + 1}^{T}I_k\nonumber\\
&\leq f(x_{k_0 + 1}) - f^* + 4\nu + L_0\sqrt{d}\left(t_{k_0 + 1} +\sum_{k=k_0 + 1}^T|t_{k+1}-t_k|\right) + 2\nu (T - k_0) + \sum_{k=k_0 + 1}^{T}I_k.
\label{error_6}
\end{align}
We can also obtain
\begin{align*}
& \mathbb{E}_{\zeta_k} [I_k\mid \zeta_{[k-1]}]\\
& = -\beta_k \mathbb{E}_{\zeta_k}[ \left<\nabla_x F'(x_k,t_k),\delta_k\right>\mid \zeta_{[k-1]}] + \frac{\mathbb{E}_{\zeta_k}[L_1(t_k)\mid \zeta_{[k-1]}]}{2}\beta_k^2\mathbb{E}_{\zeta_k}[\|\Tilde{G}'_x(x_k,t_k;\xi_k, u_k)\|^2\mid \zeta_{[k-1]}] \\
&= \frac{\mathbb{E}_{\zeta_k}[L_1(t_k)\mid \zeta_{[k-1]}]}{2}\beta_k^2\mathbb{E}_{\zeta_k}[\|\Tilde{G}'_x(x_k,t_k;\xi_k, u_k)\|^2\mid \zeta_{[k-1]}]\\
&\leq \frac{\mathbb{E}_{\zeta_k}[L_1(t_k)\mid \zeta_{[k-1]}]}{2}\beta_k^2\left(4(d+4)(\mathbb{E}_{\zeta_k}[\|\nabla f(x_k)\|^2\mid \zeta_{[k-1]}]+\sigma^2) + \mathbb{E}_{\zeta_k}[t_k^2\mid \zeta_{[k-1]}]L_1^2(d+6)^3\right. \\
&\left.
\hspace{39.8mm} + 8d\mathbb{E}_{\zeta_k}\left[\nu^2/t_k^2\mid \zeta_{[k-1]}\right]\right),
\end{align*}
where the last inequality holds due to Lemma~\ref{lem_error:zo_stochastic_grad_square_diff}.
Take the expectation of \eqref{error_6} with respect to $\zeta_{k_0 + 1}, \ldots, \zeta_{T}$. Then we have
\begin{align*}
& \sum_{k=k_0 + 1}^{T}\beta_k \mathbb{E}_{\zeta}[\|\nabla_x F'(x_k,t_k)\|^2] \\
& \leq f(x_{k_0 + 1}) - f^* + 4\nu + L_0\sqrt{d}\left(t_{k_0 + 1} + \sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[|t_{k+1}-t_k|]\right) + 2\nu (T - k_0) + \sum_{k=k_0 + 1}^T \mathbb{E}_\zeta[I_k] \\
&\leq f(x_{k_0 + 1}) - f^* + 4\nu + L_0\sqrt{d}\left(t_{k_0 + 1} + \sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[|t_{k+1}-t_k|]\right) + 2\nu (T - k_0)\\
&+ \frac{1}{2}\left(4(d+4)\sum_{k=k_0 + 1}^T\beta_k^2(\mathbb{E}_{\zeta}[L_1(t_k)\|\nabla f(x_k)\|^2]+\sigma^2) + L_1^2(d+6)^3\sum_{k=k_0 + 1}^T\beta_k^2\mathbb{E}_{\zeta}[L_1(t_k)t_k^2] \right.\\
& \left.
\hspace{8.5mm} + 8d\sum_{k=k_0 + 1}^T\beta_k^2\mathbb{E}_\zeta\left[L_1(t_k)\frac{\nu^2}{t_k^2}\right]\right).
\end{align*}
From Lemma~\ref{lem_error:grad_square_diff}~(ii), we have
\begin{align}
& \sum_{k=k_0 + 1}^{T}\beta_k \mathbb{E}_{\zeta}[\|\nabla f(x_k)\|^2] \nonumber \\
&\leq 4\sum_{k=k_0 + 1}^{T}\beta_k \mathbb{E}_{\zeta}[\|\nabla_x F'(x_k,t_k)\|^2] + \frac{L_1^2(d+6)^3}{2}\sum_{k=k_0 + 1}^{T}\beta_k \mathbb{E}_{\zeta}[t_k^2] + \frac{8}{\pi}\sum_{k=k_0 + 1}^{T}\beta_k \mathbb{E}_{\zeta}\left[\frac{\nu^2}{t_k^2}\right]\nonumber\\
&\leq 4\left(f(x_{k_0 + 1}) - f^* + 4\nu + L_0\sqrt{d}\left(t_{k_0 + 1} + \sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[|t_{k+1}-t_k|]\right) + 2\nu (T - k_0)\right)\nonumber\\
&+ 2\left(4(d+4)\sum_{k=k_0 + 1}^T(\beta_k^2\mathbb{E}_{\zeta}[L_1(t_k)(\|\nabla f(x_k)\|^2+\sigma^2)]) + L_1^2(d+6)^3\sum_{k=k_0 + 1}^T\beta_k^2\mathbb{E}_{\zeta}[L_1(t_k)t_k^2]\right.\nonumber\\
& \left.
\hspace{7.5mm} + 8d\sum_{k=k_0 + 1}^T\beta_k^2\mathbb{E}_\zeta\left[L_1(t_k)\frac{\nu^2}{t_k^2}\right]\right) \nonumber \\
&+\frac{L_1^2(d+6)^3}{2}\sum_{k=k_0 + 1}^{T}\beta_k \mathbb{E}_{\zeta}[t_k^2] + \frac{8}{\pi}\sum_{k=k_0 + 1}^{T}\beta_k \mathbb{E}_{\zeta}\left[\frac{\nu^2}{t_k^2}\right].
\label{error_8}
\end{align}
By rearranging the terms, we obtain
\begin{align}
&\sum_{k=k_0 + 1}^{T}\left(\beta_k\mathbb{E}_{\zeta}[\|\nabla f(x_k)\|^2]-8(d+4)\beta_k^2\mathbb{E}_{\zeta}[L_1(t_k)\|\nabla f(x_k)\|^2]\right)\nonumber\\
&\leq 4\left(f(x_{k_0 + 1}) - f^* +4\nu + L_0\sqrt{d}\left(t_{k_0 + 1}+\sum_{k=k_0 + 1}^T \mathbb{E}_\zeta[|t_{k+1}-t_k|]\right) + 2\nu (T - k_0) \right)\nonumber\\
&+ 2\left(4(d+4)\sigma^2\sum_{k=k_0 + 1}^T\beta_k^2\mathbb{E}_\zeta[L_1(t_k)]+L_1^2(d+6)^3\sum_{k=k_0 + 1}^T\beta_k^2\mathbb{E}_\zeta[L_1(t_k)t_k^2] + 8d\sum_{k=k_0 + 1}^T\beta_k^2\mathbb{E}_\zeta\left[L_1(t_k)\frac{\nu^2}{t_k^2}\right]\right)\nonumber\\
&+ \frac{L_1^2(d+6)^3}{2}\sum_{k=k_0 + 1}^{T}\beta_k\mathbb{E}_{\zeta}[t_k^2] + \frac{8}{\pi}\sum_{k=k_0 + 1}^{T}\beta_k\mathbb{E}_{\zeta}\left[\frac{\nu^2}{t_k^2}\right].
\end{align}
If we update $t_k\ (k\in[T])$ as in Algorithm~\ref{alg:ZOGH_error}, we have $\nu=O(t_k^2)$, which yields $L_1(t_k)=O(1)$ from Lemma \ref{lem_error:Lip}. Furthermore, if we set the step size $\beta_k$ as $\min\left\{\frac{1}{16(d+4)L_1(t_k)}, \frac{1}{\sqrt{(T - k_0)(d+4)}}\right\}\ (k\in[T])$, then we have
\begin{align*}
\frac{1}{\beta_k-8(d+4)L_1(t_k)\beta_k^2} \leq \frac{2}{\beta_k},
\end{align*}
\begin{align*}
\frac{1}{\beta_k} \leq 16(d+4)L_1(t_k) + \sqrt{(T - k_0)(d+4)}
\end{align*}
for all $k\in[T]$. Using the above inequalities, we can obtain
\begin{align*}
\frac{1}{T - k_0}\sum_{k=k_0 + 1}^T \mathbb{E}_{\zeta}[\|\nabla f(x_k)\|^2]
&= O\left(\frac{\sqrt{d}\left(1+\sqrt{d}\sum_{k=k_0 + 1}^T\mathbb{E}_\zeta[|t_{k+1}-t_k|]\right)}{\sqrt{T - k_0}}+\frac{d\left(1+d^2\sum_{k=k_0 + 1}^T \mathbb{E}_\zeta[t_k^2]\right)}{T - k_0}\right)
\\
&
= O\left(
\frac{\sqrt{d}+d\gamma^{k_0}}{\sqrt{T - k_0}}
+
\frac{d+d^3\gamma^{2k_0}}{T - k_0}
+
d^3\nu
\right)
\\
&
= O\left(
\frac{\sqrt{d}+d\gamma^{k_0}}{\sqrt{T - k_0}}
+
\frac{d+d^3\gamma^{2k_0}}{T - k_0}
+
\epsilon^2
\right),
\end{align*}
where the second and last equality can be shown via a similar way as in the proof of Theorem~\ref{thm:error_deterministic}.
Here, we have
$
k_0 = O \left(
\frac{d}{\epsilon^4}
\right)
$
by the definition of $k_0$. Thus, by setting
$
T
=
k_0 + O\left(\frac{d^3}{\epsilon^2} + \frac{d^2}{\epsilon^4}\right)
=
O\left(\frac{d^3}{\epsilon^2} + \frac{d^2}{\epsilon^4}\right)
$,
we can obtain
$
\mathbb{E}_{\zeta, k'}[\|\nabla f(\hat{x})\|^2]
=
\frac{1}{T - k_0}\sum_{k=k_0 + 1}^{T}\mathbb{E}_{\zeta}[\|\nabla f(x_k)\|^2]
\leq \epsilon^2
$.
This implies
$
\mathbb{E}_{\zeta, k'}[\|\nabla f(\hat{x})\|] \leq \epsilon
$
as
$
\mathbb{E}_{\zeta, k'}[\|\nabla f(\hat{x})\|]^2
\leq
\mathbb{E}_{\zeta, k'}[\|\nabla f(\hat{x})\|^2]
$
follows from Jensen's inequality. Furthermore, when $\gamma$ is chosen as
$\gamma \leq d^{-\epsilon^4/d}$, we have
$
\log_\gamma d^{-1} = O\left( \frac{d}{\epsilon^4} \right)
$, which implies that
$
k_0 = \Omega\left( \log_\gamma d^{-1} \right)
$. Therefore, we can obtain
$
\gamma^{k_0}
=
O( d^{-1} )
$, which yields the iteration complexity of $T=O\left(\frac{d}{\epsilon^4}\right)$.
$\Box$
\section{Optimization of test functions}
\label{sec:test_funcs}
In the first three subsections, let us compare the performance of our SLGH algorithms with GD-based algorithms and double loop GH algorithms using highly non-convex test functions for optimization: the Ackley function \cite{molga2005test}, Rosenbrock function, and Himmelblau function \cite{Andrei08anunconstrained}. We implemented the following five types of algorithms: (ZOS)GD, (ZO)GradOpt, in which the factor for decreasing the smoothing parameter was 0.5 or 0.8, $\text{(ZO)SLGH}_{\text{r}}$ with $\gamma=0.995$ or $\gamma=0.999$.
\subsection{Ackley Function}
The Ackley function is defined as
\begin{align*}
f(x, y)=-20 \exp \left[-0.2 \sqrt{0.5\left(x^{2}+y^{2}\right)}\right]-\exp [0.5(\cos 2 \pi x+\cos 2 \pi y)]+e+20,
\end{align*}
whose global optimum is $f(0,0)=0$. As shown in Figure~\ref{fig:ackley}, it has numerous small local minima due to cosine functions which are included in the second term. We ran the aforementioned five types of zeroth-order algorithms with the stepsize $\beta=0.1$ for $T=1000$ iterations. The initial smoothing parameter for the GH algorithms (ZOGradOpt and $\text{ZOSLGH}_{\text{r}}$) was set to $t_1=1$, where local minima of the smoothed function almost disappeared (Figure~\ref{fig:ackley_smoothed}). The smoothing parameter for ZOSGD was chosen as $t=0.005$. We set the initial point for the optimization as $(x,y)=(5,5)$.
\begin{figure}
\caption{Visualization of the Ackley function and its Gaussian smoothed function.}
\label{fig:ackley_and_smoothed}
\label{fig:ackley}
\label{fig:ackley_smoothed}
\end{figure}
We illustrate the optimization results in Table~\ref{table:results_ackley} and Figure~\ref{fig:results_ackley}. The GH methods successfully reach near the optimal solution $(0,0)$ when the decreasing speed of $t$ is not so fast, while ZOSGD is stuck in a local minimum in the immediate vicinity of the initial point $(5,5)$. Please note that GradOpt succeeds in optimization without decreasing the smoothing parameter since the optimal solution of the smoothed function with $t=1$ almost matches that of the original target function.
\begin{table}[H]
\centering
\caption{Optimization results of the Ackley function. The global optimum is $f(0,0)=0$.}
\begin{tabular}{cc|c|c}
\toprule
&Methods & $(x,y)$ & $f(x,y)$ \\ \hline
SGD algo. &ZOSGD & $(4.99, 4.99)$ & $12.63$\\\hline
GH algo.&ZOGradOpt $(\gamma=0.5)$& $(4.2\times 10^{-3}, 1.9\times 10^{-3})$ & $\mathbf{1.4\times 10^{-2}}$\\
&ZOGradOpt $(\gamma=0.8)$& $(-2.2\times 10^{-3}, 6.7\times 10^{-3})$ & $\mathbf{8.1\times 10^{-2}}$\\
&$\text{ZOSLGH}_{\text{r}}\ (\gamma=0.995)$ & $(1.97, 1.97)$ & $6.56$\\
&$\text{ZOSLGH}_{\text{r}}\ (\gamma=0.999)$ & $(-3.6\times 10^{-3}, -4.6\times 10^{-3})$ & $\mathbf{1.7\times 10^{-2}}$\\
\bottomrule
\end{tabular}
\label{table:results_ackley}
\end{table}
\begin{figure}
\caption{Plots of the function value and the smoothing parameter during optimization of the Ackley function.}
\label{fig:fvalue_ackley}
\label{fig:t_ackley}
\label{fig:results_ackley}
\end{figure}
\subsection{Rosenbrock Function}
Let us define the Rosenbrock function in 2D as
\begin{align*}
f(x,y)=100\left(y-x^{2}\right)^{2}+\left(1-x\right)^{2},
\end{align*}
whose global optimum is $f(1,1)=0$. This function is difficult to optimize because the global optimum lies inside a flat parabolic shaped valley with low function value (Figure~\ref{fig:rosenbrock}). Since this function is polynomial, we can calculate the GH smoothed function analytically (see \cite{mobahi2012gaussian}):
\begin{align*}
F(x,y,t) &:= \mathbb{E}_{u_x, u_y}[f(x+tu_x, y+tu_y)],\quad \left(u_x, u_y\sim\mathcal{N}(0, 1)\right)\\
&= 100x^4+(-200y+600t^2+1)x^2-2x+100y^2-200t^2y+(300t^4+101t^2+1).
\end{align*}
Thus, we applied first-order methods to this function. The stepsize and iteration number were set to $\beta=1\times 10^{-4}$ and $T=20000$, respectively. The initial smoothing parameter for the GH algorithms (GradOpt and $\text{SLGH}_{\text{r}}$) was set to $t_1=1.5$, where the smoothed function became almost convex around the optimal solution (Figure~\ref{fig:rosenbrock_smoothed}). We set the initial point for the optimization as $(x,y)=(-3,2)$.
\begin{figure}
\caption{Visualization of the Rosenbrock function and its Gaussian smoothed function.}
\label{fig:rosenbrock_and_smoothed}
\label{fig:rosenbrock}
\label{fig:rosenbrock_smoothed}
\end{figure}
We illustrate the optimization results in Table~\ref{table:results_rosenbrock}, Figure~\ref{fig:results_rosenbrock} and Figure~\ref{fig:x_rosenbrock}. The GH methods can decrease the function value much faster than GD. This is because the smoothed function is much easier to optimize than the original function while its optimal solution is close to that of the original one. In the early stage of optimization, the GH methods reach near a point $(0,2)$, which is a good initial point for optimization, while GD falls into a point in the flat valley, which is far from the optimal solution (Figure~\ref{fig:x_rosenbrock}).
\begin{table}[H]
\centering
\caption{Optimization results of the Rosenbrock function. The global optimum is $f(1,1)=0$.}
\begin{tabular}{cc|c|c}
\toprule
&Methods & $(x,y)$ & $f(x,y)$ \\ \hline
GD algo. &GD & $(0.468, 0.216)$ & $0.284$\\\hline
GH algo.&GradOpt $(\gamma=0.5)$& $(0.817, 0.667)$ & $\mathbf{3.36\times 10^{-2}}$\\
&GradOpt $(\gamma=0.8)$& $(0.808, 0.652)$ & $\mathbf{3.70\times 10^{-2}}$\\
&$\text{SLGH}_{\text{r}}\ (\gamma=0.995)$ & $(0.819, 0.670)$ & $\mathbf{3.27\times 10^{-2}}$\\
&$\text{SLGH}_{\text{r}}\ (\gamma=0.999)$ & $(0.795, 0.631)$ & $\mathbf{4.19\times 10^{-2}}$\\
\bottomrule
\end{tabular}
\label{table:results_rosenbrock}
\end{table}
\begin{figure}
\caption{Plots of the function value and the smoothing parameter during optimization of the Rosenbrock function.}
\label{fig:fvalue_rosenbrock}
\label{fig:t_rosenbrock}
\label{fig:results_rosenbrock}
\end{figure}
\begin{figure}
\caption{Comparison of output sequences during optimization of the Rosenbrock function.}
\label{fig:x_rosenbrock}
\end{figure}
\subsection{Himmelblau Function}
The Himmelblau function is defined as
\begin{align*}
f(x, y)= (x^2+y-11)^2+(x+y^2-7)^2.
\end{align*}
It has four minimum points in the vicinity of $(x,y)=(3.000, 2.000), (-2.805, 3.131), (-3.779, -3.283)$, $(3.584, -1.848)$ and one maximum point in the vicinity of $(x,y)=(-0.271, -0.923)$. It takes the optimal value $0$ at the four points. Since this function is also polynomial, we can calculate the GH smoothed function analytically:
\begin{align*}
& F(x,y,t):= \mathbb{E}_{u_x, u_y}[f(x+tu_x, y+tu_y)],\quad \left(u_x, u_y\sim\mathcal{N}(0, 1)\right)\\
&= x^4+(2y+6t^2-21)x^2+(2y^2+2t^2-14)x+y^4+(6t^2-13)y^2+(2t^2-22)y + (6t^4-34t^2+170).
\end{align*}
Thus, we applied first-order methods to this function. The stepsize and iteration number were set to $\beta=1\times 10^{-4}$ and $T=2000$, respectively. The initial smoothing parameter for GH algorithms was set to $t_1=2$, where the smoothed function became almost convex around the optimal solution (Figure~\ref{fig:himmelblau_smoothed}). We set the initial point for the optimization as $(x,y)=(5,5)$.
\begin{figure}
\caption{Visualization of the Himmelblau function and its Gaussian smoothed function.}
\label{fig:himmelblau_and_smoothed}
\label{fig:himmelblau}
\label{fig:himmelblau_smoothed}
\end{figure}
Table~\ref{table:results_himmelblau}, Figure~\ref{fig:results_himmelblau}, and Figure~\ref{fig:x_himmelblau} show the optimization results. GD and our SLGH algorithms successfully reach near the global optimum, while GradOpt fails to decrease the function value. This is because the optimal solution of the smoothed function when $t=2$ lies near the maximum point of the original Himmelblau function $(-0.271, -0.923)$. Figure~\ref{fig:x_himmelblau} describes detailed optimization process. Our SLGH algorithm succeeds in returning to the optimal solution once it has passed by reducing $t$. In contrast, GradOpt reaches the vicinity of a minimum of the smoothed function without knowing the detailed shape of the original function; as a result, it is stuck around a local maximum of the original function.
\begin{table}[H]
\centering
\caption{Results of optimization of the Himmelblau function. It has a global optimum $f(3,2)=0$.}
\begin{tabular}{cc|c|c}
\toprule
&Methods & $(x,y)$ & $f(x,y)$ \\ \hline
GD algo. &GD & $(2.998, 2.003)$ & $\mathbf{1.6\times 10^{-4}}$\\\hline
GH algo.&GradOpt $(\gamma=0.5)$& $(2.575, 1.437)$ & $14.14$\\
&GradOpt $(\gamma=0.8)$& $(1.573, 0.868)$ & $80.51$\\
&$\text{SLGH}_{\text{r}}\ (\gamma=0.995)$ & $(2.999, 2.002)$ & $\mathbf{6.9\times 10^{-5}}$\\
&$\text{SLGH}_{\text{r}}\ (\gamma=0.999)$ & $(2.983, 1.897)$ & $\mathbf{0.21}$\\
\bottomrule
\end{tabular}
\label{table:results_himmelblau}
\end{table}
\begin{figure}
\caption{Plots of the function value and the smoothing parameter during optimization of the Himmelblau function.}
\label{fig:fvalue_himmelblau}
\label{fig:t_himmelblau}
\label{fig:results_himmelblau}
\end{figure}
\begin{figure}
\caption{Comparison of output sequences of GradOpt, in which the factor for decreasing the smoothing parameter is $0.8$, and $\text{SLGH}_{\text{r}}$ during optimization of the Himmelblau function.}
\label{fig:x_himmelblau}
\end{figure}
\subsection{Additional Toy Example}
\label{subsec:toy}
At the end of this section, let us present a toy example problem in which $\text{SLGH}_{\text{d}}$, which utilizes the derivative $\frac{\partial F}{\partial t}$ for the update of $t$, outperforms $\text{SLGH}_{\text{r}}$. Let us consider the following artificial non-convex function:
\begin{align*}
f(x, y)=\left\{\begin{array}{cc}
x^2 - 150\times 1.1^{-((x-10)^2+y^2)}& (x\geq0) \\
x^2/50 - 150\times 1.1^{-((x-10)^2+y^2)}& (x<0)\\
\end{array}\right..
\end{align*}
The second term creates a hole around $(x,y)=(10,0)$ (see Figure~\ref{fig:toy}), and this function has an optimum in the vicinity of $f(9.319, 0)\simeq -56.670$. This function is difficult to optimize for GH methods since the hole around the optimum disappears when the smoothing parameter $t$ is large (Figure~\ref{fig:toy_smoothed}).
We ran $\text{SLGH}_{\text{r}}\ (\gamma=0.995 \text{ or } 0.999)$ and $\text{SLGH}_{\text{d}}\ (\gamma=0.999)$ with the stepsize (for $x$) $\beta=0.01$ for $T=1000$ iterations. The initial point and initial smoothing parameter were set to $(x,y)=(15,0)$ and $t_1=5$, respectively. We set the stepsize for $t$ as $0.01$.
Table~\ref{table:results_toy} and Figure~\ref{fig:results_toy} show the optimization results. We can see that only $\text{SLGH}_{\text{d}}$ can decrease $t$ around the hole adaptively, and thus successfully can find the optimal solution.
\begin{table}[H]
\centering
\caption{Optimization results of the artificial non-convex function. It has a global optimum in the vicinity of $f(9.319, 0)\simeq -56.670$.}
\begin{tabular}{cc|c|c}
\toprule
&Methods & $(x,y)$ & $f(x,y)$ \\ \hline
GH algo.&$\text{SLGH}_{\text{r}}\ (\gamma=0.995)$ & $(-0.248, 2.38\times 10^{-2})$ & $-5.52\times 10^{-3}$\\
&$\text{SLGH}_{\text{r}}\ (\gamma=0.999)$ & $(-2.959, -2.18\times 10^{-3})$ & $0.175$\\
&$\text{SLGH}_{\text{d}}\ (\gamma=0.999)$ & $(9.319, 8.33\times 10^{-3})$ & $\mathbf{-56.670}$\\
\bottomrule
\end{tabular}
\label{table:results_toy}
\end{table}
\begin{figure}
\caption{Plots of the function value and the smoothing parameter during optimization of the artificial non-convex function.}
\label{fig:fvalue_toy}
\label{fig:t_toy}
\label{fig:results_toy}
\end{figure}
\begin{figure}
\caption{Visualization of the artificial non-convex function and its Gaussian smoothed function.}
\label{fig:toy_and_smoothed}
\label{fig:toy}
\label{fig:toy_smoothed}
\end{figure}
\section{Black-box adversarial attack}
\label{sec:black_box}
\subsection{Experimental Setup}
We used well-trained DNNs\footnote{https://github.com/carlini/nn\_robust\_attacks} for CIFAR10 and MNIST classification tasks as target models, respectively. We adopt the implementation\footnote{https://github.com/KaidiXu/ZO-AdaMM} in \cite{chen2019zo} for ZOSGD and ZOAdaMM. GradOpt \cite{hazan2016graduated} in our implementation adopts the same random gradient-free oracles \cite{nesterov2017random} as with our ZOSLGH methods, rather than their smoothed gradient oracle, where random variables are sampled from the unit sphere. Moreover, we set the stepsize in its inner loop as a constant instead of $\Theta(1/k)$, where $k$ denotes an iteration number in the inner loop, due to less efficiency of the original setting. Therefore, the essential difference between GradOpt and $\text{ZOSLGH}_\text{r}$ is whether or not the structure of algorithms is single loop.
As recommended in their work, we set the parameter for ZOAdaMM as ${v}_0 = 10^{-5}$, $\beta_1=0.9$, and $\beta_2=0.3$. The factor for decreasing the smoothing parameter in ZOGradOpt was set to $0.5$. For all algorithms, we chose the regularization parameter $\lambda$ as $\lambda=10$ and set attack confidence $\kappa=10^{-10}$. We chose minibatch size as $M=10$ to stabilize estimation of values and gradients of the smoothed function. The initial adversarial perturbation was chosen as $x_0=0$, and the initial smoothing parameter $t_0$ was $10$ for GH methods and $0.005$ for the others. The decreasing factor for $t$ in the ZOSLGH algorithm was set to $\gamma=0.999$ for both of $\text{ZOSLGH}_\text{r}$ and $\text{ZOSLGH}_\text{d}$, unless otherwise noted. Other parameter settings are described in Table \ref{tab:parameter_setting}. We used different step sizes for ZOAdaMM because it adaptively penalizes the step size using the information of past gradients \cite{chen2019zo}.
\begin{table}[H]
\centering
\caption{Parameter settings in the adversarial attack problems. $T$ represents the iteration number. $\beta$ is the step size for $x$, and $\eta$ is the step size for $t$. $N_0$ and $\epsilon_0$ are used to determine termination condition of the inner loop in ZOGradOpt: we stop the inner loop and decrease $t$ if the condition $|\frac{1}{M}\sum_{i=1}^Mf(x_{k+1}+tu_i)-\frac{1}{M}\sum_{i=1}^Mf(x_{k}+tu'_i)|\leq\epsilon_0$ is satisfied $N_0$ times, where $u_i$ and $u_i'\ (i=1, ..., M)$ are sampled from $\mathcal{N}(0, \mathrm{I}_d)$. Each of ``3072'' and ``784'' is the dimension of images in CIFAR-10 and MNIST.}
\label{tab:parameter_setting}
\begin{tabular}{c|c|c|c|c|c}
\toprule
& $T$ & \begin{tabular}{c} $\beta$ \\ (other than\\ ZOAdaMM) \end{tabular} & \begin{tabular}{c} $\beta$ \\ (for ZOAdaMM) \end{tabular} & $\eta$ & $(N_0, \epsilon_0)$
\\ \hline
CIFAR-10 & $10000$ & $0.01/3072$ & $0.5/3072$ & $1\times10^{-4}/3072$ & $(100, 5\times10^{-3})$\\
MNIST & $20000$ & $1/784$ & $100/784$ & $0.1/784$ & $(100, 1\times10^{-3})$\\
\bottomrule
\end{tabular}
\end{table}
\subsection{CIFAR-10}
\label{sec:cifar10}
\paragraph{Additional plots}
Figures~\ref{fig:total_plot_cifar10} and \ref{fig:l2_plot_cifar10} show additional plots for total loss and $L_2$ distortion, respectively. We can see that our ZOSLGH algorithms successfully decrease the total loss value except in cases where images are so difficult to attack that no algorithms succeed in attacking (Figure~\ref{fig:veryhard1_cifar10}, \ref{fig:veryhard2_cifar10}). Plots in Figure \ref{fig:l2_plot_cifar10} imply that the algorithms are stuck around a local minimum $x=0$ when they fail to decrease the loss value.
\begin{figure}
\caption{Additional plots of total loss versus iterations on CIFAR-10 (log scale). (a)-(c) All algorithms can successfully decrease the loss value when images are easy to attack. In particular, in plot (c), SGD-based algorithms can find better solutions than GH-based algorithms. (d)-(f) Only GradOpt fails to attack due to its slow convergence. (g) Only ZOSGD is stuck around a local minimum $x=0$. (h) Only our $\text{ZOSLGH}$ algorithms succeed in escaping the local minimum. (i), (j) These images are so difficult to attack that no algorithms can succeed in attacking.}
\label{fig:easy01_cifar10}
\label{fig:easy1_cifar10}
\label{fig:easy2_cifar10}
\label{fig:easy02_cifar10}
\label{fig:mid1_cifar10}
\label{fig:mid2_cifar10}
\label{fig:hard1_cifar10}
\label{fig:hard2_cifar10}
\label{fig:veryhard1_cifar10}
\label{fig:veryhard2_cifar10}
\label{fig:total_plot_cifar10}
\end{figure}
\begin{figure}
\caption{Plots of $L_2$ distortion versus iterations for images that are difficult to attack on CIFAR-10. Each plot of (a)-(d) corresponds to Figure~\ref{fig:hard1_cifar10}, \ref{fig:hard2_cifar10}, \ref{fig:veryhard1_cifar10}, and \ref{fig:veryhard2_cifar10}, respectively.}
\label{fig:hard39_l2_cifar10}
\label{fig:hard104_l2_cifar10}
\label{fig:veryhard14_l2_cifar10}
\label{fig:veryhard41_l2_cifar10}
\label{fig:l2_plot_cifar10}
\end{figure}
\paragraph{Effect of choice of the parameter $\gamma$ in the ZOSLGH algorithm}
We also investigated the effect of choice of the decreasing parameter $\gamma$ in the ZOSLGH algorithm. We compared ZOSGD, $\text{ZOSLGH}_\text{r}$ with $\gamma=0.995$, and $\text{ZOSLGH}_\text{r}$ with $\gamma=0.999$. All other parameters were set to the same values as before. Figure~\ref{fig:comparison_cifar10} implies that the decreasing speed of $t$ is associated with a trade-off: a rapid decrease of $t$ yields fast convergence, but reduces the possibility to find better solutions.
\begin{figure}
\caption{Comparison of total loss transition of ZOSGD, $\text{ZOSLGH}_{\text{r}}\ (\gamma=0.995)$, and $\text{ZOSLGH}_{\text{r}}\ (\gamma=0.999)$ on CIFAR-10.}
\label{fig:comp_cifar10_id8}
\label{fig:comp_cifar10_id66}
\label{fig:comp_cifar10_id105}
\label{fig:comp_cifar10_id89}
\label{fig:comparison_cifar10}
\end{figure}
\paragraph{Generated adversarial examples}
Table~\ref{tab:cifar10_attack_examples} shows adversarial images generated by different algorithms and their original images.
\begin{table}[H]
\caption{Comparison of adversarial images for CIFAR-10 with different algorithms.}
\centering
\begin{tabular}{ccccc}
\toprule
Image ID & 39 & 79 & 89& 115\\ \hline
Original & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_39_Orig_True_5.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_79_Orig_True_8.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_89_Orig_True_9.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_115_Orig_True_3.png}}
\end{minipage}\\ \
Classified as&dog & ship & truck & cat\\
$L_2$ distortion: &0 & 0 & 0 & 0\\ \hline
ZOSGD & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_39_ZOSGD_True_5_Pred_5.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_79_ZOSGD_True_8_Pred_0.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_89_ZOSGD_True_9_Pred_9.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_115_ZOSGD_True_3_Pred_7.png}}
\end{minipage}\\
Classified as&dog (fail.)& airplane & truck (fail.) & horse\\
$L_2$ distortion: &$6.7\times 10^{-5}$ & $0.154$ & $5.6\times 10^{-5}$ & $4.5\times 10^{-3}$\\ \hline
ZOAdaMM & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_39_ZOAdaMM_True_5_Pred_5.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_79_ZOAdaMM_True_8_Pred_0.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_89_ZOAdaMM_True_9_Pred_9.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_115_ZOAdaMM_True_3_Pred_7.png}}
\end{minipage}\\
Classified as&dog (fail.)& airplane & truck (fail.) & horse\\
$L_2$ distortion: &$0.226$ & $0.145$ & $0.131$ & $1.6\times 10^{-3}$\\ \hline
ZOGradOpt & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_39_ZOGradOpt_True_5_Pred_3.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_79_ZOGradOpt_True_8_Pred_0.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_89_ZOGradOpt_True_9_Pred_9.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_115_ZOGradOpt_True_3_Pred_7.png}}
\end{minipage}\\
Classified as&cat & airplane & truck (fail.) & horse\\
$L_2$ distortion: &$0.304$ & $0.254$ & $1.1\times 10^{-30}$ & $0.192$\\ \hline
$\text{ZOSLGH}_{\text{r}}$ & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_39_ZOSLGH-constant_True_5_Pred_3.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_79_ZOSLGH-constant_True_8_Pred_0.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_89_ZOSLGH-constant_True_9_Pred_1.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_115_ZOSLGH-constant_True_3_Pred_7.png}}
\end{minipage}\\
Classified as&cat & airplane & automobile & horse\\
$L_2$ distortion: &$0.540$ & $0.212$ & $0.282$ & $0.076$\\ \hline
$\text{ZOSLGH}_{\text{d}}$ & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_39_ZOSLGH_True_5_Pred_3.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_79_ZOSLGH_True_8_Pred_0.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_89_ZOSLGH_True_9_Pred_1.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/cifar10/id_115_ZOSLGH_True_3_Pred_7.png}}
\end{minipage}\\
Classified as&cat & airplane & automobile & horse\\
$L_2$ distortion: &$0.359$ & $0.174$ & $0.241$ & $0.075$\\
\bottomrule
\end{tabular}
\label{tab:cifar10_attack_examples}
\end{table}
\subsection{MNIST}\label{sec:mnist}
Finally, let us show the experimental results on the MNIST dataset. Our ZOSLGH algorithms attain higher success rates than other algorithms on this dataset as well as CIFAR-10 (Table~\ref{table:results_mnist}). Moreover, the average number of iterations to achieve the first successful attack becomes comparable to ZOSGD. The main difference from the results on CIFAR-10 is that the average of $L_2$ distortion at successful time becomes far larger, from $0.050\sim 0.250$ to $4.25\sim 5.20$. This implies that attacks on MNIST are more difficult than those on CIFAR-10. See Figure~\ref{fig:total_plot_mnist} and Figure~\ref{fig:l2_plot_mnist} for additional plots for total loss and $L_2$ distortion. Table~\ref{tab:mnist_attack_examples} shows adversarial images generated by different algorithms and their original images.
\begin{table}[H]
\centering
\caption{Performance of a per-image attack over $100$ images of MNIST under $T = 20000$ iterations. ``Succ. rate'' indicates the ratio of successful attacks, ``Avg. iters to 1st succ.'' is the average number of iterations to reach the first successful attack, ``Avg. $L_2$ (succ.)'' is the average of $L_2$ distortion taken among successful attacks, and ``Avg. total loss'' is the average of total loss $f(x)$ over 100 samples. Please note that the standard deviations are large since the attack difficulty varies considerably from sample to sample.}
\label{table:results_mnist}
\begin{tabular}{cc|c|c|c|c}
\toprule
&Methods & \begin{tabular}{c}Succ. rate\end{tabular} & \begin{tabular}{c} Avg. iters\\ to 1st succ.\end{tabular} & \begin{tabular}{c} Avg. $L_2$\\ (succ.)\end{tabular} & \begin{tabular}{c} Avg. total loss\end{tabular}\\ \hline
SGD algo. &ZOSGD & $67\%$ & $1171 \pm 1954$ & $4.83 \pm 4.13$ & $73.60 \pm 102.70$ \\
&ZOAdaMM& $71\%$ & $\textbf{261} \pm 1068$ & $\textbf{4.25} \pm 3.36$ & $67.49 \pm 100.25$ \\\hline
&ZOGradOpt & $84\%$ & $6166 \pm 4354$ & $5.16 \pm 2.28$ & $28.25 \pm 65.35$ \\
GH algo. &$\text{ZOSLGH}_{\text{r}}\ (\gamma=0.999)$ & $\textbf{96\%}$ & $1537 \pm 277$& $\textbf{4.32} \pm 2.44$ & $\textbf{11.83} \pm 37.88$ \\
&$\text{ZOSLGH}_{\text{d}}\ (\gamma=0.999)$ & $\textbf{96\%}$ & $1342 \pm 242$& $\textbf{4.37} \pm 2.58$ & $\textbf{12.09}\pm 38.56$ \\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}
\caption{Additional plots of total loss versus iterations on MNIST (log scale). (a)-(b) All algorithms can successfully decrease the loss value when images are easy to attack. (c)-(d) Only GradOpt fails to attack due to its slow convergence. (e) ZOSGD and ZOAdaMM are stuck around a local minimum $x=0$. (f) Only our ZOSLGH algorithms succeed in escaping the local minimum, and thus they can decrease the loss value by more than $200$ compared to the other algorithms. (g)-(h) These images are so difficult to attack that no algorithm can succeed in attacking them.}
\label{fig:easy1_mnist}
\label{fig:easy2_mnist}
\label{fig:mid1_mnist}
\label{fig:mid2_mnist}
\label{fig:hard1_mnist}
\label{fig:hard2_mnist}
\label{fig:veryhard1_mnist}
\label{fig:veryhard2_mnist}
\label{fig:total_plot_mnist}
\end{figure}
\begin{figure}
\caption{Plots of $L_2$ distortion versus iterations for images that are difficult to attack on MNIST. Each plot of (a)-(d) corresponds to Figure~\ref{fig:hard1_mnist}, respectively.}
\label{fig:hard104_l2_mnist}
\label{fig:hard39_l2_mnist}
\label{fig:veryhard14_l2_mnist}
\label{fig:veryhard41_l2_mnist}
\label{fig:l2_plot_mnist}
\end{figure}
\begin{table}[H]
\caption{Comparison of the adversarial images for MNIST with different algorithms.}
\centering
\begin{tabular}{ccccc}
\toprule
Image ID & 10 & 21 & 48& 83\\ \hline
Original & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_10_Orig_True_0.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_21_Orig_True_6.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_48_Orig_True_4.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_83_Orig_True_7.png}}
\end{minipage}\\
Classified as&0 & 6 & 4 & 7\\
$L_2$ distortion: &0 & 0 & 0 & 0\\ \hline
ZOSGD & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_10_ZOSGD_True_0_Pred_0.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_21_ZOSGD_True_6_Pred_5.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_48_ZOSGD_True_4_Pred_9.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_83_ZOSGD_True_7_Pred_7.png}}
\end{minipage}\\
Classified as&0 (fail.)& 5 & 9 & 7 (fail.)\\
$L_2$ distortion: &$4.1\times 10^{-7}$ & $1.194$ & $1.183$ & $1.8\times 10^{-4}$\\ \hline
ZOAdaMM & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_10_ZOAdaMM_True_0_Pred_0.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_21_ZOAdaMM_True_6_Pred_5.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_48_ZOAdaMM_True_4_Pred_9.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_83_ZOAdaMM_True_7_Pred_7.png}}
\end{minipage}\\
Classified as&0 (fail.)& 5 & 9 & 7 (fail.)\\
$L_2$ distortion: &$4.9\times 10^{-14}$ & $1.334$ & $1.100$ & $4.0\times 10^{-14}$\\ \hline
ZOGradOpt & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_10_ZOGradOpt_True_0_Pred_2.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_21_ZOGradOpt_True_6_Pred_5.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_48_ZOGradOpt_True_4_Pred_9.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_83_ZOGradOpt_True_7_Pred_9.png}}
\end{minipage}\\
Classified as& 2&5 & 9 & 9\\
$L_2$ distortion: &$3.898$ & $1.378$ & $1.903$ & $6.379$\\ \hline
$\text{ZOSLGH}_{\text{r}}$ & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_10_ZOSLGH-constant_True_0_Pred_2.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_21_ZOSLGH-constant_True_6_Pred_5.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_48_ZOSLGH-constant_True_4_Pred_9.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_83_ZOSLGH-constant_True_7_Pred_9.png}}
\end{minipage}\\
Classified as& 2&5 & 9 & 9\\
$L_2$ distortion: &$3.867$ & $1.261$ & $1.106$ & $6.075$\\ \hline
$\text{ZOSLGH}_{\text{d}}$ & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_10_ZOSLGH_True_0_Pred_2.png}}
\end{minipage} &
\begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_21_ZOSLGH_True_6_Pred_5.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_48_ZOSLGH_True_4_Pred_9.png}}
\end{minipage} & \begin{minipage}[b]{0.085\columnwidth}
\centering
\raisebox{-.5\height}{\includegraphics[width=\linewidth]{attack_examples/mnist/id_83_ZOSLGH_True_7_Pred_9.png}}
\end{minipage}\\
Classified as& 2&5 & 9 & 9\\
$L_2$ distortion: &$4.048$ & $1.222$ & $1.059$ & $5.722$\\
\bottomrule
\end{tabular}
\label{tab:mnist_attack_examples}
\end{table}
\end{document} |
\begin{document}
\title{On $\Sigma_1^1$-complete Equivalence Relations on the Generalized Baire Space}
\begin{abstract}
Working with uncountable structures of fixed cardinality,
we investigate the complexity of certain equivalence relations and show that
if $V=L$, then many of them are ${\Sigma_1^1}$-complete, in particular the
isomorphism relation of dense linear orders.
Then we show that it is undecidable in ZFC whether or not the isomorphism relation of
a certain well behaved theory (stable, NDOP, NOTOP) is ${\Sigma_1^1}$-complete (it is, if $V=L$,
but can be forced not to be).
\end{abstract}
Key words: descriptive complexity, generalized Baire space, stability theory.
2012 MSC: 03C55, 03E47.
\section*{Introduction}
The descriptive set theory of the generalized Baire space $\kappa^\kappa$ for uncountable $\kappa$
has been initiated in the 1990's, see for example \cite{MV,Ha}, and developed
further e.g. in~\cite{FHK}. The theory differs from the classical
case $\kappa=\omega$ in many respects, but similarly as in the classical case there is a strong connection
to model theory.
Let $T$ be a complete countable first-order theory, $\mathcal{M}(T)$ the set of models of $T$
with domain $\kappa$ and $\operatorname{ISO}(T)$ the isomorphism relation on $\mathcal{M}(T)$. In a standard way $\mathcal{M}(T)$
can be viewed as a Borel subset of $2^\kappa$.
It was established in \cite{FHK}, that in many cases the descriptive complexity of
$\operatorname{ISO}(T)$
is high if and only if $T$ is ``hard'' in terms of the classification theory developed by Shelah~\cite{Sh}.
For example if the isomorphism can be decided with a relatively short Ehrenfeucht--Fra\"iss\'e-game,
then the isomorphism relation is Borel* (Definition \ref{def:Eka}). This result is obtained
by translating between the EF-game and the Borel*-game which are similar in nature.
On the other hand, if the theory is unclassifiable, then
the equivalence relation on $2^\kappa$ modulo a certain version of the
non-stationary ideal can be embedded into its isomorphism
relation. More robust examples are the following two theorems:
\begin{Thm*}[\cite{FHK}]\label{thm:ShallowBorell}
Assume that $\kappa^{<\kappa}=\kappa>\omega$ is not weakly inaccessible and $T$ is a complete countable
first-order theory.
If the isomorphism relation $\cong^\kappa_T$ is Borel, then $T$ is classifiable
(superstable, NDOP and NOTOP) and shallow.
Conversely, if $\kappa>2^\omega$, then if $T$ is classifiable and shallow, then
$\cong^\kappa_T$ is Borel.
\end{Thm*}
\begin{Thm*}[\cite{FHK}]
Suppose $\kappa=\lambda^+=2^\lambda>2^\omega$ where $\lambda^{<\lambda}=\lambda$. Let
$T$ be a first-order theory. Then $T$ is classifiable if and only if for all regular $\mu<\kappa$,
$E^\kappa_\mu\not\le_B\,\cong_T$, where $E^\kappa_\mu$ is the equivalence on $2^\kappa$
modulo the ideal of non-$\mu$-stationary sets.
\end{Thm*}
Thus, the vague thesis of \cite{FHK} is that the more complex the theory is according to
classification theory, the more complex is its isomorphism relation in terms of
the generalized descriptive set theory at some fixed cardinal $\kappa$. In this paper we show
that if $V=L$, then there is a counterexample to this thesis:
the theory $T_{\omega+\omega}$ (see Definition~\ref{def:Too}) is stable with no DOP nor OTOP, its
isomorphism relation can be decided by an EF-game of relatively short length and
its isomorphism relation is ${\Sigma_1^1}$-complete (being Borel* at the same time).
In order to do that, we investigate also other ${\Sigma_1^1}$-complete equivalence relations on $\kappa^\kappa$
for $\kappa^{<\kappa}=\kappa>\omega$ and meanwhile show that the isomorphism relation of dense linear orderings
is ${\Sigma_1^1}$-complete, if $V=L$ (without $V=L$ we still get that $\operatorname{ISO}(\kappa,{\operatorname{DLO}})$ is $S_\kappa$-complete).
Then we show also that the same cannot be proven in ZFC, i.e. in a certain forcing extension $T_{\omega+\omega}$ is
not even $S_\kappa$-complete, Corollary~\ref{cor:StableNotCom}.
\paragraph{Acknowledgment.} We wish to thank Sy-David Friedman for the useful discussions we had during the preparation of this paper.
The research was partially supported by the Academy of Finland through its grant WBS 1251557 and the second author
was funded by the Science Foundation of the University of Helsinki.
\section{Some ${\Sigma_1^1}$-complete Equivalence Relations in $L$}
In this section we give definitions and show that if $V=L$, then many equivalence relations,
such as the equivalence on $\lambda^\kappa$ modulo the non-stationary ideal and the isomorphism relation
of dense linear orders, are ${\Sigma_1^1}$-complete.
\begin{Def}
We fix an uncountable cardinal $\kappa$ with the property $|\kappa^{<\kappa}|=|\bigcup_{\alpha<\kappa}\kappa^\alpha|=\kappa$.
We use the notation $\alpha^{<\beta}$ to denote both the set of functions from the initial segments of $\beta$ to $\alpha$
and the cardinality $|\alpha^{<\beta}|$, $\alpha,\beta$ ordinals.
Our basic space is $\kappa^\kappa$, all functions from $\kappa$ to $\kappa$, with the topology generated by
$$N_p=\{\eta\in \kappa^\kappa\mid \eta\supset p\},\quad p\in \kappa^{<\kappa}.$$
This is the generalized Baire space.
Often we deal with the closed subspaces of $\kappa^\kappa$ such as $2^\kappa$ and $\lambda^\kappa$ with $\lambda<\kappa$ an infinite
cardinal. Then the topology on them is the relative subspace topology.
We fix a one-to-one coding between the models of a fixed countable vocabulary with the universe $\kappa$ and
elements of $2^\kappa\subset \kappa^\kappa$:
$$\eta\in 2^\kappa\iff \mathcal{A}_\eta\text{ is a model with }\operatorname{dom} \mathcal{A}_\eta=\kappa.$$
More precisely, let $\mathcal{L}$ be a countable relational vocabulary, $\mathcal{L}=\{R_n\mid n<\omega\}$
and let $\#R_n$ be the arity of $R_n$. Let $\pi\colon \bigcup_{n<\omega}\{n\}\times\kappa^{\#R_n}\to\kappa$
be a bijection. Given a function $\eta\in 2^\kappa$, let
$\mathcal{A}_\eta$ be the structure such that $\operatorname{dom} \mathcal{A}_\eta=\kappa$ and
$$\mathcal{A}_\eta\models R_n(\alpha_1,\ldots,\alpha_{\#R_n})\iff \eta(\pi(n,\alpha_1,\ldots,\alpha_{\#R_n}))=1.$$
This is clearly bijective and in some sense continuous -- the further $\eta$ is known the
larger segment of the model is determined and vice versa.
The collection of \emph{Borel} sets is the smallest collection of subsets of $\kappa^\kappa$ such that:
\begin{myItemize}
\item closed sets are Borel,
\item if $(A_i)_{i<\kappa}$ is a sequence of Borel sets, then $\bigcup_{i<\kappa}A_i$, $\bigcap_{i<\kappa}A_i$ and $\kappa^\kappa\setminus A_0$
are Borel.
\end{myItemize}
A function $X\to Y$, $X,Y\subset\kappa^\kappa$, is \emph{Borel}, if the inverse image of every open set is Borel.
An equivalence relation $E$ on $X\subset \kappa^\kappa$ is \emph{Borel reducible} to an equivalence relation
$E'$ on $Y\subset\kappa^\kappa$, if there is a Borel function
$f\colon X\to Y$ such that $\eta E\xi\iff f(\eta)E' f(\xi)$.
The coding of models to elements of $2^\kappa$
can be extended to $\lambda^\kappa$ ($\lambda>2$) via the continuous
surjection $\eta\mapsto \xi$, $\xi(\alpha)=0\iff\eta(\alpha)=0$, for $\eta\in\lambda^\kappa$ and
$\xi\in 2^\kappa$.
\end{Def}
A set $A\subset\kappa^\kappa$ is ${\Sigma_1^1}$, if it is the projection of a closed or Borel set $C\subset \kappa^\kappa\times\kappa^\kappa$.
It is~${\Delta_1^1}$, if both $A$ and its complement are~${\Sigma_1^1}$.
The following definition of $\mathcal{B}orel^{*}(\kappa)$ sets
is from \cite{Bl} in the case $\kappa =\omega$
and from \cite{MV} in the case $\kappa$ is uncountable.
\begin{Def}\label{def:Eka}
Let $\alpha\le\kappa$ be an ordinal and $\lambda\le\kappa$ a cardinal.
\begin{myEnumerate}
\item We say that a tree $t$ is a $\kappa^{+},\alpha$-tree
if it does not contain chains of order-type $\alpha$ and
every element has at most $\kappa$ successors.
\item We say that a pair $(t,f)$ is a $\mathcal{B}orel^{*}_{\lambda}$-code
if $t$ is a closed $\kappa^{+},\lambda$-tree and $f$ is a function
with domain $t$ such that if $x\in t$ is a leaf, then
$f(x)$ is a basic open set and otherwise
$f(x)\in\{\cup ,\cap\}$.
\item For an element $\eta\in \kappa^\kappa$ and
a $\mathcal{B}orel^{*}_{\lambda}(\kappa)$-code $(t,f)$, the $\mathcal{B}orel^{*}$-game
$B^{*}(\eta ,(t,f))$ is played as follows.
There are two players, $\,\textbf{I}$ and $\textbf{I\hspace{-1pt}I}$. The game
starts from the root of $t$. At each move,
if the game is at node $x\in t$ and $f(x)=\cap$,
then $\,\textbf{I}$ chooses an immediate successor $y$ of $x$
and the game continues from this $y$. If $f(x)=\cup$,
then $\textbf{I\hspace{-1pt}I}$ makes the choice.
At limits the game continues from the (unique)
supremum of the previous moves.
Finally, if $f(x)$ is a basic open set,
then the game ends, and $\textbf{I\hspace{-1pt}I}$ wins if $\eta\in f(x)$.
\item We say that $X\subseteq \kappa^\kappa$ is a $\mathcal{B}orel^{*}_{\lambda}(\kappa)$ set
if it has a $\mathcal{B}orel^{*}_{\lambda}(\kappa)$-code
i.e. that there is a $\mathcal{B}orel^{*}_{\lambda}(\kappa)$-code
$(t,f)$ such that for all $\eta\in \kappa^\kappa$,
$\eta\in X$ iff $\textbf{I\hspace{-1pt}I}$ has a winning strategy in the game
$B^{*}(\eta ,(t,f))$.
\item In this paper we have fixed an uncountable cardinal $\kappa$ and
we will drop $\kappa$ from the notation, i.e. $\mathcal{B}orel^*=\mathcal{B}orel^*(\kappa)$
and we write $\mathcal{B}orel^{*}$ also for the family of all $\mathcal{B}orel^{*}$ sets.
\end{myEnumerate}
\end{Def}
\begin{Def}
Given a class $M$ of structures with domain $\kappa$, let $C(M)\subset 2^\kappa$ be the set of codes of elements of $M$.
If $M$ is closed under isomorphism, denote by $\operatorname{ISO}(M)$ the isomorphism relation on $C(M)$.
If $M=\operatorname{Str}^\kappa(T)=\{\mathcal{A}\mid \operatorname{dom}\mathcal{A}=\kappa\land \mathcal{A}\models T\}$ for some first order theory $T$, then
denote $\operatorname{ISO}(\kappa,T)=\operatorname{ISO}(M)$. For a first order theory $T$, $C(\operatorname{Str}^\kappa(T))$ is Borel and
the equivalence relation $\operatorname{ISO}(\kappa,T)$ is ${\Sigma_1^1}$. We denote the class
$\{\operatorname{ISO}(M)\mid C(M)\text{ is Borel}\}$ by $S_\kappa$. The notation might be a bit confusing, since in some
contexts $S_\kappa$ denotes the group of all permutations of $\kappa$, but
note that every equivalence relation in $S_\kappa$ (as defined above) is induced by the action of this group.
We choose this definition, because we will look at the class $S_\kappa$ as a part of the hierarchy, see below.
\end{Def}
\begin{Def}
Given a collection of sets $\Gamma$ we say
that an equivalence relation $E$ on $X\subset \kappa^\kappa$ is
\emph{$\Gamma$-complete}, if it is itself in $\Gamma$ and for every
equivalence relation $F\in \Gamma$ on some $Y\subset \kappa^\kappa$ there is a Borel reduction
$F\le_B E$.
We consider mainly $\Gamma\in\{\mathcal{B}orel,{\Delta_1^1},\mathcal{B}orel^*,{\Sigma_1^1},S_\kappa\}$.
\end{Def}
Let ${\operatorname{DLO}}$ be the theory of dense linear orderings without end points.
Here is the list of results of this article:
\begin{itemize}
\item ($V=L$, $\kappa=\lambda^+$ or $\kappa=\aleph_\kappa=\lambda$, $\mu=\operatorname{cf}(\mu)<\kappa$)
The equivalence on $\lambda^\kappa$ modulo the $\mu$-non-stationary ideal is ${\Sigma_1^1}$-complete. (Theorem~\ref{thm:Complete1})
\item ($V=L$, $\kappa=\lambda^+$, $\operatorname{cf}(\lambda)=\lambda$) $\operatorname{ISO}(\kappa,{\operatorname{DLO}})$ is ${\Sigma_1^1}$-complete. (Theorem~\ref{thm:Complete3})
\item ($\mathrm{ZFC}$, $\kappa^{<\kappa}=\kappa$) $\operatorname{ISO}(\kappa,{\operatorname{DLO}})$ is $S_\kappa$-complete. (Theorem~\ref{thm:Complete4}).
\item ($V=L$, $\kappa=\lambda^+$, $\lambda^\omega=\lambda$) There is a stable NDOP, NOTOP theory $T$ whose models of size $\kappa$ can
be characterized up to isomorphism by an EF-game of length $\lambda\cdot(\omega+\omega+1)$ (in particular $\operatorname{ISO}(\kappa,T)$ is $\mathcal{B}orel^*$ and
$T$ is the theory of $\omega+\omega$ equivalence relations
refining each other) such that $\operatorname{ISO}(\kappa,T)$ is ${\Sigma_1^1}$-complete. (Corollary \ref{thm:Stable1}).
\item ($\kappa=\kappa^{<\kappa}=\lambda^+$, $\lambda^{<\lambda}=\lambda$)
It can be forced with a $<\kappa$-closed $\kappa^+$-c.c. forcing that
$\operatorname{ISO}(\kappa,T)$ for the above stable theory $T$ is not $S_\kappa$-complete, in fact
$\operatorname{ISO}(\kappa,{\operatorname{DLO}})$ is not reducible to it. (Corollary~\ref{cor:StableNotCom})
\end{itemize}
Most of the discussion in the following few pages is within $\mathrm{ZFC}+V=L$
(we mention it every time though).
In this theory, there is a $\Sigma_1$-formula $\varphi_{\le}(x,y)$ which provably
defines a well-ordering of the universe (``$\varphi_{\le}(x,y)\iff x\le y$'' \cite[Ch. 13]{Jech}).
By $\min_L A$ we mean the least element of $A$ in this ordering.
If $A\subset L_\theta$ is a subset of the model $L_{\theta}$ for some limit
ordinal $\theta$, then ${\operatorname{Sk}}(A)^{L_\theta}$ is the Skolem closure of
$A$ in~$L_{\theta}$ under the definable (from $\varphi_\le$) Skolem functions \cite[Ex. 13.24]{Jech}.
Note that this Skolem closure ${\operatorname{Sk}}(A)^{L_\theta}\subset L_{\theta}$ is definable in $V$.
By $\mathrm{ZF}^-$ we mean $\mathrm{ZFC}+(V=L)$ without the power set axiom.
If $\mu<\kappa$ is regular, then by $S^\kappa_\mu$ we denote all the $\mu$-cofinal ordinals less than $\kappa$.
\begin{Lemma}[\cite{FHK}]\label{lemma:Fri}
Assume $V=L$. Suppose $\psi(x,\xi)$ is a $\Sigma_1$-formula in set theory with parameter $\xi\in 2^\kappa$
and that $r(\alpha)$ is a formula of set theory that says that ``$\alpha$ is a regular cardinal''.
Then for $x\in 2^\kappa$ we have $\psi(x,\xi)$ if and only if the set
$$A=\{\alpha<\kappa\mid \exists \beta>\alpha (L_{\beta}\models \mathrm{ZF}^-\land \,\psi(x\!\restriction\!\alpha,\xi\!\restriction\!\alpha)\land r(\alpha))\}$$
contains a cub.
Moreover ``cub'' can be replaced by $\mu$-cub for any regular $\mu<\kappa$.
\end{Lemma}
\begin{proof}
Due to the length of \cite{FHK} the proof of this lemma was only sketched there,
so we give it here in detail.
Suppose that $x\in 2^\kappa$ is such that $\psi(x,\xi)$ holds.
Let $\theta$ be a large enough cardinal such that
$$L_\theta\models (\mathrm{ZF}^-\land \, r(\kappa)\land \psi(x,\xi)).$$
For each $\alpha<\kappa$, let
$$H(\alpha)={\operatorname{Sk}}(\alpha\cup\{\kappa,\xi,x\})^{L_\theta}$$
and $\overline{H(\alpha)}$ the Mostowski collapse of $H(\alpha)$.
Let $$D=\{\alpha<\kappa\mid H(\alpha)\cap\kappa = \alpha\}.$$
It is easy to see that $D$ is a cub set. On the other
hand $D\subset A$ where $A$ is as in the statement of the theorem, because each $H(\alpha)$ is an elementary submodel of
$L_{\theta}$ and the Mostowski collapse $\overline{H(\alpha)}$ is equal to some $L_{\beta}$ with $\beta>\alpha$.
Of course a cub set is a $\mu$-cub set for any regular $\mu<\kappa$.
Suppose $x\in 2^\kappa$ is such that $\psi(x,\xi)$ does not hold. Similarly as above, let
$\theta$ be a large enough cardinal such that
$$L_\theta\models (\mathrm{ZF}^-\land\, r(\kappa)\land \lnot\psi(x,\xi))$$
and let $C$ be a $\mu$-cub set for some regular $\mu<\kappa$.
We are going to show that $C\setminus A\ne\emptyset$.
Let
$$K(\alpha)={\operatorname{Sk}}(\alpha\cup\{\kappa,C,\xi,x\})^{L_\theta}\text{ and }D=\{\alpha\in S^\kappa_\mu\mid K(\alpha)\cap\kappa=\alpha\}.$$
Clearly $D$ is $\mu$-cub. Let $\alpha_0$ be the least ordinal in
$\lim_\mu D$ (the set of $\mu$-cofinal limits of elements of~$D$).
Then we have $\alpha_0\in C$ by the elementarity of each $K(\alpha)$ and
$$\alpha_0>\mu.\eqno(*)$$
Let $\bar\beta$ be
the ordinal such that $L_{\bar\beta}$ is equal to $\overline{K(\alpha_0)}$, the Mostowski
collapse of $K(\alpha_0)$. We will show that $\alpha_0\notin A$ which
completes the proof.
Suppose on the contrary, that $\alpha_0\in A$.
Then there exists $\beta>\alpha_0$ such that
$$L_{\beta}\models \mathrm{ZF}^-\land\, \psi(x\!\restriction\!\alpha_0,\xi\!\restriction\!\alpha_0)\land r(\alpha_0).\eqno(**)$$
This $\beta$ must be a limit ordinal greater than $\bar\beta$, because
$L_{\bar\beta}\models \lnot \psi(x\!\restriction\!\alpha_0,\xi\!\restriction\!\alpha_0)$ and $\psi$ is~$\Sigma_1$.
As discussed before the lemma, $K(\alpha)$ is a definable subset of $L_\theta$ and in fact the definition
depends only on finitely many parameters one of which is $\alpha$, so also $D$ is a definable subset of $L_\theta$.
Therefore by elementarity, $D\cap\alpha_0$ is a definable subset of $K(\alpha_0)$ and so $D\cap\alpha_0$ is a definable
subset of $L_{\bar\beta}$. Thus $D\cap\alpha_0\in L_\beta$ by the definition of the $(L_\alpha)$-hierarchy.
Now $L_{\beta}$ satisfies $\mathrm{ZF}^-$ and so it satisfies
\begin{center}
``there exists a $\gamma\le \alpha_0$ and an order-preserving bijection from $\gamma$ to $D\cap \alpha_0$''.
\end{center}
But there is only one such map and its domain is $\mu$, since the order-type of $D\cap\alpha_0$ is
$\mu$ by the definition of $\alpha_0$. Hence by $(*)$ $\alpha_0$ is singular in $L_{\beta}$
which is a contradiction with~$(**)$ and the definition of~$r(\alpha)$.
\end{proof}
\begin{RemarkN}\label{rem:LemmaStat}
The following version of the lemma above can be also proved (still under $V=L$):
for any $\Sigma_1$-formula $\varphi(\eta,x)$ with parameter $x\in 2^\kappa$, a regular $\mu<\kappa$ and a stationary
set $S\subset S^\kappa_\mu$, the following are equivalent for all $\eta\in 2^\kappa$:
\begin{enumerate}
\item $\varphi(\eta,x)$
\item $S\setminus A$ is non-stationary, where
$$A=\{\alpha\in S\mid \exists \beta>\alpha(L_\beta\models \varphi(\eta\!\restriction\!\alpha,x\!\restriction\!\alpha)\land r(\alpha)\land s(\alpha))\},$$
where $s(\alpha)$ states that $S\cap \alpha$ is stationary and
$S\cap\alpha\subset S^\alpha_\mu$ in the sense that we require $\beta$ to be large enough to witness
that every element of $S\cap\alpha$ has cofinality $\mu$.
\end{enumerate}
Then the proof goes in the similar way except that we take $\alpha_0$ to be the smallest element
of $(\lim_\mu D)\cap S$ instead of just $\lim_\mu D$
and derive the contradiction in the same fashion as in the above proof
but this time using the fact that $S$ has a lot of $\mu$-cofinal ordinals common with $D$ from
the point of view of $L_{\beta}$ which is a contradiction with the minimality of $\alpha_0$
(here this $\beta>\alpha_0$ is defined as in the proof of Lemma~\ref{lemma:Fri} to witness the counter assumption
that~$\alpha_0\in A$).
\end{RemarkN}
\begin{Thm}[$V=L$]\label{thm:Complete1}
Let $\kappa^{<\kappa}=\kappa>\omega$. If $\kappa=\lambda^+$, let $\theta=\lambda$ and if $\kappa$ is inaccessible, let $\theta=\kappa$.
Let $\mu<\kappa$ be a regular cardinal.
Then the equivalence relation on $\theta^\kappa$ defined by
$$\eta\sim\xi\iff \{\alpha<\kappa\mid \eta(\alpha)=\xi(\alpha)\}\text{ contains a }\mu\text{-cub}$$
is ${\Sigma_1^1}$-complete.
\end{Thm}
\betaegin{proof}
Suppose $E$ is a ${\Sigma_1^1}$-equivalence relation on $\kappa^\kappa$. Let
$a\colon \kappa^\kappa\to 2^{\kappa\times\kappa}$ be the canonical map which takes
$\varepsilonta$ to $\xi$ such that $\xi(\alpha,\beta)=1\iff \varepsilonta(\alpha)=\beta$. Further
let $b$ be a continuous bijection from $2^{\kappa\times\kappa}$ to $2^{\kappa}$.
Then $c=b\circ a$ is continuous and one-to-one. Let $E'$ be the equivalence relation
on $2^\kappa$ such that
$$(\varepsilonta,\xi)\in E'\iff (\varepsilonta=\xi)\lambdaor(\varepsilonta,\xi\in\omegaperatorname{ran}glen c\lambdaanglend \betaig(c^{-1}(\varepsilonta),c^{-1}(\xi)\betaig)\in E).$$
Then $c$ is a continuous reduction of $E$ to $E'$. On the other hand $E'$
is ${\Sigma_1^1}$ because it is a continuous image of $E$ (for the generalizations of the basics
of descriptive set theory, see~\cite{Ha,FHK}). So without loss of generality
we can assume that $E$ is an equivalence relation on $2^\kappa$.
For a given ${\Sigma_1^1}$ equivalence relation $E$ on $2^\kappa$ and a regular $\mu<\kappa$
we will define a function $f\colon 2^\kappa\to (2^{<\kappa})^\kappa$
such that for all $\eta,\xi\in \kappa^\kappa$, $(\eta,\xi)\in E$ if and only if the set
$\{\alpha<\kappa\mid f(\eta)(\alpha)=f(\xi)(\alpha)\}$ contains a $\mu$-cub and $f$ is continuous
in the topology on $(2^{<\kappa})^\kappa$ generated by the sets
$$\{\eta\mid \eta\!\restriction\!\alpha = p\},\ p\in (2^{<\kappa})^\alpha,\ \alpha<\kappa.$$
The function $f$ will be defined so that
if $\eta\in 2^\kappa$, then the value of $f(\eta)$ at $\alpha$ will be in
some $L_{\gamma(\alpha)}$ where $\gamma(\alpha)<\kappa$ is independent
of $\eta$.
If $\kappa=\lambda^+$, then for each $\gamma<\kappa$, the cardinality of $L_\gamma$ is at most $\lambda$, so
using injections $L_\gamma\to \lambda$ it is possible to have the range~$\lambda^\kappa$. $(*)$
If $E$ is a ${\Sigma_1^1}$-equivalence relation, then there exists a $\Sigma_1$-formula of set theory
$\psi(\eta,\xi)=\psi(\eta,\xi,x)=\exists k\varphi(k,\eta,\xi,x)$ with parameter
$x\in 2^\kappa$ which defines $E$: for all $\eta,\xi\in 2^\kappa$, $E(\eta,\xi)\iff \psi(\eta,\xi,x)$.
Let $r(\alpha)$ be the formula that says ``$\alpha$ is a regular cardinal'' and let $\psi^E=\psi^E(\kappa)$ be the sentence
with parameter $\kappa$ that asserts that $\psi(\eta,\xi)$ defines an equivalence relation on $2^\kappa$.
For $\eta\in 2^\kappa$ and $\alpha<\kappa$, let
$$T_{\eta,\alpha}=\{p\in 2^{\alpha}\mid \exists\beta>\alpha(L_{\beta}\models \mathrm{ZF}^-\wedge \psi(p,\eta\!\restriction\!\alpha,x)\wedge r(\alpha)\wedge \psi^E)\}.$$
and let
$$f(\eta)(\alpha)=
\begin{cases}
\min{}_L T_{\eta,\alpha}, \text{ if }T_{\eta,\alpha}\ne \emptyset,\\
0,\text{ otherwise.}
\end{cases}
$$
Note that $f(\eta)(\alpha)\in L_{\gamma}$ where $\gamma$ is the least ordinal
such that $L_\gamma\models \lnot r(\alpha)$ which is $<\kappa$, which verifies
the discussion above at~$(*)$.
Suppose $\psi(\eta,\xi,x)=\exists k\varphi(k,\eta,\xi,x)$ holds
and let $k$ be a witness of that.
Let $\theta$ be a cardinal large enough so that $L_{\theta}\models \mathrm{ZF}^-\wedge\,\varphi(k,\eta,\xi,x)\wedge r(\kappa)$.
For $\alpha<\kappa$ let $H'(\alpha)=\operatorname{Sk}(\alpha\cup\{\kappa,k,\eta,\xi,x\})^{L_\theta}$.
Now
$$D=\{\alpha<\kappa\mid H'(\alpha)\cap\kappa=\alpha\wedge H'(\alpha)\models \psi^E\}$$
is a cub, and so using Mostowski-collapse we have that
$$D'=\{\alpha<\kappa\mid \exists\beta>\alpha (L_{\beta}\models \varphi(k\!\restriction\!\alpha,\eta\!\restriction\!\alpha,\xi\!\restriction\!\alpha,x\!\restriction\!\alpha)
\wedge \mathrm{ZF}^- \wedge\, r(\alpha)\wedge \psi^E)\}$$
contains a cub.
Suppose $\alpha\in D'$ and $p\in T_{\eta,\alpha}$, i.e.
$$\exists\beta_1>\alpha(L_{\beta_1}\models \mathrm{ZF}^-\wedge \,\psi(p,\eta\!\restriction\!\alpha)\wedge r(\alpha)\wedge\psi^E).$$
Since $\alpha\in D'$, there also exists $\beta_2>\alpha$ such that
$$L_{\beta_2}\models \mathrm{ZF}^-\wedge\, \psi(\eta\!\restriction\!\alpha,\xi\!\restriction\!\alpha)\wedge r(\alpha)\wedge\psi^E.$$
Hence if $\beta=\max\{\beta_1,\beta_2\}$, then
$$L_\beta\models \psi(p,\eta\!\restriction\!\alpha)\wedge \psi(\eta\!\restriction\!\alpha,\xi\!\restriction\!\alpha)\wedge \mathrm{ZF}^- \wedge r(\alpha)\wedge\psi^E$$
and because
$\psi(p,\eta\!\restriction\!\alpha)\wedge \psi(\eta\!\restriction\!\alpha,\xi\!\restriction\!\alpha)$ implies $\psi(p,\xi\!\restriction\!\alpha)$ (because
$\psi^E$ holds and so transitivity for $\psi(\eta,\xi)$ holds),
we have that
$$L_\beta\models \psi(p,\xi\!\restriction\!\alpha)\wedge \mathrm{ZF}^-\wedge r(\alpha)\wedge\psi^E,$$
which means that $p\in T_{\xi,\alpha}$. Thus we have proved that $T_{\eta,\alpha}\subset T_{\xi,\alpha}$.
By symmetry we conclude $T_{\eta,\alpha}=T_{\xi,\alpha}$ and therefore $f(\eta)(\alpha)=f(\xi)(\alpha)$ for all $\alpha\in D'$
which contains a cub, so this proves that
$$\psi(\eta,\xi,x)\Rightarrow \{\alpha\mid f(\eta)(\alpha)=f(\xi)(\alpha)\}\text{ contains a cub.}$$
Suppose that $\lnot\psi(\eta,\xi,x)$ holds.
Then by Lemma~\ref{lemma:Fri} there is no $\mu$-cub inside
$$\{\alpha<\kappa\mid\exists\beta>\alpha(L_{\beta}\models\psi(\eta\!\restriction\!\alpha,\xi\!\restriction\!\alpha)\wedge\mathrm{ZF}^-\wedge r(\alpha))\},$$
but this is a superset of
$$\{\alpha<\kappa\mid\exists\beta>\alpha(L_{\beta}\models\psi(\eta\!\restriction\!\alpha,\xi\!\restriction\!\alpha)\wedge\mathrm{ZF}^-\wedge r(\alpha))\wedge \psi^E\},\eqno(**)$$
so the latter does not contain a $\mu$-cub either.
Now
\begin{eqnarray*}
&&\{\alpha\mid f(\eta)(\alpha)=f(\xi)(\alpha)\}\\
&=&\{\alpha\mid \min{}_L T_{\eta,\alpha}=\min{}_L T_{\xi,\alpha}\}\\
&\subset&\{\alpha\mid\exists p\in T_{\eta,\alpha}\cap T_{\xi,\alpha}\}\\
&=&\{\alpha\mid\exists p\exists\beta_1,\beta_2>\alpha\big((L_{\beta_1}\models \psi(p,\eta\!\restriction\!\alpha)\wedge \mathrm{ZF}^-\wedge r(\alpha)\wedge\psi^E)\\
&&\phantom{\{\alpha\mid\exists p\exists\beta_1,\beta_2>}\wedge (L_{\beta_2}\models \psi(p,\xi\!\restriction\!\alpha)\wedge \mathrm{ZF}^-\wedge r(\alpha)\wedge \psi^E)\big)\}
\end{eqnarray*}
and taking $\beta=\max\{\beta_1,\beta_2\}$ we continue:
\begin{eqnarray*}
&\subset&\{\alpha\mid\exists p\exists\beta>\alpha(L_{\beta}\models \psi(p,\eta\!\restriction\!\alpha)\wedge\psi(p,\xi\!\restriction\!\alpha)\wedge \mathrm{ZF}^-\wedge r(\alpha)\wedge\psi^E)\}\\
&=&\{\alpha\mid \exists\beta>\alpha(L_{\beta}\models \psi(\eta\!\restriction\!\alpha,\xi\!\restriction\!\alpha)\wedge \mathrm{ZF}^-\wedge r(\alpha)\wedge\psi^E)\}
\end{eqnarray*}
which by $(**)$ does not contain a $\mu$-cub, so $\{\alpha<\kappa\mid f(\eta)(\alpha)=f(\xi)(\alpha)\}$ doesn't contain one either.
\end{proof}
\begin{Remark}
By using the modified version of Lemma~\ref{lemma:Fri} as described in Remark~\ref{rem:LemmaStat}
one can prove a stronger result. Let $\lambda,\kappa$ and $\theta$ be as in the theorem above and $\mu<\kappa$ regular.
For every stationary $S\subset S^\kappa_\mu$ the equivalence relation on $\theta^\kappa$ defined by
$$\eta\sim\xi\iff S\setminus\{\alpha\mid\eta(\alpha)=\xi(\alpha)\}\text{ is non-stationary}$$
is ${\Sigma_1^1}$-complete.
\end{Remark}
Now we will use Theorem \ref{thm:Complete1} first to show that the isomorphism relations $\operatorname{ISO}(\kappa,{\operatorname{DLO}})$ and $\operatorname{ISO}(\kappa,T_{\omega+\omega})$
are ${\Sigma_1^1}$-complete. Both within $\mathrm{ZFC}+V=L$.
\begin{Def}[Colored Linear Orders]
A \emph{colored linear order} (\emph{clo}) is a pair $(L,c)$ where $L$ is a linear order and $c$ is a function with domain $L$.
An isomorphism between clos $(L,c)$ and $(L',c')$ is a function $f\colon L\to L'$ which is an isomorphism
between $L$ and $L'$ and preserves coloring: $c(x)=c'(f(x))$.
If $(L,c)$ and $(L',c')$ are clos, then $(L,c)+(L',c')$ is the clo $(L+L',d)$, where $d$
is such that $d\!\restriction\! L=c$ and $d\!\restriction\! L'=c'$. Similarly, if $L'$ is any linear order and $(L,c)$ is a clo,
then $(L,c)\cdot L'$ is the clo $(L\cdot L',d)$, where $d\!\restriction\! L\cdot \{x\}=c$ for any $x\in L'$.
\end{Def}
\begin{Thm}[$V=L$] \label{thm:Complete3}
Suppose $\kappa=\lambda^+$ and $\lambda$ is regular.
The isomorphism relation on the class of dense linear orderings of size $\kappa$
is ${\Sigma_1^1}$-complete. If $\lambda>\omega$, one can assume that all the orderings are $\kappa$-like, i.e.
all initial segments have size $<\kappa$.
\end{Thm}
\begin{proof}
We will show that there exists a continuous function $f\colon\lambda^\kappa\to 2^\kappa$ such that
for all $\eta\in \lambda^\kappa$, $\mathcal{A}_{f(\eta)}$ is a dense linear order without end points and
for all $\eta,\xi\in\lambda^{\kappa}$ the set
$\{\alpha<\kappa\mid \eta(\alpha)=\xi(\alpha)\}$ contains a $\lambda$-cub if and only if
$\mathcal{A}_{f(\eta)}\cong \mathcal{A}_{f(\xi)}$.
Thus we embed a ${\Sigma_1^1}$-complete (by Theorem~\ref{thm:Complete1}) equivalence relation into the isomorphism of dense linear orders
which suffices.
We will first define a function $f$ which attaches to each function in $\lambda^\kappa$ a colored linear order. Then
we will show how to eliminate the use of colors by replacing each point by a linear ordering which depends on its color.
Let $\eta$ be a saturated dense linear ordering without end points
of cardinality $\lambda$. Suppose that the coloring $c\colon \eta\to\lambda\setminus\{0\}$ satisfies
\begin{itemize}
\item[$(*)$] If $A,B\subset\eta$ have cardinality
less than $\lambda$ and for all $x\in A,y\in B$ we have $x<y$, then for all $\alpha\in\lambda$ there exists $z$ with $x<z<y$ for all $x\in A, y\in B$
and $c(z)=\alpha$.
\end{itemize}
Then we call $(\eta,c)$ \emph{a saturated clo}.
\begin{claim}{1}
A saturated clo exists.
\end{claim}
\begin{proofVOf}{Claim 1}
This can be done for example as follows. Let $\xi$ be a saturated dense linear order with domain $\lambda\setminus\{0\}$.
Let $\eta=\{f\colon\alpha+1\to \xi\mid \alpha<\lambda\}$ and for $f,g\in\eta$ let
$$f<g\iff (f\subset g)\lor (f(\alpha)<g(\alpha)\text{, where }\alpha=\min\{\beta\mid f(\beta)\ne g(\beta)\}).$$
Let $c(f)=f(\max\operatorname{dom}(f))$.
It is not difficult to check that this satisfies all the requirements.
\end{proofVOf}
Given two saturated colored linear orderings $(\eta,c)$ and $(\eta',c')$,
there is an isomorphism $f\colon (\eta,c)\to(\eta',c')$.
This can be seen by a simple back-and-forth argument.
By this observation we have: if $\eta=(\eta,c)$ is a saturated clo, then
\begin{itemize}
\item[(1)] $\eta\cong \eta+\eta$,
\item[(2)] for all $\alpha<\lambda$, $\eta\cong \eta + (1,c_\alpha) +\eta $, where $1$ is a linear ordering of length $1$ and $c_\alpha$ is the coloring with
range $\{\alpha\}$,
\item[(3)] for all $\alpha<\kappa$, $\eta\cdot \alpha + \eta\cong \eta$,
\item[(4)] if $\alpha<\kappa$ is $\lambda$-cofinal and
$\tau_i=(1,c_{\beta_i})+\eta$ for all $\lambda$-cofinal $i<\alpha$ and $\tau_i=\eta$ otherwise, then
$(\sum_{i<\alpha}\tau_i)\cong\eta$.
\end{itemize}
We will now define a function with domain $\lambda^\kappa$ and range the set of colored linear orders.
Then we will define a function from that range into (non-colored) dense linear orders.
As above, denote by $(1,c_\alpha)$ a clo with a single element of color $\alpha$.
Given $f\colon \kappa\to\lambda$, let
$$\Phi(f)=\sum_{\alpha<\kappa}\tau_{\alpha}^f,$$
where $\tau_\alpha^f=(1,c_{f(\alpha)})+\eta$, if $\operatorname{cf}(\alpha)=\lambda$ and $\tau^f_\alpha=\eta$ otherwise.
\begin{claim}{2}
\nopagebreak{For $f,g\in \lambda^\kappa$, the set $\{\alpha\mid f(\alpha)=g(\alpha)\}$ contains a $\lambda$-cub if and only if
$\Phi(f)\cong \Phi(g)$.}
\end{claim}
\begin{proofVOf}{Claim 2}
Suppose $\{\alpha\mid f(\alpha)=g(\alpha)\}$ contains a $\lambda$-cub $C\subset S^\kappa_\lambda$.
Let $\{a_i\mid i<\kappa\}$ be an enumeration of $C$ such that $i<j\iff a_i<a_j$.
The orderings
$$\Phi_{i}(f)=\sum_{a_i\le\alpha<a_{i+1}}\tau_{\alpha}^f$$
and
$$\Phi_{i}(g)=\sum_{a_i\le\alpha<a_{i+1}}\tau_{\alpha}^g$$
are isomorphic by (4) above and because the color of $\min \Phi_{i}(f)$ is the same as that
of $\min \Phi_i(g)$ for all $i$ by the definition of $C$. This proves ``$\Rightarrow$''.
Suppose $\{\alpha\mid f(\alpha)=g(\alpha)\}$ does not contain a $\lambda$-cub. Then its complement
contains a $\lambda$-stationary set $S\subset S^\kappa_\lambda$. Suppose for a contradiction that
there is an isomorphism $F$ between $\Phi(f)$ and $\Phi(g)$.
Let
$$\Phi^i(f)=\sum_{\alpha<i}\tau_{\alpha}^f$$
and
$$\Phi^i(g)=\sum_{\alpha<i}\tau_{\alpha}^g.$$
By the standard argument, there is a cub set $C$ such that for all $i\in C$ we have that
the restriction
$F\!\restriction\!\Phi^i(f)\colon \Phi^i(f)\to \Phi^i(g)$ is an isomorphism. Pick
an element $j\in C\cap S$. Then $F\!\restriction\!\Phi^j(f)$ is an isomorphism, but
the color of $\min (\Phi(f)\setminus \Phi^j(f))$ is not the same as the color
of $\min (\Phi(g)\setminus \Phi^j(g))$ which is a contradiction.
\end{proofVOf}
Denote by $\operatorname{ISO}({\operatorname{CLO}})$ the isomorphism relation on all colored linear orders
and let $\operatorname{ISO}({\operatorname{DLO}})$ be the isomorphism relation on all the (non-colored) dense linear orders.
We have shown now that an arbitrary ${\Sigma_1^1}$-equivalence relation $E$ is Borel reducible to
$\operatorname{ISO}({\operatorname{CLO}})$. Next we show how to reduce the range of that reduction to $\operatorname{ISO}({\operatorname{DLO}})$.
To do that, we replace every point of $\Phi(f)$ by a (non-colored) dense linear order whose isomorphism
type depends on the color of the corresponding point
and we will do it so that the original clos are isomorphic if and only if the resulting dense linear orders
are isomorphic. Suppose first that $\lambda>\omega$.
Let $(S_i)_{i<\lambda}$ be a $\lambda$-long sequence of disjoint stationary subsets of $\lambda$.
Let
$$\xi_i=\sum_{\alpha<\kappa}\sigma_\alpha,$$
where $\sigma_\alpha=1+\mathbb{Q}$, if $\alpha\in S_i$ and $\sigma_\alpha=\mathbb{Q}$ otherwise, where $\mathbb{Q}$ is the order of the rational numbers.
Then $\xi_i\not\cong\xi_{j}$ for all $i\ne j$ by a similar argument as above for $\Phi(f)\not\cong \Phi(g)$.
Then let
$$\Psi(f)=\sum_{a\in \Phi(f)}\xi_{c(a)}.$$
Note that since $\Phi(f)$ is $\kappa$-like, also $\Psi(f)$ is $\kappa$-like.
If $\lambda=\omega$, then do the same, but now $S_i$ are stationary subsets of $\kappa=\omega_1$.
In this case we lose the property that $\Psi(f)$ is $\kappa$-like.
\end{proof}
\section{The Isomorphism Relation of $\mathcal{M}(T_{\omega+\omega})$}
The models of $T_{\omega+\omega}$ which we are going to investigate are essentially certain
trees as will be shown later (Lemma~\ref{lemma:trTbired}). Thus we show first that the
isomorphism relation on these trees is ${\Sigma_1^1}$-complete in $L$ by using results from the previous section.
Then we will show, using a result from \cite{HK}, that it is consistent that the isomorphism relation
is not ${\Sigma_1^1}$-complete and in fact not even $S_\kappa$-complete.
In \cite{FHK} assuming $\kappa=\lambda^+$ and $\lambda=\lambda^\omega$, we constructed
for each set $S\subset S^\kappa_\omega$ a $\kappa^+,(\omega+2)$-tree
$J(S)$ such that $S\mathbin{\triangle} S'$ is non-stationary $\iff$ $J(S)\cong J(S')$. Adopting
a similar construction for colored trees, we will construct for each function
$f\in \lambda^\kappa$ a colored $\kappa^+,(\omega+2)$-tree $J_f$ such that for all $f,g\in \lambda^\kappa$, the set
$$\{\alpha\mid f(\alpha)=g(\alpha)\}$$
contains an $\omega$-cub if and only if $J_f\cong J_g$. The proof of Lemma 4.89 of \cite{FHK}
has to be modified such that instead of the \emph{dichotomy} for every branch to either have a leaf or not,
there is a \emph{$\lambda$-chromatomy} for the leaf of every branch to be of one of the $\lambda$ colors.
\begin{Def}\label{def:CT}
Suppose $\kappa=\lambda^+$.
A \emph{colored $\kappa^+,(\omega+2)$-tree} is a pair $(t,c)$, where $t$ is a $\kappa^+,(\omega+2)$-tree
(every element has less than $\kappa^+$ successors and all branches have
order type less than $\omega+2$) and $c$ is a map whose domain is the set $\{x\in t\mid \operatorname{ht}(x)=\omega\}$,
where $\operatorname{ht}(x)$ is the height of $x$ -- the order type of $\{y\in t\mid y<x\}$.
The range of $c$ is $\lambda\setminus \{0\}$.
An isomorphism between colored trees $(t,c)$ and $(t',c')$ is a map $f\colon t\to t'$ which
is an isomorphism between $t$ and $t'$ and for each $x\in \operatorname{dom} c$, $c(x)=c'(f(x))$.
Denote the set of all colored $\kappa^+,(\omega+2)$-trees by $\mathcal{C}T^\omega$.
Let $\mathcal{C}T^\omega_*\subset \mathcal{C}T^\omega$ be the set of those trees in which
every element has infinitely many successors at each level $<\omega$.
\end{Def}
\begin{Def}\label{def:Filtration}
Let $t$ be a colored tree of size $\kappa=\lambda^+$. Suppose $(I_{\alpha})_{\alpha<\kappa}$ is a collection of subsets of $t$ such that
\begin{myItemize}
\item for each $\alpha<\kappa$, $I_\alpha$ is a downward closed subset of $t$,
\item $\bigcup_{\alpha<\kappa} I_{\alpha}=t$,
\item if $\alpha<\beta<\kappa$, then $I_{\alpha}\subset I_{\beta}$,
\item if $\gamma$ is a limit ordinal, then $I_\gamma=\bigcup_{\alpha<\gamma}I_\alpha$,
\item for each $\alpha<\kappa$ the cardinality of $I_\alpha$ is less than $\kappa$.
\end{myItemize}
Such a sequence $(I_{\alpha})_{\alpha<\kappa}$ is called a $\kappa$-\emph{filtration} or just a \emph{filtration} of $t$.
\end{Def}
\begin{Def}\label{def:FuncT}
Suppose that $\kappa=\lambda^+$ and $(t,c)$ is a colored tree of size $\kappa$, $t\subset\kappa^{\le\omega}$,
with colors ranging in $\lambda\setminus \{0\}$ and let $\mathcal{I}=(I_{\alpha})_{\alpha<\kappa}$ be a filtration of $t$.
Define $f_{\mathcal{I},t}\in \lambda^\kappa$ as follows. Fix $\alpha<\kappa$.
Let $B_\alpha$ be the set of all $x\in t$ with $x\notin I_\alpha$, but $x\!\restriction\! n\in I_\alpha$ for all $n<\omega$.
\begin{itemize}
\item[(a)] If $B_\alpha$ is non-empty and there is $\beta$
such that for all $x\in B_\alpha$, $c(x)=\beta$, then let $f_{\mathcal{I},t}(\alpha)=\beta$,
\item[(b)] Otherwise let $f_{\mathcal{I},t}(\alpha)=0$.
\end{itemize}
\end{Def}
For $f,g\in\lambda^\kappa$, by $f\sim g$ we mean that
$\{\alpha<\kappa\mid f(\alpha)=g(\alpha)\}$ contains an $\omega$-cub.
\begin{Lemma}\label{lemma:FiltrEquiv}
Suppose colored trees $(t_0,c_0)$ and $(t_1,c_1)$ are isomorphic, and
$\mathcal{I}=(I_\alpha)_{\alpha<\kappa}$ and $\mathcal{J}=(J_\alpha)_{\alpha<\kappa}$ are $\kappa$-filtrations of $t_0$ and $t_1$ respectively.
Then $f_{\mathcal{I},t_0}\sim f_{\mathcal{J},t_1}$.
\end{Lemma}
\begin{proof}
Let $F\colon t_0\to t_1$ be a (color preserving) isomorphism. Then
$F\mathcal{I}=(F[I_\alpha])_{\alpha<\kappa}$ is a filtration of $t_1$ and for all $\alpha<\kappa$
$$f_{\mathcal{I},t_0}(\alpha)=f_{F\mathcal{I},t_1}(\alpha).\eqno(\star)$$
Define the set $C=\{\alpha\mid F[I_\alpha]=J_\alpha\}$. Let us show that it is $\omega$-cub.
Let $\alpha\in \kappa$. Define $\alpha_0=\alpha$ and by induction pick $(\alpha_n)_{n<\omega}$
such that $F[I_{\alpha_n}]\subset J_{\alpha_{n+1}}$ for odd $n$ and $J_{\alpha_n}\subset F[I_{\alpha_{n+1}}]$ for even $n$.
This is possible by the definition of a $\kappa$-filtration. Then $\alpha_\omega=\bigcup_{n<\omega}\alpha_n\in C$.
Clearly $C$ is closed and
$C\subset \{\alpha<\kappa\mid f_{F\mathcal{I},t_1}(\alpha)= f_{\mathcal{J},t_1}(\alpha)\}$ so now by $(\star)$ we have the result.
\end{proof}
\begin{Lemma}\label{lem:StoJS}
Suppose $\kappa=\lambda^+$, $\lambda^\omega=\lambda$ and $\kappa^{<\kappa}=\kappa$.
There exists a function $J\colon \lambda^\kappa\to \mathcal{C}T^\omega_*$ such that for all $f,g\in \lambda^\kappa$,
$$f\sim g\iff J_f\cong J_g$$
(as colored trees).
\end{Lemma}
\begin{proofV}{Lemma \ref{lem:StoJS}}
Define the ordering on $\omega\times\kappa\times\kappa\times\kappa\times\kappa$ lexicographically, i.e.
such that $(\alpha_1,\alpha_2,\alpha_3,\alpha_4,\alpha_5)<_{\text{lex}}(\beta_1,\beta_2,\beta_3,\beta_4,\beta_5)$ if and only if
$\alpha_k<\beta_k$ for the smallest $k$ with $\alpha_k\ne \beta_k$ (and such $k$ exists).
We order the set $(\omega\times\kappa\times\kappa\times\kappa\times\kappa)^{\le \omega}$ as a tree:
$\eta<\xi$ if and only if $\eta\subset\xi$.
For each $f\in \lambda^\kappa$ we will define a colored tree $J_f=(J_f,c_f)$ such that
$J_f\subset (\omega\times\kappa\times\kappa\times\kappa\times\kappa)^{\le\omega}$ with the induced ordering
and
\begin{myItemize}
\item[(a)] If $f\in \lambda^\kappa$ and $\mathcal{I}$ is any $\kappa$-filtration of $J_f$, then $f_{\mathcal{I},J_f}\sim f$.
\item[(b)] If $f\sim g$, then $J_f\cong J_g$.
\end{myItemize}
This suffices, because if $J_f\cong J_g$, then
for some filtrations $\mathcal{I}$ and $\mathcal{J}$ of $J_f$ and $J_g$ respectively we have
by Lemma \ref{lemma:FiltrEquiv} that $f_{\mathcal{I},J_f}\sim f_{\mathcal{J},J_g}$ which further implies by (a)
that $f\sim g$.
Let $f\in\lambda^\kappa$ and let us define a preliminary colored tree $(I_f,d_f)$
as follows. Let $I_f$ be the tree of all strictly increasing functions
from $n\le\omega$ to $\kappa$ and for $\eta$ with domain $\omega$, let $d_f(\eta)=f(\sup\operatorname{ran} \eta)$.
For ordinals $\alpha<\beta$ and $i<\omega$ we adopt the notation:
\begin{myItemize}
\item $[\alpha,\beta]=\{\gamma\mid \alpha\le \gamma\le\beta\}$,
\item $[\alpha,\beta)=\{\gamma\mid \alpha\le \gamma <\beta\}$,
\item $R(\alpha,\beta,i)=\bigcup_{i\le j\le\omega}\{\eta\colon [i,j)\to [\alpha,\beta)\mid \eta\text{ strictly increasing}\}$.
\end{myItemize}
For each $\alpha,\beta<\kappa$ let us define the colored trees $P^{\alpha,\beta}_{\gamma}$, for $\gamma<\kappa$ as follows.
If $\alpha=\beta=\gamma=0$, then $P^{0,0}_{0}=(I_f,d_f)$. Otherwise let $\{P^{\alpha,\beta}_\gamma\mid \gamma<\kappa\}$
enumerate all downward closed subtrees of $R(\alpha,\beta,i)$ for all $i$, with all possible colorings
and in such a way, that every isomorphism type appears cofinally often in the enumeration.
The isomorphism types are of course counted with respect to color preserving isomorphisms.
Define
$$Q(P^{\alpha,\beta}_{\gamma})$$
to be the natural number $i$ such that $P^{\alpha,\beta}_{\gamma}\subset R(\alpha,\beta,i)$.
The enumeration is possible, because the number of all downward closed subsets
of $R(\alpha,\beta,i)$, $i<\omega$, is at most
\begin{eqnarray*}
\Big|\bigcup_{i<\omega}\mathcal{P}(R(\alpha,\beta,i))\Big|&\le& \omega\times |\mathcal{P}(R(0,\beta,0))|\\
&\le&\omega\times |\mathcal{P}(\beta^\omega)|\\
&=&\omega\times 2^{\beta^{\omega}}\\
&\le&\omega\times \kappa\\
&=&\kappa
\end{eqnarray*}
and since for each $\beta<\kappa$, $R(\alpha,\beta,i)$ has cardinality $<\kappa$, even when we add all possible colorings,
the number of trees remains $\kappa\times \lambda^\lambda=\kappa$.
For $f\in \lambda^\kappa$ define $J_f=(J_f,c_f)$ to be the tree of all
$\eta\colon s\to \omega\times \kappa\times\kappa\times\kappa\times\kappa=\omega\times\kappa^4$
such that $s\le\omega$ and the following conditions are met for all $i,j<s$:
\begin{myEnumerate}
\item \label{J-1}$\eta\!\restriction\! n\in J_f$ for all $n<s$,
\item \label{J0}$\eta$ is strictly increasing with respect to the lexicographical order on $\omega\times\kappa^4$,
\item \label{J1}$\eta_1(i)\le \eta_1(i+1)\le \eta_1(i)+1$,
\item \label{J3}$\eta_1(i)=0\rightarrow \eta_{2}(i)=\eta_{3}(i)=\eta_{4}(i)=0$,
\item \label{J4}$\eta_1(i)<\eta_1(i+1)\rightarrow \eta_2(i+1)\ge \eta_3(i)+\eta_4(i)$,
\item \label{J5}$\eta_1(i)=\eta_1(i+1)\rightarrow (\forall k\in\{2,3,4\})(\eta_k(i)=\eta_k(i+1))$,
\item \label{J6}if for some $k<\omega$, $[i,j)=\eta_1^{-1}\{k\}$, then\\
$\eta_5\!\restriction\![i,j)\in P^{\eta_2(i),\eta_3(i)}_{\eta_4(i)}$,
\item \label{J7}if $s=\omega$, then either
\begin{itemize}
\item[(a)] $(\exists m<\omega)(\forall k<\omega)(k>m\rightarrow \eta_1(k)=\eta_1(k+1))$ and the color of $\eta$
is determined by $P^{\eta_2(m),\eta_3(m)}_{\eta_4(m)}$: $c_f(\eta)=c(\eta_5)$, where $c$ is the coloring
of $P^{\eta_2(m),\eta_3(m)}_{\eta_4(m)}$.
\item[or else]
\item[(b)] $c_f(\eta)=f(\sup\operatorname{ran} \eta_5).$
\end{itemize}
\item Order $J_f$ as a subtree of $(\omega\times\kappa^4)^{\le \omega}$, $\eta<\xi\iff\eta\subset\xi$.
\end{myEnumerate}
Note that it follows from the definition of $P^{\alpha,\beta}_\gamma$ and the conditions \eqref{J6} and
\eqref{J4} that for all $i<j<\operatorname{dom} \eta$ and $\eta\in J_f$:\\
\begin{myEnumerate}\setcounter{enumi}{9}
\item \label{J2} $i<j\rightarrow \eta_5(i)<\eta_5(j)$.\\
\end{myEnumerate}
Also we have that if $\eta\in (\omega\times\kappa^4)^{\le \omega}$ is such that
$\eta\!\restriction\! n\in J_f$ for all $n$, then $\eta\in J_f$.
It is easy to see that these trees are in $\mathcal{C}T^\omega_*$.
For each $\alpha<\kappa$ let
$$J^{\alpha}_f=\{\eta\in J_f\mid \operatorname{ran}\eta\subset\omega\times(\beta+1)^4\text{ for some }\beta<\alpha\}.$$
Then $(J^{\alpha}_f)_{\alpha<\kappa}$ is a $\kappa$-filtration of $J_f$ (see Claim~2 below).
If $\eta\in J_f$ and $\operatorname{ran}\eta_1=\omega$, then
$$\sup\operatorname{ran}\eta_4\le\sup\operatorname{ran}\eta_2=\sup\operatorname{ran}\eta_3=\sup\operatorname{ran}\eta_5\eqno(\#)$$
and if in addition to that, $\eta\!\restriction\! k\in J^{\alpha}_f$
for all $k$ and $\eta\notin J^{\alpha}_f$ or if $\operatorname{ran}\eta_1=\{0\}$, then
$$\sup\operatorname{ran}\eta_5=\alpha.\eqno(\circledast)$$\label{circledast}
To see $(\#)$ suppose $\operatorname{ran}\eta_1=\omega$. By \eqref{J2}, $(\eta_5(i))_{i<\omega}$ is an
increasing sequence. By \eqref{J6} $\sup\operatorname{ran}\eta_3\ge\sup\operatorname{ran}\eta_5\ge\sup\operatorname{ran}\eta_2$.
By \eqref{J4}, $\sup\operatorname{ran}\eta_2\ge\sup\operatorname{ran}\eta_3$ and again by \eqref{J4} $\sup\operatorname{ran}\eta_2\ge \sup\operatorname{ran}\eta_4$.
Inequality $\sup\operatorname{ran}\eta_5\le\alpha$ is an immediate consequence of the definition of $J^{\alpha}_f$,
so $(\circledast)$ follows now from the assumption that $\eta\notin J^{\alpha}_f$.
\begin{claim}{1}
Suppose $\xi\in J^{\alpha}_f$ and $\eta\in J_f$. Then
if $\operatorname{dom} \xi<\omega$, $\xi\subsetneq \eta$ and
$(\forall k\in \operatorname{dom}\eta\setminus\operatorname{dom}\xi)\big(\eta_1(k)=\xi_1(\max\operatorname{dom}\xi)\wedge \eta_1(k)>0\big),$
then
$\eta\in J^{\alpha}_f$.
\end{claim}
\begin{proofVOf}{Claim 1}
Suppose $\xi\in J^{\alpha}_f$ and $\eta\in J_f$ are as in the assumption. Let us define
$\beta_2=\xi_2(\max\operatorname{dom}\xi)$, $\beta_3=\xi_3(\max\operatorname{dom}\xi)$, and
$\beta_4=\xi_4(\max\operatorname{dom}\xi)$. Because $\xi\in J^{\alpha}_f$, there is $\beta$ such that
$\beta_2,\beta_3,\beta_4<\beta+1$ and $\beta<\alpha$.
Now by \eqref{J5} $\eta_2(k)=\beta_2$, $\eta_3(k)=\beta_3$ and $\eta_4(k)=\beta_4$, for all $k\in \operatorname{dom}\eta\setminus\operatorname{dom}\xi$.
Then by \eqref{J6} for all
$k\in\operatorname{dom}\eta\setminus\operatorname{dom}\xi$ we have that
$\beta_2<\eta_5(k)<\beta_3<\beta+1$. Since $\xi\in J^{\alpha}_f$,
also $\beta_4<\beta+1$, so $\eta\in J^{\alpha}_f$.
\end{proofVOf}
\begin{claim}{2}
$|J_f|=\kappa$, $(J^\alpha_f)_{\alpha<\kappa}$ is a $\kappa$-filtration of $J_f$
and if $f\in\lambda^\kappa$ and $\mathcal{I}$ is a $\kappa$-filtration of $J_f$, then $f_{\mathcal{I},J_f}\sim f$.
\end{claim}
\begin{proofVOf}{Claim 2}
For all $\alpha<\kappa$,
$J^\alpha_f\subset (\omega\times\alpha^{4})^{\le\omega}$, so by the cardinality assumption of the lemma,
the cardinality of $J^\alpha_f$ is $<\kappa$.
Clearly $\alpha<\beta$ implies $J^{\alpha}_f\subset J^{\beta}_f$. Continuity is verified by
\begin{eqnarray*}
\bigcup_{\alpha<\gamma}J^{\alpha}_f&=&\{\eta\in J_f\mid\exists\alpha<\gamma,\exists\beta<\alpha(\operatorname{ran}\eta\subset \omega\times (\beta+1)^4)\}\\
&=&\{\eta\in J_f\mid\exists\beta<\cup\gamma(\operatorname{ran}\eta\subset \omega\times (\beta+1)^4)\}
\end{eqnarray*}
which equals $J^\gamma_f$ if $\gamma$ is a limit ordinal.
By Lemma \ref{lemma:FiltrEquiv} it is enough to show that $f_{\mathcal{I},J_f}\sim f$ for
$\mathcal{I}=(J^\alpha_f)_{\alpha<\kappa}$, and we will show that if $\mathcal{I}=(J^\alpha_f)_{\alpha<\kappa}$, then
for all $\omega$-cofinal ordinals $\alpha$ we have $f_{\mathcal{I},J_f}(\alpha)=f(\alpha)$.
Suppose $\alpha$ is $\omega$-cofinal and suppose that $\eta$ is such that
$\eta\notin J^\alpha_f$ and $\eta\!\restriction\! k\in J^\alpha_f$ for all $k<\omega$. By Claim 1 $\eta$ can
satisfy (a) of \eqref{J7} only if $\eta_1(n)=0$ for all $n<\omega$.
In that case $\eta=(0,0,0,0,\eta_5)$ and $\eta_5$ is in $P^{0,0}_0=I_f$
and by the definition in \eqref{J7}(a) we have $c_f(\eta)=d_f(\eta_5)$,
which is by definition $d_f(\eta_5)=f(\sup\operatorname{ran} \eta_5)=f(\alpha)$ (see the definitions of
$P^{0,0}_0$ and $(I_f,d_f)$ above).
Else, if (b) of \eqref{J7} is satisfied, then again $c_f(\eta)=f(\sup\operatorname{ran}\eta_5)$
which is by $(\circledast)$ equal to $f(\alpha)$. So that means that the color of all such
$\eta$ is $f(\alpha)$ and thus in defining $f_{\mathcal{I},J_f}(\alpha)$ we use
the condition (b) of Definition~\ref{def:FuncT} and get $f_{\mathcal{I},J_f}(\alpha)=f(\alpha)$.
\end{proofVOf}
\betaegin{claim}{3}
Suppose $f\sigmaim g$. Then $J_f\cong J_g$.
\varepsilonnd{claim}
\betaegin{proofVOf}{Claim 3}
Let $C'\sigmaubset \{\alpha<\kappa\mid f(\alpha)=g(\alpha)\}$ be the $\omega$-cub set which exists by the assumption
and let $C$ be its closure under limits of uncountable cofinality. We will build a back-and-forth system
to find the isomorphism. By induction on $i<\kappa$ we will define $\alpha_i$ and $F_{\alpha_i}$ such that:
\betaegin{myAlphanumerate}
\item If $i<j<\kappa,$ then $\alpha_i<\alpha_j$ and $F_{\alpha_i}\sigmaubset F_{\alpha_j}$.
\item If $i$ is a successor, then $\alpha_i$ is a successor and if $i$ is limit, then $\alpha_i\in C$.
\item If $\gamma$ is a limit ordinal, then $\alpha_\gamma=\sigmaup_{i<\gamma}\alpha_i$.
\item $F_{\alpha_i}$ is a color preserving partial isomorphism $J_f\to J_g$.
\item Suppose that $i=\gamma+n$, where $\gamma$ is a limit ordinal or $0$ and $n<\omega$ is even. Then
$\omegaperatorname{dom} F_{\alpha_i}=J^{\alpha_i}_f$.
\item If $i=\gamma+n$, where $\gamma$ is a limit ordinal or 0 and $n<\omega$ is odd, then
$\omegaperatorname{ran}glen F_{\alpha_i}=J^{\alpha_i}_g$.
\item If $\omegaperatorname{dom} \xi<\omega$, $\xi\in \omegaperatorname{dom} F_{\alpha_i}$,
$\varepsilonta\!\restriction\!\omegaperatorname{dom}\xi=\xi$ and $(\varphiorall k\gammae\omegaperatorname{dom}\xi)\betaig(\varepsilonta_1(k)=\xi_1(\max\omegaperatorname{dom}\xi)\lambdaanglend \varepsilonta_1(k)>0\betaig)$, then
$\varepsilonta\in \omegaperatorname{dom} F_{\alpha_i}$. Similarly for $\omegaperatorname{ran}glen F_{\alpha_i}$.
\item If $\xi\in\omegaperatorname{dom} F_{\alpha_i}$ and $k<\omegaperatorname{dom} \xi$, then $\xi\!\restriction\! k\in \omegaperatorname{dom} F_{\alpha_i}$.
\item For all $\varepsilonta\in \omegaperatorname{dom} F_{\alpha_i}$, $\omegaperatorname{dom}\varepsilonta=\omegaperatorname{dom} (F_{\alpha_i}(\varepsilonta))$.
\varepsilonnd{myAlphanumerate}
\noindent\textbf{The first step.}
The first step and the successor steps are similar, but the first step is easier. Thus we give it separately
in order to simplify the readability.
Let us start with $i=0$. Let $\alpha_0=\beta+1$, for arbitrary $\beta\in C$. Let us denote by
$$W(\alpha)$$
the ordinal $\omega\cdot\alpha^4$ that is order isomorphic to $\omega\times\alpha^4\sigmaubset \omega\times\kappa^4$ (the order
on the latter is defined in the beginning of this section).
Let $\gamma$ be such that there is a (color preserving) isomorphism $h\colon P^{0,W(\alpha_0)}_{\gamma}\cong J^{\alpha_0}_f$
and such that $Q(P^{0,W(\alpha_0)}_{\gamma})=0$. Such exists by \varepsilonqref{J0}.
Suppose that $\varepsilonta\in J^{\alpha_0}_f$. Note that because $P^{0,W(\alpha_0)}_{\gamma}$ and $J^{\alpha_0}_f$ are closed
under initial segments and by the definitions of $Q$ and $P^{\alpha,\beta}_\gamma$, we have $\omegaperatorname{dom} h^{-1}(\varepsilonta)=\omegaperatorname{dom} \varepsilonta$.
Define $\xi=F_{\alpha_0}(\varepsilonta)$ such that $\omegaperatorname{dom}\xi=\omegaperatorname{dom}\varepsilonta$
and for all $k<\omegaperatorname{dom} \xi$
\betaegin{myItemize}
\item $\xi_1(k)=1$,
\item $\xi_2(k)=0$,
\item $\xi_3(k)=W(\alpha_0)$,
\item $\xi_4(k)=\gamma$,
\item $\xi_5(k)=h^{-1}(\varepsilonta)(k)$.
\varepsilonnd{myItemize}
Let us check that $\xi\in J_g$. Conditions \varepsilonqref{J0}-\varepsilonqref{J5} and \varepsilonqref{J7} are satisfied because
$\xi_k$ is constant for all $k\in \{1,2,3,4\}$, $\xi_1(i)\ne 0$ for all $i$ and $\xi_5$ is increasing. For \varepsilonqref{J6}, if
$\xi_1^{-1}\{k\}$ is empty, the condition is verified since each $P^{\alpha,\beta}_\gamma$ is closed under initial segments
and contains the empty function. If it is non-empty, then $k=1$ and in that case $\xi_1^{-1}\{k\}=[0,\omega)$ and
by the argument above ($\omegaperatorname{dom} h^{-1}(\varepsilonta)=\omegaperatorname{dom} \varepsilonta=\omegaperatorname{dom}\xi$) we have
$\xi_5=h^{-1}(\varepsilonta)\in P^{0,W(\alpha_0)}_\gamma=P^{\xi_2(0),\xi_3(0)}_{\xi_4(0)}$, so the condition is satisfied.
The colors are preserved, because $h$ is an isomorphism.
Let us check whether all the conditions (a)--(i) are met. In (a), (b), (c)
and (f) there is nothing to check.
(d) holds, because $h$ is an isomorphism. (e) and (i) are immediate from the definition.
Both $J^{\alpha_0}_f$ and $P^{0,W(\alpha_0)}_\gamma$
are closed under initial segments, so (h) follows, because $\omegaperatorname{dom} F_{\alpha_0}=J^{\alpha_0}_f$ and
$\omegaperatorname{ran}glen F_{\alpha_0}=\{1\}\times \{0\}\times \{W(\alpha_0)\}\times \{\gamma\}\times P^{0,W(\alpha_0)}_{\gamma}$.
Claim~1 implies (g) for $\omegaperatorname{dom} F_{\alpha_0}$. Suppose $\xi\in \omegaperatorname{ran}glen F_{\alpha_0}$ and $\varepsilonta\in J_g$ are as in the assumption of (g).
Then $\varepsilonta_1(i)=\xi_1(i)=1$ for all $i<\omegaperatorname{dom} \varepsilonta$. By \varepsilonqref{J5} it follows that
$\varepsilonta_2(i)=\xi_2(i)=0$,
$\varepsilonta_3(i)=\xi_3(i)=W(\alpha_0)$ and
$\varepsilonta_4(i)=\xi_4(i)=\gamma$
for all $i<\omegaperatorname{dom} \varepsilonta$, so by \varepsilonqref{J6} $\varepsilonta_5\in P^{0,W(\alpha_0)}_\gamma$ and since $h$ is an isomorphism,
$\varepsilonta\in\omegaperatorname{ran}glen F_{\alpha_0}$.\\
\noindent\textbf{Odd successor step.}
We want to handle odd case first,
because the most important case is the successor of a limit ordinal, see $(\iota\iota\iota)$
below. Except that, the even case is similar to the odd case.
Suppose that $j<\kappa$ is a successor ordinal. Then there exist $\beta_j$ and $n_j$ such that
$j=\beta_j+n_j$ and $\beta_j$ is a limit ordinal or $0$. Suppose that $n_j$ is odd and
that $\alpha_l$ and $F_{\alpha_l}$ are defined for all $l<j$ such that the conditions (a)--(i) and \varepsilonqref{J0}--\varepsilonqref{J2}
hold for $l<j$.
Let $\alpha_j=\beta+1$ where $\beta$ is such that $\beta\in C$, $\omegaperatorname{ran}glen F_{\alpha_{j-1}}\sigmaubset J^{\beta}_g$ and $\beta>\alpha_{j-1}$.
For convenience define $\xi(-1)=(0,0,0,0,0)$ for all $\xi\in J_f\cup J_g$.
Suppose $\varepsilonta\in \omegaperatorname{ran}glen F_{\alpha_{j-1}}$ has finite domain $\omegaperatorname{dom}\varepsilonta=m<\omega$ and denote $\xi=F^{-1}_{\alpha_{j-1}}(\varepsilonta)$.
Fix $\gamma_\varepsilonta$ to be such that $Q(P^{\alpha,\beta}_{\gamma_\varepsilonta})=m$ and
such that there is an isomorphism
$h_\varepsilonta\colon P^{\alpha,\beta}_{\gamma_\varepsilonta}\to W,$
where
$$W=\{\zeta\mid \omegaperatorname{dom}\zeta=[m,s), m<s\lambdae\omega,
\varepsilonta^{\varphirown}\lambdaangle m,\zeta(m)\omegaperatorname{ran}gle\notin
\omegaperatorname{ran}glen F_{\alpha_{j-1}}, \varepsilonta^{\varphirown}\zeta\in J^{\alpha_j}_g\},$$
$\alpha=\xi_3(m-1)+\xi_4(m-1)$ and $\beta=\alpha+W(\alpha_j)$ (defined in the beginning of the First step).
We will define $F_{\alpha_{j}}$ so that its range is $J^{\alpha_{j}}_g$ and instead of $F_{\alpha_j}$ we will
define its inverse.
So let $\varepsilonta\in J^{\alpha_j}_g$. We have three cases:
\betaegin{myItemize}
\item[($\iota$)] $\varepsilonta\in \omegaperatorname{ran}glen F_{\alpha_{j-1}}$,
\item[($\iota\iota$)] $\varepsilonxists m<\omegaperatorname{dom}\varepsilonta(\varepsilonta\!\restriction\! m\in \omegaperatorname{ran}glen F_{\alpha_{j-1}}\lambdaanglend \varepsilonta\!\restriction\!(m+1)\notin F_{\alpha_{j-1}})$,
\item[($\iota\iota\iota$)] $\varphiorall m<\omegaperatorname{dom}\varepsilonta(\varepsilonta\!\restriction\!(m+1)\in \omegaperatorname{ran}glen F_{\alpha_{j-1}}\lambdaanglend \varepsilonta\notin \omegaperatorname{ran}glen F_{\alpha_{j-1}})$.
\varepsilonnd{myItemize}
Let us define $\xi=F^{-1}_{\alpha_j}(\varepsilonta)$ such that $\omegaperatorname{dom}\xi=\omegaperatorname{dom}\varepsilonta$. If ($\iota$) holds, define
$\xi(n)=F^{-1}_{\alpha_{j-1}}(\varepsilonta)(n)$ for all $n<\omegaperatorname{dom}\varepsilonta$. If $\omegaperatorname{dom} \varepsilonta=\omega$, then
clearly $c_f(\xi)=c_g(\varepsilonta)$ by the induction hypothesis (specially (d)).
Suppose that ($\iota\iota$) holds
and let $m$ witness this. For all $n<\omegaperatorname{dom} \xi$ let
\betaegin{myItemize}
\item If $n<m$, then $\xi(n)=F^{-1}_{\alpha_{j-1}}(\varepsilonta\!\restriction\! m)(n)$.
\item Suppose $n\gammae m$. Let
\betaegin{myEnumerate}
\item[$\cdot$] $\xi_1(n)=\xi_1(m-1)+1$,
\item[$\cdot$] $\xi_2(n)=\xi_3(m-1)+\xi_4(m-1)$,
\item[$\cdot$] $\xi_3(n)=\xi_2(m)+W(\alpha_j)$,
\item[$\cdot$] $\xi_4(n)=\gamma_{\varepsilonta\!\restriction\! m}$,
\item[$\cdot$] $\xi_5(n)=h_{\varepsilonta\!\restriction\! m}^{-1}(\varepsilonta)(n)$.
\varepsilonnd{myEnumerate}
\varepsilonnd{myItemize}
Next we should check that $\xi\in J_f$ and if $\omegaperatorname{dom} \varepsilonta=\omega$, also that $c_f(\xi)=c_g(\varepsilonta)$;
let us check items \varepsilonqref{J0} and \varepsilonqref{J6}, the rest are left to the reader.
\betaegin{myItemize}
\item[\varepsilonqref{J0}] By the induction hypothesis $\xi\!\restriction\! m$ is increasing. Next,
$\xi_1(m)=\xi_1(m-1)+1$, so $\xi(m-1)<_{\text{lex}}\xi(m)$. If $m\lambdae n_1<n_2$,
then $\xi_k(n_1)=\xi_{k}(n_2)$ for all $k\in\{1,2,3,4\}$ and $\xi_5$ is increasing.
\item[\varepsilonqref{J6}] Suppose that $[i,j)=\xi_1^{-1}\{k\}$. Since $\xi_1\!\restriction\! [m,\omega)$ is constant,
either $j<m$, when we are done by the induction hypothesis, or $i=m$ and $j=\omega$. In that case
one verifies that $\varepsilonta\!\restriction\![m,\omega)\in W=\omegaperatorname{ran}glen h_{\varepsilonta\!\restriction\! m}$ and then, imitating
the corresponding argument in the first step, that
$$\xi_5\!\restriction\! [m,\omega)=h_{\varepsilonta\!\restriction\! m}^{-1}(\varepsilonta\!\restriction\! [m,\omega))$$
and hence in $\omegaperatorname{dom} h_{\varepsilonta\!\restriction\! m}=P^{\xi_2(m),\xi_3(m)}_{\xi_4(m)}$.
\varepsilonnd{myItemize}
Suppose finally that ($\iota\iota\iota$) holds. Then $\omegaperatorname{dom}\varepsilonta$ must be $\omega$ since
otherwise the condition ($\iota\iota\iota$)
is simply contradictory
(because $\varepsilonta\!\restriction\!(\omegaperatorname{dom}\varepsilonta-1+1)=\varepsilonta$ (except for the case $\omegaperatorname{dom}\varepsilonta=0$,
but then condition ($\iota$) holds and we are done)).
By (g) of the induction hypothesis,
we have $\omegaperatorname{ran}glen\varepsilonta_1=\omega$, because otherwise we would have $\varepsilonta\in \omegaperatorname{ran}glen F_{\alpha_{j-1}}$.
Let $F^{-1}_{\alpha_j}(\varepsilonta)=\xi=\mathcal{C}up_{n<\omega}F^{-1}_{\alpha_{j-1}}(\varepsilonta\!\restriction\! n)$.
Evidently $\xi\!\restriction\! n$ is in $J_f$ for all $n<\omega$, so $\xi\in J_f$ by the remark after
\varepsilonqref{J2}.
Let us check that $c_f(\xi)=c_g(\varepsilonta)$.
First of all $\xi$ cannot be in $J^{\alpha_{j-1}}_f$, since
otherwise, by (d) and (i),
$$F_{\alpha_{j-1}}(\xi)=\mathcal{C}up_{n<\omega}F_{\alpha_{j-1}}(\xi\!\restriction\! n)=\mathcal{C}up _{n<\omega}\varepsilonta\!\restriction\! n=\varepsilonta$$
would again be in $\omegaperatorname{ran}glen F_{\alpha_{j-1}}$. But $\xi\!\restriction\! n$ is in $J^{\alpha_{j-1}}_f$, so by the definition
of $J^{\alpha}_f$, $\alpha_{j-1}$ must be a limit ordinal, for otherwise also $\xi$ would be in $J^{\alpha_{j-1}}_f$.
Now by (b),
$\alpha_{j-1}$ is a limit ordinal in $C$ and by (a), (e) and (f),
$\omegaperatorname{ran}glen F_{\alpha_{j-1}}=J^{\alpha_{j-1}}_g$ and $\omegaperatorname{dom} F_{\alpha_{j-1}}=J^{\alpha_{j-1}}_f$. This implies
that $\omegaperatorname{ran}glen\varepsilonta\not\sigmaubset \omega\times \beta^4$ for any $\beta<\alpha_{j-1}$
and by ($\circledast$) on page \pageref{circledast} we must have $\sigmaup\omegaperatorname{ran}glen\varepsilonta_5=\alpha_{j-1}$,
so in particular $\alpha_{j-1}$ has cofinality $\omega$. Therefore $c_g(\varepsilonta)=f(\alpha_{j-1})$
by \varepsilonqref{J7}.
Since $\alpha_{j-1}\in C$, we have
$f(\alpha_{j-1})=g(\alpha_{j-1})$. Again by $(\circledast)$ and that $\omegaperatorname{dom} F_{\alpha_{j-1}}=J^{\alpha_{j-1}}_f$ by (e),
we have $\sigmaup\omegaperatorname{ran}glen\xi_5=\alpha_{j-1}$ and $c_f(\xi)=f(\alpha_{j-1})=c_g(\varepsilonta)$,
thus the colors match.
Let us check whether all the conditions (a)--(i) are met. (a), (b), (c) are
common to the cases ($\iota$), ($\iota\iota$)
and ($\iota\iota\iota$) in the definition of $F_{\alpha_j}^{-1}$ and are easy to verify.
Let us sketch a proof for (d); the rest is left to the reader.
\betaegin{myAlphanumerate}
\item[(d)] We have already checked that the colors are preserved in the non-trivial cases.
Let $\varepsilonta_1,\varepsilonta_2\in \omegaperatorname{ran}glen F_{\alpha_{j}}$ and let us show that
$$\varepsilonta_1\sigmaubsetneq\varepsilonta_2\iff F^{-1}_{\alpha_j}(\varepsilonta_1)\sigmaubsetneq F^{-1}_{\alpha_j}(\varepsilonta_2).$$
The case where both $\varepsilonta_1$ and $\varepsilonta_2$ satisfy $(\iota\iota)$ is the interesting one (implies all the others).
So suppose $\varepsilonta_1,\varepsilonta_2\in (\iota\iota)$. Then there exist
$m_1$ and $m_2$ as described in the statement of ($\iota\iota$).
Let us show that $m_1=m_2$. We have $\varepsilonta_1\!\restriction\! (m_1+1)=\varepsilonta_2\!\restriction\! (m_1+1)$
and $\varepsilonta_1\!\restriction\! (m_1+1)\notin \omegaperatorname{ran}glen F_{\alpha_{j-1}}$,
so $m_2\lambdae m_1$. If $m_2\lambdae m_1$, then $m_2<\omegaperatorname{dom}\varepsilonta_1$,
since $m_1<\omegaperatorname{dom} \varepsilonta_1$. Thus if $m_2\lambdae m_1$, then
$\varepsilonta_1\!\restriction\! (m_2+1)=\varepsilonta_2\!\restriction\! (m_2+1)\notin \omegaperatorname{ran}glen F_{\alpha_{j-1}}$,
which implies $m_2=m_1$. According to the
definition of $F^{-1}_{\alpha_j}(\varepsilonta_i)(k)$ for $k<\omegaperatorname{dom} \varepsilonta_1$,
$F^{-1}_{\alpha_j}(\varepsilonta_i)(k)$ depends only on $m_i$ and $\varepsilonta\!\restriction\! m_i$
for $i\in \{1,2\}$. Since $m_1=m_2$ and $\varepsilonta_1\!\restriction\! m_1=\varepsilonta_2\!\restriction\! m_2$, we have
$F^{-1}_{\alpha_j}(\varepsilonta_1)(k)=F^{-1}_{\alpha_j}(\varepsilonta_2)(k)$ for all
$k<\omegaperatorname{dom}\varepsilonta_1$.
Let us now assume that $\varepsilonta_1\not\sigmaubset \varepsilonta_2$. Then take the smallest $n\in \omegaperatorname{dom}\varepsilonta_1\cap\omegaperatorname{dom}\varepsilonta_2$ such that
$\varepsilonta_1(n)\ne \varepsilonta_2(n)$. It is now easy to show that
$F^{-1}_{\alpha_j}(\varepsilonta_1)(n)\ne F^{-1}_{\alpha_j}(\varepsilonta_2)(n)$ by the construction.
\varepsilonnd{myAlphanumerate}
\noindent\textbf{Even successor step.} Namely the one where $j=\beta+n$, $\beta$ is a limit ordinal or $0$, and $n$ is even and $n>0$.
But this case goes exactly as the odd successor step when it is not the successor of a limit,
except that we start with $\omegaperatorname{dom} F_{\alpha_j}=J^{\alpha_j}_f$
where $\alpha_j$ is big enough successor of an element of $C$ such that $J^{\alpha_j}_f$ contains $\omegaperatorname{ran}glen F_{\alpha_{j-1}}$
and define $\xi=F_{\alpha_j}(\varepsilonta)$. Instead of (e) we use (f) as the induction hypothesis.
\noindent\textbf{Limit step.}
Assume that $j$ is a limit ordinal. Then let $\alpha_j=\mathcal{C}up_{i<j}\alpha_i$ and $F_{\alpha_j}=\mathcal{C}up_{i<j}F_{\alpha_i}$.
Since $\alpha_i$ are successors of ordinals in $C$, $\alpha_j\in C$, so (b) is satisfied.
Since each $F_{\alpha_i}$ is an isomorphism,
also their union is, so (d) is satisfied.
Because conditions (e), (f) and (i) hold for $i<j$, the conditions (e) and (i)
hold for $j$. (f) is satisfied because the premise is not true.
(a) and (c) are clearly satisfied. Also (g) and (h) are satisfied by Claim~1 since now $\omegaperatorname{dom} F_{\alpha_j}=J^{\alpha_j}_f$
and $\omegaperatorname{ran}glen F_{\alpha_j}=J^{\alpha_j}_g$ (this is because (a), (e) and (f) hold for $i<j$).
\noindent\textbf{Finally} $F=\mathcal{C}up_{i<\kappa}F_{\alpha_i}$ is an isomorphism between $J_f$ and $J_g$.
\varepsilonnd{proofVOf}
\varepsilonnd{proofV}
\betaegin{Def}\lambdaanglebel{def:trees}
Let $K(\kappa^+,\omega+\omega+2)$ be the class of those $\kappa^+,(\omega+\omega+2)$-trees which
have the following properties:
\betaegin{myItemize}
\item every node on level $<\omega+\omega$ has infinitely many immediate successors,
\item for every node $x$ there exists $y$ on level $\omega+\omega$ (a leaf) such that $x<y$.
\varepsilonnd{myItemize}
\varepsilonnd{Def}
\betaegin{Thm}[$V=L$, $\kappa=\lambda^+$, $\lambda^\omega=\lambda$]\lambdaanglebel{thm:Complete2}
The isomorphism relation on $K(\kappa^+,\omega+\omega+2)$ is ${\Sigma_1^1}$-complete.
\varepsilonnd{Thm}
\betaegin{proof}
By Theorem \ref{thm:Complete1} every ${\Sigma_1^1}$-equivalence relation can be reduced
to the equivalence relation on $\lambda^\kappa$ modulo the $\omega$-non-stationary ideal.
By Lemma \ref{lem:StoJS} this equivalence relation can be embedded into the isomorphism
relation on $\mathcal{C}T^\omega_*$ (Definition \ref{def:CT}). So it remains to show that
the isomorphism relation on $\mathcal{C}T^\omega_*$ can be embedded into the isomorphism relation on
$K(\kappa^+,\omega+\omega+2)$.
Let $(t_i)_{i<\lambda}$ be a sequence of non-isomorphic $\kappa^+,(\omega+2)$-trees
such that every element has infinitely many immediate successors and
every element has a successor at level $\omega$.
These can be obtained for example by Lemma 4.89 in
\cite{FHK} which is essentially the same as Lemma~\ref{lem:StoJS}
above but with the number of colors $\lambda$ replaced by $2$ (a branch either has a leaf or not).
Let $(t,c)\in \mathcal{C}T^\omega_*$.
Let $F(t,c)$ be the tree obtained from $t$ as follows: if $x\in t$ has height $\omega$, then
by definition it has a color $\alpha<\lambda$, so replace the element $x$ by the tree $t_\alpha$.
Clearly if colored trees $(t,c)$ and $(t',c')$ are isomorphic, then so are the trees
$F(t,c)$ and $F(t',c')$. On the other hand if $g\colon F(t,c)\to F(t',c')$ is an isomorphism,
then $g\!\restriction\! t$ must be an isomorphism onto $t'$. Moreover it preserves colors, because
if $c(x)\ne c'(g(x))$ for some $x$ of height $\omega$ in $t$, then $g\!\restriction\! \{y\in F(t,c)\mid \varphiorall z<x(y>z)\}$ must
be an isomorphism onto $\{y\in F(t',c')\mid \varphiorall z<g(x)(y>z)\}$ which means that there is an isomorphism
between $t_{c(x)}$ and $t_{c'(g(x))}$ which is a contradiction.
It remains to show that $(t,c)\mapsto F(t,c)$ is continuous.
But this follows from the fact that $|t_\alpha|\lambdae |t_\lambda|\lambdae |\lambda^{<\omega}|=\lambda<\kappa$.
\varepsilonnd{proof}
\betaegin{Def}\lambdaanglebel{def:Too}
Let $A=\omega^{\omega+\omega}$ and for $\alpha<\omega+\omega$, let $E_{\alpha}$ be the equivalence relation on $A$ such that
let $(\varepsilonta,\xi)\in E_{\alpha}\iff \varepsilonta\!\restriction\!\alpha=\xi\!\restriction\!\alpha$.
Let $\mathcal{A}$ be a model of the vocabulary $(E_n)_{n<\omega+\omega}$
with domain $A$ and all these equivalence relations interpreted as explained above. Then
denote the complete theory of $\mathcal{A}$, $T(\mathcal{A})$ by $T_{\omega+\omega}$.
\varepsilonnd{Def}
\betaegin{Fact}
$T_{\omega+\omega}$ is stable and has no DOP nor OTOP.
\varepsilonnd{Fact}
\betaegin{Fact}[\cite{Hyt1}]\lambdaanglebel{fact}
Suppose that $\kappa=\lambda^+$, $\lambda$ is regular and $\kappa\in I[\kappa]$. (Here $I[\kappa]$ is the ideal of the approachable sets
introduced by S. Shelah, for reference see for example~\cite{HHR}. For our purposes this assumption is weak, because
$\kappa\in I[\kappa]$ holds for all $\kappa$ with $\kappa=\lambda^+$ and $\lambda^{<\lambda}=\lambda$.)
If $t$ and $t'$ are elements of $K(\kappa^+,\omega+\omega+2)$
and player $\textrm{\betaf I\hspace{-1pt}I}$ has a winning strategy
in the Ehrenfeucht-Fra\"iss\'e game $\mathcal{E}F^\kappa_{\lambda\cdot (\omega+\omega+1)}(t,t')$, then $t\cong t'$.
An earlier, less general version of this theorem along with a proof can be found in~\cite{HS1}.
Another version can also be found in~\cite{HT}.
\varepsilonnd{Fact}
\betaegin{Lemma}\lambdaanglebel{lemma:trTbired}
The relations $\omegaperatorname{ISO}(K(\kappa^+,\omega+\omega+2))$ and $\omegaperatorname{ISO}(\kappa,T_{\omega+\omega})$ are continuously bireducible
to each other
and Fact \ref{fact} holds with $K(\kappa^+,\omega+\omega+2)$ replaced by $\mathcal{M}(T_{\omega+\omega})$.
\varepsilonnd{Lemma}
\betaegin{proof}
Suppose $t\in K(\kappa^+,\omega+\omega+2)$. Let $M$ be the set of all leaves of $t$, i.e. elements on level
$\omega+\omega$. For all $\alpha<\omega+\omega$ and $a,b\in M$, set $(a,b)\in E_\alpha^M$ if and only if there exists $x\in t$
on level $\alpha$ with $x<a$ and $x<b$. Then $t\mapsto M$ defines a map
$$F\colon K(\kappa^+,\omega+\omega+2)\to \mathcal{M}(T_{\omega+\omega}).$$
Clearly if trees $t$ and $t'$
are isomorphic, then so are $F(t)$ and $F(t')$. Suppose $F(t)$ and $F(t')$ are isomorphic
via the isomorphism~$g\colon F(t)\to F(t')$.
This induces a bijection $f$ from the leaves of $t$ to the leaves of $t'$ which preserves
the pairwise splitting levels of the branches. If $a$ and $b$ are leaves, denote by $s(a,b)$
the smallest $\alpha$ such that there is no $x$ on level $\alpha$ with $x<a$ and $x<b$.
Then the above can be written $s(f(a),f(b))=s(a,b)$ $(*)$.
Now we can extend $f$
to the whole tree using this information as follows. Let $x\in t$ and let $a$ be any leaf above $x$.
Such exists by the definition of $K(\kappa^+,\omega+\omega+2)$. Let $f(x)$ be the unique element in $t'$ below
$f(a)$ which is on the same level as $x$ is in $t$. Then $f$ is well defined: if $a$ and $b$ are
two different leaves above $x$, then $s(a,b)>\alpha$, where $\alpha$ is the level of $x$, so by $(*)$ we have
$s(f(a),f(b))>\alpha$ and so $f(x)$ is independent on which branch is used.
Deciding whether an element $x\in t$ is a leaf or not requires only countable information
and that is why the described reduction is continuous.
Let us now define a continuous reduction $G$ from $T_{\omega+\omega}$ to $K(\kappa^+,\omega+\omega+2)$.
Suppose $M$ is a model of $T_{\omega+\omega}$.
Let $E_{\omega+\omega}^M$ be the identity relation on $\omegaperatorname{dom} M$: $(a,b)\in E_{\omega+\omega}^M$ if and only
if $a=b$. Let
$G(M)=\mathcal{C}up_{\alpha\lambdae \omega+\omega}\omegaperatorname{dom} M/E_\alpha$ and for $x,y\in G(M)$
let $x<y$, if $y\sigmaubset x$.
Clearly $G(M)\in K(\kappa^+,\omega+\omega+2)$ and in fact
$G(F(t))=t$ for all $t\in K(\kappa^+,\omega+\omega+2)$ which implies the rest.
Using this construction it is easy to see that if player $\textrm{\betaf I\hspace{-1pt}I}$ has a winning strategy in
$\mathcal{E}F^\kappa_{\lambda\cdot (\omega+\omega+1)}(M,M')$ for $M,M'\in \mathcal{M}(T_{\omega+\omega})$, then
she has a winning strategy also in the game $\mathcal{E}F^\kappa_{\lambda\cdot(\omega+\omega+1)}(G(M),G(M'))$. So
if this implies that $G(M)\cong G(M')$ (and it does under the assumptions of Fact~\ref{fact}),
then it also implies~$M\cong M'$.
\varepsilonnd{proof}
\betaegin{Cor}[$V=L$, $\kappa=\lambda^+$, $\lambda^\omega=\lambda$]\lambdaanglebel{thm:Stable1}
$\omegaperatorname{ISO}(\kappa,T_{\omega+\omega})$ is ${\Sigma_1^1}$-complete.
\varepsilonnd{Cor}
\betaegin{proof}
By the lemma above it is sufficient to look at the trees in the class $K(\kappa^+,\omega+\omega+2)$
and the result follows from Theorem~\ref{thm:Complete2}.
\varepsilonnd{proof}
A similar result to Theorem \ref{thm:Complete4} for computable reductions has been observed in~\cite{FFKMM}:
\betaegin{Thm}[ZFC, $\kappa^{<\kappa}=\kappa>\omega$]\lambdaanglebel{thm:Complete4}
Let ${\omegaperatorname{DLO}}$ be the theory of dense linear orderings without end points.
Then $\omegaperatorname{ISO}(\kappa,{\omegaperatorname{DLO}})$ is $S_\kappa$-complete.
\varepsilonnd{Thm}
\betaegin{proof}
It was proved in \cite{FrSt} that the isomorphism relation
on all countable binary structures is reducible to countable linear orderings.
The same proof works for $\kappa>\omega$.
Then we embed all linear orders into dense linear orders
by replacing each point by the ordering $\varepsilonta+\mathbb{Q}+\varepsilonta$, where $\varepsilonta$ is the saturated ${\omegaperatorname{DLO}}$ of
size $\kappa$ and $\mathbb{Q}$ is the countable saturated~${\omegaperatorname{DLO}}$.
\varepsilonnd{proof}
\betaegin{Thm}[\cite{HK}]\lambdaanglebel{thm:DLONotBorelSt}
Suppose $\kappa^+=2^{\kappa}$ and $\kappa^{<\kappa}=\kappa$. Then there exists a $<\kappa$-closed, $\kappa^+$-c.c. forcing
$\mathbb{P}$ which forces that $\omegaperatorname{ISO}(\kappa,{\omegaperatorname{DLO}})$ is not $\mathcal{B}orel^*$ and at
the same time ${\Delta_1^1}\sigmaubsetneq \mathcal{B}orel^*$. \qed
\varepsilonnd{Thm}
\betaegin{Cor}\lambdaanglebel{cor:StableNotCom}
Suppose $\kappa^+=2^\kappa$, $\kappa^{<\kappa}=\kappa=\lambda^+$ and $\lambda^{<\lambda}=\lambda$.
Then there is a $<\kappa$-closed $\kappa^+$-c.c. forcing which forces that
$\omegaperatorname{ISO}(\kappa,T_{\omega+\omega})$
is not $S_\kappa$-complete, in particular not ${\Sigma_1^1}$-complete.
\varepsilonnd{Cor}
\betaegin{proof}
By Theorem \ref{thm:DLONotBorelSt} there is a $<\kappa$-closed $\kappa^+$-c.c. forcing which forces
that $\omegaperatorname{ISO}(\kappa,{\omegaperatorname{DLO}})$ is not $\mathcal{B}orel^*$. This forcing preserves cardinals and preserves the
fact $\lambda^{<\lambda}=\lambda$ and so also that $I[\kappa]$ is improper.
Thus in the forced model the isomorphism of $T_{\omega+\omega}$
can be characterized by the $\mathcal{E}F$-game of length $\lambda\cdot(\omega+\omega+1)$ by Fact~\ref{fact}.
This implies that $\omegaperatorname{ISO}(\kappa,T_{\omega+\omega})$ is $\mathcal{B}orel^*$
which can be seen in the same way as the $\Leftarrow$-part of
Theorem 4.68 in~\cite{FHK}. Therefore $\omegaperatorname{ISO}(\kappa,{\omegaperatorname{DLO}})$ cannot be reduced to it because
a non-$\mathcal{B}orel^*$ relation cannot be Borel-reduced to a $\mathcal{B}orel^*$ equivalence relation.
To see this let $f\colon \kappa^\kappa\to\kappa^\kappa$ be a Borel map and let $B\sigmaubset \kappa^\kappa$ be a
Borel* set. It is sufficient to show that the inverse image of $B$ is also Borel*.
First note that in the definition of the Borel* sets the basic open sets can
be replaced by Borel sets the definition remaining equivalent; let us call such
Borel*-codes \varepsilonmph{extended Borel*-codes}. So then take the
Borel*-code $(t,h)$ of $B$ and let $(t,k)$ be an extended Borel*-code with the same tree
$t$ and $k(b)=f^{-1}h(b)$ for all leaves $b$ and otherwise $k$ gets the same values as
$h$. Now it is easy to see that $(t,k)$ is an extended Borel*-code for~$f^{-1}B$.
\varepsilonnd{proof}
\betaibliography{ref}{}
\betaibliographystyle{amsalpha}
\varepsilonnd{document} |
\begin{document}
\title{\LARGE \bf A Pressure Associated with a Weak Solution to
the Navier--Stokes Equations with Navier's Boundary Conditions}
\author{Ji\v{r}\'{\i} Neustupa, \ \v{S}\'arka Ne\v{c}asov\'a, \
Petr Ku\v{c}era \footnote{Authors' address: Czech Academy of
Sciences, Institute of Mathematics, \v{Z}itn\'a 25, 115 67 Praha
1, Czech Re\-pub\-lic, e--mails: [email protected],
[email protected], [email protected]}}
\date{}
\maketitle
\begin{abstract}
We show that if $\mathbf{u}$ is a weak solution to the Navier--Stokes
initial--boundary value problem with Navier's slip boundary
conditions in $Q_T:=\Omega\times(0,T)$, where $\Omega$ is a domain
in ${\mathbb R}^3$, then an associated pressure $p$ exists as a
distribution with a certain structure. Furthermore, we also show
that if $\Omega$ is a ``smooth'' domain in ${\mathbb R}^3$ then the
pressure is represented by a function in $Q_T$ with a certain rate
of integrability. Finally, we study the regularity of the pressure
in sub-domains of $Q_T$, where $\mathbf{u}$ satisfies Serrin's
integrability conditions.
\end{abstract}
\noindent
{\it AMS math.~classification (2010):} \
\noindent {\it Keywords:} \ Navier--Stokes equations, Navier's slip
boundary conditions, weak solutions, associated pressure,
regularity.
\section{Introduction} \label{S1}
{\bf 1.1. The Navier--Stokes initial--boundary value problem with
Navier's boundary conditions.} \ Let $T>0$ and $\Omega$ be a
locally Lipschitz domain in ${\mathbb R}^3$, satisfying the condition
\begin{list}{}
{\setlength{\topsep}{2pt}
\setlength{\itemsep}{1pt}
\setlength{\leftmargin}{18pt}
\setlength{\rightmargin}{0pt}
\setlength{\labelwidth}{10pt}}
\item[(i)]
{\it there exists a sequence of bounded Lipschitz domains
$\Omega_1\subseteq\Omega_2\subseteq\dots$ such that
$\Omega=\bigcup_{n=1}^{\infty}\Omega_n$ and
$(\partial\Omega_n\cap\Omega)\subset\{\mathbf{x}\in{\mathbb R}^3;\ |\mathbf{x}|\geq
n\}$ for all $n\in{\mathbb N}$.}
\end{list}
\noindent
Note that condition (i) is automatically satisfied e.g.~if
$\Omega={\mathbb R}^3$ or $\Omega$ is a half-space in ${\mathbb R}^3$ or $\Omega$ is
a bounded or exterior Lipschitz domain in ${\mathbb R}^3$. Put
$Q_T:=\Omega\times(0,T)$ and $\Gamma_T:=
\partial\Omega\times(0,T)$. We deal with the Navier--Stokes system
\begin{align}
\partial_t\mathbf{u}+\mathbf{u}\cdot\nabla\mathbf{u}+\nabla p\ &=\ \nu\Delta\mathbf{u}+\mathbf{f} &&
\mbox{in}\ Q_T, \label{1.1} \\
\mathrm{div}\,\mathbf{u}\ &=\ 0 && \mbox{in}\ Q_T \label{1.2}
\end{align}
with the slip boundary conditions
\begin{equation}
\mbox{a)} \quad \mathbf{u}\cdot\mathbf{n}=0, \qquad \mbox{b)} \quad
[{\mathbb T}d(\mathbf{u})\cdot\mathbf{n}]_{\tau}+\gamma\mathbf{u}=\mathbf{0} \qquad \mbox{on}\
\Gamma_T \label{1.3}
\end{equation}
and the initial condition
\begin{equation}
\mathbf{u}\, \bigl|_{t=0} \bigr.\ =\ \mathbf{u}_0. \label{1.4}
\end{equation}
Equations (\ref{1.1}), (\ref{1.2}) describe the motion of a
viscous incompressible fluid in domain $\Omega$ in the time
interval $(0,T)$. The unknowns are $\mathbf{u}$ (the velocity) and $p$
(the pressure). Factor $\nu$ in equation (\ref{1.1}) denotes the
kinematic coefficient of viscosity (it is supposed to be a
positive constant) and $\mathbf{f}$ denotes an external body force. The
outer normal vector field on $\Omega$ is denoted by $\mathbf{n}$,
${\mathbb T}d(\mathbf{u})$ denotes the dynamic stress tensor,
$-{\mathbb T}d(\mathbf{u})\cdot\mathbf{n}$ is the force with which the fluid acts on
the boundary of $\Omega$ (we put the minus sign in front of
${\mathbb T}d(\mathbf{u})\cdot\mathbf{n}$ because $\mathbf{n}$ is the outer normal vector
and we express the force acting on $\partial\Omega$ from the
interior of $\Omega$), subscript $\tau$ denotes the tangential
component and $\gamma$ (which is supposed to be a nonnegative
constant) is the coefficient of friction between the fluid and the
boundary of $\Omega$. The density of the fluid is supposed to be
constant and equal to one. In an incompressible Newtonian fluid,
the dynamic stress tensor satisfies
${\mathbb T}d(\mathbf{u})=2\nu\hbox to 0.7pt{}{\mathbb D}(\mathbf{u})$, where the rate of deformation
tensor ${\mathbb D}(\mathbf{u})$ equals $(\nabla\mathbf{u})_s$ (the symmetric part of
$\nabla\mathbf{u}$).
Equations (\ref{1.1}), (\ref{1.2}) are mostly studied together
with the no--slip boundary condition
\begin{equation}
\mathbf{u}\ =\ \mathbf{0} \label{1.5}
\end{equation}
on $\Gamma_T$. However, an increasing attention in recent years
has also been given to boundary conditions (\ref{1.3}), which have
a good physical sense. While condition (\ref{1.3}a) expresses the
impermeability of $\partial\Omega$, condition (\ref{1.3}b)
expresses the requirement that the tangential component of the
force with which the fluid acts on the boundary be proportional to
the tangential velocity. Conditions (\ref{1.3}) are mostly called
Navier's boundary conditions, because they were proposed by
H.~Navier in the first half of the 19th century.
\noindent
{\bf 1.2. Briefly on the qualitative theory of the problem
(\ref{1.1})--(\ref{1.4}).} \ As to the qualitative theory for the
problem (\ref{1.1})--(\ref{1.4}), it is necessary to note that it
is not at the moment so elaborated as in the case of the no-slip
boundary condition (\ref{1.5}). Nevertheless, the readers can find
the definition of a weak solution to the problem
(\ref{1.1})--(\ref{1.4}) and the proof of the global in time
existence of a weak solution e.g.~in the papers \cite{ChQi} (with
$\mathbf{f}=\mathbf{0}$), \cite{NePe2} (in a time-varying domain $\Omega$)
and \cite{Saal} (in a half-space). We repeat the definition in
section \ref{S3}. Theorems on the local in time existence of a
strong solution are proven e.g.~in \cite{ChQi} (for
$\mathbf{f}=\mathbf{0}$) and \cite{KuNe} (in a smooth bounded domain
$\Omega$). Steady problems are studied in \cite{AmRe1} and
\cite{AmRe2}.
\noindent
{\bf 1.3. On the contents and results of this paper.} \ We shall
see in section \ref{S3} that the definition of a weak solution to
the problem (\ref{1.1})--(\ref{1.4}) does not explicitly contain
the pressure. (This situation is well known from the theory of the
Navier--Stokes equations with the no--slip boundary condition
(\ref{1.5}).) This is also why we usually understand, under a
``weak solution'', only the velocity $\mathbf{u}$ and not the pair
$(\mathbf{u},p)$. There arises a question whether one can naturally
assign some pressure $p$ to a weak solution $\mathbf{u}$. It is known
from the theory of the Navier--Stokes equations with the no--slip
boundary condition (\ref{1.5}) that the pressure, associated with
a weak solution, generally exists only as a distribution in $Q_T$.
(See \cite{Li}, \cite{Te}, \cite{Si}, \cite{Ga2}, \cite{So},
\cite{Wo} and \cite{Ne2}.) The distribution is regular (i.e.~it
can be identified with a function with some rate of integrability
in $Q_T$) if domain $\Omega$ is ``smooth'', see \cite{SoWa},
\cite{GiSo} and \cite{Ne2}. In section \ref{S4} of this paper, we
show that one can naturally assign a pressure, as a distribution,
to a weak solution to the Navier--Stokes equations with Navier's
boundary conditions (\ref{1.3}), too. Moreover, we show in section
\ref{S4} that the associated pressure is not just a distribution,
satisfying together with the weak solution $\mathbf{u}$ equations
(\ref{1.1}), (\ref{1.2}) in the sense of distributions in $Q_T$
(where the distributions are applied to test functions from
$\mathbf{C}^{\infty}_0(Q_T)$), but that it is a distribution with a
certain structure, which can be applied to functions from
$\mathbf{C}^{\infty}(\overline{Q_T})$ with a compact support in
$\overline{\Omega}\times(0,T)$ and with the normal component equal
to zero on $\Gamma_T$. In section \ref{S5}, we show that if domain
$\Omega$ is smooth and bounded then the associated pressure is a
function with a certain rate of integrability in $Q_T$. Finally,
in section \ref{S6}, we study the regularity of the associated
pressure in a sub-domain $\Omega'\times(t_1,t_2)$ of $Q_T$, where
$\mathbf{u}$ satisfies Serrin's integrability conditions. We shall see
that the regularity depends on boundary conditions, satisfied by
the velocity on $\Gamma_T$.
\section{Notation and auxiliary results} \label{S2}
{\bf 2.1. Notation.} \ We use the following notation for functions,
function spaces, dual spaces, etc.:
\begin{list}{$\circ$}
{\setlength{\topsep}{2pt}
\setlength{\itemsep}{1pt}
\setlength{\leftmargin}{14pt}
\setlength{\rightmargin}{0pt}
\setlength{\labelwidth}{6pt}}
\item
$\Omega_0\subset\subset\Omega$ means that $\Omega_0$ is a bounded
domain in ${\mathbb R}^3$ such that $\overline{\Omega_0}\subset\Omega$.
\item
Vector functions and spaces of vector functions are denoted by
boldface letters.
\item
$\bfC^{\infty}_{0,\sigma}(\Omega)$ denotes the linear space of infinitely
differentiable divergence-free vector functions in $\Omega$, with
a compact support in $\Omega$.
\item
Let $1<q<\infty$. We denote by $\bfL_{\tau,\sigma}^q(\Omega)$ the closure of
$\bfC^{\infty}_{0,\sigma}(\Omega)$ in $\mathbf{L}^q(\Omega)$. The subscript $\tau$ means
that functions from $\bfL_{\tau,\sigma}^q(\Omega)$ have the normal component on
$\partial\Omega$ equal to zero in a certain weak sense of traces
and they are therefore tangential on $\partial\Omega$. The
subscript $\sigma$ expresses the fact that functions from
$\bfL_{\tau,\sigma}^q(\Omega)$ are divergence--free in $\Omega$ in the sense of
distributions. (See e.g.~\cite{Ga1} for more information.)
\item
Put $\mathbf{G}_{q}(\Omega):=\{\nabla\psi\in\mathbf{L}^{q} (\Omega);\ \psi\in
W^{1,q}_{\rm loc}(\Omega)\}$. $\mathbf{G}_{q}(\Omega)$ is a closed
subspace of $\mathbf{L}^{q}(\Omega)$, see \cite[Exercise III.1.2] {Ga1}.
\item
$\bfW_{\tau}^{1,q}(\Omega):=\{\mathbf{v}\in\mathbf{W}^{1,q}(\Omega);\,
\mathbf{v}\cdot\mathbf{n}=0\ \mbox{a.e.~on}\ \partial\Omega\}$, \\ [3pt]
$\bfW_{\tau}c^{1,q}(\Omega):=\bigl\{ \mathbf{v}arphi\in\bfW_{\tau}^{1,q}(\Omega)$,
$\mathrm{supp}\,\mathbf{v}arphi$ is a compact set in ${\mathbb R}^3\bigr\}$, \\ [3pt]
$\bfW_{\tau}s^{1,q}(\Omega):=\mathbf{W}^{1,q}(\Omega)\cap\bfL_{\tau,\sigma}^q(\Omega)\equiv
\bfW_{\tau}^{1,q}(\Omega)\cap\bfL_{\tau,\sigma}^q(\Omega)$, \\ [3pt]
$\bfW_{\tau}sc^{1,q}(\Omega):=\bfW_{\tau}s^{1,q}(\Omega)\cap\bfW_{\tau}c^{1,q}(\Omega)$.
\item
The norms in $L^q(\Omega)$ and in $\mathbf{L}^q(\Omega)$ are denoted by
$\|\, .\, \|_q$. The norms in $W^{k,q}(\Omega)$ and in
$\mathbf{W}^{k,q}(\Omega)$ (for $k\in{\mathbb N}$) are denoted by $\|\, .\,
\|_{k,q}$. If the considered domain differs from $\Omega$ then we
use e.g.~the notation $\|\, .\, \|_{q;\, \Omega'}$ or $\|\, .\,
\|_{k,q;\, \Omega'}$, etc. The scalar products in $L^2(\Omega)$
and in $\mathbf{L}^2(\Omega)$ are denoted by $(\, .\, ,\, .\, )_2$ and
the scalar products in $W^{1,2}(\Omega)$ and in
$\mathbf{W}^{1,2}(\Omega)$ are denoted by $(\, .\, ,\, .\, )_{1,2}$.
\item
The conjugate exponent is denoted by prime, so that
e.g.~$q'=q/(q-1)$. $\bfW_{\tau}^{-1,q'}(\Omega)$ denotes the dual space to
$\bfW_{\tau}^{1,q}(\Omega)$ and $\bfW_{\tau}s^{-1,q'}(\Omega)$ denotes the dual
space to $\bfW_{\tau}s^{1,q}(\Omega)$. The norm in $\bfW_{\tau}^{-1,q'}(\Omega)$,
respectively $\bfW_{\tau}s^{-1,q'}(\Omega)$, is denoted by $\|\, .\,
\|_{-1,q'}$, respectively by $\|\, .\, \|_{-1,q';\, \sigma}$.
\item
The duality between elements of $\bfW_{\tau}^{-1,q'}(\Omega)$ and
$\bfW_{\tau}^{1,q}(\Omega)$ is denoted by $\langle\, .\, ,\, .\,
\rangle_{\tau}$ and the duality between elements of
$\bfW_{\tau}s^{-1,q'}(\Omega)$ and $\bfW_{\tau}s^{1,q}(\Omega)$ is denoted by
$\langle\, .\, ,\, .\, \rangle_{\tau,\sigma}$.
\item
$\bfW_{\tau}s^{1,q}(\Omega)^{\perp}$ denotes the space of annihilators of
$\bfW_{\tau}s^{1,q}(\Omega)$ in $\bfW_{\tau}^{-1,q'}(\Omega)$, i.e.~the space
$\bigl\{\mathbf{g}\in\bfW_{\tau}^{-1,q'}(\Omega)$;
$\forall\mathbf{v}arphi\in\bfW_{\tau}s^{1,q}(\Omega):
\langle\mathbf{g},\mathbf{v}arphi\rangle_{\tau}=0\bigr\}$.
\end{list}
\noindent
{\bf 2.2. $\mathbf{L}^{q'}(\Omega)$ and $\bfL_{\tau,\sigma}^{q'}(\Omega)$ as subspaces
of $\bfW_{\tau}^{-1,q'}(\Omega)$ and $\bfW_{\tau}s^{-1,q'}(\Omega)$,
respectively.} \ The Lebesgue space $\mathbf{L}^{q'}(\Omega)$ can be
identified with a subspace of $\bfW_{\tau}^{-1,q'}(\Omega)$ so that if
$\mathbf{g}\in\mathbf{L}^{q'}(\Omega)$ then
\begin{equation}
\langle\mathbf{g},\mathbf{v}arphi\rangle_{\tau}\ :=\
\int_{\Omega}\mathbf{g}\cdot\mathbf{v}arphi\; \mathrm{d}\mathbf{x} \label{2.1}
\end{equation}
for all $\mathbf{v}arphi\in \bfW_{\tau}^{1,q}(\Omega)$. Similarly,
$\bfL_{\tau,\sigma}^{q'}(\Omega)$ can be identified with a subspace of
$\bfW_{\tau}s^{-1,q'}(\Omega)$ so that if $\mathbf{g}\in\bfL_{\tau,\sigma}^{q'}(\Omega)$ then
\begin{equation}
\langle\mathbf{g},\mathbf{v}arphi\rangle_{\tau,\sigma}\ :=\
\int_{\Omega}\mathbf{g}\cdot\mathbf{v}arphi\; \mathrm{d}\mathbf{x} \label{2.2}
\end{equation}
for all $\mathbf{v}arphi\in\bfW_{\tau}s^{1,q}(\Omega)$. Thus, if
$\mathbf{g}\in\bfL_{\tau,\sigma}^{q'}(\Omega)$ and $\mathbf{v}arphi\in\bfW_{\tau}s^{1,q}(\Omega)$
then the dualities $\langle\mathbf{g},\mathbf{v}arphi\rangle_{\tau}$ and
$\langle\mathbf{g},\mathbf{v}arphi\rangle_{\tau,\sigma}$ coincide.
Note that if $\, \mathbf{g}\in\mathbf{L}^{q'}(\Omega)$ then the integral on
the right hand side of (\ref{2.1}) also defines a bounded linear
functional on $\bfW_{\tau}s^{1,q}(\Omega)$. This, however, does not mean
that $\mathbf{L}^{q'}(\Omega)$ can be identified with a subspace of
$\bfW_{\tau}s^{-1,q'}(\Omega)$. The reason is, for instance, that the
spaces $\mathbf{L}^{q'}(\Omega)$ and $\bfW_{\tau}s^{-1,q'}(\Omega)$ do not have
the same zero element. (If $\psi$ is a non-constant function in
$C^{\infty}_0(\Omega)$ then $\nabla\psi$ is a non-zero element of
$\mathbf{L}^{q'}(\Omega)$, but it induces the zero element of
$\bfW_{\tau}s^{-1,q'}(\Omega)$.)
\noindent
{\bf 2.3. Definition and some properties of operator ${\cal P}s{q'}$.}
\ $\bfW_{\tau}s^{1,q}(\Omega)$ is a closed subspace of
$\bfW_{\tau}^{1,q}(\Omega)$. If $\mathbf{g}\in\bfW_{\tau}^{-1,q'}(\Omega)$ (i.e.~$\mathbf{g}$
is a bounded linear functional on $\bfW_{\tau}^{1,q}(\Omega)$) then we
denote by ${\cal P}s{q'}\mathbf{g}$ the element of $\bfW_{\tau}s^{-1,q'}(\Omega)$,
defined by the equation
\begin{displaymath}
\langle {\cal P}s{q'}\mathbf{g},\mathbf{v}arphi\rangle_{\tau,\sigma}\ :=\
\langle\mathbf{g},\mathbf{v}arphi\rangle_{\tau} \qquad \mbox{for all}\
\mathbf{v}arphi\in\bfW_{\tau}s^{1,q}(\Omega).
\end{displaymath}
Obviously, ${\cal P}s{q'}$ is a linear operator from
$\bfW_{\tau}^{-1,q'}(\Omega)$ to $\bfW_{\tau}s^{-1,q'}(\Omega)$, whose domain is
the whole space $\bfW_{\tau}^{-1,q'}(\Omega)$.
\begin{lemma} \label{L2.1}
The operator ${\cal P}s{q'}$ is bounded, its range is
$\bfW_{\tau}s^{-1,q'}(\Omega)$ and ${\cal P}s{q'}$ is not one-to-one.
\end{lemma}
\begin{proof} \rm
The boundedness of operator ${\cal P}s{q'}$ directly follows from the
definition of the norms in the spaces $\bfW_{\tau}^{-1,q'}(\Omega)$,
$\bfW_{\tau}s^{-1,q'}(\Omega)$ and the definition of ${\cal P}s{q'}$.
Let $\mathbf{g}\in\bfW_{\tau}s^{-1,q'}(\Omega)$. There exists (by the
Hahn-Banach theorem) an extension of $\mathbf{g}$ from
$\bfW_{\tau}s^{1,q}(\Omega)$ to $\bfW_{\tau}^{1,q}(\Omega)$, which we denote by
$\widetilde{\mathbf{g}}$. The extension is an element of
$\bfW_{\tau}^{-1,q'}(\Omega)$, satisfying
$\|\widetilde{\mathbf{g}}\|_{-1,q'}=\|\mathbf{g}\|_{-1,q';\, \sigma}$ and
\begin{displaymath}
\langle \widetilde{\mathbf{g}},\mathbf{v}arphi\rangle_{\tau}\ =\
\langle\mathbf{g},\mathbf{v}arphi\rangle_{\tau,\sigma}
\end{displaymath}
for all $\mathbf{v}arphi\in\bfW_{\tau}s^{1,q}(\Omega)$. This shows that
$\mathbf{g}={\cal P}s{q'}\widetilde{\mathbf{g}}$. Consequently, the range of
${\cal P}s{q'}$ is the whole space $\bfW_{\tau}s^{-1,q'}(\Omega)$.
Finally, considering $\mathbf{g}=\nabla\psi$ for $\psi\in
C^{\infty}_0(\Omega)$, we get
\begin{displaymath}
\langle {\cal P}s{q'}\mathbf{g},\mathbf{v}arphi\rangle_{\tau,\sigma}\ =\ \langle
\mathbf{g},\mathbf{v}arphi \rangle_{\tau}\ =\ \int_{\Omega}\nabla
\psi\cdot\mathbf{v}arphi\; \mathrm{d}\mathbf{x}\ =\ 0
\end{displaymath}
for all $\mathbf{v}arphi\in\bfW_{\tau}s^{1,q}(\Omega)$. This shows that the
operator ${\cal P}s{q'}$ is not one-to-one.
\end{proof}
\noindent
{\bf 2.4. The relation between operator ${\cal P}s{q'}$ and the
Helmholtz projection.} \ If each function
$\mathbf{g}\in\mathbf{L}^{q'}(\Omega)$ can be uniquely expressed in the form
$\mathbf{g}=\mathbf{v}+\nabla\psi$ for some $\mathbf{v}\in\bfL_{\tau,\sigma}^{q'}(\Omega)$ and
$\nabla\psi\in\mathbf{G}_{q'}(\Omega)$, which is equivalent to the
validity of the decomposition
\begin{equation}
\mathbf{L}^{q'}(\Omega)\ =\ \bfL_{\tau,\sigma}^{q'}(\Omega)\oplus\mathbf{G}_{q'}(\Omega),
\label{2.1*}
\end{equation}
then we write $\mathbf{v}=\Ps{q'}\mathbf{g}$. Decomposition (\ref{2.1*}) is
called the {\it Helmholtz decomposition} and the operator
$\Ps{q'}$ is called the {\it Helmholtz projection.} The existence
of the Helmholtz decomposition depends on exponent $q'$ and the
shape of domain $\Omega$. If $q'=2$ then the Helmholtz
decomposition exists on an arbitrary domain $\Omega$ and $\Ps{2}$,
respectively $I-\Ps{2}$, is an orthogonal projection of
$\mathbf{L}^2(\Omega)$ onto $\bfL_{\tau,\sigma}^2(\Omega)$, respectively onto
$\mathbf{G}_2(\Omega)$. (See e.g.~\cite{Ga1}.) If $q'\not=2$ then
various sufficient conditions for the existence of the Helmholtz
decomposition can be found e.g.~in \cite{FaKoSo}, \cite{FuMo},
\cite{Ga1}, \cite{GeShe}, \cite{KoYa} and \cite{SiSo}.
Further on in this paragraph, we assume that the Helmholtz
decomposition of $\mathbf{L}^{q'}(\Omega)$ exists. Let
$\mathbf{g}\in\mathbf{L}^{q'}(\Omega)$. Treating $\mathbf{g}$ as an element of
$\bfW_{\tau}^{-1,q'}(\Omega)$ in the sense of paragraph 2.2, we have
$\langle{\cal P}s{q'}\mathbf{g},\mathbf{v}arphi\rangle_{\tau,\sigma}= \langle\mathbf{g},
\mathbf{v}arphi\rangle_{\tau}$ for all $\mathbf{v}arphi\in\bfW_{\tau}s^{1,q}(\Omega)$.
Writing $\mathbf{g}=\Ps{q'}\mathbf{g}+(I-\Ps{q'})\mathbf{g}$, we also have
\begin{displaymath}
\langle\mathbf{g}, \mathbf{v}arphi\rangle_{\tau}\ =\
\bigl\langle\Ps{q'}\mathbf{g}+(I-\Ps{q'})\mathbf{g},\mathbf{v}arphi\hbox to 0.7pt{}angle_{\tau}\ =\
\bigl\langle\Ps{q'}\mathbf{g},\mathbf{v}arphi\hbox to 0.7pt{}angle_{\tau}
\end{displaymath}
for all $\mathbf{v}arphi\in\bfW_{\tau}s^{1,q}(\Omega)$, because
$(I-\Ps{q'})\mathbf{g}\in\mathbf{G}_{q'}(\Omega)$. Furthermore,
\begin{displaymath}
\bigl\langle\Ps{q'}\mathbf{g},\mathbf{v}arphi\hbox to 0.7pt{}angle_{\tau}\ =\
\bigl\langle\Ps{q'}\mathbf{g},\mathbf{v}arphi\hbox to 0.7pt{}angle_{\tau,\sigma},
\end{displaymath}
because $\Ps{q'}\mathbf{g}\in\bfL_{\tau,\sigma}^{q'}(\Omega)$,
$\mathbf{v}arphi\in\bfW_{\tau}s^{1,q}(\Omega)$ and the formulas (\ref{2.1}) and
(\ref{2.2}) show that the dualities
$\langle\Ps{q'}\mathbf{g},\mathbf{v}arphi\rangle_{\tau}$ and
$\langle\Ps{q'}\mathbf{g},\mathbf{v}arphi\rangle_{\tau,\sigma}$ are expressed
by the same integrals. Hence
$\langle{\cal P}s{q'}\mathbf{g},\mathbf{v}arphi\rangle_{\tau,\sigma}$ coincides
with $\langle\Ps{q'}\mathbf{g},\mathbf{v}arphi\rangle_{\tau,\sigma}$ for all
$\mathbf{v}arphi\in\bfW_{\tau}s^{1,q}(\Omega)$. Consequently, ${\cal P}s{q'}\mathbf{g}$ and
$\Ps{q'}\mathbf{g}$ represent the same element of
$\bfW_{\tau}s^{-1,q'}(\Omega)$. As $\Ps{q'}\mathbf{g}\in\bfL_{\tau,\sigma}^{q'}(\Omega)$,
${\cal P}s{q'}\mathbf{g}$ can also be considered to be an element of
$\bfL_{\tau,\sigma}^{q'}(\Omega)$, which induces a functional in
$\bfW_{\tau}s^{-1,q'}(\Omega)$ in the sense of paragraph 2.2. Thus, {\it
the Helmholtz projection $\Ps{q'}$ coincides with the restriction
of ${\cal P}s{q'}$ to $\mathbf{L}^{q'}(\Omega)$.}
\noindent
{\bf 2.5. More on the space $\bfW_{\tau}s^{1,q}(\Omega)^{\perp}$.} \
Identifying $\mathbf{G}_{q'}(\Omega)$ with a subspace of
$\bfW_{\tau}^{-1,q'}(\Omega)$ in the sense of paragraph 2.2, {\it we
denote by ${}^{\perp}\mathbf{G}_{q'}(\Omega)$ the linear space
$\bigl\{\mathbf{v}arphi\in\bfW_{\tau}^{1,q}(\Omega)$;
$\forall\hbox to 0.7pt{}\mathbf{g}\in\mathbf{G}_{q'}(\Omega):
\langle\mathbf{g},\mathbf{v}arphi\rangle_{\tau}=0\bigr\}$.} Using \cite[Lemma
III.2.1]{Ga1}, we deduce that
$\bfW_{\tau}s^{1,q}(\Omega)={}^{\perp}\mathbf{G}_{q'}(\Omega)$. Hence
$\bfW_{\tau}s^{1,q}(\Omega)^{\perp}=(^{\perp}\mathbf{G}_{q'}(\Omega))^{\perp}$
and applying Theorem 4.7 in \cite{Ru}, we observe that
$\bfW_{\tau}s^{1,q}(\Omega)^{\perp}$ is a closure of $\mathbf{G}_{q'}(\Omega)$
in the weak-$*$ topology of $\bfW_{\tau}^{-1,q'}(\Omega)$. The next lemma
tells us more on elements of $\bfW_{\tau}s^{1,q}(\Omega)^{\perp}$.
\begin{lemma} \label{L2.2}
Let $\mathbf{F}\in\bfW_{\tau}s^{1,q}(\Omega)^{\perp}$ and
$\Omega_0\subset\subset\Omega$ be a nonempty sub-domain of
$\Omega$. Then there exists a unique $p\in L^{q'}_{loc}(\Omega)$
such that $p\in L^{q'}(\Omega_R)$ for all $R>0$, \
$\int_{\Omega_0}p\; \mathrm{d}\mathbf{x}=0$ \ and
\begin{alignat}{5}
& \|p\|_{q';\, \Omega_R}\ &&\leq\ c(R)\, \|\mathbf{F}\|_{-1,q'} \quad &&
\mbox{for all}\ R>0, \label{2.9*} \\
& \bigl\langle\mathbf{F},\mbox{\boldmath $\psi$}\hbox to 0.7pt{}angle_{\tau}\ &&=\ -\int_{\Omega}p\
\mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x} \quad && \mbox{for all}\
\mbox{\boldmath $\psi$}\in\bfW_{\tau}c^{1,q}(\Omega). \label{2.7*}
\end{alignat}
\end{lemma}
\begin{proof}
Let $\{\Omega_n\}$ be the sequence of domains from condition (i).
We can assume without the loss of generality that
$\Omega_0\subseteq\Omega_1$. Let $n\in{\mathbb N}$. Denote by $L^q_{\rm mv=
0}(\Omega_n)$ the space of all functions from $L^q(\Omega_n)$,
whose mean value in $\Omega_n$ is zero. There exists a bounded
linear operator $\mathfrak{B}: L^q_{\rm mv=0}
(\Omega_n)\to\mathbf{W}^{1,q}_0(\Omega_n)$, such that
\begin{displaymath}
\mathrm{div}\,\mathfrak{B}(g)\ =\ g
\end{displaymath}
for all $g\in L^q_{\rm mv=0}(\Omega_n)$. Operator $\mathfrak{B}$ is often
called the {\it Bogovskij} or {\it Bogovskij--Pileckas} operator.
More information on operator $\mathfrak{B}$, including its construction,
can be found e.g.~in \cite[Sec.~III.3]{Ga1} or in \cite{BoSo}.
Denote by $\bfW_{\tau}^{1,q}(\Omega)_n$, respectively
$\bfW_{\tau}s^{1,q}(\Omega)_n$, the space of all functions from
$\bfW_{\tau}^{1,q}(\Omega)$, respectively from $\bfW_{\tau}s^{1,q}(\Omega)$, that
have a support in $\overline{\Omega_n}$. Let
$\mbox{\boldmath $\psi$}\in\bfW_{\tau}^{1,q}(\Omega)_n$. Then the restriction of
$\mathrm{div}\,\mbox{\boldmath $\psi$}$ to $\Omega_n$ (which we again denote by $\mathrm{div}\,\mbox{\boldmath $\psi$}$
in order to keep a simple notation) belongs to $L^q_{\rm
mv=0}(\Omega_n)$ and $\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})\in\mathbf{W}^{1,q}_0(\Omega_n)$.
Identifying $\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})$ with a function from
$\mathbf{W}^{1,q}_0(\Omega)$ that equals zero in
$\Omega\smallsetminus\Omega_n$, we have
\begin{displaymath}
\mbox{\boldmath $\psi$}\ =\ \mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})+\mathbf{w},
\end{displaymath}
where $\mathbf{w}$ is an element of $\bfW_{\tau}s^{1,q}(\Omega)$, satisfying
$\mathbf{w}=\mbox{\boldmath $\psi$}=\mathbf{z}ero$ in $\Omega\smallsetminus\Omega_n$. Hence
\begin{equation}
\bigl\langle\mathbf{F},\mbox{\boldmath $\psi$}\hbox to 0.7pt{}angle_{\tau}\ =\ \bigl\langle\mathbf{F},\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})
\hbox to 0.7pt{}angle_{\tau}. \label{2.2*}
\end{equation}
As $\mathbf{F}$ is a bounded linear functional on $\bfW_{\tau}^{1,q}(\Omega)$,
vanishing on the subspace $\bfW_{\tau}s^{1,q}(\Omega)$, its restriction to
$\bfW_{\tau}^{1,q}(\Omega)_n$ is an element of $\bfW_{\tau}^{-1,q'}(\Omega)_n$,
vanishing on $\bfW_{\tau}s^{1,q}(\Omega)_n$. Furthermore, identifying
functions from $\bfW_{\tau}^{1,q}(\Omega)_n$ with their restrictions to
$\Omega_n$, we can also consider $\mathbf{F}$ to be an element of
$\mathbf{W}^{-1,q'}_0(\Omega_n)$, vanishing on
$\mathbf{W}^{1,q}_{0,\sigma}(\Omega_n)$. Thus, due to Lemma 1.4 in
\cite{Ne2}, there exists $c(n)>0$ and a unique function $p_n\in
L^{q'}(\Omega_n)$ such that $\int_{\Omega_0}p_n\; \mathrm{d}\mathbf{x}=0$ and
\begin{align}
\|p_n\|_{q';\, \Omega_n}\ &\leq\ c(n)\, \|\mathbf{F}\|_{-1,q';\,
\Omega_n}\ \leq\ c(n)\, \|\mathbf{F}\|_{-1,q'}, \label{2.8*} \\
\bigl\langle\mathbf{F},\mathbf{z}eta\hbox to 0.7pt{}angle_{\Omega_n}\ &=\ -\int_{\Omega_n}p_n\
\mathrm{div}\,\mathbf{z}eta\; \mathrm{d}\mathbf{x} \label{2.3*}
\end{align}
for all $\mathbf{z}eta\in\mathbf{W}^{1,q}_0(\Omega_n)$. Using identity
(\ref{2.3*}) with $\mathbf{z}eta=\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})$, we obtain
\begin{displaymath}
\bigl\langle\mathbf{F},\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})\hbox to 0.7pt{}angle_{\tau}\ \equiv\
\bigl\langle\mathbf{F},\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})\hbox to 0.7pt{}angle_{\Omega_n}\ =\
-\int_{\Omega_n}\! p_n\ \mathrm{div}\,\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})\; \mathrm{d}\mathbf{x}\ =\
-\int_{\Omega_n}\! p_n\ \mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x}.
\end{displaymath}
As the same identities also hold for $n+1$ instead of $n$, we
deduce that $p_{n+1}=p_n$ in $\Omega_n$. Hence we may define
function $p$ in $\Omega$ by the formula $p:=p_n$ in $\Omega_n$ and
we have
\begin{equation}
\bigl\langle\mathbf{F},\mathfrak{B}(\mathrm{div}\,\mbox{\boldmath $\psi$})\hbox to 0.7pt{}angle_{\tau}\ =\ -\int_{\Omega} p\
\mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x}. \label{2.4*}
\end{equation}
If $\mbox{\boldmath $\psi$}\in\bfW_{\tau}c^{1,q}(\Omega)$ then
$\mbox{\boldmath $\psi$}\in\bfW_{\tau}^{1,q}(\Omega)_n$ for sufficiently large $n$ and
(\ref{2.4*}) holds as well. Inequality (\ref{2.9*}) now follows
from (\ref{2.8*}). Identities (\ref{2.2*}) and (\ref{2.4*}) imply
(\ref{2.7*}).
\end{proof}
Note that if $\Omega$ is a bounded Lipschitz domain then the
choice $\Omega_0=\Omega$ is also possible in Lemma \ref{L2.2}.
\section{Three equivalent weak formulations of the Navier--Stokes
initial-boundary value problem (\ref{1.1})--(\ref{1.4})}
\label{S3}
Recall that $\Omega$ is supposed to be a locally Lipschitz domain
in ${\mathbb R}^3$.
\noindent
{\bf 3.1. The 1st weak formulation of the Navier--Stokes IBVP
(\ref{1.1})--(\ref{1.4}).} \ {\it Given $\mathbf{u}_0\in\bfL_{\tau,\sigma}^2(\Omega)$
and $\mathbf{f}\in L^2(0,T$; $\bfW_{\tau}^{-1,2}(\Omega))$. A function $\,
\mathbf{u}\in L^{\infty}(0,T;\ \bfL_{\tau,\sigma}^2(\Omega)) \cap L^2(0,T;\
\bfW_{\tau}s^{1,2}(\Omega))$ is said to be a weak solution to the problem
(\ref{1.1})--(\ref{1.4}) if the trace of $\mathbf{u}$ on $\Gamma_T$ is
in $L^2(0,T$; $\mathbf{L}^2(\partial\Omega))$ and $\mathbf{u}$ satisfies
\begin{align}
\int_0^T & \int_{\Omega}\bigl[-\partial_t\mbox{\boldmath $\phi$}\cdot\mathbf{u}+
\mathbf{u}\cdot\nabla\mathbf{u}\cdot\mbox{\boldmath $\phi$}+2\nu\hbox to 0.7pt{}(\nabla\mathbf{u})_s:
(\nabla\mbox{\boldmath $\phi$})_s\bigr]\, \mathrm{d}\mathbf{x}\, \mathrm{d} t \nonumber \\
& +\int_0^T\int_{\partial\Omega}\gamma\hbox to 0.7pt{}\mathbf{u}\cdot\mbox{\boldmath $\phi$}\; \mathrm{d}
S\, \mathrm{d} t\, =\, \int_0^T\bigl\langle\mathbf{f},\mbox{\boldmath $\phi$}\hbox to 0.7pt{}angle_{\tau}\; \mathrm{d}
t+\int_{\Omega}\mathbf{u}_0\cdot\mbox{\boldmath $\phi$}(.\, ,0)\, \mathrm{d}\mathbf{x}
\label{3.1}
\end{align}
for all vector--functions $\mbox{\boldmath $\phi$}\in C^{\infty}_0\bigl([0,T);\;
\bfW_{\tau}sc^{1,2}(\Omega)\bigr)$.}
Equation (\ref{3.1}) follows from (\ref{1.1}), (\ref{1.2}) if one
formally multiplies equation (\ref{1.1}) by the test function
$\mbox{\boldmath $\phi$}\in C^{\infty}_0\bigl([0,T);\; \bfW_{\tau}sc^{1,2} (\Omega)\bigr)$,
applies the integration by parts and uses the boundary conditions
(\ref{1.3}) and the initial condition (\ref{1.4}). As the integral
of $\nabla p\cdot\mbox{\boldmath $\phi$}$ vanishes, the pressure $p$ does not
explicitly appear in (\ref{3.1}).
On the other hand, if $\mathbf{f}\in\mathbf{L}^2(Q_T)$ and $\mathbf{u}$ is a weak
solution with the additional properties
$\partial_t\mathbf{u}\in\mathbf{L}^2(Q_T)$ and $\mathbf{u}\in L^2(0,T;\,
\mathbf{W}^{2,2}(\Omega))$ then, considering the test functions $\mbox{\boldmath $\phi$}$
in (\ref{3.1}) of the form $\mbox{\boldmath $\phi$}(\mathbf{x},t)=\mathbf{v}arphi(\mathbf{x})\,
\vartheta(t)$ where $\mathbf{v}arphi\in\bfW_{\tau}sc^{1,2}(\Omega)$ and
$\vartheta\in C^{\infty}_0((0,T))$, and applying the backward
integration by parts, one obtains the equation
\begin{displaymath}
\int_{\Omega} \bigl( \partial_t\mathbf{u}+\mathbf{u}\cdot\nabla\mathbf{u}-
\nu\Delta\mathbf{u}-\mathbf{f} \bigr)\cdot\mathbf{v}arphi\; \mathrm{d}\mathbf{x}\ =\ 0
\end{displaymath}
for a.a.~$t\in(0,T)$. As $\bfW_{\tau}sc^{1,2}(\Omega)$ is dense in
$\bfL_{\tau,\sigma}^2(\Omega)$, this equation shows that $\Ps{2}[\partial_t\mathbf{u}
+\mathbf{u}\cdot\nabla\mathbf{u}-\nu\Delta\mathbf{u}- \mathbf{f}]=\mathbf{z}ero$ at a.a.~time
instants $t\in(0,T)$. Consequently, to a.a.~$t\in(0,T)$, there
exists $p\in W^{1,2}_{\rm loc}(\Omega)$ such that $\nabla
p=(I-\Ps{2})[\partial_t\mathbf{u}+ \mathbf{u}\cdot\nabla\mathbf{u}-\nu\Delta\mathbf{u}-
\mathbf{f}]$ and the functions $\mathbf{u}$ and $p$ satisfy equation
(\ref{1.1}) (as an equation in $\mathbf{L}^2(\Omega)$) at a.a.~time
instants $t\in(0,T)$. It follows from the boundedness of
projection $\Ps{2}$ in $\mathbf{L}^2(\Omega)$ and the assumed properties
of functions $\mathbf{u}$ and $\mathbf{f}$ that $\nabla p\in \mathbf{L}^2(Q_T)$.
Considering afterwards the test functions $\mbox{\boldmath $\phi$}$ as in
(\ref{3.1}), and integrating by parts in (\ref{3.1}), we get
\begin{displaymath}
\int_0^T\int_{\Omega}\bigl( \partial_t\mathbf{u}+\mathbf{u}\cdot\nabla\mathbf{u}-
\nu\Delta\mathbf{u}-\mathbf{f} \bigr)\cdot\mbox{\boldmath $\phi$}\; \mathrm{d}\mathbf{x}\, \mathrm{d} t+
\int_0^T\int_{\partial\Omega} \bigl( [{\mathbb T}d(\mathbf{u})\cdot\mathbf{n}]+
\gamma\mathbf{u} \bigr)\cdot\mbox{\boldmath $\phi$}\; \mathrm{d} S\, \mathrm{d} t\ =\ 0.
\end{displaymath}
The first integral is equal to zero, because the expression in the
parentheses equals $-\nabla p$ a.e.~in $Q_T$ and the integral of
$\nabla p\cdot\mbox{\boldmath $\phi$}$ over $\Omega$ equals zero for
a.a.~$t\in(0,T)$. In the second integral, since both $\mathbf{u}(\, .\,
,t)$ and $\mbox{\boldmath $\phi$}(\, .\, ,t)$ are tangent on $\partial\Omega$, we
can replace $[{\mathbb T}d(\mathbf{u})\cdot\mathbf{n}]+\gamma\mathbf{u}$ by
$[{\mathbb T}d(\mathbf{u})\cdot\mathbf{n}]_{\tau}+\gamma\mathbf{u}$ and we thus obtain
\begin{displaymath}
\int_0^T\int_{\partial\Omega} \bigl(
[{\mathbb T}d(\mathbf{u})\cdot\mathbf{n}]_{\tau}+\gamma\mathbf{u} \bigr)\cdot\mbox{\boldmath $\phi$}\; \mathrm{d}
S\, \mathrm{d} t\ =\ 0.
\end{displaymath}
As this equation holds for all test functions $\mbox{\boldmath $\phi$}\in
C^{\infty}_0\bigl([0,T);\; \bfW_{\tau}sc^{1,2}(\Omega)\bigr)$, we deduce
that $\mathbf{u}$ satisfies the boundary condition (\ref{1.3}b). Recall
that this procedure works only under additional assumptions on
smoothness of the weak solution $\mathbf{u}$ and function $\mathbf{f}$. On a
general level, however, it is not known whether the existing weak
solution is smooth. Nevertheless, we show in subsection 4.4 that
there exists a certain pressure, which can be naturally associated
with the weak solution to (\ref{1.1})--(\ref{1.4}). The pressure
generally exists only as a distribution, see Theorem \ref{T4.2}.
\noindent
{\bf 3.2. The 2nd weak formulation of the Navier-Stokes IBVP
(\ref{1.1})--(\ref{1.4}).} \ We define the operators
${\cal A}:\bfW_{\tau}^{1,2}(\Omega)\to\bfW_{\tau}^{-1,2}(\Omega)$ and
${\cal B}:\bigl[\bfW_{\tau}^{1,2}(\Omega)\bigr]^2\to \bfW_{\tau}^{-1,2}(\Omega)$ by the
equations
\begin{align*}
& \bigl\langle{\cal A}\mathbf{v},\mathbf{v}arphi\hbox to 0.7pt{}angle_{\tau}\ :=\ \int_{\Omega}
2\nu\hbox to 0.7pt{}(\nabla\mathbf{v})_s:(\nabla\mathbf{v}arphi)_s\;
\mathrm{d}\mathbf{x}+\int_{\partial\Omega}\gamma\mathbf{v}\cdot\mathbf{v}arphi\; \mathrm{d} S &&
\mbox{for}\ \mathbf{v},\mathbf{v}arphi\in\bfW_{\tau}^{1,2}(\Omega), \\
& \bigl\langle{\cal B}(\mathbf{v},\mathbf{w}),\mathbf{v}arphi\hbox to 0.7pt{}angle_{\tau}\ :=\
\int_{\Omega} \mathbf{v}\cdot\nabla\mathbf{w}\cdot\mathbf{v}arphi\; \mathrm{d}\mathbf{x} &&
\mbox{for}\ \mathbf{v},\mathbf{w},\mathbf{v}arphi\in\bfW_{\tau}^{1,2}(\Omega).
\end{align*}
By Korn's inequality (see e.g.~\cite[Lemma 4]{SoSc}) and
inequality \cite[(II.4.5), p.~63]{Ga1}, we have $\cn01$
\begin{equation}
\bigl\langle{\cal A}\mathbf{v},\mathbf{v}\hbox to 0.7pt{}angle_{\tau}\ =\ \int_{\Omega}2\nu\,
|(\nabla\mathbf{v})_s|^2\; \mathrm{d}\mathbf{x}+ \int_{\partial\Omega}\gamma\,
|\mathbf{v}|^2\; \mathrm{d} S\ \geq\ \cc01\hbox to 0.7pt{} \nu\, \|\nabla\mathbf{v}\|_2^2.
\label{3.2}
\end{equation}
Furthermore, using the boundedness of the operator of traces from
$\bfW_{\tau}^{1,2}(\Omega)$ to $\mathbf{L}^2(\partial\Omega)$, we can also
deduce that there exists $\cn02\cc02>0$ such that
\begin{equation}
\|{\cal A}\mathbf{v}\|_{-1,2}\ \leq\ \cc02\, \|\nabla\mathbf{v}\|_2
\label{3.3}
\end{equation}
for all $\mathbf{v}\in\bfW_{\tau}^{1,2}(\Omega)$. Thus, ${\cal A}$ is a bounded
one--to--one operator, mapping $\bfW_{\tau}^{1,2}(\Omega)$ into
$\bfW_{\tau}^{-1,2}(\Omega)$. If $k>0$ then the range of ${\cal A}+kI$ is the
whole space $\bfW_{\tau}^{-1,2}(\Omega)$ (by the Lax--Milgram theorem) and
$({\cal A}+kI)^{-1}$ is a bounded operator from $\bfW_{\tau}^{-1,2}(\Omega)$
onto $\bfW_{\tau}^{1,2}(\Omega)$. If $\Omega$ is bounded then the same
statements also hold for $k=0$. The bilinear operator ${\cal B}$
satisfies
\begin{align}
& \|{\cal B}(\mathbf{v},\mathbf{w})\|_{-1,2}\ =\ \sup_{\boldsymbol{\varphi}
\in\bfW_{\tau}^{1,2}(\Omega),\ \boldsymbol{\varphi}\not=\mathbf{z}ero}
\frac{|\hbox to 0.7pt{}\langle {\cal B}(\mathbf{v},\mathbf{w}),\mathbf{v}arphi\rangle_{\tau}\hbox to 0.7pt{}|}
{\|\mathbf{v}arphi\|_{1,2}} \nonumber \\
& \hspace{6pt} =\ \sup_{\boldsymbol{\varphi}\in
\bfW_{\tau}^{1,2}(\Omega),\ \boldsymbol{\varphi}\not=\mathbf{z}ero}
\frac{|(\mathbf{v}\cdot\nabla\mathbf{w},\,\mathbf{v}arphi)_2|}
{\|\mathbf{v}arphi\|_{1,2}}\ \leq \sup_{\boldsymbol{\varphi}\in
\bfW_{\tau}^{1,2}(\Omega),\ \boldsymbol{\varphi}\not=\mathbf{z}ero}
\frac{\|\mathbf{v}\|_2^{1/2}\, \|\mathbf{v}\|_6^{1/2}\, \|\nabla\mathbf{w}\|_2\,
\|\mathbf{v}arphi\|_6}{\|\mathbf{v}arphi\|_{1,2}} \nonumber \\
\noalign{\vskip 4pt}
& \hspace{6pt} \leq\ c\, \|\mathbf{v}\|_2^{1/2}\,
\|\nabla\mathbf{v}\|_2^{1/2}\, \|\nabla\mathbf{w}\|_2. \label{3.4}
\end{align}
(We have used the imbedding inequality $\|\mathbf{v}\|_6\leq c\,
\|\mathbf{v}\|_{1,2}$. Here and further on, $c$ denotes the generic
constant.)
Let $\mathbf{u}$ be a weak solution of the IBVP (\ref{1.1})--(\ref{1.4})
in the sense of paragraph 3.1. It follows from the estimates
(\ref{3.3}) and (\ref{3.4}) that
\begin{equation}
{\cal A}\mathbf{u}\in L^2(0,T;\, \bfW_{\tau}^{-1,2}(\Omega)) \quad \mbox{and} \quad
{\cal B}(\mathbf{u},\mathbf{u})\in L^{4/3}(0,T;\, \bfW_{\tau}^{-1,2}(\Omega)).
\label{3.5}
\end{equation}
Considering $\mbox{\boldmath $\phi$}$ in (\ref{3.1}) in the form
$\mbox{\boldmath $\phi$}(\mathbf{x},t)=\mathbf{v}arphi(\mathbf{x})\, \vartheta(t)$, where
$\mathbf{v}arphi\in\bfW_{\tau}sc^{1,2}(\Omega)$ and $\vartheta\in
C^{\infty}_0((0,T))$, we deduce that $\mathbf{u}$ satisfies the equation
\begin{equation}
\frac{\mathrm{d}}{\mathrm{d}\hbox to 0.7pt{} t}\, (\mathbf{u},\mathbf{v}arphi)_2+\bigl\langle
{\cal A}\mathbf{u},\mathbf{v}arphi \hbox to 0.7pt{}angle_{\tau}+\bigl\langle {\cal B}(\mathbf{u},\mathbf{u}),
\mathbf{v}arphi\hbox to 0.7pt{}angle_{\tau}\ =\ \langle \mathbf{f},\mathbf{v}arphi \rangle_{\tau}
\label{3.6}
\end{equation}
a.e.~in\ $(0,T)$, where the derivative of $(\mathbf{u},\mathbf{v}arphi)_2$
means the derivative in the sense of distributions. As the space
$\bfW_{\tau}sc^{1,2}(\Omega)$ is dense in $\bfW_{\tau}s^{1,2}(\Omega)$,
(\ref{3.6}) holds for all $\mathbf{v}arphi\in\bfW_{\tau}s^{1,2}(\Omega)$. It
follows from (\ref{3.5}) that $\langle{\cal A}\mathbf{u},\mathbf{v}arphi
\rangle_{\tau} \in L^2(0,T)$ and $\langle{\cal B}(\mathbf{u},\mathbf{u}),\mathbf{v}arphi
\rangle_{\tau} \in L^{4/3}(0,T)$. Since
$\langle\mathbf{f},\mathbf{v}arphi\rangle_{\tau}\in L^2(0,T)$, we obtain from
(\ref{3.6}) that the distributional derivative of
$(\mathbf{u},\mathbf{v}arphi)_2$ with respect to $t$ is in $L^{4/3}(0,T)$.
Hence $(\mathbf{u},\mathbf{v}arphi)_2$ is a.e.~in $[0,T)$ equal to a
continuous function and the weak solution $\mathbf{u}$ is (after a
possible redefinition on a set of measure zero) a weakly
continuous function from $[0,T)$ to $\bfL_{\tau,\sigma}^2(\Omega)$. Now, one can
easily deduce from (\ref{3.1}) that $\mathbf{u}$ satisfies the initial
condition (\ref{1.4}) in the sense that
\begin{equation}
(\mathbf{u},\mathbf{v}arphi)_2\hbox to 0.7pt{}\bigl|_{t=0}\ =\ (\mathbf{u}_0,\mathbf{v}arphi)_2
\label{3.7}
\end{equation}
for all $\mathbf{v}arphi\in\bfW_{\tau}s^{1,2}(\Omega)$. Thus, we come to the 2nd
weak formulation of the IBVP (\ref{1.1})--(\ref{1.4}):
{\it Given $\mathbf{u}_0\in\bfL_{\tau,\sigma}^2(\Omega)$ and $\mathbf{f}\in L^2(0,T;\
\bfW_{\tau}^{-1,2}(\Omega))$. Find $\mathbf{u}\in L^{\infty}(0,T;\
\bfL_{\tau,\sigma}^2(\Omega))\cap L^2(0,T$; $\bfW_{\tau}s^{1,2}(\Omega))$ (called the
weak solution) such that $\mathbf{u}$ satisfies equation (\ref{3.6})
a.e.~in $(0,T)$ and the initial condition (\ref{3.7}) for all
$\mathbf{v}arphi\in\bfW_{\tau}s^{1,2}(\Omega)$.}
We have shown that if $\mathbf{u}$ is a weak solution of the IBVP
(\ref{1.1})--(\ref{1.4}) in the sense of the 1st definition (see
paragraph 3.1) then it also satisfies the 2nd definition. Applying
standard arguments, one can also show the opposite, i.e.~if $\mathbf{u}$
satisfies the 2nd definition then it also satisfies the 1st
definition.
\noindent
{\bf 3.3. The 3rd weak formulation of the Navier-Stokes IBVP
(\ref{1.1})--(\ref{1.4}).} \ Equation (\ref{3.6}) can also be
written in the equivalent form
\begin{equation}
\frac{\mathrm{d}}{\mathrm{d}\hbox to 0.7pt{} t}\, (\mathbf{u},\mathbf{v}arphi)_2+\bigl\langle
{\cal P}s{2}\hbox to 0.7pt{}{\cal A}\mathbf{u},\mathbf{v}arphi \hbox to 0.7pt{}angle_{\tau,\sigma}+
\bigl\langle{\cal P}s{2}\hbox to 0.7pt{}{\cal B}(\mathbf{u},\mathbf{u}),\mathbf{v}arphi\hbox to 0.7pt{}angle_{\tau,
\sigma}\ =\ \bigl\langle {\cal P}s{2}\hbox to 0.7pt{}\mathbf{f},
\mathbf{v}arphi\hbox to 0.7pt{}angle_{\tau,\sigma}. \label{3.8}
\end{equation}
Let us denote by $(\mathbf{u}')_{\sigma}$ the distributional derivative
with respect to $t$ of $\mathbf{u}$, as a function from $(0,T)$ to
$\bfW_{\tau}s^{-1,2}(\Omega)$. (We explain later why we use the notation
$(\mathbf{u}')_{\sigma}$ and not just $\mathbf{u}'$.) Equation (\ref{3.8}) can
also be written in the form
\begin{equation}
(\mathbf{u}')_{\sigma}+{\cal P}s{2}\hbox to 0.7pt{}{\cal A}\mathbf{u}+{\cal P}s{2}\hbox to 0.7pt{}{\cal B}(\mathbf{u},\mathbf{u})\ =\
{\cal P}s{2}\hbox to 0.7pt{}\mathbf{f}, \label{3.9}
\end{equation}
which is an equation in $\bfW_{\tau}s^{-1,2}(\Omega)$, satisfied a.e.~in
the time interval $(0,T)$. (This can be deduced by means of Lemma
III.1.1 in \cite{Te}.) Due to (\ref{3.5}) and (\ref{3.6}),
$(\mathbf{u}')_{\sigma}\in L^{4/3}(0,T;\, \bfW_{\tau}s^{-1,2}(\Omega))$. Hence
$\mathbf{u}$ coincides a.e.~in $(0,T)$ with a continuous function from
$[0,T)$ to $\bfW_{\tau}s^{-1,2}(\Omega)$ and it is therefore meaningful to
prescribe an initial condition for $\mathbf{u}$ at time $t=0$. Thus, we
obtain the 3rd equivalent definition of a weak solution to the
IBVP (\ref{1.1})--(\ref{1.4}):
{\it Given $\mathbf{u}_0\in\bfL_{\tau,\sigma}^2(\Omega)$ and $\mathbf{f}\in L^2(0,T$;
$\bfW_{\tau}^{-1,2}(\Omega))$. Function $\mathbf{u}\in L^{\infty}(0,T$;
$\bfL_{\tau,\sigma}^2(\Omega))\cap L^2(0,T$; $\bfW_{\tau}s^{1,2}(\Omega))$ is called a
weak solution to the IBVP (\ref{1.1})--(\ref{1.4}) if $\mathbf{u}$
satisfies equation (\ref{3.9}) a.e.~in the interval $(0,T)$ and
the initial condition (\ref{1.4}).}
We have explained that if $\mathbf{u}$ is a weak solution in the sense
of the 2nd definition then it satisfies the 3rd definition. The
validity of the opposite implication can be again verified by
means of Lemma III.1.1 in \cite{Te}.
\noindent
{\bf 3.4. Remark.} \ \rm Recall that $(\mathbf{u}')_{\sigma}$ is the
distributional derivative with respect to $t$ of $\mathbf{u}$, as a
function from $(0,T)$ to $\bfW_{\tau}s^{-1,2}(\Omega)$. It is not the same
as the distributional derivative with respect to $t$ of $\mathbf{u}$, as
a function from $(0,T)$ to $\bfW_{\tau}^{-1,2}(\Omega)$, which can be
naturally denoted by $\mathbf{u}'$. As it is important to distinguish
between these two derivatives, we use the different notation. We
can formally write $(\mathbf{u}')_{\sigma}={\cal P}s{2}\mathbf{u}'$.
Since $(\mathbf{u}')_{\sigma}\in L^{4/3}(0,T;\, \bfW_{\tau}s^{-1,2}(\Omega))$,
$\mathbf{u}$ coincides a.e.~in $(0,T)$ with a continuous function from
$[0,T)$ to $\bfW_{\tau}s^{-1,2}(\Omega)$. According to what is said in the
first part of this remark, this, however, does not imply that
$\mathbf{u}$ coincides a.e.~in $(0,T)$ with a continuous function from
$[0,T)$ to $\bfW_{\tau}^{-1,2}(\Omega)$.
\section{An associated pressure, its uniqueness and existence}
\label{S4}
{\bf 4.1. An associated pressure.} \ {\it Let $\mathbf{u}$ be a weak
solution to the IBVP (\ref{1.1})--(\ref{1.4}). A distribution $p$
in $Q_T$ is called an associated pressure if the pair $(\mathbf{u},p)$
satisfies the equations (\ref{1.1}), (\ref{1.2}) in the sense of
distributions in $Q_T$.}
\noindent
{\bf 4.2. On uniqueness of the associated pressure.} \ Let $\mathbf{u}$
be a weak solution to the IBVP (\ref{1.1})--(\ref{1.4}) and $p$ be
an associated pressure.
If $G$ is a distribution in $(0,T)$ and $\psi\in
C_0^{\infty}(Q_T)$ then we define a distribution $g$ in $Q_T$ by
the formula
\begin{equation}
\blangle\hspace{-2.5pt}\blangle g,\psi\hbox to 0.7pt{}rangle_{Q_T}\ :=\ \Bigl\langle G,\
\int_{\Omega}\psi\; \mathrm{d}\mathbf{x} \Bigr\rangle_{(0,T)}, \label{4.11}
\end{equation}
where $\langle\hspace{-1.9pt}\langle\, .\, ,\, .\, \rangle\hspace{-1.9pt}\rangle_{Q_T}$, respectively
$\langle\, .\, ,\, .\, \rangle_{(0,T)}$, denotes the action of a
distribution in $Q_T$ on a function from $C^{\infty}_0(Q_T)$ or
$\mathbf{C}^{\infty}_0(Q_T)$, respectively the action of a distribution
in $(0,T)$ on a function from $C_0^{\infty}((0,T))$. Obviously, if
$\mbox{\boldmath $\phi$}\in C^{\infty}_0\bigl((0,T);\, \bfW_{\tau}c^{1,2}(\Omega)\bigr)$
then
\begin{equation}
\blangle\hspace{-2.5pt}\blangle\nabla g,\mbox{\boldmath $\phi$}\hbox to 0.7pt{}rangle_{Q_T}\ =\ -\blangle\hspace{-2.5pt}\blangle
g,\mathrm{div}\,\mbox{\boldmath $\phi$}\hbox to 0.7pt{}rangle_{Q_T}\ =\ -\Bigl\langle G,\
\int_{\Omega}\mathrm{div}\,\mbox{\boldmath $\phi$}\; \mathrm{d}\mathbf{x}\Bigr\rangle_{(0,T)}\ =\ 0,
\label{4.12}
\end{equation}
because $\int_{\Omega}\mathrm{div}\,\mbox{\boldmath $\phi$}(\, .\, ,t)\; \mathrm{d}\mathbf{x}=0$ for all
$t\in(0,T)$. Thus, $p+g$ is a pressure, associated with the weak
solution $\mathbf{u}$ to the IBVP (\ref{1.1})--(\ref{1.4}), too.
For $h\in C_0^{\infty}((0,T))$, define
\begin{equation}
\bigl\langle G,h \hbox to 0.7pt{}angle_{(0,T)}\ :=\ \blangle\hspace{-2.5pt}\blangle
g,\psi\hbox to 0.7pt{}rangle_{Q_T}, \label{4.13}
\end{equation}
where $\psi\in C_0^{\infty}(Q_T)$ is chosen so that
$h(t)=\int_{\Omega}\psi(\mathbf{x},t)\; \mathrm{d}\mathbf{x}$ for all $t\in(0,T)$.
The definition of the distribution $G$ is independent of the
concrete choice of function $\psi$ due to these reasons: let
$\psi_1$ and $\psi_2$ be two functions from $C_0^{\infty}(Q_T)$
such that $h(t)=\int_{\Omega}\psi_1(\mathbf{x},t)\;
\mathrm{d}\mathbf{x}=\int_{\Omega}\psi_2(\mathbf{x},t)\; \mathrm{d}\mathbf{x}$ for $t\in(0,T)$.
Denote by $G_1$, respectively $G_2$, the distribution, defined by
formula (\ref{4.13}) with $\psi=\psi_1$, respectively
$\psi=\psi_2$. Since $\mathrm{supp}\,(\psi_1-\psi_2)$ is a compact subset of
$Q_T$ and $\int_{\Omega}[\psi_1(\, .\, ,t)-\psi_2(\, .\, ,t)]\;
\mathrm{d}\mathbf{x}=0$ for all $t\in(0,T)$, there exists a function
$\mbox{\boldmath $\phi$}\in\mathbf{C}_0^{\infty}(Q_T)$ such that $\mathrm{div}\,\mbox{\boldmath $\phi$}=
\psi_1-\psi_2$ in $Q_T$. (See e.g.~\cite[Sec.~III.3]{Ga1} or
\cite{BoSo} for the construction of function $\mbox{\boldmath $\phi$}$.) Then
\begin{displaymath}
\bigl\langle G_1-G_2,h \hbox to 0.7pt{}angle_{(0,T)}\ :=\ \blangle\hspace{-2.5pt}\blangle
g,\psi_1-\psi_2\hbox to 0.7pt{}rangle_{Q_T}\ =\ \blangle\hspace{-2.5pt}\blangle
g,\mathrm{div}\,\mbox{\boldmath $\phi$}\hbox to 0.7pt{}rangle_{Q_T},
\end{displaymath}
which is equal to zero due to (\ref{4.12}). Formula (\ref{4.13})
and the identity $h(t)=\int_{\Omega}\psi(\mathbf{x},t)\; \mathrm{d}\mathbf{x}$ show
that the distribution $g$ has the form (\ref{4.11}).
We have proven the theorem:
\begin{theorem} \label{T4.1}
The pressure, associated with a weak solution to the IBVP
(\ref{1.1})--(\ref{1.4}), is unique up to an additive distribution
of the form (\ref{4.11}).
\end{theorem}
\noindent
{\bf 4.3. Projections $E^{1,2}_{\tau}$ and $E^{-1,2}_{\tau}$.} \
In this subsection, we introduce orthogonal projections
$E^{1,2}_{\tau}$ and $E^{-1,2}_{\tau}$ in $\bfW_{\tau}^{1,2}(\Omega)$ and
$\bfW_{\tau}^{-1,2}(\Omega)$, respectively, which further play an
important role in the proof of the existence of an associated
pressure.
$\bfW_{\tau}^{1,2}(\Omega)$ is a Hilbert space with the scalar product
$(\, .\, ,\, .\, )_{1,2}=\bigl\langle({\cal A}_0+I)\, .\, ,\, .\
\hbox to 0.7pt{}angle_{\tau}$, where ${\cal A}_0$ is the operator ${\cal A}$ from
paragraph 3.2, corresponding to $\nu=1$ and $\gamma=0$. Similarly,
$\bfW_{\tau}^{-1,2}(\Omega)$ is a Hilbert space with the scalar product
\begin{equation}
(\mathbf{g},\mathbf{h})_{-1,2}\ :=\ \bigl\langle\mathbf{g},({\cal A}_0+
I)^{-1}\mathbf{h}\hbox to 0.7pt{}angle_{\tau}\ =\ \bigl(({\cal A}_0+
I)^{-1}\mathbf{g},({\cal A}_0+I)^{-1}\mathbf{h}\bigr)_{1,2}. \label{4.1}
\end{equation}
Denote by $E^{1,2}_{\tau}$ the orthogonal projection in
$\bfW_{\tau}^{1,2}(\Omega)$ that vanishes just on $\bfW_{\tau}s^{1,2}(\Omega)$,
which means that
\begin{equation}
\ker E^{1,2}_{\tau}\ =\ \bfW_{\tau}s^{1,2}(\Omega). \label{4.2}
\end{equation}
Denote by $E^{-1,2}_{\tau}$ the adjoint projection in
$\bfW_{\tau}^{-1,2}(\Omega)$. Applying (\ref{4.2}), one can verify that
the range of $E^{-1,2}_{\tau}$ is $\bfW_{\tau}s^{1,2}(\Omega)^{\perp}$.
Let $\mathbf{g}\in\bfW_{\tau}^{-1,2}(\Omega)$ and $\mbox{\boldmath $\psi$}\in\bfW_{\tau}^{1,2}(\Omega)$.
Then, due to (\ref{4.1}) and the orthogonality of
$E^{1,2}_{\tau}$, we have
\begin{displaymath}
\bigl\langle\mathbf{g},E^{1,2}_{\tau}\mbox{\boldmath $\psi$}\hbox to 0.7pt{}angle_{\tau}\ =\ \bigl(
({\cal A}_0+I)^{-1}\mathbf{g},E^{1,2}_{\tau}\mbox{\boldmath $\psi$}\bigr)_{1,2}\ =\ \bigl(
E^{1,2}_{\tau}({\cal A}_0+I)^{-1}\mathbf{g},\mbox{\boldmath $\psi$}\bigr)_{1,2}.
\end{displaymath}
However, the duality on the left hand side can also be expressed
in another way: \ using again (\ref{4.1}) and the fact that
$E^{-1,2}_{\tau}$ is adjoint to $E^{1,2}_{\tau}$, we get
\begin{displaymath}
\bigl\langle\mathbf{g},E^{1,2}_{\tau}\mbox{\boldmath $\psi$}\hbox to 0.7pt{}angle_{\tau}\ =\ \bigl\langle
E^{-1,2}_{\tau}\hbox to 0.7pt{}\mathbf{g},\mbox{\boldmath $\psi$}\hbox to 0.7pt{}angle_{\tau}\ =\ \bigl(
({\cal A}_0+I)^{-1} E^{-1,2}_{\tau}\hbox to 0.7pt{}\mathbf{g},\mbox{\boldmath $\psi$}\bigr)_{1,2}.
\end{displaymath}
Thus, we obtain the important identity
\begin{equation}
E^{1,2}_{\tau}\hbox to 0.7pt{}({\cal A}_0+I)^{-1}\ =\ ({\cal A}_0+I)^{-1}
E^{-1,2}_{\tau}. \label{4.3}
\end{equation}
Applying (\ref{4.3}), we can now show that the projection
$E^{-1,2}_{\tau}$ is orthogonal in $\bfW_{\tau}^{-1,2}(\Omega)$. Indeed,
if $\mathbf{g},\hbox to 0.7pt{}\mathbf{h}\in\bfW_{\tau}^{-1,2}(\Omega)$ then
\begin{align*}
& \bigl(E^{-1,2}_{\tau}\hbox to 0.7pt{}\mathbf{g},\mathbf{h}\bigr)_{-1,2}\ =\
\bigl(({\cal A}_0+I)^{-1}E^{-1,2}_{\tau}\hbox to 0.7pt{}\mathbf{g},
({\cal A}_0+I)^{-1}\mathbf{h}\bigr)_{1,2} \\
& \hspace{20pt} =\ \bigr(E^{1,2}_{\tau}\hbox to 0.7pt{} ({\cal A}_0+I)^{-1}\hbox to 0.7pt{}
\mathbf{g},({\cal A}_0+I)^{-1}\mathbf{h}\bigr)_{1,2}\ =\ \bigl(({\cal A}_0+I)^{-1}
\mathbf{g},E^{1,2}_{\tau}({\cal A}_0+I)^{-1}\mathbf{h}\bigr)_{1,2} \\
& \hspace{20pt} =\
\bigl(({\cal A}_0+I)^{-1}\mathbf{g},({\cal A}_0+I)^{-1}E^{-1,2}_{\tau}
\mathbf{h}\bigr)_{1,2}\ =\ \bigl(\mathbf{g},E^{-1,2}_{\tau}\mathbf{h}\bigr)_{-1,2}.
\end{align*}
This verifies the orthogonality of projection $E^{-1,2}_{\tau}$.
Finally, we will show that if $\phi\in C^{\infty}_0(\Omega)$ then
\begin{equation}
E^{1,2}_{\tau}\nabla\phi\ =\ \nabla\phi \qquad \mbox{for all
$\phi\in C^{\infty}_0(\Omega)$}. \label{4.4}
\end{equation}
Thus, let $\phi\in C^{\infty}_0(\Omega)$. Then
$\nabla\phi\in\bfW_{\tau}^{1,2}(\Omega)$ and $\,
({\cal A}_0+I)\nabla\phi\equiv\nabla(-\Delta+I)\phi\in\bfW_{\tau}s^{1,2}
(\Omega)^{\perp}$. Hence
\begin{displaymath}
E^{-1,2}_{\tau}({\cal A}_0+I)\nabla\phi\ =\ ({\cal A}_0+I)\nabla\phi.
\end{displaymath}
Applying (\ref{4.3}), we also get
\begin{displaymath}
E^{-1,2}_{\tau}({\cal A}_0+I)\nabla\phi\ =\ ({\cal A}_0+
I)E^{1,2}_{\tau}\nabla\phi.
\end{displaymath}
Since ${\cal A}_0+I$ is a one-to-one operator from $\bfW_{\tau}^{1,2}(\Omega)$
to $\bfW_{\tau}^{-1,2}(\Omega)$, the last two identities show that
(\ref{4.4}) holds.
\noindent
{\bf 4.4. Existence of an associated pressure.} \ In this
paragraph, we show that to every weak solution of the IBVP
(\ref{1.1})--(\ref{1.4}), an associated pressure exists and has a
certain structure.
Let $\mathbf{u}$ be a weak solution to the IBVP
(\ref{1.1})--(\ref{1.4}). Due to \cite[Lemma III.1.1]{Te},
equation (\ref{3.9}) is equivalent to
\begin{displaymath}
\mathbf{u}(t)-\mathbf{u}(0)+\int_0^t{\cal P}s{2}\bigl[{\cal A}\mathbf{u}+
{\cal B}(\mathbf{u},\mathbf{u})-\mathbf{f}\bigr]\; \mathrm{d}\tau\ =\ \mathbf{0}
\end{displaymath}
for a.a.~$t\in(0,T)$. (As usual, we identify $\mathbf{u}(\, .\, ,t)$
and $\mathbf{u}(t)$.) Since $\mathbf{u}(t)$ and $\mathbf{u}(0)$ are in
$\bfL_{\tau,\sigma}^2(\Omega)$, they coincide with ${\cal P}s{2}\mathbf{u}(t)$ and
${\cal P}s{2}\mathbf{u}(0)$, respectively. (See paragraph 2.4.) Hence
\begin{displaymath}
{\cal P}s{2}\biggl(\mathbf{u}(t)-\mathbf{u}(0)+\int_0^t\bigl[{\cal A}\mathbf{u}+
{\cal B}(\mathbf{u},\mathbf{u})-\mathbf{f}\bigr]\; \mathrm{d}\tau\biggr)\ =\ \mathbf{0}.
\end{displaymath}
Define $\mathbf{F}(t)\in\bfW_{\tau}^{-1,2}(\Omega)$ by the formula
\begin{equation}
\mathbf{F}(t)\ :=\ \mathbf{u}(t)-\mathbf{u}(0)+\int_0^t
\bigl[{\cal A}\mathbf{u}+{\cal B}(\mathbf{u},\mathbf{u})-\mathbf{f}\bigr]\; \mathrm{d}\tau.
\label{4.6}
\end{equation}
Since $\langle\mathbf{F}(t),\mbox{\boldmath $\psi$}\rangle_{\tau}=\langle{\cal P}s{2}
\mathbf{F}(t),\mbox{\boldmath $\psi$}\rangle_{\tau,\sigma}=0$ for all $\mbox{\boldmath $\psi$}\in
\bfW_{\tau}s^{1,2}(\Omega)$, $\mathbf{F}(t)$ belongs to
$\bfW_{\tau}s^{1,2}(\Omega)^{\perp}$. Hence
$E^{-1,2}_{\tau}\hbox to 0.7pt{}\mathbf{F}(t)=\mathbf{F}(t)$ and
$(I-E^{-1,2}_{\tau})\hbox to 0.7pt{}\mathbf{F}(t)=\mathbf{0}$. Thus,
\begin{align*}
(I-E^{-1,2}_{\tau})\hbox to 0.7pt{}\mathbf{u}(t) & -(I-E^{-1,2}_{\tau})\hbox to 0.7pt{}\mathbf{u}(0) \\
& +\int_0^t(I-E^{-1,2}_{\tau})\hbox to 0.7pt{}\bigl[{\cal A}\mathbf{u}+
{\cal B}(\mathbf{u},\mathbf{u})-\mathbf{f}\bigr]\; \mathrm{d}\tau\ =\ \mathbf{0}
\end{align*}
holds as an equation in $\bfW_{\tau}^{-1,2}(\Omega)$. Applying Lemma
III.1.1 from \cite{Te}, we deduce that
\begin{displaymath}
\bigl[(I-E^{-1,2}_{\tau})\hbox to 0.7pt{}\mathbf{u}\bigr]'+(I-E^{-1,2}_{\tau})\hbox to 0.7pt{}
\bigl[{\cal A}\mathbf{u}+{\cal B}(\mathbf{u},\mathbf{u})-\mathbf{f}\bigr]\ =\ \mathbf{0}.
\end{displaymath}
This yields
\begin{align}
\mathbf{u}'+{\cal A}\mathbf{u} & +{\cal B}(\mathbf{u},\mathbf{u})\ =\ \mathbf{f} \nonumber \\
\noalign{\vskip 2pt}
& +E^{-1,2}_{\tau}[\mathbf{u}'+{\cal A}\mathbf{u}+{\cal B}(\mathbf{u},\mathbf{u})-\mathbf{f}].
\label{4.7}
\end{align}
(Here, $[(I-E^{-1,2}_{\tau})\mathbf{u}]'$ and $\mathbf{u}'$ are the
distributional derivatives with respect to $t$ of
$(I-E^{-1,2}_{\tau})\mathbf{u}$ and $\mathbf{u}$, respectively, as functions
from $(0,T)$ to $\bfW_{\tau}^{-1,2}(\Omega)$.) Let
$\Omega_0\subset\subset\Omega$ be a non-empty domain. By Lemma
\ref{L2.2}, there exist unique $p_1(t)$, $p_{21}(t)$, $p_{22}(t)$,
$p_{23}(t)$ in $L^2_{loc}(\Omega)$ such that
\begin{equation}
\begin{array}{rl}
\bigl\langle -E^{-1,2}_{\tau}\mathbf{u}(t),\mbox{\boldmath $\psi$}\hbox to 0.7pt{}angle_{\tau}\ &=\
{\displaystyle -\int_{\Omega} p_1(t)\ \mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x},} \\
[10pt]
\bigl\langle -E^{-1,2}_{\tau}{\cal A}\mathbf{u}(t),\mbox{\boldmath $\psi$} \hbox to 0.7pt{}angle_{\tau}\ &=\
{\displaystyle -\int_{\Omega} p_{21}(t)\ \mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x},}
\\ [10pt]
\bigl\langle -E^{-1,2}_{\tau}{\cal B}(\mathbf{u}(t),\mathbf{u}(t)),\mbox{\boldmath $\psi$}
\hbox to 0.7pt{}angle_{\tau}\ &=\ {\displaystyle -\int_{\Omega} p_{22}(t)\
\mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x},}
\\ [10pt]
\bigl\langle -E^{-1,2}_{\tau}\mathbf{f}(t),\mbox{\boldmath $\psi$} \hbox to 0.7pt{}angle_{\tau}\ &=\
{\displaystyle -\int_{\Omega} p_{23}(t)\ \mathrm{div}\,\mbox{\boldmath $\psi$}\; \mathrm{d}\mathbf{x}}
\end{array} \label{4.8}
\end{equation}
for a.a.~$t\in(0,T)$ and all $\mbox{\boldmath $\psi$}\in\bfW_{\tau}c^{1,2}(\Omega)$ and the
inequalities
\begin{equation}
\begin{array}{lll}
\|p_1(t)\|_{2;\, \Omega_R}\ & \leq\ c(R)\,
\|E^{-1,2}_{\tau}\mathbf{u}(t)\|_{-1,2}\ & \leq\ c(R)\, \|\mathbf{u}(t)\|_{-1,2}, \\
[5pt]
\|p_{21}(t)\|_{2;\, \Omega_R}\ & \leq\ c(R)\,
\|E^{-1,2}_{\tau}{\cal A}\mathbf{u}(t)\|_{-1,2}\ & \leq\ c(R)\,
\|{\cal A}\mathbf{u}(t)\|_{-1,2}, \\ [5pt]
\|p_{22}(t)\|_{2;\, \Omega_R}\ & \leq\ c(R)\,
\|E^{-1,2}_{\tau}{\cal B}(\mathbf{u}(t),\mathbf{u}(t))\|_{-1,2}\ & \leq\ c(R)\,
\|{\cal B}(\mathbf{u}(t),\mathbf{u}(t))\|_{-1,2}, \\ [5pt]
\|p_{23}(t)\|_{2;\, \Omega_R}\ & \leq\ c(R)\,
\|E^{-1,2}_{\tau}\mathbf{f}(t)\|_{-1,2}\ & \leq\ c(R)\,
\|\mathbf{f}(t)\|_{-1,2}
\end{array} \label{4.9}
\end{equation}
hold for all $R>0$ and a.a.~$t\in(0,T)$. Moreover,
$\int_{\Omega_0}p_1(t)\; \mathrm{d}\mathbf{x}=\int_{\Omega_0}p_{2i}(t)\;
\mathrm{d}\mathbf{x}=0$ ($i=1,2,3$) for a.a.~$t\in(0,T)$. Using the inequality
$\|\mathbf{u}(t)\|_{-1,2}\leq \|\mathbf{u}(t)\|_2$ and estimates (\ref{3.5}),
we get
\begin{equation}
\begin{array}{ll} p_1\hspace{4.1pt}\in L^{\infty}(0,T;\,
L^2(\Omega_R)), & p_{21}\in L^2(0,T;\, L^2(\Omega_R)), \\
[5pt] p_{22}\in L^{4/3} (0,T;\, L^2(\Omega_R)), \hbox to 10pt{} &
p_{23}\in L^2(0,T;\, L^2(\Omega_R)) \end{array} \label{4.10}
\end{equation}
for all $R>0$.
For a.a.~$t\in(0,T)$, the functions $p_1(t)$ and $p_{21}(t)$ are
harmonic in $\Omega$. This follows from the identities
\begin{align*}
\int_{\Omega} p_1(t)\, \Delta\phi\; \mathrm{d}\mathbf{x}\ &=\ -\bigl\langle \nabla
p_1(t),\nabla\phi\hbox to 0.7pt{}angle_{\tau}\ =\ \bigl\langle
E^{-1,2}_{\tau}\mathbf{u}(t),\nabla\phi\hbox to 0.7pt{}angle_{\tau}\ =\ \bigl\langle
\mathbf{u}(t),E^{1,2}_{\tau}\hbox to 0.7pt{}\nabla\phi\hbox to 0.7pt{}angle_{\tau} \\
&=\ \bigl\langle\mathbf{u}(t),\nabla\phi\hbox to 0.7pt{}angle_{\tau}\ =\ \int_{\Omega}
\mathbf{u}(t)\cdot\nabla\phi\; \mathrm{d}\mathbf{x}\ =\ 0 \quad \mbox{(for all
$\phi\in C_0^{\infty}(\Omega)$).}
\end{align*}
(We have used (\ref{4.4}).) Hence, by Weyl's lemma, $p_1(t)$ is a
harmonic function in $\Omega$. The fact that $p_{21}(t)$ is
harmonic can be proved similarly.
Equation (\ref{4.7}) is an equation in $\bfW_{\tau}^{-1,2}(\Omega)$.
Applying successively each term in (\ref{4.7}) to the function of
the type $\mbox{\boldmath $\varphi$}(\mathbf{x})\, \eta(t)$, where
$\mbox{\boldmath $\varphi$}\in\bfW_{\tau}c^{1,2}(\Omega)$ and $\eta\in C^{\infty}_0((0,T))$, using
formulas (\ref{4.8}), and denoting $p_2:=p_{21}+p_{22}+p_{23}$, we
obtain
\begin{gather*}
\int_0^T \int_{\Omega} \bigl[ -\mathbf{u}\cdot\mbox{\boldmath $\varphi$}\, \eta'(t)+
\nu\hbox to 0.7pt{}\nabla\mathbf{u}:\nabla\mbox{\boldmath $\varphi$}\, \eta(t)+\mathbf{u}\cdot\nabla
\mathbf{u}\cdot \mbox{\boldmath $\varphi$}\, \eta(t) \bigr]\; \mathrm{d}\mathbf{x}\, \mathrm{d}
t+\int_0^T\int_{\partial\Omega} \gamma\, \mathbf{u}\cdot\mbox{\boldmath $\varphi$}\, \eta(t)\;
\mathrm{d} S\, \mathrm{d} t \\
=\ \int_0^T \langle\mathbf{f},\mbox{\boldmath $\varphi$}\rangle_{\tau}\, \eta(t)\; \mathrm{d}
t-\int_0^T\int_{\Omega} p_1\ \mathrm{div}\,\mbox{\boldmath $\varphi$}\ \eta'(t)\; \mathrm{d}\mathbf{x}\,
\mathrm{d} t+\int_0^T\int_{\Omega} p_2\ \mathrm{div}\, \mbox{\boldmath $\varphi$}\ \eta(t)\;
\mathrm{d}\mathbf{x}\, \mathrm{d} t
\end{gather*}
for all functions $\mbox{\boldmath $\varphi$}\in\bfW_{\tau}c^{1,2}(\Omega)$ and $\eta\in
C^{\infty}_0((0,T))$. Since the set of all finite linear
combinations of functions of the type $\mbox{\boldmath $\varphi$}(\mathbf{x})\, \eta(t)$,
where $\mbox{\boldmath $\varphi$}\in\bfW_{\tau}c^{1,2}(\Omega)$ and $\eta\in
C^{\infty}_0((0,T))$, is dense in $C^{\infty}_0\bigl((0,T);\,
\bfW_{\tau}c^{1,2}(\Omega)\bigr)$ in the norm of $W^{1,2}_0(0,T;\,
\bfW_{\tau}^{1,2}(\Omega))$, we also obtain the equation
\begin{gather}
\int_0^T \int_{\Omega} \bigl[ -\mathbf{u}\cdot\partial_t\mbox{\boldmath $\phi$}+
\nu\hbox to 0.7pt{}\nabla\mathbf{u}:\nabla\mbox{\boldmath $\phi$}+\mathbf{u}\cdot\nabla\mathbf{u}\cdot\mbox{\boldmath $\phi$}
\bigr]\; \mathrm{d}\mathbf{x}\, \mathrm{d} t+\int_0^T\int_{\partial\Omega} \gamma\,
\mathbf{u}\cdot\mbox{\boldmath $\phi$}\; \mathrm{d} S\, \mathrm{d} t \nonumber \\
=\ \int_0^T \langle\mathbf{f},\mbox{\boldmath $\phi$}\rangle_{\tau}\; \mathrm{d}
t-\int_0^T\int_{\Omega} p_1\ \mathrm{div}\,\partial_t\mbox{\boldmath $\phi$}\; \mathrm{d}\mathbf{x}\,
\mathrm{d} t+\int_0^T\int_{\Omega} p_2\ \mathrm{div}\, \mbox{\boldmath $\phi$}\; \mathrm{d}\mathbf{x}\, \mathrm{d} t
\label{4.5}
\end{gather}
for all $\mbox{\boldmath $\phi$}\in C^{\infty}_0\bigl((0,T);\,
\bfW_{\tau}c^{1,2}(\Omega)\bigr)$. Choosing particularly
$\mbox{\boldmath $\phi$}\in\mathbf{C}^{\infty}_0(Q_T)$ and putting
\begin{equation}
p\ :=\ \partial_tp_1+p_2\ \equiv\ \partial_tp_1+p_{21}+p_{22}+
p_{23} \label{4.14}
\end{equation}
(where $\partial_tp_1$ is the derivative in the sense of
distributions), we observe that $(\mathbf{u},p)$ is a distributional
solution of the system (\ref{1.1}), (\ref{1.2}) in $Q_T$.
The next theorem summarizes the results of this subsection:
\begin{theorem} \label{T4.2}
Let $T>0$ and $\Omega$ be a locally Lipschitz domain in ${\mathbb R}^3$,
satisfying condition (i) from subsection 1.1. Let $\mathbf{u}$ be a weak
solution to the Navier-Stokes IBVP (\ref{1.1})--(\ref{1.4}). Then
there exists an associated pressure $p$ in the form (\ref{4.14}),
where $p_1$, $p_{21}$, $p_{22}$, $p_{23}$ satisfy
(\ref{4.8})--(\ref{4.10}). Moreover,
\begin{list}{}
{\setlength{\topsep}{2pt}
\setlength{\itemsep}{0pt}
\setlength{\leftmargin}{20pt}
\setlength{\rightmargin}{0pt}
\setlength{\labelwidth}{16pt}}
\item[1) ]
if $\, \Omega_0\subset\subset\Omega$ then the functions $p_1(t),\,
p_{21}(t),\, p_{22}(t),\, p_{23}(t)$ can be chosen so that they
satisfy the additional conditions
\begin{displaymath}
\int_{\Omega_0}p_1(t)\; \mathrm{d}\mathbf{x}\ =\ \int_{\Omega_0}p_{21}(t)\;
\mathrm{d}\mathbf{x}\ =\ \int_{\Omega_0}p_{22}(t)\; \mathrm{d}\mathbf{x}\ =\
\int_{\Omega_0}p_{23}(t)\; \mathrm{d}\mathbf{x}\ =\ 0,
\end{displaymath}
\item[2) ]
the functions $p_1(t)$ and $p_{21}(t)$ are harmonic in $\Omega$
for a.a.~$t\in(0,T)$,
\item[3) ]
the functions $\mathbf{u}$, $p_1$ and $p_2\equiv p_{21}+p_{22}+p_{23}$
satisfy the integral equation (\ref{4.5}) for all test functions
$\mbox{\boldmath $\phi$}\in C^{\infty}_0\bigl((0,T);\, \bfW_{\tau}c^{1,2}(\Omega)\bigr)$.
\end{list}
\end{theorem}
\noindent
Note that if $\Omega$ is a bounded Lipschitz domain then the
choice $\Omega_0=\Omega$ is also permitted in statement 1) of
Theorem \ref{T4.2}.
\section{The case of a smooth bounded domain $\Omega$} \label{S5}
{\bf 5.1. Some results from paper \cite{AmEsGh}.} \ In this
section, we assume that $\Omega$ is a bounded domain in ${\mathbb R}^3$
with the boundary of the class $C^2$. We denote by $A_q$ (for
$1<q<\infty$) the linear operator in $\bfL_{\tau,\sigma}^q(\Omega)$ with the
domain defined by the equation
\begin{displaymath}
A_q\mathbf{v}\ :=\ -\nu\, P_q\hbox to 0.7pt{}\Delta\mathbf{v}
\end{displaymath}
for $\mathbf{v}\in D(A_q)$, where
\begin{displaymath}
D(A_q)\ :=\
\bigl\{\mathbf{v}\in\mathbf{W}^{2,q}(\Omega)\cap\bfW_{\tau}s^{1,q}(\Omega);\
[{\mathbb T}_d(\mathbf{v})\cdot\mathbf{n}]_{\tau}+\gamma\hbox to 0.7pt{}\mathbf{v}_{\tau}=\mathbf{0}\
\mbox{on}\ \partial\Omega \bigr\}
\end{displaymath}
is the domain of operator $A_q$. Recall that ${\mathbb T}_d(\mathbf{v})\equiv
2\nu\hbox to 0.7pt{}{\mathbb D}(\mathbf{v})$ is the dynamic stress tensor, induced by the
vector field $\mathbf{v}$, and $P_q$ is the Helmholtz projection in
$\mathbf{L}^q(\Omega)$. Operator $A_q$ is usually called the {\it Stokes
operator} in $\bfL_{\tau,\sigma}^q(\Omega)$. Particularly, if $q=2$ then $A_2$
coincides with the restriction of operator ${\cal A}$, defined in
subsection 3.2, to $D(A_2)$. It is shown in the paper
\cite{AmEsGh} by Ch.~Amrouche, M.~Escobedo and A.~Ghosh that
$(-A_q)$ generates a bounded analytic semigroup $\mathrm{e}^{-A_q t}$ in
$\bfL_{\tau,\sigma}^q(\Omega)$. The next lemma also comes from \cite{AmEsGh}, see
\cite[Theorem 1.3]{AmEsGh}. It concerns the solution of the
inhomogeneous non--steady Stokes problem, given by the equations
\begin{equation}
\partial_t\mathbf{u}+\nabla \pi\ =\ \nu\Delta\mathbf{u}+\mathbf{g} \label{5.1}
\end{equation}
and (\ref{1.2}) (in $Q_T$), by the boundary conditions (\ref{1.3})
and by the initial condition (\ref{1.4}). The initial velocity
$\mathbf{u}_0$ is supposed to be from the space $\mathbf{E}_r^q(\Omega)$,
which is defined to be the real interpolation space $[D(A_q),\,
\bfL_{\tau,\sigma}^q(\Omega)]_{1/r,r}$. The problem (\ref{5.1}),
(\ref{1.2})--(\ref{1.3}) can also be equivalently written in the
form
\begin{equation}
\frac{\mathrm{d}\mathbf{u}}{\mathrm{d} t}+A_q\mathbf{u}\ =\ \mathbf{g}, \qquad \mathbf{u}(0)=\mathbf{u}_0,
\label{5.1a}
\end{equation}
which is the initial--value problem in $\bfL_{\tau,\sigma}^q(\Omega)$. (Although
the pressure $\pi$ does not explicitly appear in (\ref{5.1a}), it
can always be reconstructed in the way described in section
\ref{S4}.) The lemma says:
\begin{lemma} \label{L5.1}
Let $r,q\in(1,\infty)$, $T>0$, $\mathbf{g}\in L^r(0,T;\, \bfL_{\tau,\sigma}^q(\Omega))$
and $\mathbf{u}_0\in\mathbf{E}_r^q(\Omega)$. Then the Stokes problem
(\ref{5.1}), (\ref{1.2}), (\ref{1.3}), (\ref{1.4}) has a unique
solution $(\mathbf{u},\pi)$ in $\bigl[ W^{1,r}(0,T;\, \bfL_{\tau,\sigma}^q(\Omega))\cap
L^r(0,T;\, \mathbf{W}^{2,q}(\Omega)) \bigr] \times L^r(0,T;\,
W^{1,q}(\Omega)/{\mathbb R})$. The solution satisfies the estimate
\begin{equation}
\int_0^T \|\partial_t\mathbf{u}\|_q^r\; \mathrm{d}
t+\int_0^T\|\mathbf{u}\|_{2,q}^r\; \mathrm{d} t+\int_0^T\|\pi\|_{1,q}^r\; \mathrm{d}
t\ \leq\ C\, \biggl( \int_0^T\|\mathbf{g}\|_q^r\; \mathrm{d} t+
\|\mathbf{u}_0\|_{\mathbf{E}_r^q(\Omega)}^r \biggr). \label{5.2}
\end{equation}
\end{lemma}
The proof is based on a more general theorem from the paper
\cite{GiSo} by Y.~Giga and H.~Sohr.
\noindent
{\bf 5.2. Application of Lemma \ref{L5.1}.} \ If $\mathbf{u}$ is a weak
solution to the problem (\ref{1.1})--(\ref{1.4}) then, since
$\mathbf{u}\in L^{\infty}(0,T;\ \bfL_{\tau,\sigma}^2(\Omega)) \cap L^2(0,T;\
\bfW_{\tau}s^{1,2}(\Omega))$, one can verify that $\mathbf{u}\cdot\nabla\mathbf{u}\in
L^r(0,T;\, \mathbf{L}^q(\Omega))$ for all $1\leq r\leq 2$, $1\leq
q\leq\frac{3}{2}$, satisfying $2/r+3/q=4$. In order to be
consistent with the assumptions of Lemma \ref{L5.1} regarding $q$
and $r$, assume that $1<q<\frac{3}{2}$, $1<r<2$ and $2/r+3/q=4$.
Furthermore, assume that
$\mathbf{u}_0\in\mathbf{E}^q_r(\Omega)\cap\bfL_{\tau,\sigma}^2(\Omega)$ and function $\mathbf{f}$
on the right hand side of equation (\ref{1.1}) is in $L^r(0,T;\,
\mathbf{L}^q(\Omega)) \cap L^2(0,T;\, \bfW_{\tau}^{-1,2} (\Omega))$. Put
$\mathbf{g}:=P_q\mathbf{f}-P_q(\mathbf{u}\cdot\nabla\mathbf{u})$. Then, due to the
boundedness of projection $P_q$ in $\mathbf{L}^q(\Omega)$, $\mathbf{g}\in
L^r(0,T;\, \bfL_{\tau,\sigma}^q(\Omega))$. Assume, moreover, that
$\mathbf{u}_0\in\mathbf{E}_r^q(\Omega)$. Now, we are in a position that we can
apply Lemma \ref{L5.1} and deduce that the linear Stokes problem
(\ref{5.1}), (\ref{1.2})--(\ref{1.4}) has a unique solution
$(\mathbf{U},\pi)\in \bigl[ W^{1,r}(0,T;\, \bfL_{\tau,\sigma}^q(\Omega))\cap L^r(0,T;\,
\mathbf{W}^{2,q}(\Omega)) \bigr] \times L^r(0,T;\, W^{1,q}(\Omega)/{\mathbb R})$,
satisfying estimate (\ref{5.2}) with $\mathbf{U}$ instead of $\mathbf{u}$. In
order to show that the weak solution $\mathbf{u}$ of the nonlinear
Navier--Stokes problem (\ref{1.1})--(\ref{1.4}) satisfies the same
estimate, too, we need to identify $\mathbf{u}$ with $\mathbf{U}$.
\noindent
{\bf 5.3. The identification of $\mathbf{U}$ and $\mathbf{u}$.} \ It is not
obvious at the first sight that $\mathbf{U}=\mathbf{u}$, because while $\mathbf{U}$
is a unique solution of the problem (\ref{5.1}),
(\ref{1.2})--(\ref{1.4}) in the class $W^{1,r}(0,T;\,
\bfL_{\tau,\sigma}^q(\Omega))\cap L^r(0,T;\, \mathbf{W}^{2,q}(\Omega))$, $\mathbf{u}$ is only
known to be in $L^{\infty}(0,T;\ \bfL_{\tau,\sigma}^2(\Omega)) \cap L^2(0,T;\
\bfW_{\tau}s^{1,2}(\Omega))$. Nevertheless, applying the so-called Yosida
approximation of the identity operator in $\bfL_{\tau,\sigma}^q(\Omega)$, defined
by the formula $J^{(k)}_q:=(I+k^{-1}A_q)^{-1}$ (for $k\in{\mathbb N}$), in
the same spirit as in \cite{GiSo} or \cite{SoWa}, the equality
$\mathbf{U}=\mathbf{u}$ can be established. We explain the main steps of the
procedure in greater detail in the rest of this subsection.
At first, one can deduce from \cite[Section 3]{AmEsGh} that the
spectrum of $A_q$ is a subset of the interval $(0,\infty)$ on the
real axis, which implies that $J^{(k)}_q$ is a bounded operator on
$\bfL_{\tau,\sigma}^q(\Omega)$ with values in $D(A_q)$. Obviously, $J^{(k)}_q$
commutes with $A_q$ and with $J^{(m)}_q$ (for $k,m\in{\mathbb N}$,
$k\not=m$) and $J^{(k)}_q=J^{(k)}_s$ on $\bfL_{\tau,\sigma}^q(\Omega)\cap
\bfL_{\tau,\sigma}^s(\Omega)$ (for $1<s<\infty$). If $q=2$ then $A_2$ is a
positive selfadjoint operator in $\bfL_{\tau,\sigma}^2$, see \cite{BdV}.
Consequently, $J^{(k)}_2$ is a selfadjoint operator in
$\bfL_{\tau,\sigma}^2(\Omega)$, too. Finally, it is proven in \cite[p.~246]{Yo}
that $J^{(k)}_q\mathbf{v}\to\mathbf{v}$ strongly in $\bfL_{\tau,\sigma}^q(\Omega)$ for all
$\mathbf{v}\in\bfL_{\tau,\sigma}^q(\Omega)$ and $k\to\infty$.
Consider (\ref{3.1}) with $\mbox{\boldmath $\phi$}(\mathbf{x},t)=[J^{(k)}_q\mathbf{w}](\mathbf{x})\,
\vartheta(t)$, where $k\in{\mathbb N}$, $\mathbf{w}\in\bfC^{\infty}_{0,\sigma}(\Omega)$ and
$\vartheta\in C^{\infty}_0\bigl([0,T)\bigr)$. In this case,
(\ref{3.1}) yields
\begin{align}
\int_0^T \int_{\Omega}\bigl[-\mathbf{u} & \cdot J^{(k)}_q\mathbf{w}\,
\vartheta'+ (\mathbf{u}\cdot\nabla\mathbf{u})\cdot J^{(k)}_q\mathbf{w}\, \vartheta+
2\nu\hbox to 0.7pt{}(\nabla\mathbf{u})_s:(\nabla J^{(k)}_q\mathbf{w})_s\, \vartheta\bigr]\;
\mathrm{d}\mathbf{x}\, \mathrm{d} t \nonumber \\ \noalign{\vskip-4pt}
& \hspace{14pt} +\int_0^T\int_{\partial\Omega}\gamma\hbox to 0.7pt{}\mathbf{u}\cdot
J^{(k)}_q\mathbf{w}\, \vartheta\; \mathrm{d} S\, \mathrm{d} t \nonumber \\
\noalign{\vskip 2pt}
&=\ \int_0^T\int_{\Omega}\mathbf{f}\cdot J^{(k)}_q\mathbf{w}\, \vartheta\;
\mathrm{d}\mathbf{x}\, \mathrm{d} t+ \int_{\Omega}\mathbf{u}_0\cdot J^{(k)}_q\mathbf{w}\,
\vartheta(0)\, \mathrm{d}\mathbf{x}. \label{5.3}
\end{align}
The integral of $(\mathbf{u}\cdot\nabla\mathbf{u})\cdot J^{(k)}_q\mathbf{w}$ in
$\Omega$ can be rewritten as follows:
\begin{align*}
\int_{\Omega}(\mathbf{u} & \cdot\nabla\mathbf{u})\cdot J^{(k)}_q\mathbf{w}\;
\mathrm{d}\mathbf{x} = \int_{\Omega}P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot
J^{(k)}_q\mathbf{w}\; \mathrm{d}\mathbf{x} = \int_{\Omega}
P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot J^{(k)}_2\mathbf{w}\; \mathrm{d}\mathbf{x} \\
&= \lim_{m\to\infty}\ \int_{\Omega}J^{(m)}_q
P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot J^{(k)}_2\mathbf{w}\; \mathrm{d}\mathbf{x} =
\lim_{m\to\infty}\ \int_{\Omega}J^{(k)}_2J^{(m)}_q
P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot \mathbf{w}\; \mathrm{d}\mathbf{x} \\
&= \lim_{m\to\infty}\ \int_{\Omega}J^{(k)}_qJ^{(m)}_q
P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot \mathbf{w}\; \mathrm{d}\mathbf{x} = \lim_{m\to\infty}\
\int_{\Omega}J^{(m)}_qJ^{(k)}_q P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot
\mathbf{w}\; \mathrm{d}\mathbf{x} \\
&= \int_{\Omega} J^{(k)}_q P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot \mathbf{w}\;
\mathrm{d}\mathbf{x}.
\end{align*}
This shows, among other things, that the integrals of $\mathbf{v}_1\cdot
J^{(k)}_q\mathbf{v}_2$ and $J^{(k)}_q\mathbf{v}_1\cdot\mathbf{v}_2$ in $\Omega$ are
equal for $\mathbf{v}_1,\, \mathbf{v}_2\in\bfL_{\tau,\sigma}^q(\Omega)$. The integrals of
$2\nu\hbox to 0.7pt{}(\nabla\mathbf{u})_s:(\nabla J^{(k)}_q\mathbf{w})_s$ and
$\gamma\hbox to 0.7pt{}\mathbf{u}\cdot J^{(k)}_q\mathbf{w}$ over $\Omega$ and
$\partial\Omega$, respectively, can be modified by means of the
identities:
\begin{align*}
\int_{\Omega} & 2\nu\hbox to 0.7pt{}(\nabla\mathbf{u})_s:(\nabla J^{(k)}_q\mathbf{w})_s\;
\mathrm{d}\mathbf{x}+\int_{\partial\Omega}\gamma\mathbf{u}\cdot J^{(k)}_q\mathbf{w}\; \mathrm{d}
S \\
&=\ \int_{\Omega}2\nu\hbox to 0.7pt{}\nabla\mathbf{u} : (\nabla J^{(k)}_q\mathbf{w})_s\;
\mathrm{d}\mathbf{x}+\int_{\partial\Omega}\gamma\mathbf{u}\cdot J^{(k)}_q\mathbf{w}\; \mathrm{d}
S \\
&=\ \int_{\partial\Omega}2\nu\hbox to 0.7pt{}\mathbf{u}\cdot[(\nabla
J^{(k)}_q\mathbf{w})_s\cdot\mathbf{n}]\; \mathrm{d} S-\int_{\Omega}\nu\hbox to 0.7pt{}\mathbf{u}\cdot
\Delta J^{(k)}_q\mathbf{w}\; \mathrm{d}\mathbf{x}+\int_{\partial\Omega}\gamma
\mathbf{u}\cdot J^{(k)}_q\mathbf{w}\; \mathrm{d} S \\
&=\ -\int_{\Omega}\nu\hbox to 0.7pt{}\mathbf{u}\cdot \Delta J^{(k)}_q\mathbf{w}\; \mathrm{d}\mathbf{x}\
=\ \int_{\Omega}\mathbf{u}\cdot A_q J^{(k)}_q\mathbf{w}\; \mathrm{d}\mathbf{x}\ =\
\int_{\Omega}A_q\mathbf{u}\cdot J^{(k)}_q\mathbf{w}\; \mathrm{d}\mathbf{x} \\
&=\ \int_{\Omega}J^{(k)}_q A_q\mathbf{u}\cdot\mathbf{w}\; \mathrm{d}\mathbf{x}\ =\
\int_{\Omega}A_q J^{(k)}_q\mathbf{u}\cdot\mathbf{w}\; \mathrm{d}\mathbf{x}.
\end{align*}
Thus, we obtain from (\ref{5.3}):
\begin{align*}
\int_0^T \int_{\Omega}\bigl[-J^{(k)}_q\mathbf{u} & \cdot\mathbf{w}\,
\vartheta'+J^{(k)}_q P_q(\mathbf{u}\cdot\nabla\mathbf{u})\cdot\mathbf{w}\,
\vartheta+A_q J^{(k)}_q\mathbf{u}\cdot\mathbf{w}\, \vartheta \bigr]\;
\mathrm{d}\mathbf{x}\, \mathrm{d} t \nonumber \\ \noalign{\vskip 0pt}
&=\ \int_0^T\int_{\Omega} J^{(k)}_q\mathbf{f}\cdot\mathbf{w}\, \vartheta\;
\mathrm{d}\mathbf{x}\, \mathrm{d} t+\int_{\Omega} J^{(k)}_q\mathbf{u}_0\cdot\mathbf{w}\,
\vartheta(0)\, \mathrm{d}\mathbf{x}.
\end{align*}
As $\mathbf{w}$ and $\vartheta$ are arbitrary functions from
$\bfC^{\infty}_{0,\sigma}(\Omega)$ and $C^{\infty}_0\bigl([0,T)\bigr)$, respectively,
this shows that $J^{(k)}_q\mathbf{u}$ is a solution of the
initial--value problem
\begin{equation}
(J^{(k)}_q\mathbf{u})'+A_q J^{(k)}_q\mathbf{u}\ =\ J^{(k)}_q\mathbf{g}, \qquad
J^{(k)}_q\mathbf{u}(\, .\, ,0)=J^{(k)}_q\mathbf{u}_0 \label{5.4}
\end{equation}
(which is a problem in $\bfL_{\tau,\sigma}^q(\Omega)$) in the class
$W^{1,r}(0,T;\, \bfL_{\tau,\sigma}^q(\Omega))\cap L^r(0,T;\,
\mathbf{W}^{2,q}(\Omega))$. Since $J^{(k)}_q\mathbf{U}$ solves the same
problem and belongs to the same class, we obtain the identity
$J^{(k)}_q\mathbf{U}(t)=J^{(k)}_q\mathbf{u}(t)$ for a.a.~$t\in(0,T)$.
Consequently, $\mathbf{U}(t)=\mathbf{u}(t)$ for a.a.~$t\in(0,T)$.
\noindent
{\bf 5.4. The estimate of $\mathbf{u}$ and an associated pressure $p$.}
\ Since $\mathbf{g}=P_q\mathbf{f}-P_q(\mathbf{u}\cdot\nabla\mathbf{u})$, we can also write
equation (\ref{5.1}) in the form
\begin{align*}
\partial_t\mathbf{u}+\mathbf{u}\cdot\nabla\mathbf{u}\ &=\ -\nabla\pi+\nu\Delta\mathbf{u}+
\mathbf{f}+(I-P_q)(-\mathbf{f}+\mathbf{u}\cdot\nabla\mathbf{u}) \\
&=\ -\nabla(\pi+\zeta)+\nu\Delta\mathbf{u}+\mathbf{f},
\end{align*}
where $\nabla\zeta=(I-P_q)(\mathbf{u}\cdot\nabla\mathbf{u}-\mathbf{f})$. (The fact
that $(I-P_q)(\mathbf{u}\cdot\nabla\mathbf{u}-\mathbf{f})$ can be expressed in the
form $\nabla\zeta$ follows e.g.~from \cite[section III.1]{Ga1}.)
We observe that $p:=\pi+\zeta$ is a pressure, associated with the
weak solution $\mathbf{u}$. Since the pair $(\mathbf{U},\pi)$ satisfies
(\ref{5.2}), $\mathbf{u}$ and $p$ satisfy the analogous estimate
\begin{align}
\int_0^T & \|\partial_t\mathbf{u}\|_q^r\; \mathrm{d} t+\int_0^T
\|\mathbf{u}\|_{2,q}^r\; \mathrm{d} t+\int_0^T\|p\|_{1,q}^r\; \mathrm{d} t
\nonumber \\
&\leq\ C\int_0^T\bigl( \|\mathbf{f}\|_q^r+\| P_q(\mathbf{u}\cdot
\nabla\mathbf{u})\|_q^r \bigr)\; \mathrm{d} t+C\,
\|\mathbf{u}_0\|_{\mathbf{E}_r^q(\Omega)}^r. \label{5.5}
\end{align}
We have thus proved the following theorem:
\begin{theorem} \label{T5.1}
Let $\Omega$ be a bounded domain in ${\mathbb R}^3$ with the boundary of
the class $C^2$ and $T>0$. Let $1<q<\frac{3}{2}$, $1<r<2$,
$2/r+3/q=4$, $\mathbf{u}_0\in\mathbf{E}_r^q(\Omega)\cap \bfL_{\tau,\sigma}^2(\Omega)$ and
$\mathbf{f}\in L^r(0,T;\, \mathbf{L}^q(\Omega))\cap L^2(0,T;\,
\mathbf{L}^2(\Omega))$. Let $\mathbf{u}$ be a weak solution to the
Navier-Stokes IBVP (\ref{1.1})--(\ref{1.4}) and $p$ be an
associated pressure. Then $\mathbf{u}\in L^r(0,T;\ \mathbf{W}^{2,q}(\Omega))
\cap W^{1,r}(0,T;\, \mathbf{L}^q(\Omega))$ and $p$ can be identified
with a function from $L^r(0,T;\, L^{3q/(3-q)}(\Omega))$. The
functions $\mathbf{u}$, $p$ satisfy equations (\ref{1.1}), (\ref{1.2})
a.e.~in $Q_T$ and the boundary conditions (\ref{1.3}) a.e.~in
$\Gamma_T$. Moreover, they also satisfy estimate (\ref{5.5}).
\end{theorem}
\section{An interior regularity of the associated pressure} \label{S6}
{\bf 6.1. On previous results on the interior regularity of
velocity and pressure.} \ The next lemma recalls the well-known
result of Serrin on the interior regularity of weak solutions to
the system (\ref{1.1}), (\ref{1.2}). (See e.g.~\cite{Oh},
\cite{Se} or \cite{Ga2}.) It concerns weak solutions in
$\Omega_1\times(t_1,t_2)$, where $\Omega_1$ is a sub-domain of
$\Omega$, independently of boundary conditions on $\Gamma_T$.
\begin{lemma} \label{L6.1}
Let $\Omega_1$ be a sub-domain of $\Omega$, $0\leq t_1<t_2\leq T$
and let $\mathbf{u}$ be a weak solution to the system (\ref{1.1}),
(\ref{1.2}) with $\mathbf{f}=\mathbf{z}ero$ in $\Omega_1\times(t_1,t_2)$. Let
$\mathbf{u}\in L^r(t_1,t_2;\, \mathbf{L}^s(\Omega_1))$, where
$r\in[2,\infty)$, $s\in(3,\infty]$ and $2/r+3/s=1$. Then, if
$\Omega_2\subset\subset\Omega_1$ and $0<2\epsilon<t_2-t_1$,
solution $\mathbf{u}$ has all spatial derivatives (of all orders)
bounded in $\Omega_2\times(t_1+\epsilon,t_2-\epsilon)$.
\end{lemma}
Note that Lemma \ref{L6.1} uses no assumptions on boundary
conditions, satisfied by $\mathbf{u}$ on $\partial\Omega\times(0,T)$.
The assumption that $\mathbf{u}$ is a weak solution to the system
(\ref{1.1}), (\ref{1.2}) in $\Omega_1\times(t_1,t_2)$ means that
$\mathbf{u}\in L^{\infty}(t_1,t_2;\, \mathbf{L}^{\infty}(\Omega_1))\cap
L^2(t_1,t_2;\, \mathbf{W}^{1,2}(\Omega_1))$, $\mathrm{div}\,\mathbf{u}=0$ holds in the
sense of distributions in $\Omega_1\times(t_1,t_2)$ and $\mathbf{u}$
satisfies (\ref{3.1}) for all infinitely differentiable
divergence--free test functions $\mbox{\boldmath $\phi$}$ that have a compact
support in $\Omega_1\times(t_1,t_2)$. (Then the last integral on
the left hand side and both integrals on the right hand side are
equal to zero.) Also note that applying the results of
\cite{Sereg}, one can add to the conclusions of Lemma \ref{L6.1}
that $\mathbf{u}$ is H\"older--continuous in
$\Omega_2\times(t_1+\epsilon,t_2-\epsilon)$. Lemma \ref{L6.1}
provides no information on the associated pressure $p$ or the time
derivative $\partial_t\mathbf{u}$ in $\Omega_2\times(t_1+\epsilon,t_2-
\epsilon)$. The known results on the regularity of $p$ and
$\partial_t\mathbf{u}$ in $\Omega_2\times(t_1+\epsilon,t_2-\epsilon)$, under
the assumptions that $\mathbf{u}$ is a weak solution of (\ref{1.1}),
(\ref{1.2}) in $\Omega\times(t_1,t_2)$ satisfying the conditions
formulated in Lemma \ref{L6.1} in $\Omega_1\times(t_1,t_2)$, state:
\begin{list}{}
{\setlength{\topsep}{4pt}
\setlength{\itemsep}{2pt}
\setlength{\leftmargin}{17pt}
\setlength{\rightmargin}{0pt}
\setlength{\labelwidth}{8pt}}
\item[a)]
If $\Omega={\mathbb R}^3$ then $p$, $\partial_t\mathbf{u}$ and all their spatial
derivatives (of all orders) are in $L^{\infty}(\Omega_2
\times(t_1+\epsilon,t_2-\epsilon))$, see \cite{Ne1}, \cite{Ne2},
\cite{SkaKu}.
\item[b)]
If $\Omega$ is a bounded or exterior domain in ${\mathbb R}^3$ with the
boundary of the class $C^{2+(h)}$ for some $h>0$ and $\mathbf{u}$
satisfies the no--slip boundary condition $\mathbf{u}=\mathbf{z}ero$ on
$\partial\Omega\times(0,T)$ then $p$ and $\partial_t\mathbf{u}$ have all
spatial derivatives (of all orders) in
$L^q(t_1+\epsilon,t_2-\epsilon;\, L^{\infty}(\Omega_2))$ for any
$q\in(1,2)$, see \cite{NePe1}, \cite{Ne1}, \cite{Ne2} or
\cite{SkaKu}.
\item[c)]
If $\Omega$ is a bounded domain in ${\mathbb R}^3$ with the boundary of the
class $C^{2+(h)}$ for some $h>0$ and $\mathbf{u}$ satisfies the
Navier--type boundary conditions
\begin{displaymath}
\mathbf{u}\cdot\mathbf{n}=0, \qquad \mathbf{curl}\, \, \mathbf{u}\times\mathbf{n}=\mathbf{z}ero \qquad
\mbox{on}\ \partial\Omega\times(t_1,t_2)
\end{displaymath}
then $p$ and $\partial_t\mathbf{u}$ have the same regularity in
$\Omega_2\times(t_1+\epsilon,t_2-\epsilon)$ as stated in item a),
see \cite{NeAlB}.
\end{list}
\noindent
In the proofs, it is always sufficient to show that the
aforementioned statements hold for $p$. The same statements on
$\partial_t\mathbf{u}$ follow from the fact that $\nabla p$ and
$\partial_t\mathbf{u}$ are interconnected through the Navier--Stokes
equation (\ref{1.1}).
\noindent
{\bf 6.2. An interior regularity of $p$ in case of Navier's
boundary conditions.} \ We further assume that $\Omega$ and $T$
are as in Theorem \ref{T5.1} and $\mathbf{f}=\mathbf{z}ero$. The main result
of this section says:
\begin{theorem} \label{T6.1}
Let $\Omega$ and $T$ be as in Theorem \ref{T5.1} and
$\mathbf{f}=\mathbf{z}ero$. Let $\mathbf{u}$ be a weak solution to the problem
(\ref{1.1})--(\ref{1.4}). Let $\Omega_1$ be a sub-domain of
$\Omega$, $0<t_1<t_2\leq T$ and let $\mathbf{u}\in L^r(t_1,t_2;\,
\mathbf{L}^s(\Omega_1))$, where $r\in[2,\infty)$, $s\in(3,\infty]$ and
$2/r+3/s=1$. Finally, let $\Omega_3\subset\subset\Omega_1$ and
$0<\epsilon<t_2-t_1$. Then $p$ can be chosen so that all its
spatial derivatives (of all orders) are in
$L^4(t_1+\epsilon,t_2-\epsilon;\, L^{\infty}(\Omega_3))$.
Similarly, $\partial_t\mathbf{u}$ and all its spatial derivatives (of
all orders) are in $L^4(t_1+\epsilon,t_2-\epsilon;\,
\mathbf{L}^{\infty}(\Omega_3))$.
\end{theorem}
\begin{proof}
There exists $t_*\in(0,t_1)$ such that $\mathbf{u}(\, .\,
,t_*)\in\bfW_{\tau,\sigma}^{1,2}(\Omega)\subset\mathbf{E}_r^q(\Omega)$ for all $r$ and
$q$, considered in Theorem \ref{T5.1}. Hence $\mathbf{u}\in L^r(t_*,T;\
\mathbf{W}^{2,q}(\Omega))\cap W^{1,r}(t_*,T;\, \mathbf{L}^q(\Omega))$ and $p$
can be chosen so that $p\in L^r(t_*,T;\, L^{3q/(3-q)}(\Omega))$.
Let $\epsilon$ and $\Omega_2$ be the number and domain,
respectively, given by Lemma \ref{L6.1}. We may assume that
$\Omega_2$ and $\Omega_3$ are chosen so that
$\emptyset\not=\Omega_3\subset\subset\Omega_2\subset\subset\Omega$.
Applying the operator of divergence to equation (\ref{1.1}), we
obtain the equation
\begin{equation}
\Delta p\ =\ -\nabla\mathbf{u}:(\nabla\mathbf{u})^T, \label{6.1}
\end{equation}
which holds in the sense of distributions in $Q_T$. Taking into
account that $p$ is at least locally integrable in
$\Omega_1\times(t_1,t_2)$, we obtain from (\ref{6.1}) that
\begin{displaymath}
\int_{t_1}^{t_2}\theta(t)\int_{\Omega_1} \bigl[ p\,
\Delta\varphi(\mathbf{x})+\nabla\mathbf{u}:(\nabla\mathbf{u})^T\,
\varphi(\mathbf{x})\bigr]\; \mathrm{d}\mathbf{x}\, \mathrm{d} t\ =\ 0
\end{displaymath}
for all $\theta\in C^{\infty}_0((t_1,t_2))$ and $\varphi\in
C^{\infty}_0(\Omega_1)$. From this, we deduce that equation
(\ref{6.1}) holds in $\Omega_1$ in the sense of distributions at
a.a.~fixed time instants $t\in(t_1+\epsilon,t_2-\epsilon)$. Let
further $t$ be one of these time instants and let $t$ be also
chosen so that $\mathbf{u}(\, .\, ,t)\in\mathbf{W}^{2,q}(\Omega)$,
$\partial_t\mathbf{u}(\, .\, ,t)\in\mathbf{L}^q(\Omega)$ and $p(\, .\, ,t)\in
L^{3q/(3-q)}(\Omega)$. As $p(\, .\, ,t)\in L^1_{loc}(\Omega_1)$
and the right hand side of (\ref{6.1}) (at the fixed time $t$) is
infinitely differentiable in the spatial variable in $\Omega_2$,
the function $p(\, .\, ,t)$ is also infinitely differentiable in
$\Omega_2$, see e.g.~\cite{FraFio}.
Let $\mathbf{x}_0\in\Omega_3$ and $0<\rho_1<\rho_2$ be so small that
$B_{\rho_2}(\mathbf{x}_0)\subset\Omega_2$. Define an infinitely
differentiable non-increasing cut--off function $\eta$ in
$[0,\infty)$ by the formula
\begin{displaymath}
\eta(\sigma)\ \left\{ \begin{array}{ll} =1 & \mbox{for}\
0\leq\sigma\leq\rho_1, \\ [1pt] \in(0,1) & \mbox{for}\
\rho_1<\sigma<\rho_2, \\ [1pt] =0 & \mbox{for}\ \rho_2\leq\sigma.
\end{array} \right.
\end{displaymath}
Let $\mathbf{x}\in B_{\rho_1}(\mathbf{x}_0)$ and $\mathbf{e}$ be a constant unit
vector in ${\mathbb R}^3$. Then
\begin{align*}
\nabla_{\!\mathbf{x}}\hbox to 0.7pt{} p(\mathbf{x},t)\cdot\mathbf{e}\ &=\
\eta\bigl(|\mathbf{x}-\mathbf{x}_0|\bigr)\, \nabla_{\!\mathbf{x}}\hbox to 0.7pt{}
p(\mathbf{x},t)\cdot\mathbf{e} \\
&=\ -\frac{1}{4\pi}\int_{{\mathbb R}^3} \frac{1}{|\mathbf{y}-\mathbf{x}|}\
\Delta_{\mathbf{y}}\bigl[ \eta\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)\,
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{y},t)\cdot\mathbf{e} \bigr]\; \mathrm{d}\mathbf{y}.
\end{align*}
Particularly, this also holds for $\mathbf{x}=\mathbf{x}_0$:
\begin{align}
\nabla_{\!\mathbf{x}}\hbox to 0.7pt{} p(\mathbf{x},t)\cdot\mathbf{e}\,
\bigl|_{\mathbf{x}=\mathbf{x}_0}\bigr.\ &=\ -\frac{1}{4\pi}\int_{{\mathbb R}^3}
\frac{1}{|\mathbf{y}-\mathbf{x}_0|}\ \Delta_{\mathbf{y}}\bigl[
\eta\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)\, \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}
p(\mathbf{y},t)\cdot\mathbf{e} \bigr]\; \mathrm{d}\mathbf{y} \nonumber \\
&=\ -\frac{1}{4\pi}\int_{{\mathbb R}^3} \frac{1}{|\mathbf{y}|}\
\Delta_{\mathbf{y}}\bigl[ \eta\bigl(|\mathbf{y}|\bigr)\, \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}
p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e} \bigr]\; \mathrm{d}\mathbf{y} \nonumber \\
&=\ -\frac{1}{4\pi}\, \bigl[
P^{(1)}(\mathbf{x}_0)+2P^{(2)}(\mathbf{x}_0)+P^{(3)}(\mathbf{x}_0) \bigr],
\label{6.3}
\end{align}
where
\begin{align*}
P^{(1)}(\mathbf{x}_0)\ &=\ \int_{B_{\rho_2}(\mathbf{z}ero)} \frac{1}{|\mathbf{y}|}\
\Delta_{\mathbf{y}}\eta\bigl(|\mathbf{y}|\bigr)\, \bigl[ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}
p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e} \bigr]\; \mathrm{d}\mathbf{y}, \\
P^{(2)}(\mathbf{x}_0)\ &=\ \int_{B_{\rho_2}(\mathbf{z}ero)} \frac{1}{|\mathbf{y}|}\
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\eta\bigl(|\mathbf{y}|\bigr)\cdot\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}
\bigl[ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e} \bigr]\;
\mathrm{d}\mathbf{y}, \\
P^{(3)}(\mathbf{x}_0)\ &=\ \int_{B_{\rho_2}(\mathbf{z}ero)}
\frac{\eta\bigl(|\mathbf{y}|\bigr)}{|\mathbf{y}|}\ \Delta_{\mathbf{y}}\hbox to 0.7pt{}\bigl[
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e} \bigr]\; \mathrm{d}\mathbf{y}.
\end{align*}
\noindent
{\it The estimate of $P^{(3)}(\mathbf{x}_0)$.} \ The estimate of the
last term is easy:
\begin{align}
\bigl| P^{(3)}(\mathbf{x}_0) \bigr|\ &=\ \biggl|
\int_{B_{\rho_2}(\mathbf{z}ero)} \Bigl( \nabla_{\!\mathbf{y}}\,
\frac{\eta\bigl(|\mathbf{y}|\bigr)}{|\mathbf{y}|}\cdot\mathbf{e}
\Bigr)\, \Delta_{\mathbf{y}} p(\mathbf{x}_0+\mathbf{y},t)\; \mathrm{d}\mathbf{y} \biggr|
\nonumber \\
&=\ \biggl| \int_{B_{\rho_2}(\mathbf{z}ero)} \Bigl( \nabla_{\!\mathbf{y}}\,
\frac{\eta\bigl(|\mathbf{y}|\bigr)}{|\mathbf{y}|}\cdot\mathbf{e} \Bigr)\, \bigl[
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\mathbf{u}(\mathbf{x}_0+\mathbf{y},t):\bigl(
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\mathbf{u}(\mathbf{x}_0+
\mathbf{y},t)\bigr)^T\bigr]\; \mathrm{d}\mathbf{y} \biggr| \nonumber \\
&\leq\ c\, \int_{B_{\rho_2}(\mathbf{z}ero)} \Bigl| \nabla_{\!\mathbf{y}}\,
\frac{\eta\bigl(|\mathbf{y}|\bigr)}{|\mathbf{y}|}\cdot\mathbf{e} \Bigr|\; \mathrm{d}\mathbf{y}\
\leq\ c. \label{6.4}
\end{align}
\noindent
{\it The estimate of $P^{(2)}(\mathbf{x}_0)$.} \ We can write
\begin{displaymath}
\frac{1}{|\mathbf{y}|}\ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\eta\bigl(|\mathbf{y}|\bigr)\ =\
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}{\cal F}\bigl(|\mathbf{y}|\bigr),
\end{displaymath}
where ${\cal F}(s):=-\int_s^{\infty} \eta'(\sigma)/\sigma\; \mathrm{d}\sigma$
for $s\geq 0$. We observe that ${\cal F}$ is constant on $[0,\rho_1]$,
equal to zero on $[\rho_2,\infty)$ and ${\cal F}'(s)=\eta'(s)/s$ for
$s>0$. Thus, we have
\begin{align}
\bigl| P^{(2)}(\mathbf{x}_0) \bigr|\ &=\ \biggl|
\int_{B_{\rho_2}(\mathbf{z}ero)} \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}
{\cal F}\bigl(|\mathbf{y}|\bigr)\cdot \nabla_{\!\mathbf{y}}\hbox to 0.7pt{} \bigl[
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e} \bigr]\;
\mathrm{d}\mathbf{y} \biggr| \nonumber \\
&=\ \biggl| \int_{B_{\rho_2}(\mathbf{z}ero)}
\Delta_{\mathbf{y}}{\cal F}\bigl(|\mathbf{y}|\bigr)\, \mathbf{e}\cdot \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}
p(\mathbf{x}_0+\mathbf{y},t)\; \mathrm{d}\mathbf{y} \biggr|. \label{6.5}
\end{align}
The vector function $\Delta_{\mathbf{y}}{\cal F}\bigl(|\mathbf{y}|\bigr)\, \mathbf{e}$
can be written in the form
\begin{equation}
\Delta_{\mathbf{y}}{\cal F}\bigl(|\mathbf{y}|\bigr)\, \mathbf{e}\ =\
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\varphi(\mathbf{y})+\mathbf{w}(\mathbf{y}), \label{6.6}
\end{equation}
where
\begin{displaymath}
\varphi(\mathbf{y})= \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}{\cal F}\bigl(|\mathbf{y}|\bigr) \cdot\mathbf{e},
\qquad \mathbf{w}(\mathbf{y})=\Delta_{\mathbf{y}}{\cal F}\bigl(|\mathbf{y}|\bigr)\, \mathbf{e}-
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\bigl[ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}{\cal F}\bigl(|\mathbf{y}|\bigr)
\cdot\mathbf{e}\bigr].
\end{displaymath}
The functions $\varphi$ and $\mathbf{w}$ are infinitely differentiable
in ${\mathbb R}^3$ and $\varphi=0$, $\mathbf{w}=\mathbf{z}ero$ in ${\mathbb R}^3\smallsetminus
B_{\rho_2}(\mathbf{z}ero)$. Since
\begin{displaymath}
\mathrm{div}\,\mathbf{w}\ =\ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\Delta_{\mathbf{y}}
{\cal F}\bigl(|\mathbf{y}|\bigr)\cdot\mathbf{e}-\Delta_{\mathbf{y}}\hbox to 0.7pt{}\bigl[
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}{\cal F}\bigl(|\mathbf{y}|\bigr) \cdot\mathbf{e}\bigr]\ =\ 0,
\end{displaymath}
(\ref{6.6}) in fact represents the Helmholtz decomposition of
$\Delta_{\mathbf{y}}{\cal F}\bigl(|\mathbf{y}|\bigr)\, \mathbf{e}$ in
$B_{\rho_2}(\mathbf{z}ero)$. Substituting from (\ref{6.6}) to
(\ref{6.5}), we obtain
\begin{align}
\bigl| P^{(2)}(\mathbf{x}_0) \bigr|\ &=\ \biggl| \int_{B_{\rho_2}
(\mathbf{z}ero)} \bigl[\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\varphi(\mathbf{y})+\mathbf{w}(\mathbf{y})\bigr]
\cdot\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\; \mathrm{d}\mathbf{y} \biggr|
\nonumber \\
&=\ \biggl| \int_{B_{\rho_2} (\mathbf{z}ero)}
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\varphi(\mathbf{y}) \cdot\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}
p(\mathbf{x}_0+\mathbf{y},t)\; \mathrm{d}\mathbf{y} \biggr| \nonumber \\
&=\ \biggl| \int_{B_{\rho_2} (\mathbf{z}ero)} \varphi(\mathbf{y})\,
\Delta_{\mathbf{y}}p(\mathbf{x}_0+\mathbf{y},t)\; \mathrm{d}\mathbf{y} \biggr| \nonumber \\
&=\ \biggl| \int_{B_{\rho_2} (\mathbf{z}ero)} \varphi(\mathbf{y})\, \bigl[
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\mathbf{u}(\mathbf{x}_0+\mathbf{y},t):
\bigl(\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\mathbf{u}(\mathbf{x}_0+\mathbf{y},t)\bigr)^T \bigr]\;
\mathrm{d}\mathbf{y} \biggr| \nonumber \\
&\leq\ \int_{B_{\rho_2} (\mathbf{z}ero)} |\varphi(\mathbf{y})|\; \mathrm{d}\mathbf{y} \
\leq\ c. \label{6.11}
\end{align}
\noindent
{\it The estimate of $P^{(1)}(\mathbf{x}_0)$.} \ Finally, we have
\begin{align}
P^{(1)}(\mathbf{x}_0)\ &=\ \int_{B_{\rho_2}(\mathbf{z}ero)} \frac{1}{|\mathbf{y}|}\,
\nabla_{\!\mathbf{y}}\eta\bigl(|\mathbf{y}|\bigr)\cdot \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}
\bigl[\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e}\bigr]\; \mathrm{d}\mathbf{y}
\nonumber \\
& \hspace{21pt} -\int_{B_{\rho_2}(\mathbf{z}ero)}
\Bigl[\frac{\mathbf{y}}{|\mathbf{y}|^3}\cdot\nabla_{\!\mathbf{y}}\eta
\bigl(|\mathbf{y}|\bigr) \Bigr]\, \bigl[ \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}
p(\mathbf{x}_0+\mathbf{y},t)\cdot\mathbf{e}\bigr]\; \mathrm{d}\mathbf{y}. \label{6.12}
\end{align}
The first integral coincides with the integral in the formula for
$P^{(2)}(\mathbf{x}_0)$ and it can be therefore treated in the same way.
The second integral on the right hand side of (\ref{6.12}) --- let
us denote it by $P^{(1)}_2(\mathbf{x}_0)$ --- represents the main
obstacle, which ultimately causes $p$ and all its spatial
derivatives to lie only in $L^4(t_1+\epsilon,t_2-\epsilon;\,
L^{\infty}(\Omega_3))$ and not in
$L^{\infty}(t_1+\epsilon,t_2-\epsilon;\, L^{\infty}(\Omega_3))$,
as in the cases from items a) and c) in subsection 6.1. The
integral can be written in the form
\begin{align}
P^{(1)}_2(\mathbf{x}_0)\ &=\ \int_{B_{\rho_2}(\mathbf{z}ero)}
\frac{\eta'\bigl(|\mathbf{y}|\bigr)} {|\mathbf{y}|^2}\,
\mathbf{e}\cdot\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{x}_0+\mathbf{y},t)\; \mathrm{d}\mathbf{y} \nonumber
\\
&=\ \int_{\Omega} \frac{\eta'\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)}
{|\mathbf{y}-\mathbf{x}_0|^2}\, \mathbf{e}\cdot\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{y},t)\;
\mathrm{d}\mathbf{y}. \label{6.13}
\end{align}
Now, we use the Helmholtz decomposition
\begin{equation}
\frac{\eta'\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)} {|\mathbf{y}-\mathbf{x}_0|^2}\, \mathbf{e}\ =\
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\psi(\mathbf{y})+\mathbf{z}(\mathbf{y}), \label{6.14}
\end{equation}
in the whole domain $\Omega$, where
\begin{align*}
\Delta_{\mathbf{y}}\psi(\mathbf{y}) &=
\mathrm{div}\,\Bigl(\frac{\eta'\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)}
{|\mathbf{y}-\mathbf{x}_0|^2}\, \mathbf{e}\Bigr) = \Bigl(
\frac{\eta''\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)} {|\mathbf{y}-\mathbf{x}_0|^3}-
\frac{\eta'\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr) }{|\mathbf{y}-\mathbf{x}_0|^4} \Bigr)\,
(\mathbf{y}-\mathbf{x}_0)\cdot\mathbf{e} && \mbox{for}\ \mathbf{y}\in\Omega, \\
\frac{\partial\psi}{\partial\mathbf{n}}(\mathbf{y})\ &=\ 0 && \mbox{for}\
\mathbf{y}\in \partial\Omega.
\end{align*}
As $\mathbf{z}$ is divergence--free and its normal component on
$\partial\Omega$ is zero, and the integral of
$\nabla\psi\cdot\partial_t\mathbf{u}$ is zero, we get
\begin{align}
P^{(1)}_2(\mathbf{x}_0)\ &=\ \int_{\Omega} \bigl[
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\psi(\mathbf{y})+\mathbf{z}(\mathbf{y}) \bigr]\cdot
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{y},t)\; \mathrm{d}\mathbf{y}\ =\ \int_{\Omega}
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\psi(\mathbf{y})\cdot
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{} p(\mathbf{y},t)\; \mathrm{d}\mathbf{y} \nonumber \\
&=\ \int_{\Omega}
\nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\psi(\mathbf{y})\cdot\bigl[\partial_t\mathbf{u}+\mathbf{u}\cdot
\nabla\mathbf{u}-\nu\Delta\mathbf{u}\bigr](\mathbf{y},t)\; \mathrm{d}\mathbf{y}
\nonumber \\
&=\ \int_{\Omega} \nabla_{\!\mathbf{y}}\hbox to 0.7pt{}\psi(\mathbf{y})\cdot\bigl[\mathbf{u}\cdot
\nabla\mathbf{u}-\nu\Delta\mathbf{u}\bigr](\mathbf{y},t)\; \mathrm{d}\mathbf{y}.
\label{6.15}
\end{align}
We have
\begin{align}
\biggl| \int_{\Omega} & \nabla_{\!\mathbf{y}}\psi\cdot(\mathbf{u}\cdot
\nabla\mathbf{u})\; \mathrm{d}\mathbf{y} \biggr|\ =\ \biggl|\int_{\Omega}
\nabla_{\!\mathbf{y}}^2\psi : (\mathbf{u}\otimes\mathbf{u})\; \mathrm{d}\mathbf{y}\biggr|\ \leq\
c\int_{\Omega}|\mathbf{u}|^2\; \mathrm{d}\mathbf{y}\ \leq\ c, \label{6.16} \\
\biggl| \int_{\Omega} & \nabla_{\!\mathbf{y}}\psi\cdot \nu\Delta\mathbf{u}\;
\mathrm{d}\mathbf{y} \biggr|\ =\ \biggl|\int_{\Omega}\nabla_{\!\mathbf{y}}\psi\cdot
\mathrm{div}\,{\mathbb T}d(\mathbf{u})\; \mathrm{d}\mathbf{y}\biggr| \nonumber \\
&=\ \biggl|\int_{\partial\Omega}\nabla_{\!\mathbf{y}}\psi\cdot
[{\mathbb T}d(\mathbf{u})\cdot\mathbf{n}]\; \mathrm{d} S-
\int_{\Omega}\nabla_{\!\mathbf{y}}^2\psi : {\mathbb T}d(\mathbf{u})\; \mathrm{d}\mathbf{y}
\biggr| \nonumber \\
&=\ \biggl|-\int_{\partial\Omega}\nabla_{\!\mathbf{y}}\psi\cdot
\gamma\mathbf{u}\; \mathrm{d} S-\int_{\Omega}\nabla_{\!\mathbf{y}}^2\psi :
\nu\hbox to 0.7pt{}(\nabla\mathbf{u})_s\; \mathrm{d}\mathbf{y} \biggr| \nonumber \\
&\leq\ c\int_{\partial\Omega}|\mathbf{u}|\; \mathrm{d} S+\biggl|
\int_{\Omega}\nabla_{\!\mathbf{y}}^2\psi : \nu\hbox to 0.7pt{}\nabla\mathbf{u}\; \mathrm{d}\mathbf{y}
\biggr|\ =\ c\int_{\partial\Omega}|\mathbf{u}|\; \mathrm{d} S+\biggl|
\int_{\Omega}(\partial_i\partial_j\psi)\, \nu\,
(\partial_ju_i)\; \mathrm{d}\mathbf{y} \biggr| \nonumber \\
&=\ c\int_{\partial\Omega}|\mathbf{u}|\; \mathrm{d} S+\biggl|
\int_{\partial\Omega}(\partial_j\psi)\, n_i\, \nu\,
(\partial_ju_i)\; \mathrm{d} S \biggr| \nonumber \\
&=\ c\int_{\partial\Omega}|\mathbf{u}|\; \mathrm{d} S+\nu\,
\biggl|\int_{\partial\Omega}(\partial_j\psi)\,
[\partial_j(n_iu_i)-(\partial_jn_i)\, u_i]\; \mathrm{d} S \biggr|
\nonumber \\
&=\ c\int_{\partial\Omega}|\mathbf{u}|\; \mathrm{d} S+\nu\,
\biggl|\int_{\partial\Omega}(\partial_j\psi)\, (\partial_jn_i)\,
u_i\; \mathrm{d} S \biggr| \nonumber \\
&\leq\ c\int_{\partial\Omega}|\mathbf{u}|\; \mathrm{d} S\ \leq\ c\, \biggl(
\int_{\partial\Omega}|\mathbf{u}|^2\; \mathrm{d} S\biggr)^{\! 1/2}\ \leq\ c\,
\bigl( \|\mathbf{u}\|_2+\|\mathbf{u}\|_2^{1/2}\, \|\mathbf{u}\|_{1,2}^{1/2} \bigr)
\nonumber \\
&\leq\ c+c\, \|\mathbf{u}\|_{1,2}^{1/2}. \label{6.17}
\end{align}
The right hand side is in $L^4(t_1+\epsilon,t_2-\epsilon)$. We
have used the estimate
\begin{displaymath}
\bigl| \nabla\psi \bigr|_{1+(h)}\ \leq\ c\, \Bigl| \Bigl(
\frac{\eta''\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr)} {|\mathbf{y}-\mathbf{x}_0|^3}-
\frac{\eta'\bigl(|\mathbf{y}-\mathbf{x}_0|\bigr) }{|\mathbf{y}-\mathbf{x}_0|^4} \Bigr)\,
(\mathbf{y}-\mathbf{x}_0)\cdot\mathbf{e} \Bigr|_{0+(h)}\ \leq\ c,
\end{displaymath}
where $|\, .\, |_{1+(h)}$ and $|\, .\, |_{0+(h)}$ are the norms in
the H\"older spaces $\mathbf{C}^{1+(h)}(\overline{\Omega})$ and
$C^{0+(h)}(\overline{\Omega})$, respectively, see \cite{Na}. The
integral of $|\mathbf{u}|^2$ on $\partial\Omega$ has been estimated by
means of \cite[Theorem II.4.1]{Ga1}.
We have shown that the norm of $\,
\nabla_{\!\mathbf{x}}p(\mathbf{x},t)|_{\mathbf{x}=\mathbf{x}_0}\cdot\mathbf{e}\, $ in
$L^4(t_1+\epsilon,t_2-\epsilon)$ is finite and inde\-pen\-dent of
vector $\mathbf{e}$ and a concrete position of point $\mathbf{x}_0$ in domain
$\Omega_3$. Hence $\nabla p\in L^4(t_1+\epsilon,t_2-\epsilon;\,
\mathbf{L}^{\infty}(\Omega_3))$. From this, one can deduce that $p$ can
be chosen so that $p\in L^4(t_1+\epsilon,t_2-\epsilon;\, L^{\infty}(\Omega_3))$.
Similarly, dealing with $D^{\alpha}_{\mathbf{x}} p(\mathbf{x},t)$, where
$\alpha\equiv(\alpha_1,\alpha_2,\alpha_3)$ is an arbitrary
multi-index, instead of $p(\mathbf{x},t)$, we show that $D^{\alpha} p\in
L^4(t_1+\epsilon,t_2-\epsilon;\, L^{\infty}(\Omega_3))$, too. The proof is complete.
\end{proof}
\noindent
{\bf Acknowledgement.} \ The authors have been supported by the
Academy of Sciences of the Czech Republic (RVO 67985840) and by
the Grant Agency of the Czech Republic, grant No.~17-01747S.
\end{document} |
\begin{document}
\title{On plane curves given by separated polynomials and their automorphisms}
\date{}
\author{Matteo Bonini, Maria Montanucci, Giovanni Zini}
\maketitle
\begin{abstract}
Let $\mathcal{C}$ be a plane curve defined over the algebraic closure $K$ of a prime finite field $\mathbb{F}_p$ by a separated polynomial, that is $\mathcal{C}: A(y)=B(x)$, where $A(y)$ is an additive polynomial of degree $p^n$ and the degree $m$ of $B(X)$ is coprime with $p$. Plane curves given by separated polynomials are well known and have been widely studied in the literature. However, little is known about their automorphism groups. In this paper we compute the full automorphism group of $\mathcal{C}$ when $m \not\equiv 1 \pmod {p^n}$ and $B(X)$ has just one root in $K$, that is, $B(X)=b_m(X+b_{m-1}/mb_m)^m$ for some $b_m,b_{m-1} \in K$. Moreover, some sufficient conditions for the automorphism group of $\mathcal{C}$ to imply that $B(X)=b_m(X+b_{m-1}/mb_m)^m$ are provided. As a byproduct, the full automorphism group of the Norm-Trace curve $\mathcal{C}: x^{(q^r-1)/(q-1)}=y^{q^{r-1}}+y^{q^{r-2}}+\ldots+y$ is computed. Finally, these results are used to construct multi-point AG codes with many automorphisms.
\end{abstract}
{\bf Keywords: } Plane curve, separated polynomial, AG code, code automorphisms.
{\bf MSC Code: } 14H05, 14H37, 94B27.
\section{Introduction}
Deep results on automorphism groups of algebraic curves, defined over a field of characteristic zero, have been achieved after the work of Hurwitz, who was the first to prove that complex curves, other than the rational and the elliptic ones, can only have a finite number of automorphisms. Afterwards, a proof of Hurwitz's result which is independent of the characteristic of the ground field was provided, increasing the interest in studying curves defined over fields of positive characteristic, such as finite fields. This is especially understandable recalling that curves in positive characteristic may happen to have a much larger $K$-automorphism group compared to their genus, as the Hurwitz bound $|G| \leq 84(g-1)$ for a $K$-automorphism group $G$ of a curve of genus $g \geq 2$ fails whenever $|G|$ is divisible by the characteristic of the ground field. From previous results, see e.g. \cite{henn}, we know infinite families of curves $\mathcal{C}$ with $|{\rm Aut}(\mathcal{C})| \sim cg^3$ or with $|{\rm Aut}(\mathcal{C})| \sim cg^2$. Although curves with large automorphism groups may have several different features, they seem to share a common property, namely their $p$-rank is equal to zero.
This common property, and this situation so different from the zero characteristic case, raises the problem of constructing and studying curves of $p$-rank zero defined over finite fields with unusual properties that a complex curve cannot have. Artin-Schreier curves and, in particular, Hermitian curves are of this type. A family of such plane curves arises from separated polynomials. It consists of the curves $\mathcal{C}: A(Y)-B(X)=0$ where $p \nmid m$ with $m = \deg B(X) \geq 2$ and $A(Y)$ is any additive separable polynomial. The main known properties of $\mathcal{C}$ are extracted from the local analysis of its unique singular point $P_\infty$; see \cite{Stichorig} and Section \ref{prel}. The exposition describes the genus, the Weierstrass gap sequence at $P_\infty$ and the ramification groups of its translation automorphism group fixing $P_\infty$.
The full $K$-automorphism group of $\mathcal{C}$ fixes $P_\infty$ except in two cases, namely, when $\mathcal{C}$ is the Hermitian curve $Y^{p^n}+Y-X^{p^n+1}=0$ or the curve $Y^{p^n}+Y-X^m=0$ with $m < p^n$ and $p^n \equiv -1 \pmod m$; but no further information is known in the literature. For $p > 2$ and $m = 2$, the latter curve is hyperelliptic. Notably, for $p > 2$, these hyperelliptic curves and the Hermitian curves are the only curves whose $K$-automorphism groups have order larger than $8g^3$; see \cite{henn}. Deligne-Lusztig curves provide other examples of significant curves over finite fields, namely the DLS curves of Suzuki type and the DLR curves of Ree type. They are characterised by their genera and $K$-automorphism groups. For $p = 2$, the Hermitian curves, the DLS curves, and the hyperelliptic curves $Y^2+Y+X^{2^h+1}=0$ are the only curves with $K$-automorphism groups of order larger than $8g^3$.
In this paper we compute the full automorphism group of $\mathcal{C}$ when $m \not\equiv 1 \pmod {p^n}$ and $B(X)$ has just one root in $K$, that is $B(X)=b_m(X+b_{m-1}/mb_m)^m$ for some $b_m,b_{m-1} \in K$. Moreover, some sufficient conditions for the automorphism group of $\mathcal{C}$ to imply that $B(X)=b_m(X+b_{m-1}/mb_m)^m$ are provided. As a byproduct, the full automorphism group of the Norm-Trace curve $\mathcal{C}: x^{(q^r-1)/(q-1)}=y^{q^{r-1}}+y^{q^{r-2}}+\ldots+y$ is computed.
An important application of curves over finite fields is the construction of certain linear codes, called Algebraic Geometric codes (AG codes for short). The parameters of an AG code constructed from a curve $\mathcal{X}$ strictly depend on the geometry of $\mathcal{X}$, and in particular on two fixed divisors on $\mathcal{X}$.
The Norm-Trace curve was used in the literature to construct one-point or two-point AG codes; see \cite{BR2013,G2003,MTT2008}.
In this paper we construct multi point AG codes on the Norm-Trace curve.
Our construction starts from a divisor on $\mathcal{X}$ which is invariant under the whole automorphism group of the curve; hence, our codes turn out to inherit many automorphisms.
\section{Preliminary results} \label{prel}
\subsection{Curves given by separated polynomials}
Throughout the paper, $\mathcal{C}$ is a plane curve defined over the algebraic closure $K$ of a prime finite field $\mathbb{F}_{p}$ by an equation
\begin{equation}\label{EqSeparated}
A(Y)=B(X),
\end{equation}
satisfying the following conditions:
\begin{enumerate}
\item $\deg(\mathcal{C}) \geq 4$;
\item $A(Y)=a_n Y^{p^n} + a_{n-1} Y^{p^{n-1}}+\ldots+a_0 Y$, $a_j \in K$, $a_0,a_n \ne 0$;
\item $B(X)=b_m X^m + b_{m-1} X^{m-1} + \ldots + b_1 X + b_0$, $b_j \in K$, $b_m \ne 0$;
\item $m \not\equiv 0 \pmod p$;
\item $n \geq 1$, $m \geq 2$.
\end{enumerate}
Note that condition 2 holds if and only if $A(Y+a)=A(Y) + A(a)$ for every $a \in K$, that is, if and only if the polynomial $A(Y)$ is additive. The basic properties of $\mathcal{C}$ are collected in the following lemmas; see \cite[Section 12.1]{HKT} and \cite{Stichorig}.
\begin{lemma} \label{lem1}
The curve $\mathcal{C}$ is an irreducible plane curve with at most one singular point.
\begin{itemize}
\item[\rm (i)] If $|m-p^n| =1$, then $\mathcal{C}$ is non-singular.
\item[\rm (ii)] \begin{itemize} \item[\rm (a)] If $m > p^n+1$, then $P_\infty=(0,0,1)$ is an $(m-p^n)$-fold point of $\mathcal{C}$. \item[\rm (b)] If $p^n>m+1$, then $P_\infty=(0,1,0)$ is a $(p^n-m)$-fold point of $\mathcal{C}$. \item[\rm (c)] In both cases, $P_\infty$ is the centre of only one branch of $\mathcal{C}$; also, $P_\infty$ is the unique infinite point of $\mathcal{C}$. \end{itemize}
\item[\rm (iii)] $\mathcal{C}$ has genus $g=\frac{(p^n-1)(m-1)}{2}$;
\item[\rm (iv)] Let $K(x,y)$ with $A(y)=B(x)$ denote the function field of $\mathcal{C}$.
\begin{itemize} \item[\rm (a)] A translation $(x,y) \mapsto (x,y+a)$ preserves $\mathcal{C}$ if and only if $A(a)=0$;
\item[\rm (b)] these translations form an elementary abelian group of order $p^n$, and $Aut_K(K(x,y))$ contains an elementary abelian $p$-group $G$ of order $p^n$ that fixes a unique place $\mathcal{P}_\infty$ centered at $P_\infty$ and acts transitively on the zeros of $x$;
\item[\rm (c)] the sequence of ramification groups of $G$ at $\mathcal{P}_\infty$ is
$$G=G_{\mathcal{P}_\infty}^{(1)}=G_{\mathcal{P}_\infty}^{(2)}=\ldots=G_{\mathcal{P}_\infty}^{(m)}, \quad G_{\mathcal{P}_\infty}^{(m+1)}=\{1\};$$
\item[\rm (d)] $\{\mathcal{P}_\infty\}$ is the unique short orbit of $G$, and
$$div(K(x,y) / K(x,y)^G)=(p^n-1)(m+1) \mathcal{P}_\infty;$$
\item[\rm (e)] $K(x,y)^G$ is rational, and $\mathcal{C}$ has $p$-rank zero.
\end{itemize}
\end{itemize}
\end{lemma}
\begin{lemma} \label{lem2}
Let $M$ be a $K$-automorphism group of $\mathcal{C}$, and let $M_{\mathcal{P}_\infty}=M_{\mathcal{P}_\infty}^{(1)} \rtimes H$ where $p \nmid |H|$. Then
\begin{itemize}
\item[\rm (i)] $|H|$ divides $m(p^n-1)$;
\item[\rm (ii)] $|M_{\mathcal{P}_\infty}^{(1)}| \leq p^n(m-1)^2=\frac{4p^n}{(p^n-1)^2}g^2$;
\item[\rm (iii)] $|M_{\mathcal{P}_\infty}^{(1)}|=p^n$ when $m \not\equiv 1 \pmod {p^n}$, and so $g \not\equiv 0 \pmod {p^n}$;
\item[\rm (iv)] $|M_{\mathcal{P}_\infty}^{(2)}|=p^n$ when $m \equiv 1 \pmod {p^n}$, and so $g \equiv 0 \pmod {p^n}$.
\end{itemize}
\end{lemma}
\begin{lemma} \label{lem3}
The $K$-automorphism group $Aut_K(\mathcal{C})$ fixes the place $\mathcal{P}_\infty$ except in the following two cases.
\begin{enumerate}
\item
\begin{itemize}
\item[\rm (a)] Up to a linear substitution on $X$ and $Y$, $\mathcal{C}$ is the curve $Y^{p^n}+Y = X^m$, with $m<p^n$, $p^n \equiv -1 \pmod m$;
\item[\rm (b)] $Aut_K(\mathcal{C})$ contains a cyclic normal subgroup $C_m$ of order $m$ such that $Aut_K(\mathcal{C}) / C_m \cong PGL(2,p^n)$;
\item[\rm (c)] $C_m$ fixes each of the $p^n+1$ places with the same Weierstrass semigroup as $\mathcal{P}_\infty$;
\item[\rm (d)] $Aut_K(\mathcal{C}) / C_m$ acts on the set of such $p^n+1$ places as $PGL(2,p^n)$.
\end{itemize}
\item \begin{itemize}
\item[\rm (a)] Up to a linear substitution on $X$ and $Y$, $\mathcal{C}$ is the Hermitian curve $\mathcal H_{p^n}:Y^{p^n}+Y = X^{p^n+1}$;
\item[\rm (b)] $Aut_K(\mathcal{C}) \cong PGU(3,p^n)$;
\item[\rm (c)] $Aut_K(\mathcal{C})$ acts on the set of all places with the same Weierstrass semigroup as $\mathcal{P}_\infty$;
\item[\rm (d)] $Aut_K(\mathcal{C})$ acts on the set of such places as $PGU(3,p^n)$ on the Hermitian unital.
\end{itemize}
\end{enumerate}
\end{lemma}
\subsection{Algebraic Geometric codes}
We introduce in this section some basic notions on AG codes. We refer to \cite{Sti} for a detailed introduction.
Let $\mathcal{X}$ be a curve of genus $g$ over $\mathbb F_q$, $\mathbb F_q(\mathcal{X})$ be the field of $\mathbb F_q$-rational functions on $\mathcal{X}$, $\mathcal{X}(\mathbb F_q)$ be the set of $\mathbb F_q$-rational places of $\mathcal{X}$.
For an $\mathbb F_q$-rational divisor $D=\sum_{P\in\mathcal{X}(\mathbb{F}_q)}n_P P$ on $\mathcal{X}$, denote by
$$ \mathcal{L}(D):=\{f\in\mathbb{F}_q(\mathcal{X})\setminus\{0\}\mid (f)+D\geq0\}\cup\{0\} $$
the Riemann-Roch space associated to $D$, whose dimension over $\mathbb{F}_q$ is denoted by $\ell(D)$.
Consider a divisor $D=P_1+\cdots+P_n$ where $P_i\in\mathcal{X}(\mathbb{F}_q)$ and $P_i\ne P_j$ for $i\ne j$, and a second $\mathbb{F}_q$-rational divisor $G$ whose support is disjoint from the support of $D$.
The \emph{functional AG code} $C_{\mathcal{L}}(D,G)$ is defined as the image of the linear evaluation map
$$\begin{array}{llll}
e_D : & \mathcal{L} (G) &\to &\mathbb{F}_q^n\\
& f & \mapsto & e_D(f)=(f(P_1),f(P_2),\ldots, f(P_n))\\
\end{array}.
$$
The code $C_{\mathcal{L}}(D,G)$ has length $n$, dimension $k=\ell(G)-\ell(G-D)$, and minimum distance $d\geq d^*=n-\deg(G)$; $d^*$ is called the \emph{designed minimum distance} (or Goppa minimum distance).
If $n>\deg(G)$, then $e_D$ is injective and $k=\ell(G)$.
If $\deg(G)>2g-2$, then $k=\deg(G)+1-g$.
The \emph{differential code} $C_{\Omega}(D,G)$ is defined as
$$C_{\Omega}(D,G)= \left\{ (res_{P_1}(\omega),res_{P_2}(\omega),\ldots, res_{P_n}(\omega)) \mid \omega \in \Omega(G-D)\right\},$$
where $\Omega(G-D)= \{\omega \in \Omega(\mathcal X) \mid (\omega) \geq G-D\} \cup \{0\}.$ The linear code $C_{\Omega}(D,G)$ has dimension $n-\deg(G)+g-1$ and minimum distance at least $\deg(G)-2g+2$.
Now we define the automorphism group of $C_\mathcal{L}(D,G)$; see \cite{GK2,JK2006}.
Let $\mathcal{M}_{n,q}\leq{\rm GL}(n,q)$ be the subgroup of matrices having exactly one non-zero element in each row and column.
For $\gamma\in Aut(\mathbb{F}_q)$ and $M=(m_{i,j})_{i,j}\in{\rm GL}(n,q)$, let $M^\gamma$ be the matrix $(\gamma(m_{i,j}))_{i,j}$.
Let $\mathcal{W}_{n,q}$ be the semidirect product $\mathcal M_{n,q}\rtimes Aut(\mathbb{F}_q)$ with multiplication $M_1\gamma_1\cdot M_2\gamma_2:= M_1M_2^{\gamma_1}\cdot\gamma_1\gamma_2$.
The \emph{automorphism group} $Aut(C_\mathcal{L}(D,G))$ of $C_\mathcal{L}(D,G)$ is the subgroup of $\mathcal{W}_{n,q}$ preserving $C_\mathcal{L}(D,G)$, that is,
$$ M\gamma(x_1,\ldots,x_n):=((x_1,\ldots,x_n)\cdot M)^\gamma \in C_\mathcal{L}(D,G) \;\;\textrm{for any}\;\; (x_1,\ldots,x_n)\in C_\mathcal{L}(D,G). $$
Let $Aut_{\mathbb{F}_q}(\mathcal{X})$ be the $\mathbb{F}_q$-automorphism group of $\mathcal{X}$ and
$$ Aut_{\mathbb{F}_q,D,G}(\mathcal{X}):=\{ \sigma\in Aut_{\mathbb{F}_q}(\mathcal{X})\,\mid\, \sigma(D)=D,\,\sigma(G)\approx_D G \}, $$
where $G'\approx_D G$ if and only if there exists $u\in\mathbb{F}_q(\mathcal{X})$ such that $G'-G=(u)$ and $u(P_i)=1$ for $i=1,\ldots,n$; note that $\sigma(G)=G$ implies $\sigma(G)\approx_D G$.
Then the following holds.
\begin{proposition}{\rm (\cite[Proposition 2.3]{BMZ2017})}\label{tivoglioiniettivo}
If any non-trivial element of $Aut_{\mathbb{F}_q}(\mathcal{X})$ fixes less than $n$ $\mathbb{F}_q$-rational places of $\mathcal{X}$, then $Aut(C_{\mathcal{L}}(D,G))$ contains a subgroup isomorphic to
$$ ({\rm Aut}_{\mathbb{F}_q,D,G}(\mathcal{X})\rtimes{\rm Aut}(\mathbb{F}_q))\rtimes \mathbb{F}_q^*. $$
\end{proposition}
In the construction of AG codes, the condition ${\rm supp}(D) \cap {\rm supp}(G)=\emptyset$ can be removed as follows; see \cite[Sec. 3.1.1]{TV}.
Let $P_1,\ldots,P_n$ be distinct $\mathbb{F}_q$-rational places of $\mathcal{X}$ and $D=P_1+\ldots +P_n$, $G=\sum n_P P$ be $\mathbb{F}_q$-rational divisors of $\mathcal{X}$.
For any $i=1,\ldots,n$ let $t_i$ be a local parameter at $P_i$. The map
$$\begin{array}{llll}
e^{\prime}_{D} : & \mathcal{L} (G) &\to &\mathbb{F}_q^n\\
& f & \mapsto & e^\prime_{D}(f)=((t_1^{n_{P_1}}f)(P_1),(t_2^{n_{P_2}}f)(P_2),\ldots, (t_n^{n_{P_n}}f)(P_n))\\
\end{array}
$$
is linear. We define the \emph{extended AG code} $C_{ext}(D,G):=e^{\prime}_{D}(\mathcal{L}(G))$.
Note that $e^\prime_D$ is not well-defined since it depends on the choice of the local parameters; yet, different choices yield extended AG codes which are equivalent.
The code $C_{ext}$ is a lengthening of $C_{\mathcal{L}}(\hat D,G)$, where $\hat D = \sum_{P_i\,:\,n_{P_i}=0}P_i$.
The extended code $C_{ext}$ is an $[n,k,d]_q$-code for which the following properties still hold:
\begin{itemize}
\item $d\geq d^*:=n-\deg(G)$.
\item $k=\ell(G)-\ell(G-D)$.
\item If $n>\deg(G)$, then $k=\ell(G)$; if $n>\deg(G)>2g-2$, then $k=\deg(G)+1-g$.
\end{itemize}
\section{On the automorphism group of $\mathcal{C}$}\label{Sec:Aut}
At first we consider the norm-trace curve $\mathcal{N}_{q,r}$ with affine equation
$$ X^{\frac{q^r-1}{q-1}} = Y^{q^{r-1}}+Y^{q^{r-2}}+\cdots+Y, $$
where $q$ is a $p$-power and $r$ is a positive integer.
For $r=2$, this is the $\mathbb F_{q^2}$-maximal Hermitian curve, with automorphism group isomorphic to $PGU(3,q)$.
For $r>2$, we determine the automorphism group of $\mathcal N_{q,r}$.
\begin{theorem}\label{AutNormTrace}
For $r\geq3$, $Aut_K(\mathcal N_{q,r})$ has order $q^{r-1}(q^r-1)$ and is a semidirect product $G\rtimes C$, where
$$ G=\left\{ (x,y)\mapsto(x,y+a)\mid Tr_{q^r\mid q}(a)=0 \right\}, \quad C=\{(x,y)\mapsto(b x,b^{\frac{q^r-1}{q-1}}y)\mid b\in\mathbb F_{q^r}^*\}. $$
\end{theorem}
\begin{proof}
Suppose that $\mathcal N_{q,r}\cong\mathcal{H}_{\bar q}$ for some $p$-power $\bar q$.
From Lemma \ref{lem1} (iii), $g(\mathcal{N}_{q,r})=g(\mathcal{H}_{\bar q})$ reads $\frac{(\frac{q^r-1}{q-1}-1)(q^{r-1}-1)}{2} = \frac{\bar q(\bar q-1)}{2}$. This implies $\bar q=q$ and $r=2$, a contradiction to the assumption on $r$.
Now suppose that $\mathcal{N}_{q,r}$ is isomorphic to the curve $\mathcal{X}:X^s=Y^{\bar q}+Y$ for some $p$-power $\bar q$, with $s<\bar q$, $s\mid(\bar q+1)$.
From Lemma \ref{lem2}(iii), the Sylow $p$-subgroups $Aut_K(\mathcal{N}_{q,r})_{\mathcal P_\infty}^{(1)}$ and $Aut_K(\mathcal{X})_{\mathcal P_\infty}^{(1)}$ of $Aut_K(\mathcal{N}_{q,r})_{\mathcal P_\infty}$ and $Aut_K(\mathcal{X})_{\mathcal P_\infty}$ have order $q^{r-1}$ and $\bar q$, respectively.
From Lemma \ref{lem1}(e) $\mathcal{N}_{q,r}$ and $\mathcal{X}$ have zero $p$-rank. Hence, $Aut_K(\mathcal{N}_{q,r})_{\mathcal P_\infty}^{(1)}$ and $Aut_K(\mathcal{X})_{\mathcal P_\infty}^{(1)}$ are Sylow $p$-subgroups of $Aut_K(\mathcal{N}_{q,r})\cong Aut_K(\mathcal{X})$; see \cite[Lemma 11.129]{HKT}. Therefore $q^{r-1}=\bar q$.
Then $g(\mathcal{N}_{q,r})=g(\mathcal{X})$ yields $s=\frac{q^r-1}{q-1}=\bar q+\cdots+q+1$, a contradiction to $s<\bar q$.
From Lemma \ref{lem3}, this proves that $Aut_K(\mathcal N_{q,r})$ fixes $\mathcal P_\infty$.
By direct checking $Aut_K(\mathcal{N}_{q,r})$ contains the group $G\rtimes C$ defined in the statement of the theorem.
From Lemma \ref{lem3}, $Aut_K(\mathcal{N}_{q,r})=G\rtimes H$, where $H$ is a cyclic group. From Schur-Zassenhaus theorem, $H$ contains $C$ up to conjugation.
By Lemma \ref{lem1}(e) the quotient curve $\mathcal{N}_{q,r}/G$ is rational, and its function field is $K(x)$.
Hence the automorphism group $\bar H\cong H$ of $\mathcal{N}_{q,r}/G$ induced by $H$ has exactly two fixed places and acts semiregularly elsewhere; see \cite[Hauptsatz 8.27]{Hup}.
Since $C\leq H$, the two places fixed by $\bar H$ are the place $\bar {\mathcal P}_\infty$ under $\mathcal P_\infty$ and the zero $\bar P_0$ of $x$.
Let $\Omega=\{P_{(0,0)}, P_{(0,a_2)}, \ldots, P_{(0,a_{q^{r-1}})}\}$ be the orbit of $G$ lying over $\bar P_0$, so that $Aut_K(\mathcal{N}_{q,r})$ acts on $\Omega$; we denote by $P_{(0,0)}\in\Omega$ the zero of $y$, centered at the origin $(0,0)$.
The group $H$ has a fixed point in $\Omega$ by the Orbit-Stabilizer theorem, and $P_{(0,0)}$ is the only fixed place of $C$ other than $\mathcal P_{\infty}$; thus, $H$ fixes $P_{(0,0)}$.
Therefore, $H$ fixes the unique pole of $x$ and $y$, fixes the unique zero of $y$, and acts on the $q^{r-1}$ simple zeros of $x$. This implies that a generator $h$ of $H$ acts as $h(x)=\mu x$, $h(y)=\rho y$ for some $\mu,\rho\in K^*$.
By direct computation, $h$ is an automorphism of $\mathcal{N}_{q,r}$ if and only if $\rho=\rho^q$ and $\mu^{\frac{q^r-1}{q-1}}=\rho$. Hence, $H=C$.
\end{proof}
The following result generalizes Theorem \ref{AutNormTrace}.
\begin{theorem}\label{AutMonom}
Suppose that $m\not\equiv1\pmod{p^n}$ and $B(X)$ has just one root in $K$, so that Equation \eqref{EqSeparated} reads
$$b_m\left(X+\frac{b_{m-1}}{m b_m}\right)^m = A(Y).$$
Then one of the following two cases occurs.
\begin{itemize}
\item[(i)] $m$ divides $p^n+1$ and $A(Y)$ is $p^n$-linearized, that is, $A(Y)=a_n Y^{p^n}+a_0 Y$.
In this case, $\mathcal{C}$ is projectively equivalent to the curve $\mathcal{Q}_m$ with equation $X^m=Y^{p^n}+Y$ described in Case {\rm 1} of Lemma {\rm \ref{lem3}}.
\item[(ii)] $m$ does not divide $p^n+1$ or $A(Y)$ is not $p^n$-linearized.
Let $d=\gcd\left(j\geq1 : a_j\ne0\right)$ be the largest integer such that $A(Y)$ is $p^d$-linearized.
Then $Aut_K(\mathcal{C})$ has order $p^n m(p^d-1)$ and $Aut_K(\mathcal{C})=G\rtimes C$, where $G=\left\{(x,y)\mapsto(x,y+a)\mid A(a)=0\right\}$ and
$$ C=\left\{(x,y)\mapsto\left(bx+\frac{(b-1)b_{m-1}}{mb_m},b^m y\right)\mid b^{m(p^d-1)}=1\right\}. $$
\end{itemize}
\end{theorem}
\begin{proof}
Let $S$ be the stabilizer of $\mathcal P_\infty$ in $Aut_K(\mathcal{C})$.
By direct checking, $S$ contains the semidirect product $G\rtimes C$.
By Lemma \ref{lem2}, $S=G\rtimes H$, where $H$ is a cyclic group of order coprime to $p$. By Schur-Zassenhaus theorem, $H$ contains $C$ up to conjugation.
Arguing as in the proof of Theorem \ref{AutNormTrace}, $\mathcal{C}/G$ is rational, and any nontrivial element of the induced automorphism group $\bar H\cong H\leq Aut_K(\mathcal{C}/G)$ fixes the pole $\bar{\mathcal P}_\infty$ of $x$ and the zero $\bar P$ of $x+\frac{b_{m-1}}{m b_m}$.
Hence $H$ acts on the $p^n$ distinct places of $\mathcal{C}$ lying over $\bar P$, and $H$ fixes one of them by the Orbit-Stabilizer theorem. The only fixed place of $C$ different from $\mathcal P_{\infty}$ is the unique zero $P$ of $y$, centered at the affine point $(\frac{-b_{m-1}}{m b_m},0)$; thus, $H$ fixes $P$.
Let $h$ be a generator of $H$. We have shown that $h$ fixes the zero and the pole of $y$, which implies $h(y)=\rho y$ for some $\rho\in K$. Also, $h$ fixes the pole and acts on the simple zeros of $x+\frac{b_{m-1}}{m b_m}$; this implies $h(x+\frac{b_{m-1}}{m b_m}) = \mu (x+\frac{b_{m-1}}{m b_m})$ for some $\mu\in K$, that is, $h(x)=\mu x + \frac{(\mu-1)b_{m-1}}{m b_m}$.
By direct checking, $h$ normalizes $G$ if and only if $A(\mu a)=0$ for all $a\in K$ satisfying $A(a)=0$. As $A(Y)$ is separable, this happens if and only if $A(\mu Y)=A(Y)$. This is equivalent to $\mu\in\mathbb F_{p^d}^*$, with $d$ defined as in the statement of this theorem.
Then, in order for $h$ to be an automorphism of $\mathcal{C}$, we have $\rho^m=\mu$.
We have shown that $S=G\rtimes C$.
From Lemma \ref{lem3}, either $Aut_K(\mathcal{C})=G\rtimes C$ and Case {\it (ii)} holds, or $\mathcal{C}$ is isomorphic to the curve $\mathcal Q_s: X^s=Y^{\bar q}+Y$ with $s\mid(\bar q+1)$, $s<\bar q$.
Suppose that $\mathcal{C}\cong \mathcal{Q}_s$. By Lemma \ref{lem2} the Sylow $p$-subgroups of $Aut_K(\mathcal{C})$ and $Aut_K(\mathcal{Q}_s)$ have size $p^n$ and $\bar q$ respectively, so that $\bar q=p^n$; as $g(\mathcal{C})=g(\mathcal{Q}_s)$, we have $s=m$.
The normalizer in $Aut_K(\mathcal{Q}_m)$ of a Sylow $p$-subgroup contains a cyclic group of order $p^n-1$, by Lemma \ref{lem3}(b). Hence, the same holds in $Aut_K(\mathcal{C})$ and $d=n$;
this means that $\mathcal{C}$ has equation
\begin{equation}\label{EqTuttoLin}
b_m\left(X+\frac{b_{m-1}}{m b_m}\right)^m = a_n Y^{p^n} + a_0 Y.
\end{equation}
Conversely, if $\mathcal{C}$ is defined by Equation \eqref{EqTuttoLin}, then $\mathcal{C}$ is isomorphic to $\mathcal{Q}_m$. In fact, define $\varphi:(x,y)\mapsto(x^\prime,y^\prime):=(\gamma x,\delta a_0 y)$ with $\delta^{p^n-1}=a b^{-p^n}$ and $\gamma^m=\delta$. Then $K(x,y)=K(x^\prime,y^\prime)$ and $\varphi(\mathcal{C})=\mathcal{Q}_m$.
Now the proof is complete.
\end{proof}
Next result provides a converse to Theorem \ref{AutMonom} and extends \cite[Theorem 12.8]{HKT}.
\begin{theorem}\label{MonomAut}
Let $d=\gcd\left(j\geq1 : a_j\ne0\right)$ be the largest integer such that $A(Y)$ is $p^d$-linearized.
If $|Aut_K(\mathcal{C})_{P_\infty}|/|Aut_K(\mathcal{C})_{P_\infty}^{(1)}|\geq m(p^d-1)$, then $B(X)$ has a unique root in $K$, that is,
$$ B(X)=b_m\left(X+\frac{b_{m-1}}{mb_m}\right)^m. $$
\end{theorem}
\begin{proof}
Let $S$ be the stabilizer of $\mathcal P_\infty$ in $Aut_K(\mathcal{C})$, $H$ be a cyclic complement of $S^{(1)}$ in $S$, and $\alpha$ be a generator of $H$.
From Lemma \ref{lem2}, $G=\{(x,y)\mapsto(x,y+a)\mid A(a)=0\}$ is normal in $S$. Hence, $\alpha$ is an automorphism of the quotient curve $\mathcal{C}/G$; by Lemma \ref{lem1}(e), $\mathcal{C}/G$ is rational with function field $K(x)$.
From \cite[Hauptsatz 8.27]{Hup}, $\alpha$ has two fixed places in $K(x)$ and acts semiregularly elsewhere. One of the two places is the pole of $x$, lying under $\mathcal{P}_\infty$; the other place is the zero of $x^\prime:=x+u$ for some $u\in K$. Thus $\alpha(x^\prime)=bx^\prime$, for some $b\in K^*$ of order $ord(b)=ord(\alpha)$.
Since $\alpha$ fixes the unique pole $\mathcal{P}_\infty$ of $y$ and the Weierstrass semigroup $H(\mathcal{P}_\infty)$ is generated by $-v_{\mathcal{P}_\infty}(x)=p^n$ and $-v_{\mathcal{P}_\infty}(y)=m$, we have that $\alpha(y)=ay+Q(x)$, where $a\in K^*$ and $Q(X)$ is a polynomial satisfying either $Q(X)=0$ or $\deg(Q(X))\cdot p^n<m$.
Let $B^\prime(X^\prime):=B(X)=B(X^\prime-u)$ and $Q^\prime(X^\prime):=Q(X)=Q(X^\prime-u)$.
Since $\alpha$ is an automorphism of $\mathcal{C}$, the polynomial $A(aY+Q^\prime(X^\prime))-B^\prime(b X^\prime)$ is a multiple of the polynomial $A(Y)-B^\prime(X^\prime)$, say
\begin{equation}\label{Transf}
A(aY+Q^\prime(X^\prime))-B^\prime(b X^\prime) = k_1(A(Y)-B^\prime(X^\prime))
\end{equation}
with $k_1\in K^*$.
As $A$ is a separable polynomial, Equation \eqref{Transf} implies $A(aY)=k_1A(Y)$ and hence $k_1=a^{p^j}$ for any $j$ such that $a_j\ne0$; thus, $k_1=a$ and $a^{p^d-1}=1$. Equation \eqref{Transf} also implies $B^\prime(bX^\prime)=k_1B^\prime(X^\prime)+A(Q^\prime(X^\prime))$ and hence $k_1=b^m$ from the comparison of monomials ${X^\prime}^m$; thus, $(b^m)^{p^d-1}=1$ which yields $|H|=m(p^d-1)$.
Let $\beta:=\alpha^{p^d-1}$, which has order $m$ and acts as $\beta(x^\prime)=b^{p^d-1}x^\prime$, $\beta(y)=y+Q^\prime(b^{p^d-2}x^\prime)$.
As $\beta\in Aut_K(\mathcal{C})$, we have
$$
A(Y+Q^\prime(b^{p^d-2}X^\prime))-B^\prime(b^{p^d-1} X^\prime) = k_2(A(Y)-B^\prime(X^\prime))
$$
with $k_2\in K^*$. Then $k_2=1$ and
\begin{equation}\label{Transf2}
B^\prime(b^{p^d-1} X^\prime)=B^\prime(X^\prime)+A(Q^\prime(b^{p^d-2}X^\prime)).
\end{equation}
We want to show that $\beta(y)=y$.
Suppose by contradiction that $Q^\prime(b^{p^d-2}X^\prime)\ne0$.
If $Q^\prime(b^{p^d-2}X^\prime)$ is a nonzero constant, then the order of $\beta$ is a multiple of $p$, a contradiction to $ord(\beta)=m$.
If $\deg(Q^\prime(b^{p^d-2}X^\prime))>1$, then in Equation \eqref{Transf2} the right-hand side has a non-vanishing term of degree $p^n\cdot\deg(Q^\prime(X^\prime))$ while the left-hand side has not, a contradiction.
Therefore, $\beta(x^\prime)=b^{p^d-1}x^\prime$ and $\beta(y)=y$, with $ord(b)=m(p^d-1)$. Since $\beta$ is an automorphism of $\mathcal{C}$, $B^\prime(X^\prime)=\lambda {X^\prime}^m$ for some $\lambda\in K^*$, that is, $B(X)=b_m\left(X+\frac{b_{m-1}}{mb_m}\right)^m$.
\end{proof}
Even if $B(X)$ is not a monomial, the argument of the proof of Theorem \ref{MonomAut} shows the following result.
\begin{proposition}\label{Condiz}
Let $Aut_K(\mathcal{C})_{P_\infty}=Aut_K(\mathcal{C})_{P_\infty}^{(1)}\rtimes H$ with $H=\langle\alpha\rangle$, and let $d=\gcd(j\geq1:a_j\ne0)$ be the largest integer such that $A(Y)$ is $p^d$-linearized.
Then $\alpha(x)=bx+c$ for some $b,c\in K$, and $\alpha(B(x))=a B(x)$ for some $a\in\mathbb F_{p^d}^*$.
\end{proposition}
\begin{remark}
Once $B(X)$ is explicitly given, Proposition {\rm \ref{Condiz}} provides a method to find $H$.
In fact, $H$ has one fixed affine place in $K(x)$ and acts semiregularly on the other affine places; also, $H$ acts on the zeros of $B(x)$ with the same multiplicity.
For instance:
\begin{itemize}
\item If $B(X)$ has more than one root, but only one root with fixed multiplicity $M>1$, then $|H|$ divides either $M$ or $M-1$.
\item If $B(X)$ has more than one root, and all the roots have the same multiplicity $M>1$, then $H$ is trivial and $Aut_K(\mathcal{C})$ is a $p$-group of order $p^n$.
\end{itemize}
\end{remark}
\section{Multi point AG codes on the norm-trace curves}
Let $\ell,r\in\mathbb N$ with $r\geq3$, and let $\mathcal{N}_{q,r}$ be the norm-trace curve as defined in Section \ref{Sec:Aut}. Let $\Omega=\{P_{(0,y_1)},\ldots,P_{(0,y_{q^{r-1}})}\}$ be the set of the $q^{r-1}$ $\mathbb F_{q^r}$-rational places of $\mathcal{N}_{q,r}$ which are the zeros of $x$; the place $P_{(a,b)}$ is centered at the affine point $(a,b)$ of $\mathcal{N}_{q,r}$. Let $\Theta:=\mathcal{N}_{q,r}(\mathbb F_{q^r})\setminus\Omega$; note that $\Theta$ contains the place at infinity $P_\infty$.
As pointed out in the proof of Theorem \ref{AutNormTrace}, the principal divisors of the coordinate functions are the following:
\begin{itemize}
\item $(x)=\sum_{P\in \Omega}P - q^{r-1} P_\infty$ ;
\item $(y)=\frac{q^r-1}{q-1} P_{(0,0)} - \frac{q^r-1}{q-1} P_\infty$ .
\end{itemize}
Define the $\mathbb F_{q^r}$-divisors
$$G:=\sum_{P\in\Omega}\ell P\quad\textrm{and}\quad D:=\sum_{P\in\Theta}P.$$
Since $|\mathcal{N}_{q,r}(\mathbb F_{q^r})|=q^{2r-1}+1$ (see \cite[Lemma 2]{G2003}), $G$ and $D$ have degree $\ell q^{r-1}$ and $q^{2r-1}+1-q^{r-1}$, respectively.
Denote by $C:=C_{\mathcal L}(D,G)$ the associated functional AG code over $\mathbb F_{q^r}$ having length $n= q^{2r-1}+1-q^{r-1}$, dimension $k$, and minimum distance $d$. The designed minimum distance is
$$ d^*=n-\deg (G) = q^{2r-1}+1-(\ell+1)q^{r-1}. $$
The designed minimum distance is attained by $C$.
\begin{proposition}
Whenever $d^*>0$, $C$ attains the designed minimum distance $d^*$.
\end{proposition}
\begin{proof}
By direct computation, the assumption $d^*>0$ is equivalent to $\ell<q^r$.
Take $\ell$ distinct elements $c_1,\ldots,c_\ell\in \mathbb F_{q^r}^*$ and let
$$f:=\prod_{i=1}^{\ell} \left(\frac{x-c_i}{x}\right).$$
The pole divisor of $f$ is exactly $G$, so that $f\in\mathcal{L}(G)$. By the properties of the norm and trace maps, $f$ has exactly $\ell q^{r-1}$ distinct $\mathbb F_{q^r}$-rational zeros. Thus, the weight of $e_D(f)$ is $n-\ell q^{r-1}=d^*$.
\end{proof}
We compute the dimension of $C$.
\begin{proposition}
If $\frac{q^r-1}{q-1}-2\leq\ell\leq q^r-1$, then
$$k=\ell q^{r-1}+1-\frac{1}{2}\left(\frac{q^r-1}{q-1}-1\right)\left(q^{r-1}-1\right).$$
\end{proposition}
\begin{proof}
Since $n>\deg(G)>2g-2$, $k=\deg(G)+1-g$ by the Riemann-Roch Theorem.
\end{proof}
\begin{proposition} \label{monomiallyeq}
The code $C$ is monomially equivalent to the extended one-point code $C_{ext}(D,G^\prime)$, where $G^\prime=\ell q^{r-1}\mathcal{P}_{\infty}$.
\end{proposition}
\begin{proof}
We have $G=G^\prime+(x^\ell)$ and hence $\mathcal{L}(G^\prime)=\{f\cdot x^\ell \mid f\in\mathcal{L}(G)\}$. The codeword of $C_{ext}(D,G^\prime)$ associated to $f\cdot x^\ell$ is obtained as
$$ \big( (f x^\ell)(P_1),\ldots,(f x^\ell)(\mathcal{P}_{\infty}),\ldots,(f x^\ell)(P_n) \big) = \big( f(P_1),\ldots,f(\mathcal{P}_{\infty}),\ldots,f(P_n) \big) \cdot M, $$
where $M$ is the diagonal matrix with diagonal entries $x(P_1)^\ell,\ldots,(t^{\ell q^{r-1}} x)(\mathcal{P}_{\infty})^\ell,\ldots,x(P_n)^\ell \in\mathbb F_{q^r}$, with $t$ a local parameter at $\mathcal{P}_\infty$. This means that $M$ defines a monomial equivalence between $C$ and $C_{ext}(D,G^\prime)$.
\end{proof}
The Weierstrass semigroup $H(P_\infty)$ at $P_\infty$ is known to be generated by $q^{r-1}$ and $\frac{q^r-1}{q-1}$; see \cite{BR2013}. Thus, Proposition \ref{monomiallyeq} allows us to compute the dimension of $C$ also in those cases for which the Riemann-Roch Theorem does not give a complete answer.
\begin{corollary}
If $1\leq\ell\leq\frac{q^r-1}{q-1}-3$, then the dimension of $C$ is
$$ k=\ell+1+\frac{(q-1)}{2}\bigg \lfloor \frac{\ell}{q} \bigg \rfloor \bigg (\bigg \lfloor \frac{\ell}{q} \bigg \rfloor+1 \bigg)+\frac{(q^2-3q+2)}{2}+\Delta,$$
where,
$$\Delta= \frac{(q-1)^2}{2} \bigg(\frac{\ell}{q}-1 \bigg)^2 + \bigg( \frac{(q-3)(q-1)}{2}\bigg) \bigg( \frac{\ell}{q}-1\bigg)+\frac{q(q-1)}{2} \bigg( \frac{\ell}{q}-1\bigg),$$
if $\ell \equiv 0 \pmod q$;
$$\Delta= \frac{(q-1)^2}{2} \bigg \lfloor \frac{\ell}{q}\bigg \rfloor^2+\bigg( \frac{(q-3)(q-1)}{2}\bigg)\bigg \lfloor \frac{\ell}{q} \bigg \rfloor +\frac{q(q-1)}{2} \bigg \lfloor \frac{\ell}{q} \bigg \rfloor,$$
if $\ell \equiv q-1 \pmod q$;
$$\Delta= \frac{(q-1)}{2} \bigg [ \bigg( \ell- \bigg \lfloor \frac{\ell}{q} \bigg \rfloor q \bigg) \bigg \lfloor \frac{\ell}{q} \bigg \rfloor^2+\bigg( q-\ell+\bigg \lfloor \frac{\ell}{q} \bigg \rfloor q-1\bigg) \bigg( \bigg \lfloor \frac{\ell}{q} \bigg \rfloor-1\bigg)^2\bigg ]+\bigg( \frac{q-3}{2}\bigg) \bigg[ \bigg(\ell-\bigg \lfloor \frac{\ell}{q} \bigg \rfloor q\bigg) \bigg \lfloor \frac{\ell}{q} \bigg \rfloor$$
$$+\bigg(q-\ell+\bigg \lfloor \frac{\ell}{q} \bigg \rfloor q -1\bigg) \bigg( \bigg \lfloor \frac{\ell}{q} \bigg \rfloor-1\bigg)\bigg]+\frac{1}{2} \bigg \lfloor \frac{\ell}{q} \bigg \rfloor \bigg( \ell - \bigg \lfloor \frac{\ell}{q} \bigg \rfloor q\bigg) \bigg( \ell - \bigg \lfloor \frac{\ell}{q} \bigg \rfloor q +1\bigg)$$
$$ + \frac{1}{2} \bigg ( \bigg \lfloor \frac{\ell}{q} \bigg \rfloor -1\bigg) \bigg(q-1-\ell+ \bigg \lfloor \frac{\ell}{q} \bigg \rfloor q\bigg) \bigg( q+\ell-\bigg \lfloor \frac{\ell}{q} \bigg \rfloor q\bigg),$$
otherwise.
\end{corollary}
\begin{proof}
Let $c:=(q^r-1)/(q-1)$.
By the assumption on $\ell$, $\deg(G)<n$; hence, $k=\ell(G)$.
From Proposition \ref{monomiallyeq}, $k=\ell(G^\prime)$ with $G^\prime=\ell q^{r-1}\mathcal{P}_{\infty}$. This means that $k$ equals the number of non-gaps $h\in H(\mathcal{P}_\infty)$ at $\mathcal{P}_\infty$ satisfying $h\leq\ell q^{r-1}$. From \cite{G2003} (see also \cite{BR2013}), $k$ is the number of couples $(i,j)\in\mathbb N^2$ such that
$$ 0\leq i<q^r,\quad 0\leq j<q^{r-1},\quad i q^{r-1}+j c \leq \ell q^{r-1}. $$
Since $\ell\leq c-3$, this implies
$$ k=\sum_{i=0}^{\ell} \left(\left\lfloor\frac{(\ell-i)q^{r-1}}{c}\right\rfloor+1\right) =\ell+1+\sum_{s=0}^{\ell}\left\lfloor\frac{ s q^{r-1}}{c}\right\rfloor. $$
Write $s=aq+b$ with $a\geq0$ and $1\leq b\leq q$. The condition $s\leq\ell$ is equivalent to $a\leq \lfloor\frac{\ell-b}{q}\rfloor$ when $b<q$, and to $a\leq \lfloor\frac{\ell}{q}\rfloor-1$ when $b=q$. Hence,
\begin{equation}\label{conto1}
k =\ell+1+\sum_{a=0}^{\lfloor\frac{\ell}{q}\rfloor-1}\left\lfloor\frac{(aq+q)q^{r-1}}{c}\right\rfloor + \sum_{b=1}^{q-1} \sum_{a=0}^{\lfloor\frac{\ell-b}{q}\rfloor} \left\lfloor\frac{(aq+b)q^{r-1}}{c}\right\rfloor.
\end{equation}
By direct computation,
\begin{small}
\begin{equation}\label{conto2}
\sum_{a=0}^{\lfloor\frac{\ell}{q}\rfloor-1}\left\lfloor\frac{(aq+q)q^{r-1}}{c}\right\rfloor = \sum_{a=0}^{\lfloor\frac{\ell}{q}\rfloor-1}\left\lfloor(a+1)(q-1)+\frac{a+1}{c}\right\rfloor = \sum_{a=0}^{\lfloor\frac{\ell}{q}\rfloor-1}(a+1)(q-1) = \frac{1}{2}(q-1)\left\lfloor \frac{\ell}{q} \right\rfloor \left(\left\lfloor \frac{\ell}{q} \right\rfloor+1\right).
\end{equation}
\end{small}
Also,
$$ \frac{(aq+b)q^{r-1}}{c} = a(q-1)+b-1+ \frac{q^r-1+a(q-1)-b(q^{r-1}-1)}{q^r-1}. $$
Assume that $1\leq b\leq q-1$ and $0\leq a\leq \left\lfloor\frac{\ell-b}{q}\right\rfloor \leq \left\lfloor\frac{\ell}{q}\right\rfloor$. By the assumption on $\ell$ follows $a\leq q\frac{q^{r-2}-1}{q-1}$. Thus,
$$ \frac{q^r-1+a(q-1)-b(q^{r-1}-1)}{q^r-1}>0, \quad \frac{q^r-1+a(q-1)-b(q^{r-1}-1)}{q^r-1}<1, $$
so that $\left\lfloor\frac{(aq+b)q^{r-1}}{c}\right\rfloor=a(q-1)+b-1$.
Thus,
$$ \sum_{b=1}^{q-1} \sum_{a=0}^{\lfloor\frac{\ell-b}{q}\rfloor} \left\lfloor\frac{(aq+b)q^{r-1}}{c}\right\rfloor = \sum_{b=1}^{q-1} \sum_{a=0}^{\lfloor\frac{\ell-b}{q}\rfloor}\left(a(q-1)+b-1\right) =$$
$$\frac{(q-1)}{2} \sum_{b=1}^{q-1} \left\lfloor\frac{\ell-b}{q}\right\rfloor^2 + \bigg( \frac{q-3}{2} \bigg) \sum_{b=1}^{q-1} \left\lfloor\frac{\ell-b}{q}\right\rfloor + \sum_{b=1}^{q-1} b \left\lfloor\frac{\ell-b}{q}\right\rfloor + \frac{q^2-3q+2}{2}.$$
Denote by,
$$A=\frac{(q-1)}{2} \sum_{b=1}^{q-1} \left\lfloor\frac{\ell-b}{q}\right\rfloor^2 , \quad B=\bigg( \frac{q-3}{2} \bigg) \sum_{b=1}^{q-1} \left\lfloor\frac{\ell-b}{q}\right\rfloor, \quad C= \sum_{b=1}^{q-1} b \left\lfloor\frac{\ell-b}{q}\right\rfloor .$$
We note that for a given $b=1,\ldots,q-1$, holds that $\bigg \lfloor \frac{\ell-b}{q} \bigg \rfloor \ne \bigg \lfloor \frac{\ell-b-1}{q} \bigg \rfloor$ if and only if $\ell-b \equiv 0 \pmod q$.
Thus if $\ell \equiv 0 \pmod q$ then $\bigg \lfloor \frac{\ell-b}{q} \bigg \rfloor=\frac{\ell}{q} - \bigg \lceil \frac{b}{q} \bigg \rceil=\frac{\ell}{q}-1$, for every $b=1,\ldots,q-1$; if $\ell \equiv q-1 \pmod q$ then $\bigg \lfloor \frac{\ell-b}{q} \bigg \rfloor=\bigg \lfloor \frac{\ell}{q} \bigg \rfloor$; while $\bigg \lfloor \frac{\ell-b}{q} \bigg \rfloor=\bigg \lfloor \frac{\ell}{q} \bigg \rfloor$ for $b=1,\ldots,\ell-\bigg \lfloor \frac{\ell}{q} \bigg \rfloor q$ and $\bigg \lfloor \frac{\ell-b}{q} \bigg \rfloor=\bigg( \bigg \lfloor \frac{\ell}{q} \bigg \rfloor-1\bigg)$ for $b=\ell-\bigg \lfloor \frac{\ell}{q} \bigg \rfloor q+1,\ldots,q-1$, if $\ell \not\equiv 0,q-1 \pmod q$. In particular this implies that
$$A= \frac{(q-1)}{2}\sum_{b=1}^{q-1} \bigg( \frac{\ell}{q}-1\bigg)^2= \frac{(q-1)^2}{2} \bigg( \frac{\ell}{q}-1\bigg)^2,$$
if $\ell \equiv 0 \pmod q$,
$$A=\frac{(q-1)}{2} \sum_{b=1}^{q-1} \bigg \lfloor \frac{\ell}{q} \bigg \rfloor^2=\frac{(q-1)^2}{2} \bigg \lfloor \frac{\ell}{q} \bigg \rfloor^2,$$
if $\ell \equiv q-1 \pmod q$, and
$$A=\frac{(q-1)}{2}\sum_{b=1}^{\ell-\big \lfloor \frac{\ell}{q} \big \rfloor q} \bigg \lfloor \frac{\ell}{q} \bigg \rfloor^2+ \frac{(q-1)}{2} \sum_{b=\ell-\big \lfloor \frac{\ell}{q} \big \rfloor q+1}^{q-1} \bigg( \bigg \lfloor \frac{\ell}{q} \bigg \rfloor-1\bigg)^2=$$
$$\frac{(q-1)}{2} \bigg[ \bigg( \ell- \bigg \lfloor \frac{\ell}{q} \bigg \rfloor q \bigg) \bigg \lfloor \frac{\ell}{q} \bigg \rfloor^2+\bigg( q-\ell+\bigg \lfloor \frac{\ell}{q} \bigg \rfloor q-1\bigg) \bigg( \bigg \lfloor \frac{\ell}{q} \bigg \rfloor-1\bigg)^2\bigg],$$
otherwise. Analogously,
$$B= \frac{(q-3)}{2}\sum_{b=1}^{q-1} \bigg( \frac{\ell}{q}-1\bigg)= \frac{(q-3)(q-1)}{2} \bigg( \frac{\ell}{q}-1\bigg),$$
if $\ell \equiv 0 \pmod q$,
$$B= \frac{(q-3)}{2} \sum_{b=1}^{q-1} \bigg \lfloor \frac{\ell}{q} \bigg \rfloor=\frac{(q-1)(q-3)}{2} \bigg \lfloor \frac{\ell}{q} \bigg \rfloor,$$
if $\ell \equiv q-1 \pmod q$, while
$$B=\frac{(q-3)}{2}\sum_{b=1}^{\ell-\big \lfloor \frac{\ell}{q} \big \rfloor q} \bigg \lfloor \frac{\ell}{q} \bigg \rfloor+ \frac{(q-3)}{2} \sum_{b=\ell-\big \lfloor \frac{\ell}{q} \big \rfloor q+1}^{q-1} \bigg( \bigg \lfloor \frac{\ell}{q} \bigg \rfloor-1\bigg)=$$
$$\bigg( \frac{q-3}{2}\bigg) \bigg[ \bigg(\ell-\bigg \lfloor \frac{\ell}{q} \bigg \rfloor q\bigg) \bigg \lfloor \frac{\ell}{q} \bigg \rfloor+\bigg(q-\ell+\bigg \lfloor \frac{\ell}{q} \bigg \rfloor q -1\bigg) \bigg( \bigg \lfloor \frac{\ell}{q} \bigg \rfloor-1\bigg)\bigg]$$
otherwise, and
$$C=\sum_{b=1}^{q-1} b \bigg( \frac{\ell}{q}-1\bigg)=\frac{q(q-1)}{2}\bigg( \frac{\ell}{q}-1\bigg),$$
if $\ell \equiv 0 \pmod q$,
$$C=\sum_{b=1}^{q-1} b \bigg \lfloor \frac{\ell}{q} \bigg \rfloor=\frac{q(q-1)}{2} \bigg \lfloor \frac{\ell}{q} \bigg \rfloor,$$
if $\ell \equiv q-1 \pmod q$ and
$$C=\sum_{b=1}^{\ell-\big \lfloor \frac{\ell}{q} \big \rfloor q} b \bigg \lfloor \frac{\ell}{q} \bigg \rfloor+\sum_{b=\ell-\big \lfloor \frac{\ell}{q} \big \rfloor q+1}^{q-1} b \bigg( \bigg \lfloor \frac{\ell}{q} \bigg \rfloor-1\bigg)=$$
$$\frac{1}{2} \bigg \lfloor \frac{\ell}{q} \bigg \rfloor \bigg( \ell - \bigg \lfloor \frac{\ell}{q} \bigg \rfloor q\bigg) \bigg( \ell - \bigg \lfloor \frac{\ell}{q} \bigg \rfloor q +1\bigg)+ \frac{1}{2} \bigg ( \bigg \lfloor \frac{\ell}{q} \bigg \rfloor -1\bigg) \bigg(q-1-\ell+ \bigg \lfloor \frac{\ell}{q} \bigg \rfloor q\bigg) \bigg( q+\ell-\bigg \lfloor \frac{\ell}{q} \bigg \rfloor q\bigg),$$
otherwise. The claim now follows writing $k=\ell+1+\frac{(q-1)}{2}\bigg \lfloor \frac{\ell}{q} \bigg \rfloor \bigg (\bigg \lfloor \frac{\ell}{q} \bigg \rfloor+1 \bigg)+\frac{(q^2-3q+2)}{2}+A+B+C$.
\end{proof}
We show that the automorphism group of $\mathcal{N}_{q,r}$ is inherited by the code $C$.
\begin{proposition}
The automorphism group of $C$ has a subgroup isomorphic to
$$ (Aut_K(\mathcal{N}_{q,r})\rtimes Aut(\mathbb F_{q^r}))\rtimes \mathbb F_{q^r}^*. $$
\end{proposition}
\begin{proof}
The group $Aut_K(\mathcal{N}_{q,r})$ is defined over $\mathbb F_{q^r}$, so that $Aut_{\mathbb F_{q^r}}(\mathcal{N}_{q,r})=Aut_K(\mathcal{N}_{q,r})$.
The support $supp(G)$ of the divisor $G$ is an orbit of $Aut_K(\mathcal{N}_{q,r})$, and $Aut_K(\mathcal{N}_{q,r})$ acts on the support $supp(D)=\mathcal{N}_{q,r}(\mathbb F_{q^r})\setminus supp(G)$ of the divisor $D$. Also, all places contained in $supp(G)$ have the same weight in $G$, which implies $\sigma(G)=G$ for any $\sigma\in Aut_K(\mathcal{N}_{q,r})$; analogously, $\sigma(D)=D$.
Therefore, $Aut_{\mathbb F_{q^r},D,G}(\mathcal{N}_{q,r})$ is isomorphic to $Aut_K(\mathcal{N}_{q,r})$.
From the proof of Theorem \ref{AutNormTrace} follows that $Aut_K(\mathcal{N}_{q,r})$ has just two short orbits on $\mathcal{N}_{q,r}$.
Namely, one short orbit is the singleton $\{\mathcal{P}_\infty\}$, which is fixed by the whole group $Aut_K(\mathcal{N}_{q,r})$; the other short orbit is the set $\Omega$ of the zeros of $x$, which has size $q^{r-1}$ and is fixed pointwise by the complement $H$ of the $p$-group $G$.
Hence, any non-trivial element $\sigma\in Aut_K(\mathcal{N}_{q,r})$ fixes at most $N:=q^{r-1}+1$ places on $\mathcal{N}_{q,r}$.
Since the length $n$ of $C$ is bigger than $N$, the claim follows from Proposition \ref{tivoglioiniettivo}.
\end{proof}
\begin{flushleft}
Matteo Bonini\\
Dipartimento di Matematica,\\
University of Trento,\\
e-mail: {\sf [email protected]}
\end{flushleft}
\begin{flushleft}
Maria Montanucci\\
Dipartimento di Matematica, Informatica ed Economia,\\
University of Basilicata,\\
e-mail: {\sf [email protected]}
\end{flushleft}
\begin{flushleft}
Giovanni Zini\\
Dipartimento di Matematica e Informatica,\\
University of Florence,\\
e-mail: {\sf [email protected]}
\end{flushleft}
\end{document} |
\begin{document}
\title{The recurrence formulas for primes and non-trivial zeros of the Riemann zeta function}
\author{Artur Kawalec}
\date{}
\maketitle
\begin{abstract}
In this article, we explore the Riemann zeta function with a perspective on primes and non-trivial zeros. We develop the Golomb's recurrence formula for the $n$th+1 prime, and assuming (RH), we propose an analytical recurrence formula for the $n$th+1 non-trivial zero of the Riemann zeta function. Thus all non-trivial zeros up to the $n$th order must be known to generate the $n$th+1 non-trivial zero. We also explore a variation of the recurrence formulas for primes based on the prime zeta function, which will be a basis for the development of the recurrence formulas for non-trivial zeros based on the secondary zeta function. In the last part, we review the presented formulas and outline the duality between primes and non-trivial zeros. The proposed formula implies that all primes can be converted into an individual non-trivial zero (assuming RH), and conversely, all non-trivial zeros can be converted into an individual prime (not assuming RH). Also, throughout this article, we summarize numerical computation and verify the presented results to high precision.
\end{abstract}
\section{Introduction}
The Riemann zeta function is defined by the infinite series
\begin{equation}\label{eq:20}
\zeta(s)=\sum_{n=1}^{\infty}\frac{1}{n^s},
\end{equation}
which is absolutely convergent for $\Re(s)>1$, where $s=\sigma+it$ is a complex variable. The values for the first few special cases are:
\begin{equation}\label{eq:9}
\begin{aligned}
\zeta(1) &\sim\sum_{n=1}^{k}\frac{1}{n}\sim\gamma+\log(k) \quad \text{as}\quad k\to \infty,\\
\zeta(2) &=\frac{\pi^2}{6}, \\
\zeta(3) &=1.20205690315959\dots, \\
\zeta(4) &=\frac{\pi^4}{90}, \\
\zeta(5) &=1.03692775514337\dots.
\end{aligned}
\end{equation}
For $s=1$, the series diverges asymptotically as $\gamma+\log(k)$, where $\gamma=0.5772156649\dots$ is the Euler-Mascheroni constant. The special values for even positive integer argument are given by the Euler's formula
\begin{equation}\label{eq:9}
\zeta(2k) = \frac{\mid B_{2k}\mid}{2(2k)!}(2\pi)^{2k},
\end{equation}
for which the value is expressed as a rational multiple of $\pi^{2k}$ where the constants $B_{2k}$ are Bernoulli numbers denoted such that $B_0=1$, $B_1=-\frac{1}{2}$, $B_2=\frac{1}{6}$ and so on. For odd positive integer argument, the values of $\zeta(s)$ converge to unique constants, which are not known to be expressed as a rational multiple of $\pi^{2k+1}$ as occurs in the even positive integer case. For $s=3$, the value is commonly known as Ap\'ery's constant, named after Ap\'ery, who proved its irrationality.
At the heart of the Riemann zeta function are prime numbers, which are encoded by the Euler's product formula
\begin{equation}\label{eq:20}
\zeta(s)=\prod_{n=1}^{\infty}\left(1-\frac{1}{p_n^s}\right)^{-1}
\end{equation}
also valid for $\Re(s)>1$, where $p_1=2$, $p_2=3$, and $p_3=5$ and so on, denote the prime number sequence. The expression for the complex magnitude, or modulus, of the Euler prime product is
\begin{equation}\label{eq:7}
\mid \zeta(\sigma+it) \mid^2 = \frac{\zeta(4\sigma)}{\zeta(2\sigma)}\prod_{n=1}^\infty \left(1-\frac{\cos(t\log p_n)}{\cosh(\sigma\log p_n)}\right)^{-1}
\end{equation}
for $\sigma>1$, which for a positive integer argument $\sigma=k$ simplifies the zeta terms using (3), resulting in
\begin{equation}\label{eq:11}
\mid \zeta(k+it) \mid=(2\pi)^{k}\sqrt{\frac{|B_{4k}|(2k)!}{|B_{2k}|(4k)!}}\prod_{n=1}^\infty \left(1-\frac{\cos(t\log p_n)}{\cosh(k\log p_n)}\right)^{-1/2}.
\end{equation}
Using this form, the first few special values of this representation are
\begin{equation}\label{eq:29}
\begin{aligned}
\zeta(1) &\sim \frac{\pi}{\sqrt{15}}\prod_{n=1}^k \left(1-\frac{2}{p_n+p_n^{-1}}\right)^{-1/2} \sim e^{\gamma}\log(p_k),\\
\zeta(2) &= \frac{\pi^2}{\sqrt{105}}\prod_{n=1}^\infty \left(1-\frac{2}{p_n^2+p_n^{-2}}\right)^{-1/2}, \\
\zeta(3) &= \frac{\pi^3}{15} \sqrt{\frac{691}{3003}}\prod_{n=1}^\infty \left(1-\frac{2}{p_n^{3}+p_n^{-3}}\right)^{-1/2}, \\
\zeta(4) &= \frac{\pi^4}{45} \sqrt{\frac{3617}{17017}}\prod_{n=1}^\infty \left(1-\frac{2}{p_n^{4}+p_n^{-4}}\right)^{-1/2}, \\
\zeta(5) &= \frac{\pi^5}{225} \sqrt{\frac{174611}{323323}}\prod_{n=1}^\infty \left(1-\frac{2}{p_n^{5}+p_n^{-5}}\right)^{-1/2},
\end{aligned}
\end{equation}
where we let $t=0$ and reduced the hyperbolic cosine term as we have shown in [7][9]. The value for $\zeta(1)$ in terms of Euler prime product representation is asymptotic to $e^{\gamma}\log(p_k)$ due to Mertens's theorem as $k\to \infty$ [5][14]. Also, the arg of the Euler product can be found as
\begin{equation}\label{eq:20}
\text{arg } \zeta(\sigma+it) = -\sum_{n=1}^{\infty}\tan^{-1}\left(\frac{\sin(t\log p_n)}{p_n^{\sigma}-\cos(t\log p_n)}\right)
\end{equation}
thus writing the Euler product in polar form:
\begin{equation}\label{eq:20}
\zeta(s) = |\zeta(s)|e^{\text{i arg } \zeta(s)}.
\end{equation}
The Euler prime product permits the primes to be individually extracted from the infinite product under certain limiting conditions, as we have shown in [6], thus yielding the Golomb's formula for primes [4]. To illustrate this, when we expand the product we have
\begin{equation}\label{eq:20}
\zeta(s)=\left(1-\frac{1}{p_1^s}\right)^{-1}\left(1-\frac{1}{p_2^s}\right)^{-1}\left(1-\frac{1}{p_3^s}\right)^{-1}\ldots,
\end{equation}
and next, we wish to solve for the first prime $p_1$, then we have
\begin{equation}\label{eq:20}
p_1=\left(1-\frac{\epsilon_2(s)}{\zeta(s)}\right)^{-1/s},
\end{equation}
where
\begin{equation}\label{eq:20}
\epsilon_k(s)=\prod_{n=k}^{\infty}\left(1-\frac{1}{p_n^s}\right)^{-1}
\end{equation}
is the tail of Euler product starting at $p_k$. When we then consider the limit
\begin{equation}\label{eq:20}
p_1=\lim_{s\to \infty}\left(1-\frac{\epsilon_2(s)}{\zeta(s)}\right)^{-1/s},
\end{equation}
then $\epsilon_2(s)\to 1$ at a faster rate than the Riemann zeta function, that is $\zeta(s) \sim 1+O(p_1^{-s})$, while $\epsilon_2(s) \sim 1+O(p_2^{-s})$, and the gap $p_1^{-s}\gg p_2^{-s}$ is only widening as $s\to \infty$, hence the contribution due to Riemann zeta function dominates the limit, and the formula for the first prime becomes
\begin{equation}\label{eq:20}
p_1=\lim_{s\to \infty}\left(1-\frac{1}{\zeta(s)}\right)^{-1/s}.
\end{equation}
Numerical computation of (14) for $s=10$ and $s=100$ is summarized in Table $1$, where we observe convergence to $p_1$. The next prime in the sequence is found the same way by solving for $p_2$ in (10) to obtain
\begin{equation}\label{eq:20}
p_2=\lim_{s\to \infty}\left[1-\frac{\left(1-\frac{1}{p_1^s}\right)^{-1}\epsilon_3(s)}{\zeta(s)}\right]^{-1/s},
\end{equation}
where similarly as before, $\epsilon_3(s)\to 1$ at a faster rate than the Riemann zeta function and the contribution due to the first prime product $(1-p_1^{-s})^{-1}$ as $s\to\infty$, where it cancels the first prime product in $\zeta(s)$, so that $(1-p_1^{-s})\zeta(s) \sim 1+O(p_2^{-s})$, while $\epsilon_3(s) \sim 1+O(p_3^{-s})$, and the gap $p_2^{-s}\gg p_3^{-s}$ is increasing rapidly as $s\to \infty$, hence the contribution due to Riemann zeta function and the first prime product dominates the limit, and we have
\begin{equation}\label{eq:20}
p_2=\lim_{s\to \infty}\left[1-\frac{\left(1-\frac{1}{p_1^s}\right)^{-1}}{\zeta(s)}\right]^{-1/s}.
\end{equation}
Numerical computation of (16) for $s=10$ and $s=100$ is summarized in Table 1, and we observe convergence to $p_2$. And the next prime follows the same pattern $(1-p_1^{-s})(1-p_2^{-s})\zeta(s) \sim 1+O(p_3^{-s})$, while $\epsilon_4(s) \sim 1+O(p_4^{-s})$ which results in
\begin{equation}\label{eq:20}
p_3=\lim_{s\to \infty}\left[1-\frac{\left(1-\frac{1}{p_1^s}\right)^{-1}\left(1-\frac{1}{p_2^s}\right)^{-1}}{\zeta(s)}\right]^{-1/s}.
\end{equation}
Hence, this process continues for the $n$th+1 prime, and so if we define a partial Euler product up to the $n$th order as
\begin{equation}\label{eq:20}
Q_{n}(s)=\prod_{k=1}^{n}\left(1-\frac{1}{p_k^s}\right)^{-1}
\end{equation}
for $n\geq 1$ and $Q_0(s)=1$, then we obtain the Golomb's formula for the $p_{n+1}$ prime
\begin{equation}\label{eq:20}
p_{n+1}=\lim_{s\to \infty}\left(1-\frac{Q_n(s)}{\zeta(s)}\right)^{-1/s}.
\end{equation}
We performed numerical computation of (19) in PARI/GP software package, as it is an excellent platform for performing arbitrary precision computations [10], and its functionality will be very useful for the rest of this article. Before running any script, we recommend allocating a lot of memory $\textbf{allocatemem(1000000000)}$, and setting precision to high value, for example $\textbf{\textbackslash p 2000}$. We tabulate the computational results in Table 1 for $s=10$ and $s=100$ case, and observe the convergence approaching the $p_{n+1}$ prime based on the knowledge of all primes up to the $n$th order. When we compute for the $p_{1000}$ case, the $s=100$ variable is still too small to observe correct convergence, hence we performed a very high precision computation for $n=999$ and $s=10000$ with precision set to $50000$ decimal places, and now the true value of the prime is revealed:
\begin{equation}\label{eq:20}
p_{1000}\approx7926.99958710978789301541492167\ldots.
\end{equation}
This formula will always converge because $p_n^{-s}\gg p_{n+1}^{-s}$ as $s\to \infty$, and also because the prime gaps are always bounded which will prevent higher order primes from modifying the main asymptote. It's just a matter of allowing the limit variable $s$ to tend to a large value; however, as is seen, it is not very practical for computing large primes, as very high arbitrary precision is required. The script in PARI is shown in Listing $1$ to compute the next prime using the Golomb's formula (19), which was used to compute Table $1$. The precision must be set very high; we generally set it to $2000$ digits by default.
\begin{table}[hbt!]
\caption{The $p_{n+1}$ prime computed by equation (19) shown to $15$ decimal places.}
\centering
\begin{tabular}{c c c c}
\hline\hline
$n$ & $p_{n+1}$ & $s=10$ & $s=100$ \\[0.5ex]
\hline
$0$ & $p_1$ & 1.996546424130332 & 1.999999999999999 \\
$1$ & $p_2$ & 2.998128944738979 & 2.999999999999999 \\
$2$ & $p_3$ & 4.982816482987932 & 4.9999999999999991\\
$3$ & $p_4$ & 6.990872151877531 & 6.999999999999999 \\
$4$ & $p_5$ & 10.795904253794409 & 10.999999993885992 \\
$5$ & $p_6$ & 12.882858209904345 & 12.999999999999709 \\
$6$ & $p_7$ & 16.454690036492369 & 16.999997488242396 \\
$7$ & $p_8$ & 18.700432429563358 & 18.999999999042078 \\
$8$ & $p_9$ & 22.653649208924189 & 22.999999999980263 \\
$9$ & $p_{10}$ & 27.560268802131417 & 28.999632082761238 \\
$99$ & $p_{100}$ & 429.143320774398099 & 539.114941393037977 \\
$999$ & $p_{1000}$ & 5017.353999786395028 & 7747.370093956440561
\\ [1ex]
\hline
\end{tabular}
\label{table:nonlin}
\end{table}
\lstset{language=C,caption={PARI script for computing equation (19).},label=DescriptiveLabel,captionpos=b}
\begin{lstlisting}[frame=single]
\\ Define partial Euler product up to nth order
Qn(x,n)=
{
prod(i=1,n,(1-1/prime(i)^x)^(-1));
}
\\ Compute the next prime
{
n=10; \\ set n
s=100; \\ set limit variable
\\ compute next prime
pnext=(1-Qn(s,n)/zeta(s))^(-1/s);
print(pnext);
}
\end{lstlisting}
The Riemann zeta function has many representations. One common form is the alternating series representation
\begin{equation}\label{eq:20}
\zeta(s) = \frac{1}{1-2^{1-s}}\sum_{n=1}^{\infty} \frac{(-1)^{n+1}}{n^s},
\end{equation}
which is convergent for $\Re(s)>0$, with some exceptions at $\Re(s)=1$ due to the constant factor. By the application of the Euler-Maclaurin summation formula, the main series (1) can also be extended to domain $\Re(s)>0$ by subtracting the pole in the limit as
\begin{equation}\label{eq:20}
\zeta(s)=\lim_{k\to \infty}\Big\{\sum_{n=1}^{k-1}\frac{1}{n^s}-\frac{k^{1-s}}{1-s}\Big\}.
\end{equation}
Equations (21) and (22) are hence valid in the critical strip region $0<\Re(s)<1$.
Another important representation of $\zeta(s)$ is the Laurent expansion about $s=1$ that gives a globally convergent series valid anywhere in the complex plane except at $s=1$ as
\begin{equation}\label{eq:20}
\zeta(s)=\frac{1}{s-1}+\sum_{n=0}^{\infty}\gamma_n\frac{(-1)^n(s-1)^n}{n!}.
\end{equation}
The coefficients $\gamma_n$ are the Stieltjes constants, and $\gamma_0=\gamma$ is the usual Euler-Mascheroni constant. We observe that $\gamma_n$ are linear in the series, hence if we form a system of linear equations, then using Cramer's rule and some properties of a Vandermonde matrix, we find that Stieltjes constants can be represented by a determinant of a certain matrix:
\begin{equation}\label{eq:20}
\gamma_n = \pm\det(A_{n+1})
\end{equation}
where the matrix $A_n(k)$ is matrix $A(k)$, but with an $n$th column swapped with a vector $B$ as given next
\begin{gather}
A(k)= \begin{pmatrix}1 & -\frac{1}{1!} & \frac{1^2}{2!} & -\frac{1^3}{3!} &\dots & \frac{1^k}{k!}\\ 1 & -\frac{2}{1!} & \frac{2^2}{2!} & -\frac{2^3}{3!} & \dots & \frac{2^k}{k!} \\ 1 & -\frac{3}{1!} & \frac{3^2}{2!} & -\frac{3^3}{3!} & \dots & \frac{3^k}{k!}\\ \vdots & \vdots & \vdots & \vdots & \ddots & \vdots\\ 1 & -\frac{(k+1)}{1!} & \frac{(k+1)^2}{2!} & -\frac{(k+1)^3}{3!} &\dots & \frac{(k+1)^k}{k!}\end{pmatrix}
\end{gather}
and
\begin{gather}
B(k)=
\begin{pmatrix}
\zeta(2)-1\\
\zeta(3)-\frac{1}{2}\\
\zeta(4)-\frac{1}{3}\\
\vdots \\
\ \zeta(k+1)-\frac{1}{k}\\
\end{pmatrix}.
\end{gather}
The $\pm$ sign depends on $k$, but to ensure a positive sign, the size of $k$ must be a multiple of $4$. Hence, the first few Stieltjes constants can be represented as:
\begin{gather}
\gamma_0= \lim_{k\to\infty}\begin{vmatrix} \zeta(2)-1 & -\frac{1}{1!} & \frac{1^2}{2!} & -\frac{1^3}{3!} &\dots & \frac{(-1)^k 1^k}{k!}\\ \zeta(3)-\frac{1}{2} & -\frac{2}{1!} & \frac{2^2}{2!} & -\frac{2^3}{3!} & \dots & \frac{(-1)^k 2^k}{k!} \\ \zeta(4)-\frac{1}{3} & -\frac{3}{1!} & \frac{3^2}{2!} & -\frac{3^3}{3!} & \dots & \frac{(-1)^k 3^k}{k!}\\ \vdots & \vdots & \vdots & \vdots & \ddots & \vdots\\ \zeta(k+1)-\frac{1}{k} & -\frac{(k+1)}{1!} & \frac{(k+1)^2}{2!} & -\frac{(k+1)^3}{3!} &\dots & \frac{(-1)^k(k+1)^k}{k!}\end{vmatrix},
\end{gather}
and the next Stieltjes constant is
\begin{gather}
\gamma_1= \lim_{k\to\infty}\begin{vmatrix} 1 & \zeta(2)-1 & \frac{1^2}{2!} & -\frac{1^3}{3!} &\dots & \frac{(-1)^k 1^k}{k!}\\ 1 & \zeta(3)-\frac{1}{2} & \frac{2^2}{2!} & -\frac{2^3}{3!} & \dots & \frac{(-1)^k 2^k}{k!} \\ 1 & \zeta(4)-\frac{1}{3} & \frac{3^2}{2!} & -\frac{3^3}{3!} & \dots & \frac{(-1)^k 3^k}{k!}\\ \vdots & \vdots & \vdots & \vdots & \ddots & \vdots\\ 1 & \zeta(k+1)-\frac{1}{k} & \frac{(k+1)^2}{2!} & -\frac{(k+1)^3}{3!} &\dots & \frac{(-1)^k(k+1)^k}{k!}\end{vmatrix},
\end{gather}
and the next is
\begin{gather}
\gamma_2= \lim_{k\to\infty}\begin{vmatrix}1 & -\frac{1}{1!} & \zeta(2)-1 & -\frac{1^3}{3!} &\dots & \frac{(-1)^k 1^k}{k!}\\ 1 & -\frac{2}{1!} & \zeta(3)-\frac{1}{2} & -\frac{2^3}{3!} & \dots & \frac{(-1)^k 2^k}{k!} \\ 1 & -\frac{3}{1!} & \zeta(4)-\frac{1}{3}& -\frac{3^3}{3!} & \dots & \frac{(-1)^k 3^k}{k!}\\ \vdots & \vdots & \vdots & \vdots & \ddots & \vdots\\ 1 & -\frac{(k+1)}{1!} & \zeta(k+1)-\frac{1}{k} & -\frac{(k+1)^3}{3!} &\dots & \frac{(-1)^k(k+1)^k}{k!}\end{vmatrix},
\end{gather}
and so on. In Table $2$, we compute the determinant formula (24) for the first $10$ Stieltjes constants for $k=500$, and observe the convergence. In Listing $2$, the script in PARI to generate values for Table $2$ is also given. This shows that the $\gamma_n$ constants can be represented by $\zeta(n)$ at positive integer values as basis
\begin{equation}\label{eq:20}
\gamma_n = \lim_{k\to\infty}\Bigg\{C_{n,1}(k)+\sum_{m=2}^{k+1}(-1)^m C_{n,m}(k)\zeta(m)\Bigg\}
\end{equation}
where the expansion coefficients $C_{n,m}$ are rational and divergent, which grow very fast as $k$ increases. The index $n\geq 0$ is the $n$th Stieltjes constant, and index $m\geq 1$ is for the $\zeta(m)$ basis value. These coefficients can be generated by expanding the determinant of $A_n$ using the Leibniz determinant rule along columns with the zeta values. For example, for $k=12$, which is a multiple of $4$, then the first few expansion coefficients are
\begin{equation}\label{eq:20}
\gamma_0 \approx -\frac{86021}{27720}+12\zeta(2)-66\zeta(3)+220\zeta(4)-495\zeta(5)+792\zeta(6)-\ldots
\end{equation}
The $C_{0,1}$ coefficient is the negative of the harmonic number $H_{12}$
\begin{equation}\label{eq:20}
C_{0,1}=-H_{k} = -\sum_{n=1}^{k}\frac{1}{n}
\end{equation}
and the next are
\begin{equation}\label{eq:20}
C_{0,m}=\binom{k}{m-1}.
\end{equation}
For the next $\gamma_n$, the first few coefficients for $k=12$ are
\begin{equation}\label{eq:20}
\gamma_1 \approx -\frac{1676701}{415800}+\frac{58301}{2310}\zeta(2)-\frac{72161}{420}\zeta(3)+\frac{76781}{126}\zeta(4)-\frac{79091}{56}\zeta(5)+\frac{80477}{35}\zeta(6)-\ldots,
\end{equation}
and for the next $\gamma_n$, we have
\begin{equation}\label{eq:20}
\gamma_2 \approx -\frac{5356117}{907200}+\frac{10418}{225}\zeta(2)-\frac{2270987}{6300}\zeta(3)+\frac{143644}{105}\zeta(4)-\frac{5520439}{1680}\zeta(5)+\frac{574108}{105}\zeta(6)-\ldots,
\end{equation}
and so on, but these coefficients are more difficult to determine and they diverge very fast.
\begin{table}[hbt!]
\caption{The first $30$ digits of $\gamma_n$ computed by equation (24) for $k=500$.}
\centering
\begin{tabular}{c c c}
\hline\hline
$n$ & $\gamma_n$ & Significant Digits \\ [0.5ex]
\hline
$0$ & 0.577215664901532860606512090082 & 34 \\
$1$ & -0.072815845483676724860586375874 & 34 \\
$2$ & -0.009690363192872318484530386035 & 33 \\
$3$ & 0.002053834420303345866160046542 & 32 \\
$4$ & 0.002325370065467300057468170177 & 31 \\
$5$ & 0.00079332381730106270175333487\underline{7} & 30 \\
$6$ & -0.0002387693454301996098724218\underline{4}2 & 29 \\
$7$ & -0.0005272895670577510460740975\underline{0}7 & 29 \\
$8$ & -0.000352123353803039509602052\underline{1}77 & 28 \\
$9$ & -0.000034394774418088048177914\underline{6}91 & 28 \\
$10$ & 0.0002053328149090647946837\underline{2}1922 & 26
\\ [1ex]
\hline
\end{tabular}
\label{table:nonlin}
\end{table}
\lstset{language=C,caption={PARI script for computing equation (24)},label=DescriptiveLabel,captionpos=b}
\begin{lstlisting}[frame=single]
{
n = 0; \\ set nth Stieltjes constant
k = 100; \\ set limit variable
An=matrix(k,k); \\ allocate matrix
\\ load matrix An
for(j=1,k,
for(i=1,k,
if(j==1+n,An[i,j]=zeta(i+1)-1/i,
An[i,j]=(-i)^(j-1)/factorial(j-1))));
\\ compute determinant of An
yn = matdet(An);
print(yn);
}
\end{lstlisting}
The Hadamard infinite product formula is another global analytically continued representation of (1) to the whole complex plane
\begin{equation}\label{eq:20}
\zeta(s)=\frac{\pi^{\frac{s}{2}}}{2(s-1)\Gamma(1+\frac{s}{2})}\prod_{\rho}^{}\left(1-\frac{s}{\rho}\right)
\end{equation}
having a simple pole at $s=1$, and at the heart of this form is an infinity of complex non-trivial zeros $\rho_n=\sigma_n+it_n$, which are constrained to lie in the critical strip $0<\Re(s)<1$ region. The infinite product is assumed to be taken over zeros in conjugate pairs. Hardy proved that there is an infinity of non-trivial zeros on the critical line at $\sigma=\frac{1}{2}$. It is not yet known whether there are non-trivial zeros off of the critical line in the range $0<\Re(s)<1$ other than $\sigma=\frac{1}{2}$, a problem of the Riemann Hypothesis (RH). To date, there has been a very large number of zeros verified numerically to lie on the critical line, and none was ever found off of the critical line. The first few non-trivial zeros on the critical line $\rho_n=\frac{1}{2}+it_n$ have imaginary components $t_1 = 14.13472514...$, $t_2 = 21.02203964...$, $t_3 = 25.01085758...$ which were originally found numerically using a solver, but if (RH) is true, then they can be computed analytically by the formula presented later in this article. Also, we will interchangeably refer to $\rho_n$ or $t_n$ to imply a non-trivial zero.
The Hadamard product representation can be interpreted as a volume of an s-ball (that is for a ball of complex dimension $s$). For a positive integer $n$, the n-ball defines all points satisfying $\Omega=\{x_1^2+x_2^2+x_3^2+\dots +x_n^2\leq R^2\}$, and integrating gives the total volume
\begin{equation}\label{eq:20}
V(n)=\underset{\Omega}{\int\int\int\ldots\int} dx_1 dx_2 dx_3\ldots dx_n=K(n)R^n,
\end{equation}
where
\begin{equation}\label{eq:20}
K(n) = \frac{\pi^{\frac{n}{2}}}{\Gamma(1+\frac{n}{2})}
\end{equation}
is the proportionality constant. Now, when generalizing the n-ball to an s-ball of complex $s$ dimension for $\zeta(s)$, we can identify that the terms involving $\pi$ and $\Gamma(s)$ function is $K(s)$, and that the radius of the s-ball is the remaining product involving the non-trivial zeros
\begin{equation}\label{eq:25}
R(s)^s = \frac{1}{2(s-1)}\prod_{\rho}^{}\left(1-\frac{s}{\rho}\right)
\end{equation}
which is actually the Riemann xi function $\xi(s)$ multiplied by $(s-1)^{-1}$. Thus
\begin{equation}\label{eq:25}
\zeta(s) = V_s = K(s)R(s)^s
\end{equation}
can be understood as a volume quantity, which when packed into an s-ball, then the radius function in this form is being described by explicitly the non-trivial zeros. The trivial zeros at negative even integers $-2,-4,-6\ldots -2n$ are then the zeros of the proportionality constant due to the pole of $\Gamma(s)$. For example, if we consider $s=2$, then
\begin{equation}\label{eq:25}
\begin{aligned}
\zeta(2) & = K(2)R(2)^2 \\
& = \pi R^2
\end{aligned}
\end{equation}
where $R=\sqrt{\pi/6}=0.7236012545\ldots$ is the radius to give the volume quantity for $\zeta(2)$, which from (1) can be understood as packing the areas of squares with $1/n$ sides into a circle. And similarly for $s=3$
\begin{equation}\label{eq:25}
\begin{aligned}
\zeta(3) & = K(3)R(3)^3 \\
& = \frac{4}{3}\pi R^3
\end{aligned}
\end{equation}
where $R=0.6595972037\ldots$ is the radius to give the volume quantity for Ap\'ery's constant $\zeta(3)$, which from (1) can be understood as packing the volumes of cubes with $1/n$ sides into a sphere. Hence in this view, the non-trivial zeros are governing the radius quantity of an s-ball, essentially encoding the volume information of $\zeta(s)$, and while the trivial zeros are just the zeros of the proportionality constant $K(s)$, which has a role of scaling the values of non-trivial zeros across the dimension $s$ to the values that they currently are, and perhaps even on the critical line. If we plot the radius in the range $1<\sigma<\infty$, we find a local minima for $R$ which occurs between $s=2$ and $s=3$ at $s_{min}= 2.8992592006...$ and $R_{min}=0.6592484066\ldots$. That would mean that the s-ball would reach minimum radius $R_{min}$ at $s_{min}$.
Furthermore, if we consider the complex magnitude for $\zeta(s)$ for representations (21) and (22), and note that at each non-trivial zero on the critical line, a harmonic series is induced from which we can obtain formulas for the Euler-Mascheroni constant $\gamma$ expressed as a function of a single non-trivial zero on the critical line as
\begin{equation}\label{eq:20}
\gamma = \lim_{k\to \infty}\Bigg\{2\sum_{v=1}^{k}\sum_{u=v+1}^{k}\frac{(-1)^{u}(-1)^{v+1}}{\sqrt{uv}}\cos(t^{}_n \log(u/v))-\log(k)\Bigg\}
\end{equation}
and the second formula as
\begin{equation}\label{eq:20}
\gamma = \lim_{k\to \infty}\Bigg\{\frac{k+1}{(\frac{1}{2})^2+t^{2}_{n}}-2\sum_{v=1}^{k}\sum_{u=v+1}^{k}\frac{1}{\sqrt{uv}}\cos(t^{}_n \log(u/v))-\log(k)\Bigg\},
\end{equation}
where it is assumed the index variables satisfy $u>v$ starting with $v=1$ as we have shown in [8][9]. Thus, any individual non-trivial zero on the critical line $t_n$ can be converted to $\gamma$, which is independent of (RH). As a numerical example, for $t_1$ and $k=10^5$, we compute $\gamma=0.5772\underline{1}81648\ldots$ accurate to $5$ decimal places, however, the computation becomes more difficult as it grows as $O(k^2)$ due to the double series. And if we subtract equations (43) and (44), then we obtain a relation
\begin{equation}\label{eq:20}
\frac{1}{|\rho_n|^2}=\frac{1}{(\frac{1}{2})^2+t^{2}_{n}} = \lim_{k\to\infty}\frac{2}{\sqrt{k}}\sum_{m=1}^{k}\frac{1}{\sqrt{m}}\cos(t_n\log(m/k))
\end{equation}
whereby any individual non-trivial zero can be converted to its absolute value on the critical line. Also next, the infinite sum over non-trivial zeros
\begin{equation}\label{eq:20}
\sum_{n=1}^{\infty}\frac{1}{|\rho_n|^2}=\frac{1}{2}\gamma+1-\frac{1}{2}\log(4\pi),
\end{equation}
is an example of secondary zeta function family which will be discussed later.
There is also another whole side to the theory of the Riemann zeta function concerning the prime counting function $\pi(n)$ up to a given quantity $n$, and the non-trivial zero counting function $N(T)$ up to a given quantity T. It is natural to take the logarithm of the Euler prime product yielding a sum
\begin{equation}\label{eq:20}
\log[\zeta(s)] = \sum_{n=1}^{\infty}\sum_{m=1}^{\infty}\frac{1}{m}\frac{1}{p_n^{ms}}=s\int_{0}^{\infty}J(x)x^{-s-1}dx \quad \Re(s)>1
\end{equation}
from which motivates to define a step function $J(x)$ that increases by $1$ at each prime, by $\frac{1}{2}$ at prime square, by $\frac{1}{3}$ at prime cubes, and so on, as shown in [3, p.22] and [15]. Riemann then expressed $J(x)$ by Fourier inversion as
\begin{equation}\label{eq:20}
J(x)=\frac{1}{2\pi i}\int_{a-i\infty}^{a+i\infty}\log[\zeta(s)]\frac{x^s}{s}ds \quad(a>1).
\end{equation}
After finding a suitable expansion for $\log[\zeta(s)]$ in terms of non-trivial zeros using the xi function Weierstrass product over non-trivial zeros
\begin{equation}\label{eq:25}
\xi(s) = \frac{1}{2}\prod_{\rho}^{}\left(1-\frac{s}{\rho}\right),
\end{equation}
with its relation to the zeta by $\xi(s)=\pi^{-\frac{s}{2}}\zeta(s)(s-1)\Gamma(1+\frac{s}{2})$, then after a very detailed and a lengthy analysis as shown in Edwards [3], the main formula for $J(x)$ appears as
\begin{equation}\label{eq:20}
J(x) = \text{Li}(x)-\sum_{\rho}^{}\text{Li}(x^{\rho})-\log(2)+\int_{x}^{\infty}\frac{dt}{t(t^2-1)\log(t)}
\end{equation}
for $x>1$, where Li$(x)$ is a logarithmic integral, and then by applying M\"{o}bius inversion leads to recovering
\begin{equation}\label{eq:20}
\pi(x)=\sum_{n=1}^{\infty}\frac{\mu(n)}{n}J(x^{1/n}).
\end{equation}
Hence, through this formula, the non-trivial zeros are shown to be involved in the generation of primes. Although applying M\"{o}bius inversion in (51) to recover $\pi(n)$ is somewhat circular in this case, because one needs to have knowledge of all primes by $\mu(n)$, however, the main prime content is still in $J(x)$, which comes from the contribution of non-trivial zero terms. In Fig 1, we plot $J(x)$ using (50) and observe the curve approach the step function as more non-trivial zeros (taken in conjugate-pairs) are used.
Furthermore, in analysis by LeClair [11] concerning $N(T)$, it is found that $n$th non-trivial zeros satisfy the following transcendental equation:
\begin{equation}\label{eq:20}
\frac{t_n}{2\pi}\log\left(\frac{t_n}{2\pi e}\right)+\lim_{\delta\to 0}\frac{1}{\pi}\text{arg } \zeta(\frac{1}{2}+it_n+\delta)=n-\frac{11}{8},
\end{equation}
however, the contribution due to the arg function is very small, and only provides fine level tuning to the overall equation, hence when dropping the arg term, LeClair obtained an approximate asymptotic formula for non-trivial zeros via the Lambert function $W(x)e^{W(x)}=x$ transformation:
\begin{equation}\label{eq:20}
t_{n}\approx 2\pi\frac{n-\frac{11}{8}}{W\left(\frac{n-\frac{11}{8}}{e}\right)}.
\end{equation}
It turns out that this approximation works very well with an accuracy down to a decimal place. For example, with this formula, we can quickly approximate a $10^{100}$ zero:
\begin{equation}\label{eq:20}
\begin{aligned}
t_{10^{100}}\approx && 28069038384289406990319544583825640008454803016284\\
&& 6045192360059224930922349073043060335653109252473.23351
\end{aligned}
\end{equation}
in less than one second, and it should be accurate to within a decimal place. The Lambert function can be computed efficiently for large input argument, and the approximated values for $t_n$ get better for higher zeros as $n\to\infty$. In fact, LeClair computed the largest non-trivial zero known to date for $n=10^{10^6}$ using this method [12].
Also, very little is known about the properties of non-trivial zeros. For example, they are strongly believed to be simple, but remains unproven. And in the works by Wolf [16], a large sample of non-trivial zeros was numerically expanded into continued fractions, from which it was possible to compute the Khinchin’s constant, which strongly suggests they are irrational.
In this article, we propose an analytical recurrence formula for $t_{n+1}$, very similar to the Golomb's formula for primes, thus all non-trivial zeros up to $t_n$ must be known in order to compute the $t_{n+1}$ zero. The formula is based on a certain representation of the secondary zeta function
\begin{equation}\label{eq:20}
Z(s) = \sum_{n=1}^{\infty}\frac{1}{t_{n}^{s}}
\end{equation}
in the works of Voros [13], for $s>1$, which does not involve non-trivial zeros, thus avoiding circular reasoning. There is a lot of work already on the secondary zeta functions published in the literature, especially concerning the meromorphic extension of $Z(s)$ via the Mellin transform techniques and tools of spectral theory.
We now introduce the main result of this paper. Assuming (RH), the full recurrence formula for the $t_{n+1}$ non-trivial zero is:
\begin{equation}\label{eq:20}
t_{n+1} = \lim_{m\to\infty}\left[\frac{(-1)^{m+1}}{2}\left(\frac{1}{(2m-1)!}\log (|\zeta|)^{(2m)}\big(\frac{1}{2}\big)+\sum_{k=1}^{\infty}\frac{1}{\left(\frac{1}{2}+2k\right)^{2m}}-2^{2m}\right)-\sum_{k=1}^{n}\frac{1}{t_{k}^{2m}}\right]^{-\frac{1}{2m}}
\end{equation}
for $n\geq 0$, thus all non-trivial zeros up to the $n$th order must be known in order to generate the $n$th+1 non-trivial zero. This formula is a solution to
\begin{equation}\label{eq:20}
\zeta(s)=0
\end{equation}
where $s=\rho_n=\frac{1}{2}+it_n$ for $\sigma_n=\frac{1}{2}$, and the zeros $t_n$ are real and ordered $0<t_1<t_2<t_3<\ldots <t_{n}$. This formula is satisfied by all representations of $\zeta(s)$ on the critical strip, such as by (21), (22), (23), (36), and so on. And in the next sections, we will develop this formula, and explore some of its variations, and then we will numerically compute non-trivial zeros to high precision. We will also discuss some possible limitations to this formula for $n\to \infty$.
In the last section, we will discuss formulas for $t_{n}$ which actually can be related to the primes themselves, and that one could compute $t_n$ as a function of all primes. And conversely, one could compute any individual prime $p_{n}$ as a function of all non-trivial zeros.
\section{A variation of the $n$th+1 prime formula}
Golomb described several variations of the prime formulas of the form (19), one such is
\begin{equation}\label{eq:20}
p_{n+1}=\lim_{s\to \infty}\left[\zeta(s)-Q_{n}(s)\right]^{-1/s},
\end{equation}
which will serve to motivate the next result, which is based on the prime zeta function, and that will then serve as a basis for the development of an analogue formula for the $n$th+1 non-trivial zero formula in the next section.
The prime zeta function is an analogue of (1), but instead of summing over reciprocal integer powers, we sum over reciprocal prime powers as
\begin{equation}\label{eq:20}
P(s) = \sum_{n=1}^{\infty}\frac{1}{p_{n}^{s}}.
\end{equation}
When we consider the expanded sum
\begin{equation}\label{eq:20}
P(s) = \frac{1}{p_{1}^{s}}+\frac{1}{p_{2}^{s}}+\frac{1}{p_{3}^{s}}+\ldots
\end{equation}
then similarly as before, we wish to solve for $p_1$, and obtain
\begin{equation}\label{eq:20}
\frac{1}{p_{1}^{s}}=P(s) -\frac{1}{p_{2}^{s}}-\frac{1}{p_{3}^{s}}-\ldots
\end{equation}
which leads to
\begin{equation}\label{eq:20}
p_1=\left(P(s) -\frac{1}{p_{2}^{s}}-\frac{1}{p_{3}^{s}}-\ldots\right)^{-1/s}.
\end{equation}
If we then consider the limit,
\begin{equation}\label{eq:20}
p_1=\lim_{s\to\infty}\left(P(s) -\frac{1}{p_{2}^{s}}-\frac{1}{p_{3}^{s}}-\ldots\right)^{-1/s}
\end{equation}
then we find that the higher order primes decay faster than $P(s)$, namely, $P(s)\sim p_1^{-s}$, while the tailing error is $O(p_2^{-s})$, and so $P(s)$ dominates the limit. Since $p_1^{-s}\gg p_2^{-s}$, hence we have
\begin{equation}\label{eq:20}
p_1=\lim_{s\to\infty}\left[P(s)\right]^{-1/s}.
\end{equation}
To find $p_2$ we consider (60) again
\begin{equation}\label{eq:20}
p_2=\lim_{s\to\infty}\left[P(s)-\frac{1}{p_1^s}-\frac{1}{p_3^s}\ldots\right]^{-1/s},
\end{equation}
and when taking the limit, then we must keep $p_1$, while the higher order primes decay faster, namely, $P(s)-p_1^{-s}\sim p_2^{-s}$, while the tailing error is $O(p_3^{-s})$, and so $P(s)-p_1^{-s}$ dominates the limit. Since $p_2^{-s}\gg p_3^{-s}$, hence we have
\begin{equation}\label{eq:20}
p_2=\lim_{s\to\infty}\left[P(s)-\frac{1}{p_1^s}\right]^{-1/s}.
\end{equation}
And similarly, the next prime is found the same way, but this time we must retain the two previous primes
\begin{equation}\label{eq:20}
p_3=\lim_{s\to\infty}\left[P(s)-\frac{1}{p_1^s}-\frac{1}{p_2^s}\right]^{-1/s}.
\end{equation}
Hence in general, if we define a partial prime zeta function up to the $n$th order
\begin{equation}\label{eq:20}
P_n(s) = \sum_{k=1}^{n}\frac{1}{p_{k}^{s}},
\end{equation}
then the $n$th+1 prime is
\begin{equation}\label{eq:20}
p_{n+1}=\lim_{s\to\infty}\left[P(s)-P_{n}(s)\right]^{-1/s}.
\end{equation}
At this point, knowing $P(s)$ by the original definition (60) leads to circular reasoning, hence we seek to find other representations for $P(s)$ that don't involve primes directly. We explore the well-known relation
\begin{equation}\label{eq:20}
\log[\zeta(s)]=\sum_{k=1}^{\infty}\frac{P(ks)}{k}
\end{equation}
and then by applying M\"{o}bius inversion leads to
\begin{equation}\label{eq:20}
P(s)=\sum_{k=1}^{\infty}\frac{\mu(k)}{k}\log[\zeta(ks)],
\end{equation}
where $\mu(k)$ is the M\"{o}bius function, which however, still depends on the primes just like (51) for $J(x)$, so it may not be the best candidate for $P(s)$. And if there are other representations for $P(s)$ not involving primes, then one could certainly use them, but we are unaware of such. But to verify (69), we pre-compute $P(s)$ using primes to high precision instead, thus introducing circular reasoning. Hence, we pre-compute $P(s)$ for $s=10$ and $s=100$ as
\begin{equation}\label{eq:20}
P(10)=9.936035744369802178558507001 \times 10^{-4}\ldots
\end{equation}
and
\begin{equation}\label{eq:20}
P(100)=7.888609052210118073520537827\times 10^{-31}\ldots
\end{equation}
using a neat remainder estimation technique of (71) developed by Cohen in [2]. Next, we summarize the computation of $p_{n+1}$ by the recurrence formula (69) in Table 3, and observe the convergence to the $p_{n+1}$ prime, just as with Golomb's formula for primes. And as before, the convergence works because
\begin{equation}\label{eq:20}
O(p_n^{-s})\gg O(p_{n+1}^{-s})\quad \text{as }s\to\infty,
\end{equation}
and also that the prime gaps are bounded, which prevents any higher order primes from modifying the main asymptote.
\begin{table}[hbt!]
\caption{The $p_{n+1}$ prime computed by equation (69) shown to $15$ decimal places.}
\centering
\begin{tabular}{c c c c}
\hline\hline
$n$ & $p_{n+1}$ & $s=10$ & $s=100$ \\[0.5ex]
\hline
$0$ & $p_1$ & 1.996543079767713 & 1.999999999999999 \\
$1$ & $p_2$ & 2.998128913153986 & 2.999999999999999 \\
$2$ & $p_3$ & 4.982816481260483 & 4.999999999999999\\
$3$ & $p_4$ & 6.990872151845387 & 6.999999999999999 \\
$4$ & $p_5$ & 10.79590425378718 & 10.999999993885992 \\
$5$ & $p_6$ & 12.88285820990352 & 12.999999999999709 \\
$6$ & $p_7$ & 16.45469003649213 & 16.999997488242396 \\
$7$ & $p_8$ & 18.70043242956331 & 18.999999999042078 \\
$8$ & $p_9$ & 22.65364920892418 & 22.999999999980263 \\
$9$ & $p_{10}$ & 27.5602688021314 & 28.999632082761238
\\ [1ex]
\hline
\end{tabular}
\label{table:nonlin}
\end{table}
\section{The recurrence formula for non-trivial zeros}
The secondary zeta function has been studied in the literature, and there has been interesting developments concerning the analytical extension to the whole complex plane for
\begin{equation}\label{eq:20}
Z(s) = \sum_{n=1}^{\infty}\frac{1}{t_{n}^{s}}
\end{equation}
which has many parallels with the zeta function. In this article, the symbol $Z$ is implied, and is not related to the Hardy-Z function. For the first few special values the $Z(s)$ yields
\begin{equation}\label{eq:9}
\begin{aligned}
Z(2) &=\frac{1}{2}(\log |\zeta|)^{(2)}\big(\frac{1}{2}\big)+\frac{1}{8}\pi^2+\beta(2)-4 \\
&=0.023104993115418\dots, \\
&\\
Z(3) &= 0.00072954\dots, \\
&\\
Z(4) &=-\frac{1}{12}(\log |\zeta|)^{(4)}\big(\frac{1}{2}\big)-\frac{1}{24}\pi^4-4\beta(4)+16 \\
&= 0.00037172599285\dots, \\
&\\
Z(5) &= 0.00000223\dots.
\end{aligned}
\end{equation}
The special values for even positive integer argument $Z(2m)$ is:
\begin{equation}\label{eq:20}
\begin{aligned}
Z(2m) = (-1)^m \bigg[-\frac{1}{2(2m-1)!}(\log |\zeta|)^{(2m)}\big(\frac{1}{2}\big)+\\
-\frac{1}{4}\left[(2^{2m}-1)\zeta(2m)+2^{2m}\beta(2m)\right]+2^{2m}\bigg]
\end{aligned}
\end{equation}
and is found in [13,p. 693] by works of Voros, and it's originally denoted as $\mathcal{Z}(2\sigma)$. This formula is a sort of an analogue for Euler's formula (3) for $\zeta(2n)$, and is valid for $m\geq 1$, where $m$ is an integer, and $\beta(s)$ is the Dirichlet beta function
\begin{equation}\label{eq:20}
\beta(s) = \sum_{n=0}^{\infty}\frac{(-1)^n}{(2n+1)^s}=\prod_{n=1}^{\infty}\left(1-\frac{\chi_{4}(p_n)}{p_n^s}\right)^{-1},
\end{equation}
where $\chi_4$ is the Dirichlet character modulo $4$. The value $\beta(2)$ is Catalan's constant. In (76), the odd values $Z(2m+1)$ were computed numerically by summing $25000$ zeros, as it is not known whether there is a closed-form representation similar to the $\zeta(2m+1)$ case, and so the given values are only accurate to several decimal places. The formula (77) assumes (RH), and is the result of a complicated development to meromorphically extend (75) to the whole complex plane using tools from spectral theory.
Furthermore, using the relation, also found in [13,p. 681] as
\begin{equation}\label{eq:20}
\frac{1}{2^s}\zeta\big(s,\frac{5}{4}\big)=\sum_{k=1}^{\infty}\frac{1}{\left(\frac{1}{2}+2k\right)^s}=2^s\left[\frac{1}{2}\left((1-2^{-s})\zeta(s)+\beta(s)\right)-1\right],
\end{equation}
from which we have several variations of (77) for $Z(2m)$ as
\begin{equation}\label{eq:20}
Z(2m) = \frac{(-1)^{m+1}}{2} \left[\frac{1}{(2m-1)!}\log (|\zeta|)^{(2m)}\big(\frac{1}{2}\big)+\sum_{k=1}^{\infty}\frac{1}{\left(\frac{1}{2}+2k\right)^{2m}}-2^{2m}\right]
\end{equation}
and another as
\begin{equation}\label{eq:20}
Z(2m) = \frac{(-1)^{m+1}}{2} \left[\frac{1}{(2m-1)!}\log (|\zeta|)^{(2m)}\big(\frac{1}{2}\big)+\frac{1}{2^{2m}}\zeta\big(2m,\frac{5}{4}\big)-2^{2m}\right].
\end{equation}
The expressions involving the $\log (|\zeta|)^{(2m)}\big(\frac{1}{2}\big)$ term can be computed numerically and independently of the non-trivial zeros, and there is no known closed-form representation of it, but there is for the odd values
\begin{equation}\label{eq:20}
\log (|\zeta|)^{(2m+1)}\big(\frac{1}{2}\big)=\frac{1}{2}(2m)!(2^{2m+1}-1)\zeta(2m+1)+\frac{1}{4}\pi^{2m+1}|E_{2m}|,
\end{equation}
where $E_{2m}$ are Euler numbers [13,p. 686]. Unfortunately, the $\log (|\zeta|)^{(2m+1)}(\frac{1}{2})$ term is not involved in the computation of $Z(m)$ for $m>1$. Also, the infinite series in (80) is related to the Hurwitz zeta function, and it can also be separated into two parts involving the zeta function and the beta function, which can then be related to primes via the Euler product, which we will come back to shortly.
Now we will follow the same program that we did for the prime zeta function as outlined in equations (58) to (69). If we begin with the secondary zeta function
\begin{equation}\label{eq:20}
Z(s) = \frac{1}{t_{1}^{s}}+\frac{1}{t_{2}^{s}}+\frac{1}{t_{3}^{s}}+\ldots
\end{equation}
and then solving for $t_1$ we obtain
\begin{equation}\label{eq:20}
\frac{1}{t_{1}^{s}}=Z(s) -\frac{1}{t_{2}^{s}}-\frac{1}{t_{3}^{s}}-\ldots
\end{equation}
and then we get
\begin{equation}\label{eq:20}
t_1=\left(Z(s) -\frac{1}{t_{2}^{s}}-\frac{1}{t_{3}^{s}}-\ldots\right)^{-1/s}.
\end{equation}
If we then consider the limit
\begin{equation}\label{eq:20}
t_1=\lim_{s\to\infty}\left(Z(s) -\frac{1}{t_{2}^{s}}-\frac{1}{t_{3}^{s}}-\ldots\right)^{-1/s}
\end{equation}
then, since $Z(s)\sim t_1^{-s}$ as $s\to\infty$ and the higher order non-trivial zeros decay as $O(t_2^{-s})$, faster than the leading term, the term $t_1^{-s}$ dominates the limit; hence we have
\begin{equation}\label{eq:20}
t_1=\lim_{s\to\infty}\left[Z(s)\right]^{-1/s}.
\end{equation}
Now, substituting representation (80) for $Z(s)$ into (87), where $s$ is now assumed to be an even integer limit variable $2m$, we get a direct formula for $t_1$ as
\begin{equation}\label{eq:20}
t_{1} = \lim_{m\to\infty}\left[\frac{(-1)^{m+1}}{2}\left(\frac{1}{(2m-1)!}\log (|\zeta|)^{(2m)}\big(\frac{1}{2}\big)+\sum_{k=1}^{\infty}\frac{1}{\left(\frac{1}{2}+2k\right)^{2m}}-2^{2m}\right)\right]^{-\frac{1}{2m}}.
\end{equation}
Next we numerically verify this formula in PARI, and the script is shown in Listing $3$. We broke up the representation (88) into several parts A to D. Also, sufficient memory must be allocated and precision set to high before running the script. We utilize the Hurwitz zeta function representation, since it is available in PARI, and the \textbf{derivnum} function for computing the $m$th derivative very accurately for high $m$. The results are summarized in Table $4$ for various limit values of $m$ from low to high, and we can observe the convergence to the real value as $m$ increases. Already at $m=10$ we get several digits of $t_1$, and at $m=100$ we get over $30$ digits. We performed even higher precision computations, and the result is clearly converging to $t_1$.
\begin{table}[hbt!]
\caption{The computation of $t_1$ by equation (88) for different $m$.}
\centering
\begin{tabular}{c c c}
\hline\hline
m & $t_1$ (First 30 Digits) & Significant Digits\\ [0.5ex]
\hline
$1$ & 6.578805783608427637281793074245 & 0 \\
$2$ & 12.806907343833847091925940068962 & 0 \\
$3$ & 13.809741306055624728153992726341 & 0 \\
$4$ & 14.038096225961619450676758199577 & 0 \\
$5$ & 14.\underline{1}02624784431488524304946186056 & 1 \\
$6$ & 14.\underline{1}23297656314161936112154413740 & 1 \\
$7$ & 14.1\underline{3}0464459254236820197453483721 & 2 \\
$8$ & 14.1\underline{3}3083993992268169646789606564 & 2 \\
$9$ & 14.13\underline{4}077755601528384660110026302 & 3 \\
$10$ & 14.13\underline{4}465134057435907124435534843 & 3 \\
$15$ & 14.1347\underline{2}1950874675119831881762569 & 5 \\
$20$ & 14.13472\underline{5}096741738055664458081219 & 6\\
$25$ & 14.13472514\underline{1}055464326339414131271 & 9 \\
$50$ & 14.134725141734693\underline{7}89641535771021 & 16 \\
$100$ & 14.134725141734693790457251983562 & 34
\\ [1ex]
\hline
\end{tabular}
\label{table:nonlin}
\end{table}
\lstset{language=C,caption={PARI script for computing equation (88).},label=DescriptiveLabel,captionpos=b}
\begin{lstlisting}[frame=single]
{
\\ Computes the first non-trivial zeta zero t1 via equation (88),
\\ using the closed form (80) for Z(2m); requires high precision (\p)
\\ and sufficient stack memory to be set beforehand.
\\ set limit variable (larger m -> more correct digits of t1)
m = 250;
\\ compute parameters A to D of representation (80):
\\ A: 2m-th derivative of log(zeta) at s = 1/2
A = derivnum(x=1/2,log(zeta(x)),2*m);
\\ B: the 1/(2m-1)! factor
B = 1/factorial(2*m-1);
\\ C: the 2^(2m) term
C = 2^(2*m);
\\ D: Hurwitz-zeta form of the series over (1/2+2k)^(-2m)
D = (2^(-2*m))*zetahurwitz(2*m,5/4);
\\ compute Z(2m)
Z = (-1)^(m+1)*(1/2)*(A*B-C+D);
\\ compute t1 = Z(2m)^(-1/(2m)), cf. (87)
t1 = Z^(-1/(2*m));
print(t1);
}
\end{lstlisting}
Next, we perform a higher precision computation for $m=250$ case, and the result is
\begin{equation}\label{eq:20}
\begin{aligned}
t_1=14.13472514173469379045725198356247027078425711569924 & \\
317568556746014996342980925676494901\underline{0}212214333747\ldots
\end{aligned}
\end{equation}
accurate to $87$ decimal places. In order to find the second non-trivial zero, we come back to (83), and solving for $t_2$ yields
\begin{equation}\label{eq:20}
t_2=\lim_{s\to\infty}\left(Z(s) -\frac{1}{t_{1}^{s}}-\frac{1}{t_{3}^{s}}-\ldots\right)^{-1/s}
\end{equation}
and since the higher order zeros decay faster than $Z(s)-t_1^{-s}$, we then have
\begin{equation}\label{eq:20}
t_2=\lim_{s\to\infty}\left(Z(s) -\frac{1}{t_{1}^{s}}\right)^{-1/s}
\end{equation}
and the zero becomes
\begin{equation}\label{eq:20}
t_{2} = \lim_{m\to\infty}\left[\frac{(-1)^{m+1}}{2}\left(\frac{1}{(2m-1)!}\log (|\zeta|)^{(2m)}\big(\frac{1}{2}\big)+\sum_{k=1}^{\infty}\frac{1}{\left(\frac{1}{2}+2k\right)^{2m}}-2^{2m}\right)-\frac{1}{t_{1}^{2m}}\right]^{-\frac{1}{2m}}.
\end{equation}
A numerical computation for $m=250$ yields
\begin{equation}\label{eq:20}
\begin{aligned}
t_2=21.0220396387715549926284795938969027773\underline{3}355195796311 & \\
4759442381621433519190301896683837161904986197676\ldots
\end{aligned}
\end{equation}
which is accurate to $38$ decimal places, and we assumed $t_1$ used was already pre-computed to $2000$ decimal places by other means. We cannot use the same $t_1$ computed earlier with same precision, as it will cause a self-cancelation in (91), and so, the numerical accuracy of $t_{n}$ must be much higher than $t_{n+1}$ to guarantee convergence. And continuing on, the next zero is computed as
\begin{equation}\label{eq:20}
t_{3} = \lim_{m\to\infty}\left[\frac{(-1)^{m+1}}{2}\left(\frac{1}{(2m-1)!}\log (|\zeta|)^{(2m)}\big(\frac{1}{2}\big)+\sum_{k=1}^{\infty}\frac{1}{\left(\frac{1}{2}+2k\right)^{2m}}-2^{2m}\right)-\frac{1}{t_{1}^{2m}}-\frac{1}{t_{2}^{2m}}\right]^{-\frac{1}{2m}}.
\end{equation}
A numerical computation for $m=250$ yields
\begin{equation}\label{eq:20}
\begin{aligned}
t_3=25.010857580145688763213790992562821818659549\underline{6}5846378 & \\
3317371101068278652101601382278277606946676481041\ldots
\end{aligned}
\end{equation}
which is accurate to $43$ decimal places, where $t_1$ and $t_2$ were used to sufficiently high precision, namely $2000$ decimal places in this example.
Hence, just like for the $n$th+1 Golomb prime recurrence formulas and the prime zeta function $P(s)$, the same limit works for non-trivial zeros. As a result, if we define a partial secondary zeta function up to the $n$th order
\begin{equation}\label{eq:20}
Z_n(s) = \sum_{k=1}^{n}\frac{1}{t_{k}^{s}},
\end{equation}
then the $n$th+1 non-trivial zero is
\begin{equation}\label{eq:20}
t_{n+1}=\lim_{m\to\infty}\left[Z(m)-Z_{n}(m)\right]^{-1/m}
\end{equation}
and the main recurrence formula:
\begin{equation}\label{eq:20}
t_{n+1} = \lim_{m\to\infty}\left[\frac{(-1)^{m+1}}{2}\left(\frac{1}{(2m-1)!}\log (|\zeta|)^{(2m)}\big(\frac{1}{2}\big)+\sum_{k=1}^{\infty}\frac{1}{\left(\frac{1}{2}+2k\right)^{2m}}-2^{2m}\right)-\sum_{k=1}^{n}\frac{1}{t_{k}^{2m}}\right]^{-\frac{1}{2m}}.
\end{equation}
One can actually use any number of representations for $Z(s)$, and the challenge will be to find more efficient algorithms to compute them. And finally, we report a numerical result for $Z(2m)$ for $m=250$ as:
\begin{equation}\label{eq:20}
\begin{aligned}
Z = 7.18316934899718140841650578011166023417090863769600 & \\
8517536818521464413577481501771580460474425539208\times 10^{-576}\ldots.
\end{aligned}
\end{equation}
From this number, we extracted the first $10$ non-trivial zeros, which are summarized in Table $5$ for $k=250$. The previous non-trivial zeros used were already known to high precision to $2000$ decimal places in order to compute the $t_{n+1}$ zero. One cannot use the same $t_n$ obtained earlier because it will cause self-cancelation in (98), and the accuracy for $t_n$ must be much higher than $t_{n+1}$ to ensure convergence. Initially we started with an accuracy of $87$ digits after decimal place for $t_1$, and then it dropped to $7$ to $12$ digits by the time it gets to $t_{10}$ zero. There is also a sudden drop in accuracy when the gaps get too small. Hence, these formulas are not very practical for computing high zeros as large numerical precision is required, for example, at the first Lehmer pair at $t_{6709}=7005.06288$, the gap between next zero is about $\sim 0.04$. Also, the average gap between zeros gets smaller as $t_{n+1}-t_{n}\sim\frac{2\pi}{\log(n)}$, making the use of this formula progressively harder and harder to compute.
\begin{table}[hbt!]
\caption{The $t_{n+1}$ computed by equation (98).}
\centering
\begin{tabular}{c c c c}
\hline\hline
$n$ & $t_{n+1}$ & $m=250$ & Significant Digits \\ [0.5ex]
\hline
$0$ & $t_{1}$ & 14.134725141734693790457251983562 & 87 \\
$1$ & $t_{2}$ & 21.022039638771554992628479593896 & 38 \\
$2$ & $t_{3}$ & 25.010857580145688763213790992562 & 43 \\
$3$ & $t_{4}$ & 30.424876125859513\underline{2}09940851142395 & 16 \\
$4$ & $t_{5}$ & 32.9350615877391896906623689640\underline{7}3 & 29 \\
$5$ & $t_{6}$ & 37.58617815882567125\underline{7}190902153280 & 18 \\
$6$ & $t_{7}$ & 40.918719012147\underline{4}63977678179889317 & 13 \\
$7$ & $t_{8}$ & 43.3270732809149995194961\underline{1}7449701 & 22 \\
$8$ & $t_{9}$ & 48.005150\underline{8}79831498066163921378664 & 7 \\
$9$ & $t_{10}$ & 49.77383247767\underline{2}299146155484901550 & 12
\\ [1ex]
\hline
\end{tabular}
\label{table:nonlin}
\end{table}
\section{Duality between primes and non-trivial zeros}
We outline the duality between primes and non-trivial zeros. The Golomb's recurrence formula (19) is an exact formula for the $n$th+1 prime
\begin{equation}\label{eq:20}
p_{n+1}=\lim_{s\to \infty}\left(1-\frac{Q_n(s)}{\zeta(s)}\right)^{-1/s},
\end{equation}
and the Hadamard product formula establishes $\zeta(s)$ as a function of non-trivial zeros:
\begin{equation}\label{eq:20}
\zeta(s)=\frac{\pi^{\frac{s}{2}}}{2(s-1)\Gamma(1+\frac{s}{2})}\prod_{\rho}^{}\left(1-\frac{s}{\rho}\right).
\end{equation}
Hence, this is a pathway from non-trivial zeros to the primes and without assuming (RH), as the Hadamard product is over all zeros. On the other hand, the recurrence formula for the $n$th+1 non-trivial zero is
\begin{equation}\label{eq:20}
\begin{aligned}
t_{n+1} = \lim_{m\to\infty}&\Bigg[\frac{(-1)^{m+1}}{2}\Big(\frac{1}{(2m-1)!}\log (|\zeta|)^{(2m)}\big(\frac{1}{2}\big)-2^{2m+1}+\\
& +2^{2m-1}\big((1-2^{-2m})\zeta(2m)+\beta(2m)\big)\Big)-\sum_{k=1}^{n}\frac{1}{t_k^{2m}}\Bigg]^{-\frac{1}{2m}}
\end{aligned}
\end{equation}
where now one could substitute the Euler product for the zeta and beta functions, or both, which is what we will do next. We have
\begin{equation}\label{eq:20}
\left(1-2^{-2m}\right)\zeta(2m)=\prod_{n=2}^{\infty}\left(1-\frac{1}{p_n^{2m}}\right)^{-1}
\end{equation}
and
\begin{equation}\label{eq:20}
\beta(2m)=\prod_{n=2}^{\infty}\left(1-\frac{\chi_4(p_n)}{p_n^{2m}}\right)^{-1}.
\end{equation}
As a result,
\begin{equation}\label{eq:20}
\begin{aligned}
t_{n+1} = \lim_{m\to\infty}&\Bigg[\frac{(-1)^{m+1}}{2}\Bigg(\frac{1}{(2m-1)!}\log (|\zeta|)^{(2m)}\big(\frac{1}{2}\big)-2^{2m+1}+\\
& +2^{2m-1}\Big(\prod_{n=2}^{\infty}\big(1-p_n^{-2m}\big)^{-1}+\prod_{n=2}^{\infty}\big(1-\chi_4(p_n)p_n^{-2m}\big)^{-1}\Big)\Bigg)-\sum_{k=1}^{n}\frac{1}{t_k^{2m}}\Bigg]^{-\frac{1}{2m}}
\end{aligned}
\end{equation}
which completes the pathway from primes to non-trivial zeros. We note that these formulas are independent, and thus avoid any circularity, however, the recurrence formula for $t_{n+1}$ is dependent on (RH). And finally, in Appendix A, we present a PARI script to compute (105) recursively for several zeros.
\section{Conclusion}
We explored various representations of the Riemann zeta function, such as the Euler prime product, the Laurent expansion, and the Golomb's recurrence formula for primes. The Golomb's formula is a basis for developing similar recurrence formulas for the $n$th+1 non-trivial zeros via an independent formula for the secondary zeta function $Z(2m)$, which does not involve non-trivial zeros. Hence, the non-trivial zeros can be extracted under the right excitation in the limit, just like prime numbers. We verified these formulas numerically, and they indeed do converge to $t_{n+1}$. The difficulty lies in computation of the $\log (|\zeta|)^{(2m)}(\frac{1}{2})$ term. We utilized the PARI/GP software package for computing $Z(2m)$ for $m=250$, and the first zero $t_1$ achieves $87$ correct digits after the decimal place. Presently, computing beyond that caused the test computer to run out of memory. And so, if better and more efficient methods for computing $Z(2m)$ are developed, then higher zeros can be computed accurately. But even then, computing up to a millionth zero, for example, would be almost insurmountable. The only open question is whether the recurrence for the non-trivial zeros will hold up, namely the limit $O(t_n^{-s})\gg O(t_{n+1}^{-s})$ as $s\to\infty$, as the average gap between non-trivial zeros decreases as $t_{n+1}-t_{n}\sim\frac{2\pi}{\log(n)}$ as $n\to\infty$. In the case of the Golomb's formula for primes, this gap is bounded.
These formulas also suggest a new criterion for (RH). It suffices to take a first zero $t_1$ represented by (88) which depends on (RH) as
\begin{equation}\label{eq:20}
t_{1} = \lim_{m\to\infty}\left[\frac{(-1)^{m+1}}{2}\left(\frac{1}{(2m-1)!}\log (|\zeta|)^{(2m)}\big(\frac{1}{2}\big)+\sum_{k=1}^{\infty}\frac{1}{\left(\frac{1}{2}+2k\right)^{2m}}-2^{2m}\right)\right]^{-\frac{1}{2m}}
\end{equation}
and passing it through to any number of representations of $\zeta(s)$ valid in the critical strip to work out
\begin{equation}\label{eq:20}
\zeta(\frac{1}{2}+i t_1)=0.
\end{equation}
For example, if we take equation (45) and substitute $t_1$ as
\begin{equation}\label{eq:20}
\frac{1}{|\rho_1|^2}=\frac{1}{(\frac{1}{2})^2+t_1^{2}} = \lim_{k\to\infty}\frac{2}{\sqrt{k}}\sum_{m=1}^{k}\frac{1}{\sqrt{m}}\cos(t_1\log(m/k)),
\end{equation}
then recovering $t_1$ would imply (RH) if there were a way to work out the series.
We also would like to extend these formulas for the secondary beta function
\begin{equation}\label{eq:20}
B(s) = \sum_{n=1}^{\infty}\frac{1}{r_{n}^{s}},
\end{equation}
where $r_n$ are imaginary components of non-trivial zeros of $\beta(s)$ on the critical line. For example, the first few zeros are $r^{}_1 = 6.02094890...$, $r^{}_2 = 10.24377030...$, $r^{}_3 = 12.98809801...$. Then, the proposed recurrence formula would be
\begin{equation}\label{eq:20}
r_{n+1}=\lim_{s\to\infty}\left[B(s)-B_{n}(s)\right]^{-1/s},
\end{equation}
where
\begin{equation}\label{eq:20}
B_n(s) = \sum_{k=1}^{n}\frac{1}{r_{k}^{s}}
\end{equation}
is the partial secondary beta function up to the $n$th order. And just like for the Dirichlet beta, the same could potentially apply to other Dirichlet L-functions.
Finally, we highlighted the duality between primes and non-trivial zeros, where it is possible to convert non-trivial zeros into an individual prime, and conversely, to convert all primes into an individual non-trivial zero.
\texttt{Email: [email protected]}
\section{Appendix A}
The script in Listing $4$ computes the $n$th+1 non-trivial zero recursively from a set of primes by equation (105). The parameter $\textbf{pmax}$ specifies the number of primes to use for the Euler product. The starting limiting variable is $\textbf{m}$, and at each iteration $\textbf{m}$ is decreased by a pre-set amount $\textbf{step\textunderscore m}$, so that the accuracy for $t_n$ will be greater than for $t_{n+1}$ in order to avoid self-cancelation. The values for computed zeros are stored in an array, and the partial secondary zeta $Z_n$ is computed at every iteration. By leveraging these parameters, the output can converge to different values, and in some cases will not converge. We optimized them to give $4$ zeros accurately, and beyond that it doesn't converge and then $m$ has to be increased to a larger value. The results of running this script are summarized in Table $6$. As before, we obtain $t_1$ accurate to $87$ decimal places, but $t_2$ now is accurate to $26$ decimal places, and the next zeros to $12$ and $1$ decimal places respectively. At this point the iteration has run its course. We would like to increase $m$, but that is presently outside the range of the test computer.
\begin{table}[hbt!]
\caption{The $t_{n+1}$ by PARI script in Listing $4$}
\centering
\begin{tabular}{c c c c c}
\hline\hline
$m$ & $n$ & $t_{n+1}$ & First $30$ digits of computed results & Significant Digits \\ [0.5ex]
\hline
250 & $0$ & $t_{1}$ & 14.134725141734693790457251983562 & 87 \\
175 & $1$ & $t_{2}$ & 21.0220396387715549926284795\underline{9}4245 & 26 \\
100 & $2$ & $t_{3}$ & 25.01085758014\underline{5}177681574221575793 & 12 \\
25 & $3$ & $t_{4}$ & 30.\underline{4}13415903597141481192661667214 & 1
\\ [1ex]
\hline
\end{tabular}
\label{table:nonlin}
\end{table}
\lstset{language=C,caption={PARI script for generating non-trivial zeros from primes.},label=DescriptiveLabel,captionpos=b}
\begin{lstlisting}[frame=single][hbt!]
{
\\ Recursively computes non-trivial zeros t_n from primes via eq. (105).
\\ Each iteration lowers the limit variable m so that earlier zeros are
\\ known to higher relative accuracy than the zero being extracted,
\\ avoiding self-cancelation in Z - Zn.
m = 250; \\ starting limit variable m
step_m = -75; \\ decrease limit step_m
pmax = 2000; \\ set max number of primes
tn = vector(100); \\ allocate vector to hold zeros
n=1; \\ init non-trivial zero counter
\\ start loop
while(m != 0,
\\ compute parameters A to D
A = derivnum(x=1/2,log(zeta(x)),2*m); \\ 2m-th derivative of log(zeta) at 1/2
B = 1/(factorial(2*m-1));
C = 2^(2*m+1); D = 2^(2*m-1);
\\ compute Euler products over odd primes (chi_4(2)=0, so start at i=2)
P1 = prod(i=2,pmax,(1-1/prime(i)^(2*m))^(-1));
P2 = prod(i=2,pmax,
(1-(-1)^((prime(i)-1)/2)/prime(i)^(2*m))^(-1));
\\ compute Z(2m)
Z = 0.5*(-1)^(m+1)*(A*B-C+D*(P1+P2));
\\ compute partial sum Zn up to order n-1; Zn must be reset on every
\\ pass, since any previously accumulated terms used the old value of m
Zn = 0;
for(j=1,n-1,Zn = Zn + 1/tn[j]^(2*m));
\\ compute and print tn
tn[n] = (Z-Zn)^(-1/(2*m));
print(m, ":", tn[n]);
m = m+step_m; \\ decrease m by step_m
n = n+1; \\ increment zero counter
)
}
\end{lstlisting}
\end{document} |
\begin{document}
\title{On graphs with a large chromatic number containing no small odd cycles}
\author{S.L. Berlov\thanks{The work was supported by RFBR grant No.~10-01-00096-A.}, Ilya I. Bogdanov\footnotemark[1]}
\maketitle
\begin{abstract}
In this paper, we present the lower bounds for the number of vertices in a graph with a large chromatic number containing no small odd cycles.
\end{abstract}
\section{Introduction}
P.~Erd\H os~\cite{erdos} showed that for every integer $n>1$ and $g>2$, there exists a graph of girth~$g$ and chromatic number greater than~$n$ which contains not more than $n^{2g+1}$ vertices. Later, he conjectured~\cite{erdos3} that for every positive integer~$s$ there exists a constant $c_s$ such that for every graph~$G$ having $N$ vertices and containing no odd cycles of length less than $c_sN^{1/s}$, its chromatic number does not exceed $s+1$.
This conjecture was proved by Kierstead, Szemer\'edi, and Trotter~\cite{kierstead}; in fact, they have proved a more general result. In our case, their result states that the chromatic number of any graph on $N$ vertices containing no odd cycles of length at most $4sN^{1/s}+1$ does not exceed~$s+1$.
Basing on these results, we introduce the following notation.
\begin{Def}
Assume that $n,k>1$ are two integers. Denote by $f(n,k)$ the maximal integer $f$ satisfying the following property: If a graph $G=(V,E)$ contains no odd cycles of length at most $2k-1$, and $|V|\leq f$, then there exists a proper coloring of its vertices in $n$ colors.
\end{Def}
Notice that a graph contains no odd cycles of length at most $2k-1$ if and only if it contains no simple odd cycles of the same lengths.
The results mentioned above imply that $f(n,k)<n^{4k+1}$ and
$f({s+1},[2sN^{1/s}]+1)\geq N$. One can obtain that the latter inequality is equivalent to the bound
\begin{equation}
f(n,k)\geq \left(\frac k{2(n-1)}\right)^{n-1}-1.
\label{lower-kst}
\end{equation}
A different upper bound for $f(n,k)$ can be obtained from the following graph constructed by Schrijver~\cite{schrijver}. Let $m,d$ be some positive integers. Set $X=\{1,2,\dots,2m+d\}$, $V=\{{x\subset X}:\; |x|=m,\; 1<|i-j|<{2m+d-1}
\mbox{ \ for all pairs of distinct\ }i,j\in x\}$, $E=\{{(x,y)\in V^2}:\; {x\cap y=\emp}\}$. The Schrijver graph $(V,E)$
is $(d+2)$-chromatic, whilst it does not contain odd cycles of length less than $\frac{2m+d}d$. Next, we have $|V|=\frac{2m+d}{m+d}\binom{m+d}{d}$; now it is easy to obtain that
\begin{equation}
f(n,k)<\frac{(n-1)(2k-1)+2}{(n-1)k+1}\binom{(n-1)k+1}{n-1}.
\label{upper-schr}
\end{equation}
When we fix the value of $n$, the bounds~\eqref{lower-kst} and~\eqref{upper-schr} become the polynomials in $k$ of the same degree; hence, in some sense they are close to each other. On the contrary, when we fix the value of $k$ and consider the values $n>k/(2e)+1$, we see that the right-hand part of~\eqref{lower-kst}
decreases (as a function in $n$). Hence for larger values of $n$ this estimate does not provide any additional information.
On the other hand, for $k=2$ the asymptotics of $f(n,2)$ is tightly connected with the asymptotics of Ramsey numbers $R_{n,3}$. In the papers of Ajtai, Koml\'os, and E. Szemer\'edi~\cite{aks} and Kim~\cite{kim} it is shown that $c_1\frac{n^2}{\log n}\leq
R(n,3)\leq c_2\frac{n^2}{\log n}$ for some absolute constants $c_1,c_2$. One can check that these results imply the bounds
$$
c_3 n^2\log n\leq f(n,2)\leq c_4 n^2 \log n
$$
for some absolute constants $c_3,c_4$.
In the present paper, we find nontrivial lower bounds for $f(n,k)$ for all values of $n\geq 2$ and $k\geq 2$. In Section~2, we make some combinatorial considerations leading to the recurrent bounds for $f(n,k)$. In Section~3, we obtain explicit bounds following from those results. In particular, we show (see Theorem~\ref{estimate}) that
$$
f(n,k)\geq \frac{(n+k)(n+k+1)\cdots(n+2k-1)}{2^{k-1}k^k}
$$
for all $n\geq2$ and $k\geq 2$.
\section{Recurrent bounds}
Firstly, we introduce some notation.
Let $G=(V,E)$ be an (unoriented) graph. We denote the {\em distance} between the vertices $u,v\in V$ by $\mathop{\operatorfont dist}_G(u,v)$.
Consider a vertex $v\in V$, and let $r$ be a nonnegative integer. We denote by $U_r(v,G)=\{u\in V\mid \mathop{\operatorfont dist}_G(u,v)\le r\}$ the {\em ball} of radius $r$ with the center at~$v$, and by $S_r(v,G)=\{u\in V\mid \mathop{\operatorfont dist}_G(u,v)= r\}$ the {\em sphere} with the same radius and center. In particular, $S_0(v,G)=U_0(v,G)=\{v\}$. Denote also by $\dd^{\rm out}_G
V_1=\{u\in V\setminus V_1\mid
\exists v\in V_1: (u,v)\in E\}$ the {\em outer boundary} of a set $V_1\subseteq V$. In particular, $S_r(v,G)=\dd^{\rm out}_G U_{r-1}(v,G)$.
For a set $V_1\subseteq V$, we denote by $G(V_1)$ the induced subgraph on the set of vertices~$V_1$.
Let us fix some integers $n$ and $k$ which are greater than~1. We need the following easy proposition.
\begin{Prop}
Graph $G$ does not contain odd cycles of length not exceeding $2k-1$ if and only if for each vertex $v\in V$ and each positive integer $r<k$, the subgraph $G(S_r(v,G))$ contains no edges.
\label{sph-empty}
\end{Prop}
\proof Assume that the subgraph $G(S_r(v,G))$ contains an edge $(u_1,u_2)$. Supplementing this edge by shortest paths from $v$ to $u_1$ and $u_2$, we obtain a cycle of length~$2r+1\leq 2k-1$.
Conversely, assume that $G$ contains a cycle of length $\leq 2k-1$.
Consider such a cycle $C$ of the minimal length $2r+1$ (then $r<k$). Choose any vertex~$v$ of~$C$, and let $u_1,u_2$ be two vertices of~$C$ such that $\mathop{\operatorfont dist}_C(v,u_1)=\mathop{\operatorfont dist}_C(v,u_2)=r$. In fact, we have
$\mathop{\operatorfont dist}_G(u_1,v)=\mathop{\operatorfont dist}_G(u_2,v)=r$. Indeed, assume that $\mathop{\operatorfont dist}_G(v,u_1)<r$, and choose a path~$P$ of the minimal length connecting $v$ and $u_1$. Then one can supplement it by one of the two subpaths of~$C$ connecting $u_1$ and $v$ to obtain an odd cycle $C'$. The length of $C'$ is smaller than $r+(r+1)=2r+1$, which contradicts the choice of~$C$.
Thus, $u_1,u_2\in S_r(v,G)$, and the graph $G(S_r(v,G))$ contains an edge.
\qed
Now let us fix an arbitrary graph $G=(V,E)$ with a minimal number of vertices such that it contains no odd cycles of length not exceeding $2k-1$, and $\chi(G)>n$ (hence $|V|=f(n,k)+1$). By the minimality condition, the graph~$G$ is connected. Moreover, for every $v\in V$ and $0\leq r\leq k$, the sphere~$S_r(v,G)$ is nonempty. Otherwise we would have $V=\bigcup_{i=0}^{r-1} S_i(v,G)$, where all the graphs $G(S_i(v,G))$ contain no edges by Proposition~\ref{sph-empty}. Therefore, it is possible to color this graph properly in two colors: the vertices of the sets $S_i(v,G)$ with even~$i$ in color~1, while those for odd~$i$ --- in color~2 (the vertex~$v$ should be colored in color~1).
Let us introduce the number $d=\max_{v\in V} |U_{k-1}(v,G)|$.
\begin{Lemma}
\label{big okr}
For every vertex $v\in V$, we have $|U_{k-1}(v,G)|\geq n(k-1)+1$. In particular, $d\geq n(k-1)+1$.
\end{Lemma}
\proof
Notice that $U_{k-1}(v,G)=\bigcup_{r=0}^{k-1}S_r(v,G)$. Assume that $|S_r(v,G)|\geq n$ for every $r=1,\dots,k-1$; then
$$
|U_{k-1}(v,G)|=\sum_{r=0}^{k-1}|S_r(v,G)|\geq 1+(k-1)\cdot n,
$$
as desired.
Assume now that $|S_r(v,G)|< n$ for some $1\leq r\leq k-1$. Consider a subgraph $G'=G\bigl(V\setminus U_{r-1}(v,G)\bigr)$. From the minimality condition, it can be colored properly in $n$ colors. Consider an arbitrary such proper coloring; then the vertices of $S_r(v,G)$ are colored in at most $n-1$ colors, so there exists a color (say, color~1) different from them. Let us now color the vertices of~$S_{r-1}(v,G)$ in color~1, and then color all the remaining vertices of the sets~$S_i(v,G)$ ($i<r-1$) alternately: we use colors~1 and~2 (here 2 is any remaining color) for even and odd values of~$i-(r-1)$, respectively. It follows from Proposition~\ref{sph-empty} that this coloring is proper. This contradicts the choice of~$G$.
\qed
\begin{Lemma}
\label{Sergey}
$|V|\geq f(n-1,k)+d+1$.
\end{Lemma}
\proof
Choose a vertex~$v$ such that $d=|U_{k-1}(v,G)|$. Assume that $|V\setminus
U_{k-1}(v,G)|\leq f({n-1},k)$; then one can color properly vertices of the set $V\setminus U_{k-1}(v,G)$ in $n-1$ colors. Now we can color the vertices of the set $U_{k-1}(v,G)$ in colors~1 and~$n$ (where $n$ is a new color, and 1 is any of the old colors) in the following way: we color all the vertices of~$S_r(v,G)$ in color~1 or~$n$, if $r-(k-1)$ is odd or even, respectively. By Proposition~\ref{sph-empty}, we obtain a proper coloring of~$G$ in $n$ colors which is impossible.
Thus, our assumption is wrong, so $|V\setminus
U_{k-1}(v,G)|\geq f(n-1,k)+1$, and
$$
|V|\geq f(n-1,k)+1+|U_{k-1}(v,G)|=f(n-1,k)+d+1.
\eqno\qed
$$
\begin{Lemma}
\label{Ilya}
$\ds |V|\geq \frac{d^{1/(k-1)}}{d^{1/(k-1)}-1}\bigl(f(n-2,k)+1\bigr)$.
\end{Lemma}
\proof
We will construct inductively a sequence of partitions of~$V$ into nonintersecting parts,
$$
V=U_1\sqcup U_2\sqcup\dots\sqcup U_s\sqcup N_s\sqcup V_s,
$$
such that the following conditions are satisfied:
(i) for all $i=1,\dots,s$ we have $\dd^{\rm out}_G U_i\subseteq N_s$; moreover, $\dd^{\rm out}_G V_s\subseteq N_s$;
(ii) for every $i=1,2,\dots,s$ the graph $G(U_i)$ is bipartite (in fact, $U_i$ is a ball with radius not exceeding~$k-1$ in a certain subgraph of~$G$);
(iii) $(d^{1/(k-1)}-1)(|U_1|+\dots+|U_s|)\geq |N_s|$.
For the base case $s=0$, we may set $V_0=V$, $N_0=\emp$ (there are no sets~$U_i$ in this case).
For the induction step, suppose that the partition $V=U_1\sqcup U_2\sqcup\dots\sqcup U_{s-1}\sqcup N_{s-1}\sqcup V_{s-1}$ has been constructed, and assume that the set~$V_{s-1}$ is nonempty. Consider the graph $G_{s-1}=G(V_{s-1})$ and choose an arbitrary vertex $v\in V_{s-1}$. Now consider the sets
$$
U_0(v,G_{s-1})=\{v\},\quad U_1(v,G_{s-1}), \quad \dots, \quad U_{k-1}(v,G_{s-1}).
$$
One of the ratios
$$
\frac{|U_1(v,G_{s-1})|}{|U_0(v,G_{s-1})|}, \quad \frac{|U_2(v,G_{s-1})|}{|U_1(v,G_{s-1})|}, \quad
\dots, \quad \frac{|U_{k-1}(v,G_{s-1})|}{|U_{k-2}(v,G_{s-1})|}
$$
does not exceed~$d^{1/(k-1)}$, since the product of these ratios is
$$
|U_{k-1}(v,G_{s-1})|\leq |U_{k-1}(v,G)|\leq d.
$$
So, let us choose $1\leq m\leq k-1$ such that
$$
\frac{|U_m(v,G_{s-1})|}{|U_{m-1}(v,G_{s-1})|}\leq d^{1/(k-1)}.
$$
Now we set
$$
U_s=U_{m-1}(v,G_{s-1}), \quad N_s=N_{s-1}\cup S_m(v,G_{s-1}),
\quad
V_s=V_{s-1}\setminus U_m(v,G_{s-1}).
$$
Since the condition~(i) was satisfied on the previous step, we have
$$
\dd^{\rm out}_G V_s\subseteq \dd^{\rm out}_G V_{s-1}\cup S_m(v,G_{s-1})\subseteq N_s
$$
and
$$
\dd^{\rm out}_G U_s\subseteq \dd^{\rm out}_G V_{s-1}\cup S_m(v,G_{s-1})\subseteq N_s,
$$
so this condition also holds now. The condition~(ii) is satisfied by Proposition~\ref{sph-empty}. Finally, the choice of~$m$ and the condition~(iii) for the previous step imply that
\begin{gather*}
d^{1/(k-1)} |U_s|=d^{1/(k-1)}|U_{m-1}(v,G_{s-1})|\geq |U_m(v,G_{s-1})|,\\
(d^{1/(k-1)}-1)(|U_1|+\dots+|U_{s-1}|)\geq |N_{s-1}|
\end{gather*}
and hence
$$
(d^{1/(k-1)}-1)(|U_1|+\dots+|U_{s}|)\geq
|N_{s-1}|+|U_m(v,G_{s-1})|-|U_{m-1}(v,G_{s-1})|
=|N_s|.
$$
Thus, the condition~(iii) also holds on this step.
Continuing the construction in this manner, we will eventually come to the partition with $V_s=\emp$ since the value of $|V_s|$ strictly decreases. As the result, we obtain the partition $V=U_1\sqcup U_2\sqcup\dots\sqcup U_s\sqcup N_s$ such that $|N_s|\leq
(d^{1/(k-1)}-1)(|U_1|+\dots+|U_s|)$. So,
$$
d^{1/(k-1)}|N_s|\leq (d^{1/(k-1)}-1)(|U_1|+\dots+|U_s|)+(d^{1/(k-1)}-1)|N_s|=|V|(d^{1/(k-1)}-1),
$$
or $\ds |N_s|\leq |V|\frac{d^{1/(k-1)}-1}{d^{1/(k-1)}}$.
Assume now that $|N_s|\leq f(n-2,k)$; then one may color the vertices of~$G(N_s)$ in $n-2$ colors, and then color the vertices of each bipartite graph $G(U_i)$ in two remaining colors. This coloring might be not proper only if some vertices of two subgraphs~$G(U_i)$ and~$G(U_j)$ ($i\neq j$) are adjacent, which is impossible by the condition~(i). So, $G$ is $n$-colorable which is wrong. Therefore, $|N_s|\geq f(n-2,k)+1$ and hence $\ds |V|\geq \frac{d^{1/(k-1)}}{d^{1/(k-1)}-1}|N_s|\geq
\frac{d^{1/(k-1)}}{d^{1/(k-1)}-1}{(f(n-2,k)+1)} $, as desired.
\qed
\Zam 1. In the statement of the Lemma above, one may use the number
$\ds d'=\max_{\emp\ne V'\subseteq V}\min_{u\in V'}|U_{k-1}(u,G(V'))|$ instead of~$d$. For reaching that, on each step it is sufficient to choose the vertex $v\in V_{s-1}$ such that
$$
|U_{k-1}(v,G_{s-1})|=\min_{u\in V_{s-1}}|U_{k-1}(u,G_{s-1})|.
$$
Clearly, we have $d'\leq d$.
\Zam2.
On the other hand, the number $d^{1/(k-1)}$ in the same statement can be replaced by $(f(n,k)+1)^{1/k}$. Now, in the proof one may deal with $k+1$ sets
$$
U_0(v,G_{s-1})=\{v\},\quad U_1(v,G_{s-1}), \quad \dots, \quad U_k(v,G_{s-1})
$$
and use the condition $|U_k(v,G_{s-1})|\leq |V|=f(n,k)+1$.
The next theorem follows immediately from Lemmas~\ref{Sergey} and~\ref{Ilya}.
\begin{Theorem}
\label{recurr}
For all integers $n,k\geq 2$, we have
\begin{equation}
f(n,k)\geq
\min_{t\geq n(k-1)+1}\max\left\{f(n-1,k)+t,\frac{t^{1/(k-1)}}{t^{1/(k-1)}-1}(f(n-2,k)+1)-1\right\}.
\label{minmax}
\end{equation}
\label{theo}
\end{Theorem}
\proof
From the choice of~$G$ we have $f(n,k)=|G|-1$. From Lemmas~\ref{Sergey} and~\ref{Ilya} it follows that
$$
|G|\geq \max\left\{f(n-1,k)+d,\frac{d^{1/(k-1)}}{d^{1/(k-1)}-1}(f(n-2,k)+1)-1\right\}+1.
$$
Since $d\geq n(k-1)+1$ by Lemma~\ref{big okr}, the statement holds.
\qed
\begin{Cor}
For every real $g>1$, we have
\begin{equation}
f(n,k)\geq
\min\left\{f(n-1,k)+g,
\frac{g^{1/(k-1)}}{g^{1/(k-1)}-1}(f(n-2,k)+1)-1\right\}.
\label{min}
\end{equation}
\label{cor}
\end{Cor}
\proof Let $t_0$ be the integer for which the minimum in~\eqref{minmax} is achieved. As $t>1$ increases, the value of $f(n-1,k)+t$ also increases, while the value of $\ds \frac{t^{1/(k-1)}}{t^{1/(k-1)}-1}(f(n-2,k)+1)-1$ decreases. Thus, if $g\leq t_0$, then we have
$$
{f(n-1,k)+g}\leq {f(n-1,k)+t_0}\leq f(n,k).
$$
Otherwise, we have $g>t_0$ and
$$
\frac{g^{1/(k-1)}}{g^{1/(k-1)}-1}{(f(n-2,k)+1)}-1\leq
\frac{t_0^{1/(k-1)}}{t_0^{1/(k-1)}-1}{(f(n-2,k)+1)}-1\leq f(n,k).
\eqno\qed
$$
\section{Explicit bounds}
Now we present the explicit lower bounds for $f(n,k)$ following from the results of the previous section.
Notice that for every $k$ we have $f(1,k)=1$ and $f(2,k)=2k$. Lemma~\ref{Sergey} implies now the following statement.
\begin{Theorem}
For all integers $n\geq 1$ and $k\geq 2$ the inequality
$\ds f(n,k)\geq n+\frac{(k-1)(n-1)(n+2)}2$ holds.
\label{Sergey2}
\end{Theorem}
\proof Induction on~$n$. In the base cases $n=1$ or $n=2$ the statement holds. Assume now that $n>2$. By Lemmas~\ref{big okr} and~\ref{Sergey} we have $f(n,k)\geq f(n-1,k)+n(k-1)+1$. Next, the induction hypothesis implies that
$$
f(n-1,k)\geq (n-1)+\frac{(k-1)(n-2)(n+1)}2.
$$
Therefore,
$$
f(n,k)\geq f(n-1,k)+n(k-1)+1\geq
n+\frac{(k-1)(n-1)(n+2)}2,
$$
as desired.
\qed
The next estimate uses the whole statement of Theorem~\ref{theo}. For convenience, we use the notation $n\^k=n(n+1)\dots(n+k-1)$.
\begin{Lemma}
Suppose that for some integer $n_0\geq 1$, integer $k\geq 2$, and real $a$, the inequality
\begin{equation}
f(m,k)\geq \frac{(m+a)\^k}{2^{k-1}k^k}
\label{asym1}
\end{equation}
holds for two values $m=n_0$ and $m=n_0+1$. Then the same estimate holds for all integers $m\geq n_0$.
\label{common}
\end{Lemma}
\proof
We prove by induction on $n\geq n_0$ that the estimate~\eqref{asym1} holds for $m=n$. The base cases $n=n_0$ and $n=n_0+1$ follow from the lemma assumptions.
For the induction step, suppose that $n\geq n_0+2$. Let $c=2^{1-k}k^{-k}$, $g=ck(n+a)\^{k-1}$. By the induction hypothesis, we have
\begin{equation}
f(n-1,k)+g\geq c(n+a-1)\^k+ck(n+a)\^{k-1}
=c(n+a)\^{k-1}(n+a-1+k)=c(n+a)\^k.
\label{est-lm}
\end{equation}
Notice that Lemmas~\ref{big okr} and~\ref{Sergey} imply that $f(n,k)\geq
f(n-1,k)+n(k-1)+1$. Hence, if $g\leq n(k-1)+1$, then $f(n,k)\geq
f(n-1,k)+g\geq c(n+a)\^k$, as desired.
Thus we may deal only with the case $g>n(k-1)+1$; in particular, $g>1$. We intend to use Corollary~\ref{cor}; for this, let us estimate the second term in the right-hand part of~\eqref{min}.
From the AM--GM inequality we have
$$
g^{1/(k-1)}=(ck)^{1/(k-1)}\left((n+a)\^{k-1}\right)^{1/(k-1)}
\leq \frac1{2k}\left(n+a+\frac k2-1\right).
$$
Let $s=n+a+\frac k2-1$; then $s\geq 2kg^{1/(k-1)}>2k$. Therefore,
\begin{multline*}
\frac{g^{1/(k-1)}}{g^{1/(k-1)}-1}
\geq \frac{s}{s-2k}\geq \frac{s+k-1}{s-(k+1)}\geq \\
\geq \frac{s^2+s(k-1)+\frac{k(k-2)}4}{s^2-s(k+1)+\frac{k(k+2)}4}
=\frac{(n+a+k-2)(n+a+k-1)}{(n+a-2)(n+a-1)}.
\end{multline*}
Finally, from the induction hypothesis we get
\begin{multline}
\frac{g^{1/(k-1)}}{g^{1/(k-1)}-1}(f(n-2,k)+1)-1
\geq \frac{g^{1/(k-1)}}{g^{1/(k-1)}-1}f(n-2,k)\geq\\
\geq \frac{(n+a+k-2)(n+a+k-1)}{(n+a-2)(n+a-1)}\cdot
c(n+a-2)\^k=c(n+a)\^k.
\label{est-lm2}
\end{multline}
Thus, for the value of~$g$ chosen above, Corollary~\ref{cor} and the estimates~\eqref{est-lm} and~\eqref{est-lm2} provide that
$$
f(n,k)\geq
\min\left\{f(n-1,k)+g,
\frac{g^{1/(k-1)}}{g^{1/(k-1)}-1}(f(n-2,k)+1)-1\right\}
\geq c(n+a)\^k,
$$
as desired.
\qed
Finally, let us show that the constant $a$ in the previous Lemma can be chosen relatively large.
\begin{Theorem}
For all $k\geq 2$ and $n\geq 2$, we have $\ds f(n,k)\geq \frac{\left(n+k\right)\^k}{2^{k-1}k^k}$.
\label{estimate}
\end{Theorem}
\proof
Set $a=k$. Let us check the inequality~\eqref{asym1} for $n=2$ and $n=3$. Recall that $f(2,k)=2k$. Now for $m=2$ we get
$$
2^{k-1}k^kf(2,k)=2^kk^{k+1}=(2k)^{k-1}\cdot 2k^2\geq (k+2)(k+3) \dots
2k\cdot (2k+1)=(k+2)\^k.
$$
For $m=3$, Theorem~\ref{Sergey2} yields $f(3,k)\geq 5k-2$, and the previous estimate now implies that
$$
2^{k-1}k^kf(3,k)\geq 2^{k-1}k^k(5k-2)\geq 2^kk^kf(2,k)
\geq 2(k+2)\^k>(k+3)\^k.
$$
Thus, the inequality~\eqref{asym1} holds for $m=2$ and $m=3$, and hence for all $m\geq 2$ by Lemma~\ref{common}.
\qed
The authors are very grateful to the referees for the valuable comments.
\end{document} |
\begin{document}
\title[]{Root groupoid and related Lie superalgebras}
\author{M.~Gorelik}
\address{Weizmann Institute of Science}
\email{[email protected]}
\author{V.~Hinich}
\address{University of Haifa}
\email{[email protected]}
\author{V.~Serganova}
\address{UC Berkeley}
\email{[email protected]}
\begin{abstract}
We introduce a notion of a root groupoid as a replacement of the notion of Weyl group for (Kac-Moody) Lie superalgebras. The objects of the root
groupoid classify certain root data, the arrows are defined by generators and relations. As an abstract groupoid the root groupoid has many connected components and we show that to some of them one can associate
an interesting family of Lie superalgebras which we call root
superalgebras. We classify root superalgebras satisfying some additional assumptions.
To each root groupoid component we associate a graph (called skeleton) generalizing the Cayley graph of the Weyl group. We establish the
Coxeter property of the skeleton generalizing in this way the fact that the Weyl group of a Kac-Moody Lie algebra is Coxeter.
\end{abstract}
\maketitle
\section{Introduction}
\subsection{Generalities}
\subsubsection{}
In this paper we present an attempt to generalize the notion of Weyl group
to Lie superalgebras. For a semisimple Lie algebra Weyl group parametrizes
Borel subalgebras containing a fixed torus. This cannot be directly extended
to Lie superalgebras since there are essentially different choices of Borel
subalgebras.
In order to describe all Borel subalgebras, the notion of odd (or isotropic) reflection was introduced many years ago, \cite{S2},~\cite{P},~\cite{DP}.
An odd reflection can not be naturally extended to
an automorphism of the Lie superalgebra. For many years a strong feeling
persisted among the experts that one should extend Weyl group
to ``Weyl groupoid''.
One attempt was made in \cite{S3}. A somewhat reminiscent construction
of groupoid was suggested by I.~Heckenberger and collaborators ~\cite{H},
\cite{HY}, see also~\cite{AA}.
More recently another notion named Weyl
groupoid was introduced by Sergeev and Veselov~\cite{SV} for
finite-dimensional superalgebras in order to describe the character ring.
In~\ref{ss:comment} we comment on these definitions.
The notion of root groupoid presented in this paper is close to the notion
defined in~\cite{S3}.
\subsubsection{}The connection between semisimple Lie algebras and root systems can be described from two opposite perspectives.
One can start with a Lie algebra, choose a Cartan subalgebra and study the geometry of the set of roots. On the other hand, one can start with a Cartan matrix
and construct a Lie algebra by generators and relations. The second approach was vastly extended to construct a very important family of infinite-dimensional Lie algebras
by Kac, Moody, Borcherds and others. Our approach follows the same pattern for construction of Lie superalgebras from combinatorial data.
\subsubsection{}
Another idea that motivated our work is the observation that the classical
Serre relation can be interpreted as reflected Chevalley relations. This led
us to the notion of root algebra which, roughly speaking, respects the
symmetries determined by a root groupoid. In many cases there is a unique
root algebra which can be defined by Chevalley relations reflected in all
possible ways. Sometimes there is a number of root algebras defined by a
given root datum. The description of all root algebras is
an open question --- we don't know the answer even for Lie algebras.
For finite dimensional and affine Lie superalgebras all Serre's relations were described in \cite{Y}, see also~\cite{Zh}. One can see from this description that
Serre's relations may involve more than two generators.
\subsection{Root groupoid}
In \cite{Kbook}, 1.1, Kac defines a realization of a Cartan matrix $A=(a_{xy})$, $x,y\in X$, as a triple
$(\mathfrak{h}, a,b)$ such that $a=\{a(x)\in\mathfrak{h}\}$, $b=\{b(x)\in\mathfrak{h}^*\}$
and $\langle a(x),b(y)\rangle=a_{xy}$. Adapting this definition to Lie superalgebras,
we add the parity function $p:X\to\mathbb{Z}_2$ on the index set $X$ and make a quadruple
$v=(\mathfrak{h},a,b,p)$ an
object of {\sl the root groupoid $\mathcal{R}$} --- the main object of our study. Every quadruple $v$ defines a Cartan matrix by the formula above.
The pair $(A,p)$ is called in this paper {\sl Cartan datum}. There are three kinds of generators
in the set of arrows in $\mathcal{R}$. Two of them are quite dull: one (a homothety) rescales
$a(x)$, another (tautological arrow) is defined by an isomorphism $\theta:\mathfrak{h}\to\mathfrak{h}'$;
the third kind are {\sl reflexions} that retain the same vector space $\mathfrak{h}$ but change the collections $a(x)$ and $b(x)$ by usual (even or odd) reflection formulas.
Each generator $f:v\to v'$ defines a linear transformation $\mathfrak{h}_v\to\mathfrak{h}_{v'}$
(it is identity for homotheties and reflexions, and $\theta$ for the tautological arrow
defined by $\theta$); two compositions of generators leading from $v$ to $v'$ are equivalent
if they define the same linear transformation. The root groupoid $\mathcal{R}$ has a lot of
components, some of them, most probably, useless. However, some connected components
(we call them admissible) lead to interesting Lie superalgebras. It is worth mentioning that
Cartan matrices $A_v$ are different even inside one component: one type of reflexions,
{\sl isotropic reflexions}, modify Cartan matrices in a certain way (see the formulas in
\ref{sss:cartanmatrix-change}).
\subsection{Root algebras}
For each $v\in\mathcal{R}$ one defines a (huge) Lie superalgebra $\widetilde\mathfrak{g}(v)$ (we call it
half-baked Lie superalgebra) basically in the same way as did our predecessors V.~Kac and R.~Moody, see~\ref{sss:half} in this paper. For an arrow $\gamma:v\to v'$ in $\mathcal{R}$ the
isomorphism
$\mathfrak{h}(\gamma):\mathfrak{h}(v)\to \mathfrak{h}(v')$ does not extend to a homomorphism of the half-baked algebras.
We define a root algebra supported on a component $\mathcal{R}_0$ of $\mathcal{R}$ as a collection of
quotients $\mathfrak{g}(v)$ of $\widetilde\mathfrak{g}(v)$ such that for any $\gamma:v\to v'$ the isomorphism
$\mathfrak{h}(\gamma)$ extends to an isomorphism $\mathfrak{g}(v)\to\mathfrak{g}(v')$.
A component $\mathcal{R}_0$ of $\mathcal{R}$ is called admissible if it admits a root algebra. Admissibility
can be expressed in terms of weak symmetricity of the Cartan matrices at $\mathcal{R}_0$, see
Theorem~\ref{thm:admissible=wsym}.
For an admissible component $\mathcal{R}_0$ there always exists an initial and a final object
in the category of root algebras. The initial root algebra $\mathfrak{g}^\mathtt{U}$ is called {\sl universal}. The final root algebra $\mathfrak{g}^\mathtt{C}$ is called {\sl contragredient}. Note that $\mathfrak{g}^\mathtt{C}$ in the admissible case is defined as the quotient of $\widetilde\mathfrak{g}(v)$ by the maximal
ideal having zero intersection with $\mathfrak{h}$. The universal root algebra $\mathfrak{g}^\mathtt{U}$ is obtained by imposing on $\widetilde\mathfrak{g}(v)$ reflected Chevalley relations --- so generalizing the classical Serre relations.
{Note that these were two different approaches of the founding fathers of
Kac-Moody Lie algebras: Victor Kac~\cite{Kbook} factored the half-baked
algebra by the maximal ideal having zero intersection with the Cartan
subalgebra, whereas Robert Moody~\cite{M} imposed on it the Serre relations.}
\subsection{Groups associated to the root groupoid}
The only algebraic invariant of a connected groupoid is the automorphism group of its
object. The group $\operatorname{Aut}_\mathcal{R}(v)$ is one of a plethora of groups we assign to an admissible
component $\mathcal{R}_0$. It acts (up to a torus) on any root Lie algebra and on the set of its
roots. For the component corresponding to a semisimple Lie algebra, $\operatorname{Aut}_\mathcal{R}(v)$ coincides with the Weyl group.
In the case of conventional Kac-Moody Lie algebras $\operatorname{Aut}_\mathcal{R}(v)$ is the product
of the Weyl group and a certain group of ``irrelevant'' automorphisms.
The group of irrelevant automorphisms $K(v)$ is very easy to describe. This is a subgroup
of automorphisms $\theta\in\mathrm{GL}(\mathfrak{h}(v))$ preserving all $b(x)\in\mathfrak{h}^*$ as well as
all $a(x)$ up to constant. It is a unipotent abelian group in the case of Kac-Moody algebras. The equality $\operatorname{Aut}_\mathcal{R}(v)=W\times K$ does not hold already for $\mathfrak{gl}(1|1)$,
see~\ref{sss:aut-gl11}.
\subsubsection{Skeleton} We will now present a combinatorial description of the quotient group $\operatorname{Aut}_\mathcal{R}(v)/K(v)$. Let $\mathtt{Sk}\subset\mathcal{R}$ (skeleton) be the subgroupoid whose arrows are
the compositions of reflexions. Denote by $\mathtt{Sk}(v)$ the connected component of $v\in\mathtt{Sk}$.
This is a contractible groupoid; it makes sense to study it as a marked graph, whose edges
are reflexions marked by the elements of the index set $X$. We denote by $\mathtt{Sk}^D(v)$ the
subset of vertices in $\mathtt{Sk}(v)$ having a Cartan datum $D$-equivalent
to $A_v$, see~\ref{dfn:Deq}. The set $\mathtt{Sk}^D(v)$ has a group structure and Proposition~\ref{prp:structure-Aut}(3) claims that there is an isomorphism
$\operatorname{Aut}_\mathcal{R}(v)/K(v)=\mathtt{Sk}^D(v)$.
\subsubsection{Weyl group}
For a vertex $v$ in an admissible $\mathcal{R}_0$ we define the Weyl group $W(v)$ (up to isomorphism, it depends on the component only) as a certain subgroup of $\mathrm{GL}(\mathfrak{h})$ generated by reflections (more precisely, by the reflections with respect to anisotropic roots, see~\ref{ss:weyl}). The Weyl group $W(v)$ is a normal subgroup of $\operatorname{Aut}_\mathcal{R}(v)$.
\subsubsection{Spine}
We define the spine $\mathtt{Sp}$ as the subgroupoid of $\mathtt{Sk}$ whose arrows are generated by isotropic reflexions only. For instance, if there are no isotropic reflexions (for example, if $p(x)=0$ for all $x$) then $\mathtt{Sp}$ has no arrows.
The connected component of $v$ in $\mathtt{Sp}$ is denoted $\mathtt{Sp}(v)$ and the intersection
$\mathtt{Sp}^D(v)=\mathtt{Sp}(v)\cap\mathtt{Sk}^D(v)$ is a subgroup. Proposition~\ref{prp:structure-Aut}
claims that $\operatorname{Aut}_\mathcal{R}(v)/K(v)=\mathtt{Sk}^D(v)$ is a semidirect product $W(v)\rtimes\mathtt{Sp}^D(v)$.
In particular, if $\mathtt{Sp}^D$ is trivial, this gives $\operatorname{Aut}(v)=W\times K$.
\subsection{Coxeter properties}
A fundamental property of Kac-Moody Lie algebras is that their Weyl group
is a Coxeter group. We generalize this result to the Weyl groups appearing in any admissible component. Similarly to the classical result, the length of an element $w\in W$ can be expressed as the number of positive anisotropic roots that become negative under $w$,
see Corollary~\ref{crl:W-len}.
An analog of the Coxeter property holds also for the skeleton $\mathtt{Sk}(v)$. The length of the
shortest path from $v$ to $v'$ in $\mathtt{Sk}(v)$ can also be expressed as the number of real positive roots that become negative, see~\ref{crl:Sk-len}.
Coxeter property for groups provides its presentation in terms of generators and relations,
with relations defined by ``pairwise interaction'' of the generators.
It turns out that a similar presentation exists for the skeleton.
In Section~\ref{sec:coxeter2} we define the notion of Coxeter graph
that generalizes that of Coxeter group, and prove that the skeleton
$\mathtt{Sk}(v)$ satisfies this property.
\subsection{Fully reflectable components}
Admissible Cartan matrices are not in general required to allow reflexions $r_x$ for all $x\in X$. We call a component $\mathcal{R}_0$ fully reflectable if all reflexions are allowed
at all vertices of $\mathcal{R}_0$. This means that $\mathtt{Sk}(v)$ is a regular graph of degree $|X|$.
In Section~\ref{sec:trichotomy} we divide all admissible indecomposable fully reflectable components into three types:
finite, affine and indefinite. This trichotomy extends the similar trichotomy for Kac-Moody Lie algebras. There is a full classification of those types that contain an isotropic
root; it has been done by C.~Hoyt and V.~Serganova, see~\cite{Hoyt}, \cite{S3}. Curiously, there are only two new indefinite series having an isotropic root; they are called $Q^\pm(m,n,k)$.
\subsection{On the (lack of) uniqueness of a root Lie superalgebra} We have already mentioned that, for an admissible component $\mathcal{R}_0$ there is an initial $\mathfrak{g}^\mathtt{U}$ and a final $\mathfrak{g}^\mathtt{C}$ root algebra supported at $\mathcal{R}_0$. The natural map $\mathfrak{g}^\mathtt{U}\to\mathfrak{g}^\mathtt{C}$ is surjective and all root algebras are factors lying in between. In Sections~\ref{sect:sym}
and \ref{sect:aff}
we study the gap between $\mathfrak{g}^\mathtt{U}$ and $\mathfrak{g}^\mathtt{C}$ in the fully reflectable case.
The result of these sections can be summarized as follows.
\begin{Thm}
Let $\mathcal{R}_0$ be an admissible indecomposable fully reflectable component. Then
$\mathfrak{g}^\mathtt{U}=\mathfrak{g}^\mathtt{C}$ except for the cases $\mathfrak{g}^\mathtt{C}=\mathfrak{gl}(1|1)$, $\mathfrak{g}^\mathtt{U}=\mathfrak{sl}(n|n)^{(i)}$,
$(i=1,2,4)$, $\mathfrak{sq}(n)^{(2)}$ and the case
when $\mathcal{R}_0$ is indefinite and nonsymmetrizable.
\end{Thm}
The similar result for symmetrizable Kac-Moody Lie algebras was proven by Gabber-Kac,
\cite{GabberKac}. Their proof was adapted to our symmetrizable case in Section~\ref{sect:sym}.
In the case when $\mathfrak{g}^\mathtt{C}=\mathfrak{gl}(1|1)$ the algebra $\mathfrak{g}^\mathtt{U}$ has dimension $(4|2)$
and the algebras $\mathfrak{g}^\mathtt{U}$ and $\mathfrak{g}^\mathtt{C}$ are the only two root algebras in this component,
see~\ref{rank1}.
Note that the explicit realization of $\mathfrak{g}^\mathtt{C}$ for $\mathfrak{g}^\mathtt{U}=\mathfrak{sl}(n|n)^{(i)}$, $(i=1,2,4)$
and $\mathfrak{sq}(n)^{(2)}$ is given in~\cite{S3}.
The results for nonsymmetrizable affine algebras, $S(2,1,b)$ and $\mathfrak{sq}(n)^{(2)}$ are new.
We also prove that if $\mathfrak{g}^{KM}\neq\mathfrak{gl}(1|1)$ then any algebra $\mathfrak{g}$
sandwiched between $\mathfrak{g}^{\mathtt{C}}$ and $\mathfrak{g}^{\mathtt{U}}$ is a root algebra.
\subsection{Examples of calculation of $\operatorname{Aut}(v)$}
In the last Section~\ref{sec:app} we compute the group $\operatorname{Aut}_\mathcal{R}(v)$ for two classes of connected
components.
The first one is the case of a ``star-shaped'' spine. It includes the algebras
$\mathfrak{sq}(3)^{(2)}$, $B(1|1)^{(1)}$, $D(2|1,a)$, $D(2|1,a)^{(1)}$, $Q^{\pm}(m,n,k)$.
Here one has $\operatorname{Aut}_\mathcal{R}(v)=W\times K$ as in this case $\mathtt{Sp}^D(v)$ is trivial.
For the same reason $\operatorname{Aut}_\mathcal{R}(v)=W$ for all finite dimensional Lie superalgebras except for
the case of $\mathfrak{gl}(n|n)$; the latter is considered in~\ref{sss:glmn}.
The second class is the class of components whose skeleton identifies with that of
$\mathfrak{sl}_n^{(1)}$. This includes the root data for $\mathfrak{sl}(k|l)^{(1)}$, $\mathfrak{sq}(n)^{(2)}$
and $S(2|1,b)$. In these cases the Weyl group $W(\mathfrak{sl}_n^{(1)})$ acts simply transitively
on the skeleta $\mathtt{Sk}(v)$. This allows one to realize the Weyl group and $\mathtt{Sk}^D(v)=\operatorname{Aut}_\mathcal{R}(v)/K(v)$ as subgroups of $W(\mathfrak{sl}_n^{(1)})$.
\subsection{Borcherds-Kac-Moody algebras}
{R.~Borcherds in \cite{Bo} introduced a generalization of Kac-Moody algebras, where the Cartan matrix is real symmetric and satisfies additional conditions. The proof of Gabber-Kac is valid for this class
(see~\cite{Kbook}, 11.13) and gives $\mathfrak{g}^{\mathtt{C}}=\mathfrak{g}^\mathtt{U}$
if the Cartan matrix is symmetrizable and satisfies the conditions
(C1')--(C3') in~\cite{Kbook}, 11.13.
Borcherds-Kac-Moody (BKM) superalgebras were studied by M.~Wakimoto in~\cite{W}. Note that any Kac-Moody algebra is a BKM algebra,
but many Kac-Moody superalgebras (including $\mathfrak{gl}(m|n)$ for $m,n>2$)
are not BKM superalgebras.}
\subsection{Comment on the groupoids studied in~\cite{HY,AA}}
\label{ss:comment}
\VH{In~\cite{HY} the authors assign a groupoid (called Coxeter groupoid)
to a collection of vectors in a vector space $\mathfrak{h}^*$ endowed with a
nondegenerate symmetric bilinear form.}
The objects of ``Coxeter groupoid'' appearing in the definition
in~\cite{HY} correspond to different choices of (accessible) Borel
subalgebras \VH{of a Kac-Moody superalgebra given by a symmetrizable Cartan datum};
thus, they correspond to the vertices of what we call a skeleton
component. The arrows are generated by reflections with respect to all
simple roots. Our result claiming coxeterity of the skeleton
(Theorem~\ref{thm:skeleton-coxeter}) means that the groupoid defined
in~\cite{HY} is contractible. For instance, it assigns to a semisimple
Lie algebra the Cayley graph of its Weyl group (which is contractible
when considered as a groupoid). In order to get for a semisimple Lie
algebra the classifying groupoid of the Weyl group instead of the
contractible Cayley graph, one has to identify the vertices having the
same Cartan matrix as it is done in~\cite{AA}.
In the present paper we do something similar to~\cite{AA}. However,
from a categorical point of view it is better to replace factoring the
set of objects of a groupoid by an equivalence relation with adding
isomorphisms connecting the objects that we are willing to declare
equivalent. This is precisely what we do. In our root groupoid we
have generators for the arrows of three different types: apart from
reflexions, we have tautological arrows and homotheties that connect
vertices with D-equivalent Cartan matrices. In absence of isotropic
reflexions (for instance for Kac-Moody algebras) the Weyl group
coincides with the automorphism group of an object of the
corresponding component of a root groupoid. However, this does not
hold in general, see~\ref{prp:structure-Aut} and Section~\ref{sec:app}.
\section{Setup}
\subsection{Groupoid of root data}
\label{ss:RDG}
For a complex vector space $\mathfrak{h}$ and a set $X$, a map $a:X\to\mathfrak{h}$
will be called injective if the induced map $\operatorname{Span}_\mathbb{C}(X)\to \mathfrak{h}$ is an injective map of vector spaces.
Once and forever we fix a finite set $X$. The cardinality of $X$
will be called {\sl the rank} of root data and of Lie superalgebras
connected to them.
\subsubsection{}
We now define {\sl the root groupoid} $\mathcal{R}$.
The objects of $\mathcal{R}$ (the root data) are the quadruples
$(\mathfrak{h}, a:X\to \mathfrak{h}, b:X\to \mathfrak{h}^*, p:X\to\mathbb{Z}_2)$ where
$\mathfrak{h}$ is a finite dimensional vector space over $\mathbb{C}$
such that $a,b$ are injective.
We will define the arrows of $\mathcal{R}$ by generators and
relations.
We have generating arrows of three types:
\begin{itemize}
\item[1.] a reflexion~\footnote{In this paper we follow the idea of
K.~Chukovsky~\cite{Krokodil} to use synonyms for different
(although related) objects. In {\sl loc. cit} these are Hyppopotamus and Behemoth that are synonymous in Russian. In this paper we will later introduce {\sl reflections} generating the Weyl group, that will be related to, but different from the reflexions defined now.}
$r_x:(\mathfrak{h},a,b,p) \to (\mathfrak{h},a',b',p')$
defined by a source $(\mathfrak{h},a,b,p)$ and
{\sl a reflectable element}
$x\in X$, see~\ref{ss:reflexions} for the explicit formulas;
\item[2.] a tautological arrow $t_\theta:(\mathfrak{h},a,b,p)\to
(\mathfrak{h}',a',b',p)$ determined by $\theta:\mathfrak{h}\stackrel{\sim}{\to}
\mathfrak{h}'$. Here $a':=\theta\circ a$, $b'=((\theta^*)^{-1})
\circ b$.
\item[3.] a homothety $h_\lambda:(\mathfrak{h},a,b,p)\to (\mathfrak{h},a',b,p)$
determined by $\lambda:X\to\mathbb{C}^*$, with
$a'(x)=\lambda(x)a(x)$.
\end{itemize}
This collection of objects and arrows (=quiver) generates a free category denoted (temporarily) $\mathcal{F}$.
The groupoid $\mathcal{R}$ will be defined as the one with the same objects as
$\mathcal{F}$, and whose arrows are equivalence classes of the arrows above. The
equivalence relation is defined below.
First of all, we define a functor $\mathfrak{h}:\mathcal{F}\to\mathtt{Vect}$
to the category of vector spaces carrying $(\mathfrak{h},a,b,p)$
to $\mathfrak{h}$, carrying the reflexions and the homotheties
to the identities, and tautological arrows to the respective
isomorphisms of the underlying vector spaces.
\subsubsection{}
\label{sss:eq}
The equivalence relation on each Hom-set of $\mathcal{F}$
is defined as follows: two compositions of arrows
$(\mathfrak{h},a,b,p)\to(\mathfrak{h}',a',b',p')$ are equivalent
if they induce the same isomorphism $\mathfrak{h}\to\mathfrak{h}'$.
\subsection{Formulas for the reflexions}
\label{ss:reflexions}
Any root datum $(\mathfrak{h},a,b,p)$ determines a Cartan matrix
$A(a,b)=(a_{xy})_{x,y\in X}$ given by the formula
$$a_{xy}:=\langle a(x),b(y)\rangle.$$
\begin{dfn}
An element $x\in X$ is called {\sl reflectable} at $v=(\mathfrak{h},a,b,p)$ if
the following conditions hold.
\begin{itemize}
\item[1.] If $a_{xx}=0$ then $p(x)=1$;
\item[2.] If $a_{xx}\ne 0$ and $p(x)=0$ then $\frac{2a_{xy}}{a_{xx}}
\in\mathbb{Z}_{\leq 0}$ for all $y\ne x$.
\item[3.] If $a_{xx}\ne 0$ and $p(x)=1$ then $\frac{a_{xy}}{a_{xx}}
\in\mathbb{Z}_{\leq 0}$ for all $y\ne x$.
\end{itemize}
\end{dfn}
\subsubsection{}
\label{sss:reflexionformulas}
Let $x\in X$ be reflectable at $v=(\mathfrak{h},a,b,p)$. The reflexion $r_x:v\to
v'=(\mathfrak{h},a',b',p')$ is defined as follows.
\begin{itemize}
\item[(anisotropic)] If $a_{xx}\not=0$, then $p':=p$ and
$$a'(y):=a(y)- 2\frac{a_{yx}}{a_{xx}}a(x),\ \ \
b'(y):=b(y)- 2\frac{a_{xy}}{a_{xx}}b(x).$$
\item[(isotropic)] If $a_{xx}=0$ then $p(x)=1$ and
$$(a'(y),b'(y),p'(y)):=\left\{\begin{array}{l}
(-a(x),-b(x),p(x)) \ \ \ \ \ \text{ if } x=y,\\
(a(y),b(y),p(y)) \ \ \ \ \ \ \ \ \ \text{ if } x\not=y,\ \ a_{xy}=0,\\
(a(y)+\frac{a_{yx}}{a_{xy}}a(x),
b(y)+b(x), 1+p(y))
\ \ \text{ if } a_{xy}\not=0.\end{array}
\right.$$
\end{itemize}
\begin{dfn}
The pair $(A=\{a_{xy}\}, p)$ will be called {\sl Cartan datum} for $v$.
\end{dfn}
Note that the reflectability of $x\in X$, as well as the formulas for the reflexion $r_x$
depend only on the Cartan datum.
\subsubsection{}
\label{sss:cartanmatrix-change}
Let us indicate what happens to a Cartan matrix under a reflexion.
Anisotropic reflexions preserve the Cartan matrix. If $r_x:v\to v'$ is
an isotropic reflexion ($a_{xx}=0$), the Cartan matrix $(a'_{yz})$ is given by the following formulas
$$\begin{array}{ll}
a'_{xy}=-a_{yx}, \\
a'_{yx}=-a_{xy},\\
a'_{yy}=\left\{\begin{array}{ll}
a_{yy}+2a_{yx}&\text{ if } a_{xy}\ne 0\\
a_{yy}&\text{ if } a_{xy}=0.\end{array}
\right. \\
a'_{yz}=\left\{\begin{array}{ll}
a_{yz} \ \ \ \ \ & \text{ if }a_{xz}=0, x,y\not=z,\\
a_{yz}+a_{yx} &\text{ if } a_{xz}\not=0, a_{xy}=0, x,y\not=z\\
a_{yz}+a_{yx}(1+\frac{a_{xz}}{a_{xy}}) & \text{ if } a_{xz}\not=0, a_{xy}\not=0.\end{array}
\right.
\end{array}$$
\begin{PRP}
The category $\mathcal{R}$ is a groupoid.
\end{PRP}
\begin{proof}
It is enough to verify that each generating arrow in
$\mathcal{F}$ has invertible image in $\mathcal{R}$. First of all,
in our category the composition of two tautological arrows is
tautological, defined by the composition of the
corresponding isomorphisms $\mathfrak{h}\stackrel{\sim}{\to}\mathfrak{h}'\stackrel{\sim}{\to}\mathfrak{h}''$. Similarly,
composition of two homotheties is a homothety. This implies
that tautological arrows and homotheties are invertible.
Invertibility of reflexions follows from the formulas:
one has $r_x^2=\mathrm{id}$ for all $x$ (this is an explicit calculation).
\end{proof}
Note the following observation.
\begin{lem}
\label{lem:sym-stable}
All reflexions preserve the symmetricity of a Cartan matrix.
\end{lem}
\begin{proof}
Anisotropic reflexion does not change the Cartan matrix. Isotropic reflexions do change, but the resulting Cartan matrix remains symmetric if the original matrix was symmetric. This results from a direct calculation.
\end{proof}
\begin{dfn}
A connected component $\mathcal{R}_0$ of $\mathcal{R}$ is called symmetrizable if there
exists $v\in\mathcal{R}_0$ having a symmetric Cartan matrix.
\end{dfn}
Note that $\mathcal{R}_0$ is symmetrizable if all Cartan matrices of $v'\in\mathcal{R}_0$
are symmetrizable in the sense of Kac~\cite{Kbook}.
\subsection{Properties}
\subsubsection{}
\label{sss:comcom}
One has obviously $t_\theta\circ t_{\theta'}=t_{\theta\circ\theta'}$ and
$h_\lambda\circ h_{\lambda'}=h_{\lambda\lambda'}$.
The morphisms $r_x$, $t_\theta$ and $h_\lambda$ commute with each other.
\
The root groupoid $\mathcal{R}$ consists of connected
components some of which will lead to
interesting Lie superalgebras.
We present below properties of a component $\mathcal{R}_0$ of $\mathcal{R}$
that will be relevant to Lie theory.
This is weak symmetricity.
\begin{dfn}
\label{dfn:quasisym}
$ $
\begin{itemize}
\item[1.] A root datum is {\sl locally weakly symmetric} if
$a_{xy}=0$ implies $a_{yx}=0$ for any reflectable $x$.
\item[2.] A root datum is weakly symmetric
if all root data in its connected component are locally weakly symmetric.
\end{itemize}
\end{dfn}
\begin{rem}
Let $v\in\mathcal{R}$. The group of automorphisms $\operatorname{Aut}_\mathcal{R}(v)$ acts on
$\mathfrak{h}(v)$. This action is faithful by definition of the equivalence
relation on the Hom sets of $\mathcal{F}$, see~\ref{sss:eq}.
\end{rem}
\begin{rem}
The root groupoid $\mathcal{R}$ is an object of ``mixed'' nature.
It is a groupoid, but its objects and Hom sets carry an extra information
(markings $a,b,p$, generators $r_x,t_\theta,h_\lambda$ for arrows).
This is why we cannot easily replace $\mathcal{R}$ with any equivalent groupoid
(for instance, leaving only one object for each connected component).
Nevertheless, we can safely assume that $\mathfrak{h}$ is the same vector space at
all objects of a given connected component $\mathcal{R}_0$, allowing however
the tautological arrows $t_\theta$ for automorphisms $\theta:\mathfrak{h}\to\mathfrak{h}$.
\end{rem}
\begin{rem}
Tautological arrows and anisotropic
reflexions (those with $a_{xx}\not=0$) preserve the Cartan datum.
Homotheties also preserve local weak symmetricity.
Isotropic reflexions usually do not satisfy this property. For this reason admissible root data with isotropic reflexions can be classified under the assumption
that all elements $x\in X$ are reflectable at every vertex, \cite{Hoyt}.
\end{rem}
\begin{dfn}
\label{dfn:Deq}
Two Cartan data, $(A,p)$ and $(A',p')$, will be called $D$-equivalent if $p=p'$ and there exists
an invertible diagonal matrix $D$ such that $A'=DA$.
\end{dfn}
Obviously, homotheties carry a Cartan datum to a $D$-equivalent one.
\begin{rem}
In studying a connected component $\mathcal{R}_0$ of $\mathcal{R}$ it is often important to describe Cartan data $(A(v),p)$ at all vertices $v\in\mathcal{R}_0$, up to $D$-equivalence. Since only isotropic reflexions
change the Cartan data, it is sufficient to perform only sequences of isotropic reflexions, see~\ref{sss:spine}.
\end{rem}
\subsection{Examples: reflectability}
\subsubsection{}
\label{ex:nonreflectable}
We present an example of a reflexion $r_x:v\to v'$ such that
all $y\in X$ are reflectable at $v$ but some are not reflectable
at $v'$.
Take the root datum $v$ with $X=\{x,y\}$, the Cartan matrix
$\begin{pmatrix} 0 &-s\\-s &1\end{pmatrix}$, $s\in\mathbb{N}$, $p(x)=p(y)=1$. Then
$x$ and $y$ are reflectable at $v$. For
the reflexion $r_x:v\to v'$
the reflected Cartan matrix is
$\begin{pmatrix} 0& s\\s &1-2s\end{pmatrix}$
and $p'(x)=1$, $p'(y)=0$.
Thus $y$ is reflectable at $v'$ only if $\frac{2s}{2s-1}\in\mathbb{N}$, that is
for $s=0,1$.
\subsection{Examples: calculation of $\operatorname{Aut}_\mathcal{R}(v)$}
\subsubsection{Semisimple case}
\label{sss:ss}
Let $v=(\mathfrak{h},a,b,p)$ represent a root system of a finite dimensional semisimple Lie algebra. This
means that $p(x)=0$, $a:X\to\mathfrak{h}$ is a set of simple coroots
and $b:X\to\mathfrak{h}^*$ is the set of simple roots. Both
$a$ and $b$ give bases in $\mathfrak{h}$ and $\mathfrak{h}^*$. Let us calculate
the group of automorphisms of $(\mathfrak{h},a,b,0)$.
Any reflexion $r_x:(\mathfrak{h},a,b,0)\to(\mathfrak{h},a',b',0)$ gives rise
to an automorphism $s_x:v\to v$, $s_x=t_{s_{b(x)}}\circ r_x$ where
the automorphism $s_{b(x)}:\mathfrak{h}\to\mathfrak{h}$ of $\mathfrak{h}$ is the standard reflection on $\mathfrak{h}$ with respect
to $b(x)\in\mathfrak{h}^*$. Note that $s_x:v\to v$
induces precisely the automorphism $s_{b(x)}:\mathfrak{h}\to\mathfrak{h}$, so that the assignment $s_{b(x)}\mapsto s_x$ is compatible with the action of the Weyl group
$W$ and of $\operatorname{Aut}_\mathcal{R}(v)$ on $\mathfrak{h}$. Since the actions are faithful,
this defines an injective group homomorphism
$$
i:W\to\operatorname{Aut}_\mathcal{R}(v).
$$
We claim that it is bijective. In fact,
any automorphism $\eta:v\to v$ in $\mathcal{R}$ is a composition
of reflexions $r_x$, tautological arrows and homotheties.
Since reflexions, tautological arrows and homotheties commute, one
can, using~\ref{sss:comcom}, present
\begin{equation}
\label{eq:ss-deco}
\eta=h_\lambda\circ t_\theta\circ i(w),
\end{equation}
for a certain $w\in W$. It remains to verify that if $h_\lambda\circ t_\theta\in\operatorname{Aut}_\mathcal{R}(v)$, then it is the identity. Since $t_\theta$ does not change the Cartan matrix, $h_\lambda=\mathrm{id}$. Since any automorphism of
$v$ carries $a(x)$ and $b(x)$ to themselves, and $a(x)$ generate $\mathfrak{h}$,
$\theta=\mathrm{id}$.
\subsubsection{The case of Kac-Moody algebras}
\label{sss:KMexample}
In the case when $(\mathfrak{h},a,b,0)$
has the Cartan matrix satisfying the conditions of~\cite{Kbook}, 1.1,
the calculation of~\ref{sss:ss} works almost as well.
Let $W$ denote the Weyl group and let $\widetilde W=\operatorname{Aut}_\mathcal{R}(v)$.
We have a group homomorphism $i:W\to\widetilde W$ defined exactly as in the
semisimple case. Precisely as in the semisimple case we have a
decomposition~(\ref{eq:ss-deco}) of an automorphism $\eta\in\widetilde W$
and deduce that $h_\lambda=\mathrm{id}$ as the Cartan matrix has no zero rows.
Denote
$$
K=\{\theta:\mathfrak{h}\to\mathfrak{h}|\theta(a(x))=a(x),\theta^*(b(x))=b(x),\ x\in X\}.
$$
Any $\theta\in K$ commutes with $s_{b(x)}:\mathfrak{h}\to\mathfrak{h}$.
This implies that $\widetilde W=W\times K$.
Let us show $K$ is a commutative unipotent group.
Denote $A\subset\mathfrak{h}$ and $B\subset\mathfrak{h}^*$ the subspaces spanned by the images of $a$ and $b$. One has $\dim A=\dim B=|X|$ and $\dim\mathfrak{h}=2|X|-r$ where $r$ is the rank of the
Cartan matrix. This is equivalent to saying that the orthogonal complement $B^\perp\subset\mathfrak{h}$ of $B$ lies in $A$.
If $\theta$ is an automorphism of the triple $(\mathfrak{h},a,b)$,
$\theta-1$ vanishes on $A$ and has image in $B^\perp$.
This means that $(\theta-1)^2=0$. Moreover, any two such automorphisms
commute. The dimension of $K$ is $(|X|-r)^2$.
\subsubsection{Root datum for $\mathfrak{gl}(1|1)$}
\label{sss:aut-gl11}
We assume $\dim(\mathfrak{h})=2$, $X=\{x\}$, $a=a(x)\in\mathfrak{h}$, $b=b(x)\in\mathfrak{h}^*$
so that $a\ne 0, b\ne 0$ but $\langle b,a\rangle=0$. The only isotropic reflexion
carries the quadruple $v=(\mathfrak{h},a,b,p=1)$ to $v'=(\mathfrak{h},-a,-b,1)$.
The tautological arrow $t_{-1}:v'\to v$ is defined by $-\mathrm{id}:\mathfrak{h}\to\mathfrak{h}$.
The composition $t_{-1}\circ r_x$ is an automorphism of $v$ of order $2$.
It is easy to see that $\operatorname{Aut}(v)=\mathbb{Z}_2\times K$ where $\mathbb{Z}_2$
is generated by the automorphism described above and
$K=\{\theta:\mathfrak{h}\to\mathfrak{h}|\ \theta(a)\in\mathbb{C}^*a,\theta^*(b)=b\}$.
For more examples see~\ref{sss:gl12} and Section~\ref{sec:app}.
\section{Root Lie superalgebras}
\label{sec:root}
In this section we define root Lie superalgebras corresponding to
certain (admissible) connected components of the groupoid $\mathcal{R}$ of root
data.
\subsection{Half-baked Lie superalgebra}
\subsubsection{}
\label{sss:half}
Let $v=(\mathfrak{h},a,b,p)\in\mathcal{R}$. We assign to $v$ a Lie superalgebra
$\widetilde\mathfrak{g}(v)$ generated by $\mathfrak{h}=\mathfrak{h}(v)$, $\tilde e_x,\tilde f_x,\ x\in X$,
with the parity given by $p(\mathfrak{h})=0,\ p(\tilde e_x)=p(\tilde f_x)=p(x)$,
subject to the relations
\begin{itemize}
\item[1.] $[\mathfrak{h},\mathfrak{h}]=0$,
\item[2.] $[h,\tilde{e}_x]=\langle b(x), h\rangle \tilde{e}_x,
[h,\tilde{f}_x]=-\langle b(x), h\rangle \tilde{f}_x$
\item[3.] $[\tilde e_x,\tilde f_y]=0$ for $y\ne x$
\item[4.] $[\tilde e_x,\tilde f_x]=a(x)$
\end{itemize}
for each $x\in X$.
We call $\widetilde\mathfrak{g}(v)$ {\sl the half-baked Lie superalgebra} defined by
the root datum $v\in\mathcal{R}$.
\subsubsection{}
\label{sss:properties}
The following properties of $\widetilde\mathfrak{g}:=\widetilde\mathfrak{g}(v)$ are proven in Thm. 1.2 of~\cite{Kbook}
~\footnote{It is assumed in ~\cite{Kbook} that $\widetilde\mathfrak{g}$ is a Lie algebra. The proof, however, works verbatim for superalgebras.}.
\begin{itemize}
\item[1.] The algebra $\mathfrak{h}$ acts diagonally on
$\widetilde\mathfrak{g}$. We denote by $\widetilde\mathfrak{g}_\mu$ the weight space of weight
$\mu$, so that
$\widetilde\mathfrak{g}=\oplus_{\mu\in\mathtt{Span}_\mathbb{Z}(b)}\widetilde\mathfrak{g}_\mu$, where $\mathtt{Span}_\mathbb{Z}(b)$ denotes the abelian subgroup of $\mathfrak{h}^*$ generated by $b(x), x\in X$.
\item[2.]There is a standard triangular decomposition
$$
\widetilde\mathfrak{g}=\widetilde\mathfrak{n}^+\oplus\mathfrak{h}\oplus\widetilde\mathfrak{n}^-,
$$
where $\widetilde\mathfrak{n}^+$ is freely generated by
$\tilde e_x$, $x\in X$
and $\widetilde\mathfrak{n}^-$ is freely generated by $\tilde f_x$.
\item[3.] For each $x\not=y$ one has
$\widetilde\mathfrak{g}_{jb(x)+b(y)}=0$ for $j\not\in\mathbb{Z}_{\geq 0}$ and
$\widetilde\mathfrak{g}_{jb(x)+b(y)}$ is spanned by $(\mathrm{ad} \tilde{e}_x)^j\tilde{e}_y$.
\end{itemize}
The following theorem is very similar to \cite{Kbook}, Thm. 2.2.
\begin{prp}
\label{prp:likekac22}
Let $v\in\mathcal{R}$ have a symmetric Cartan matrix $(a_{xy})$.
Let $(\cdot|\cdot)$ be a nondegenerate symmetric form on $\mathfrak{h}$
satisfying the condition
\begin{itemize}
\item[] $(a(x)|h)=\langle b(x),h\rangle$ for any $x\in X$, $h\in\mathfrak{h}$
\footnote{Such form exists as the Cartan matrix is symmetric.}.
\end{itemize}
Then there exists a unique extension of $(\cdot|\cdot)$ to an invariant
symmetric bilinear form on $\widetilde\mathfrak{g}=\widetilde\mathfrak{g}(v)$. This extension
enjoys the following properties.
\begin{itemize}
\item[1.] $(\tilde e_x|\tilde f_y)=\delta_{xy}$.
\item[2.] $(\widetilde\mathfrak{g}_\alpha|\widetilde\mathfrak{g}_\beta)=0$ unless $\alpha+\beta=0$.
\item[3.] $[z,t]=(z|t)\nu(\alpha)$ for $z\in\widetilde\mathfrak{g}_\alpha$,
$t\in\widetilde\mathfrak{g}_{-\alpha}$, where $\nu:\mathfrak{h}^*\to\mathfrak{h}$ is the isomorphism
defined by the original nondegenerate form.
\end{itemize}
\end{prp}
\qed
\subsubsection{}
\label{sss:automorphism}
The algebra $\widetilde\mathfrak{g}(v)$ admits a standard {\sl superinvolution} $\theta$,
that is an automorphism whose square is $\mathrm{id}$ on the even part
and $-\mathrm{id}$ on the odd part of $\widetilde\mathfrak{g}(v)$.
We will define the superinvolution $\theta$ by the following formulas.
\begin{itemize}
\item $\theta|_\mathfrak{h}=-\mathrm{id}$.
\item $\theta(\tilde e_x)=\tilde f_x$.
\item $\theta(\tilde f_x)=(-1)^{p(x)}\tilde e_x$.
\end{itemize}
\subsubsection{Example: rank one}
\label{sss:rank1-wt}
Let $X=\{x\}$. The Cartan matrix
is a $1\times 1$ matrix $(a_{xx})$.
If $a_{xx}\not=0$ and $p(x)=0$, we have $\widetilde\mathfrak{g}=\mathfrak{s}l_2$;
if $p(x)=1$, we have $\widetilde\mathfrak{g}=\mathfrak{osp}(1|2)$.
If $a_{xx}=0$ and $p(x)=0$, $\widetilde\mathfrak{g}$ is the $(4|0)$-dimensional algebra
$a(x),d, e_x,f_x$,
with $\mathfrak{h}=\mathtt{Span}(a(x),d)$~\footnote{this is the smallest possible $\mathfrak{h}$.
The general case can be treated using~\ref{sss:decomposable}.},
$a(x)=[e_x,f_x]$ central and $[d,e_x]=e_x$, $[d,f_x]=-f_x$.
In the remaining case
$p(x)=1$ and $a_{xx}=0$. The algebra $\widetilde\mathfrak{g}$ has dimension $(4|2)$
with a basis
$$a(x),d, e_x,f_x,e_x^2,f_x^2,$$
($e_x$ and $f_x$ odd)
with $\mathfrak{h}=\mathtt{Span}(a(x),d)$,
$a(x)=[e_x,f_x]$ central and $[d,e_x]=e_x$, $[d,f_x]=-f_x$.
\subsubsection{}
\label{sss:properties-2}
The space
$[\widetilde\mathfrak{g}_{jb(x)+b(y)},\widetilde\mathfrak{g}_{-jb(x)-b(y)}]$ lies in
$\mathfrak{h}$ for any $j\geq 0$ and is at most one-dimensional.
We wish to describe, under certain assumptions, the greatest
value of $j$ for which it is nonzero.
Assume that $x\ne y\in X$, $x$ is reflectable at $v$.
Let $r_x:v\to v'=(\mathfrak{h},a',b',p')$ be the corresponding reflexion in
$\mathcal{R}$.
Choose $j_0$ such that $b(y)+j_0b(x)=b'(y)$,
that is $j_0=-2\frac{a_{xy}}{a_{xx}}$
for $a_{xx}\not=0$,
$j_0=1$ for $a_{xx}=0$, $a_{xy}\ne 0$,
and $j_0=0$ for $a_{xx}=0=a_{xy}$.
\begin{lem}
\label{lem:rk2-ideal}
Assume that $X=\{x,y\}$ and $x$ is reflectable at $v=(\mathfrak{h},a,b,p)$.
Let $j_0$ be defined as above.
Define the ideal $I$ of $\widetilde\mathfrak{g}=\widetilde\mathfrak{g}(v)$ generated by
the elements
\begin{equation}
\label{eq:rk2-ideal}
E:=(\mathrm{ad}\tilde e_x)^{j_0+1}\tilde e_y,\
F:=(\mathrm{ad}\tilde f_x)^{j_0+1}\tilde f_y.
\end{equation}
Then
\begin{itemize}
\item[(a)] If $a_{xx}=0$ then the ideal $I'$ generated by $\tilde e_x^2$,
$\tilde f_x^2$ satisfies $I'\cap\mathfrak{h}=0$.
\item[(b)] If $a_{xx}=0$, $a_{xy}\ne 0$ then $I\subset I'$ and
$I=I'$ iff $a_{yx}\ne 0$.
\item[(c)] $I\cap\mathfrak{h}\ne 0$ if and only if $a_{xx}\ne 0, a_{yx}\ne 0$ and $a_{xy}=0$.
\end{itemize}
\end{lem}
\begin{proof}
(a)
Let $a_{xx}=0$.
Then $p(x)=1$ and
\begin{equation}\label{eff}
[\tilde e_x,\tilde f_x^{2}]=0.\end{equation}
Since $[\tilde e_y,\tilde f_x^{2}]=0$ we obtain $[\widetilde\mathfrak{n}^+, \tilde f_x^2]=0$;
similarly, $[\widetilde\mathfrak{n}^-, \tilde e_x^2]=0$. This gives
$I'\cap\mathfrak{h}=0$ and establishes (a).
(b) Take
$a_{xx}=0$, $a_{xy}\not=0$. Then $j_0=1$ so
$$
F=(\mathrm{ad}\tilde f_x)^{2}\tilde f_y=(\mathrm{ad}\tilde f_x^{2})\tilde f_y,\ \ \
E=(\mathrm{ad}\tilde e_x^{2})\tilde e_y
$$
In particular, $I\subset I'$ and
$$[\tilde e_y, F]=\pm [\tilde f_x^{2},
a(y)]=\pm 2 a_{yx}\tilde f_x^2.$$
This gives $I=I'$ if $a_{yx}\not=0$. Consider the case $a_{yx}=0$. By above,
$[\tilde e_y, F]=0$. By~(\ref{eff}) we have
$[\tilde e_x, F]=0$. Hence $[\widetilde\mathfrak{n}^+, F]=0$ and so
$F\not\in I'$. This completes the proof of (b).
(c) By (a), (b) it follows that $I\cap \mathfrak{h}=0$ if $a_{xx}=0$, $a_{xy}\not=0$.
Therefore we may assume that $a_{xy}=a_{yx}=0$ or $a_{xx}\not=0$.
It is enough to verify that $[\tilde e_z,F]=[\tilde f_z,E]=0$
for $z=x,y$. These formulas are similar so we will check only the formula
$[\tilde e_z,F]=0$.
If $a_{xy}=a_{yx}=0$, then $j_0=0$ and
$$[\tilde e_x, F]=[\tilde e_x, [\tilde f_x,\tilde f_y]]=[[\tilde e_x, \tilde f_x],\tilde f_y]=[a(x),\tilde f_y]=-a_{xy}\tilde f_y=0$$
as well as $[\tilde e_y, F]=\pm a_{yx}\tilde f_x=0$
as required.
Consider the case when $a_{xx}, a_{xy}, a_{yx}\not=0$. Then $j_0=-2\frac{a_{xy}}{a_{xx}}$.
Recall that $\tilde f_x,\tilde e_x$
generate $\mathfrak{s}l_2$ if $p(x)=0$ and $\mathfrak{osp}(1|2)$ if $p(x)=1$. Since
$[\tilde e_x,\tilde f_y]=0$, a direct computation implies
$$(\mathrm{ad} \tilde e_x)(\mathrm{ad}\tilde f_x)^{j_0+1}\tilde f_y=0.$$
On the other hand, $[\tilde e_y,\tilde f_x]=0$ implies
$$[\tilde e_y, F]=\pm (\mathrm{ad}\tilde f_x)^{j_0+1} a(y)=\pm a_{yx}(\mathrm{ad}\tilde f_x)^{j_0} \tilde f_x=0$$
since $[\tilde f_x,\tilde f_x]=0$ for $p(x)=0$ and
$[\tilde f_x,[\tilde f_x,\tilde f_x]]=0$ if $p(x)=1$ (in the case $a_{xx}\not=0$, $p(x)=1$ the condition that $x$ is reflectable at $v$ implies that $j_0$ is even, in particular, $j_0\geq 2$).
Hence $[\tilde e_y, F]=[\tilde e_x, F]=0$ as required.
Finally, if $a_{xx}\ne 0$, $a_{xy}=0$, $a_{yx}\ne 0$, then $b'(y)=b(y)$ and $a'(y)=a(y)-2\frac{a_{yx}}{a_{xx}}a(x)$. Furthermore,
$E=[\tilde{e}_x,\tilde{e}_y]$, so that
$$
[\tilde f_x,[\tilde{f}_y,E]]=\pm[\tilde f_x,[\tilde{e}_x,a(y)]]=\pm a_{yx}a(x)\ne 0.
$$
\end{proof}
\begin{prp}
\label{prp:bracket}
Assume that $x\not=y\in X$ and $x$ is reflectable.
We also assume that if $a_{xx}\ne 0$ and $a_{xy}=0$ then $a_{yx}=0$.
\begin{itemize}
\item[1.]The bracket
$[\widetilde\mathfrak{g}_{jb(x)+b(y)},\widetilde\mathfrak{g}_{-jb(x)-b(y)}]$ is zero for $j>j_0$.
\item[2.]$[\widetilde\mathfrak{g}_{b'(y)},\widetilde\mathfrak{g}_{-b'(y)}]$ is spanned by $a'(y)$.
\end{itemize}
\end{prp}
\begin{proof}
The claim immediately reduces to the case $X=\{x,y\}$.
Denote by
$I$ the ideal of $\widetilde\mathfrak{g}$ generated by the elements
$$
E:=(\mathrm{ad}\tilde e_x)^{j_0+1}\tilde e_y,\
F:=(\mathrm{ad}\tilde f_x)^{j_0+1}\tilde f_y.
$$
By Lemma~\ref{lem:rk2-ideal} we have $I\cap\mathfrak{h}=0$.
The homomorphism $\widetilde\mathfrak{g}\to\mathfrak{g}=\widetilde\mathfrak{g}/I$ is identity on
$\mathfrak{h}$, so both claims of the proposition would follow from the similar claims for $\mathfrak{g}$. Since the first claim of the proposition tautologically holds for $\mathfrak{g}$, we have proven it also for $\widetilde\mathfrak{g}$.
To prove the second claim for $\mathfrak{g}$,
we will study the isotropic and the anisotropic cases separately.
{\sl The case $a_{xx}\ne 0$.}
The rank one subalgebra defined by
$\{x\}\in X$ contains a copy of $\mathfrak{s}l_2$.
$\mathfrak{g}$ is integrable as an $\mathfrak{s}l_2$-module as it is generated by
the elements on which $\tilde e_x,\tilde f_x$ act locally nilpotently,
see~\cite{Kbook}, Lemma 3.4.
Therefore, the automorphism $\sigma:\mathfrak{g}\to\mathfrak{g}$
given by the formula
\begin{equation}
\label{eq:sigma:gtog}
\sigma=\exp(\tilde f_x)\circ\exp(-\tilde e_x)\circ\exp(\tilde f_x),
\end{equation}
is defined. Its restriction on $\mathfrak{h}$ is given by the standard
formula
$
\sigma(h)=h-\frac{2}{a_{xx}}\langle h,b(x)\rangle a(x),
$
so
$\sigma(\mathfrak{g}^\mathtt{U}_\mu)=\mathfrak{g}^\mathtt{U}_{\sigma(\mu)}$,
where the action of $\sigma$ on $\mathfrak{h}^*$ is induced by its action on
$\mathfrak{h}$. The latter implies the second claim of the proposition for the algebra $\mathfrak{g}$.
{\sl The case $a_{xx}=0$. } If $a_{xy}=0$, the second claim is immediate.
In the case $a_{xy}\ne 0$ a direct calculation shows that
$$
[[\tilde e_x,\tilde e_y],[\tilde f_x,\tilde f_y]]=
(-1)^{p(y)}a_{xy}(a(y)+\frac{a_{yx}}{a_{xy}}a(x)).
$$
\end{proof}
\subsection{Coordinate systems and root algebras}
\begin{dfn}
Let $v\in\mathcal{R}$.
A $v$-coordinate system on a Lie superalgebra $\mathfrak{g}$
is a surjective homomorphism $\widetilde\mathfrak{g}(v)\to\mathfrak{g}$ whose kernel has zero intersection with $\mathfrak{h}(v)$.
\end{dfn}
In other words, a $v$-coordinate system on $\mathfrak{g}$ consists of an
injective map of Lie superalgebras $\mathfrak{h}\to\mathfrak{g}$ ($\mathfrak{h}$ is even commutative), and a collection of generators $e_x,f_x$ such that the
relations 1--4 of \ref{sss:half} hold.
Here is our main definition.
\begin{dfn}
Let $\mathcal{R}_0\subset\mathcal{R}$ be a connected component.
A root Lie superalgebra $\mathfrak{g}$ supported on $\mathcal{R}_0$ is a collection
of Lie superalgebras $\mathfrak{g}(v),\ v\in\mathcal{R}_0$, endowed with
$v$-coordinate systems so that for any $\alpha:v\to v'$ in $\mathcal{R}_0$
there exists an isomorphism $a:\mathfrak{g}(v)\to\mathfrak{g}(v')$ extending the isomorphism $\mathfrak{h}(\alpha):\mathfrak{h}(v)\to\mathfrak{h}(v')$.
\end{dfn}
Let $\mathfrak{g}$ be a root Lie superalgebra at $\mathcal{R}_0$. There is a weight space decomposition
$$
\mathfrak{g}(v)=\mathfrak{h}(v)\oplus\bigoplus_{\mu\in\Delta(v)}\mathfrak{g}(v)_\mu
$$
with $\Delta(v)\subset\mathtt{Span}_\mathbb{Z}(b)$. The elements of $\Delta(v)$ are called
{\sl the roots} of $\mathfrak{g}$ (at $v$). The elements $b(x),\ x\in X$, are {\sl the simple roots}
at $v$. Any $\alpha:v\to v'$ carries the root decomposition at $v$ to that at $v'$.
\begin{dfn}
A component $\mathcal{R}_0$ of $\mathcal{R}$ is called {\sl admissible} if it admits
a root Lie superalgebra.
\end{dfn}
\subsubsection{}
Let $v\in\mathcal{R}$. The half-baked algebra $\widetilde\mathfrak{g}(v)$ has a triangular decomposition. This implies the existence of the maximal
ideal $\mathfrak{r}(v)$ having zero intersection with $\mathfrak{h}(v)$. If $\mathcal{R}_0$ is admissible, then
the collection of $\mathfrak{g}^\mathtt{C}(v)=\widetilde\mathfrak{g}(v)/\mathfrak{r}(v)$ is a root
Lie superalgebra supported at $\mathcal{R}_0$.
In fact, given a root algebra $\mathfrak{g}$ with $\mathfrak{g}(v)=\widetilde\mathfrak{g}(v)/I(v)$, the quotient ideal
$\bar\mathfrak{r}(v)=\mathfrak{r}(v)/I(v)$ is the maximal ideal in $\mathfrak{g}(v)$ having zero
intersection with $\mathfrak{h}(v)$. Obviously, any isomorphism
$a:\mathfrak{g}(v)\to\mathfrak{g}(v')$ over $\alpha:v\to v'$ in $\mathcal{R}$ carries
$\bar\mathfrak{r}(v)$ to $\bar\mathfrak{r}(v')$, and therefore induces an isomorphism
$\mathfrak{g}^\mathtt{C}(v)\to\mathfrak{g}^\mathtt{C}(v')$.
We call the collection
$\mathfrak{g}^\mathtt{C}=\{\mathfrak{g}^\mathtt{C}(v)\}_{v\in\mathcal{R}_0}$ the {\sl contragredient} Lie superalgebra
supported at an admissible component $\mathcal{R}_0$. In other words,
the contragredient
Lie superalgebra $\mathfrak{g}^\mathtt{C}$ is the terminal object in the category of
root Lie superalgebras
supported at an admissible component $\mathcal{R}_0$.
The superinvolution $\theta$ of $\widetilde\mathfrak{g}$ defined in
\ref{sss:automorphism} induces an automorphism of $\mathfrak{g}^\mathtt{C}$.
\subsubsection{Rank one}
\label{rank1}
The Lie algebra $\mathfrak{s}l_2$ plays a prominent role in Lie theory.
A similar role in our setup will be played by root algebras of rank 1.
Let us describe them all.
Let $X=\{x\}$. In this case $\widetilde{\mathfrak{g}}(v)$ is described in
\ref{sss:rank1-wt}. It is a root algebra.
If $a_{xx}\not=0$ or $p(x)=0$, then $\mathfrak{g}^\mathtt{C}=\widetilde\mathfrak{g}$.
If $a_{xx}=0$ and $p(x)=1$, the maximal ideal $\mathfrak{r}$ of $\widetilde\mathfrak{g}$ having zero intersection
with $\mathfrak{h}$ is spanned by
$e_x^2,f_x^2$ and $\mathfrak{g}^\mathtt{C}=\widetilde\mathfrak{g}/\mathfrak{r}\cong \mathfrak{gl}(1|1)$. The algebras
$\widetilde\mathfrak{g}$ and $\mathfrak{g}^\mathtt{C}$ are
exactly the two root algebras in this case, as only these two allow an automorphism lifting $\gamma=t_{-1}\circ r_x$, see~\ref{sss:aut-gl11}.
\subsubsection{Decomposable root datum}
\label{sss:decomposable}
Let $X=X_1\sqcup X_2$ and let
$v_i=(\mathfrak{h}_i, a_i:X_i\to\mathfrak{h}_i,b_i:X_i\to\mathfrak{h}_i^*,p_i:X_i\to\mathbb{Z}_2)$,
$i=1,2$, be two root data of ranks $|X_1|$ and $|X_2|$ respectively.
We define their sum $v=v_1+v_2$ in an obvious way, as the root datum
with $\mathfrak{h}=\mathfrak{h}_1\mathrm{op}lus\mathfrak{h}_2$ and $a:X\to\mathfrak{h}$, $b:X\to\mathfrak{h}^*$ and $p:X\to\mathbb{Z}_2$ defined by the conditions
$$
a_{|X_i}=s_i(a_i), b_{|X_i}=s_i^*(b_i),\ p|_{X_i}=p_i,
$$
where $s_i:\mathfrak{h}_i\to\mathfrak{h}$ and $s_i^*:\mathfrak{h}_i^*\to\mathfrak{h}^*$
are the obvious embeddings.
We will denote by $\mathcal{R}(X),\ \mathcal{R}(X_1)$ and $\mathcal{R}(X_2)$ the groupoids of root data for the sets $X,X_1$ and $X_2$. The component $\mathcal{R}_0$
of $\mathcal{R}(X)$ containing $v=v_1+v_2$ is obviously a direct product
$\mathcal{R}'_0\times\mathcal{R}''_0$ of the corresponding components of $\mathcal{R}(X_1)$ and
$\mathcal{R}(X_1)$.
If $\mathfrak{g}_1$ and $\mathfrak{g}_2$ are root algebras supported on the components $\mathcal{R}'_0$ and $\mathcal{R}''_0$
respectively, the product $\mathfrak{g}=\mathfrak{g}_1\times\mathfrak{g}_2$ is a root algebra of $\mathcal{R}_0$. In particular,
$\mathfrak{g}^\mathtt{C}_1\times\mathfrak{g}^\mathtt{C}_2$ is the contragredient root algebra for $\mathcal{R}_0$.
Theorem~\ref{thm:admissible=wsym} implies that if $\mathcal{R}_0$ is admissible, then both
$\mathcal{R}'_0$ and $\mathcal{R}''_0$ are admissible.
It is not true in general that any root algebra supported on $\mathcal{R}_0$ is a product.
Here is the best we can say.
\begin{prp}
\label{prp:deco}
Let $X=X_1\sqcup X_2$, $v=v_1+v_2$ be defined as above, with
$v\in\mathcal{R}_0$, $v_1\in\mathcal{R}_0'$ and $v_2\in\mathcal{R}_0''$. Assume that all $x\in X_1$ are reflectable
at all $v'\in\mathcal{R}'_0$. Then any root algebra
supported on $\mathcal{R}_0$ uniquely decomposes as a product of a root
algebra supported on $\mathcal{R}_0'$ and a root algebra supported on $\mathcal{R}_0''$.
\end{prp}
\begin{proof}
The algebra $\mathfrak{g}=\mathfrak{g}(v)$ is generated by $\mathfrak{h}$, $e_x,f_x,e_y,f_y$ where $x\in X_1$ and $y\in X_2$.
We have to verify that $[e_x,e_y]=0=[f_x,f_y]$ for $x\in X_1$ and $y\in X_2$.
The reflexion $r_x:v\to v'$ with respect to $x\in X_1$ carries, up to scalars, $e_x$ to $f'_x$ and
$f_x$ to $e'_x$, retaining $e_y$ and $f_y$. Since $[e'_x,f'_y]=0=[e'_y,f'_x]$, we deduce
$[e_x,e_y]=0=[f_x,f_y]$.
\end{proof}
We can apply the sum of root data operation to an empty root datum
$\emptyset_V$
corresponding to $X=\emptyset$ and uniquely defined by a vector space
$V$. For $v=(\mathfrak{h},a,b,p)$ the sum $\emptyset_V+v$ has form
$(\mathfrak{h}\mathrm{op}lus V,a,b,p)$ and any root algebra based on it is the direct product of a root algebra based on $v$ with the commutative algebra $V$.
The following result is a corollary of~\ref{rank1}.
\begin{crl}\label{corgalpha}
Let $\mathcal{R}_0$ be an admissible component of $\mathcal{R}$ and $\mathfrak{g}:=\mathfrak{g}(v)$ be a
root algebra.
Fix $x\in X$ and set $\alpha:=b(x)$.
We denote by $\mathfrak{g}\langle\alpha\rangle$ the subalgebra of $\mathfrak{g}$
generated by $\mathfrak{g}_{\alpha}$ and $\mathfrak{g}_{-\alpha}$.
\begin{enumerate}
\item
If $a_{xx}\not=0$ and $p(x)=0$,
one has $\mathfrak{g}\langle\alpha\rangle=\mathfrak{s}l_2$ and
$\mathfrak{g}_{i\alpha}=0$ for $i\not\in\{0,\pm 1\}$.
\item
If $a_{xx}\not=0$ and $p(x)=1$,
one has $\mathfrak{g}\langle\alpha\rangle=\mathfrak{osp}(1|2)$ and $\mathfrak{g}_{i\alpha}=0$
for $i\not\in\{0,\pm 1,\pm 2\}$.
\item If $a_{xx}=0$ and $p(x)=0$ then $\mathfrak{g}\langle\alpha\rangle$ is
the Heisenberg algebra and $\mathfrak{g}_{i\alpha}=0$ for $i\not\in\{0,\pm 1\}$.
\item
If $p(x)=1$, $a_{xx}=0$ and $a_{xy}, a_{yx}\not=0$ for some $y$
then $\mathfrak{g}\langle\alpha\rangle\cong \mathfrak{sl}(1|1)$ and
$\mathfrak{g}_{i\alpha}=0$ for $i\not\in\{0,\pm 1\}$.
\item
If $p(x)=1$, $a_{xx}=0$ and $a_{xy}, a_{yx}=0$ for all $y$
then $\mathfrak{g}\langle\alpha\rangle$ is the $(4|2)$-dimensional algebra
described in~\ref{sss:rank1-wt}.
\end{enumerate}
\end{crl}
\begin{proof}
Clearly, $\mathfrak{g}\langle\alpha\rangle$ is a quotient of the
algebra $[\widetilde\mathfrak{g},\widetilde\mathfrak{g}]$ where $\widetilde\mathfrak{g}$ is the corresponding algebra listed in~\ref{rank1}; this gives
(1), (2), (3) and shows that in (4) it is enough to verify
$\mathfrak{g}_{2b(x)}=0$. This follows from Lemma~\ref{lem:rk2-ideal}(b).
\end{proof}
Note that Corollary~\ref{corgalpha} implies that $x\in X$ is
reflectable at $v$ iff for $\alpha=b(x)$ the algebra $\mathfrak{g}\langle
\alpha\rangle$ is not the Heisenberg algebra and $e_\alpha$ acts on $\mathfrak{g}$
locally nilpotently.
\subsection{Admissibility is just a weak symmetricity}
In this subsection we prove the following result.
\begin{thm}
\label{thm:admissible=wsym}
A connected component $\mathcal{R}_0$ of $\mathcal{R}$ is admissible iff it is
weakly symmetric.
\end{thm}
\begin{proof}
1. Let $\mathcal{R}_0$ be a weakly symmetric component of $\mathcal{R}$. We claim
that the collection of $\mathfrak{g}^\mathtt{C}(v)=\widetilde\mathfrak{g}(v)/\mathfrak{r}(v)$ forms a root
Lie superalgebra. Let $r_x:v'\to v$ be a reflexion. Denote
$\widetilde\mathfrak{g}'=\widetilde\mathfrak{g}(v')$, $\mathfrak{g}=\mathfrak{g}^\mathtt{C}(v)$. Let us show that
there exists a homomorphism $\rho:\widetilde\mathfrak{g}'\to\mathfrak{g}$ identical on $\mathfrak{h}$.
The half-baked Lie superalgebra $\widetilde\mathfrak{g}(v')$ is generated by $\mathfrak{h}$,
$\tilde e'_y$ and $\tilde f'_y$, $y\in X$. In order to construct
$\rho$, we have to find $\rho(\tilde e'_y)$, $\rho(\tilde f'_y)$,
and verify the (very few) relations.
The weight of $\tilde e'_y$ is $b'(y)$, so we have to look for
$\rho(\tilde e'_y)$ in $\mathfrak{g}^\mathtt{C}_{b'(y)}$. We know that
$\widetilde\mathfrak{g}_{b'(y)}$ is one-dimensional. By Proposition~\ref{prp:bracket}(2), the ideal generated by $\widetilde\mathfrak{g}_{b'(y)}$ contains $a'(y)\in\mathfrak{h}$,
so $\mathfrak{r}(v)$ does not contain it. Therefore, $\mathfrak{g}_{b'(y)}$ is also one-dimensional.
We will define arbitrarily $0\ne\rho(\tilde e'_y)\in\mathfrak{g}_{b'(y)}$
and choose $\rho(\tilde f'_y)\in\mathfrak{g}_{-b'(y)}$ so that
$[\rho(\tilde e'_y),\rho(\tilde f'_y)]=a'(y)$. The latter is also
possible by Proposition~\ref{prp:bracket}(2).
It remains to verify that
$[\rho(\tilde e'_y),\rho(\tilde f'_z)]=0$ for $y\ne z$.
(a) $y\ne x,\ z\ne x$. In this case the bracket should have weight
$b'(y)-b'(z)=b(y)-b(z)+cb(x)$ for some $c\in\mathbb{Z}$. This is not a weight
of $\widetilde\mathfrak{g}$, so the bracket should vanish.
(b) $z=x\ne y$. In this case the bracket should have weight
$b'(y)-b'(x)=b(y)+j_0b(x)+b(x)$ where $j_0$ is defined as
in~\ref{sss:properties-2}. According to Lemma~\ref{lem:rk2-ideal}(c)
the ideal generated by this weight space has no intersection with $\mathfrak{h}$,
so this is not a weight of $\mathfrak{g}$ and the bracket vanishes.
Therefore, we have constructed a homomorphism $\rho:\widetilde\mathfrak{g}'\to\mathfrak{g}$
for each reflexion $r_x:v'\to v$. It is identity on $\mathfrak{h}$, so
it induces a homomorphism $\mathfrak{g}'\to\mathfrak{g}$. Any reflexion has order two,
so there is also a homomorphism $\mathfrak{g}\to\mathfrak{g}'$ in the opposite direction.
Their composition preserves weight spaces, so it is invertible.
2. Assume now that $\mathcal{R}_0$ is an admissible component. We will deduce
that it is necessarily weakly symmetric. Assume
that there exists $v\in\mathcal{R}_0$, a $v$-reflectable element $x\in X$ and
another $y\in X$ such that $a_{xy}=0$. Let $\mathfrak{g}$ be a root algebra.
Look at the $x$-reflexion $r_x:v\to v'$.
Since
$$b'(x)=-b(x),\ \ b'(y)=b(y)$$
one has $\tilde{\mathfrak{g}}'_{b(x)+b(y)}=0$ so $\mathfrak{g}_{b(x)+b(y)}=0$.
Therefore $[e_x,e_y]=0$. One has
$$a_{yx} e_x=[a(y),e_x]=[[e_y,f_y],e_x]=0$$
so $a_{yx}=0$ as required.
\end{proof}
\subsection{Admissible components in rank two}
\label{ss:ranktwo}
In this subsection we show that any locally weakly symmetric
root datum of rank two belongs to an admissible component (that is, a
local weak symmetricity implies a weak symmetricity).
\subsubsection{Fully reflectable} A component $\mathcal{R}_0$ of $\mathcal{R}$ is called
{\sl fully reflectable} if all $x\in X$ are reflectable at all $v\in\mathcal{R}_0$.
Classification of fully reflectable root data is available for all ranks. Fully reflectable admissible root data without isotropic real roots can be easily classified as all Cartan matrices
in the component are $D$-equivalent.
The classification of fully reflectable admissible root data with isotropic real roots was obtained
in~\cite{Hoyt}.
\subsubsection{Symmetrizable}
The cases $a_{xy}=a_{yx}=0$ as well as $a_{xy}\ne 0$ and $a_{yx}\ne 0$ are symmetrizable, therefore, symmetrizable at all vertices by
Lemma~\ref{lem:sym-stable}.
\subsubsection{Weakly symmetric but not symmetrizable}
This is possible only if $\mathcal{R}_0$ contains an object $v$ having nonreflectable $y\in X$. Thus, the Cartan matrix should have form
$$
A=\begin{pmatrix}
a_{xx} & a_{xy}\\
0 & a_{yy}\end{pmatrix},
$$
with $a_{xy}\ne 0$. Since $y$ is nonreflectable, $a_{yy}=0$ and $p(y)=0$.
(a) Let $a_{xx}=0$ so $p(x)=1$ since $x$ is reflectable. Then
$$
A=\begin{pmatrix}
0 & a_{xy}\\
0 & 0\end{pmatrix},
$$
that, after the reflexion, will become
$$
A'=\begin{pmatrix}
0 & -a_{xy}\\
0 & 0\end{pmatrix}
$$
which is $D$-equivalent to $A$.
(b) $a_{xx}\ne 0$. In this case the Cartan matrix is not changed
and therefore the component is weakly symmetric.
\subsection{The canonical extension of $\mathcal{R}_0$}
\subsubsection{}
Let $\mathcal{G},\mathcal{H}$ be groupoids.
A functor $f:\mathcal{G}\to\mathcal{H}$ is called
a {\sl fibration} if for any $g\in\mathcal{G}$ and $\beta:f(g)\to h$ in $\mathcal{H}$
there exists $\alpha:g\to g'$ in $\mathcal{G}$ such that $f(\alpha)=\beta$.
Given a fibration $f:\mathcal{G}\to\mathcal{H}$ and $h\in\mathcal{H}$, the fiber of $f$
at $h$, $\mathcal{G}_h$, is defined as follows.
\begin{itemize}
\item $\operatorname{Ob}(\mathcal{G}_h)=\{g\in\mathcal{G}| f(g)=h\}$.
\item $\mathrm{Hom}_{\mathcal{G}_h}(g,g')=\{\alpha:g\to g'|f(\alpha)=\mathrm{id}_h\}$
~\footnote{For a general $f$ the fiber $\mathcal{G}_h$ defined as above may change if one replaces $\mathcal{G}$ with an equivalent groupoid.
A more invariant notion of fiber has as objects pairs $(g,\alpha:f(g)\to h)$.}
.
\end{itemize}
\subsubsection{}
\label{sss:wtR}
Let $\mathcal{R}_0$ be an admissible component of the root groupoid and let
$\mathfrak{g}$ be a root algebra on $\mathcal{R}_0$. Define the groupoid of symmetries
of $\mathfrak{g}$, $\mathcal{G}_0$, together with a fibration $\pi:\mathcal{G}_0\to\mathcal{R}_0$,
as follows. The groupoids $\mathcal{G}_0$ and $\mathcal{R}_0$ have the same objects.
For $\alpha:v\to v'\in\mathcal{R}_0$, we define $\mathrm{Hom}^\alpha_{\mathcal{G}_0}(v,v')$,
the set of arrows $v\to v'$ in $\mathcal{G}_0$, as the set of isomorphisms
$\mathfrak{g}(v)\to\mathfrak{g}(v')$ extending the isomorphism $\mathfrak{h}(\alpha)$.
The fiber of $\pi$ at $v\in\mathcal{R}_0$ consists of automorphisms of $\mathfrak{g}(v)$
that are identity on $\mathfrak{h}(v)$. Any such automorphism $a$ preserves the weight spaces, and so it is uniquely given by a collection of
$\lambda_x\in\mathbb{C}^*$, $\mu_x$ so that $a(e_x)=\lambda_x e_x$, $a(f_x)=\mu_xf_x$. Since $[e_x,f_x]=a(x)\ne 0$, one necessarily has
$\mu_x=\lambda_x^{-1}$.
Therefore, the fiber of $\pi$ at $v$ identifies with the classifying
groupoid~\footnote{Recall that the classifying groupoid of a group $G$
is the groupoid having a single object with the group of
automorphisms $G$.} of the torus $(\mathbb{C}^*)^X$.
\subsubsection{Canonicity of $\mathcal{G}_0$}
Let $\mathfrak{g}$ be a root algebra on $\mathcal{R}_0$. For any $v$ the algebra $\mathfrak{g}(v)$ has a maximal ideal $\mathfrak{r}(v)$ having no intersection with $\mathfrak{h}(v)$.
Thus, $\mathfrak{g}(v)/\mathfrak{r}(v)=\mathfrak{g}^\mathtt{C}(v)$ for all $v$. Let $\alpha:v\to v'$ be an arrow in $\mathcal{R}$. Any isomorphism $\mathfrak{g}(v)\to\mathfrak{g}(v')$ extending $\mathfrak{h}(\alpha)$
induces an isomorphism $\mathfrak{g}^\mathtt{C}(v)\to\mathfrak{g}^\mathtt{C}(v')$. This leads to a
functor $\mathcal{G}_0\to\mathcal{G}_0^\mathtt{C}$ over $\mathcal{R}_0$, where $\mathcal{G}_0^\mathtt{C}$
denotes (temporarily) the groupoid extension of $\mathcal{R}_0$ constructed
as in \ref{sss:wtR} with the root algebra $\mathfrak{g}^\mathtt{C}$. It is an equivalence
as it induces an equivalence of fibers at any $v\in\mathcal{R}_0$.
\subsection{Universal root algebra}
\subsubsection{}
In this subsection we will prove the existence of an initial object
in the category of root algebras associated to an admissible component
$\mathcal{R}_0$ of $\mathcal{R}$.
Let $\mathfrak{g}$ be a root Lie superalgebra for the component $\mathcal{R}_0$.
Fix $v\in\mathcal{R}_0$. The $v$-coordinate system for $\mathfrak{g}$ is a Lie superalgebra epimorphism $\widetilde\mathfrak{g}(v)\to\mathfrak{g}(v)$. Let $\mathfrak{k}(v)$ be its kernel.
Choose an arrow $\alpha:v'\to v$ in $\mathcal{R}$ presentable as a composition of reflexions. We denote $\mathfrak{g}'=\mathfrak{g}(v')$ and $\mathfrak{g}=\mathfrak{g}(v)$.
The existence of isomorphism $\mathfrak{g}'\to\mathfrak{g}$
lifting $\alpha$ proves that $\mathfrak{g}_{b'(x)-b'(y)}=0$ for $y\ne x$, so that
$\mathfrak{k}(v)\supset\mathfrak{s}(v)$ where $\mathfrak{s}(v)$ is the ideal of $\widetilde\mathfrak{g}(v)$
generated by $\sum\widetilde\mathfrak{g}_{b'(x)-b'(y)}(v)$, the sum being taken
over all $\alpha:v'\to v$ presentable as compositions of reflexions.
Let us verify that the collection
$\mathfrak{g}^\mathtt{U}=\{\mathfrak{g}^\mathtt{U}(v)=\widetilde\mathfrak{g}(v)/\mathfrak{s}(v),\ v\in\mathcal{R}_0\}$ is a root Lie superalgebra.
Note that $\mathfrak{s}(v)\subset \mathfrak{k}(v)$, so one has an obvious surjective
homomorphism
$q:\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}(v)$.
We have to define, for each arrow $\alpha:v\to v'$ in $\mathcal{R}$, an
isomorphism $\tilde\alpha:\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}^\mathtt{U}(v')$ extending
$\mathfrak{h}(\alpha):\mathfrak{h}\to\mathfrak{h}'$. This is enough to verify separately for
reflexions, homotheties and tautological arrows. In the case when
$\alpha$ is a tautological arrow or a homothety, it extends to an
isomorphism $\tilde\alpha:\widetilde\mathfrak{g}(v)\to\widetilde\mathfrak{g}(v')$. Since the homotheties
and the tautological arrows commute with the reflexions,
$\tilde\alpha$ carries $\mathfrak{s}(v)$ to $\mathfrak{s}(v')$, and this induces
an isomorphism $\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}^\mathtt{U}(v')$.
It remains to define, for each reflexion $r_x:v\to v'$ in $\mathcal{R}$,
an isomorphism $\rho=\tilde r_x:\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}^\mathtt{U}(v')$ extending
$\mathrm{id}_\mathfrak{h}$.
The algebra $\mathfrak{g}^\mathtt{U}(v)$ is generated over $\mathfrak{h}$ by the elements
$e_y$ of weight $b(y)$, $f_z$ of weight $-b(z)$, subject to relations
listed in \ref{sss:half} and factored out by $\mathfrak{s}(v)$. Thus, in order
to construct $\rho$, we have to choose $\rho(e_y)\in\mathfrak{g}^\mathtt{U}_{b(y)}(v')$,
$\rho(f_z)\in\mathfrak{g}^\mathtt{U}_{-b(z)}(v')$, so that $\rho$ vanishes at all
the relations.
The weight spaces $\mathfrak{g}^\mathtt{U}_{b(y)}(v')$ and $\mathfrak{g}^\mathtt{U}_{-b(y)}(v')$
are one-dimensional by property 3 of
\ref{sss:properties} as the map
$q:\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}(v)$ is surjective and the weight spaces
$\mathfrak{g}_{b(y)}(v')$ and $\mathfrak{g}_{-b(y)}(v')$ are one-dimensional.
We will define arbitrarily $0\ne\rho(e_y)\in\mathfrak{g}^\mathtt{U}_{b(y)}(v')$
and choose $\rho(f_y)\in\mathfrak{g}^\mathtt{U}(v')$ so that
$[\rho(e_y),\rho(f_y)]=a(y)$. The latter is possible
by~Proposition~\ref{prp:bracket}(2). The rest of the relations say that,
for any composition of reflexions $\gamma:v''\to v$ with
$v''=(\mathfrak{h},a'',b'',p'')$, the weight space $\mathfrak{g}^\mathtt{U}_{b''(y)-b''(z)}(v)$
vanishes for all $y\ne z$.
Now $\rho$ defined as above
yields a homomorphism as
$\mathfrak{g}^\mathtt{U}_{b''(y)-b''(z)}(v')=0$ by definition of $\mathfrak{s}(v')$.
Thus, we have constructed an algebra homomorphism $\rho:\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}^\mathtt{U}(v')$.
Any reflexion has order two,
so there is also a homomorphism in the opposite direction. Their
composition preserves weight spaces, so it is invertible.
This proves that the collection of algebras
$\mathfrak{g}^\mathtt{U}=\{\widetilde\mathfrak{g}(v)/\mathfrak{s}(v)\}$ is the initial object
in the category of root algebras based on $\mathcal{R}_0$.
\begin{dfn}
\label{dfn:universalroot}
The root algebra $\mathfrak{g}^\mathtt{U}=\{\widetilde\mathfrak{g}(v)/\mathfrak{s}(v)\}$ defined as above
is called {\sl the universal root Lie superalgebra} defined
by the component $\mathcal{R}_0$~\footnote{It was J.~Bernstein who once
pointed out that factoring out
by the maximal ideal having no intersection with the Cartan may be
unjustified. The present work is to a large extent outcome of his
remark.} .
\end{dfn}
The superinvolution $\theta$ of $\widetilde\mathfrak{g}$ defined in \ref{sss:automorphism} induces an automorphism of the universal root algebra.
\subsubsection{Serre relations}
The classical Serre relations
$$
(\mathrm{ad} e_x)^{-a_{xy}+1}(e_y)=0,\
(\mathrm{ad} f_x)^{-a_{xy}+1}(f_y)=0,
$$
for $x,y\in X$ such that $a_{xx}\ne 0$ are among the most obvious
relations defining the universal Lie superalgebra. They correspond to
the summand $\widetilde\mathfrak{g}_{\pm(b'(x)-b'(y))}$ of $\mathfrak{s}(v)$ defined by the
reflexion $r_x:v'\to v$. The ideal $\mathfrak{s}(v)$, however, is usually not
generated by the classical Serre relations.
\subsubsection{}
\label{sss:invariantideals}
Let $\mathfrak{g}^\mathtt{U}=\{\mathfrak{g}^\mathtt{U}(v)\}$ denote the universal root algebra
and let $\mathfrak{g}=\{\mathfrak{g}(v)=\mathfrak{g}^\mathtt{U}(v)/I(v)\}$ be a root algebra.
Any automorphism $\eta\in\operatorname{Aut}_\mathcal{R}(v)$ lifts to an automorphism of
$\mathfrak{g}^\mathtt{U}(v)$ preserving $I(v)$.
The converse of this fact also holds; one has the following easy result.
\begin{Lem}
Let $\mathfrak{g}^\mathtt{U}$ be the universal root algebra at a component $\mathcal{R}_0$,
$v\in\mathcal{R}_0$. Any $\operatorname{Aut}_\mathcal{R}(v)$-invariant ideal
$J(v)$ of $\mathfrak{g}^\mathtt{U}(v)$ such that $J(v)\cap\mathfrak{h}=0$ defines a canonical root algebra $\mathfrak{g}$ whose
$v$-component is $\mathfrak{g}(v)=\mathfrak{g}^\mathtt{U}(v)/J(v)$.
\end{Lem}
\begin{proof}
For any $v'\in\mathcal{R}_0$
choose an isomorphism $\tilde\gamma:\mathfrak{g}^\mathtt{U}(v)\to\mathfrak{g}^\mathtt{U}(v')$ and set
$J(v')=\tilde\gamma(J(v))$. By invariance of $J(v)$ the ideal $J(v')$ is
independent of the choice of $\tilde\gamma$.
\end{proof}
\begin{rem}
The lemma above implies that a root Lie superalgebra is canonically
determined by any of its components $\mathfrak{g}(v)=\widetilde\mathfrak{g}(v)/I(v)$.
An ideal $I(v)\subset\widetilde\mathfrak{g}(v)$ defines a root superalgebra iff it
contains $\mathfrak{s}(v)$ and its image in $\mathfrak{g}^\mathtt{U}(v)$ is
$\operatorname{Aut}_\mathcal{R}(v)$-invariant.
\end{rem}
\subsection{A side remark: groupoid extensions}
\label{ss:side}
The groupoid extension $\pi:\mathcal{G}_0\to\mathcal{R}_0$ has fibers isomorphic to
classifying spaces of a torus. This very special type of extension
admits a description in terms of gerbes.
For $v\in\mathcal{R}_0$ and $\gamma:v\to v$ in $\mathcal{R}_0$ choose a lifting
$\tilde\gamma:v\to v$ in $\mathcal{G}_0$. This defines an automorphism
of the fiber $(\mathcal{G}_0)_v$ given by the formula $\alpha\mapsto
\tilde\gamma\circ\alpha\circ\tilde\gamma^{-1}$. The result is independent
of the choice of $\tilde\gamma$ as tori are abelian groups.
The above described action can be encoded into a groupoid extension
$p:\mathcal{T}\to\mathcal{R}_0$ that is a group over $\mathcal{R}_0$: one has a multiplication
$$m:\mathcal{T}\times_{\mathcal{R}_0}\mathcal{T}\to\mathcal{T}$$
corresponding to the fiberwise multiplication. Finally,
$\pi:\mathcal{G}_0\to\mathcal{R}_0$ is a $\mathcal{T}$-torsor: there is an action
$$\mathcal{T}\times_{\mathcal{R}_0}\mathcal{G}_0\to\mathcal{G}_0.$$
In more classical terms, we are talking about presenting an abelian
group extension as a torsor over a split abelian group extension
that is a semidirect product of the base and the fiber.
The group $p:\mathcal{T}\to\mathcal{R}_0$ is easy to describe. The groupoid $\mathcal{R}_0$
comes with the functor $\mathfrak{h}:\mathcal{R}_0\to\mathtt{Vect}$.
We define a functor $T:\mathcal{R}_0\to\mathtt{Gp}$ into the category of groups
assigning to $v$ the factor group $T(v)=\mathfrak{h}(v)/K(v)$ where
$$K(v)=\{h\in\mathfrak{h}|b(x)(h)\in 2\pi i\mathbb{Z}\textrm{ for all }x\in X\}.$$
The functor $T$ gives rise to a groupoid extension $p:\mathcal{T}\to\mathcal{R}_0$
with $\operatorname{Ob}(\mathcal{T})=\operatorname{Ob}(\mathcal{R}_0)$ and $\mathrm{Hom}_\mathcal{T}(v',v)=\mathrm{Hom}_{\mathcal{R}_0}(v',v)\times
T(v)$.
The action $\mathcal{T}\times_{\mathcal{R}_0}\mathcal{G}_0\to\mathcal{G}_0$ is defined
as follows. Let $\mathfrak{g}=\{\mathfrak{g}(v)\}$ be a root algebra based on $\mathcal{R}_0$.
To $(\alpha,\tau)\in\mathrm{Hom}_{\mathcal{R}}(v',v)\times T(v)$ and
$\tilde\alpha:\mathfrak{g}(v')\to\mathfrak{g}(v)$, we assign
$\tau\circ\tilde\alpha$ where $\tau:\mathfrak{g}(v)\to\mathfrak{g}(v)$ is given by rescaling.
Note that the torsor $\mathcal{G}_0$ is nontrivial as, for instance,
for $\mathfrak{g}=\mathfrak{sl}_2$ the groupoid extension $\pi:\mathcal{G}_0\to\mathcal{R}_0$
is the projection $N(T)\to W$ of the normalizer of the torus to the Weyl group that is not split.
\section{Weyl group}
Throughout this section we assume that $\mathcal{R}_0$ is an admissible
component of $\mathcal{R}$.
\subsection{Real roots}
For $v\in\mathcal{R}_0$ we denote
$$Q(v)=\operatorname{Span}_\mathbb{Z}\{b(x)\}_{x\in X}\subset \mathfrak{h}^*(v).$$
The parity function $p:X\to\mathbb{Z}_2$ extends to a group homomorphism
$p:Q(v)\to\mathbb{Z}_2$ that we denote by the same letter $p$.
\begin{lem}
\begin{itemize}
\item[1.]
For any $\gamma:v\to v'$ the isomorphisms $\mathfrak{h}(v)\to\mathfrak{h}(v')$ and
$\mathfrak{h}^*(v)\to\mathfrak{h}^*(v')$ induce isomorphisms
$\operatorname{Span}_\mathbb{C}\{a(x)\}_{x\in X}\to\operatorname{Span}_\mathbb{C}\{a'(x)\}_{x\in X}$
and $Q(v)\to Q(v')$.
\item[2.] The isomorphisms $Q(v)\to Q(v')$ are compatible with the parity
$p$.
\end{itemize}
\end{lem}
\begin{proof}
The claim directly follows from the formulas for reflexions.
\end{proof}
\begin{dfn}
An element $\alpha\in Q(v)$ is called a real root if there exists
$\gamma:v'\to v$ and $x\in X$ so that $\gamma(b'(x))=\alpha$.
\end{dfn}
\subsubsection{}
\label{sss:realinall}
The collection of real roots in $\mathfrak{h}^*(v)$ is denoted by
$\Delta^\mathit{re}(v)$. By~\ref{corgalpha}, for any root algebra $\mathfrak{g}$,
$\Delta^\mathit{re}(v)\subset\Delta(v)$ and all real root spaces of $\mathfrak{g}$ are one-dimensional. Real roots coming as described above from
$\gamma:v\to v'$ form a subset $\Sigma_{\gamma}(v)$.
We write $\Sigma(v)=\Sigma_\mathrm{id}(v)$ for the set
of simple roots at $v$.
Clearly
\begin{equation}
\label{eq:rroots-bigunion}
\Delta^\mathit{re}(v)=\bigcup_{\gamma:v\to v'}\Sigma_\gamma(v),
\end{equation}
but the union is not disjoint. Any $\alpha:v\to v'$
sends bijectively $\Delta^\mathit{re}(v)$ to $\Delta^\mathit{re}(v')$ and
$\Sigma_{\gamma\circ\alpha}(v)$ to $\Sigma_\gamma(v')$.
\subsection{Isotropic, anisotropic and nonreflectable real roots}
\begin{dfn}
\begin{itemize}
\item[1.]
A simple root $b(x)\in\mathfrak{h}^*(v)$ is called isotropic if $x$ is reflectable at
$v$ and $\langle a(x),b(x)\rangle=0$. {\sl One has always $p(x)=1$ for an
isotropic root $b(x)$.}
\item[2.]A simple root $b(x)\in\mathfrak{h}^*(v)$ is called anisotropic
if $x$ is reflectable at $v$ and $\langle a(x),b(x)\rangle\ne 0$.
\item[3.]For an anisotropic simple root $\alpha=b(x)$ we define
$\alpha^\vee=\frac{2a(x)}{a_{xx}}\in\mathfrak{h}(v)$.
\end{itemize}
\end{dfn}
We are going to extend these definitions to real roots.
Since a real root at $v$ is defined by a path $\gamma:v\to v'$
and a simple root at $v'$, the extension is possible if two simple roots
at $v'$ and $v''$ defining the same real root, are of the same type.
\begin{prp}
\label{prp:rroots-class}
Let $\alpha\in\Sigma_{\gamma_1}(v)\cap\Sigma_{\gamma_2}(v)$ so that
$\alpha=\gamma_1^*(b_1(x_1))=\gamma_2^*(b_2(x_2))$
for $\gamma_i:v\to v_i$. Then one of the
following options holds.
\begin{itemize}
\item[1.] Both $b_i(x_i)\in\mathfrak{h}^*(v_i)$ are isotropic roots.
\item[2.] Both $b_i(x_i)\in\mathfrak{h}^*(v_i)$ are anisotropic roots
and $(\gamma_2\circ\gamma_1^{-1})^*(b_1(x_1)^\vee)=b_2(x_2)^\vee$.
\item[3.] $x_1$ is nonreflectable at $v_1$ and $x_2$ is nonreflectable
at $v_2$.
\end{itemize}
\end{prp}
\begin{proof}
We can assume, without loss of generality, that $\gamma_1=\mathrm{id}_v$
and $\gamma_2=\gamma:v\to v'$. Then $\alpha=b(x)=\gamma^*(b'(y))$.
Let $\mathfrak{g}$ be a root algebra and let $\alpha=b(x)$ for $v\in\mathcal{R}_0$ so that $x$ is $v$-reflectable. Then $\mathfrak{g}\langle\alpha\rangle$ is not the Heisenberg algebra and $e_x$ acts locally nilpotently on $\mathfrak{g}$. If, for
$\gamma:v\to v'$, $\alpha=\gamma^*(b'(y))$, $e'_y$ acts locally
nilpotently on $\mathfrak{g}(v')$, and, since $\mathfrak{g}\langle\alpha\rangle$ is not the Heisenberg algebra, this implies that $y$ is reflectable at $v'$. Let now $x$ be reflectable at $v$ and $y$ reflectable at $v'$.
Then Corollary~\ref{corgalpha} describes possible options for $\mathfrak{g}\langle\alpha\rangle$.
This implies the claim.
\end{proof}
\subsubsection{}
Proposition~\ref{prp:rroots-class} allows one to extend the classification of simple roots to all real roots.
One has a decomposition
\begin{equation}
\label{eq:reunion}
\Delta^\mathit{re}(v)=\Delta_\mathit{iso}(v)\sqcup\Delta_\mathit{an}(v)
\sqcup\Delta_\mathit{nr}(v),
\end{equation}
where
\begin{itemize}
\item[] $\Delta_\mathit{iso}(v)$ is the set of isotropic real roots that
are reflectable simple roots at some $v\in\mathcal{R}_0$.
\item[] $\Delta_\mathit{an}(v)$ is the set of anisotropic real roots that
are reflectable simple roots at some $v\in\mathcal{R}_0$. Any anisotropic
real root $\alpha\in\Delta_\mathit{an}(v)$ defines a coroot
$\alpha^\vee\in\mathfrak{h}(v)$.
\item[] $\Delta_\mathit{nr}(v)$ is the set of non-reflectable real roots,
those that for any $v\in\mathcal{R}_0$ and $x\in X$ such that $\alpha=b(x)$,
$x$ is non-reflectable at $v$.
\end{itemize}
{\begin{Rem}In our definition isotropic roots are necessarily real.
In another tradition, a root of a symmetrizable Lie superalgebra is
called isotropic if it has length zero. For the real roots both notions
of isotropicity coincide.
\end{Rem}}
For $\alpha\in\mathit{Del}ta_\mathit{an}(v)$ the pair $(\alpha,\alpha^\vee)$ defines
a reflection $s_\alpha$
acting both on $\mathfrak{h}(v)$ and on $\mathfrak{h}^*(v)$ by the usual formulas
\begin{equation}
\label{eq:salpha}
s_\alpha(\beta)=\beta-\langle \beta,\alpha^\vee\rangle\alpha,\
s_\alpha(h)=h-\langle \alpha,h\rangle\alpha^\vee.
\end{equation}
\begin{crl}
\label{crl:deltare-w}
\begin{itemize}
\item[1.] The set of real roots $\Delta^\mathit{re}(v)\subset\mathfrak{h}^*(v)$ is
$\operatorname{Aut}_\mathcal{R}(v)$-invariant.
\item[2.] For $\gamma\in\operatorname{Aut}_\mathcal{R}(v)$ and
$\alpha\in\Delta_\mathit{an}(v)$ one has
\begin{equation}
s_{\gamma(\alpha)}=\gamma s_\alpha \gamma^{-1}.
\end{equation}
\end{itemize}
\end{crl}
\begin{proof}
The first claim is a direct consequence of formula (\ref{eq:rroots-bigunion})
and \ref{prp:rroots-class}. The second claim directly follows from the formulas for $s_\alpha$.
\end{proof}
\subsubsection{Skeleton}
\label{sss:skeleton}
We define $\mathtt{Sk}\subset\mathcal{R}$ as the subgroupoid having the same objects
as $\mathcal{R}$; an arrow $\gamma:v\to v'$ is in $\mathtt{Sk}$ if it can be presented
as a composition of reflexions. This is {\sl the skeleton groupoid}.
We denote by $\mathtt{Sk}(v)$ the connected component of the skeleton containing
$v$. Note that, by definition, any arrow in $\mathtt{Sk}(v)$ induces the identity
map of $\mathfrak{h}(v)$, so any two arrows with the same ends coincide.
Therefore, $\mathtt{Sk}(v)$ is a contractible groupoid. Note that any arrow
$\gamma:v\to v'$ in $\mathcal{R}$ can be decomposed $\gamma=\gamma''\circ\gamma'$
where $\gamma'$ is in $\mathtt{Sk}$ and $\gamma''$ is a composition of a
homothety and a tautological arrow.
\begin{rem}
\label{rem:uniqueness}
As we prove later in~\ref{crl:unique-in-sk}, this decomposition is
unique.
\end{rem}
\subsubsection{}
If $\beta:v\to v'$ is a homothety or a tautological arrow,
$\beta(\Sigma(v'))=\Sigma(v)$. Therefore, for
$\gamma=\gamma''\circ\gamma'$ as above,
$\Sigma_\gamma(v)=\Sigma_{\gamma''}(v)$.
Since $\mathtt{Sk}(v)$ is contractible, it makes sense to denote
$\Sigma_{v'}(v)=\Sigma_\gamma(v)$ for $\gamma:v\to v'$ in $\mathtt{Sk}(v)$.
Thus, we have
\begin{equation}
\Delta^\mathit{re}(v)=\bigcup_{v'\in\mathtt{Sk}(v)}\Sigma_{v'}(v)
\end{equation}
(the union still does not have to be disjoint).
\subsubsection{Spine}
\label{sss:spine}
We denote by $\mathtt{Sp}$ the subgroupoid of $\mathtt{Sk}$ spanned by the isotropic reflections only. The component of $\mathtt{Sp}$ containing $v$ is denoted by
$\mathtt{Sp}(v)$. It is obviously contractible. Cartan data of $\mathtt{Sp}(v)$ describe all
possible Cartan data for the component $\mathcal{R}_0$ of $\mathcal{R}$ containing $v$,
up to $D$-equivalence.
\subsection{Weyl group and its actions}
\label{ss:weyl}
In this subsection we define the Weyl group assigned to a component
$\mathcal{R}_0$. By definition, the Weyl group identifies with a subgroup of
$\mathrm{GL}(\mathfrak{h}(v))$, for every $v$. Any arrow $\gamma:v\to v'$
defines an isomorphism of the Weyl groups at $v$ and at $v'$.
We also define an action of $W(v)$ on $\mathtt{Sk}(v)$.~\footnote{The objects of $\mathtt{Sk}(v)$ classify the (attainable) Borel
subalgebras containing a given Cartan subalgebra $\mathfrak{h}(v)$.}
\begin{dfn}
\label{dfn:weylgroup}
The Weyl group $W=W(v)$ (at $v\in\mathcal{R}$) is the group of automorphisms
of $\mathfrak{h}(v)$ generated by the reflections with respect to anisotropic
real roots.
\end{dfn}
\subsubsection{Embedding $i:W(v)\to\operatorname{Aut}_\mathcal{R}(v)$}
\label{sss:weyltoaut}
The representation of $\operatorname{Aut}_\mathcal{R}(v)$ in $\mathfrak{h}=\mathfrak{h}(v)$ is faithful
by definition of $\mathcal{R}$. Let us show that $W(v)$ is a subgroup of
the image of $\operatorname{Aut}_\mathcal{R}(v)$ in $\mathrm{GL}(\mathfrak{h}(v))$. Let $\alpha=b'(x)$ be an anisotropic root. Without loss of generality we can assume that there is an arrow $\gamma:v\to v'$ in $\mathtt{Sk}(v)$. Then the composition
$$\gamma^{-1}\circ t_{s_\alpha}\circ r_x\circ\gamma:v\to v$$
induces the reflection $s_\alpha$ on $\mathfrak{h}$. This proves that
generators of $W(v)$ are in the image of the embedding $\operatorname{Aut}_\mathcal{R}(v)\to\mathrm{GL}(\mathfrak{h}(v))$, so that the Weyl group identifies with a subgroup of
$\operatorname{Aut}_\mathcal{R}(v)$.
It is clear that any arrow $\gamma:v\to v'$ intertwines the
canonical embeddings $W(v)\to\operatorname{Aut}(v)$ and $W(v')\to\operatorname{Aut}(v')$.
Note that $\operatorname{Aut}_\mathcal{R}(v)$ acts on $W(v)$ so that the embedding
$i$ commutes with this action. This means that $W(v)$ is a normal subgroup of $\operatorname{Aut}_\mathcal{R}(v)$.
\begin{lem}
\label{lem:step1}
Let $r_x:v\to v'=(\mathfrak{h},a',b',p)$ be an anisotropic reflexion,
$\alpha=b(x)\in\mathfrak{h}^*$.
Then $s_\alpha(a(y))=a'(y)$ and $s_\alpha(b(y))=b'(y)$
for all $y\in X$.
\end{lem}
\begin{proof}
Immediate from the formulas~\ref{sss:reflexionformulas}
and~(\ref{eq:salpha}).
\end{proof}
\begin{lem}
\label{lem:step2}
Let $r_x: v\to v'=(\mathfrak{h},a',b',p')$ and
$r_x:w=(\mathfrak{h},a_w,b_w,p_w)\to w'=(\mathfrak{h},a'_w,b'_w,p'_w)$ be reflexions. Let
$\alpha\in\Delta^\mathit{re}$ satisfy the conditions
\begin{equation}
\label{eq:v-to-w}
s_\alpha(a(y))=a_w(y),\ s_\alpha(b(y))=b_w(y),\ p(y)=p_w(y),\ y\in X.
\end{equation}
Then
\begin{equation}
\label{eq:v'-to-w'}
s_\alpha(a'(y))=a'_w(y),\ s_\alpha(b'(y))=b'_w(y),\ p'(y)=p'_w(y),\
y\in X.
\end{equation}
\end{lem}
\begin{proof}
The automorphism $s_\alpha$ carries the basis $\{b(y)\}$ of $Q(v)$
to the basis $\{b_w(y)\}$ of $Q(w)$. The Cartan matrices at $v$ and $w$
coincide and the formulas defining $r_x$ are the same.
\end{proof}
\begin{rem}
Note that if (\ref{eq:v-to-w}) holds then
$x$ is reflectable
at $v$ if and only if it is reflectable at $w$. This is so as the
Cartan matrices of $v$ and of $w$ coincide.
\end{rem}
\begin{prp}
\label{prp:WSigma}
Let $w\in W(v)$, $v'=(\mathfrak{h},a',b',p')\in\mathtt{Sk}(v)$. Then there exists a unique
$v''=(\mathfrak{h},a'',b'',p')\in\mathtt{Sk}(v)$ such that
\begin{equation}
\label{eq:WSigma}
w(a'(y))=a''(y),\ w(b'(y))=b''(y),\ y\in X.
\end{equation}
\end{prp}
The proposition defines an action of the Weyl group $W$ on
$\mathtt{Sk}(v)$.
\begin{proof}
The uniqueness claim is obvious.
For the existence, it is sufficient to verify the claim for $w=s_\alpha$. We can assume that $\alpha=b(x)$ is a simple root at $v$
and let $r_x:v\to u$ be the reflexion. If $v'=v$ then $v''=u$ satisfies
the requirements by
Lemma~\ref{lem:step1}. Otherwise, choose
an isomorphism $\phi:v\to v'$, present it as a composition
$\phi=\phi_n\circ\ldots\circ\phi_1$, where each $\phi_i$ is a reflexion. We define an arrow
$\psi:u\to v''$ as the composition $\psi=\psi_n\circ\ldots\circ\psi_1$ where $\psi_i=r_y$ if $\phi_i=r_y$~\footnote{Note that
$\psi_i$ and $\phi_i$ are {\sl namesakes}: they have the same name
but are applied to different objects of the groupoid.}.
Note that the composition
$\psi$ necessarily makes sense. Now a consecutive application
of Lemma~\ref{lem:step2} yields the result.
\end{proof}
\begin{rem}
\label{rem:explicit}
The proof provides us with an explicit formula: Let $\alpha=b_v(x)$. Then $v''=s_\alpha(v')$
is the target of the composition $\psi\circ r_x\circ\phi^{-1}:v'\to v''$, see the picture below.
\end{rem}
\begin{equation}
\label{eq:pic-salpha}
\xymatrix{
&\overset{v}{\bullet}\ar_{r_x}^{\alpha=b_v(x)}[dd]\ar^{\phi_1=r_{y_1}}[rr]&&\bullet
&\dots&\bullet\ar^{\phi_n=r_{y_n}}[rr]&&\overset{v'}{\bullet}\ar@/^1pc/
@[red]^{\color{red}{v''=s_\alpha(v')}}[dd] \\
&&&&&&\\
&\overset{u}{\bullet}\ar^{\psi_1=r_{y_1}}@{-->}[rr]&&\bullet
&\dots&\bullet\ar^{\psi_n=r_{y_n}}@{-->}[rr]&&\overset{v''}{\bullet}
}
\end{equation}
The embedding $i:W(v)\to\operatorname{Aut}_\mathcal{R}(v)$ can be easily expressed in terms
of the action of $W$ on $\mathtt{Sk}(v)$.
\begin{crl}
\label{crl:w}
For any $w\in W(v)$ let $\gamma_w:v\to w(v)$ be the arrow in $\mathtt{Sk}(v)$.
Then
$$
i(w)=t_w\circ\gamma_w.
$$
\end{crl}
\begin{proof}
The composition $t_w\circ\gamma_w$ is an endomorphism of $v$.
The automorphism $i(w)$ is uniquely defined by its action on $\mathfrak{h}$.
The composition $t_w\circ\gamma_w$ provides the same action.
\end{proof}
We will show later (see~\ref{crl:Wfree}) that the action of the
Weyl group $W(v)$ on $\mathtt{Sk}(v)$ is free. It is not transitive in general.
Here is what we can say about the orbits of the action.
\begin{lem}
\label{lem:decomposition0}
For every $v,\ v'\in\mathtt{Sk}(v)$ there exists $w\in W(v)$ and a sequence of
isotropic reflexions
$$
v\stackrel{r_{x_1}}{\to}\ldots\stackrel{r_{x_k}}{\to}v''
$$
such that $v'=w(v'')$. In other words, there exists $w\in W(v)$ and
$v''\in\mathtt{Sp}(v)$ so that $v'=w(v'')$.
\end{lem}
\begin{proof}
Choose a presentation of $\phi:v\to v'$ as a composition
$\phi=\phi_n\circ\ldots\circ\phi_1$ of reflexions.
If $i$ is the first index for which $\phi_i$ is an anisotropic
reflexion, we can, as in the proof of Proposition~\ref{prp:WSigma},
erase it, replacing reflexions $\phi_j$, $j>i$ with their namesakes
$\psi_j$, so that the target of the composition
$$
\psi_n\circ\ldots\circ\psi_{i+1}\circ\phi_{i-1}\circ\ldots\circ\phi_1:
v\to v''
$$
satisfies the property $s_\alpha(v'')=v'$, for an anisotropic root
$\alpha$ defined by $\phi_i$. Continuing parsing the
decomposition of $\phi$ in this way,
we end up with the required decomposition.
\end{proof}
\subsubsection{Principal reflections}
In the case $p(x)=0$ for all $x$ and for all $v\in\mathcal{R}_0$, the Weyl group
$W$ is known to be generated by simple reflections $s_{b(x)}, x\in X$
for a fixed vertex $v\in\mathcal{R}_0$. This is not true in general, as, for instance, there may exist $v\in\mathcal{R}_0$ for which all $a_{xx}=0$.
Here is what can be said in general.
\begin{dfn}
Fix $v\in\mathcal{R}_0$. A root $\alpha\in\Delta_\mathit{an}(v)$
is called $v$-principal if there exists $v'\in\mathtt{Sp}(v)$
and an element $x\in X$ such that $\alpha=b'(x)$.
A reflection $s_\alpha$ with respect to a $v$-principal root is
called a $v$-principal reflection.
\end{dfn}
One has
\begin{prp}\label{prp:generators}
The Weyl group $W(v)$ is generated by $v$-principal reflections.
\end{prp}
\begin{proof}
Let $\alpha\in\Sigma_\gamma(v)$ be anisotropic where $\gamma:v\to v'=(\mathfrak{h},a',b',p')$
is a composition of reflexions and $\alpha=b'(x)$. We will prove the
claim by induction on length of the presentation of $\gamma$ as a composition of reflexions.
If the sequence consists of isotropic reflexions only, $\alpha$ is
principal and there is nothing to prove. Otherwise there is an
anisotropic reflexion in the sequence. We denote below by $\phi'$ a
composition of isotropic reflexions and by $r_y$ the first anisotropic
reflexion.
$$
v\stackrel{\phi'}{\to}v_1\stackrel{r_y}{\to} v_2\stackrel{\phi}{\to}v'.
$$
Let $v_1=(\mathfrak{h},a_1,b_1,p_1)$ and $\beta=b_1(y)$. By Proposition~\ref{prp:WSigma}, $s_\beta$ carries $v'$ to
a vertex $v''$ obtained as the target of a composition of reflexions
$\psi:v_1\to v''$ having the same indices as the components of
$\phi:v_2\to v'$. We denote $v''=(\mathfrak{h},a'',b'',p'')$ and we get
$b'(x)=s_\beta(b''(x))$. Therefore, $s_\alpha=s_{b'(x)}=s_{s_\beta(b''(x))}=s_\beta s_{b''(x)}s_\beta$, the last equality
by~\ref{crl:deltare-w}. Now $s_\beta$ is principal and $v''$
has a shorter sequence of reflexions connecting it to $v$.
\end{proof}
\begin{rem}
\label{rem:aniso-w}
The proof of \ref{prp:generators} implies that any root
$\alpha\in\Delta_\mathit{an}(v)$ is $W$-conjugate to a principal root.
\end{rem}
\subsection{Modules over a root algebra}
\begin{dfn}
Let $\mathfrak{g}:=\mathfrak{g}(v)$ be a root Lie superalgebra supported at $\mathcal{R}_0$.
A weight $\mathfrak{g}$-module $M$ is, by definition, a $\mathfrak{g}(v)$-module $M$ whose
restriction
to $\mathfrak{h}$ is semisimple.
\end{dfn}
For a weight $\mathfrak{g}$-module $M$ we denote by $\Omega(M)$ the set of weights of $M$.
We will now define integrable $\mathfrak{g}$-modules.
\begin{dfn}
Let $\mathfrak{g}=\mathfrak{g}(v)$ be a root Lie superalgebra.
We say that a weight $\mathfrak{g}$-module $M$ is {\sl integrable}
if $\mathfrak{g}_\alpha$ acts locally nilpotently on $M$
for each anisotropic $\alpha\in\mathit{Del}ta^\mathit{re}$.
\end{dfn}
Note that the adjoint representation of any root Lie superalgebra
is integrable.
Let $\mathfrak{g}$ be a root Lie superalgebra and let $M$ be an integrable
$\mathfrak{g}$-module. Corollary~\mathit{re}f{corgalpha} implies that $\Omega(M)$
is $W$-invariant. Moreover, the multiplicities of the weights $\mu$ and $w(\mu)$
coincide.
The adjoint representation of any root Lie superalgebra $\mathfrak{g}$ is integrable. In particular, the set of roots
$\mathit{Del}ta(\mathfrak{g})$ of any root algebra is $W$-invariant.
\section{Coxeter structures}
\label{sec:coxeter}
\subsection{Introduction}
A Coxeter structure on a group $G$ is a set of elements $s_i\in G$
such that $(G,\{s_i\})$ is a Coxeter group.
A Coxeter structure on a group provides its combinatorial description.
In this section we prove that the Weyl group of any admissible component
$\mathcal{R}_0$ has a Coxeter structure. A somewhat similar combinatorial description can be given to the components of the root groupoid.
\subsubsection{}
\label{sss:notation-coxeter}
Fix an indecomposable admissible component $\mathcal{R}_0$ and $v\in\mathcal{R}_0$.
In what follows we use the notation of \mathit{re}f{sss:skeleton}, suppressing
the parameter $v$ from the notation. Thus, we will
write $\mathfrak{h}$ for $\mathfrak{h}(v)$,
$\Sigma$ for $\Sigma(v)$, and, for $v'\in\mathtt{Sk}(v)$, $\Sigma_{v'}$ for $\Sigma_{v'}(v)$.
Recall that $\Sigma=\{b(x)\}_{x\in X}$ and $Q=\mathtt{Span}_\mathbb{Z}(\Sigma_{v'})$
is independent of $v'$.
We set
$$ Q^+_{v'}:=\mathbb{Z}_{\geq 0}\Sigma_{v'}\subset Q, \ \
\ Q^+:=Q^+_v.$$
\subsection{Coxeter structure of the Weyl group}
Fix a vertex $v\in\mathcal{R}_0$.
Let $\alpha_1,\dots,\alpha_m$ be the set of $v$-principal roots
and $s_i$ be the reflection $s_{\alpha_i}$.
The Weyl group $W$ is generated by $s_i$. We say that $w=s_{i_1}\ldots s_{i_l}$ is a reduced decomposition if it has a minimal length. In this case we say
that $\ell(w)=l$ is the length of $w$.
Let $$C:=\bigcap_{v'\in\mathtt{Sp}(v)}Q^+_{v'}.$$
\begin{lem} Let $\alpha$ be an anisotropic real root.
\begin{enumerate}
\item There is $w\in W$ such that $w(\alpha)$ is $v$-principal.
\item If $\alpha\in Q^+_{v'}$ for some $v'\in\mathtt{Sp}(v)$ then $\alpha\in C$.
\item Either $\alpha\in C$ or $\alpha\in -C$.
\end{enumerate}
\end{lem}
\begin{proof} For (1) see~\mathit{re}f{rem:aniso-w}.
To prove (2) we notice that
$(Q^+_{v'}\setminus Q^+_v)\cap\Delta^\mathit{re}$ consists of isotropic roots.
Now let us show (3). By (1) and (2) it suffices to check that if $\alpha\in C$ and $s_i$ is a principal reflection then $s_i(\alpha)\in C$ or
$s_i(\alpha)\in -C$. Indeed, let $v'$ be a vertex such that $\alpha_i\in \Sigma_{v'}$. Then $s_i(\alpha)\in Q^+_{v'}$ unless
$\alpha=-\alpha_i$.
In the latter case $\alpha\in -C$.
\end{proof}
Claim (2) of the lemma above means that
$$Q^+_{v'}\cap\Delta_\mathit{an}=C\cap\Delta_\mathit{an}.$$
This is the set of positive anisotropic roots (with respect to
any $v'\in\mathtt{Sp}(v)$).
\begin{lem}\label{lem:reduced} Let $w=s_{i_1}\dots s_{i_t}$ and let
$\alpha_i$ be a principal root such that $w(\alpha_i)\in -C$. Then there exists $j$
such that $ws_i=s_{i_1}\dots \hat{s}_{i_j}\dots s_{i_t}$.
\end{lem}
\begin{proof} Define $\beta_k:=s_{i_{k+1}}\dots s_{i_t}(\alpha_i)$
for $k=0,\ldots,t-1$ and $\beta_t:=\alpha_i$. Since $\beta_t\in C$ and $\beta_0\in-C$ there is $j$ such that
$\beta_j\in C$ and $\beta_{j-1}\in -C$.
Hence $\beta_{j}=\alpha_{i_j}$. We get $\alpha_{i_j}=u(\alpha_i)$ for $u:=s_{i_{j+1}}\dots s_{i_t}$. Using the formula $us_{\alpha}u^{-1}=s_{u\alpha}$, see~\mathit{re}f{crl:deltare-w},
we obtain
$$ws_i=s_{i_1}\dots s_{i_{j-1}}(us_iu^{-1})us_i=s_{i_1}\dots \hat{s}_{i_j}\dots s_{i_t}.$$
\end{proof}
\begin{crl}\label{crl: exchange} If $w=s_{i_1}\dots s_{i_l}$ is a reduced decomposition and $\alpha_i$ is a principal root then
\begin{enumerate}
\item $\ell(ws_i)<\ell(w)=l$ if and only if $w(\alpha_i)\in -C$.
\item $w(\alpha_{i_l})\in -C$.
\item If $\ell(ws_i)<\ell(w)$ then for some $j$
$$s_{i_j}\dots s_{i_l}=s_{i_{j+1}}\dots s_{i_l}s_i.$$
\end{enumerate}
\end{crl}
\begin{proof} See \cite{Kbook}, Lemma 3.11.
\end{proof}
\begin{crl} $W$ is a Coxeter group generated by $s_1,\ldots,s_m$.
In the Coxeter relations $(s_is_j)^m=1$ the possible values of
$m$ are $2,3,4,6$ or $\infty$.
\end{crl}
\begin{proof} See~\cite{B}, Th\'eor\`eme 6.1, \S 1, Ch.~4.
If $\alpha$ and $\beta$ are principal roots so that
$s_1=s_\alpha$ and $s_2=s_\beta$, it is easy to see that the
union $W'(\alpha)\cup W'(\beta)$, where $W'$ is the subgroup
of $W$ generated by $s_1$ and $s_2$, is a classical root system of rank $2$. This implies that $m=2,3,4,6$ or $\infty$.
\end{proof}
\begin{crl} If $w(\alpha_i)\in C$ for all $i$ then $w=1$.
\end{crl}
\begin{proof} Follows from~\mathit{re}f{crl: exchange} (2).
\end{proof}
\begin{crl}
\label{crl:Wfree}
Let $v'=w(v)\in\mathtt{Sk}(v)$. If $Q^+_v=Q^+_{v'}$ then $w=1$. In particular, the action of $W$ on $\mathtt{Sk}(v)$ is faithful.
\end{crl}
\begin{proof}
If $Q^+_v=Q^+_{v'}$ then $w(Q^+_v)=Q^+_{v}$ and hence $w(\alpha)\in C$ for all anisotropic $\alpha\in Q^+_v$.
\end{proof}
\subsubsection{}
We denote by $\Delta^+_\mathit{re}(v)$ the set of real roots positive at $v$.
We set $\Delta^+_\mathit{an}(v)=\Delta_\mathit{an}\cap\Delta^+_\mathit{re}(v)$.
\begin{crl}
\label{crl:W-len}
Let $v'=w(v)\in\mathtt{Sk}(v)$. Then $\ell(w)$ is the cardinality
of the set $\mathit{Del}ta^+_\mathit{an}(v)-\mathit{Del}ta^+_\mathit{an}(v')$.
\end{crl}
\begin{proof}
Let $w=s_{i_1}\ldots s_{i_l}$ be a reduced decomposition. Set
$\beta_j=s_{i_1}\ldots s_{i_{j-1}}(\alpha_{i_j})$.
Then $v'=s_{\beta_l}\ldots s_{\beta_1}(v)$ and $\mathit{Del}ta^+_\mathit{an}(v)-\mathit{Del}ta^+_\mathit{an}(v')=\{\beta_1,\ldots,\beta_l\}$.
\end{proof}
{
\begin{crl}
\label{crl:Spiff}
For $v'\in \mathtt{Sk}(v)$ there exists a unique $v''\in\mathtt{Sp}(v)$ and $w\in W$
such that $v'=w(v'')$. The cardinality
of the set $\mathit{Del}ta^+_\mathit{an}(v)-\mathit{Del}ta^+_\mathit{an}(v')$ is equal to $\ell(w)$.
\end{crl}
\begin{proof}
The existence of $v'',w$ follows from
Lemma~\mathit{re}f{lem:decomposition0}.
An isotropic reflection does not change the set $\Delta^+_{\mathit{an}}$, so
$\mathit{Del}ta^+_{\mathit{an}}(v'')=\mathit{Del}ta^+_{\mathit{an}}(v)$ and the required formula for
$\ell(w)$ follows from~\mathit{re}f{crl:W-len}.
For the uniqueness assume that $v'=w_1(v_1)=w_2(v_2)$
for $v_1,v_2\in \mathtt{Sp}(v)$. Then $w_1^{-1}w_2(v_2)=v_1$
and $\mathit{Del}ta^+_{\mathit{an}}(v_1)=\mathit{Del}ta^+_{\mathit{an}}(v_2)$, so $\ell(w_1^{-1}w_2)=0$.
Thus $w_1=w_2$ and $v_1=v_2$ as required. \end{proof}
}
\subsection{Skeleton as a graph}
\label{skeleton_property}
A structure similar to the Coxeter structure on the Weyl group
exists also on admissible components of the root groupoid. We fix
$v_0\in\mathcal{R}$ and study a combinatorial structure of $\mathtt{Sk}(v_0)$. Note that, from the algebraic point of view, $\mathtt{Sk}(v_0)$ is a contractible groupoid, so it may be seen as something lacking any interest. However,
its arrows are compositions of reflexions, and remembering these
reflexions makes a lot of sense. In this subsection we present
a description of the shortest path length in this graph, similar to
the one given in~\mathit{re}f{crl:W-len}. {It has a nice application
to the description of the group $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$ in \mathit{re}f{ss:autv}.}
In Section~\mathit{re}f{sec:coxeter2} we
study a Coxeter property of $\mathtt{Sk}(v)$.
\subsubsection{}
We look at the skeleton $\mathtt{Sk}(v_0)$ as the graph where the reflexions
connect the vertices. Thus, the reflexions are the edges of our graph.
We color the edges by elements of $\mathfrak{h}^*=\mathfrak{h}(v_0)^*$:
a reflexion $v\xrightarrow{r_{x}}v'$ is colored by the real root
$\alpha=-b(x)=b'(x)$. Note that $\mathit{Del}ta^+_\mathit{re}(v')$ is obtained
from $\mathit{Del}ta^+_\mathit{re}(v)$ by replacing the (existing) root $-\alpha$
with $\alpha$.
For a path
$$v_0\xrightarrow{r_{x_1}}v_1\xrightarrow{r_{x_2}}\dots \xrightarrow{r_{x_{t-1}}}v_{t-1}\xrightarrow{r_{x_{t}}}v_{t}=v'$$
colored by the sequence $(\alpha_1,\ldots,\alpha_t)$ we have
\begin{equation}\label{Delta+rev'}
\Delta^+_{\mathit{re}}(v')=\bigl(\Delta^+_{\mathit{re}}(v)\cup\{\alpha_i\}_{i=1}^t\bigr)
\setminus \{-\alpha_i\}_{i=1}^t.\end{equation}
In particular, if a path is colored by the sequence $(\alpha_1,\ldots,\alpha_t)$
with $\alpha_t=\alpha_1$, then there exists $i$ such that
$\alpha_i=-\alpha_1$.
We will start with an obvious remark.
\subsubsection{Remark}
\label{axyyx=0}
Let $v\xrightarrow{r_{x}}v'$ be a reflection.
If $a_{xy}=a_{yx}=0$ and $x\ne y$, then
$a'(y)=a(y)$, $b'(y)=b(y)$ and the $y$th rows (and the $y$th columns)
of the Cartan matrices $A_v,A_{v'}$ are equal.
\begin{lem}
\label{lem:independent}
Given a path $v_0\mathit{st}ackrel{r_x}{\to} v_1\mathit{st}ackrel{r_y}{\to} v_2$ colored by $(\alpha,\beta)$, $\alpha\ne-\beta$, the
following conditions are equivalent.
\begin{itemize}
\item[(1)] $\alpha-\beta\not\in\mathit{Del}ta^\mathtt{C}$ (the set of roots of
$\mathfrak{g}^\mathtt{C}$).
\item[(2)] There exists a path $v_0\mathit{st}ackrel{r_y}{\to} v_3\mathit{st}ackrel{r_x}{\to} v_2$ colored by
$(\beta,\alpha)$.
\end{itemize}
\end{lem}
\begin{proof}
If (2) is fulfilled, both $\alpha$ and $\beta$ are simple roots at $v_2$, so their difference is not a root. Let us prove that
(1) implies (2). We have $\alpha, -\beta\in\Sigma_{v_1}$,
$-\alpha\in\Sigma_{v_0}$ and $\beta\in\Sigma_{v_2}$.
We will denote by $A^i=(a^i_{xy})$ the Cartan matrix at $v_i$
and we will write $a_i(z)$ and $b_i(z)$ instead of $a_{v_i}(z)$ and $b_{v_i}(z)$.
If $\beta$ is anisotropic, $\langle\alpha,\beta^\vee\rangle=0$ as
otherwise both $s_{-\beta}(\alpha)=\alpha-\langle\alpha,\beta^\vee\rangle\beta$ and $\alpha$ are roots, which would imply that
$\alpha-\beta$ is also a root. This implies that $a^1_{xy}=0$. If
$\beta$ is isotropic, we still have $a^1_{xy}=0$ as otherwise $r_y$
would carry $\alpha=b_1(x)$ to $\alpha-\beta$ that is not a root.
Thus, by admissibility, $a^1_{yx}=0$. Using Remark~\mathit{re}f{axyyx=0},
we deduce $-\beta\in\Sigma_{v_0}$ and $\alpha\in\Sigma_{v_2}$ so that
$$b_0(x)=-\alpha, b_0(y)=-\beta$$
$$b_1(x)=\alpha, b_1(y)=-\beta\textrm{ and } a_1(y)=a_0(y)$$
$$b_2(x)=\alpha,\ b_2(y)=\beta\textrm{ and } a_2(x)=a_1(x).$$
We will show that $x$ is reflectable at $v_2$, $y$ is reflectable at $v_3$ and $r_y\circ r_x$ carries $v_2$ to $v_0$. This will give the square in $\mathtt{Sp}(v)$ shown in the picture.
$$
\xymatrix{
& & v_1\ar^{r_y}_\beta[rd] & \\
& v_0\ar^{r_x}_\alpha[ru]&& v_2\ar^{r_x}_{-\alpha}[ld]\\
&& v_3\ar^{r_y}_{-\beta}[lu] &
}
$$
Reversing the lower reflexions, we get the required result.
Note that reflectability of $x\in X$ at $v$ is determined by the
$x$-th row of the Cartan matrix at $v$. By~\mathit{re}f{axyyx=0} the $x$-row of $A^2$ is equal to the $x$-row of $A^1$, so $x$ is reflectable at $v_2$.
Since $b_1(x)=b_2(x)$, $a_1(x)=a_2(x)$ and the $x$th row
(resp., $x$th column)
of $A^2$ is equal to the $x$th row (resp., $x$th column) of $A^1$ we have
$$b_0(z)-b_1(z)=b_3(z)-b_2(z),\ \ \ a_0(z)-a_1(z)=a_3(z)-a_2(z).$$
Once more, by ~\mathit{re}f{axyyx=0} applied to $r_x:v_2\to v_3$, the $y$ row of $A^3$ is equal to the $y$-row of $A^2$, so $y$ is reflectable at $v_3$. It remains to show that $r_y$ carries $v_3$ to $v_0$.
Since $b_2(y)=b_3(y)$, $a_2(y)=a_3(y)$ and the $y$th row
(resp., $y$th column)
of $A^3$ is equal to the $y$th row (resp., $y$th column) of $A^2$, we have
$$b_1(z)-b_2(z)=b'_0(z)-b_3(z),\ \ \ a_1(z)-a_2(z)=a'_0(z)-a_3(z).$$
Therefore, $b'_0(z)=b_0(z)$ and $a'_0(z)=a_0(z)$. Hence $v'_0=v_0$.
\end{proof}
\begin{lem}
\label{lem:pre-exchange}
Let
$$v_0\xrightarrow{r_{x_1}}v_1\xrightarrow{r_{x_2}}\dots \xrightarrow{r_{x_s}}v_s$$
be a path in $\mathtt{Sp}(v)$ colored by a sequence $(\alpha_1,\ldots,\alpha_s)$
with the property $\alpha_i\not=-\alpha_j$ for $i\not=j$.
Assume that $\alpha=b_{v_0}(x)=b_{v_s}(y)$ is isotropic.
Then $\alpha-\alpha_i\not\in\mathit{Del}ta^{\mathtt{C}}$, $x=y$ and
$b_{v_i}(x)=\alpha$, $a_{v_i}(x)=a_{v_0}(x)$ for all $i$.
\end{lem}
\begin{proof}
Set $\beta:=\alpha-\alpha_1$. Let us show that $\beta$ is not a root.
Assume the contrary. Then
$\beta$ is even and
$\frac{\beta}{2}$ is not a root.
Since {the set of even positive roots $\beta$ such that
$\frac{\beta}{2}$ is not a root }
is preserved by isotropic reflections,
$\beta=\alpha-\alpha_1\in \mathit{Del}ta^+_{v_s}$. Therefore
$\alpha_1\in -\mathit{Del}ta^+_{v_s}$.
Since $\alpha_1\in \mathit{Del}ta^+_{v_1}$, there should exist $1<i\leq s$ such
that $\alpha_i=-\alpha_1$, a contradiction.
Since $\beta\not\in\mathit{Del}ta^\mathtt{C}$, we have
$b_{v_1}(x)=b_{v_0}(x)=\alpha$ and $a_{v_1}(x)=a_{v_0}(x)$.
Now the assertion follows by induction in $s$.
\end{proof}
The following result describes an exchange property for a sequence of
isotropic reflections.
\begin{prp}
\label{prp:spine-com}
Let
$$v_0\xrightarrow{r_{x_1}}v_1\xrightarrow{r_{x_2}}\dots \xrightarrow{r_{x_d}}v_d\xrightarrow{r_{x_{d+1}}}v_{d+1}$$
be a path in $\mathtt{Sp}(v)$ colored by a sequence $(\alpha_1,\ldots,\alpha_{d+1})$
with the property $\alpha_{d+1}=-\alpha_1$ and $\alpha_i\not=-\alpha_j$
for $1\leq i<j\leq d$. Then
$x_{d+1}=x_1$ and there is a sequence of isotropic reflections
$$v_0\xrightarrow{r_{x_2}}v'_2\xrightarrow{r_{x_3}}\dots \xrightarrow{r_{x_{d-1}}}v'_{d-1}\xrightarrow{r_{x_d}}v_{d+1}$$
colored by the sequence $(\alpha_2,\ldots,\alpha_d)$.
\end{prp}
\begin{proof}
We apply Lemma~\mathit{re}f{lem:pre-exchange}
to the sequence of reflexions
$v_1\xrightarrow{r_{x_2}}\dots \xrightarrow{r_{x_d}}v_d$
and the root $\alpha:=\alpha_1$. We deduce that
$\alpha_1-\alpha_2\not\in\mathit{Del}ta^\mathtt{C}$. This implies
that, by Lemma~\mathit{re}f{lem:independent}, one can replace the sequence $v_0\to v_1\to v_2$ with $v_0\to v_2'\to v_2$ and then a simple induction gives the required result.
\end{proof}
\begin{rem}
Lemma \mathit{re}f{lem:pre-exchange}
implies that for $v,v'$ in $\mathtt{Sp}(v_0)$ we have
$$b_v(x)=b_{v'}(y)\in \mathit{Del}ta_{\mathit{iso}}\ \ \Longrightarrow\ \ x=y, a_v(x)=a_{v'}(y).$$
In \ref{ss:s21b} below
we will see that $b_v(x)=b_{v'}(y)\in \Delta_{\mathit{an}}$ implies
neither $x=y$ nor $a_v(x)=a_{v'}(y)$.
\end{rem}
\begin{crl}
\label{crl:unique-in-sk}
Let $v'\in\mathtt{Sk}(v)$ satisfy $\mathit{Del}ta^+_\mathit{re}(v')=\mathit{Del}ta^+_\mathit{re}(v)$.
Then $v'=v$. In particular, if a homothety can be presented as a composition of reflexions, it is the identity.
\end{crl}
\begin{proof}
By Lemma~\mathit{re}f{lem:decomposition0} there exist $v''\in\mathtt{Sp}(v)$ and
$w\in W$ such that $v'=w(v'')$.
The sets of positive anisotropic roots at $v$ and at $v''$ coincide as none of them can become negative under an isotropic reflection. Therefore, $w=1$ by \mathit{re}f{crl:W-len}. This implies that
$v'\in\mathtt{Sp}(v)$. Let
$$v=v_0\xrightarrow{r_{x_1}}v_1\xrightarrow{r_{x_2}}\dots \xrightarrow{r_{x_d}}v_d=v'$$
be a sequence of isotropic reflections colored by a sequence $(\alpha_1,\ldots,\alpha_d)$. Since $\Delta^+_\mathit{re}(v')=\Delta^+_\mathit{re}(v)$,
the formula (\ref{Delta+rev'}) implies $\alpha_i=-\alpha_j$ for some $i,j$. Then by~\ref{prp:spine-com} the sequence of isotropic reflections can be shortened.
\end{proof}
\begin{dfn}
For two vertices $v,v'\in\mathtt{Sk}(v_0)$ the distance $d(v,v')$ is defined
to be the minimal number of reflexions in the decomposition of the arrow
$v\to v'$.
\end{dfn}
\begin{crl}
\label{crl:Sk-len}
For $v,v'\in\mathtt{Sk}(v_0)$ the distance $d(v,v')$ is the cardinality
of $\mathit{Del}ta^+_\mathit{re}(v)-\mathit{Del}ta^+_\mathit{re}(v')$.
\end{crl}
\begin{proof}
If the difference $\mathit{Del}ta^+_\mathit{re}(v)-\mathit{Del}ta^+_\mathit{re}(v')$ is nonempty,
it has an element that is a simple root $\alpha$ at $v$ that can be replaced with $-\alpha$ by a reflection. Continuing this, we can get,
after the required number of steps, a vertex $v''$ having the same
$\mathit{Del}ta^+_\mathit{re}(v'')$ as $\mathit{Del}ta^+_\mathit{re}(v')$. Then by~\mathit{re}f{crl:unique-in-sk} $v''=v'$.
\end{proof}
Note the following description of non-reflectable roots.
\begin{crl}\label{crl:rereflectable}
$\Delta_\mathit{nr}=\Delta^\mathit{re}\setminus(-\Delta^\mathit{re})$.
\end{crl}
\begin{proof}
Obviously, if $\alpha$ is isotropic or anisotropic,
$-\alpha\in\mathit{Del}ta_\mathit{re}$. Let us assume that $-\alpha\in\mathit{Del}ta_\mathit{re}$,
$\alpha\in\Sigma_v$ and $-\alpha\in\Sigma_{v'}$. By formula
(\mathit{re}f{Delta+rev'}) any path connecting $v$ with $v'$ contains an edge
where $\alpha$ becomes negative. This proves reflectability of
$\alpha$.
\end{proof}
\subsubsection{Weyl vector}
\label{sss:weylvector}
Choose $\rho_v\in\mathfrak{h}^*$ such that
\begin{equation}
\label{eq:rho-v}
2\langle\rho_v, a_v(x)\rangle =\langle b_v(x), a_v(x)\rangle
\end{equation}
for all $x\in X$. For each $v'\in \mathtt{Sk}(v)$ we define
$$\rho_{v'}:=\rho_v+\sum_{\alpha\in \mathit{Del}ta^+_{\mathit{an}}(v')-\mathit{Del}ta^+_{\mathit{an}}(v)}\alpha
-\sum_{\alpha\in \mathit{Del}ta^+_{\mathit{iso}}(v')-\mathit{Del}ta^+_{\mathit{iso}}(v)}\alpha.
$$
Note that the formula~(\mathit{re}f{eq:rho-v}) holds for all $v'\in\mathtt{Sk}(v)$.
\begin{Rem}
If $\rho_v=\rho_{v_0}$ and $v\in \mathtt{Sp}(v_0)$, then $v=v_0$.
\end{Rem}
The collection of $\rho_{v'}$, $v'\in\mathtt{Sk}(v)$, is called the Weyl
vector. The choice of $\rho_v$ is not unique. Weyl vectors play an important role in Lie theory.
\subsection{Structure of $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$ }
\label{ss:autv}
The action of $W(v)$ on $\mathtt{Sk}(v)$ extends to an action of $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$.
\begin{prp}
\label{prp:Aut-action-skeleton}
There is a unique action of $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$ on $\mathtt{Sk}(v)$ such that
for any $u\in\mathtt{Sk}(v)$ and $\gamma\in\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$,
$
b_{\gamma(u)}(x)=\gamma(b_u(x)).
$
\end{prp}
\begin{proof}
Uniqueness follows from \mathit{re}f{crl:unique-in-sk}. It is therefore sufficient to verify that for each $u\in\mathtt{Sk}(v)$ and $\gamma\in\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$ there is $u'\in\mathtt{Sk}(v)$ satisfying the
property $b_{u'}(x)=\gamma(b_u(x))$. We proceed as follows. We present
$\gamma=\gamma''\circ\gamma'$ where $\gamma':v\to v'$ is a composition
of reflexions and $\gamma''$ is a composition of a homothety with a tautological arrow. Choose a path
$$
v=v_0\mathit{st}ackrel{r_{x_1}}{\to}\ldots\mathit{st}ackrel{r_{x_k}}{\to}v_k=u
$$
of reflexions connecting $v$ with $u$. Since the Cartan data at
$v$ and at $v'$ are $D$-equivalent, there is a namesake path
$$
v'=v'_0\mathit{st}ackrel{r_{x_1}}{\to}\ldots\mathit{st}ackrel{r_{x_k}}{\to}v'_k=u'
$$
defining $u'\in\mathtt{Sk}(v)$. One obviously has $b_{u'}(x)=\gamma(b_u(x))$
which proves the claim.
\end{proof}
\begin{crl}
\label{crl:Aut-action-roots}
The action of $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$ on $\mathfrak{h}^*$ preserves $\mathit{Del}ta^\mathit{re}$,
as well as $\mathit{Del}ta_\mathit{iso}$, $\mathit{Del}ta_\mathit{an}$, $\mathit{Del}ta_\mathit{nr}$.
\end{crl}
\begin{proof}
The first claim follows from the formula
$b_{\gamma(u)}(x)=\gamma(b_u(x))$. The rest follows from the fact
that $u$ and $u'=\gamma(u)$ have $D$-equivalent Cartan data.
\end{proof}
The group $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$ has a trivial part which we now describe.
\begin{dfn}
An automorphism $\gamma\in\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$ is called irrelevant if it can be presented as a composition of a homothety and a tautological arrow.
\end{dfn}
The group of irrelevant automorphisms identifies with
\begin{equation}
K(v)=\{\theta:\mathfrak{h}\to\mathfrak{h}|\forall x\in X\ \theta(a(x))\in\mathbb{C}^*a(x),\theta^*(b(x))=b(x)\}.
\end{equation}
\begin{lem}
\label{lem:Kisnormal}
$K(v)$ is a normal subgroup of $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$.
\end{lem}
\begin{proof}
$K(v)$ is the kernel of the action of $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$ on $\mathit{Del}ta^\mathit{re}$.
\end{proof}
We can easily describe the image of $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$ in the automorphisms
of $\mathtt{Sk}(v)$. The description of the action presented above implies
that the automorphism of $\mathtt{Sk}(v)$ defined by $\gamma\in\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$
is uniquely determined by the target $v'$ of $\gamma':v\to v'$ where
$\gamma'$ is the composition of reflexions appearing in the decomposition of $\gamma$. The vertex $v'\in\mathtt{Sk}(v)$ has the Cartan datum
$D$-equivalent to that of $v$. This identifies the image of $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$ with the set of the vertices on $\mathtt{Sk}(v)$ satisfying this property.
\subsubsection{}
We denote by $\mathtt{Sk}^D(v)$ the subset of (the vertices of) $\mathtt{Sk}(v)$ consisting of the vertices whose Cartan data are $D$-equivalent to that of $v$. The set $\mathtt{Sk}^D(v)$ is endowed with the group structure induced from
the group structure on $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$. It is combinatorially described
using ``namesake path'' construction described in the proof of
Proposition~\mathit{re}f{prp:Aut-action-skeleton}. By construction we have
an isomorphism
\begin{equation}
\label{eq:skd}
\mathrm{op}eratorname{Aut}_\mathcal{R}(v)/K(v)=\mathtt{Sk}^D(v).
\end{equation}
The composition $W(v)\mathit{st}ackrel{i}{\to}\mathrm{op}eratorname{Aut}_\mathcal{R}(v)\to\mathtt{Sk}^D(v)$
is injective as $W(v)\cap K(v)$ is trivial by~\mathit{re}f{rem:uniqueness}.
\subsubsection{}
The group $\mathtt{Sk}^D(v)$ has a subgroup $\mathtt{Sp}^D(v)$ defined as the subset
of $\mathtt{Sk}^D(v)$ belonging to $\mathtt{Sp}(v)$.
The following proposition summarizes what we know about the structure
of the automorphism group.
\begin{prp}
\label{prp:structure-Aut}
\begin{itemize}
\item[1.] $W(v)\subset\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$ is a normal subgroup.
\item[2.] $K(v)\subset \mathrm{op}eratorname{Aut}_\mathcal{R}(v)$ is a normal subgroup.
\item[3.] There is a canonical isomorphism $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)/K(v)=\mathtt{Sk}^D(v)$.
\item[4.] $\mathtt{Sk}^D(v)=W(v)\rtimes\mathtt{Sp}^D(v)$.
\end{itemize}
\end{prp}
\begin{proof}Only Claim 4 needs proof.
The intersection $W(v)\cap\mathtt{Sp}^D(v)$ is trivial. Indeed, let $v'=w(v)\in\mathtt{Sp}^D(v)$. Any isotropic reflexion preserves the set of positive
anisotropic roots, so $\mathit{Del}ta^+_\mathit{an}(v)=\mathit{Del}ta^+_\mathit{an}(v')$. Thus, $w=1$
by~\mathit{re}f{crl:W-len}.
Every automorphism $\phi:v\to v$ decomposes as
$$
v\mathit{st}ackrel{\psi}{\to}v'\mathit{st}ackrel{\eta}{\to}v
$$
where $\psi$ is a composition of reflexions and $\eta$ is a composition
of a homothety with a tautological arrow. By~\mathit{re}f{lem:decomposition0}
$\psi$ decomposes as $v\mathit{st}ackrel{\rho}{\to}v''\mathit{st}ackrel{\gamma_w}{\to}v'$
where $\rho$ denotes a composition of isotropic reflexions and
$\gamma_w$
is the unique arrow in $\mathtt{Sk}(v)$ connecting
$v''$ with $v'=w(v'')$.
The Cartan data of $v'=w(v'')$ and $v''$ are $D$-equivalent
(actually, the same), so $\mathtt{Sk}^D(v)$ is generated by $W$ and $\mathtt{Sp}^D$.
\end{proof}
\begin{crl}
\label{crl:invariantideals}
Let $\mathfrak{g}^\mathtt{U}$ be the universal root algebra at a component $\mathcal{R}_0$,
$v\in\mathcal{R}_0$. An ideal
$J(v)$ of $\mathfrak{g}^\mathtt{U}(v)$ such that $J(v)\cap\mathfrak{h}=0$ defines a root algebra
$\mathfrak{g}$ having the
$v$-component $\mathfrak{g}(v)=\mathfrak{g}^\mathtt{U}(v)/J(v)$ if and only if it is invariant
with respect to $\mathtt{Sp}^D(v)$. In particular, if $\mathcal{R}_0$ has no isotropic
reflexions, any ideal of $\mathfrak{g}^\mathtt{U}$ having zero intersection with $\mathfrak{h}$
defines a root algebra.
\end{crl}
\begin{proof}
By~\mathit{re}f{sss:invariantideals} one has to verify that $J(v)$ is invariant
with respect to any $\gamma\in\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$. We will verify that any
ideal is invariant with respect to the action of $W(v)$ and of $K(v)$.
The Weyl group is generated by reflections that are inner automorphisms
by formula (\mathit{re}f{eq:sigma:gtog}). So, the Weyl group preserves all
ideals. Any $\gamma\in K(v)$ preserves the weights, so it preserves
the weight spaces. Thus, it multiplies by a constant each $\mathfrak{g}^\mathtt{U}_\alpha$
where $\alpha$ is a simple root or its opposite. Since any root $\beta$
of $\mathfrak{g}^\mathtt{U}$ is either a sum of simple roots or a sum of their opposites,
$\gamma$ acts on each $\mathfrak{g}^\mathtt{U}_\beta$ by multiplication by a constant.
Since any ideal of $\mathfrak{g}^\mathtt{U}(v)$ is a sum of its weight subspaces, any
$\gamma\in K(v)$ preserves it. Proposition~\mathit{re}f{prp:structure-Aut}
now implies the claim.
\end{proof}
We will see in~\mathit{re}f{rootalg} that for all admissible fully reflectable indecomposable components
$\mathcal{R}_0$, except for $\mathfrak{g}l(1|1)$, any ideal $J(v)$ of $\mathfrak{g}^\mathtt{U}(v)$ having zero intersection
with $\mathfrak{h}$ is automatically $\mathtt{Sp}^D(v)$-invariant and therefore gives rise to a root algebra.
\begin{crl}
\label{crl:all-different}
Assume that no Cartan data at different vertices of $\mathtt{Sp}(v)$ are
$D$-equivalent. Then $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$ is the direct product of the Weyl group $W$ and the subgroup $K$ of irrelevant automorphisms.
If, moreover, the Cartan matrix $A(a,b)$ at $v$ has no zero rows and
$\dim\mathfrak{h}=2|X|-\operatorname{rk} A(a,b)$ is minimal possible, $K(v)$ is a commutative unipotent group.
\end{crl}
\begin{proof}
Under the assumption, $\mathtt{Sp}^D(v)$ is trivial and so $\mathtt{Sk}^D(v)=W(v)$.
Since $W(v)$ is a normal subgroup of $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)$, one has a direct
decomposition $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)=W(v)\times K(v)$.
\end{proof}
\begin{prp}
\label{prp:finsuper}
Let $\mathcal{R}_0$ be an admissible component with finite dimensional
$\mathfrak{g}^\mathtt{C}\ne\mathfrak{g}l(n|n)$. Then $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)=W(v)$.
\end{prp}
\begin{proof}
By~\cite{S1} the conditions of Corollary~\mathit{re}f{crl:all-different} are fulfilled. The rest follows from triviality of the group $K$.
\end{proof}
Note that for $\mathfrak{g}^\mathtt{C}=\mathfrak{g}l(n|n)$ one has $\mathrm{op}eratorname{Aut}_\mathcal{R}(v)=W(v)\rtimes \mathbb{Z}_2$, see~\mathit{re}f{sss:glmn}.
\subsubsection{Example}
\label{sss:gl12}
Look at the root datum containing the root algebra $\mathfrak{g}l(1|2)$.
Here $X=\{1,2\}$,
$\mathfrak{h}=\mathtt{Sp}an\{e,h_1,h_2\}$ and $\mathfrak{h}^*=\mathtt{Sp}an\{\epsilon,\delta_1,\delta_2\}$
(the dual basis), the spine $\mathtt{Sp}(v_0)$ has three vertices
\begin{itemize}
\item[$v_0$:] $a(1)=-e-h_1$, $a(2)=h_1-h_2$, $b(1)=\epsilon-\delta_1$, $b(2)=\delta_1-\delta_2$, $p(1)=1$, $p(2)=0$;
\item[$v_1$:] $a(1)=e+h_1$, $a(2)=-e-h_2$, $b(1)=\delta_1-\epsilon$, $b(2)=\epsilon-\delta_2$, $p(1)=p(2)=1$;
\item[$v_2$:] $a(1)=h_1-h_2$, $a(2)=e+h_2$, $b(1)=\delta_1-\delta_2$, $b(2)=\delta_2-\epsilon$, $p(1)=0$, $p(2)=1$.
\end{itemize}
The Weyl group $W(v_0)$ has two elements, with the nonunit interchanging
$\delta_1$ with $\delta_2$. The group $\mathrm{op}eratorname{Aut}_\mathcal{R}(v_0)$ coincides with
$W(v_0)$ by~\mathit{re}f{prp:finsuper}.
\section{The Coxeter property of the skeleton}
\label{sec:coxeter2}
In this section we define Coxeter graphs and prove that the skeleton $\mathtt{Sk}(v)$ satisfies this
property. The notion of Coxeter graph generalizes that of a Coxeter group. The Cayley graph
of a group $G$ with respect to a set of generators $S=\{s_i\}$ is Coxeter iff $(G,S)$
is a Coxeter group. There are, however, Coxeter graphs that do not come from Coxeter groups.
It is an interesting question to describe all finite Coxeter graphs.
\subsection{Coxeter graphs}
Let $X$ be a finite set, $G$ a graph with the set of vertices $V$ and the set of edges $E$,
endowed with a marking $r:E\to X$. We assume that $G$ is connected
and that edges having a common end have different markings. We denote by $r_x:v\to v'$ the edge connecting $v$ and $v'$ marked with $x$. By the assumption, for a chosen $v$ such an edge, if it exists, is unique.
Note that $r_x$ comes with a choice of direction for the edge connecting $v$ and $v'$.
A path $\phi:v\to v'$ consists of a sequence of arrows
$$
v=v_0\mathit{st}ackrel{r_{x_1}}{\to}\dots\mathit{st}ackrel{r_{x_n}}{\to}v_n=v'.
$$
We denote $\ell(\phi)=n$ the length of $\phi$.
The path $\phi^{-1}:v'\to v$ is obtained from $\phi$ by changing the direction of all
arrows.
\begin{dfn}
A Coxeter loop $\phi:v\to v$ is one of the following.
\begin{itemize}
\item[1.] $\phi=r_x^2$ (These are called the trivial loops.)
\item[2.] $\phi=(r_y\circ r_x)^m$.
(These are called the loops of length $2m$).
\end{itemize}
\end{dfn}
\begin{dfn}
Let $\phi,\psi:v\to v'$ be a pair of paths. If the concatenation $\psi^{-1}\circ\phi$
is a Coxeter loop, we will say that one has an elementary Coxeter modification $\phi\Rightarrow\psi$.
\end{dfn}
\begin{dfn}
A Coxeter modification from $\phi$ to $\psi$ is a presentation
$\phi=\phi_1\circ\phi_2\circ\phi_3$, $\psi=\psi_1\circ\psi_2\circ\psi_3$ such that
$\phi_1=\psi_1$, $\phi_3=\psi_3$ and one has an elementary Coxeter modification $\phi_2\Rightarrow\psi_2$.
\end{dfn}
\begin{dfn}
A marked graph $(X,G,r)$ is called Coxeter if any pair of paths from $v$ to $v'$
can be connected by a sequence of Coxeter modifications.
\end{dfn}
\subsubsection{}
As an example, take a group $\Gamma$ generated by a set $S$ of elements with $s^2=1$.
Let $G$ be the corresponding Cayley graph, where the vertices are $g\in\Gamma$, $X=S$, and $g$ and $h$ are connected by the edge marked by $s$ if $g=hs$.
Then $\Gamma$ is a Coxeter group iff $G$ is a Coxeter graph.
Let $v\in\mathcal{R}$. We look at the skeleton $\mathtt{Sk}(v)$ as marked graph, with the reflection
$r_x$ marked with $x\in X$.
Conversely, one has the following easy result.
\begin{prp} Let $(X,G,r)$ be a Coxeter graph such that for any $v\in V$
and $x\in X$ there exists an edge $r_x:v\to v'$. Then $(X,G,r)$ is
the Cayley graph of a Coxeter group if and only if for any pair $x,y\in X$ the length $2m_{xy}$ of $(x,y)$ loop
$\phi=(r_y\circ r_x)^{m_{xy}}:v\to v$
is independent of $v$.
\end{prp}
\begin{proof}
The necessity of the condition is clear. Define $\Gamma$ as the Coxeter group generated by $s_x,\ x\in X$ subject to the relations
$(s_xs_y)^{m_{xy}}=1$. The isomorphism of $(X,G,r)$ with the Cayley graph
of $\Gamma$ is defined by an arbitrary choice of a vertex $v\in V$ and
the assignment of $s_x$ to $r_x$. The Coxeter property of the graph implies that
any two paths $v\to v'$ in $G$ define the same image in $\Gamma$.
\end{proof}
Here is our main result.
\begin{thm}
\label{thm:skeleton-coxeter}
\begin{itemize}
\item[1.]$\mathtt{Sk}(v)$ is a Coxeter graph.
\item[2.] Nontrivial Coxeter loops may have length $2m$ where $m=2,3,4$ or $6$.
\end{itemize}
\end{thm}
The proof of the theorem is based on a presentation of the skeleton
$\mathtt{Sk}(v)$ as the $1$-skeleton of a convex polyhedron. In the following subsection we present basic facts about convex polyhedra.
In \mathit{re}f{ss:proof-skeleton-coxeter} we construct a polyhedron having
$\mathtt{Sk}(v)$ as its $1$-skeleton. This easily implies Theorem~\mathit{re}f{thm:skeleton-coxeter}.
\begin{rem} Note that in the case when $\mathcal{R}_0$ is fully reflectable and all reflexions are anisotropic the skeleton $\mathtt{Sk}(v)$ is isomorphic to the
Cayley graph of the Weyl group.
\end{rem}
\subsection{Convex polyhedra: generalities}
\subsubsection{Polytopes}
Recall that a polytope $P$ in a real finite dimensional affine space
$E$ is defined as the convex hull of a finite set of points.
The dimension of $P$ is, by definition, the dimension of the affine span of $P$.
A polytope $P$ of dimension $n$ has stratification
$P=P_0\sqcup\ldots\sqcup P_n$,
where $P_n$ is the interior of $P$ in its affine span and $P_k$ for $k<n$ consists of points $v$ for which the intersection of all supporting hyperplanes at $v$ has dimension $k$.
Thus, $P_0$ is the set of vertices of $P$ and $P$ is the convex hull of $P_0$.
\subsubsection{Polyhedra}
In this paper we use a slightly generalized notion of
a convex polyhedron. We collect all necessary material here.
\begin{Dfn}
A polyhedron $\mathcal{P}$ in $E$ is a closed convex set such that any $v\in\mathcal{P}$
has a neighborhood isomorphic to a neighborhood of a point
of a polytope.~\footnote{The isomorphism is meant to be given by an
affine transformation.
}
\end{Dfn}
The dimension of a polyhedron is the dimension of its affine span.
The stratification of points of a convex polytope extends to
a stratification of a polyhedron: one has
$\mathcal{P}=\mathcal{P}_0\sqcup\dots\sqcup\mathcal{P}_n$
where $\mathcal{P}_n$ is the interior of $\mathcal{P}$ in its affine span and
$\mathcal{P}_k$ consists of the points for which the intersection of all supporting hyperplanes has dimension $k$. In particular, $\mathcal{P}_0$ is
the set of vertices of $\mathcal{P}$. This is a discrete subset of $E$, not necessarily finite. Moreover, $\mathcal{P}$ is in general not a convex hull
of $\mathcal{P}_0$.
For any $v\in\mathcal{P}_{n-1}$ there is a unique supporting hyperplane at $v$.
Its intersection with $\mathcal{P}$ is a face of dimension $n-1$. Each of them is a polyhedron of dimension $n-1$ and their union is $\partial\mathcal{P}$.
The following notation is used below. A linear hyperplane $H\subset V$
and $v\in E$ define an affine hyperplane $v+H$. The complement $V\setminus H$ consists of two components; their closures are the halfspaces defined by $H$ and denoted by $H^+$ and $H^-$. In the same manner $v+H^+$ denotes
the affine halfspace.
Note that $\mathcal{P}$ coincides with the intersection of the affine halfspaces
$v+H^+$ defined by the faces of $\mathcal{P}$ of maximal dimension.
\begin{dfn}
Let $A$ be the set of supporting hyperplanes $v_\alpha+H_\alpha$ of $\mathcal{P}$
and let $v+H_\alpha^+$ be the affine halfspaces containing $\mathcal{P}$. The cone
of $\mathcal{P}$, $C(\mathcal{P})$ is defined as the intersection
$\cap_{\alpha\in A}H^+_\alpha.$
\end{dfn}
Obviously, if $A_0\subset A$ satisfies the condition
$\mathcal{P}=\cap_{\alpha\in A_0}(v_\alpha+H^+_\alpha)$ then
$C(\mathcal{P})=\cap_{\alpha\in A_0} H^+_\alpha$. In particular, $C(\mathcal{P})$ is
the intersection of the linear halfspaces $H_\alpha$ defined by
the $(n-1)$-faces of $\mathcal{P}$.
Note that by definition $C(\mathcal{P})$ is a convex cone in $V$ and $\mathcal{P}$ is invariant under the action of $C(\mathcal{P})$: for $\xi\in\mathcal{P}$ and $\eta\in C(\mathcal{P})$ one has $\xi+\eta\in\mathcal{P}$.
\begin{lem}
\begin{itemize}
\item[1.] If $C(\mathcal{P})\ne\{0\}$ then $\partial\mathcal{P}$ is contractible.
\item[2.] $C(\mathcal{P})=\{0\}$ iff $\mathcal{P}$ is compact.
\item[3.] $\mathcal{P}$ is compact iff it is a polytope.
\end{itemize}
\end{lem}
\begin{proof}
Choose an interior point $\zeta\in\mathcal{P}$ and define the projection from
$\partial\mathcal{P}$ to the unit sphere $S$ with the center at $\zeta$ by the formula $$\phi(\xi):=(\zeta+\mathbb R^+(\xi-\zeta))\cap S.$$ Since $\mathcal{P}$ is convex, $\phi$ is injective. From the definition of $\mathcal{P}$ we see that $\xi\in S$ is not in the image of $\phi$ iff $\xi\in\zeta- C(\mathcal{P})$. Set $U=(\zeta- C(\mathcal{P}))\cap S$. The restriction of $\phi$ to any $(n-1)$-face
is a stereographic projection. Since any point of $\mathcal{P}$ has a neighborhood isomorphic to a neighborhood of a point of a polytope,
the map $\phi$ is an open embedding and so it defines a homeomorphism of
$\partial\mathcal{P}$ with $S\setminus U$.
If $C(\mathcal{P})\ne\{0\}$, $U$ is a nonempty convex subset of $S$, so
$S\setminus U$ is contractible. This proves Claim 1.
To prove Claim 2, note that $C(\mathcal{P})$ acts on $\mathcal{P}$: if $c\in C(\mathcal{P})$
and $p\in\mathcal{P}$ then $p+c\in\mathcal{P}$. Therefore, if $C(\mathcal{P})\ne\{0\}$, $\mathcal{P}$ cannot be compact. On the contrary, if $C(\mathcal{P})=\{0\}$, $\partial\mathcal{P}$
is homeomorphic to a sphere, so it is compact. $\mathcal{P}$ is the convex hull
of its boundary, so it is also compact.
Finally, if $\mathcal{P}$ is compact
then it is a convex hull of its boundary that is a finite union of
compact polyhedra of smaller dimension. This implies that $\mathcal{P}$ is the convex hull of the set of its vertices.
\end{proof}
The only result we need in our study of Coxeter property of the skeleton
is the following.
\begin{crl}
\label{crl:h1}
For any polyhedron $\mathcal{P}$ {of dimension $>2$} one has $H^1(\partial\mathcal{P})=0$.
\end{crl}
\qed
\subsection{A polyhedron defined by $\mathtt{Sk}(v)$}
\label{ss:proof-skeleton-coxeter}
Let $\mathcal{R}_0$ be an admissible component of a root groupoid, $n=|X|$ and $\mathtt{Sk}(v)$ a skeleton.
Let $Q_{\mathbb R}:=Q\otimes_{\mathbb Z}\mathbb R$ and for any vertex $u$ of $\mathtt{Sk}(v)$ set $Q^+_{u,\mathbb R}:=\sum_{\alpha\in\Sigma_u}\mathbb R^+\alpha$.
\begin{lem}
\label{lem:lambda}
There exists an injective map $\lambda:\mathtt{Sk}(v)\to Q$, $u\mapsto \lambda_u$ such that
$$\lambda_u-\lambda_{u'}=\sum_{\alpha\in \Delta_{re}^+(u)-\Delta_{re}^+(u')}\alpha.$$
\end{lem}
\begin{proof} Choose $\lambda_v=0$, and set $$\lambda_u:=\sum_{\alpha\in \Delta_{re}^+(u)-\Delta_{re}^+(v)}\alpha.$$
Here we use Corollary 5.3.7 and Corollary 5.3.9 of the main text to check injectivity of $\lambda$.
\end{proof}
We define
\begin{equation}
\label{eq:thepolyhedron}
\mathcal{P}=\bigcap_{u\in\mathtt{Sk}(v)}(\lambda_u- Q^+_{u,\mathbb R})
\end{equation}
and
\begin{equation}
\label{eq:q++}
Q^{++}_\mathbb{R}=\bigcap_{u\in\mathtt{Sk}(v)}Q^+_{u,\mathbb R}.
\end{equation}
\begin{prp}
\label{prp:ppolyhedron}
$\mathcal{P}$ is a polyhedron in $Q_\mathbb{R}$ and $C(\mathcal{P})=-Q^{++}_\mathbb{R}$.
\end{prp}
\begin{proof}
Set $\lambda_v=0$. Let $f$ be the linear function on $Q_\mathbb{R}$ such that $f(b_x(v))=1$ for all $x\in X$. Denote
$$H_N:=\{\xi\in Q_{\mathbb R}\mid f(\xi)= N\},\ H_N^+:=\{\xi\in Q_{\mathbb R}\mid f(\xi)\geq N\},
$$
$$\mathcal{P}_N:=\mathcal{P}\cap H_N^+,\ \ \mathtt{Sk}_N(v):=\{u\in\mathtt{Sk}(v)\mid f(\lambda_u)\geq N\},\ \ \mathcal{Q}_N:=H^+_N\cap\bigcap_{u\in \mathtt{Sk}_N(v)}(\lambda_u- Q^+_{u,\mathbb R}).
$$
The following claims are obvious:
\begin{enumerate}
\item $\mathtt{Sk}_N(v)$ is finite (the vertices are in $-Q^+(v)$).
\item $\mathcal{P}=\bigcup_{N<0}\mathcal{P}_{N}$,
\item $\mathcal{P}_N\subset\mathcal{Q}_N$,
\item $\mathcal{Q}_N$ is a convex polytope (compact, bounded by finitely many hyperplanes).
\end{enumerate}
We intend to show that $\mathcal{P}_N=\mathcal{Q}_N$ and that the vertices of the
polytope $\mathcal{P}_N$ belonging to $H^+_N\setminus H_N$ are precisely
$\{\lambda_u|f(\lambda_u)>N\}$. This implies that $\mathcal{P}$ is a polyhedron.
In fact, for $\mu\in\mathcal{P}$ choose $N$ so that
$f(\mu)>N$. Then $\mu\in\mathcal{P}_N=\mathcal{Q}_N$, so $\mu$ has a neighborhood
that is a neighborhood in a polytope.
Note that all $\lambda_u$ are vertices of $\mathcal{P}$ since there is
a hyperplane in $Q_\mathbb{R}$ intersecting $\mathcal{P}$ at one point $\lambda_u$.
For the same reason all $\lambda_u$
satisfying $f(\lambda_u)>N$ are vertices of $\mathcal{Q}_N$.
In order to show that $\mathcal{Q}_N=\mathcal{P}_N$, it is sufficient to verify that
any vertex $\mu$ of $\mathcal{Q}_N$ belongs to $\mathcal{P}$.
The 1-skeleton of $\mathcal{Q}_N$ is connected, so it is enough to verify that
any edge of $\mathcal{Q}_N$ connecting $\lambda_u$ with another vertex $\mu$,
belongs to $\mathcal{P}$. We know all edges of $\mathcal{Q}_N$ in a neighborhood of
$\lambda_u$: they are just $b_u(x)$, $x\in X$. If $x$ is reflectable at $u$, there is an arrow $r_x:u\to u'$, and $\mu$ lies
on the segment connecting $\lambda_u$ with $\lambda_{u'}$. If $b_u(x)$ is
non-reflectable, $b_u(x)\in Q^{++}_{\mathbb R}$, so $\lambda_u-\mathbb{R}^+ b_u(x)$ is the
infinite edge of $\mathcal{P}$ containing $\mu$.
The minus sign in the formula for $C(\mathcal{P})$ is due to the minus sign in
the formula (\ref{eq:thepolyhedron}).
\end{proof}
\begin{lem}
Let $\mathcal{P}$ be bounded. Then $\mathcal{R}_0$ is fully reflectable, $\mathtt{Sk}(v)$ is
finite.
\end{lem}
\begin{proof}
$\mathtt{Sk}(v)$ embeds into the intersection of $\mathcal{P}$ with a lattice, therefore, it is finite.
If $x\in X$ is not reflectable at $u\in\mathtt{Sk}(v)$, the root $b_u(x)$
belongs to $Q^+_u$, and, therefore, to all $Q^+_{u'},\ u'\in\mathtt{Sk}(v)$.
This contradicts the condition $Q^{++}_\mathbb{R}=\{0\}$.
\end{proof}
\
We will now be able to describe the faces of $\mathcal{P}$.
Let $Y\subset X$, $|Y|=k$ and $u\in \mathtt{Sk}(v)$. Let $H_Y(u)$ be the affine $k$-plane passing through $\lambda_u$ and spanned by $b_u(y), y\in Y$.
Set $F_Y(u):=\mathcal{P}\cap H_Y(u)$. By definition $F_{\emptyset}(u)=\lambda_u$.
\begin{lem}\label{lem:faces}
\begin{itemize}
\item[1.] Any $k$-dimensional face of $\mathcal{P}$ is of the form $F_Y(u)$
for a certain $u\in\mathtt{Sk}(v)$ and a $k$-element set $Y\subset X$.
\item[2.] One has
$$F_Y(u)=\bigcap_{u'\in \mathtt{Sk}_Y(u)}(\lambda_{u'}-\sum_{y\in Y}\mathbb R^+b_{u'}(y)),
$$
where $\mathtt{Sk}_Y(u)$ denotes the connected component of $u\in\mathtt{Sk}(v)$ in the subgraph spanned by the arrows $r_y$ for $y\in Y$.
\end{itemize}
\end{lem}
\begin{proof} The boundary $\partial\mathcal{P}$ of $\mathcal{P}$ by the proof of
\ref{prp:ppolyhedron} lies in the union of
hyperplanes $H_Y(u)$ for all $(n-1)$-element subsets $Y$ of $X$. It is clear that $\lambda_{u'}\in F_Y(u)$ if and only if $\lambda_{u'}-\lambda _u\in -\sum_{y\in Y}\mathbb R^+b_{u}(y)$. Note that
$\lambda_{u}-\lambda_{u'}=
\sum_{\alpha\in \Delta_{re}^+(u)-\Delta_{re}^+(u')}\alpha$, so
each
$\alpha\in \Delta_{re}^+(u)-\Delta_{re}^+(u')$ lies in the
non-negative span of $b_u(y)$ for $y\in Y$. Consider
the arrow $u\xrightarrow{\gamma}u'$. Write it as $\gamma=r_{x_s}\ldots r_{x_1}$ so that $s$ is the minimal possible. Let us show that all $x_i\in Y$.
Let $\gamma_i=r_{x_i}\dots r_{x_1}$, $\gamma_i:u\to u_i$ and $\beta_i=b_{u_{i-1}}(x_i)$. Choose minimal $i$ such that $x_i\notin Y$.
Then $\beta_i\equiv b_u(x_i)\mod \sum_{y\in Y}\mathbb R b_u(y)$ --- a contradiction.
That proves (2).
Now for $k=n-1$ the statement (1) follows since (2) implies that $F_Y(u)$ has codimension $1$. For general $k$ it follows by induction in codimension.
\end{proof}
\begin{crl}
\label{crl:identification}
The map $\lambda$ as in Lemma~\ref{lem:lambda} establishes a one-to-one correspondence between $\mathtt{Sk}(v)$ and the set of vertices of $\mathcal{P}$.
Moreover, $\mathtt{Sk}(v)$ identifies with the $1$-skeleton of $\mathcal{P}$ so that
the reflexions $r_x:u\to u'$ in $\mathtt{Sk}(v)$ identify with the edges
connecting $\lambda_u$ with $\lambda_{u'}$.
\end{crl}
\begin{crl} \label{lem:twodim}
The two-dimensional face $F_Y(u)$ of $\mathcal{P}$ defined by a two-element
subset $Y$ of $X$ is bounded iff $\mathtt{Sk}_Y(u)$ is the finite skeleton of a
rank 2 fully reflectable component. In this case
$\mathtt{Sk}_Y(u)$ is isomorphic to the Cayley graph of the dihedral group $D_m$ where $m=2,3,4$ or $6$. The noncompact face $F_Y(u)$ has a non-compact
contractible boundary.~\footnote{The set of vertices in it is linearly ordered. If there is a smallest (or greatest) vertex, it has a non-compact
edge corresponding to a non-reflectable root.}
\end{crl}
\begin{proof} The claim immediately follows from Lemma~\ref{lem:faces}. The allowable values for $m$ result from a well-known classification of rank $2$ fully reflectable components with finite skeleton, see, for example, \cite{S3}.
\end{proof}
\subsubsection{Proof of Theorem~\ref{thm:skeleton-coxeter}}
By~\ref{crl:identification} $\mathtt{Sk}(v)$ identifies with the $1$-skeleton
of the polyhedron $\mathcal{P}$. By~\ref{crl:h1} any pair of paths leading
from $u$ to $u'$ in $\mathtt{Sk}(v)$ is connected by relations defined by
compact $2$-faces. Finally, by~\ref{lem:twodim}, compact $2$-faces
give rise to Coxeter relations with $m=2,3,4,6$.
\section{A trichotomy for admissible fully reflectable components}
\label{sec:trichotomy}
\subsection{Overview}
From now on we will consider only indecomposable admissible fully reflectable components.
In this section we define three types of such
components: finite, affine and indefinite. We investigate
the structure of the sets of roots of corresponding root algebras.
Expectedly, the trichotomy for admissible components is closely
connected to the trichotomy for the
types of Cartan matrices defined by Kac in~\cite{Kbook}, Theorem 4.3.
\subsubsection{}
We keep the notation of~\ref{sss:notation-coxeter}.
Fix an indecomposable admissible fully reflectable component $\mathcal{R}_0$ and $v\in\mathcal{R}_0$. Let $\mathfrak{g}$ be a root Lie superalgebra supported at $\mathcal{R}_0$.
We denote by $\Delta=\Delta(\mathfrak{g})$ the set of roots of
$\mathfrak{g}$ and by $\mathfrak{r}$ the kernel of the canonical map $\mathfrak{g}\to\mathfrak{g}^\mathtt{C}$.
Recall that $\mathfrak{r}$ is the maximal ideal of $\mathfrak{g}$ having zero intersection
with $\mathfrak{h}$.
In this section we will deduce certain information about the ideal
$\mathfrak{r}$ for different types of components, see~\ref{crlfin}, \ref{corfindim}. In particular, we will be able to deduce,
for certain types of components, that they admit a unique root Lie superalgebra $\mathfrak{g}^\mathtt{C}$.
\subsection{Roots}
Recall that $\Sigma_{v'}=\{b_{v'}(x)\}_{x\in X}$ and
$Q^+_{v'}:=\mathbb{Z}_{\geq 0}\Sigma_{v'}\subset Q,
\ Q^+:=Q^+_v.$
We have $\Delta\subset (-Q^+\cup Q^+)$.
Recall~\ref{sss:realinall} that
$$
\Delta^\mathit{re}=\bigcup_{v'\in\mathtt{Sk}(v)} \Sigma_{v'}\subset\Delta
$$
and the root spaces
$\mathfrak{g}_{\alpha}$, $\alpha\in\Delta^\mathit{re}$, are one-dimensional, in particular, are purely even or purely odd. This yields a decomposition of the family of real roots into
even and odd part
$$\Delta^\mathit{re}=\Delta^{\mathit{re},0}\sqcup\Delta^{\mathit{re},1}.
$$
For anisotropic $\alpha\in\Delta^\mathit{re}$ the elements
$\alpha^\vee\in\mathfrak{g}\langle\alpha\rangle\cap\mathfrak{h}$ are defined so that
$\langle\alpha,\alpha^\vee\rangle=2$.
We define
$$
\Delta^\mathit{im}=\{\alpha\in \Delta\mid \mathbb{Q}\alpha\cap\Delta^\mathit{re}=\emptyset\}.
$$
For each $v'\in\mathtt{Sk}(v)$ we have
the triangular decompositions
$$\Delta=\Delta^+_{v'}\sqcup (-\Delta^+_{v'}),\ \ \text{
where }\Delta^+_{v'}:=\Delta\cap Q^+_{v'}.$$
\begin{prp}\label{crlDeltare}
\begin{enumerate}
\item
For $v'\stackrel{r_x}{\to}v''$ with $x\in X$,
let $\alpha=b_{v'}(x)$. One has
$$\Delta^+_{v''}=\left\{
\begin{array}{ll}
\{-\alpha\}\cup \Delta^+_{v'} \setminus\{\alpha\} \ & \text{ if } 2\alpha\not\in\Delta\\
\{-\alpha,-2\alpha\}\cup \Delta^+_{v'} \setminus\{\alpha,2\alpha\}\ &
\text{ if } 2\alpha\in\Delta.\\
\end{array}\right.$$
\item
For any $v'$ one has
$\Delta^\mathit{im}\cap \Delta^+_{v'}=\Delta^\mathit{im}\cap \Delta^+_{v}$.
\item
$\Omega(\mathfrak{r})\subset \Delta^\mathit{im}$, except for the rank one
algebra $\widetilde{\mathfrak{g}}=\mathfrak{g}^\mathtt{U}$ with $\mathfrak{g}^\mathtt{C}=\mathfrak{gl}(1|1)$, see~\ref{rank1}.
\item
If $\mathcal{R}_0$ has rank greater than one, then
$$\Delta=\Delta^\mathit{re}\cup\Delta^\mathit{im}\cup
\{2\alpha\mid \alpha\in\Delta^{\mathit{re},1}\ \text{ is anisotropic}\}.$$
\end{enumerate}
\end{prp}
\begin{proof}
Claim (1) is standard and (2) follows from (1). Claims (3) and (4) follow
from~\ref{corgalpha}.
\end{proof}
\subsection{Types of $\mathcal{R}_0$}
\subsubsection{The case of Kac--Moody Lie algebras}
In ~\cite{Kbook}, Thm. 4.3 Kac-Moody Lie algebras are divided in three types according to the corresponding type of Cartan matrices as follows.
Let $V:=\mathbb{R}\otimes_\mathbb{Z} Q$; for $v\in V$ we set $v>0$ (resp., $v\geq 0$) if $v=\sum_{\alpha\in\Sigma} k_{\alpha}\alpha$ with $k_{\alpha}> 0$
(resp., $k_{\alpha}\geq 0$) for each $\alpha\in\Sigma$.
View an indecomposable Cartan matrix $A$ as a linear operator on $V$.
It is given by the formula
$$
A(v)=\sum_iv(\alpha_i^\vee)\alpha_i,\ v\in V.
$$
By~\cite{Kbook}, Thm.4.3,
$A$ satisfies exactly one of the following conditions
\begin{itemize}
\item $\exists v>0$ such that $Av>0$ (type (FIN)).
\item $\exists v>0$ such that $Av=0$ (type (AFF)).
\item $\exists v>0$ such that $Av<0$ (type (IND)).
\end{itemize}
Moreover, one has
\begin{itemize}
\item (FIN) $Au\geq 0$ implies $u>0$ or $u=0$.
\item (AFF) $Au\geq 0$ implies $u\in\mathbb{R}v$.
\item(IND) $Au\geq 0$ with $u\geq 0$ implies $u=0$.
\end{itemize}
It is proven there that the Kac-Moody Lie algebras of type (FIN) are all simple finite-dimensional Lie algebras, the Kac-Moody Lie algebras of type
(AFF) have finite growth: they are always symmetrizable and can be obtained as (twisted) affinizations of simple finite-dimensional Lie algebras.
The Kac-Moody algebras of indefinite type have infinite growth.
We present below a version of this trichotomy in terms of connected components
of root groupoids. The component is required to be indecomposable and
fully reflectable. Note that both conditions hold in the context
of \cite{Kbook}, Thm. 4.3.
\subsubsection{} Let $\mathcal{R}_0$ be a component of the root groupoid with a
fixed vertex $v$ and indecomposable $A(v)$.
Set
$$Q^{++}:=\displaystyle\bigcap_{v'\in\mathtt{Sk}(v)} Q^+_{v'}.$$
{Obviously, $Q^{++}=Q^{++}_\mathbb{R}\cap Q$.}
Note that the sets $\Delta^\mathit{re}$ and $Q^{++}$ depend on the component
$\mathtt{Sk}(v)$ only.
One has $Q^{++}\cap \mathbb{Q}\alpha=0$ for each
$\alpha\in\Delta^\mathit{re}$.
In the definition below we introduce three classes of components
analogous to the classes (FIN), (AFF), (IND) of Cartan matrices
defined in \cite{Kbook}, Thm. 4.3.
\begin{dfn}
\label{dfn:types}
We say that $\mathcal{R}_0$ {\em is of type }
\begin{itemize}
\item[(Fin)] if $Q^{++}=\{0\}$.
\item[(Aff)] if $Q^{++}=\mathbb{Z}_{\geq 0}\delta$
for some $\delta\not=0$.
\item[(Ind)] if $\mathcal{R}_0$ is not of type (Fin) or (Aff).
\end{itemize}
\end{dfn}
\subsubsection{Purely anisotropic case}\label{Deltareisoempty}
Assume that all simple roots $b(x)$ at $v$ are anisotropic. Then
the Cartan matrices $A(v')$ are the same at all $v'\in\mathcal{R}_0$.
Lemma~\ref{WorbitQ+} below shows that in this case the classes (Fin), (Aff) and (Ind) coincide with (FIN), (AFF) and (IND).
Indeed, in this case
$Q^{++}=\bigcap_{w\in W}w(Q^+)$
is the union of $W$-orbits belonging to $Q^+$.
\begin{lem}\label{WorbitQ+}$ $
\begin{itemize}
\item[1.] In the case {\rm(FIN)} the unique $W$-orbit lying in
$Q^+$ is $\{0\}$.
\item[2.]
In the case {\rm(AFF)} all $W$-orbits lying in
$Q^+$ are of the form $\{j\delta\}$ for $j\in\mathbb{Z}_{\geq 0}$
for some $\delta\ne 0$.
\item[3.]
In the case {\rm(IND)} the unique finite $W$-orbit lying in $Q^+$ is
$\{0\}$; $Q^+$ contains an infinite $W$-orbit.
\end{itemize}
\end{lem}
\begin{proof}
Notice that $Au\geq 0$ ($Au=0$) for $u\in V\subset \mathfrak{h}^*$ means
$u(\alpha^{\vee})\geq 0$ (resp., $u(\alpha^{\vee})=0$) for each $\alpha\in\Sigma$.
For $\nu=\sum_{\alpha\in\Sigma}k_{\alpha}\alpha\in Q^+$ set
$\operatorname{ht} \nu:=\sum_{\alpha\in\Sigma}k_{\alpha}$.
Let $\nu\in Q^+$ be such that $W\nu\subset Q^+$
and $\operatorname{ht} \nu$ is minimal in its orbit.
Viewing $\nu$ as an element of $V$ we have $\nu\geq 0$ and
$\operatorname{ht} r_{\alpha}\nu\geq \operatorname{ht}\nu$
for each $\alpha\in\Sigma$. Then $\nu(\alpha^\vee)\leq 0$ for all
$\alpha\in\Sigma$
and therefore $A\nu\leq 0$.
Hence $\nu=0$ in type {\rm(FIN)} and
$\nu$ is proportional to $\delta$ in type {\rm(AFF)}.
In the remaining type {\rm(IND)}, assume $W\nu\subset Q^+$ is finite
and $\operatorname{ht} \nu$ is maximal. Then $\nu(\alpha^\vee)\geq 0$ for all $\alpha$
and, therefore,
$A\nu\geq 0$. Hence $\nu=0$. By the assumption there exists $v>0$ such that $Av<0$. Then $Wv\subset Q^+$ by~\cite{Kbook},
Lemma 5.3 and, by above, this is an infinite orbit.
\end{proof}
\subsubsection{Purely anisotropic components of finite and affine types}
\label{sss:aniso-fin-aff}
If $p(x)=0$ for each $x$, then $\mathfrak{g}^\mathtt{C}$ is a Kac-Moody Lie algebra.
In this case $\mathfrak{g}^\mathtt{C}$ is finite-dimensional if and only if the Cartan matrix $A$ is of type {\rm(FIN)} and a (twisted) affine Lie algebra if $A$ is of type {\rm(AFF)}.
If we do not require all generators to be even, we have an extra
requirement saying that the $x$-row of $A$
consists of even entries if $p(x)=1$.
Therefore, to every anisotropic component one can associate a Kac-Moody Lie algebra by changing the parity of
all generators to $0$. As we showed in the previous subsection, this operation does not change the type of the corresponding components.
We call all contragredient Lie superalgebras obtained in this way from a Kac-Moody Lie algebra $\mathfrak{g}$ {\sl the cousins of } $\mathfrak{g}$.
The Cartan matrices of types (FIN) and (AFF) are
well-known. Let us describe the cases when such a matrix has a row
with even entries.
In the type (FIN) the only such case is the type $B_n$ and it has exactly
one row with even entries. The Kac-Moody Lie algebra with Cartan matrix
$B_n$ is $\mathfrak{so}(2n+1)$ and its cousin is a finite-dimensional
simple Lie superalgebra $\mathfrak{osp}(1|2n)$.
The affine Kac-Moody Lie algebras whose Cartan matrices have at least one row with even entries are $\mathfrak{so}(2n+1)^{(1)}$,
$\mathfrak{sl}(2n+1)^{(2)}$ and
$\mathfrak{so}(2n+2)^{(2)}$. The cousin of $\mathfrak{so}(2n+1)^{(1)}$ is $\mathfrak{sl}(1|2n)^{(2)}$, the cousin of $\mathfrak{sl}(2n+1)^{(2)}$ is
$\mathfrak{osp}(1|2n)^{(1)}$, and $\mathfrak{so}(2n+2)^{(2)}$ has two cousins $\mathfrak{osp}(2|2n)^{(2)}$ and $\mathfrak{sl}(1|2n+1)^{(4)}$, see ~\cite{vdL}
for construction of (twisted) affine superalgebras.
\subsection{Components of type {\rm (Fin)}}
Most of the root Lie superalgebras of finite type have isotropic
roots.
\begin{lem}\label{crlfin}
Assume that $\mathcal{R}_0$ is of type {\rm(Fin)}. Then
\begin{enumerate}
\item $\Delta^\mathit{im}=\emptyset$.
\item $\mathfrak{g}=\mathfrak{g}^\mathtt{C}$ except for the case $\mathfrak{g}^\mathtt{C}=\mathfrak{gl}(1|1)$ (see~\ref{rank1}).
\item $\mathfrak{g}$ is finite-dimensional.
\end{enumerate}
\end{lem}
\begin{proof} (1) follows from \ref{crlDeltare}(4), (2) and (3) from \ref{crlDeltare}(5).
\end{proof}
\begin{crl}\label{corfindim}
If $\dim\mathfrak{g}<\infty$ then $\mathcal{R}_0$ is of type {\rm(Fin)}.
\end{crl}
\begin{proof}
It suffices to check that $\mathtt{Sk}(v)$ contains $v'$ with $\Sigma_{v'}=-\Sigma$
which is equivalent to
$\Delta^+_{v'}(\mathfrak{g}^\mathtt{C})=-\Delta^+_{v}(\mathfrak{g}^\mathtt{C})$.
Since $\dim\mathfrak{g}<\infty$, $\Delta(\mathfrak{g}^\mathtt{C})$ is finite.
For each $v'\in\mathtt{Sk}(v)$ let $k(v')$ be the cardinality of
$\Delta^+_{v'}(\mathfrak{g}^\mathtt{C})\cap \Delta^+_{v}(\mathfrak{g}^\mathtt{C})$.
If $k(v')\not=0$, then $\Delta^+_{v'}(\mathfrak{g}^\mathtt{C})$ does not lie
in $-\Delta^+_{v}(\mathfrak{g}^\mathtt{C})$, so there exists $\alpha\in\Sigma_{v'}$ with
$\alpha\in \Delta^+_{v}(\mathfrak{g}^\mathtt{C})$. By~\ref{crlDeltare}(2),
there is a reflexion $v'\to v''$ that replaces $\alpha$
(and, possibly, $2\alpha$) in $\Delta^+_{v'}$ with $-\alpha$ (and, possibly, $-2\alpha$). This means that
$k(v'')$ is equal to $k(v')-1$ or to $k(v')-2$.
Hence $k(v')=0$ for some $v'\in\mathtt{Sk}(v)$.
\end{proof}
\subsubsection{} The results of C.~Hoyt~\cite{Hoyt}, see~\ref{sss:hoytclass} below, together
with \ref{sss:aniso-fin-aff}, imply that $\mathfrak{g}^{\mathtt{C}}$ of finite type
are: $\mathfrak{gl}(1|1)$ and
all basic classical Lie superalgebras (except
that the simple algebra $\mathfrak{psl}(n|n)$ should be replaced with
$\mathfrak{g}^\mathtt{C}=\mathfrak{gl}(n|n)$). In all cases except
$\mathfrak{gl}(1|1)$ we have $\mathfrak{g}^{\mathtt{C}}=\mathfrak{g}^\mathtt{U}$ by \ref{crlDeltare}(4).
\subsection{Components of type {\rm(Aff)}}
\begin{lem}\label{crlaff}
Let $\mathcal{R}_0$ be of type {\rm(Aff)}. Then
\begin{enumerate}
\item $\Omega(\mathfrak{r})\subset\Delta^\mathit{im}\subset\mathbb{Z}\delta\setminus\{0\}$.
\item $\mathfrak{r}$ lies in the center of $[\mathfrak{g},\mathfrak{g}]$.
\item If $\langle\delta,a(x)\rangle\not=0$ for some $x\in X$ then $\mathfrak{g}=\mathfrak{g}^{\mathtt{C}}$.
\end{enumerate}
\end{lem}
\begin{proof}
Using~\ref{crlDeltare}
we get (1) and $\Omega(\mathfrak{r})\subset\Delta^\mathit{im}\subset\mathbb{Z}\delta\setminus\{0\}$.
Since $\mathfrak{g}=[\mathfrak{g},\mathfrak{g}]+\mathfrak{h}$, $\mathfrak{r}$ lies in $[\mathfrak{g},\mathfrak{g}]$ and $[\mathfrak{g},\mathfrak{g}]$
is generated by $\mathfrak{g}_{\pm\alpha}$ for $\alpha\in\Sigma$. Since $j\delta\pm\alpha\not\in\mathbb{Z}\delta$,
$[\mathfrak{g}_{\pm\alpha},\mathfrak{r}]=0$. This gives $[[\mathfrak{g},\mathfrak{g}],\mathfrak{r}]=0$ and establishes (2).
For (3) assume that $\mathfrak{r}\not=0$. Then $\mathfrak{r}\cap\mathfrak{g}_{j\delta}\not=0$ for some $j\not=0$. Hence
$\mathfrak{g}_{j\delta}$ has a non-zero intersection with the center of
$[\mathfrak{g},\mathfrak{g}]$. Since $a(x)\in [\mathfrak{g},\mathfrak{g}]$ for each $x\in X$ this gives
$\langle\delta,a(x)\rangle=0$.
\end{proof}
\subsubsection{Hoyt's classification}
\label{sss:hoytclass}
Indecomposable contragredient Lie superalgebras with at least one simple isotropic root were classified in~\cite{Hoyt}.
In this subsection we review the results of C.~Hoyt's classification
that will be used in the following sections.
Exactly one of the following options holds in this case:
\begin{enumerate}
\item $\dim \mathfrak{g}^\mathtt{C}<\infty$.
\item $\dim\mathfrak{g}^\mathtt{C}=\infty$ and $\Delta^\mathit{im}=\mathbb Z\delta$,
$\Delta\subset\mathbb Z\delta+\Delta'$
for some finite set $\Delta'\subset \mathfrak{h}^*$ and some $\delta\in\Delta^+$ \footnote{$\Delta^+_v$ depends on $v$; however, since $\delta$ is imaginary, it is positive or negative regardless of the choice of $v\in\mathcal{R}_0$.}. In this case all symmetrizable contragredient Lie
superalgebras are twisted affinizations of simple
finite-dimensional Lie superalgebras. They also appear in Van de Leur's classification of symmetrizable Kac-Moody superalgebras of finite growth.
In addition, there is a one-parameter contragredient superalgebra $S(2,1;a)$ and the twisted affinization $\mathfrak q(n)^{(2)}$ of the strange superalgebra
$\mathfrak{psq}(n)$ for $n\geq 3$. By direct inspection one can check that there exists $m\in\mathbb Z$ such that if $\alpha\in\Delta$ then
$\alpha\pm m\delta\in \Delta$.
\item The algebra $\mathfrak{g}^\mathtt{C}=Q^{\pm}(m,n,t)$ with $\dim(\mathfrak{h})=3$ where
$m,n,t$ are negative integers, not all equal to $-1$, with
non-symmetrizable and nondegenerate Cartan matrices. There are three
linearly independent principal roots, therefore the Weyl group has no non-zero fixed vectors in $\mathfrak{h}^*$.
Hence $Q^{\pm}(m,n,t)$ are of type {\rm(Ind)}. Little is known about Lie superalgebras of this type.
\end{enumerate}
\subsubsection{}
Let $\mathcal{R}_0$ be a component of $\mathcal{R}$ of type (2) in Hoyt's classification~\ref{sss:hoytclass}. We will prove that it is of type {\rm(Aff)}.
\begin{lem}\label{lem_aff-iso} Let $F:=Q_\mathbb{R}^*$ and $\gamma\in F$ satisfy $\langle\gamma,\delta\rangle=1$ and $\langle\gamma,\beta\rangle\neq 0$ for any $\beta\in\Delta$. Then there exists $v\in\mathcal{R}_{0}$
such that $\langle\gamma,\alpha\rangle>0$ for any $\alpha\in\Sigma_v$.
\end{lem}
\begin{proof} Choose a vertex $u\in\mathcal{R}_0$. Let
$$T_u(\gamma)=\{\beta\in\Delta^+_u\mid \langle\gamma,\beta\rangle<0\}.$$
We claim that $T_u(\gamma)$ is finite. Indeed, since $\delta\in \Delta^+_u$ we have $\alpha+M\delta\in \Delta^+_u$ for sufficiently large $M$ and all $\alpha\in\Delta'$ while $\alpha-M\delta\notin \Delta^+_u$. On the other hand, if
we choose $$M>\max\{|\langle\gamma,\alpha\rangle|\mid\alpha\in\Delta'\},$$
then $\langle\gamma,\alpha+s\delta\rangle>0$ for all $s>M$. Thus,
$$T_u(\gamma)\subset \{\alpha+s\delta\mid \alpha\in\Delta', -M\leq s\leq M\}$$
and hence $T_u(\gamma)$ is finite.
Suppose that $u$ does not satisfy the conditions of the lemma. Then there is $x\in X$ such that $ \langle\gamma,b(x)\rangle<0$. Consider
$u\stackrel{r_x}{\longrightarrow}u'$. By Corollary~\ref{crlDeltare}(2) we get
$T_{u'}(\gamma)=T_u(\gamma)\setminus\{b(x)\}$ or $T_u(\gamma)\setminus\{b(x),2 b(x)\}$ if $2b(x)$ is a root. Anyway $|T_{u'}(\gamma)|<|T_u(\gamma)|$. Repeating the argument several times, we end up with a vertex $v$ such that
$T_v(\gamma)=\emptyset$.
\end{proof}
\begin{crl}\label{cor1_aff-iso} If $\mathcal{R}_0$ is of type (2), then $Q^{++}=\mathbb Z_{\geq 0}\delta$ and hence $\mathcal{R}_0$ is of type {\rm(Aff)}.
\end{crl}
\begin{proof} Let $$F_1:=\{\gamma\in F\mid \langle\gamma,\delta\rangle=1\},\quad S_\gamma^+=\{\nu\in Q\mid \langle\gamma,\nu\rangle\geq 0\}.$$
Then by Lemma~\ref{lem_aff-iso}
$$Q^{++}=\cap_{\gamma\in F_1}S_\gamma^+=\mathbb Z_{\geq 0}\delta.$$
\end{proof}
\subsection{}
Combining the results of~\cite{Hoyt} with~\ref{Deltareisoempty}
we obtain the following result.
\begin{prp}
Let $\mathcal{R}_0$ be an indecomposable fully reflectable component.
\begin{enumerate}
\item
The following conditions are equivalent:
\begin{itemize}
\item
$\mathcal{R}_0$ of type {\rm(Fin)};
\item $W$ is finite;
\item
$\dim\mathfrak{g}<\infty$;
\item
$\dim\mathfrak{g}^\mathtt{C}<\infty$.
\end{itemize}
\item
The following conditions are equivalent:
\begin{itemize}
\item
$\mathcal{R}_0$ of type {\rm(Aff)};
\item $W$ is infinite and $\mathfrak{h}^*$ contains a non-zero trivial $W$-orbit.
\end{itemize}
\item
The following conditions are equivalent:
\begin{itemize}
\item
$\mathcal{R}_0$ of type {\rm(Ind)};
\item $\mathfrak{g}$ has an infinite Gelfand-Kirillov dimension.
\end{itemize}
\end{enumerate}
\end{prp}
\begin{rem}
Cartan matrices of components of type (Fin) are usually nondegenerate. The only exception
is $\mathfrak{gl}(n|n)$. Cartan matrices of type (Aff) are always degenerate, usually of corank one.
The only exception is $\mathfrak{sl}(n|n)^{(1)}$ where the corank is two.
\end{rem}
\section{Symmetrizable root data}
\label{sect:sym}
\label{sectKacThm}
We retain the notation of Section~\mathit{re}f{sec:trichotomy}. We continue to assume that all $x\in X$ are reflectable at all $v\in\mathcal{R}_0$. In this section we prove, following a method of Gabber-Kac~\cite{GabberKac},
that if $\mathcal{R}_0$ has a symmetric Cartan matrix (and, therefore,
all Cartan matrices associated to $\mathcal{R}_0$
are symmetrizable) then $\mathfrak{g}^\mathtt{C}$ is the only root algebra, except
for the cases $\mathfrak{g}^\mathtt{C}=\mathfrak{g}l(1|1)$ and $(\rho|\delta)=0$ where $(-|-)$
is the nondegenerate symmetric bilinear form on $\mathfrak{h}^*$
introduced in~\ref{prp:likekac22} and $\rho$ is as in~\ref{sss:weylvector}.
Fix $v\in\mathcal{R}_0$, an admissible component of $\mathcal{R}$. We keep the notation
of Section~\ref{sec:root} for the half-baked algebra
$\widetilde\mathfrak{g}=\widetilde\mathfrak{n}^-\oplus\mathfrak{h}\oplus\widetilde\mathfrak{n}^+$,
a root algebra $\mathfrak{g}$ and the contragredient algebra $\mathfrak{g}^\mathtt{C}=\widetilde\mathfrak{g}/\mathfrak{r}$.
We set $\widetilde{\mathfrak{b}}:=\widetilde{\mathfrak{n}}^+ +\mathfrak{h}$, its image $\mathfrak{b}$ in $\mathfrak{g}$ and $\mathfrak{r}^{\pm}:=\mathfrak{r}\cap\tilde{\mathfrak{n}}^{\pm}$. Note
that $\mathfrak{r}^{\pm}$ are ideals of $\widetilde{\mathfrak{g}}$.
\subsection{Verma modules}
Let $\widetilde{M}(\lambda)$ (resp., $M(\lambda)$, $M^\mathtt{C}(\lambda)$) denote
a Verma module of highest weight $\lambda$ over $\widetilde{\mathfrak{g}}$
(resp., $\mathfrak{g}$, $\mathfrak{g}^\mathtt{C}$). Since $\Omega(\widetilde{M}(\lambda))
\subset\lambda-Q^+$, the module
$\widetilde{M}(\lambda)$ admits a unique maximal proper submodule
$\widetilde{M}'(\lambda)$.
The Verma modules $\widetilde{M}(\lambda)$, $M(\lambda)$, $M^\mathtt{C}(\lambda)$
admit unique simple quotients.
\begin{lem}\label{lemMM}
One has
$$M(\lambda)=\mathcal{U}(\mathfrak{g})\otimes_{\mathcal{U}(\widetilde{\mathfrak{g}})} \widetilde{M}(\lambda).$$
\end{lem}
\qed
\subsection{Embedding of $\mathfrak{r}^-/[\mathfrak{r}^-,\mathfrak{r}^-]$}
The composition
$$\mathfrak{r}^-\hookrightarrow \widetilde{\mathfrak{g}}/\widetilde{\mathfrak{b}}\hookrightarrow
\mathcal{U}(\widetilde{\mathfrak{g}})/\mathcal{U}(\widetilde{\mathfrak{g}})\widetilde{\mathfrak{b}}=\widetilde{M}(0)$$
has the image in $\widetilde{M}'(0)=\bigoplus_{\alpha\in\Sigma}\widetilde{M}(-\alpha)$.
We denote by
\begin{equation}
\label{eq:phifromrf-}
\phi:\mathfrak{r}^-\to \bigoplus_{\alpha\in\Sigma}M^\mathtt{C}(-\alpha)
\end{equation}
the composition of this with the projection
$$
\bigoplus_{\alpha\in\Sigma}\widetilde{M}(-\alpha)\to
\bigoplus_{\alpha\in\Sigma}M^\mathtt{C}(-\alpha).
$$
\begin{prp}
\label{propK911}
The map $\phi$ defined above is a map of $\widetilde\mathfrak{g}$-modules with kernel
$[\mathfrak{r}^-,\mathfrak{r}^-]$.
\end{prp}
\begin{proof}
This result is the main part of the proof of Proposition 9.11 of~\cite{Kbook}.
\end{proof}
\subsubsection{Example}
If $\mathfrak{g}^\mathtt{C}=\mathfrak{s}l_2\times\mathfrak{s}l_2$ with $\Sigma=\{\alpha_1,\alpha_2\}$,
the image of $\phi$ in $M^\mathtt{C}(-\alpha_i)$ is equal to
$M^\mathtt{C}(-\alpha_1-\alpha_2)$.
\
Recall that $\mathfrak{g}^\mathtt{U}$ denotes the universal root algebra.
\begin{crl}\label{crlanotherideal}
Assume that $\bigoplus_{\alpha\in\Sigma} M^\mathtt{C}(-\alpha)$ has no nonzero
integrable subquotients. Then $\mathfrak{g}^\mathtt{U}=\mathfrak{g}^\mathtt{C}$.
\end{crl}
\begin{proof}
Let $\mathfrak{s}=\mathrm{Ker}(\widetilde\mathfrak{g}\to\mathfrak{g}^\mathtt{U})$. Set
$\mathfrak{s}^-:=\widetilde{\mathfrak{n}}^-\cap \mathfrak{s}$. Obviously, $\mathfrak{s}\subset\mathfrak{r}$ so
$\mathfrak{s}^-\subset\mathfrak{r}^-$.
Assume that $\mathfrak{r}^-/\mathfrak{s}^-\ne 0$. This Lie superalgebra is a semisimple
$\mathfrak{h}$-module with the weights belonging to $-Q^+\setminus\{0\}$.
This implies that it does not coincide with its commutator, that is,
that $\mathfrak{r}^-/(\mathfrak{s}^-+[\mathfrak{r}^-,\mathfrak{r}^-])\ne 0$. Since the adjoint representation
of $\mathfrak{g}$ is integrable, $\mathfrak{r}^-/(\mathfrak{s}^-+[\mathfrak{r}^-,\mathfrak{r}^-])$ is a nonzero
integrable $\mathfrak{g}$-module. Using \ref{propK911} we get a nonzero
integrable subquotient in $\bigoplus_{\alpha\in\Sigma} M^\mathtt{C}(-\alpha)$
which contradicts the conditions.
Thus, $\mathfrak{s}^-=\mathfrak{r}^-$, so automatically
$\mathfrak{s}^+=\mathfrak{r}^+$ as the automorphism $\theta$, see~\ref{sss:automorphism}, defined on $\widetilde\mathfrak{g}$, $\mathfrak{g}^\mathtt{U}$ and $\mathfrak{g}^\mathtt{C}$, identifies
$\mathfrak{s}^+$ with $\mathfrak{s}^-$ and $\mathfrak{r}^+$ with $\mathfrak{r}^-$.
\end{proof}
\subsection{Main result}
In this subsection we assume that the Cartan matrix for $r$ is symmetric, i.e.
$$\forall x,y\in X\ \ \ \ \langle b(x),a(y)\rangle=\langle b(y),a(x)\rangle.$$
Note that by \ref{lem:sym-stable} all Cartan matrices at $r'\in\mathcal{R}_0$ are symmetrizable.
By Proposition~\ref{prp:likekac22} \ $\widetilde{\mathfrak{g}}$ admits an invariant
bilinear form such that the restriction of this form on $\mathfrak{h}$ is non-degenerate
and $(a(x)|h)=\langle b(x),h\rangle$ for each $h\in\mathfrak{h}$.
\subsubsection{}
Let us show that $\mathfrak{r}$ coincides with the kernel of this form. Indeed,
since the kernel is an ideal and the restriction of $(-|-)$ on $\mathfrak{h}$ is
non-degenerate, the kernel lies in $\mathfrak{r}$.
Since $(\widetilde{\mathfrak{g}}_{\alpha}|\widetilde{\mathfrak{g}}_{\beta})=0$
for $\alpha+\beta\not=0$, one has $(\mathfrak{h}|\mathfrak{r})=0$. Thus
$$\mathfrak{r}^{\perp}:=\{g\in\widetilde{\mathfrak{g}}|\ (g|\mathfrak{r})=0\}$$
is an ideal containing $\mathfrak{h}$, so $\mathfrak{r}^{\perp}=\widetilde{\mathfrak{g}}$, that is
$\mathfrak{r}$ lies in the kernel of $(-|-)$~\footnote{For symmetrizable Kac-Moody algebras this was earlier noted in~\cite{SchV}.}.
Thus, the algebra $\mathfrak{g}^\mathtt{C}$ inherits a non-degenerate invariant
bilinear form having the properties listed in \ref{prp:likekac22}.
\begin{thm}{}
\label{thm:symmetric-g-gkm}
Let $\mathcal{R}_0$ be symmetrizable and let $\mathfrak{g}$ be a root Lie superalgebra.
Then $\mathfrak{g}=\mathfrak{g}^\mathtt{C}$, except for the cases $\mathfrak{g}l(1|1)$ and {\rm(Aff)} with
$(\rho|\delta)=0$.
\end{thm}
\begin{proof}
The symmetric nondegenerate bilinear form on $\mathfrak{g}^\mathtt{C}$ allows one to define a {\em Casimir operator}, see~\cite{Kbook}, 2.5.
This operator acts on $M^\mathtt{C}(\lambda)$ by $(\lambda|\lambda+2\rho)\cdot\mathrm{id}$.
This implies
\begin{equation}
\label{Casimir}
[M^\mathtt{C}(\lambda):L^\mathtt{C}(\mu)]\not=0\Longrightarrow
(\lambda|\lambda+2\rho)=(\mu|\mu+2\rho).
\end{equation}
Assume that $\mathfrak{r}\not=\mathfrak{s}$.
By~\ref{crlanotherideal}, for some
$\alpha\in\Sigma$ there is a non-zero homomorphism
$$\mathfrak{r}^-\to M^\mathtt{C}(-\alpha).$$
Hence $M^\mathtt{C}(-\alpha)$ admits an integrable subquotient $L^\mathtt{C}(\mu)$
for some $\mu$.
Since $L^\mathtt{C}(-\alpha)$ is a subquotient of $M^\mathtt{C}(0)$,
the formula (\ref{Casimir}) gives
\begin{equation}
\label{rhonu}
(\mu|\mu+2\rho)=0.
\end{equation}
If $\mathcal{R}_0$ is of type (Fin) and not $\mathfrak{g}l(1|1)$ then $\mathfrak{r}=\mathfrak{s}$ by~\ref{crlfin} (1).
Let us consider the case when $\mathcal{R}_0$ is of type (Aff). By~\ref{crlfin} (2),
$\mu=j\delta$
for some $j\in\mathbb{Z}_{>0}$ and $\delta(h)=0$
for each $h\in\mathfrak{h}\cap [\mathfrak{g},\mathfrak{g}]$. Therefore
$(\delta|\alpha)=0$ for each $\alpha\in\Sigma$. This gives
$(\delta|\delta)=0$. Using~(\ref{rhonu}), we get
$h^{\vee}_v=2(\rho|\delta)=0$.
It remains to consider the component $\mathcal{R}_0$ of type (Ind).
By~\cite{Hoyt}, the algebras $Q^{\pm}(m,n,t)$ are not symmetrizable.
The rest of indefinite types satisfy $\Delta_\mathit{iso}=\emptyset$. Then $a_{xx}\not=0$
for each $x\in X$ and $a_{xy}=a_{yx}$.
It is easy to see that we can choose $v\in\mathcal{R}_0$ in such a way that
$a_{xx}\in\mathbb{Z}_{>0}$. Then the integrability gives
$(\mu|\alpha)\geq 0$ for each $\alpha\in \Sigma$.
Since $-\mu\in Q^+$ and $\mu\not=0$, we obtain
$(\mu|\rho)<0, (\mu|\mu)<0$, a contradiction to (\ref{rhonu}).
\end{proof}
\section{The affine case}
\label{sect:aff}
\subsection{}In this section we prove the following result.
\begin{thm}
\label{thm:UKM-ns}
Let $\mathcal{R}_0$ be an indecomposable component of type
(Aff).
If $\mathcal{R}_0$ is of type $A(n-1|n-1)^{(1)}$ (resp., $A(2n-1|2n-1)^{(2)}$,
$A(2n|2n)^{(4)}$), then
$\mathfrak{g}^\mathtt{U}=\mathfrak{sl}(n|n)^{(1)}$ (resp., $\mathfrak{g}^\mathtt{U}=\mathfrak{sl}(2n|2n)^{(2)}$,
$\mathfrak{sl}(2n+1|2n+1)^{(4)}$).
If $\mathcal{R}_0$ is of type $\mathfrak q(n)^{(2)}$ then $\mathfrak{g}^\mathtt{U}=\mathfrak{sq}(n)^{(2)}$.
In the rest of the cases $\mathfrak{g}^\mathtt{U}=\mathfrak{g}^{\mathtt{C}}$.
\end{thm}
Let us first notice that for $S(2,1,b)$ Lemma~\ref{crlaff} (3) and
\ref{sss:s21b}
imply $\mathfrak{g}^\mathtt{U}=\mathfrak{g}^{\mathtt{C}}$.
In all other cases we define for any root algebra $\mathfrak{g}$ its subfactor
$\bar{\mathfrak{g}}:=[\mathfrak{g},\mathfrak{g}]/Z(\mathfrak{g})$.
Then
$\bar{\mathfrak{g}}^{\mathtt{C}}=[\mathfrak{g}^\mathtt{C},\mathfrak{g}^\mathtt{C}]/Z(\mathfrak{g}^\mathtt{C})$ is isomorphic to the twisted loop algebra $\mathcal{L}(\mathfrak{s})^{\sigma}$ for some simple superalgebra $\mathfrak{s}$ and an automorphism $\sigma$ of finite
order $m$. In particular, $\bar\mathfrak{g}^\mathtt{C}$ is perfect.
The superalgebra $\mathfrak{s}$ is basic classical, exceptional or $\mathfrak{p}\mathfrak{s}\mathfrak q_n$.
Its even part $\mathfrak{s}_{\bar 0}$, therefore, is a reductive Lie algebra.
Let $\mathfrak{h}'$ be the even part of the Cartan subalgebra of $\mathfrak{s}$. One can choose $\sigma$ so that $\sigma(\mathfrak{h}')=\mathfrak{h}'$.
Furthermore, if $k\delta$ is an even root and $\varepsilon=e^{\frac{2\pi i}{m}}$ then
$$\bar{\mathfrak{g}}^\mathtt{C}_{k\delta}=\{h\otimes t^k\mid h\in\mathfrak{h}',\sigma(h)=\varepsilon^k h\}.$$
The cohomology group $H^i(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)$ has a natural structure of $\mathfrak{h}$-module. We write
$H^i(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)_\mu$ for the cohomology group of weight $\mu$ with
respect to $\mathfrak{h}$-action.
\begin{lem}\label{lem:extension} For every $k\neq 0$
$$\dim\mathfrak{g}^{\mathtt{U}}_{k\delta}-\dim\mathfrak{g}^{\mathtt{C}}_{k\delta}=\dim H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)_{k\delta}.$$
\end{lem}
\begin{proof} Let $\hat\mathfrak{g}$ be the graded central extension of $\bar{\mathfrak{g}}^{\mathtt{C}}$ given by the exact sequence
$$0\to \bigoplus_{k\neq 0}H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)^*_{k\delta}\to \hat\mathfrak{g}\to \bar{\mathfrak{g}}^{\mathtt{C}}\to 0.$$
Take the pullback
$$0\to \bigoplus_{k\neq 0}H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)^*_{k\delta}\to \hat\mathfrak{g}'\to [{\mathfrak{g}}^{\mathtt{C}},{\mathfrak{g}}^{\mathtt{C}}]\to 0,$$
and then extend to the exact sequence
$$0\to \bigoplus_{k\neq 0}H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)^*_{k\delta}\to \mathfrak{g}\to {\mathfrak{g}}^{\mathtt{C}}\to 0$$
using the semidirect product decomposition
$\mathfrak{g}^{\mathtt{C}}=\mathfrak{t}\ltimes [{\mathfrak{g}}^{\mathtt{C}},{\mathfrak{g}}^{\mathtt{C}}]$ where $\mathfrak{t}\subset\mathfrak{h}$ is a suitable abelian subalgebra.
We claim that $\mathfrak{g}$ is a root algebra. Indeed, we just have to check the relations \ref{sss:half} at every vertex $v\in\mathcal{R}_0$. The only
non-trivial relation is $[\tilde e_x,\tilde f_y]=0$ for $x\neq y$. This is equivalent to $b(x)-b(y)\neq k\delta$ and the latter follows
from $k\delta\in Q^+(v)$ for positive $k$ and $k\delta\in -Q^+(v)$ for negative $k$.
Finally, let us prove that $\mathfrak{g}=\mathfrak{g}^{\mathtt{U}}$. Indeed, by~\ref{crlaff} the kernel $\mathfrak{k}$ of the map $\mathfrak{g}^{\mathtt{U}}\to\mathfrak{g}$ lies in the center of $[\mathfrak{g}^{\mathtt{U}},\mathfrak{g}^{\mathtt{U}}]$
and is a direct sum $\bigoplus_{k\neq 0}\mathfrak{k}_{k\delta}$. Therefore $\mathfrak{g}^{\mathtt{U}}=\mathfrak{g}$.
\end{proof}
\subsubsection{}
Let $\delta$ have degree $d$ in the standard grading of $\mathcal{L}(\mathfrak{s})^{\sigma}$.
The base change $H^2(\mathfrak{s},\mathbb{C})\to H^2(\mathcal{L}(\mathfrak{s}),\mathbb{C}[t,t^{-1}])$ composed
with the linear map $\mathbb{C}[t,t^{-1}]\to\mathbb{C}$ carrying $\sum c_it^i$ to $c_{kd}$, yields a homomorphism
\begin{equation}
\label{eq:h2map}
H^2(\mathfrak{s},\mathbb{C})\to H^2(\mathcal{L}(\mathfrak{s})^\sigma,\mathbb{C}).
\end{equation}
It is given on $2$-cocycles by the
formula
\begin{equation}
\label{eq:2cocycles}
\tilde c(x\otimes t^a,y\otimes t^b)=\delta_{kd,a+b}c(x,y).
\end{equation}
Let $\bar\mathfrak{g}^{\mathtt{C}}=\mathcal{L}(\mathfrak{s})^\sigma$ and $\mathfrak{h}$ be a Cartan subalgebra of $\mathfrak{g}^{\mathtt{C}}$. Set
$\mathfrak{h}^{\circ}:=\ker\delta$. Then $\mathfrak{h}^\circ$ acts on $\mathfrak{s}$ and therefore on $H^2(\mathfrak{s},\mathbb{C})$. We denote by $H^2(\mathfrak{s},\mathbb{C})^{\circ}$ the
$\mathfrak{h}^{\circ}$-invariant subspace.\footnote{In most cases $\mathfrak{h}^\circ=(\mathfrak{h}')^\sigma$ and $H^2(\mathfrak{s},\mathbb{C})^{\circ}=H^2(\mathfrak{s},\mathbb{C})$.
The only case $\mathfrak{h}^\circ\neq(\mathfrak{h}')^\sigma$ is when the Cartan matrix of $\mathfrak{g}^{\mathtt{C}}$ has corank $2$ and that happens for $\mathfrak{s}=\mathfrak{psl}(n|n)$,
$n\geq 2$ and $\sigma=\mathrm{id}$.} The automorphism $\sigma$ acts on
$H^2(\mathfrak{s},\mathbb C)^{\circ}$ and induces a $\mathbb Z/m\mathbb Z$-grading.
\begin{lem}\label{lem:redfindim} If $k\delta$ is an even root and $kd\equiv p\mod m$ then the homomorphism (\ref{eq:h2map}) induces an isomorphism
$H^2(\mathfrak{s},\mathbb C)^{\circ}_{p}\simeq H^2(\mathcal{L}(\mathfrak{s})^{\sigma},\mathbb C)_{k\delta}$.
\end{lem}
\begin{proof} The correspondence between the weight spaces follows from formula (\ref{eq:2cocycles}). Injectivity of the map is straightforward. To prove surjectivity it suffices to show that every class in
$H^2(\mathcal{L}(\mathfrak{s})^{\sigma},\mathbb C)_{k\delta}$ is represented by a cocycle $\varphi$ such that
\begin{equation}
\label{eq:goodcocycle}
\varphi(x\otimes t^{a-m},y\otimes t^{b+m})=\varphi(x\otimes t^{a},y\otimes t^{b})
\end{equation}
for all $a,b\in\mathbb{Z}$ and $x,y\in\mathfrak{s}$.
The Lie algebra $\mathfrak{s}'=[\mathfrak{s}_{\bar 0},\mathfrak{s}_{\bar 0}]$ is semisimple.
The corresponding twisted affine Lie algebra $\hat\mathfrak{s}'$ is symmetrizable and, therefore, $(\hat\mathfrak{s}')^\mathtt{U}=(\hat\mathfrak{s}')^\mathtt{C}$.
By Lemma~\ref{lem:extension} $H^2(\mathcal{L}(\mathfrak{s}')^\sigma,\mathbb C)_{k\delta}=0$. On the other hand $\mathcal{L}(\mathfrak{s})_{\bar 0}^\sigma=\mathcal{L}(\mathfrak{s}')^\sigma\oplus \mathfrak{a}$ for some abelian Lie algebra
$\mathfrak{a}$. Thus, we can choose $\varphi$ so that
$\varphi(\mathcal{L}(\mathfrak{s}')^\sigma,\mathcal{L}(\mathfrak{s})^\sigma_{\bar 0})=0$.
Since $k\delta$ is an even root, $\varphi$ is an even cocycle, so
$\varphi(\mathcal{L}(\mathfrak{s}')^\sigma,\mathcal{L}(\mathfrak{s})^\sigma)=0$.
In particular, for every $h\in(\mathfrak{h}'\cap\mathfrak{s}')^\sigma$ we have $\varphi(h\otimes t^m,\mathcal{L}(\mathfrak{s})^\sigma)=0$. Let $\alpha$ be a non-zero weight of
$\mathfrak{s}$ with respect to
$(\mathfrak{h}'\cap\mathfrak{s}')^\sigma$ and $x\in \mathfrak{s}_\alpha, y\in \mathfrak{s}_{-\alpha}$, we can choose
$h$ so that $\alpha(h)\ne 0$. Then the cocycle condition
$$d\varphi(x\otimes t^{a-m},y\otimes t^{b},h\otimes t^m)=0$$
implies (\ref{eq:goodcocycle}) for $x\in \mathfrak{s}_\alpha, y\in \mathfrak{s}_{-\alpha}$. Since the $\mathfrak{s}_{\alpha}$ for all nonzero weights $\alpha$ generate $\mathfrak{s}$
and $\varphi(x\otimes t^a,y\otimes t^b)=0$ for $x\in\mathfrak{s}_\alpha$
and $y\in\mathfrak{s}_\beta$ with $\alpha+\beta\ne 0$,
one proves the desired identity for all $x,y$ using linearity and the cocycle condition.
\end{proof}
Lemma~\ref{lem:redfindim} implies Theorem~\ref{thm:UKM-ns} in all
cases when $\delta$ is an even root. If $\mathfrak{s}\ne\mathfrak{psl}(n|n)$ or $\mathfrak{p}\mathfrak{s}\mathfrak q(n)$, $H^2(\mathfrak{s},\mathbb C)=0$ and then $\mathfrak{g}^\mathtt{U}=\mathfrak{g}^\mathtt{C}$.
If $\mathfrak{s}=\mathfrak{psl}(n|n)$ or $\mathfrak{p}\mathfrak{s}\mathfrak q(n)$,
$H^2(\mathfrak{s},\mathbb C)^{\circ}=\mathbb C$, see, for instance, \cite{S4}. This gives the cases
$\mathfrak{g}^\mathtt{U}=\mathfrak{s}l(n|n)^{(1)}$ and $\mathfrak{g}^\mathtt{U}=\mathfrak{s}l(2n|2n)^{(2)}$.
The only cases left are $\mathfrak{g}^\mathtt{C}=\mathfrak{p}\mathfrak{s}l(2n+1|2n+1)^{(4)}$ and $\mathfrak{p}\mathfrak{s}\mathfrak q(n)^{(2)}$ where $\delta$ is an odd root.
For these remaining cases the theorem will follow from the lemma below.
\begin{lem}\label{lem:oddextensions} If $\mathcal{R}_0$ is of type $A(2n|2n)^{(4)}$ or $\mathfrak q(n)^{(2)}$ then
$H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)_{k\delta}=0$ for any odd $k$.
\end{lem}
\begin{proof} First let us deal with $A(2n|2n)^{(4)}$. In this case $\mathfrak{s}=\mathfrak{psl}(2n+1|2n+1)$, $m=4$ and we can choose $\sigma$ so that
$\mathfrak{s}^{\sigma}=\mathfrak{so}(2n+1)\oplus\mathfrak{so}(2n+1)$. We will establish
an isomorphism $H^2(\mathfrak{s},\mathbb C)^\circ_{p}\simeq H^2(\mathcal{L}(\mathfrak{s})^{\sigma},\mathbb C)_{k\delta}$ for odd $k$. As in the proof of
Lemma~\ref{lem:redfindim}, it suffices to check that we can choose a cocycle $\varphi$ satisfying (\ref{eq:goodcocycle}). This in
turn would follow from the condition $\varphi(h\otimes t^4,\mathcal{L}(\mathfrak{s})^\sigma)=0$ for all $h\in(\mathfrak{h}')^{\sigma}$. Using the root
description,~\cite{vdL}, we see that $\alpha$ and
$-\alpha+k\delta$ are both real roots of $\bar{\mathfrak{g}}^{\mathtt{C}}$ only for the short anisotropic $\alpha$. Thus, if $x\in\bar{\mathfrak{g}}^{\mathtt{C}}_\beta$ for
some long anisotropic root $\beta$ then $\varphi(x,\bar{\mathfrak{g}}^{\mathtt{C}})=0$. On the other hand, every $h\otimes t^4$ can be obtained as a linear
combination of $[x,y]$, $x\in\bar{\mathfrak{g}}^{\mathtt{C}}_{\beta}$ and $y\in\bar{\mathfrak{g}}^{\mathtt{C}}_{-\beta+4\delta}$ for some long anisotropic roots $\beta$. Therefore
$\varphi(h\otimes t^4,\mathcal{L}(\mathfrak{s})^\sigma)=0$ for all $h\in(\mathfrak{h}')^{\sigma}$. The statement of lemma now follows from
$H^2(\mathfrak{s})_{\bar 1}=0$.
In the case of $\mathfrak q(n)^{(2)}$ we have a grading $\bar{\mathfrak{g}}^{\mathtt{C}}=\bigoplus\bar{\mathfrak{g}}^{\mathtt{C}}_i$ induced by the standard grading on Laurent polynomials, with
$\bar{\mathfrak{g}}^{\mathtt{C}}_0=\mathfrak{sl}(n)$. For every $i$ the term $\bar{\mathfrak{g}}^{\mathtt{C}}_i$ is the adjoint $\bar{\mathfrak{g}}^{\mathtt{C}}_0$-module. The parity of
$\bar{\mathfrak{g}}^{\mathtt{C}}_i$ equals the parity of $i$. Let $s=2k+1$. To compute $H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)_{s\delta}$ we consider the first layer
of Hochschild-Serre spectral sequence (see, for instance, \cite{F}, Sect. 5) with respect to subalgebra $\bar{\mathfrak{g}}^{\mathtt{C}}_0$:
$$H^2(\bar{\mathfrak{g}}^{\mathtt{C}}_0,\mathbb C)\oplus H^1(\bar{\mathfrak{g}}^{\mathtt{C}}_0,(\bar{\mathfrak{g}}^{\mathtt{C}}_s)^*)\oplus
H^0(\bar{\mathfrak{g}}_0^{\mathtt{C}},\oplus_{a+b=s} (\bar{\mathfrak{g}}^{\mathtt{C}}_a\otimes\bar{\mathfrak{g}}^{\mathtt{C}}_b)^*).$$
Since $H^2(\bar{\mathfrak{g}}^{\mathtt{C}}_0,\mathbb C)=0$, $H^1(\bar{\mathfrak{g}}^{\mathtt{C}}_0,(\bar{\mathfrak{g}}^{\mathtt{C}}_s)^*)=0$ and
$H^0(\bar{\mathfrak{g}}_0^{\mathtt{C}},(\bar{\mathfrak{g}}^{\mathtt{C}}_a\otimes\bar{\mathfrak{g}}^{\mathtt{C}}_b)^*)=\mathbb C$ we obtain that every cocycle
$c\in H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)_{s\delta}$ can be written in the form
$$c(x\otimes t^a,y\otimes t^b)=\gamma(a,b)\operatorname{tr} (xy),\ \gamma:\mathbb{Z}\times\mathbb{Z}\to\mathbb{C}.$$
Furthermore $\gamma$ has the following properties
\begin{itemize}
\item weight condition: $\gamma(a,b)=0$ unless $a+b=s$;
\item skew-symmetry: $\gamma (a,b)=-\gamma(b,a)$;
\item $\gamma(0,s)=0$;
\item cocycle condition: $\gamma(a,b+c)=\gamma(a+b,c)-\gamma(b,a+c)$.
\end{itemize}
The last condition follows by direct computation using the property of the trace
$\operatorname{tr}(uvw)=\operatorname{tr}(vwu)$.
Without loss of generality assume that $s>0$. By the cocycle condition and
skew-symmetry
$$\gamma(p,s-p)=\gamma(p,s-p+1-1)=\gamma(s+1,-1)+\gamma(p-1,s-p+1).$$
By induction
$$\gamma(p,s-p)=p\gamma(s+1,-1)+\gamma(0,s)=p\gamma(s+1,-1).$$
Hence $0=\gamma(s,0)=s\gamma(s+1,-1)$
that implies $\gamma(s+1,-1)=0$. Therefore $\gamma\equiv 0$. Thus, $H^2(\bar{\mathfrak{g}}^{\mathtt{C}},\mathbb C)_{s\delta}=0$.
\end{proof}
\section{Description of root algebras. Examples}
\label{sec:app}
In Subsection \ref{rootalg} we describe root algebras in the indecomposable fully reflectable case. In the rest of this section
we compute some of the groups $\operatorname{Aut}_\mathcal{R}(v)$.
In this section we identify admissible components of $\mathcal{R}$ by
root Lie superalgebras supported on them.
\subsection{}\label{rootalg} By contrast with the case $\mathfrak{gl}(1|1)$, see~\ref{rank1},
we have the following
\begin{thm} Let $\mathcal{R}_0$ be an indecomposable admissible fully reflectable component of the root groupoid, not isomorphic to $\mathfrak{gl}(1|1)$.
Then any ideal of $\mathfrak{g}^\mathtt{U}$ having zero intersection with $\mathfrak{h}$
defines a root algebra. If $\mathcal{R}_0$ is of type (Aff) and $\mathfrak{g}^{\mathtt{U}}\neq \mathfrak{g}^{\mathtt{C}}$ then all such ideals are in natural bijection with subsets of $\mathbb Z\setminus 0$.
\end{thm}
\begin{proof} By~\ref{crl:invariantideals} we need to consider only components with isotropic reflexions. Furthermore, we are only interested in the case $\mathtt{Sp}^D(v)\neq \{1\}$
and $\mathfrak{g}^{\mathtt{C}}\neq \mathfrak{g}^{\mathtt{U}}$. By~\ref{crlfin} and~\ref{Qpm} (see below)
this leaves us with components of type (Aff) listed in~\ref{thm:UKM-ns}. Let $\mathfrak{g}$ be a root algebra and
$$J^{\mathtt{C}}:=\mathrm{Ker}(\mathfrak{g}^{\mathtt{U}}\to\mathfrak{g}^{\mathtt{C}}),\ \ \ J:=\mathrm{Ker}(\mathfrak{g}^{\mathtt{U}}\to\mathfrak{g}).$$
By~\ref{thm:UKM-ns} we have
$$J^{\mathtt{C}}=\bigoplus_{s\in\mathbb Z\setminus 0} J^{\mathtt{C}}_{s\delta},\ \ \ \dim J^{\mathtt{C}}_{s\delta}\leq 1.$$
It follows from the definition of $Q^{++}$ that it is $\operatorname{Aut}_{\mathcal{R}}(v)$-stable. Since $Q^{++}=\mathbb Z_{\geq 0}\delta$ and $ J^{\mathtt{C}}$ is
$\operatorname{Aut}_{\mathcal{R}}(v)$-stable we obtain that
$ J^{\mathtt{C}}_{s\delta}$ is $\operatorname{Aut}_{\mathcal{R}}(v)$-stable for any $s$. Therefore any graded subspace of $ J^{\mathtt{C}}$ is $\operatorname{Aut}_{\mathcal{R}}(v)$-stable. Moreover, by~\ref{crlaff} (2)
any graded subspace of $ J^{\mathtt{C}}$ is an ideal. Hence by~\ref{sss:invariantideals} the root algebras are in bijection with the graded subspaces of $J^{\mathtt{C}}$.
The last assertion follows from the description of $\mathfrak{g}^{\mathtt{U}}$ given in~\ref{thm:UKM-ns}.
\end{proof}
\begin{rem} Note that by the above theorem a root algebra may not admit a superinvolution $\theta$ defined in~\ref{sss:automorphism}.
\end{rem}
\subsection{Star-shaped spines} Here we calculate the automorphism groups in a few small
examples.
\subsubsection{Example}\label{q32}
The following root datum contains root algebra $q(3)^{(2)}$.
Take $X=\{x_1,x_2,x_3\}$ and let $\mathfrak{h}=\mathfrak{h}(v)$ have dimension 4
with the Cartan matrix
$$\begin{pmatrix}
0 & -1 & 1\\
-1 & 0 & 1\\
1 & -1& 0
\end{pmatrix},\ \ \ p(x_i)=1\ \text{ for } i=1,2,3.$$
Then the graph $\mathtt{Sp}(v)$ is a star with $v$ at the center and three other vertices $v_i$ with
$r_{x_i}:v\to v_i$ and the Cartan matrices
$$v_1:\ \begin{pmatrix}
0 & -1 & 1\\
1 & -2 & 1\\
-1 & -1& 2
\end{pmatrix}
\ \ \
v_2:\ \begin{pmatrix}
-2 & 1 & 1\\
1 & 0 & -1\\
1 & 1& -2
\end{pmatrix}
\ \ \
v_3: \begin{pmatrix}
2 & -1 & -1\\
-1 & 2 & -1\\
-1 & 1& 0
\end{pmatrix}$$
with $p_{v_j}(x_i)=\delta_{ij}$. We have three principal
reflections $s_{\alpha_k}$, where
$$\alpha_k:=b(x_i)+b(x_j)=b_{v_i}(x_j)=b_{v_j}(x_i)$$
for $\{i,j,k\}=\{1,2,3\}$. The Weyl group is generated
by these reflections (this group is isomorphic to the affine Weyl group $A_2^{(1)})$. The group $K(v)$ is the additive group $\mathbb{C}$. If we choose
$\mathfrak{h}$ of dimension greater than $4$, the Weyl group will remain the same, but $K(v)$ will be different. Regardless of $\mathfrak{h}$, $\operatorname{Aut}_\mathcal{R}(v)=
W(v)\times K(v)$ by~\ref{crl:all-different}.
\subsubsection{Example: $B(1|1)^{(1)}$, $D(2|1,a)$, $D(2|1,a)^{(1)}$, $Q^{\pm}(m,n,t)$}\label{Qpm} All these cases are similar to~\ref{q32}. We can
(and will) choose a vertex $v$ such that
$p(x)=1$ for all $x\in X$. We always have $a_{xy}\not=0$ if $x\not=y$.
The graph $\mathtt{Sp}(v)$ is a star with the center at $v$. The other
vertices are $v_x$ with the edges $r_{x}: v\to v_x$.
If $a_{xx}=0$ then $p'(y)=0$ for each $y\not=x$. Hence $\mathtt{Sp}(v)$ consists of $v$ and all
$v_x$ such that $a_{xx}=0$. Cartan data at all vertices of $\mathtt{Sp}(v)$ are not
$D$-equivalent, so~\ref{crl:all-different} is applicable.
This gives $\operatorname{Aut}(v)=W\times K$.
\subsection{$\mathfrak{s}l_n^{(1)}$, its relatives and friends}
There is a number of components of the root groupoid whose Cartan matrices satisfy
common properties listed below in (\ref{aij01}) and whose automorphism groups allow
a more or less uniform description. We call them ``relatives and friends of $\mathfrak{s}l_n^{(1)}$''
and they consist of the types
$\mathfrak{s}l(k|\ell)^{(1)}$ for $k,\ell$ such that
$k+\ell=n$ and $\mathfrak q_n^{(2)}$.
We take $X=\{x_i\}_{i\in\mathbb{Z}_n}$.
Let $v\in\mathcal{R}_0$ be a vertex with the Cartan matrix of the following form:
\begin{equation}\label{aij01}
\begin{array}{ll}
a_{ij}=0\ \text{ for }j\not=i,i\pm 1;\\
a_{i,i\pm 1}\in \{\pm 1\},\ \ \
a_{i,i-1}+a_{ii}+a_{i,i+1}=0\\
p(x_i)=1\ \ \Longleftrightarrow\ \ a_{ii}=0.
\end{array}\end{equation}
\subsubsection{}\label{goodmatrices}
It is easy to check that
\begin{itemize}
\item
If a Cartan matrix satisfies~(\ref{aij01}), then all $x_i$ are reflectable
at $v$ and $\sum_i b_{v}(x_i)=\sum_i b_{v'}(x_i)$ for each reflexion
$v\to v'$;
\item
all Cartan matrices in $\mathtt{Sk}(v)$ satisfy~(\ref{aij01});
\item two Cartan matrices $A,A'$ satisfying~(\ref{aij01}) are $D$-equivalent
if and only if $p(x_i)=p'(x_i)$ for all $i$.
\end{itemize}
\subsubsection{}
\label{sss:iota}
Let $\overline{\mathcal{R}}_0$ be the component of $\mathcal{R}$ corresponding to $\mathfrak{s}l_n^{(1)}$;
we will use bar notation $\bar v$ etc. for the objects connected to $\overline\mathcal{R}_0$.
Fix a linear isomorphism $\iota: Q_{\bar v}\stackrel{\sim}{\to} Q_{v}$
given by $\iota({b}_{\bar v}(x_i)):=b_v(x_i)$.
Let $v\to v'$ be a path in $\mathcal{R}_0$ and $\bar v\to \bar v'$ be its namesake
in $\overline{\mathcal{R}}_0$. It is easy to see that
$$b_v(x_i)=\iota({b}_{\overline{v}}(x_i)).$$
This provides a bijection between the sets of real roots $\Delta^\mathit{re}=\overline{\Delta}^\mathit{re}$.
Note that all roots of $\overline\Delta^\mathit{re}$ are anisotropic.
Since the set $\{b_v(x_i)\}_{i\in\mathbb{Z}_n}$ determines a vertex in $\mathtt{Sk}(v)$
by~\ref{crl:unique-in-sk},
this gives a bijection between $\mathtt{Sk}(v)$ and $\mathtt{Sk}(\bar v)$.
\subsubsection{}
We identify $Q_{v}$ and $Q_{\bar v}$ via $\iota$.
By~\ref{crl:Wfree} the Weyl group $W(\mathfrak{s}l_n^{(1)})$ acts freely on $\mathtt{Sk}(\bar v)$.
By~\ref{lem:decomposition0} this action is transitive.
This gives a simply transitive action of $W(\mathfrak{s}l_n^{(1)})$ on $\mathtt{Sk}(v)$.
Note that the Weyl group $W$ can be identified with a subgroup of $W(\mathfrak{s}l_n^{(1)})$ as it is generated by a part of the reflections
belonging to $W(\mathfrak{s}l_n^{(1)})$.
Let us compute
$$\operatorname{Aut}(v)/K(v)=\mathtt{Sk}^D(v)=\{w\in W(\mathfrak{s}l_n^{(1)})|\ \
A_{w(v)}\ \text{ is $D$-equivalent to }A_v\}.$$
\subsubsection{Action of $W(\mathfrak{s}l_n^{(1)})$}
By~\ref{goodmatrices}, the vector
$$\delta:=\sum_{i=1}^n b_{v'}(x_i)$$
does not depend on the choice of $v'\in\mathtt{Sk}(v)$.
View $Q_{v}$ as a subset of $V=\operatorname{Span}_\mathbb{Z}(\varepsilon_1,\dots,\varepsilon_n,\delta)$ by setting
$$b(x_i)=\varepsilon_i-\varepsilon_{i+1}\ \text{ for }i=1,\ldots,n-1; \ \
b(x_n)=\delta+\varepsilon_n-\varepsilon_1.$$
We can extend the parity function $p: Q_v\to \mathbb{Z}_2$ to $p: V\to \mathbb{Z}_2$ by setting $p(\varepsilon_1)=0$. Set
$$\bar Q:=\{\sum_{i=1}^n k_i\varepsilon_i|\ \sum_{i=1}^n k_i=0,\ k_i\in\mathbb{Z}\}.$$
(Note: $\bar Q$ is the lattice for the finite root system $A_{n-1}$.)
By \cite{Kbook}, Thm. 6.5, $W(\mathfrak{s}l_n^{(1)})=S_n\ltimes \bar Q$ and this group acts on $V$
as follows:
\begin{itemize}
\item
$S_n$ acts on $\{\varepsilon_i\}_{i=1}^n$ by permutations and stabilizes $\delta$;
\item $\bar Q$
acts on $V$ by the formula
$$\nu*\mu:=\mu-(\mu,\nu)\delta\ \ \text{ for }\nu\in \bar Q,\ \mu\in V$$
where the bilinear form on $V$ is given by
$$(\varepsilon_i,\varepsilon_j)=\delta_{ij},\ \ \ (\varepsilon_i,\delta)=(\delta,\delta)=0.$$
\end{itemize}
Note that $W(\mathfrak{s}l_n^{(1)})$ stabilizes $\delta$.
By~\ref{goodmatrices}, $A_{w(v)}$ is $D$-equivalent to $A_v$ if and only if
$p_v(x_i)=p_{w(v)}(x_i)$ for all $i$.
Therefore,
\begin{equation}
\label{eq:skd=}
w\in\mathtt{Sk}^D(v)\ \Longleftrightarrow\ \
p(w\varepsilon_i)-p(\varepsilon_i)\ \text{ is independent of $i$}.
\end{equation}
We will now compute the groups $\mathtt{Sk}^D(v)$ using the formula (\ref{eq:skd=}).
\subsubsection{Case $\mathfrak{s}l(k|\ell)^{(1)}$, $k,\ell\not=0$}
We can choose $v$ in such a way that
$p(x_i)=0$ for $i\not=k,n$ and $p(x_n)=p(x_k)=1$. Note that $p(\delta)=0$.
Denote by $S_k\subset S_n$ (resp., $S_\ell\subset S_n$)
the group of permutations of $\{\varepsilon_i\}_{i=1}^k$ (resp., of $\{\varepsilon_i\}_{i=k+1}^n$).
In this case $p(w\varepsilon_i)=p(\varepsilon_i)$ for $w\in \bar Q$, so $\mathtt{Sk}^D(v)\supset \bar Q$.
One has
$$S_n\cap\mathtt{Sk}^D(v)=\{w\in S_n|\ p'(w(\varepsilon_i-\varepsilon_{i+1}))=p'(\varepsilon_i-\varepsilon_{i+1})\ \text{ for }i=1,\ldots,n-1\}.$$
If $k\ne\ell$ this gives $S_n\cap\mathtt{Sk}^D(v)=S_k\times S_{\ell}$. In the case $k=\ell$
we have \newline
$S_n\cap\mathtt{Sk}^D(v)=(S_k\times S_k)\rtimes\mathbb{Z}_2$, where
$\mathbb{Z}_2$ interchanges the two copies of $S_k$.
Hence
$$\mathtt{Sk}^D(v)=\left\{ \begin{array}{ll}
(S_k\times S_{\ell})\ltimes \bar Q\ & \text{ if }k\not=\ell\\
((S_k\times S_k)\rtimes\mathbb{Z}_2)\ltimes \bar Q\ & \text{ if }k=\ell.\end{array}
\right.$$
Note that the Weyl group has the form
$W=(S_k\times S_{\ell})\ltimes Q_0$
where $Q_0\subset\bar Q$ is the subgroup spanned
$\{\varepsilon_i-\varepsilon_{i+1}\}_{i=1}^{k-1}\coprod \{\varepsilon_i-\varepsilon_{i+1}\}_{i=k+1}^{n-1}$. Observe that $W$ has an infinite index in $\mathtt{Sk}^D(v)$.
\begin{rem}
\label{sss:glmn}
For $\mathcal{R}_0$ of type $A(k-1|\ell-1)$ a
similar reasoning (replacing the index set
$X=\{x_i\}_{i\in\mathbb{Z}_n}$ with the set
$X=\{x_1,\ldots, x_n\}$)
shows that $S_{k+\ell}$ acts transitively on $\mathtt{Sk}(v)$ and that
$$\mathtt{Sk}^D(v)=\left\{ \begin{array}{ll}
S_k\times S_{\ell} & \text{ if }k\not=\ell\\
(S_k\times S_k)\rtimes\mathbb{Z}_2 & \text{ if }k=\ell.\end{array}
\right.$$
Note that the Weyl group is in both cases $S_k\times S_{\ell}$.
\end{rem}
If $k=\ell$ then $K(v)=\mathbb{C}$ and $\operatorname{Aut}(v)$ is a nontrivial semidirect
product of $\mathbb{C}$ and $\mathtt{Sk}^D(v)$.
\subsubsection{Case $\mathfrak q_n^{(2)}$}
Using~\cite{Kbook}, Thm. 6.5 and \cite{S3}, one gets
$$W=S_n\ltimes 2\bar Q.$$
We will choose $v$ so that
$p(x_i)=0$ for $i=1,\ldots,n-1$ and $p(x_n)=1$. Note that $p(\delta)=1$.
In this case $p(w\varepsilon_i)=p(\varepsilon_i)$ for $w\in S_n$, so $S_n\subset\mathtt{Sk}^D(v)$.
Hence
$$\mathtt{Sk}^D(v)=S_n\ltimes Q'$$
where $Q'=\bar Q\cap \mathtt{Sk}^D(v)$.
Take $\nu\in\bar Q$. One has
$$p(\nu*\varepsilon_i)-p(\varepsilon_i)\equiv (\nu,\varepsilon_i)\mod 2,$$
so
$$Q'=\{\sum_{i=1}^n k_i\varepsilon_i|\ \sum_{i=1}^n k_i=0,\ k_i\in\mathbb{Z},
k_i-k_j\equiv 0\mod 2\}.$$
If $n$ is odd this gives
$Q'=2\bar Q$, so $\mathtt{Sk}^D(v)=W$ and $\operatorname{Aut}_\mathcal{R}(v)=W\times K$.
If $n$ is even, $2\bar Q$ has index $2$ in $Q'$. Thus $W$ has index two in $\mathtt{Sk}^D(v)$,
so that $W\times K$ is an index 2 subgroup of $\operatorname{Aut}_\mathcal{R}(v)$.
\subsection{A deformation of $\mathfrak{sl}(2|1)^{(1)}$}
\label{ss:s21b}
A very interesting relative of $\mathfrak{sl}(2|1)^{(1)}$
is the root Lie superalgebra $S(2|1,b)$ defined in~\cite{S3}. We will recall some of the results of~\cite{S3} below. Set $X:=\{x_1,x_2,x_0\}$ and fix $\mathfrak{h}$ with $\dim\mathfrak{h}=4$.
Let $\mathcal{R}(b)$, $b\ne 0$, be the component of $\mathcal{R}$ containing a vertex $v$
such that $p_v(x_1)=p_v(x_2)=1$, $p_v(x_0)=0$ and
the Cartan matrix $A_v$ is equal to
$$A(b):=\begin{pmatrix}
0 & b & 1-b\\
-b & 0 &1+b\\
-1 & -1 & 2
\end{pmatrix}$$
for $b\not=0$.
In studying skeleta of $\mathcal{R}(b)$ it is convenient to allow permutations of the elements of $X$. This leads to the action of $S_3$ on the components of $\mathcal{R}$ with the index set $X$
and, as we will see soon, carries components $\mathcal{R}(b)$ to components of the same type.
Permuting $x_1$ and $x_2$ in $A(b)$ we obtain
$A(-b)$, so $\mathcal{R}(b)$ is mapped to $\mathcal{R}(-b)$. In particular,
each root algebra for $S(2|1;b)$ is isomorphic to a root algebra
for $S(2|1; -b)$.
\begin{lem}\label{lemS21b}
For any vertex $v\in\mathcal{R}(b)$ the Cartan matrix $A_v=(a^{(v)}_{xy})$ is of the form
$\sigma(D A(b+i))$ where $i\in \mathbb{Z}$,
$D$ is an invertible diagonal matrix and $\sigma\in S_3$ is an even permutation.
One has $p_v(x)=1$ if $a^{(v)}_{xx}=0$ and $p_v(x)=0$ otherwise.
\end{lem}
\begin{proof}
It is enough to verify what happens to the Cartan datum under an isotropic reflexion
$r_{x}:\ v\to v'$. Since permuting $x_1$ and $x_2$ in $A(b)$ yields
$A(-b)$, it is enough to verify the assertion for $x=x_1$. In this case we have
$$A_{v'}=\begin{pmatrix}
0 & -b & -1+b\\
b & -2b & b\\
1 & \frac{2-b}{b-1} & 0
\end{pmatrix}.$$
Taking the homothety $h_{\lambda}:v'\to v''$ with $\lambda=(-1,-b^{-1}, b-1)$ we get
$$A_{v''}=\begin{pmatrix}
0 & b & 1-b\\
-1 & 2 & -1\\
b-1 & 2-b & 0
\end{pmatrix}.$$
Applying now the cyclic permutation carrying $x_2$ to $x_1$, we get the Cartan matrix
$A(b-1)$.
It is easy to see that going along the other isotropic reflection would produce in the same way the matrix $A(b+1)$.
\end{proof}
\begin{crl}\label{crlS21b}
\begin{itemize}
\item[1.] $\mathcal{R}(b)$ is admissible
if and only if $b\not\in\mathbb{Z}$;
\item[2.] if $\mathcal{R}(b)$ is admissible, then
for $i\in\mathbb{Z}$
each root algebra for
$S(2|1;\pm b\pm i)$ is isomorphic to a root algebra for $S(2|1;b)$.
\end{itemize}
\end{crl}
\begin{proof}
Note that $A(b)$ is locally weakly symmetric for $b\not=\pm 1$.
Using Lemma~\ref{lemS21b} we obtain the assertions.
\end{proof}
\subsubsection{}\label{S21bproperties}
From now on we assume that
$\mathcal{R}(b)$ is admissible, i.e., $b\not\in\mathbb{Z}$.
Using Lemma~\ref{lemS21b} we obtain
\begin{enumerate}
\item all $x$ are reflectable at each $v\in\mathcal{R}(b)$;
\item for each reflexion $r_{x}: v\to v'$ we have $b_{v'}(y)=b_{v}(x)+b_{v}(y)$
if $y\not=x$;
\item a real root is isotropic if and only if
it is odd.
\end{enumerate}
\subsubsection{}
Let ${\mathcal{R}}_{\bar v}$ be the component of the root groupoid with $\dim\mathfrak{h}'=4$ and
a vertex $\bar v$ such that $p_{\bar v}(x_1)=p_{\bar v}(x_2)=1$, $p_{\bar v}(x_0)=0$ and
the Cartan matrix
$$A_{\bar v}:=\begin{pmatrix}
0 & -1 & -1\\
-1 & 0 &-1\\
-1 & -1 & 2
\end{pmatrix}.$$
Then the component ${\mathcal{R}}_{\bar v}$ of $\bar v$ is of type $\mathfrak{sl}(2|1)^{(1)}$.
As in \ref{sss:iota}, \ref{S21bproperties}(2) yields a linear isomorphism $\iota: Q_{\bar v}\to Q_{v}$ by setting $\iota(b_{\bar v}(x_i)):=
b_v(x_i)$; by the same arguments, this gives a bijection between $\mathtt{Sk}(v)$ and $\mathtt{Sk}(\bar v)$ with
$b_{v}(x_i)=\iota(b_{\bar v}(x_i))$.
Note that, contrary to~\ref{sss:iota}, $\iota$ preserves $p(x_i)$.
\subsubsection{}
\label{sss:s21b}
We have
$$Q^{++}_v=\iota(Q^{++}_{\bar v})=\mathbb{N}\delta\ \text{ for }
\delta:=\sum b_v(x_i).$$
Therefore, $S(2|1,b)$ is of type (Aff).
Note that $\langle\delta,a_v(x_1)\rangle=1\not=0$, so by
Corollary~\ref{crlfin}(3) $\mathfrak{g}^\mathtt{U}=\mathfrak{g}^{\mathtt{C}}$.
\subsubsection{}
By~\ref{S21bproperties}(3) we see that $\iota:Q_{\bar v}\to Q_v$ establishes a bijection between the
real, isotropic and anisotropic roots for $\bar v$ and $v$.
Moreover, the bijection between $\mathtt{Sk}(v)$ and $\mathtt{Sk}(\bar v)$ gives a bijection between
the spines $\mathtt{Sp}(v)$ and $\mathtt{Sp}(\bar v)$. In particular,
$\mathtt{Sp}(v)$ has two principal roots
$\alpha:=b_{v}(x_0)$ and $b_v(x_1)+b_v(x_2)=\delta-\alpha$. Using~\ref{S21bproperties} we obtain
$$W=\overline{W}\cong A_1^{(1)}$$
and for each $\nu\in Q_{\bar v}$ we have
$w\iota(\nu)=\iota(w\nu)$.
\begin{prp}
$\operatorname{Aut}(v)=W\times K$.
\end{prp}
\begin{proof}
It is enough to check that all Cartan matrices in
$\mathtt{Sp}(v)$ are not $D$-equivalent. Note that $\mathtt{Sp}(v)$
can be seen as the infinite graph
$$\ldots\stackrel{r_{x_0}}{\to} v_{-1} \stackrel{r_{x_2}}{\to} v_0\stackrel{r_{x_1}}{\to} v_1 \stackrel{r_{x_0}}{\to} v_2\stackrel{r_{x_2}}{\to} v_3\stackrel{r_{x_1}}{\to} v_4 \stackrel{r_{x_0}}{\to}\ldots $$
Consider the equivalence relation on the set of $3\times 3$ matrices generated
by the action of $A_3$ (the group of even permutations
in $S_3$) and $B\sim DB$ for a diagonal invertible matrix $D$.
Observe that $A(b)\not\sim A(b')$ if $b\not=b'$.
In the proof of Lemma~\ref{lemS21b} we showed that
if $A_v\sim A(b)$, then
for an isotropic reflexion $v\stackrel{r_{x}}{\to} v'$ we have $A_{v'}\sim A(b\pm 1)$.
This implies that $A_{v_k}\sim A(b-k)$, so $A_{v_k}\not\sim A_{v_0}$ for any $k\not=0$.
Hence the group
$\mathtt{Sp}^D(v_0)$ is trivial, so $\operatorname{Aut}(v_0)=W\times K$.
\end{proof}
\end{document} |
\begin{document}
\setcounter{tocdepth}{1}
\title[Projective plane graphs and 3-rigidity]{Projective plane graphs and 3-rigidity}
\author[E. Kastis and S.C. Power]{E. Kastis and S.C. Power}
\thanks{2010 {\it Mathematics Subject Classification.}
{52C25, 51E15} \\
Key words and phrases: projective plane, embedded graphs, geometric rigidity\\
This work was supported by the Engineering and Physical Sciences Research Council [EP/P01108X/1]}
\address{Dept.\ Math.\ Stats.\\ Lancaster University\\
Lancaster LA1 4YF\\ U.K.}
\email{[email protected]}
\email{[email protected]}
\maketitle
\begin{abstract}
It is shown that a simple graph which is embeddable in the real projective plane is minimally 3-rigid if and only if it is $(3,6)$-tight. Moreover the topologically uncontractible embedded graphs of this type are constructible from one of 8 embedded graphs by a sequence of vertex splitting moves. In particular the characterisation of minimal 3-rigidity holds for a triangulated
M\"{o}bius strip.
\end{abstract}
\section{Introduction}
Let $G$ be the graph of a triangulated sphere. Then an associated bar-joint framework $(G,p)$ in ${\mathbb{R}}^3$ is known to be minimally rigid if the placements $p(v)$ of the vertices $v$ are strictly convex (Cauchy \cite{cau}) or if the placement is generic. The latter case follows from
Gluck's result \cite{glu} that any generic placement is in fact infinitesimally rigid.
An equivalent formulation of Gluck's theorem asserts that if $G$ is a simple graph which is embeddable in the sphere then $G$ is minimally 3-rigid, in the sense of the next paragraph, if and only if it satisfies a $(3,6)$-tight sparsity condition.
We obtain here the exact analogue of this formulation in the case of simple graphs that are embeddable in the real projective plane ${\mathcal{P}}$.
As indicated more fully below, the proof rests on viewing embedded graphs as partial triangulations of ${\mathcal{P}}$ and employing inductive arguments based on edge contractions for certain admissible edges. Accordingly we may state this combinatorial characterisation in the following form.
An immediate corollary is that this characterisation also holds for triangulated M\"{o}bius strips.
A simple graph $G$ is \emph{3-rigid} if its generic bar-joint frameworks in ${\mathbb{R}}^3$ are infinitesimally rigid and is \emph{minimally 3-rigid} if no subgraph with the same vertex set has this property.
\begin{thm}\label{t:projectiveA}
Let $G$ be a simple graph
associated with a partial triangulation
of the real projective plane.
Then $G$ is minimally $3$-rigid if and only if $G$ is $(3,6)$-tight.
\end{thm}
Recall that a graph $G=(V,E)$ is $(3,6)$-tight if it satisfies the Maxwell count $|E|=3|V|-6$ and the sparsity condition $|E'|\leq 3|V'|-6$ for subgraphs $G'$ with an edge or at least 3 vertices. In particular it follows that such a graph falls 3 edges short of arising from a full triangulation of ${\mathcal{P}}$.
The proof of Theorem \ref{t:projectiveA} depends heavily on our main result, Theorem \ref{t:construction}, which gives a purely combinatorial constructive characterisation of (3,6)-tight graphs $G$ which are embeddable in the real projective plane.
A key step is a criterion for the existence of an edge contraction move for an embedded edge that lies in two 3-cycle faces (called $FF$ edges), such that the $(3,6)$-sparsity condition is preserved. This is done, in Section \ref{s:contractionmoves}, by exploiting the topological environment of the embedded graph. An associated edge contraction sequence must terminate and the terminal embedded graph is said to be \emph{irreducible}. We show that such irreducibles have the defining property that each contractible embedded edge (one for which the contraction is simple) lies on the boundary walk of a $(3,6)$-tight embedded subgraph. These boundary walks are the critical walks in $G$ discussed in Section \ref{ss:criticalcycles}.
In Section \ref{s:theirreducibles} we show that there are 9 irreducible embedded graphs, including 2 embedded graphs for $K_3$.
As a corollary of this identification we see that the irreducibles coincide with the apparently smaller class of (3,6)-tight embedded graphs which have no contractible edges, that is, they have no $FF$ edges for which the contraction gives a simple graph.
The identification of the irreducibles makes repeated use of
Corollary \ref{c:propercontainment}. This ensures that the boundary walk of a nontriangular face of an irreducible cannot be interior, in a natural sense, to a critical cycle of the same length. The various proofs proceed by a case by case analysis according to the existence of $FF$ edges and the number of nontriangular faces. In the appendix we give a different proof strategy and determine directly the 9 \emph{uncontractible} embedded graphs. This is a case by case analysis depending on the minimum hole incidence degree given in Definition \ref{d:holedegree}.
The determination of construction schemes and their base graphs for various classes of graphs is of general interest, both for embedded graph theory and for the rigidity of bar-joint frameworks.
We note, for example, that Barnette \cite{bar} employed vertex splitting moves for the construction of triangulations of 2-manifolds and showed that there are 2 (full) triangulations of ${\mathcal{P}}$ which are uncontractible. Also, Barnette and Edelson \cite{bar-ede-1}, \cite{bar-ede-2} have shown that all 2-manifolds have finitely many minimal uncontractible triangulations.
With respect to generic rigidity, Fogelsanger \cite{fog} has shown that a finite simple graph given by a triangulated compact surface without boundary is 3-rigid. For the projective plane this was obtained earlier by Whiteley \cite{whi} using the vertex splitting method and Barnette's characterisation of the uncontractible graphs.
With the exception of the sphere, the graph of a fully triangulated surface without boundary is over-constrained, in the
sense that $|E| > 3|V | - 6$. Characterising the minimal 3-rigidity of partial triangulations is therefore a natural topic and is one which requires additional methods.
Following Whiteley's demonstration that vertex splitting preserves generic rigidity this construction move
has become an important tool in combinatorial rigidity theory \cite{gra-ser-ser}. See, for example, the more recent studies for the graphs of modified spheres \cite{fin-whi}, \cite{fin-ros-whi}, \cite{cru-kit-pow-1}, \cite{jor-tan}, and for the graphs given by a partially triangulated torus \cite{cru-kit-pow-2}.
The structure of the proof of the main results here follows a similar path to the torus case. In particular we use so-called \emph{face graphs} to define embeddings, as in Figure \ref{f:irred_Five}, where opposite vertices and edges of the boundary of the hexagon are identified.
Also, Lemmas \ref{l:obstacle1} and \ref{l:obstacle2} are projective plane counterparts of Lemma 4.4 of \cite{cru-kit-pow-2}. On the other hand we find it convenient to introduce \emph{surface graphs}, as defined in Section \ref{s:terminology}, where graphs carry a given triangle face structure. The proof of Theorem \ref{t:projectiveA}, given in Section \ref{s:mainproof}, follows from Whiteley's theorem, the identification of the irreducible embedded graphs, given in Section \ref{s:theirreducibles},
and the fact that the irreducibles have graphs that are minimally 3-rigid.
\section{Surface Graphs}\label{s:terminology} Let ${\mathcal{M}}$ be a classical surface, by which we shall mean a connected compact surface, possibly with boundary. Then we define
a \emph{surface graph for ${\mathcal{M}}$} to be a triple $G=(V,E,F)$ where $(V,E)$ is a simple graph, with no loop edges, $F$ is a set of $3$-cycles of edges, called facial 3-cycles, and where there exists a faithful embedding of $G$ in ${\mathcal{M}}$ for which the facial 3-cycles correspond to the 3-sided faces determined by the embedded graph. A surface graph for ${\mathcal{M}}$, which we also refer to as an \emph{${\mathcal{M}}$-graph}, can thus be viewed as a simple graph together with a set of ``facial" 3-cycles which is obtained from a triangulation of ${\mathcal{M}}$ by discarding vertices, edges and faces.
We also say that $G$ is a \emph{triangulated surface graph for ${\mathcal{M}}$} (or a fully triangulated surface graph for ${\mathcal{M}}$ for clarity), if no vertices, edges or faces are discarded, so that the union of the embedded faces is equal to ${\mathcal{M}}$.
Classical compact surfaces are classified up to homeomorphism by combinatorial surfaces and, moreover, combinatorial surfaces arise from
triangulated polygons by means of an identification of certain pairs of boundary edges. See \cite{gil-por} for example. We now formally define labelled graphs of this type {together with their facial structure} and refer to them as \emph{face graphs}. In this definition by a \emph{triangulated disc} we mean, in the terminology above, a triangulated surface graph for the surface which is a closed topological disc.
\begin{definition}\label{d:facegraph}
A \emph{face graph}
is a pair $(B, \lambda)$ where $B$ is a triangulated disc and $\lambda$ is a partition of the edges of the boundary of $B$ such that each set of the partition has $1$ or $2$ edges, and the paired edges of the partition are directed.
\end{definition}
A face graph $(B, \lambda)$ defines a simplicial complex $M$, with $1$-simplexes provided by edges and identified edge pairs, and 2-simplexes provided by the facial 3-cycles, and this complex defines a surface ${\mathcal{M}}$. We shall be concerned mainly with the projective plane ${\mathcal{P}}$, and its associated subsurfaces such as M\"obius strips and closed discs and cylinders. We remark that the sphere does not arise in this way but is obtained from a triangulated disc with 3-cycle boundary with this 3-cycle added as an additional facial 3-cycle.
If the identification graph, denoted $B/\lambda$, is simple then it follows that $B/\lambda$, with the set $F$ of facial 3-cycles of $M$, is a triangulated surface graph for ${\mathcal{M}}$. We also write $B/\lambda$ for this surface graph.
We now define particular ${\mathcal{M}}$-graphs in the following similar fashion in terms of a \emph{modified face graph} $(B_0,\lambda)$. By this we mean that
$B_0$ is a proper subgraph of $B$ which contains the boundary subgraph
$\partial B$ where $(B,\lambda)$ is a face graph as above. Here $\partial B$ is the graph induced by the edges which have one incident face. Now $(B_0,\lambda)$ gives an identification graph $B_0/\lambda$ with a facial structure inherited from the triangulation of $B$. Also the construction gives a particular embedding of this surface graph in ${\mathcal{M}}$. Such ${\mathcal{M}}$-graphs are special in that
they contain the embedded subgraph associated with $\partial B$.
\begin{center}
\begin{figure}
\caption{A modified face graph $(B_0,\lambda)$ for a ${\mathcal{P}}$-graph.}
\label{f:mobiussmall}
\end{figure}
\end{center}
{ Let us note that, formally, an \emph{embedding} $\alpha:G\to {\mathcal{M}}$ of a surface graph $G=(V,E,F)$ in the surface ${\mathcal{M}}$ is a triple of maps, $\alpha_V, \alpha_E, \alpha_F$, where $(\alpha_V,\alpha_E)$ is a graph embedding of $(V,E)$ (with $\alpha_E(e)$ a closed set for each edge $e$), and $\alpha_F(f)$ is the closed face of the embedded graph $(\alpha_V(V),\alpha_E(E))$ corresponding to the facial 3-cycle $f \in F$.
\begin{example} \label{e:mobiusfacegraph}
Figure \ref{f:mobiussmall} shows a modified face graph $(B_0,\lambda)$. The labelling of outer boundary edges and vertices determines $\lambda$, that is, the pairs of directed edges that are identified. Any triangulation of the interior of the inner 6-cycle gives a containing face graph $(B,\lambda)$ for a (fully) triangulated surface graph for a surface, as long as $B/\lambda$ is simple. In view of the identifications the topological surface for $(B,\lambda)$ is the real projective plane ${\mathcal{P}}$ and so $(B_0,\lambda)$ determines a surface graph $G=(V,E,F)=B_0/\lambda$ for ${\mathcal{P}}$, with 6 facial 3-cycles.
\end{example}
The surface graph $G$ of Example \ref{e:mobiusfacegraph} happens to be a fully triangulated surface graph for the M\"{o}bius strip. In general however, the closed set in ${\mathcal{P}}$ determined by the embedding of a modified face graph and its faces need not be a surface, or surface with boundary. This is because of the possibility of exposed edges which are not incident to any face. This is also true for the embeddings of $(3,6)$-tight graphs that we consider in the next section. See, for example, the first graph in Figure \ref{f:irred_Five}. This exhibits a modified face graph $(B_0,\lambda)$ where $\lambda$ identifies the opposite edges of a 6-cycle and where all other edges and vertices of a containing face graph $(B,\lambda)$ have been removed. It defines a surface graph $G=(V,E,F)$ with $F$ the empty set and with underlying graph equal to $K_3$.
\subsection{${\mathcal{P}}$-graphs}\label{ss:P-graphs} Of particular concern for us are the embeddings of $(3,6)$-tight graphs in the projective plane.
Let $(B,\lambda)$ be a triangulated face graph for ${\mathcal{P}}$ which is given by a triangulated disc $B$ whose outer boundary is a directed cycle of even length $2r, r \geq 2$, with $\lambda$ the set of paired opposite directed edges. Let $(B_0,\lambda)$ be a modified face graph for $(B,\lambda)$ such that
the graph $B_0/\lambda$ is simple. Then, as in the example above, the modified face graph determines a surface graph $H$ for ${\mathcal{P}}$ and an associated embedding $\pi(H)$. We also write $B_0/\lambda$ and $B/\lambda$ for the surface graphs associated with $(B_0,\lambda)$ and $(B,\lambda)$.
\begin{definition}\label{d:annular} (i) A modified face graph $(B_0,\lambda)$ for ${\mathcal{M}}$ is \emph{annular} if it is obtained from a face graph $(B,\lambda)$ by deleting the \emph{internal} edges and vertices of a triangulated subdisc $D$, that is, the edges and vertices not in $\partial D$.
(ii) If $\lambda$ is trivial then $(B_0,\lambda)$ is an \emph{annulus face graph}, and $B_0$ is a \emph{triangulated annulus}, if the boundary graphs $\partial B$ and $\partial B_0$ are disjoint. Also $(B_0,\lambda)$ is a
\emph{degenerate annulus face graph}, and $B_0$ is a \emph{degenerate triangulated annulus} if the boundary graphs are not disjoint.
\end{definition}
An annular modified face graph $(B_0,\lambda)$ of a $2r$-cycle face graph for ${\mathcal{P}}$ can be viewed as having a single ``hole". In a similar way we can consider modified face graphs $(B_0,\lambda)$ that contain a number of holes corresponding to the removal of the interior edges and vertices of a number of triangulated disc subgraphs of $(B,\lambda)$ with disjoint interiors.
We now note that the ${\mathcal{P}}$-graph for modified $2r$-cycle face graph fails to have the following topological property.
\begin{definition}\label{d:contractible}
An embedding $\pi(H)$ of an ${\mathcal{M}}$-graph $H$ in the surface ${\mathcal{M}}$ is
\emph{topologically contractible} if the closed set given by the union of the embedded edges and faces of $H$ is contained in an open subdisc of ${\mathcal{M}}$. \end{definition}
To see that the embedded surface graph $\pi(B_0/\lambda)$ in ${\mathcal{P}} =\pi(B/\lambda)$ is not topologically contractible note first that the image of $\partial B/\lambda$ determines a closed curve in ${\mathcal{P}}$ with nontrivial homotopy class. On the other hand every closed curve in an open subdisc of ${\mathcal{P}}$ has trivial homotopy class.
In Lemma \ref{l:converseforuncontractibles} we show that every $(3,6)$-tight embedded ${\mathcal{P}}$-graph that is topologically uncontractible may be represented by a modified face graph having either 1, 2 or 3 holes.
\begin{rem} We have found it useful to introduce graphs with an explicit facial structure since the edge contraction operations used in the reduction proofs below are for edges that lie in two facial 3-cycles. Moreover the facial structure arising from an embedding of a simple graph is already given in a modified face graph realisation of the embedding.
The following equivalent definition of a fully triangulated surface graph is purely combinatorial. This is of interest since the simplicial complex setting is appropriate for generalisations, both to higher dimensions (with homology cycles generalising surfaces) and for graph embeddings in nonmanifolds.
Let $G = G(M)$ be the graph
determined by the $1$-skeleton of a finite simplicial complex $M$
together with a set $F$ of facial 3-cycles determined by the $2$-simplexes
of $M$ where $M$ has the following properties.
\begin{enumerate}[(i)]
\item $M$ consists of a finite set of $2$-simplexes
together with their $1$-simplexes and $0$-simplexes.
\item Every $1$-simplex lies in at most two $2$-simplexes.
\item The 2-simplexes incident to each 0-simplex induce the simplicial complex of a triangulated disc.
\end{enumerate}
Condition (i) implies that each 1-simplex lies in at least one 2-simplex and so if $G$ is a connected graph then $M$ can be viewed as a {combinatorial surface} and this determines a classical topological surface ${\mathcal{M}}$, possibly with boundary. It follows that $G$, with the facial structure $F$ provided by 2-simplexes, is a triangulated surface graph for ${\mathcal{M}}$.
\end{rem}
\section{Contraction moves and (3,6)-sparsity.}\label{s:contractionmoves}
Let $G=(V,E,F)$ be a surface graph.
An edge of $G$ is of {\em type $FF$} if it is contained in two facial $3$-cycles and an $FF$ edge is \emph{contractible} if it is not contained in any non-facial $3$-cycle. We say that $G$ is \emph{contractible} if it has a contractible $FF$ edge. For such an edge $e=uv$ there is a natural contraction move $G \to G'$ corresponding to a contraction of $e$ merging $u$ and $v$ to a single vertex, leading to a surface graph $G'=(V',E',F')$ where $|V'|=|V|-1, |E'|=|E|-3, |F'|=|F|-2$.
To define formally the contracted graph $G'$, let $e=vw$ be a contractible $FF$ edge in $G$ and let $avw$ and $bvw$ be the two facial 3-cycles which contain $e$. Then $G'$ is obtained from $G$ by an \emph{edge contraction} on $e=vw$ if $G'$ is obtained by (i) deleting the edges $aw$ and $bw$, (ii) replacing all remaining edges of the form $xw$ with $xv$, (iii) deleting the edge $e$ and the vertex $w$ and discarding the faces $avw$ and $bvw$.
That $G'$ is simple follows from the fact that a contractible $FF$ edge does not lie on a nonfacial 3-cycle.
Given an edge contraction move $G \to G'$ we may consider the inverse move, recovering $G$ from $G'$, which we define to be a \emph{planar vertex splitting move}, or \emph{vertex splitting move of planar type}, at the vertex $v$. In particular this move introduces a new vertex $w$, 2 new facial 3-cycles, and the new $FF$ edge $vw$. Intuitively, taking account of an embedding of the surface graph $G'$ in a surface ${\mathcal{M}}$, this corresponds to a construction of a new
surface graph $G$ with embedding in ${\mathcal{M}}$. For comparison we note the form of a general vertex splitting move defined at the level of simple graphs.
Let $H' = (V',E')$ be a simple graph with vertices $v_1, v_2, \dots , v_r$ and let $v_1v_2, v_1v_3, \dots , v_1v_n$, with $n\geq 3$, be
the edges of $E'$ that are incident to $v_1$. Let $H = (V, E)$ arise from $H'$ by the introduction of a
new vertex $w$, new edges $wv_1, wv_2, wv_3$ and the replacement of any number of the remaining
edges $v_1v_t$, for $t > 3$, by the edges $wv_t$. Then, the move $H' \to H$ is said to be a vertex splitting move on $v_1$.
The \emph{boundary} $\partial G$ of a surface graph $G$ is defined to be the graph determined by the set of edges which are not of $FF$ type. Note that this graph does not depend on any particular embedding of the surface graph.
\subsection{(3,6)-sparse ${\mathcal{P}}$-graphs.}\label{ss:mobius36tight}
If $H=(V,E)$ is a graph then its \emph{freedom number} is defined to be $f(H)=3|V|-|E|$. Then $H$ is \emph{$(3,6)$-sparse} if $f(H')\geq 6$ for any subgraph $H'$ with at least 3 vertices, or an edge, and is
\emph{$(3,6)$-tight} if it is $(3,6)$-sparse
and $f(H)=6$. In particular a $(3,6)$-tight graph is a simple connected graph, with no loop edges and no parallel edges. We also consider the freedom number of a surface graph to be the freedom number of its underlying graph.
Let us recall that a triangulated surface graph $G=(V,E,F)$ for the sphere $S^2$ is $(3,6)$-tight. Indeed, regard the associated graph $H$ as a planar graph and consider a reduction $H\to H'$ on deleting a single interior vertex
and its incident edges and adding chordal edges to the new face to triangulate it. Then $3|V'|-|E'|=3|V|-|E|$ and by induction
$3|V|-|E|$ agrees with the count for the triangle, and so $3|V|-|E|=6$. Similarly for any subgraph $H''$ of $H$ it follows that $3|V''|-|E''|\geq 6$.
\begin{lemma}
Let $G$ be a topologically contractible $(3,6)$-tight surface graph for ${\mathcal{P}}$ with more than three vertices. Then $G$ has a contractible $FF$ edge.
\end{lemma}
\begin{proof}
It follows from the definition of topologically contractible that $G=(V,E,F)$ has a surface graph embedding in an open subdisc ${\mathcal{D}}$ of ${\mathcal{P}}$ and so each face of $G$ maps to a face of the embedding of $(V,E)$ in ${\mathcal{P}}$ that is contained in ${\mathcal{D}}$. Since $G$ is $(3,6)$ tight it follows from the previous discussion that the surface graph $G$ is a triangulated surface graph
of a face graph $(B,\lambda)$ where $|\partial B|=3$ and $\lambda$ is trivial.
The existence of a contractible $FF$ edge clearly holds when $|V|=4$. Assume then that such an edge exists whenever $4 \leq |V|\leq n$ and that $ |V(G)| =n +1$. Consider an interior (non boundary) edge of $G$, say $e =uv$, with associated edges $xu, xv$ and $yu, yv$ for its adjacent faces. If $e$ is not contractible then there is a nonfacial triangle in $G$ with edges $zu, zv, uv$. The subgraph consisting of the 3-cycle $zu, zv, uv$ and its interior determines a new face graph with triangle boundary with fewer vertices than $G$ and it contains at least 4 vertices,
and so by the induction hypothesis $G$ contains a contractible interior edge.
\end{proof}
Assume now, as in the previous section, that $(B,\lambda)$ is a face graph for ${\mathcal{P}}$ with $\partial B$ a directed cycle graph of even length $2r, r \geq 2$, with $\lambda$ the set of paired opposite directed edges.
If the identification graph of $B/\lambda$ is simple then $B/\lambda$ is a triangulated surface graph, $S$ say, for ${\mathcal{P}}$.
The freedom number $f(B)$ is equal to $6+(2r-3)$, since $B$ may be viewed as a triangulated sphere (which has freedom number $6$) with $2r-3$ edges removed. Noting that $S$ is related to $B$ by the loss of $r$ vertices and $r$ edges it follows that
\[
f(S)= (3+2r) - 3r + r = 3.
\]
Let $G$ be a ${\mathcal{P}}$-graph which is determined by
an annular modified face graph $(B_0,\lambda)$ associated with $B$. If the inner boundary cycle has length $s$ then $f(G) = f(S)+ (s-3)$. Thus $f(G)=6$ if and only if $s=3$. Similarly suppose that $(B_0,\lambda)$
is obtained from $(B,\lambda)$ by removing the interior edges and vertices of several interior-disjoint triangulated subdiscs of $B$. Then $f(G)=6$ if and only if either there are two such subdiscs with boundary cycle lengths 5 and 4, or three subdiscs each with a boundary cycle of length 4.
\begin{lemma}\label{l:tightimplies12or3}
Let $G$ be the ${\mathcal{P}}$-graph determined by a modified face graph $(B_0,\lambda)$ for the face graph $(B,\lambda)$ for ${\mathcal{P}}$. If $G$ is (3,6)-tight then $B_0$ has $k$ nontriangular faces, for $k=1, 2$ or $3$ where, for $k=1$ the face has a 6-cycle boundary, for $k=2$ the boundary cycles have length 5 and 4, and for $k=3$ the boundary cycles have length 4.
\end{lemma}
\begin{proof}
Note first that there can be no interior vertices of $B$ which appear in $B_0$ with degree 1 for these vertices would appear in $G$ with degree 1. It follows that $B_0$ is obtained from $B$ by deleting the interior edges and vertices of several interior-disjoint triangulated subdiscs of $B$. Since the Maxwell count $f(G)=6$ must hold it follows from our previous remarks that $B_0$ satisfies the conditions of the lemma.
\end{proof}
The necessary conditions given in Lemma \ref{l:tightimplies12or3} for $(3,6)$-tightness are not sufficient conditions since, as we see more precisely in Section \ref{ss:criticalcycles}, there are constraints on the lengths of cycles which go around holes.
As we have noted in the previous section, the ${\mathcal{P}}$-graphs of Lemma \ref{l:tightimplies12or3} are not topologically contractible. On the other hand Lemma \ref{l:converseforuncontractibles} shows that every $(3,6)$-tight ${\mathcal{P}}$-graph which is not topologically contractible has a modified face graph representation as in Lemma \ref{l:tightimplies12or3}.
\begin{lemma}\label{l:technicalLemma}
An embedding of a $(3,6)$-tight surface graph in ${\mathcal{P}}$ is topologically contractible if the image of each cycle of edges lies in an open subdisc.
\end{lemma}
\begin{proof} A $(3,6)$-tight surface graph $G$ is not a tree and so contains cycles $c$.
Let $\pi:G\to {\mathcal{P}}$ be the embedding and note that the open set ${\mathcal{P}}\backslash \pi(c)$ has two components, one an embedded open disc ${\mathcal{D}}_c$ with boundary $\pi(c)$, and the other a M\"obius strip. Consider the union of two such open discs together with their boundary curves. This is a proper closed subset since otherwise it would contain a non contractible curve and hence a noncontractible cycle in $\pi(c_1)\cup\pi(c_2)$, contrary to the hypotheses. Similarly, by induction, the union, ${\mathcal{B}}$ say, of all the sets ${\mathcal{D}}_c$ and their boundaries $\pi(c)$ is a proper closed subset which contains $\pi(G)$ (including the images of the faces). Since $G$ is 2-connected the boundary of this closed set is a subset of $\pi(E)$ which is a union of cycles. By construction the boundary must be a single cycle, and ${\mathcal{B}}$ is an embedded closed disc, and so the lemma follows.
\end{proof}
\begin{lemma}\label{l:converseforuncontractibles}
Let $G$ be a topologically uncontractible $(3,6)$-tight ${\mathcal{P}}$-graph. Then there is a modified face graph $(B_0,\lambda)$ for a $2r$-cycle face graph $(B,\lambda)$ for ${\mathcal{P}}$ such that $G$ is isomorphic to the surface graph $B_0/\lambda$.
\end{lemma}
\begin{proof} Let $\pi$ be an embedding of the face graph $G$ in ${\mathcal{P}}$. In particular the set of embedded faces, $\pi(F)$, accounts for all the triangular faces determined by $\pi(E)$.
By the previous lemma there is a cycle $c$ of edges $e_1, \dots , e_r$ for which $\pi(c)$ is not topologically contractible. The complement of $\pi(c)$ in ${\mathcal{P}}$ is therefore an open subdisc which is partially triangulated by the set $\pi(F)$. Moreover this partial triangulation may be extended to a full triangulation of ${\mathcal{P}}$ by triangulating the nontriangular embedded faces.
This is associated with a corresponding triangulation of a closed disc $B$ with a boundary curve a $2r$-cycle, corresponding to a repetition of the $r$-cycle $c$, and the desired representation of $G$ follows.
\end{proof}
\begin{rem}\label{r:uniqueness}
We have shown that a topologically uncontractible $(3,6)$-tight ${\mathcal{P}}$-graph $G$ has a modified face graph representation $(B_0,\lambda)$, with $k=1,2$ or $3$ nontriangular holes, and an associated $2r$-cycle face graph $(B,\lambda)$ for ${\mathcal{P}}$. This defines a particular surface graph embedding $\alpha:G \to {\mathcal{P}}$. We now note that any two surface graph embeddings
of $G$ are naturally equivalent. In fact this equivalence is not needed in subsequent arguments.
Two surface graph embeddings $\alpha, \beta:G \to {\mathcal{P}}$ are \emph{equivalent} if
there exists a homeomorphism $\phi$ of ${\mathcal{P}}$ such that $\phi\circ \beta = \alpha$, that is, such that the following equalities of closed sets holds:
\[
\phi(\beta_V(v)) = \alpha_V(v),\quad
\phi(\beta_E(e)) = \alpha_E(e),\quad
\phi(\beta_F(f)) = \alpha_F(f), \quad \forall v\in V, e\in E, f\in F.
\]
To see that surface graph embeddings $\alpha, \beta: G\to {\mathcal{P}}$ are equivalent in this sense we may assume that $\alpha$ is equal to the modified face graph embedding $\lambda_\alpha$ associated with $(B_0^\alpha,\lambda_\alpha)$ and the face graph $(B^\alpha,\lambda_\alpha)$. Thus $\alpha$ has an extension to an embedding $\alpha^+$ where
\[\alpha: G=B_0^\alpha/\lambda_\alpha \to {\mathcal{P}}={\mathcal{P}}_\alpha, \quad \quad
\alpha^+: G^+= B^\alpha/\lambda_\alpha \to {\mathcal{P}}={\mathcal{P}}_\alpha.
\]
The second embedding, namely
\[\beta: G =B_0^\alpha/\lambda_\alpha \to {\mathcal{P}}_\alpha,
\]
gives rise to a new partial triangulation of ${\mathcal{P}}_\alpha$ given by the faces of $\beta(G)$. In other words, with $G=(V,E,F), $ we have the two partial triangulations of ${\mathcal{P}}$ given by $\alpha(F)$ and $\beta(F)$ as well as a full triangulation $\alpha^+(F^+)$ of ${\mathcal{P}}$ which extends $\alpha(F)$ by means of a triangulation of the remaining nontriangular faces of the embedded graph $\alpha((V,E))$.
Note that the nontriangular faces of $\alpha((V,E))$ and $\beta((V,E))$ have interiors which are open discs and their boundary walks are given by the embeddings of the boundaries of the holes of $B_0^\alpha$.
In view of this we may construct an extension $\beta^+: G^+ \to {\mathcal{P}}$ of $\beta$
by triangulating each nontriangular face of $\beta(G)$ with a pattern that matches the given triangulation of the corresponding nontriangular face of $\alpha(G)$.
It is now elementary book-keeping to construct a homeomorphism $\phi$ so that $\phi\circ \beta^+=\alpha^+$. Considering restrictions to $G$ we see that $\alpha$ and $\beta$ are equivalent.
\end{rem}
We make use of the following notation for topologically uncontractible $(3,6)$-tight ${\mathcal{P}}$-graphs.
\begin{definition}\label{d:P_k}
The set ${\mathfrak{P}}_k,$ for $k=1,2,3$, is the set of $(3,6)$-tight ${\mathcal{P}}$-graphs which are representable by modified face graphs with $k$ nontriangular faces.
\end{definition}
We also define an \emph{embedded triangulated disc} in ${\mathcal{P}}$ to be the image of a triangulated disc surface graph under an embedding,
in the relaxed sense that distinct faces map to distinct faces but distinct vertices or edges on the boundary may have the same image in ${\mathcal{P}}$.
Such an embedded triangulated disc can always be extended to a triangulated surface graph for ${\mathcal{P}}$.
Note that the interior of the closure of the faces of an embedded triangulated disc is evidently homeomorphic to an open disc. On the other hand the closure of the faces need not be homeomorphic to a closed disc and indeed can be equal to ${\mathcal{P}}$.
\subsection{When contracted surface graphs are $(3,6)$-tight}
A contraction move $G \to G'$ on a contractible $FF$ edge $e$ of a surface graph preserves the Maxwell count but need not preserve $(3,6)$-tightness.
We now examine this more closely in the case of a surface graph for the real projective plane ${\mathcal{P}}$.
Suppose that $G_1\subseteq G$ and that both $G_1$ and $G$ are in ${\mathfrak{P}}_1$.
If $e$ is a contractible $FF$ edge of $G$ which lies on the boundary graph of $G_1$ then, since $G_1$ contains only one of the facial 3-cycles incident to $e$, the contraction $G\to G'$ for $e$ gives a contraction $G'$ which is not $(3,6)$-sparse, since $f(G_1')=5$. We shall show in Lemmas \ref{l:obstacle1}, \ref{l:obstacle2} that the failure of an edge contraction to preserve $(3,6)$-sparsity is due to such subgraph obstacles.
The following lemma, which we refer to as the filling in lemma, was obtained for the torus in Lemma 4.3 of \cite{cru-kit-pow-2}, and an earlier variant for block and hole graphs is Lemma 26 of \cite{cru-kit-pow-1}.
\begin{lemma}\label{l:fillingin}
Let $G_*$ be the underlying graph of a $(3,6)$-tight surface graph $G$ for ${\mathcal{P}}$ and let $H$ be the graph of an embedded triangulated disc graph in $G$ with boundary graph $\partial H$.
(i) If $K$ is a $(3,6)$-tight subgraph of $G_*$ with $K\cap H = \partial H$ then $\partial H$ is a $3$-cycle graph.
(ii) If $K$ is a $(3,6)$-sparse subgraph of $G_*$ with $f(K)=7$ and $K\cap H = \partial H$ then $\partial H$ is either a $3$-cycle or $4$-cycle graph.
\end{lemma}
\begin{proof}(i) Write $H^c$ for
the subgraph of $G_*$ which contains the edges of $\partial H$ and the edges of $G_*$ not contained in $H$.
Since $G_* = H^c\cup H$ and
$H^c\cap H= \partial H$ we have
\[
6=f(G_*) =f(H^c) +f(H)-f(\partial H).
\]
Since $f(H^c)\geq 6$
we have
$f(H)-f(\partial H)\leq 0$.
On the other hand,
\[
6\leq f(K\cup H) =
f(K)+ f(H) -f(\partial H)
\]
and $f(K)=6$ and so
it follows that $f(H) -f(\partial H)=0$.
Let $i:D \to G$ be the triangulated disc embedding with $H$ the underlying graph of the surface graph $i(D)$. Since $i$ is injective on the set of interior vertices of $D$ and the set of interior edges of $D$, it follows that
\[f(H)-f(D) = f(\partial H) - f(\partial D).\]
We deduce that $f(D)-f(\partial D)=0$. We have $f(D) = 6+(s-3)$ when the boundary is an $s$-cycle, while $f(\partial D) = 2s$ and so $s=3$. It follows that $i(D)$ is a 3-cycle graph (even though, in general, $i$ need not be injective on the boundary edges of $D$).
(ii) The argument above leads to $-1 \leq f(H) -f(\partial H)$ and hence $-1 \leq f(D) -f(\partial D)$. It follows now that
$\partial D$ is either a $3$-cycle graph or a $4$-cycle graph. Since $G_*$ is simple it follows that the graph $\partial H = i(\partial D)$ is also
a $3$-cycle graph or a $4$-cycle graph.
\end{proof}
We shall also make use of the following topological property of ${\mathcal{P}}$.
\begin{lemma}\label{l:topologicalproperty}
Let $(B,\lambda)$ be a face graph for ${\mathcal{P}}$ and let $U\subset {\mathcal{P}}$ be a connected open set which is the interior of the union of a set of embedded faces of $B$. Then one of the following holds. (i) $U$ is an open disc, (ii) the closure $\overline{U}$ contains a M\"obius strip, (iii) the complementary open set ${\mathcal{P}}\backslash \overline{U}$ is not connected.
\end{lemma}
\begin{proof} Suppose that (ii) does not hold. Then every cycle in $B/\lambda$ is contractible. The proof of Lemma \ref{l:technicalLemma} applies and so $U$ is contained in an embedded open disc. Thus either (i) or (iii) must hold.
\end{proof}
\begin{lemma}\label{l:obstacle1}
Let $G\in{\mathfrak{P}}_1$, let $e$ be a contractible $FF$ edge in $G$, and let $G'$ be the simple surface graph arising from the contraction move $G \to G'$ associated with $e$.
Then either $G'\in {\mathfrak{P}}_1$ or
$e$ lies on one face of a surface subgraph $G_1$ of $G$, with $G_1\in {\mathfrak{P}}_1$.
\end{lemma}
\begin{proof} Assume that $G'\notin {\mathfrak{P}}_1$. It follows that $G'$ must fail the $(3,6)$-sparsity count.
Thus there exists a subgraph $K$ of the underlying graph $G_*$ of $G$ containing $e$ for which the edge contraction results in a graph $K'$ satisfying $f(K')<6$.
Let $e=vw$ and let $c$ and $d$ be the facial $3$-cycles of $G$ which contain $e$. If both $c$ and $d$ are subgraphs of $K$ then
$f(K)=f(K')<6$, which contradicts the sparsity count for $G$. Thus $K$ must contain at most one of these facial $3$-cycles.
\emph{Case 1}. Suppose first that $K$ contains $c$ and not $d$ and is maximal among all subgraphs of $G_*$ which contain the cycle $c$, do not contain $d$, and for which contraction of $e$ results in a simple graph $K'$ with $f(K')<6$.
Note that $f(K)=f(K')+1$ which implies $f(K)= 6$ and $f(K')=5$.
In particular, $K$ is $(3,6)$-tight, and is a connected graph. Also we may view $K$ as a surface graph for ${\mathcal{P}}$ endowed with the inherited facial structure from $G$.
Let $(B_0,\lambda)$ be a face graph for $G$ with an associated face graph $(B,\lambda)$ for a triangulated surface graph for $S=(V,E,F)$ for ${\mathcal{P}}$.
In particular $(B,\lambda)$ provides a faithful topological embedding $\pi: S \to {\mathcal{P}}$.
Let $X(K)\subset {\mathcal{P}}$ be the closed set $\pi_E(E(K))$ and let
$\tilde{X}(K)$ be the union of $X(K)$ and the embeddings of the faces for the facial $3$-cycles belonging to $K$. Finally, let $U_1, \dots , U_n$ be the maximal connected open sets of the complement of $\tilde{X}(K)$ in ${\mathcal{P}}$.
Note that each such connected open set $U_i$ is determined by a set ${\mathcal{U}}_i$ of embedded faces of $S$ with the property: each pair of embedded faces of $U_i$ are the endpoints of a path of edge-sharing embedded faces in ${\mathcal{U}}_i$. From Lemma \ref{l:topologicalproperty}, $U_i$ has one of the following 3 properties.
(i) $U_i$ is an open disc.
(ii) $U_i$ contains a M\"{o}bius strip.
(iii) The complement of $U_i$ is not connected.
\noindent The third property cannot hold
since the embedding of $K$ is contained in the complement of $U_i$ and contains the boundary of $U_i$, and yet $K$ is a connected graph.
From the second property it follows that $K$ and its facial 3-cycles are embedded in the complement of a M\"{o}bius strip, and this complement is an open disc. This is also a contradiction, since
the edge contraction of a contractible $FF$ edge in a planar triangulated graph preserves $(3,6)$-sparsity.
Each set $U_i$ is therefore the interior of the closed set determined by an embedding of a triangulated disc graph in $S$, say $H(U_i)$. Indeed, the facial 3-cycles in $S$ defining $H(U_i)$ are those whose projective plane embedding have interior set contained in $U_i$.
We may assume that $U_1$ is the open set that contains the
single nontriangular face of the embedding of $G$. Thus, if $n=1$ then we may take $G_1$ to be the surface subgraph of $S$ with underlying graph $K$.
Suppose that $i>1$. By the filling in lemma, Lemma \ref{l:fillingin}, it follows that $\partial H(U_i)$ is a 3-cycle.
{Note that no triangulated disc $H(U_i)$ can contain the facial 3-cycle $d$, for in this case the boundary 3-cycle of $H(U_i)$ contains $e$ and the contracted graph $K'$ fails to be simple.
By the maximality of $K$ we have $n=1$, since adding the edges and vertices of $S$ interior to these nonfacial 3-cycle boundaries gives a subgraph of $G_*$ with the same freedom count and which does not contain the 3-cycle $d$.}
Thus, $K$ is the graph of a surface subgraph $G_1$ of $G$ obtained from $S$ by removing the faces of $H(U_1)$ and its interior vertices and edges, and so the proof is complete in this case.
\emph{Case 2.}
It remains to consider the case when $K$ contains neither of the facial $3$-cycles $c, d$ which contain $e$. Thus $f(K)=f(K')+2$ and $f(K)$ is 6 or 7.
Once again we assume that $K$ is a maximal subgraph of $G_*$ with respect to these properties and consider the components $U_1, \dots , U_n$ of the complement of $\tilde{X}(K)$. As before, each set $U_i$ is homeomorphic to a disc and determines an embedded triangulated disc graph $H(U_i)$ in $S$, one of which, say $H(U_1)$, contains the triangulated disc in $S$ associated with the single hole of $G$. If $n=1$ then the proof is complete since we may take $G_1$ to be the surface graph associated with $K$.
On the other hand,
Lemma \ref{l:fillingin} implies that each boundary of $H(U_i)$, for $i>1$, is a $3$-cycle or a $4$-cycle. As in Case 1, maximality implies that a $3$-cycle boundary is not possible. Consider the 4-cycle boundary of $H(U_i)$, for some $i>1$, and note first that it cannot contain both $3$-cycles $c$ and $d$, since any edge of $K$, and in particular the edge $e$, belongs to $H(U_i)$ only if it belongs to the boundary cycle of $H(U_i)$.
Suppose then that $H(U_i)$ contains $c$ but not $d$. If $f(K)=6$, rather than $7$, then, with $H(U_i)_*$ the underlying graph of $H(U_i)$ we have $f(K\cup H(U_i)_*)=f(K)+f(H_i(U_i))-f(K\cap H(U_i)_*) =6+7-8=5$, contradicting $(3,6)$-sparsity. It follows that $K\cup H(U_i)_*$ is $(3,6)$-tight and contains $c$ but not $d$. Since this is Case 1 the proof is complete.
\end{proof}
The filling in lemma holds for the surface graphs in ${\mathfrak{P}}_2, {\mathfrak{P}}_3$ and we may extend Lemma \ref{l:obstacle1} in the following manner.
\begin{lemma}\label{l:obstacle2}
Let $G\in{\mathfrak{P}}_k$, for $k=1, 2$ or $3$, let $e$ be a contractible $FF$ edge in $G$, and let $G'$ be the simple surface graph arising from the contraction move $G \to G'$ associated with $e$.
Then either $G'\in {\mathfrak{P}}_k$ or
$e$ lies on one face of a surface subgraph $G_1$ of $G$, with
$G_1\in {\mathfrak{P}}_l$, for some $1\leq l\leq k$.
\end{lemma}
\begin{proof}
The proof for $k=2, 3$ follows the same pattern as in the previous proof for the case $k=1$. Thus we assume that $G' \notin {\mathfrak{P}}_k$ and consider a subgraph $K$ of $G_*$ subject to two cases. In Case 1 $K$ is maximal among all subgraphs which contain $c$ and not $d$, where $c, d$ are the 3-cycles incident to $e$, and $f(K')=5$. In Case 2 $K$ is maximal among subgraphs of $G$ not containing $c, d$ and for which $f(K')$ is equal to 4 or 5. We consider again the open set which is the complement of the embedding in ${\mathcal{P}}$ of $K$ and its facial 3-cycles.
This open set has connected components $U_1, \dots , U_n$ and each is the interior of a union of an edge-connected set of ${\mathcal{P}}$-embedded facial 3-cycles of the surface graph $S$ for ${\mathcal{P}}$. Also the graphs $H(U_j)$ are the associated surface subgraphs of $S$. It follows as before that each $U_j$ is an open disc.
Suppose that $H(U_j)$ does not contain any of the $k=2$ or $3$ triangulated discs which define $G$. In Case 1, by the filling in lemma the boundary of $H(U_j)$ must be a 3-cycle, and so the proof is completed as before. Indeed, $1\leq n \leq 3$ and each of the surface graphs $H(U_1), \dots , H(U_n)$ contains at least one of the triangulated discs that define $G$ and we may take $G_1$ to be the surface graph determined by $K$.
In Case 2, by the filling in lemma, the boundary of $H(U_j)$ is a 3-cycle or a 4-cycle. As before, by maximality, the boundary is not a 3-cycle.
If it is a 4-cycle then either $K\cup H(U_j)$ contradicts the maximality or $H(U_j)$ or $K\cup H(U_j)$ contains one or both of $c, d$.
As in the previous proof, containing both is not possible and so $K\cup H(U_j)$ contains one of $c$ and $d$. Since this is Case 1, the proof is complete.
\end{proof}
We remark that Lemma \ref{l:obstacle2} is analogous to the critical cycle lemma, Lemma 27 of \cite{cru-kit-pow-1}, for $(3,6)$-tight block and hole graphs where, roughly speaking, there is a single block that is complementary to face graph of a multiconnected surface graph in the plane.
\subsection{Critical embedded cycles}\label{ss:criticalcycles}
Lemma \ref{l:obstacle2} reveals the obstacle to the preservation of $(3,6)$-sparsity when contracting the contractible edge $e$ of a surface graph $G$ in ${\mathfrak{P}}_k$, namely that $e$ lies on the boundary of a surface subgraph $G_1$ of $G$ which is in ${\mathfrak{P}}_l$ for some $l\leq k$.
Let $S$ be a triangulated surface graph for ${\mathcal{P}}$ that contains $G$, so that $G$ is given by removing the interior vertices, edges and faces of interior disjoint embedded triangulated discs $\pi(D_1), \dots ,\pi(D_k)$. Then $G_1$ is given similarly in terms of interior-disjoint
embedded triangulated discs $\mu_1(B_1), \dots , \mu_l(B_l)$, where the edge $e$ is in $\mu_1(\partial B_1)$ and $\mu_1(B_1)$ contains one or more of
$\pi(D_1), \dots , \pi(D_k)$.
If $c_1$ is the boundary $r$-cycle of $B_1$ then we refer to $c=\mu_1(c_1)$ as a \emph{critical embedded $r$-cycle} or \emph{critical walk (or $r$-walk)} in $G$, where $|\partial B_1|= r$, with $r=4,5$ or $6$. We also use this terminology to include the boundary walks $\pi(\partial D_i)$ of the surface graph $G$. A boundary walk is formally a sequence of edges and for simple graphs can be indicated by specifying the associated sequence of vertices.
Note that $G_1$ is a vertex-induced surface subgraph of $G$ since it is $(3,6)$-tight.
Also, considering Maxwell counts, it follows in all cases that
\[
r-3= |\partial B_1|-3 = \sum_{i\in I}(|\partial D_i|-3)
\]
where $I=\{i:\pi(D_i)\subseteq \mu_1(B_1)\}$.
An \emph{embedded planar $r$-cycle} in a surface graph $G$ is defined to be the boundary walk of an embedded triangulated disc in $G$.
In particular a critical embedded $r$-cycle of $G\in {\mathfrak{P}}_k$, for some $r=4,5$ or 6, is not an embedded planar $r$-cycle in $G$.
\begin{lemma}\label{l:planar3cycle}
Let $G$ be a surface graph in ${\mathfrak{P}}_k$ for some $k=1,2,3$, with an embedded planar 3-cycle which is not a facial 3-cycle. Then there is a contractible edge $e$ with $G/e$ in ${\mathfrak{P}}_k$.
\end{lemma}
\begin{proof}
Let $\pi(\partial B)$ be the planar embedded 3-cycle with triangulated disc $B$ with 4 or more vertices. Then there is an edge $e'$ of $B$, not in $\partial B$, for which $B/e'$ is simple and hence the edge $e=\pi(e') $ is a contractible edge. Since a surface subgraph associated with a critical walk is an induced subgraph it follows that $e$ cannot lie on a critical walk, and so $G/e$ is in ${\mathfrak{P}}_k$.
\end{proof}
It is convenient to refer to a 3-cycle in $G$ as an \emph{essential 3-cycle of $G$} if it is not a facial 3-cycle and not an embedded planar 3-cycle. In fact these cycles are the 3-cycles with an associated homotopy class that is nonzero.
The following useful lemma enables the creation of new critical embedded cycles by means of subwalk substitutions. Figure \ref{f:blendingCriticals}(i) gives an intuitive sketch of this. The bold ellipses indicate critical walks of lengths $4$ and $5$ in a surface graph $G$ in ${\mathfrak{P}}_3$ and the boundary walk of the union of the interior of the ellipses is necessarily a critical walk.
\begin{lemma}
\label{l:blendingcriticals}
Let $G$ be a surface graph in ${\mathfrak{P}}_k$, with $1\leq k\leq 3$, and let $c_a, c_b$ be critical walks in $G$ with associated embedded discs $\pi(D_a), \pi(D_b)$ and surface subgraphs $G_a, G_b$ in ${\mathfrak{P}}_l$, for $1\leq l\leq k$. Suppose that $G_a\cap G_b$ contains a face and the embedded open discs $\pi(D_a^\circ), \pi(D_b^\circ)$ have union equal to an embedded open disc.
Then the boundary walk of $\pi(D_a)\cup \pi(D_b)$ is a critical walk of the (3,6)-tight surface graph $G_a\cap G_b$.
\end{lemma}
\begin{proof}
We have the freedom count equation
\[
f(G_a\cup G_b)=f(G_a)+f(G_b)-f(G_a\cap G_b)
\]
together with $f(G_a)=6, f(G_b)=6$ and $f(G_a\cap G_b)\geq 6$.
Thus $f(G_a\cup G_b)=6$ in view of (3,6)-sparsity. Thus $f(G_a\cap G_b)=6$ and so $G_a\cup G_b$ and $G_a\cap G_b$ are $(3,6)$-tight.
The hypotheses ensure that one of the embedded discs that defines $G_a\cap G_b$ is equal to $\pi(D_a\cup D_b)$ and so $\pi(D_a\cup D_b)$ has a well-defined boundary walk. It follows then that this walk is a critical embedded cycle in $G$.
\end{proof}
\begin{center}
\begin{figure}
\caption{Schematic diagrams of embedded critical walks (in bold) in ${\mathcal{P}}$.}
\label{f:blendingCriticals}
\end{figure}
\end{center}
\begin{center}
\begin{figure}
\caption{The critical embedded 6-cycle $v_1uv_3v_2v_2$ is not the repetition of an essential 3-cycle.}
\label{f:2criticalsFailingHypothesis}
\end{figure}
\end{center}
Figure \ref{f:2criticalsFailingHypothesis} gives the modified face graph
$(B_0,\lambda)$ of a surface graph $G$ in ${\mathfrak{P}}_2$ which illustrates various critical 6-walks. The closed walk $v_1v_2v_3v_1v_2v_3v_1$ is the repetition of the essential 3-cycle $v_1v_2v_3$ and its associated embedded disc in a containing triangulated surface graph $B/\lambda$ for ${\mathcal{P}}$ contains all the faces of $\pi(B)$. Other critical walks of this type are associated with the essential 3-cycles $v_1uv_3$, $v_1uv$, $v_2uv$. On the other hand the critical 6-walks $v_1uv_3v_1v_2$ and
$v_1uvv_2uv$ are not repeated 3-cycles.
\section{The irreducibles}\label{s:theirreducibles}
Let $k=1,2$ or $3$. Then a surface graph $G$ in ${\mathfrak{P}}_k$ admits a reduction sequence
\[
G=G_1 \to G_2 \to \dots \to G_n
\]
where (i) each $G_i$ is in ${\mathfrak{P}}_k$, (ii) each move $G_i \to G_{i+1}$ is an edge contraction for an $FF$ edge, and (iii) $G_n$ is \emph{irreducible} in ${\mathfrak{P}}_k$ in the sense that it admits no edge contraction to a surface graph in ${\mathfrak{P}}_k$. We show in this section that there are 8 such irreducible surface graphs, denoted $G_{b,1}, \dots , G_{b,8}$ and also referred to as \emph{base graphs}. They are given in Figure \ref{f:irred_Five} in terms of modified face graphs.
\begin{center}
\end{center}
\begin{center}
\begin{figure}
\caption{Modified face graphs for the irreducibles $G_{b,1}, \dots , G_{b,8}$.}
\label{f:irred_Five}
\end{figure}
\end{center}
\begin{center}
\begin{figure}
\caption{Isomorphs of (i) $G_{b,8}$ and (ii) $G_{b,7}$.}
\label{f:irred_PlusExtra}
\end{figure}
\end{center}
A surface graph is \emph{uncontractible} if every $FF$ edge lies on a nonfacial 3-cycle and so
an uncontractible surface graph $G$ in ${\mathfrak{P}}_k$ is certainly irreducible. That the reverse implication also holds is a corollary of the identification of the irreducibles.
Let us also note that the surface graphs $G_{b,6}, G_{b,7}, G_{b,8}$ are augmentations of $G_{b,2}$ by a degree 3 vertex and a facial 3-cycle. They are nonisomorphic surface graphs by virtue of the fact that they are nonisomorphic as graphs. Figure \ref{f:irred_PlusExtra} shows other representations of $G_{b,7}$ and $G_{b,8}$ which arise this way.
Also, as graphs we have $G_{b,1}=K_3, G_{b,2}= K_5-e, G_{b,3}=K_4$ and $G_{b,5}$ is the cone over $K_{3,3}$. The remaining graphs $G_{b,4}, G_{b,6},G_{b,7},G_{b,8}$ are depletions of $K_6$ by 3 edges where these edges
are (i) disjoint, (ii) form a copy of $K_3$, (iii) have 1 vertex incident to a pair of the edges, (iv) have 2 vertices incident to a pair of the edges.
In fact the 8 graphs account for all possible $(3,6)$-tight graphs on $n$ vertices for $n=3, 4, 5, 6$, together with 1 of the 26 such graphs for $n=7$. We remark that for $n=8, 9, 10$ the number of $(3,6)$-tight graphs rises steeply, with values 375, 11495, 613092 (Grasegger \cite{gra}).
\begin{lemma}\label{l:degree3lemma}
Let $G$ be a surface graph in ${\mathfrak{P}}_k$, with $k=1, 2$ or $3$.
If $e$ is an $FF$ edge that is incident to a degree 3 vertex then
the contraction $G/e$ is in ${\mathfrak{P}}_k$.
\end{lemma}
\begin{proof}
The edge $e$ is contractible, that is, $G/e$ is simple, since it does not lie on a nonfacial 3-cycle. Let $e$ be the edge $uv$ with $\deg(v)=3$ and with facial 3-cycles $uvx$ and $uvy$. If it lies on a critical embedded 4-, 5- or 6-cycle, then the associated surface subgraph fails to be vertex induced. Thus Lemma \ref{l:obstacle2} completes the proof.
\end{proof}
\begin{lemma}\label{l:oneholelemma}
Let $G_1 \subset G$ be surface graphs in ${\mathfrak{P}}_1$ determined by embeddings $\pi(D_1)$ and $\pi(D)$, respectively, of triangulated discs, where $D$ is a proper subset of $D_1$.
Then $G$ is constructible from $G_1$ by planar vertex splitting moves.
\end{lemma}
\begin{proof}
Suppose that $|V(G)|=|V(G_1)|+1.$
It follows that the vertex of the boundary of $G$ which is not in $G_1$ has degree $3$. By Lemma \ref{l:degree3lemma} $G$ is constructible from $G_1$ by a single planar vertex splitting move.
Assume next that the lemma is true whenever $|V(G)|=|V(G_1)|+j$, for $j = 1,2,\dots , N-1$, and suppose that $|V(G)|=|V(G_1)|+N$. We claim that the embedded annulus graph $\pi(A)$, where $A$ is defined by removing the interior vertices and edges of $D$ from $D_1$, has an $FF$ edge. To see this let $v_1=\pi(w_1)$ be a vertex of the embedded 6-cycle $c=\pi(c_1)$, where $c_1=w_1w_2\dots w_6$ is the boundary cycle for $D_1$, which is not a vertex of $D$. Since, by $(3,6)$-sparsity, $G_1$ is an induced graph in $G$ there is no edge $\pi(w_2)\pi(w_6)$ in $G_1$. It follows that there is an interior edge $e=v_1v$ in $\pi(A)$ and moreover that $e$ is an $FF$ edge, since $A$ is a (possibly degenerate) triangulated annulus.
If the contraction $G/e$ is in ${\mathfrak{P}}_1$ then it follows from the induction step assumption that $G$ is constructible from $G_1$ by planar vertex splitting moves. So, by Lemma \ref{l:obstacle1} we may assume (i), that $e$ lies on a critical embedded $6$-cycle, $c_e$ say,
or (ii), that $e$ lies on a nonfacial 3-cycle of $G$. In the former case
we may substitute a subwalk of the critical embedded 6-cycle $c_b=\pi(\partial D_1)$ by a subwalk of $c_e$ of the same length which is interior to $\pi(\partial D_1)$ and has the same initial and final vertices and where these subwalks form the boundary of an embedded triangulated disc in $\pi(D_1\backslash D)$.
The resulting critical embedded 6-cycle $c'$ lies strictly inside $c_b$. If $G_1'\in {\mathfrak{P}}_1$ is its associated surface graph then we have
$|V(G)|-|V(G_1')|<N$ and $|V(G_1')|-|V(G_1)|<N$, and it follows from the induction step that the lemma holds for $G$ and $G_1$.
We may assume then that (ii) holds.
Since $e$ is an $FF$ edge in $\pi(A)$ it follows that any nonfacial 3-cycle containing $e$ is an embedded planar 3-cycle in $G$ of the form $\pi(\partial B)$ for an embedded disc $\pi(B)$ with $B$ contained in $A$. By Lemma \ref{l:planar3cycle} there is a contractible edge $f$ with $G/f$ in ${\mathfrak{P}}_1$ and so the induction step follows in this case also.
\end{proof}
The previous lemma shows that $G_{b,1}$ is the unique irreducible surface graph in ${\mathfrak{P}}_1$. It is the surface graph given by the graph $K_3$ together with an empty facial structure.
For the surface graphs $G$ in ${\mathfrak{P}}_2, {\mathfrak{P}}_3$ the reduction arguments are more involved since the embedded disc associated with a critical $s$-walk, for $s=5$ or $6$, can contain more than one boundary walk of $G$. However, the next lemma implies that $G$ is not irreducible if there is a critical walk that properly contains a critical walk of the same length, and we use this corollary frequently.
\begin{lemma}\label{l:mholelemma}
Let $r=2$ or 3 and let $G_1 \subset G$ be surface graphs in ${\mathfrak{P}}_r$ determined by embedded triangulated discs $\pi(B_i)$, $1\leq i\leq r$, and $\pi(D_i)$, $1\leq i\leq r$, respectively, where $D_i$ is a proper subset of $B_i$ for each $i$.
Then $G$ is constructible from $G_1$ by planar vertex splitting moves.
\end{lemma}
\begin{proof} We may argue by induction as in the previous lemma.
Suppose for example that $r=2$ and that $e$ is an $FF$ edge of $G\backslash G_1$ and that $e$ is the embedding of an edge in $B_1\backslash D_1$ where the boundary cycles of $B_1, D_1$ are of length 5.
The induction step can be completed if $G/e$ is in ${\mathfrak{P}}_2$ and so we may assume that either (i), $e$ is on a critical embedded $s$-cycle $c_e$, for $s=5$ or 6, or (ii), $e$ lies on a nonfacial 3-cycle. If (i) holds with $s=6$ then we may apply Lemma \ref{l:blendingcriticals} to see that $e$ also lies on a critical embedded 5-cycle $c_e'$ which contains the embedding of $B_1$, in the usual sense.
Thus $c_e'$ determines an intermediate surface subgraph in ${\mathfrak{P}}_2$, and so we may complete the induction step as in the previous proof. If (i) holds with $s=5$ then, as before, we similarly obtain an intermediate surface subgraph in ${\mathfrak{P}}_2$. For the case $r=3$ Lemma \ref{l:blendingcriticals} can again be used to obtain an intermediate surface graph in ${\mathfrak{P}}_3$ and so in all cases the induction step may be completed.
\end{proof}
\begin{cor}\label{c:propercontainment}
Let $r=2$ or 3 and let $G_1 \subset G$ be determined by embedded triangulated discs $\pi(B_i)$, $1\leq i\leq r$, and $\pi(D_i)$, $1\leq i\leq r$, respectively. If $D_1$ is a proper subset of $B_1$ and $D_i=B_i$ for $i\neq 1$ then $G$ is not irreducible.
In particular, if $G$ is irreducible then it is not possible for $G$ to have a critical embedded 4-cycle containing an $FF$ edge.
\end{cor}
We next determine the surface graphs which contain no $FF$ edges. From the definition these are necessarily irreducible. The next three propositions, together with our remarks following Lemma \ref{l:obstacle1}, show that they are $G_{b,1}$ in ${\mathfrak{P}}_1$ and $G_{b,3}, G_{b,4}$ in ${\mathfrak{P}}_3$.
\begin{prop}\label{p:noFacesB}
Let $G$ be a surface graph in ${\mathfrak{P}}_k$, for $k=2$ or 3. If $G$ has no facial 3-cycles then $k=3$ and $G = G_{b,3}$.
\end{prop}
\begin{proof} Let $(B_0,\lambda)$ be a modified face graph representation for $G$ with boundary a $2r$-cycle. Note that for $k=2$ or 3 each vertex of $G$ that is not on the embedded $2r$-cycle has degree 3. In particular removing such a vertex and its incident edges does not change the Maxwell count of 6. It follows, by removing all interior vertices, that $r$ must be equal to 3 and the conclusion follows.
\end{proof}
\begin{prop}\label{p:FFedgesP2}
Every surface graph in ${\mathfrak{P}}_2$ has $FF$ edges.
\end{prop}
\begin{proof} Let $G$ in ${\mathfrak{P}}_2$ have a modified face graph representation $(B_0,\lambda)$ with outer boundary cycle of length $2r$. We may assume from the previous proposition that $G$ has faces. Suppose, by way of contradiction, that $B_0$ has 2 holes and $G$ has no $FF$ edges. Then there is an edge $xy$ in the boundary $2r$-cycle of $B_0$ which has a face $xyv$. The paired edge $x'y'$ therefore belongs to the boundary cycle of one of the holes of $B_0$. See Figure \ref{f:2holesNoFF} where $D_1$ indicates this hole. Note that it is not possible for $D_1$ to be incident to $x$ or $y$. Indeed, if $D_1$ has a 5-cycle boundary then since the edge $xy'$ does not exist, by the simplicity of $G$, the boundary walk of $D_1$ from $x$ to $x'$ has length 1 or 2, contradicting the simplicity of $G$. Since there are no $FF$ edges the other hole boundary must contain the edges $xv$ and $yv$, and this is a contradiction since it implies that $v$ has degree 2.
\begin{center}
\begin{figure}
\caption{$(B_0,\lambda)$ with a face and no $FF$ edges.}
\label{f:2holesNoFF}
\end{figure}
\end{center}
\end{proof}
\begin{prop}\label{p:faceNoFF}
Let $G$ be a surface graph in ${\mathfrak{P}}_3$ with facial 3-cycles and no $FF$ edges. Then $G = G_{b,4}$.
\end{prop}
\begin{proof}
Let $(B_0,\lambda)$ be a modified face graph representation for $G$ with boundary a $2r$-cycle. We show that $r$ can be $3, 4$ or 5 and in all cases the surface graph $G= B_0/\lambda$ is isomorphic to $G_{b,4}$.
\begin{center}
\begin{figure}
\caption{Possible subgraphs of $(B_0,\lambda)$ when $r=3$ (shown with bold edges).}
\label{f:case_ris3}
\end{figure}
\end{center}
We may assume that $B_0$ has a boundary edge $e=xy$ on a facial 3-cycle. Let $e'$ be the edge in the boundary of $B_0$ which is identified with $e$ by $\lambda$. Suppose first that $r=3$. By our assumptions $e'$ belongs to the boundary 4-cycle of a hole of $B_0$. It follows that $B_0$ contains a surface subgraph with one of the forms given in Figure \ref{f:case_ris3}.
In the first case the boundary 4-cycle is $v,w,y,x$ with vertices $v, w \neq z$.
The remaining two boundary 4-cycles of $B_0$ contain the edges $uy$ and $ux$ respectively, and neither 4-cycle contains both edges. Since there are no $FF$ edges it follows that $G=G_{b,4}$. In the second case, with $z=v$, we have $u \neq w$, by the simplicity of $G$. Since there are no $FF$ edges it follows that $ywz$ is a face since otherwise the boundary 4-cycle containing $ux$ must be incident to $y$, contradicting simplicity. But the presence of the face $ywz$ also contradicts the simplicity of $G$, and so the second case does not arise.
\begin{center}
\begin{figure}
\caption{Some modified face graph representations for $G_{b,4}$.}
\label{f:OctagonEtc}
\end{figure}
\end{center}
One can argue in the same way when $r=4$ and when $r=5$ to obtain the forms of $B_0$ indicated in Figure \ref{f:OctagonEtc}. Their associated surface graphs are isomorphic to $G_{b,4}$, completing the proof.
\end{proof}
The following general construction of a critical embedded 6-cycle $c_g$ with
a maximality property will be used in the next two proofs. We refer to it in the proofs as the \emph{maximal critical walk construction}.
Suppose that $G$ in ${\mathfrak{P}}_2$ or ${\mathfrak{P}}_3$ is irreducible and contains a critical $6$-walk $c_e$ with an $FF$ edge $e$. Let $G=G_e\cup A_e$ be the associated decomposition where $c_e$ is the boundary walk $\pi(\partial D_e)$ of an embedded triangulated disc $\pi(D_e)$ in a fixed triangulated surface graph $S$ for ${\mathcal{P}}$. If $G_e$ is equal to the irreducible $G_{b,1}$ then the construction stops. Otherwise $G_e$ has an $FF$ edge $f$ and we continue.
The edge $f$ must lie on a critical embedded cycle $c_f$ of $G$ which in turn is the boundary walk $\pi(\partial D_f)$ of an embedded triangulated disc $\pi(D_f)$. If the interior of the embedding of $D_e^\circ \cup D_f^\circ$ is an open disc then Lemma \ref{l:blendingcriticals} applies and one obtains a critical 6-walk with embedded triangulated disc $\pi(D_e\cup D_f)$. Continuing, the construction process stops, either with $G_g= G_{b,1}$ and $c_g$ a repeated essential 3-cycle, or with a critical 6-walk $c_g$ with $FF$ edge $g$ and associated decomposition $G=G_g \cup A_g$ with the following properties: (i) There is a face in $G_g$, and (ii) if $c_h$ is a critical walk through an $FF$ edge $h$ of $G_g$ then the interior of $\pi(D_g)\cup \pi(D_h)$ is not an open disc. This means that $c_h$ separates holes in the manner shown in Figure \ref{f:2criticalsFailingHypothesis}(ii).
\begin{prop}\label{p:2holesIrred}
Let $G$ be an irreducible surface graph in $ {\mathfrak{P}}_2$. Then
$G=G_{b,2}$.
\end{prop}
\begin{proof}
By Proposition \ref{p:noFacesB} $G$ contains $FF$ edges.
Also, by Corollary \ref{c:propercontainment} $G$ contains no $FF$ edges on critical walks of length 4. Moreover, since $G$ is in $ {\mathfrak{P}}_2$ it follows from Lemma \ref{l:mholelemma} that no $FF$ edge lies on a critical walk of length 5. Thus there is an $FF$ edge $e$ on a critical embedded 6-cycle and the maximal critical walk construction applies to give the critical walk $c_g$.
We consider first the second outcome of the construction. Since $G_g$ is not equal to $G_{b,1}$ it is reducible with an $FF$ edge $h$ with $G_g/h$ in ${\mathfrak{P}}_1$. By the irreducibility of $G$ and the construction of $G_g$ the edge $h$ lies on a critical embedded 6-cycle $c_h$ for which the open set $\pi(D_g^\circ)\cup \pi(D_h^\circ)$ is not an open disc. The possibilities for this are limited as we now show.
The walk $c_h$, with length 6, necessarily has two subwalks that are interior to $c_g$ (in the usual sense), one with length 2 and the other of length 2 or 3.
Suppose first that both subwalks are of length 2.
Then $c_h$ could be a repeated essential 3-cycle with two edges interior to $c_g$, as illustrated in Figure \ref{f:2holesPossiblesBandA}(i) and (ii). Moreover, further forms
are only possible by concatenating certain pairs of essential 3-cycles which share a vertex. So we may assume that $h$ lies on an essential 3-cycle as shown in Figure \ref{f:2holesPossiblesBandA}(i) or in Figure \ref{f:2holesPossiblesBandA}(ii).
\begin{center}
\begin{figure}
\caption{Two forms of an essential 3-cycle with 2 edges interior to the embedded $6$-cycle $c_g$.}
\label{f:2holesPossiblesBandA}
\end{figure}
\end{center}
In the first case note that if there is an additional $FF$ edge of $G_g$, with faces in $G_g$, that is incident to a vertex of $c_g$ then its critical embedded 6-cycle must contain a ``diagonal" essential 3-cycle, as in the second case. So we may assume that $G_g$ only has the edge $v_2v_4$ as an $FF$ edge. However, this implies that $v_2v_4v_3$ is a facial 3-cycle, a contradiction.
In the second case, illustrated by Figure \ref{f:2holesPossiblesBandA}(ii), we may assume, by Corollary \ref{c:propercontainment} and symmetry, that the boundary of the 5-cycle hole is $v_4v_5v_6v_1v_7$. Note that the 4-cycle for $c_1$ cannot be incident to both $v_1$ and $v_4$. Thus, if neither $v_7v_2$ nor $v_7v_3$ exists then there would be an edge $v_7w$ with $w$ strictly interior to the 5-cycle $c=v_1v_2v_3v_4v_7$. Also $v_7w$ would not be an $FF$ edge since it cannot lie on a critical walk. So, without loss of generality we can assume that $c_1$ is the walk $v_7wv_3v_4$. It follows that there is an $FF$ edge $v_7w'$ with $w'$ interior to $c$. However, it fails to lie on a critical walk, a contradiction.
In the second case then we may assume that $v_7v_2 $ exists.
Thus $G$ contains the subgraph shown in the ${\mathcal{P}}$-diagram
of Figure \ref{f:2holesBasicC}, and $G$ is obtained by adding edges and vertices that are exterior to $c_g$.
Consider an $FF$ edge of $G_g$ (with faces in $G_g$) which is incident to $v_3, v_5$ or $v_6$. This edge must lie on a critical walk separating the holes of $G$ in the manner of Figure \ref{f:2criticalsFailingHypothesis}(ii) and this is not possible in each case. It follows that $v_6=v_3$ and that $v_5=v_2$. Thus the edges $v_4v_2$ and $v_4v_6$ exist and so $G=G_{b,2}$.
\begin{center}
\begin{figure}
\caption{A surface subgraph of $G$ when the essential 3-cycle has 2 edges interior to $c_g$.}
\label{f:2holesBasicC}
\end{figure}
\end{center}
Suppose next that the subwalks of $c_h$ have lengths 2 and 3 and $h$ does not lie on an essential 3-cycle. Then $c_h$ has the form as shown in Figure \ref{f:2holesBasicC_23case}, for a relabelling of $v_1,\dots v_6$, where $w$ is distinct from $u, v$ and where the walk $v_1, v_2,w,v_4,v_5,v,u$ is the boundary walk of an embedded triangulated disc in $G$.
By Corollary \ref{c:propercontainment} the cycles $v_2wv_4v_3$ and $v_1uvv_5v_6$ correspond to the boundary walks of the holes of $G$. It follows that there is an $FF$ edge incident to $u$ or to $v$. This edge does not lie on a critical embedded cycle and so this case does not arise.
\begin{center}
\begin{figure}
\caption{A surface subgraph of $G$ with an embedded critical 6-cycle
$v_1uvv_5wv_4$.}
\label{f:2holesBasicC_23case}
\end{figure}
\end{center}
\begin{center}
\begin{figure}
\caption{(i) A face graph for $G_g=G_{b,1}$. (ii) Subwalks of the critical cycles $c_w$ and $c_u$.}
\label{f:2holes3cycleCase}
\end{figure}
\end{center}
Suppose, finally, that the maximal critical walk construction stops with $G_g=G_{b,1}$. Then there is a modified face graph $(B_0,\lambda)$ for $G$ in which the outer boundary of $B_0$ embeds as $c_g$, with both faces of the $FF$ edge $g$ interior to $c_g$. This is illustrated in Figure \ref{f:2holes3cycleCase}(i) where $g=v_1v_2$. If the edge $uv_3$ exists then $uv_2$ is an $FF$ edge and $v_1uv_3v_1v_2v_3$ is a critical embedded 6-cycle, $c$ say. With the pair $uv_2, c$ playing the roles of $g, c_g$ above it follows from our previous arguments that $G=G_{b,2}$.
We may assume then that none of the edges $uv_3,uv_3',vv_3,vv_3'$ exist.
Since there are 2 holes in the modified face graph $(B_0,\lambda)$ for $G$ it follows that at least one of the vertices $v_1,v_2,v_1',v_2'$ is not incident to either hole of $B_0$. For if not then there is an embedded planar 5-cycle containing both holes of $G$. By symmetry we assume that $v_1$ is such a vertex.
It follows that there is a face $v_1uw$ with $w$ interior to $c_g$. Both $v_1w$ and $v_1u$ are $FF$ edges and so lie on critical embedded 6-cycles, $c_w, c_u$ say, since $G$ is in ${\mathfrak{P}}_2$. Edges $v_1w, wy$ (resp. $v_1u, uz$) are included in a subwalk of $c_w$ (resp. $c_u$) from $v_1$ to another vertex of $\partial B_0$. See Figure \ref{f:2holes3cycleCase}(ii). We show that in all cases there exists a reducible edge, that is, an $FF$ edge that is not on a critical walk.
If $y=z$ then $uw$ is $FF$ and so lies on a critical walk. The only possibility for this is that $wv_2'$ exists.
Thus the interior of the 4-cycle $v_1wv_2'v_3'$ is triangulated with an edge incident to $w$, and this edge is reducible. If $y=v_2'$ then, for the same reason, there is a reducible edge. If $y=v$ then $zv_1'$ must exist and there are then three 5-cycle faces one of which is triangulated, and in each case there is a reducible edge. The same conclusion holds if the edge $yv_1'$ exists. Thus the critical 6-cycle $c_g$ cannot exist and the proof is complete.
\end{proof}
The next proof is similar in style to the previous proof. However this time the outcome $G_g=G_{b,1}$ of the maximal critical walk construction leads to the identification of an additional irreducible.
\begin{prop}\label{p:444irreducible}
Let $G$ be an irreducible surface graph in ${\mathfrak{P}}_3$ with $FF$ edges.
Then $G$ is one of the base graphs $G_{b,5}, \dots , G_{b,8}$.
\end{prop}
\begin{proof}
We consider $G$ to be determined by a triangulated surface graph for ${\mathcal{P}}$ and 3 interior-disjoint embedded triangulated discs $\pi(D_1), \pi(D_2), \pi(D_3)$ with boundary walks $c_1, c_2, c_3$ which are critical embedded 4-cycles. By Corollary \ref{c:propercontainment} there are no critical walks of length 4.
Suppose that there is an $FF$ edge on a critical embedded 6-cycle and that
$G=G_g \cup A_g$ is the decomposition given by the maximal critical walk construction. Consider first the outcome $G_g\neq G_{b,1}$. Then $G_g$ contains an $FF$ edge $h$, with both faces in $G_g$. This edge must lie on a critical walk $c_h$ in $G$ and, by the maximal property of $G_g$ there are 2 subwalks of $c_h$ each with at least 2 edges interior to $c_g$.
\begin{center}
\begin{figure}
\caption{An additional form for $c_h$.}
\label{f:3holes_6cycle_ch}
\end{figure}
\end{center}
We consider the case that $|c_h|=6$ and both subwalks are of length 2. Then in addition to the forms implied by Figure \ref{f:2holesPossiblesBandA}, as repeated or concatenated essential 3-cycles, we also have the form shown in Figure \ref{f:3holes_6cycle_ch} where the subwalks have no coincident vertices and $v_7\neq v_8$. However, by Corollary \ref{c:propercontainment} the 4-cycles $v_1v_8v_5v_6$ and $v_2v_3v_4v_7$ are boundary cycles determining $c_1$ and $c_2$ say, while the interior of the 6-cycle $v_1v_2v_7v_4v_5v_8$ is partially triangulated in $G$ with a single 4-cycle face associated with $c_3$. In particular the planar embedded 6-cycle $c_h$ cannot be critical and so this case does not arise.
We may assume then that $h=v_1v_4$ lies on an essential 3-cycle $v_1v_7v_4$ as shown in Figure \ref{f:2holesPossiblesBandA}(ii). We may assume also that the cycle $c_1$ is contained in the right hand side 5-cycle, $c=v_1v_2v_3v_4v_7$, and $c_2, c_3$ are contained in the other 5-cycle (in the usual inclusion sense). The argument in the previous proof applies, even though critical walks of length 5 might now be present, as we now show.
The 4-cycle for $c_1$ cannot be incident to both $v_1$ and $v_4$ and so if neither $v_7v_2$ nor $v_7v_3$ exists then there would be an edge $v_7w$ with $w$ strictly interior to $c=v_1v_2v_3v_4v_7$. Also $v_7w$ cannot be an $FF$ edge since it does not lie on a critical walk of length 5 or 6. So we can assume that $c_1$ is the walk $v_7wv_3v_4$. It follows that there is an $FF$ edge $v_7w'$ with $w'$ interior to $c$. However, it fails to lie on a critical walk of length 5 or 6 and this contradiction shows that we may assume that $v_7v_2$ exists, so that $G$ contains the surface subgraph of Figure \ref{f:3holesBasicC}.
\begin{center}
\begin{figure}
\caption{A surface subgraph of $G$ when an essential 3-cycle has 2 edges interior to $c_g$.}
\label{f:3holesBasicC}
\end{figure}
\end{center}
Suppose, by way of contradiction, that there is an $FF$ edge interior to the embedded 5-cycle $v_1v_7v_4v_5v_6$ containing $c_1$ and $c_2$. Then it lies on a critical embedded cycle which must pass through $v_7$. Since this is not possible there is a single degree 3 vertex interior to the embedded cycle. It also follows, as in the previous proof, that $v_1v_4$ is the only $FF$ edge of $G_g$. Thus $G$ is equal to one of 5 surface graphs, namely $G_{b,6}, G_{b,7}, G_{b,8}$, or one of the isomorphs of $G_{b,7}$ and $G_{b,8}$ shown in Figure \ref{f:irred_PlusExtra}.
\begin{center}
\begin{figure}
\caption{A surface subgraph of $G$ with $c_h$ on a critical embedded 5-cycle.}
\label{f:3holes_5cycle_chB}
\end{figure}
\end{center}
The case that $c_h$ has length 6 with subwalks of lengths 2 and 3 with $h$ not lying on an essential 3-cycle does not arise. This is because the argument for this case in the proof of Proposition \ref{p:2holesIrred} also holds when $G$ is in ${\mathfrak{P}}_3$.
Since $G$ is in ${\mathfrak{P}}_3$ it is possible that $c_h$ has length 5 with subwalks of length 2 as shown in Figure \ref{f:3holes_5cycle_chB}.
In view of previous cases we may assume that the edge $h=v_1v_4$ does not lie on an essential 3-cycle. Thus the edges $v_1w, uv_4$ do not exist. Also $uv_2$ and $v_5w$ do not exist, by the simplicity of $G$. It follows that there is an $FF$ edge $uz$ with $z$ interior to $v_1v_2wv_4v_5u$. This edge cannot lie on a critical walk of length 5 or 6 contradicting irreducibility in this case.
Next we consider the outcome $G_g=G_{b,1}$ in the construction of $G_g$ and show that $G=G_{b,5}$ in this case. Figure \ref{f:2holes3cycleCase}(i) illustrates a modified face graph for $G_g$ augmented by the two faces of $g=v_1v_2$. The other vertices of $G$ are interior to $c_g$ and the walks $c_1, c_2, c_3$ in $G$ derive from 3 quadrilateral faces of the associated modified face graph $(B_0,\lambda)$ for $G$.
Suppose first that $v_1$ does not belong to any of the walks $c_1, c_2, c_3$. Then the edges that are interior to $c_g$ and incident to $v_1$, say $v_1u, v_1u_1 ,\dots , v_1u_r$, are $FF$ edges. See Figure \ref{f:3holesInteriorVertexCase}(i).
Each 4-cycle face of $(B_0,\lambda)$ includes at most one of the edges $v_2u, uu_1, u_1u_2, \dots u_rv_3'$ and so if $r>1$ at least one of the edges $uu_1, \dots ,u_{r-1}u_r$
is an $FF$ edge, $e$ say. Suppose $e=uu_1$. To be on a critical walk requires $u_1v_2'$ to exist (since $uv_3$ does not exist). It follows that $u_jv_2'$ must exist for each $j$, since each edge $v_1u_i$ lies on a critical walk, and so $u_1u_2$ is a reducible edge. The argument is the same for each $u_iu_{i+1}$. Thus $r=1$ and the degree of $v_1$ in the face graph $(B_0,\lambda)$ is 4. Since $uu_1$ is incident to a 4-cycle face it follows readily that irreducibility implies that $G=G_{b,5}$.
\begin{center}
\begin{figure}
\caption{(i) When $v_1$ is not incident to a 4-cycle hole. (ii) When $v_1, v_2, v_1', v_2'$ are each incident to a 4-cycle hole.}
\label{f:3holesInteriorVertexCase}
\end{figure}
\end{center}
It remains to consider the case that each vertex $v_1, v_2, v_1', v_2'$ belongs to at least one of the walks $c_1, c_2, c_3$. Relabelling we may assume that $c_1$ is the embedding of a 4-cycle $v_1yv_2'x$, as in Figure \ref{f:3holesInteriorVertexCase}(ii), where, by simplicity, $y\neq u, v$. It follows that $x= v_3'$.
Since $v_3$ does not have degree 2 neither remaining 4-cycle hole can be incident to both $v_2$ and $v_1'$. Also it is not possible for incidence to the pair $v_1, v_2$ or to the pair $v_1', v_2'$. It follows that all further possible edges of the form $v_1z$ or of the form $v_2'x$, are $FF$ edges. In particular we note that $v_1u$ is an $FF$ edge. On the other hand, given that $c_2$ is incident to $v_1'$ and $c_3$ is incident to $v_2$, it also follows that $v_1u$ cannot lie on a critical walk of length 5 or 6, a contradiction.
This contradiction completes the proof when critical walks of length 6 exist. It remains to show that there is no irreducible surface graph $G$ in ${\mathfrak{P}}_3$ with $FF$ edges which only lie on critical walks of length 5. In this case consider one such edge $g$ with critical walk $c_g$ and associated subgraph $G_g$. Since $G_g$ is in ${\mathfrak{P}}_2$ it has an $FF$ edge $h$ and so this lies on a critical 5-walk $c_h$. Let $\pi(D_g), \pi(D_h)$ be the embedded triangulated discs in ${\mathcal{P}}$ with boundary walks $c_g, c_h$. Since $G$ is finite we may assume that $D_h$ does not contain $D_g$, for otherwise we could replace $g$ by $h$, continuing with similar replacements if necessary. If the union of
the open sets $\pi(D_g^\circ), \pi(D_h^\circ)$
is an open disc then by Lemma \ref{l:blendingcriticals} the associated boundary walk is a critical walk. In fact it is a critical 6-walk, since it contains $c_1,c_2,c_3$, and this is contrary to our assumptions.
On the other hand if the union is not an open disc then $c_h$ has 2 subwalks interior to $c_g$ which is not possible for a 5-walk.
\end{proof}
\section{Constructibility and 3-rigidity}
\label{s:mainproof}
Combining results of the previous sections we obtain the construction theorem, Theorem \ref{t:construction} and the proof of Theorem \ref{t:projectiveA} which we repeat here as Theorem \ref{t:projectiveArepeat}.
\begin{thm}\label{t:construction}
Let $G\in {\mathfrak{P}}_k$, for $k=1,2$ or 3. Then $G$ is constructible by a finite sequence of planar vertex splitting moves from one of the eight irreducible surface graphs $G_{b,1}, \dots , G_{b,8}$.
\end{thm}
\begin{thm}\label{t:projectiveArepeat}
Let $G$ be a simple graph
associated with a partial triangulation
of the real projective plane.
Then $G$ is minimally $3$-rigid if and only if $G$ is $(3,6)$-tight.
\end{thm}
\begin{proof}
Let $H$ be the graph determined by a partial triangulation of the real projective plane. If $H$ is minimally 3-rigid then it is well-known that $H$ is necessarily $(3,6)$-tight \cite{gra-ser-ser}.
Suppose on the other hand that $H$ is a $(3,6)$-tight graph which is embeddable in ${\mathcal{P}}$. If this embedding is topologically contractible then $H$ is a planar graph which is $(3,6)$-tight. Such graphs are known to be 3-rigid, since $H$ is either a triangle or is the graph of a triangulation of the sphere. On the other hand if the embedding is not topologically contractible then the associated surface graph, $G(H)$ say, belongs to ${\mathfrak{P}}_1, {\mathfrak{P}}_2$ or ${\mathfrak{P}}_3$. By Theorem \ref{t:construction} the graph $H$ is constructible by planar vertex splitting moves from one of eight irreducible graphs, each of which has fewer than $8$ vertices. It is well-known that all $(3,6)$-tight graphs with fewer than $8$ vertices are minimally 3-rigid.
Since vertex splitting preserves minimal 3-rigidity (Whiteley \cite{whi}) it follows that $H$ is minimally 3-rigid.
\end{proof}
{\bf Acknowledgements.} {This research was supported by the EPSRC grant EP/P01108X/1, for the project \emph{Infinite bond-node frameworks}, and by a visit to the Erwin Schroedinger Institute in September 2018 in connection with the workshop on \emph{Rigidity and Flexibility of Geometric Structures}.
We thank referees for comments which have improved the presentation.}
\def\lfhook#1{\setbox0=\hbox{#1}{\ooalign{\hidewidth
\lower1.5ex\hbox{'}\hidewidth\crcr\unhbox0}}}
\section{Appendix: The uncontractible surface graphs.}
We give an alternative systematic approach to the identification of the uncontractible embeddings of $(3,6)$-tight graphs. As we have remarked these coincide with the topologically contractible embedding of $K_3$ and the 8 topologically uncontractible embeddings provided by Theorem \ref{t:construction}. This is because these irreducible embeddings happen to be uncontractible. We present them here anew in Figures \ref{f:irreduciblesA}, \ref{f:irreduciblesB}, \ref{f:irreduciblesC},
with new notation which reflects the invariant of Definition \ref{d:holedegree} which we use as an organising principle for their determination.
Specifically, we write $G^h_n, G^h_{n,\alpha}, G^h_{n,\beta} $ where $n$ is the number of vertices and $h=h(G)$ indicates the \emph{minimum hole incidence degree}.
\begin{definition}\label{d:holedegree} Let $v$ be a vertex of the surface graph $G$ in ${\mathfrak{P}}_k$ for some $k=1,2,3$. Then (i) $\deg_F(v)$ is the number of facial 3-cycles incident to $v$, (ii)
$
\deg_h(v) = \deg(v)-\deg_F(v)
$
is the \emph{hole incidence degree} for $v$, and (iii)
$h(G)= \min_v \operatorname{deg}_h(v)$ is the \emph{minimum hole incidence degree}.
\end{definition}
\begin{center}
\begin{figure}
\caption{$G^2_3=G_{b,1}$.}
\label{f:irreduciblesA}
\end{figure}
\end{center}
\begin{center}
\begin{figure}
\caption{$G^0_7= G_{b,5}$.}
\label{f:irreduciblesB}
\end{figure}
\end{center}
\begin{center}
\begin{figure}
\caption{$G^1_5=G_{b,2}$.}
\label{f:irreduciblesC}
\end{figure}
\end{center}
We first note the following proposition identifying the unique uncontractible surface graph in ${\mathfrak{P}}_k$ with $h(G)=0$ which is to say that there is an interior vertex not contained in the boundary of $G$.
\begin{prop}\label{p:interiorvertex}
Let $G$ be an uncontractible surface graph in ${\mathfrak{P}}_k$, for $k=1, 2$ or $3$ with $h(G)=0$. Then $k=3$ and $G$ is the hexagon graph $G^0_7$.
\end{prop}
\begin{proof}
Let $v_1$ be the interior vertex of $G$. Since it is incident to an $FF$ edge and this edge necessarily lies on an essential 3-cycle, in view of Lemma \ref{l:planar3cycle}, it follows that $G$ has a modified face graph representation $(B_0,\lambda)$, with 6-cycle boundary, for which the faces incident to $v_1$ provide edges forming paths $\pi_1, \pi_2$ from $v_2$ to $v_3'$ and from $v_2'$ to $v_3$ respectively, as indicated in Figure \ref{f:interiorNEW}.
\begin{center}
\begin{figure}
\caption{Faces incident to $v_1, v_1'$ associated with an interior vertex of $G$.}
\label{f:interiorNEW}
\end{figure}
\end{center}
If $u_rv_2'$ exists then $v_3'u_r$ is $FF$ and there exist edges $u_rz, zv_3$ for an essential 3-cycle for $v_3'u_r$. Thus the edges $u_iv_3$ necessarily exist for an essential 3-cycle for $v_1u_i$, for $i=1,\dots ,r-1$. It follows that the $FF$ edge $v_2u_1$ does not lie on an essential 3-cycle if $r\geq 2$. However, for both $r=1,2$, including the possibility that $z=u_1$, there is no completion such that $G$ is (3,6)-tight.
Since $u_rv_2'$ does not exist there exists an $FF$ edge $v_3'y$ of $B_0$
together with edges $yz, zv_3$ for an essential 3-cycle. Once again, in the manner of the previous paragraph, there is no completion of $B_0$ to form a modified face graph for a (3,6)-tight surface graph for ${\mathcal{P}}$.
\end{proof}
The next lemma is key to the determination of the uncontractible surface graphs $G$ in ${\mathfrak{P}}_k$ for $k=2$ or $3$. In the proof we use the fact, from Corollary \ref{c:propercontainment}, that it is not possible for $G$ to have a 4-cycle hole whose boundary walk is contained in an embedded 4-cycle of planar type.
\begin{lemma}\label{degh1lemma}
Let $G\in {\mathfrak{P}}_k$, for $k=2$ or 3, be uncontractible with no interior vertex and
let $v_1$ be a vertex with $\operatorname{deg}_h(v_1)=1$ which lies on the boundary of a 4-cycle hole of $G$ with edges $v_1v_2,v_2v_3,v_3v_4, v_4v_1$. Then $\operatorname{deg}(v_1)= 4$ if $v_1$ is not adjacent to $v_3$ and $\operatorname{deg}(v_1)= 5$ otherwise.
\end{lemma}
\begin{proof}
Let $v_2=w_1,w_2,\dots,w_n=v_4$ be the neighbours of $v_1$ in cyclic order. Since $\operatorname{deg}_h(v_1)=1$, we also have the edges $w_1w_2,\dots,w_{n-1}w_n$. Note that $\deg(v_1)\geq 4$ since if the degree is 3 then the edge $v_1w_2$ is contractible.
{\bf Case (a).} $v_3\neq w_i$, for every $i\in\{2,\dots,n-1\}$. \\
Suppose, by way of contradiction, that $n\geq 5$.
For $2\leq i \leq n-1$ it follows from the uncontractibility of the $FF$ edge $v_1w_i$ that there exists an edge $w_iw_r$,
with $1\leq r \leq n$.
Moreover the 3-cycle $w_iw_rv_1w_i$ cannot be an embedded planar 3-cycle, since it is not a face and, as it contains no holes of $G$, there would otherwise be a contractible edge.
Since there are at most 3 holes and the Case (a) assumption is in force, there is an index $1\leq i<n$ and an associated cycle of edges through $w_i, w_{i+1}, w_s, w_r$, with $r \leq s$, which is triangulated by faces of $G$.
Thus $w_iw_{i+1}$ is an $FF$ edge.
Since $G$ is uncontractible $w_iw_{i+1}$ lies in a non-facial 3-cycle. Since $v_1w_j$ is also an $FF$ edge for every $j\in\{2,\dots,n-1\}$, it follows that there are just two candidate non-facial 3-cycles: $w_{i-1}w_iw_{i+1}w_{i-1}$ or $w_iw_{i+1}w_{i+2}w_i$.
\begin{description}
\item[(i)] If $w_iw_{i+1}$ lies on the cycle $w_{i-1}w_iw_{i+1}w_{i-1}$, then the 4-cycle $w_{i-1}v_1w_rw_{i+1}w_{i-1}$ contains strictly the hole boundary $v_1v_2v_3v_4v_1$, contradicting Corollary \ref{c:propercontainment}.
Note that this 4-cycle does contain the hole in our sense, as shown by the shading in Figure \ref{f:case_a} indicating a triangulated disc in ${\mathcal{P}}$ with boundary equal to this 4-cycle.
\begin{figure}
\caption{The 4-cycle $w_{i-1}
\label{f:case_a}
\end{figure}
\item[(ii)] If $w_iw_{i+1}$ lies on the cycle $w_iw_{i+1}w_{i+2}w_i$, then, {noting that $w_{i+2}w_i$ is an edge}, we claim that the 5-cycle $w_iv_1w_rw_{i+1}w_{i+2}w_i$ contains all the holes, which is a contradiction.
To see this note that by
Corollary \ref{c:propercontainment} the 4-cycle $v_1w_rw_iw_{i+2}v_1$ contains no holes. See Figure \ref{f:case_a'}.
\begin{figure}
\caption{The 5-cycle $w_iv_1w_rw_{i+1}
\label{f:case_a'}
\end{figure}
\end{description}
These contradictions, together with Lemma \ref{l:degree3lemma} show that $n=4$ in this case.
{\bf Case (b).} $v_3= w_{i_0}$, for some $i_0\in\{2,\dots,n-1\}$.\\
Since $G$ is a simple graph and $v_1v_3$ is now an uncontractible $FF$ edge we have $\operatorname{deg}(v_1)\geq 5$. Suppose, by way of contradiction, that $n\geq 6$. As in case (a) there then exists an $FF$ edge $w_iw_{i+1}$ and some vertex $w_r$ providing a facial 3-cycle $w_iw_{i+1}w_r$. (See Figure \ref{MDsep2}.) The only possible non-facial 3-cycle for the $FF$ edge $w_iw_{i+1}$ is $v_3w_iw_{i+1}v_3$. However, this gives a contradiction since the 4-cycle $w_iv_3v_4v_1w_i$ strictly
contains the hole $v_1v_2v_3v_4v_1$. Thus $n=5$.
\begin{figure}
\caption{The 4-cycle $v_4v_3v_1w_iv_4$ contains strictly the 4-hole $v_1v_2v_3v_4v_1$.}
\label{MDsep2}
\end{figure}
\end{proof}
\begin{prop}\label{p:3uncontractibles}
Let $G\in {\mathfrak{P}}_k$, for $k=1, 2$ or 3, be uncontractible with no interior vertex. If there exists a vertex $v_1\in V(G)$ with $\operatorname{deg}_h(v_1)=1$ then $G$ is
one of the surfaces graphs $ G^1_{6,\alpha}, G^1_{6,\beta}, G^1_5$.
\end{prop}
\begin{proof}
{\bf Case (a).}
Assume first that $v_1$ lies on the 4-cycle boundary of the hole $H_1$, with vertices $v_1, v_2, v_3, v_4$, and let $v_2=w_1, w_2,\dots, w_n=v_4$ be all the neighbours of $v_1$. Since $\operatorname{deg}_h(v_1)=1$ the edges $w_1w_2,\dots,w_{n-1}w_n$ exist.
There are two subcases.
\begin{description}
\item[(i)] $v_3\neq w_i$, for every $i\in\{2,\dots, n-1\}$.\\
By Lemma \ref{degh1lemma} we have $\operatorname{deg}(v_1)=4$.
By the uncontractibility of the edges $v_1w_2$ and $v_1w_3$ the edges $w_2w_4$ and $w_1w_3$ must exist. Thus $G$ contains the surface graph in Figure \ref{f:1_6_alpha}, except possibly for the edge $v_3w_3$.
It follows that the 4-cycle $w_1w_2w_4w_3w_1$ must be the boundary of a 4-hole $H_2$, since otherwise the 5-cycle $v_1w_1w_3w_2w_4v_1$ contains all the holes, in the sense, as before, of being the boundary of an embedded disc in ${\mathcal{P}}$, $B$ say, which contains the holes. This contradicts $(3,6)$-tightness. We claim now that the edge $v_3w_2$ or $v_3w_3$ must exist, for otherwise there is a contractible edge in $B$. To see this check that since $\operatorname{deg}(v_3)\geq 3$ there exists a vertex $z$ in the interior of the 5-cycle $v_3w_4w_2w_3w_1v_3$, such that $v_3z\in E(G)$. Since $v_3z$ does not lie on a non facial 3-cycle, it follows that it lies on the boundary of the third 4-hole. Thus, if $v_3w_3$ is not allowed, we may assume by symmetry that $w_1z$ is an $FF$ edge in $E(G)$, and so it lies on the non-facial 3 cycle $w_1zw_2w_1$. Hence the third hole is described by the 4-cycle $w_4v_3zw_1w_4$. However, this implies that $zw_3\in E(G)$, which is a contractible $FF$ edge, so we have proved the claim. Hence without loss of generality $G$ contains the surface subgraph $G^1_{6,\alpha}$ as indicated in Figure \ref{f:1_6_alpha}. Since $G$ is uncontractible it follows that the 3-cycle $v_3w_3w_1$ is a face and so $G=G^1_{6,\alpha}$.
\begin{figure}
\caption{The uncontractible surface graph $G^1_{6,\alpha}
\label{f:1_6_alpha}
\end{figure}
\item[(ii)] $v_3=w_{i_0}$ for some $i_0\in \{3,\dots,n-2\}$.\\
By Lemma \ref{degh1lemma} $\operatorname{deg}(v_1)=5$ and so $v_3=w_3$. Since $v_1w_2$ is an $FF$ edge, it follows that $w_2w_4\in E(G)$ and so $G$ contains the surface graph
$G^1_{6,\beta}$ of Figure \ref{f:1_6_beta}.
Since $G$ is uncontractible it follows as before
that $G$ is equal to $G^1_{6,\beta}$.
\begin{figure}
\caption{The uncontractible surface graph $G^1_{6,\beta}
\label{f:1_6_beta}
\end{figure}
\end{description}
{\bf Case (b).}
Let $v_1$ lie on the boundary of a 5-hole $H$ with boundary edges $v_1v_2$, $v_2v_3$, $v_3v_4$, $v_4v_5$, $v_5v_1$. We may assume that $\operatorname{deg}_h(v_i)=2$, for every $i=2,3,4,5$, since otherwise there is a vertex $v$ on a 4-hole of $G$.
Since $G$ has two holes it is straightforward to check that $\operatorname{deg}(v_1)=4$ and that the second hole is described by the 4-cycle $v_2v_3v_5v_4v_2$. Thus we obtain that $G$ is the uncontractible (3,6)-tight graph $G_5^1$ given by Figure \ref{f:1_5}.
\begin{figure}
\caption{The uncontractible surface graph $G^1_5$.}
\label{f:1_5}
\end{figure}
\end{proof}
\begin{prop}
Let $G\in {\mathfrak{P}}_k$, for $k=1, 2$ or 3, be uncontractible with
$\operatorname{deg}_h(v)\geq 2$ for all $v\in V(G)$. Then $G$ is one of the four surface graphs $G^2_{6,\alpha}, G^2_{6,\beta}, G^3_4, G_3^2$.
\end{prop}
\begin{proof}
Suppose first that
$G$ has 2 or 3 holes. Then the hole boundaries have length 4 or 5 and it follows from the simplicity of $G$ that every vertex is common to at least 2 holes. Since there are either 2 or 3 holes it follows readily that $|V|\leq 6$.
{\bf Case (a).} Suppose that $G$ contains at least one $FF$ edge, say $v_1v_2$, with non facial 3-cycle $v_1v_2v_3$, and associated 3-cycle faces $v_1v_2v_4v_1$ and $v_1v_2v_5v_1$.
We claim that one of the edges $v_3v_4$ or $v_3v_5$ lies in $E(G)$. Suppose, by way of contradiction, that neither edge exists. Then we show that the edge $v_4v_5$ is also absent. Indeed, if $v_4v_5\in E(G)$, then we have two planar 5-cycles; $v_1v_4v_5v_2v_3v_1$ and $v_1v_5v_4v_2v_3v_1$, as in Figure \ref{f:two5cycles}.
\begin{center}
\begin{figure}
\caption{A subgraph with the 5-cycles $v_1v_4v_5v_2v_3v_1$ and $v_1v_5v_4v_2v_3v_1$.}
\label{f:two5cycles}
\end{figure}
\end{center}
By the sparsity condition one of these has a vertex in the interior with 3 incident edges and the other has a single chordal edge in the interior and by symmetry we may assume that the planar 5-cycle $v_1v_4v_5v_2v_3v_1$ has the single chordal edge. However, of the 5 possibilities $v_1v_2, v_2v_4, v_1v_5$ are not available, by the simplicity of $G$, and the edges $v_3v_4, v_3v_5$ are absent by assumption. This contradiction shows that $v_4v_5$ is indeed absent and so, since $v_4, v_5$ have degree at least 2, the edges $v_4v_6, v_5v_6$ must exist. Now the complement of the two 3-cycle faces is bounded by two 6-cycles. By the sparsity condition there are now only 2 further edges to add and so there must be a 5-cycle hole, a contradiction, and so the claim holds.
Without loss of generality we suppose that $v_3v_4\in E(G)$. Since $\operatorname{deg}_h(v_2)\geq 2$, it follows that
$v_2v_6\in E(G)$. Moreover, the edges $v_6v_2$,$v_2v_3$ should be on the boundary of a planar 4-hole $H_1$, and this implies that $v_1v_6\in E(G)$. Similarly we obtain that the two remaining holes are determined by the cycles $v_1v_3v_4v_5v_1$, and $v_2v_5v_4v_6v_2$. The resulting (3,6)-tight triangulated surface graph is given in Figure \ref{f:CaseAholedegree2} and is the uncontractible surface graph $G_{6,\alpha}^2$.
\begin{figure}
\caption{The uncontractible surface graph with $h(G)=2$ and an $FF$ edge; $G_{6,\alpha}
\label{f:CaseAholedegree2}
\end{figure}
{\bf Case (b).} Suppose now $G$ has at least one 3-cycle face, $v_1v_2v_3$, and no $FF$ edges.
Then the edge $v_1v_2$ is on the boundary of a 4-hole $H_1$, that is determined by the edges
$v_1v_2$, $v_2v_4$, $v_4v_5$ and $v_5v_1$.
To see that $|V|\neq 5$ note that without loss of generality the edge $v_3v_4$ exists and $G$ contains the surface subgraph shown in Figure \ref{f:case_b_noFF}. Also, since $v_5$ cannot have degree 2 at least one of the edges $v_5v_3, v_5v_2$ exists.
\begin{center}
\begin{figure}
\caption{A necessary subgraph.}
\label{f:case_b_noFF}
\end{figure}
\end{center}
If $v_5v_2$ exists then the edge $v_2v_3$ is adjacent to a 4-cycle hole and $v_5v_3$ is absent. We note next that the planar 5-cycle $v_3v_1v_5v_2v_4v_3$
must contain a chord edge (and so provide the third 4-cycle hole). The only available edge (by simplicity) is $v_3v_5$. This however is inadmissible since it introduces a second 3-cycle face $v_3v_5v_1$ adjacent to $v_1v_2v_3$.
Similarly, if $v_5v_3$ exists then we have the planar 6-cycle
$v_3v_1v_5v_3v_2v_4v_3$ and there must exist a diameter edge to create the 2 additional 4-cycle holes. As there is no such edge we conclude that $|V|=6$.
Introducing $v_6$ the fact that $v_2v_3$ and $v_3v_1$ lie on 4-cycle hole boundaries leads to the surface graph $G_{6,\beta}^2$ indicated in Figure \ref{f:CaseBholedegree2}.
\begin{figure}
\caption{The uncontractible surface graph $G_{6,\beta}
\label{f:CaseBholedegree2}
\end{figure}
{\bf Case (c).} Let now $G$ be a surface graph with no 3-cycle faces. Since $\operatorname{deg}(v)\geq 3$ for each vertex it follows that $\operatorname{deg}_h(v)= 3$ and $\deg(v)=3$, for all $v\in V(G)$. Thus
$|V|=4$ and it follows that $G$ is the uncontractible (3,6)-tight surface graph $G_4^3$ given by Figure \ref{f:coloured4vertexgraph}.
\begin{figure}
\caption{The uncontractible surface graph $G_4^3$ with $h(G)=3$.}
\label{f:coloured4vertexgraph}
\end{figure}
{\bf Case (d).}
Finally, suppose that $G\in \mathfrak{P}_1$. We claim that $G$ has no faces and the surface graph is given by
Figure \ref{f:G3,2}.
Assume first that there exists an $FF$ edge, say $v_1v_2$, that lies on the faces $v_1v_2v_3v_1$ and $v_1v_2v_4v_1$. By the uncontractibility, $v_1v_2$ lies on a non facial 3-cycle $v_1v_2v_5v_1$. Note that $v_3v_4\notin E(G)$, since otherwise the 6-hole would lie inside a 5-cycle, either $v_1v_3v_4v_2v_5v_1$ or $v_1v_4v_3v_2v_5v_1$, contradicting the sparsity requirement. It follows that we cannot have $|V(G)|\leq 5$.
Indeed, in this case (see Figure \ref{f:lessthan5}) $v_3v_5\in E(G)$, since $\operatorname{deg}(v_3)\geq 3$, and so without loss of generality, in view of the symmetry, $v_1v_3$ is an $FF$ edge. But this edge does not lie on a non-facial 3-cycle, a contradiction.
\begin{figure}
\caption{$|V(G)|\leq 5$ leads to a contradiction.}
\label{f:lessthan5}
\end{figure}
Thus $|V(G)|=6$ and it remains to consider two subcases:
\begin{enumerate}
\item[(i)] $v_3v_5\in E(G)$. In this case $v_1v_3$ lies on the non-facial 3-cycle $v_1v_3v_6v_1$. However, this leads to a contradiction, since the 6-hole is contained either in the 5-cycle $v_5v_3v_6v_1v_2v_5$ or in the 5-cycle $v_6v_3v_2v_5v_1v_6$. Hence by symmetry neither of the edges $v_3v_5,v_4v_5$ is allowed.
\item[(ii)] $v_3v_6,v_4v_6\in E(G)$. In this case, indicated in Figure \ref{f:2edges}, we may assume that the hole is contained in the planar 6-cycle $v_1v_5v_2v_4v_6v_3v_1$ and that the planar 6-cycle $v_1v_5v_2v_3v_6v_4v_1$ is triangulated. This implies that $v_2v_3$ is an $FF$ edge and so lies on non-facial 3-cycle. However, the only candidate cycle is $v_3v_2v_6v_3$ and if $v_2v_6$ lies in $E(G)$ then the hole is contained in the 5-cycle $v_1v_5v_2v_6v_3v_1$, a contradiction.
\begin{figure}
\caption{Edges $v_3v_6, v_4v_6$ in $G$ leads to a contradiction.}
\label{f:2edges}
\end{figure}
We have shown that no $FF$ edge is allowed. Suppose now that $G$ contains a face, described by the vertices $v_1,v_2$ and $v_3$. Since there are no $FF$ edges, all edges $v_1v_2,v_2v_3$ and $v_1v_3$ lie on the boundary of the hole. Moreover, since they form a face of the surface graph, they cannot form a 3-cycle path in the boundary of the hole. Only 3 edges of the boundary cycle are left to be determined, so we may assume that the path $v_1v_2v_3$ lies on the boundary. Therefore, without loss of generality, there exists a vertex $v_4$ on the boundary that connects the two paths, $v_1v_2v_3$ and $v_1v_3$, so we obtain the 5-path $v_1v_3v_4v_1v_2v_3$. But this implies that the remaining edge of the 6-hole is $v_1v_3$, which would contradict graph simplicity. Hence the surface graph contains no faces and the proof is complete.
\end{enumerate}
\end{proof}
\begin{figure}
\caption{The uncontractible surface graph $G^2_3$.}
\label{f:G3,2}
\end{figure}
\end{document} |
\begin{document}
\def\Ubf#1{{\baselineskip=0pt\vtop{\hbox{$#1$}\hbox{$\sim$}}}{}}
\def\ubf#1{{\baselineskip=0pt\vtop{\hbox{$#1$}\hbox{$\scriptscriptstyle\sim$}}}{}}
\def\R{{\Bbb R}}
\def\V{{\Bbb V}}
\def\N{{\Bbb N}}
\def\Q{{\Bbb Q}}
\title{Vaught's conjecture on analytic sets}
\author{Greg Hjorth \footnote{Research partially supported by NSF grant DMS 96-22977}}
\date{\today}
\maketitle
{\bf $\S$0 Prehistory}
In rough historical order these are the groups for which we know the topological
Vaught conjecture:
{\bf 0.1 Theorem} (Folklore) All locally compact Polish groups satisfy Vaught's
conjecture -- that is to say, if $G$ is a locally compact Polish group acting
continuously on a Polish space $X$ then either $|X/G|\leq\aleph_0$ or there is a
perfect set of points with different orbits (and hence $|X/G|\geq 2^{\aleph_0}$).
{\bf 0.2 Theorem} (Sami) Abelian Polish groups satisfy Vaught's conjecture.
{\bf 0.3 Theorem} (Hjorth-Solecki) Invariantly metrizable and nilpotent Polish
groups
satisfy Vaught's conjecture.
{\bf 0.4 Theorem} (Becker) Complete left invariant metric and solvable Polish
groups satisfy Vaught's conjecture.
In each of these cases the result was shortly or immediately after extended to
analytic sets. For this purpose let us write TVC$(G,\Ubf{\Sigma}^1_1)$ if whenever
$G$ acts continuously on a Polish space $X$ and $A\subset X$ is $\Ubf{\Sigma}^1_1$
(or {\it analytic}) then either $|A/G|\leq\aleph_0$ or there is a
perfect set of orbit inequivalent points in $A$. Thus we have TVC$(G,\Ubf{\Sigma}^1_1)$
for each of the groups in the classes mentioned in 0.1--0.4 above.
On the other hand, and in contrast to the usual topological Vaught conjecture, that
merely asserts that 0.1-0.4 hold for arbitrary Polish groups, it is known that
TVC$(S_{\infty},\Ubf{\Sigma}^1_1)$ {\it fails}.
Here it is shown that the presence of $S_{\infty}$ is a necessary condition for
TVC$(G,\Ubf{\Sigma}^1_1)$ to fail:
{\bf 0.5 Theorem} If $G$ is a Polish group on which the Vaught conjecture fails on
analytic sets then there is a closed subgroup of $G$ that has $S_{\infty}$ as a continuous homomorphic image.
The converse of 0.5 is known and by now considered trivial in light of 2.3.5 of
\cite{beckerkechris}. Thus we have an exact characterization of
TVC$(G,\Ubf{\Sigma}^1_1)$. If as widely believed the Vaught conjecture
should fail for $S_{\infty}$ then this would as well characterize the groups for
which the topological Vaught conjecture holds.
{\bf $\S$1 Preliminaries}
All of this can be found in \cite{hjorthorbit}.
{\bf 1.1 Theorem} (Effros) Let $G$ be a Polish group acting continuously on
a Polish space $X$ (in other words, let $X$ be a {\it Polish $G$-space}).
For $x\in X$ we have $[x]_G\in \Ubf{\Pi}^0_2$ if and only if
\[G\rightarrow [x]_G,\]
\[g\mapsto g\cdot x\]
is open.
{\bf 1.2 Corollary} Let $G$ be a Polish group and
$X$ a Polish $G$-space.
Suppose that $[x]_G$ is $\Ubf{\Pi}^0_2$.
Then for all $V$ containing the identity we may find open
$U$
such that for all $ x'\in U\cap[x]_G$ and $U'\subset X$ open
\[[x]_G\cap U'\cap U\neq\emptyset\]
implies that there exists $g\in V$
such that
\[g\cdot x'\in U'.\]
{\bf 1.3 Definition} Let $X$ be a Polish space and ${\cal B}$ a basis. Let ${\cal L}({\cal B})$ be the
propositional language formed from the atomic propositions $\dot{x}\in U$, for $U\in{\cal B}$.
Let ${\cal L}_{\infty,0}({\cal B})$ be the infinitary version, obtained by closing under negation and
arbitrary disjunction and conjunction.
$F\subset$ ${\cal L}_{\infty, 0}({\cal B})$ is a {\it fragment} if it is closed under subformulas and
the finitary Boolean operations of negation and finite disjunction and finite conjunction.
For a point $x\in X$ and $\varphi\in$${\cal L}_{\infty,0}({\cal B})$,
we can then define
$x\models \varphi$ by induction in the usual fashion, starting with
\[x\models \dot{x}\in U\]
if in fact $x\in U$. In the case that $X$ is a
Polish $G$-space and $V\subset G$ open we may also define
$\varphi^{\Delta V}$ by induction on the logical complexity of $\varphi$ so that
in any generic extension in which $\varphi$ is hereditarily countable
\[x\models \varphi^{\Delta V}\]
if and only if
\[\exists ^* g\in V (g\cdot x\models\varphi)\]
(where $\exists^*$ is the category quantifier
``there exists non-meagerly many'').
{\bf 1.4 Lemma} Let $X$ be a Polish $G$-space. ${\Bbb P}$ a forcing notion, $p\in {\Bbb P}$
a condition, $\sigma$ a ${\Bbb P}$-term.
Suppose that ${\cal B}$ is a countable basis for $X$ and ${\cal B}_0$ a countable
basis for $G$. Suppose that $G_0$ is a countable dense subgroup of $G$ and ${\cal B}$ is
closed under $G_0$ translation and that ${\cal B}_0$ is closed under left and right
$G_0$ translation.
Suppose that
\[p\Vdash_{\Bbb P}\sigma[\dot{G}]\in X\]
and that $p$ decides the equivalence class of $\sigma$ in the sense that
\[(p,p)\Vdash_{{\Bbb P}\times{\Bbb P}}\sigma[\dot{G}_l]E_G\sigma[\dot{G}_r].\]
Then there is a formula $\varphi_0$ and a fragment $F_0$ containing $\varphi_0$
so that:
\leftskip 0.5in
\noindent (i) $\{\{x\in X: x\models \psi^{\Delta V}\}:
\psi\in F_0, V\in {\cal B}_0\}$ provides the basis for a topology $\tau_0(F_0)$,
and in any generic extension in which $F_0$ becomes countable $(X,\tau_0(F_0))$
is a Polish $G$-space;
\noindent (ii) $\varphi_0$ describes the equivalence class indicated by the triple
$({\Bbb P},p,\sigma)$, in the sense that
\[p\Vdash_{\Bbb P}\forall x\in X(x E_G\sigma[\dot{G}]\Leftrightarrow x\models
\varphi_0);\]
\noindent (iii) and (ii) persists through all further forcing extensions, in that
if $H\subset {\Bbb P}$ is $V$-generic below $p$ and $x=\sigma[H]$, then for all forcing notions ${\Bbb P}'\in V[H]$
\[ V[H]\models {\Bbb P}'\Vdash \forall y\in X(y E_G x\Leftrightarrow y\models \varphi_0).\]
\leftskip 0in
{\bf 1.5 Lemma} Let $G$ be a Polish group, $X$ a Polish $G$-space, $A\subset X$ a
$\Ubf{\Sigma}^1_1$ set displaying a counterexample to TVC$(G,\Ubf{\Sigma}^1_1)$ --
so that $A/G$ has uncountably many orbits, but no perfect set of $E_G$-inequivalent
points.
Then for each ordinal $\delta$ there is a sequence $({\Bbb P}_{\alpha},
p_{\alpha},\sigma_{\alpha})_{\alpha <\delta}$ so that:
\leftskip 0.5in
\noindent (i) for each $\alpha<\delta$
\[(p_{\alpha},p_{\alpha})\Vdash_{{\Bbb P}_{\alpha}\times{\Bbb P}_{\alpha}}\sigma_{\alpha}[\dot{G}_l]E_G\sigma_{\alpha} [\dot{G}_r];\]
\noindent (ii) for each $\alpha<\beta< \delta$
\[(p_{\alpha},p_{\beta})\Vdash_{{\Bbb P}_{\alpha}\times{\Bbb P}_{\beta}}\neg(\sigma_{\alpha}[\dot{G}_l] E_G\sigma_{\beta} [\dot{G}_r]).\]
\leftskip 0in
{\bf $\S$2 Proof}
{\bf 2.1 Definition} $U$ is a {\it regular open} set if
\[(\overline{U})^o=U\]
-- $U$ equals the interior of its closure.
For $A$ a set let $RO(A)=(\overline{A})^o$.
Note then that $RO(A)$ is always a regular open set.
{\bf 2.2 Lemma} Let $G$ be a Polish group. For $V_0, V_1\subset G$
regular open sets,
\[\{g\in G: V_0\cdot g= V_1\}\]
is a closed subset of $G$.
($\Box$)
I need that the reader is willing to allow that we may speak of an
$\omega$-model of set theory containing a Polish space, group, action,
Borel set, and so on, provided suitable codes exist in the well founded
part. Illfounded $\omega$-models are essential to the arguments below.
In what follows let ZFC$^*$ be some large fragment of ZFC, at the
very least
strong enough to prove all the lemmas of $\S$1, but weak enough to admit a finite
axiomatization.
{\bf 2.3 Lemma} Let $M$ be an $\omega$-model of ZFC$^*$. Let $X$, $G$, $G_0$,
${\Bbb P}$,
$F_0$, and so on, be as in 1.4 inside $M$. Suppose
\[\pi:M\cong M\]
is an automorphism of $M$ fixing $X$, $G$, $G_0$, ${\Bbb P}$, $F_0$, $\varphi_0$, and
all elements of ${\cal B}$ and ${\cal B}_0$.
Suppose $H\subset$ Coll($\omega, F_0$) is $M$-generic and
$x\in X^{M[H]}$ with
\[x\models \varphi_0.\]
Then there exists $\bar{g}\in G$ so that for all
$\psi \in F_0$ and $V\in{\cal B}_0$
\[RO(\{g\in G_0: M[H]\models(g\cdot x\models \psi^{\Delta V})\})\bar{g}^{-1}=
RO(\{g\in G_0: M[H]\models(g\cdot x\models \pi(\psi)^{\Delta V})\}).\]
Proof. It suffices to find $g_0, g_1\in G$ so that
\[RO(\{g\in G_0: M[H]\models(g\cdot x\models \psi^{\Delta V})\}){g}_0^{-1}=
RO(\{g\in G_0: M[H]\models(g\cdot x\models \pi(\psi)^{\Delta V})\})g^{-1}_1\]
for all $\psi$ and $V$.
Let ${{\Bbb P}_0} $ be the forcing notion Coll$(\omega, F_0)$.
Fixing $d_G$ a complete metric on $G$ we also build $h_i, h_i'\in G_0$,
$\psi_i, \psi_i'\in F_0$, $W_i, W_i'\in {\cal B}_0$ so that
\leftskip 0.5in
\noindent (i) each $W_i$ is an open neighbourhood of the
identity, $W_{i+1}\subset W_i$, $d_G(W_i)<2^{-i}$;
\noindent (ii) $\pi(\psi_i)=\psi_i'$;
\noindent (iii) $h_{2i}=h_{2i+1}$; $\forall g\in
W_{2i+1}h_{2i}(d_G(g, h_{2i})<2^{-i})$;
\noindent (iv) $h_{2i+1}'=h_{2i+2}'$; $\forall g\in
W_{2i+2}h_{2i+1}'(d_G(g, h_{2i+1}')<2^{-i})$;
\noindent (v) $h_{i+1}\in W_ih_i$; $h_{i+1}'\in W_ih_i'$;
\noindent (vi) $M[H]\models (h_i\cdot x\models (\psi_i)^{\Delta V_i})$;
\noindent (vii) $M[H]\models (h_i'\cdot x\models (\psi_i')^{\Delta V_i})$;
\noindent (viii) $M^{{\Bbb P}_0} $ satisfies that for all $y_0, y_1\in X$
all $\psi\in F_0$, and all $V\in {\cal B}_0$,
if
\[y_0\models \varphi_0\wedge (\psi_i)^{\Delta V_i}\]
\[y_1\models \varphi_0\wedge (\psi_i)^{\Delta V_i}\wedge \psi^{\Delta V}\]
then
\[y_0\models ((\psi_i)^{\Delta V_i}\wedge \psi^{\Delta V})^{\Delta W_i};\]
\noindent (ix) conversely $M^{{\Bbb P}_0} $ satisfies that for all $y_0, y_1\in X$,
$\psi\in F_0$, $V\in {\cal B}_0$,
if
\[y_0\models \varphi_0\wedge (\psi_i')^{\Delta V_i}\]
\[y_1\models \varphi_0\wedge (\psi_i')^{\Delta V_i}\wedge \psi^{\Delta V}\]
then
\[y_0\models ((\psi_i')^{\Delta V_i}\wedge \psi^{\Delta V})^{\Delta W_i}.\]
\leftskip 0in
\noindent Note that (ix) actually follows from (viii), (ii), and the elementarity of
$\pi$.
Before verifying that we may produce $h_i, h_i'\in G_0$,
$\psi_i, \psi_i'\in F_0$, $W_i, V_i\in {\cal B}_0$ as above, let us imagine that
it is already completed and see how to finish. Using (iii) and (iv) we may obtain
$g_0=$ lim$h_i$ and $g_1=$ lim$h_i'$. It suffices to check that for all
\[ g\in RO(\{h\in G_0: M[H]\models(x\models \psi^{\Delta V})\}){g}_0^{-1}\]
we have
\[g\in \overline{(\{h\in G_0:
M[H]\models(h\cdot x\models \pi(\psi)^{\Delta V})\})} g_1^{-1}\]
(since the converse implication will be exactly symmetric).
Then for sufficiently large $i$ we may choose a sufficiently small
open neighbourhood
$W$ of the identity and $\hat{g}\in G_0$ sufficiently close to $g$
so that $W\hat{g} W_i$ is an arbitrarily
small neighbourhood of $g$ and
\[M[H]\models (\hat{g} h_i\cdot x\models \psi^{\Delta V})\]
\[\therefore M[H]\models ( h_i\cdot x\models
(\psi^{\Delta V})^{\Delta W\hat{g}})\]
hence, as witnessed by $y=h_i\cdot x$
\[M^{{\Bbb P}_0}\models \exists y( y\models \varphi_0\wedge (\psi_i)^{\Delta V_i}
\wedge (\psi^{\Delta V})^{\Delta W\hat{g}}),\]
\[\therefore M^{{\Bbb P}_0}\models \exists y( y\models \varphi_0\wedge (\psi_i')^{\Delta V_i}
\wedge (\pi(\psi)^{\Delta V})^{\Delta W\hat{g}}),\]
by elementarity of $\pi$,
\[\therefore M[H]\models (h_i'\cdot x\models (\pi(\psi)^{\Delta V_i})^{\Delta W\hat{g}})
^{\Delta W_i})\]
by (ix), and so there exists some $\bar{g}\in W\hat{g} W_i$ so that
\[M[H]\models (\bar{g}h_i'\cdot x\models \pi(\psi)^{\Delta V}).\]
By letting $d_G(W\hat{g} W_i)\rightarrow 0$ and $h_i'\rightarrow g_1$ we get
\[g\in \overline{\{h\in G_0:
M[H]\models(x\models \pi(\psi)^{\Delta V})\}} g_1^{-1},\]
as required.
We are left to hammer out the sequence.
Suppose that we have $\psi_j, \psi'_j, W_j,V_j, h_j, h_j'$ for $j\leq 2i$. Immediately
we may find $W_{2i+1}\subset W_{2i}$ giving (iii), and then by 1.2 and 1.4(i) we
can produce $\psi_{2i+1}$, $V_{2i+1}$ satisfying (viii) and such that
\[M[H]\models h_{2i}\cdot x=_{df} h_{2i+1}\cdot x\models (\psi_{2i+1})^{\Delta V_{2i+1}}.\]
Then by considering that $\pi$ is elementary
\[M^{{\Bbb P}_0} \models \exists y(y\models \varphi_0\wedge \pi(\psi_{2i})^{\Delta V_{2i}}
\wedge \pi(\psi_{2i+1})^{\Delta V_{2i+1}}).\]
Thus by (ix) we may find $h'\in G_0\cap W_{2i}$ so that
\[M[H]\models (h'h_{2i}\cdot x\models \pi(\psi_{2i})^{\Delta V_{2i}}
\wedge \pi(\psi_{2i+1})^{\Delta V_{2i+1}}).\]
In other words, by (ii), if we let $\psi_{2i+1}'=\pi(\psi_{2i+1})$ then
\[M[H]\models (h'h_{2i}\cdot x\models (\psi_{2i}')^{\Delta V_{2i}}
\wedge (\psi_{2i+1}')^{\Delta V_{2i+1}}).\]
Taking $h_{2i+1}'=h'h_{2i}'$ we complete the transition from $2i$ to $2i+1$.
The further step of producing $\psi_{2i+2}, \psi_{2i+2}',
W_{2i+2}, h_{2i+2}$, $V_{2i+2}$ and $h_{2i+2}'$ is completely symmetrical.
$\Box$
{\bf 2.4 Definition} $S_{\infty}$ {\it divides} a Polish
group $G$ if there is a closed subgroup $H<G$ and a continuous onto homomorphism
\[\pi:H\twoheadrightarrow S_{\infty}.\]
(By Pettis' lemma, any Borel homomorphism between Polish groups must be continuous.)
{\bf 2.5 Lemma} $S_{\infty}$ divides Aut$({{\Bbb Q}} , <)$, the automorphism
group of the rationals equipped with the usual linear ordering.
($\Box$)
{\bf 2.6 Definition} For $X$, $G$, $F_0$, and so on, as in 1.4, ${\Bbb P}_0$= Coll$(\omega,
F_0)$, $\psi_0, \psi_1\in F_0$, $V_0, V_1\in {\cal B}_0$, set
\[(\psi_0, V_0)R (\psi_1, V_1)\]
if in $V^{{\Bbb P}_0}$ for all $x\models \varphi_0$
\[RO(\{g\in G_0: g\cdot x\models (\psi_0)^{\Delta V_0}\})\cap
RO(\{g\in G_0: g\cdot x\models (\psi_1)^{\Delta V_1}\})\neq\emptyset.\]
For $V\in {\cal B}_0$ let ${\cal B}(V)$ be the set of pairs $(\varphi, W)$ such that
for all $\psi\in F_0$ and $W'\in {\cal B}_0$
\[V^{{\Bbb P}_0}\models \forall x_0\models \varphi_0\wedge \varphi^{\Delta W}
((\exists x_1\models \varphi_0\wedge \varphi^{\Delta W}\wedge \psi^{\Delta W'})
\Rightarrow x_0\models (\varphi^{\Delta W}\wedge \psi^{\Delta W'})^{\Delta V}).\]
In other words, ${\cal B}(V)$ corresponds to the basic open sets witnessing 1.2
for $V$ in the topology $\tau_0(F_0)$.
The next lemma states that if the equivalence class corresponding to $\varphi_0$
requires large forcing to be introduced then the formulas $\{\psi^{\Delta V}:
\psi\in F_0, V\in{\cal B}_0\}$ have large $R$-discrete sets.
{\bf 2.7 Lemma} Let $X$, $G$, $F_0$, ${\Bbb P}$, $\varphi_0$,
and so on, be as in 1.4. Let $R$ be as in 2.6. Let $\kappa$ be a cardinal.
Suppose no forcing notion of size less than $\kappa$ introduces a point in
$X$ satisfying $\varphi_0$.
Then there is no infinite $\delta<\kappa$ such that each ${\cal B}(V)$ for $V\in
{\cal B}_0$ has a maximal $R$-discrete set of size $\leq \delta$.
Proof. Suppose otherwise and choose large $\theta>\kappa$ so that $V_{\theta}\models$
ZFC$^*$ and choose an elementary substructure
\[A {\prec} V_{\theta}\]
so that
\[|A|=\delta,\]
\[\delta+1\subset A,\]
and $X$, $G$, $F_0$, $\varphi_0$, and so on, in $A$. Let $N$ be the transitive
collapse of $A$ and
\[\pi: N\rightarrow V_{\theta}\]
the inverse of the collapsing map. Set $\hat{{\Bbb P}}=\pi^{-1}({\Bbb P}_0)$
(where ${{\Bbb P}_0}$= Coll$(\omega, F_0)$), $ \hat{\varphi}_0=\pi^{-1}(\varphi_0)$,
$\hat{F}_0=\pi^{-1}(F_0)$,
choose
\[\hat{H}\subset \hat{\Bbb P},\]
\[H\subset {\Bbb P}_0\]
to be $V$-generic, and choose $\hat{x}\in N[\hat{H}]$ and $x\in V[H]$ so that
\[N[\hat{H}]\models (\hat{x}\models \hat{\varphi}_0),\]
\[V[H]\models (x\models\varphi_0).\]
It suffices to show
\[\hat{x}E_Gx.\]
As in the proof of 2.3 find $h_i, h_i'\in G_0$, $\psi_i\in F_0$,
$\psi_i'\in {\hat{F}}_0$, $V_i, V'_i \in{\cal B}_0$, $W_i\in {\cal B}_0$
and $U_i\subset X$ basic open so that :
\leftskip 0.5in
\noindent (i) $W_{i+1}\subset W_i$, $W_i=(W_i)^{-1}$, $d_G(W_i)<2^{-i},$ $1_G\in W_i$;
$U_{i+1}\subset U_i$,
$d_X(U_i)<2^{-i}$;
\noindent (ii) $\pi(\psi_i')=\psi_i$;
\noindent (iii) $\forall g\in (W_{2i+1})^3 h_{2i}(d_G(g, h_{2i})<2^{-i})$;
$h_{2i+1}=h_{2i}$;
\noindent (iv) $\forall g\in (W_{2i+2})^3 h_{2i+1}'(d_G(g, h_{2i+1})<2^{-i})$;
$h_{2i+2}'=h_{2i+1}'$;
\noindent (v) $h_{i+1}\in (W_i)^3 h_i$, $h_{i+1}'\in (W_i)^3 h_i'$;
\noindent (vi) $V[H]\models (h_i\cdot {x}\models (\psi_i)^{\Delta V_i})$;
\noindent (vii) $N[\hat{H}]\models (h_i'\cdot \hat{x}\models (\psi_i')^{\Delta V_i'})$;
\noindent (viii) $V^{{\Bbb P}_0}\models (\psi_i, V_i)\in {\cal B}(W_i)$;
\noindent (ix) $N^{{\Bbb P}_0}\models (\psi_i', V_i')\in {\cal B}(W_i)$;
\noindent (x) $(\pi(\psi_i'), V_i') R(\psi_i, V_i)$;
\noindent (xi) $h_i\cdot x, h_i'\cdot \hat{x} \in U_i$.
\leftskip 0in
Granting all this may be found we finish quickly. By (iii) and (iv) we
get $g_0$= lim$h_i$ and $g_1$= lim$h_i'$, whence
\[g_0\cdot x=g_1\cdot \hat{x}\]
by (xi). This would contradict ${\hat{\Bbb P}}$ being too small to introduce
a representative of $[x]_G$.
So instead suppose we have built $V_j, V_j', \psi_j$ and so on for $j\leq 2i$ and
concentrate on trying to show that we may continue the construction up to
$2i+2$.
First choose $W_{2i+1}\subset W_{2i}$ in accordance with (i) and (iii) and then for
(xi) and (i) choose $U_{2i+1}\subset U_{2i}$ containing $h_{2i}\cdot x (=_{df}
h_{2i+1}\cdot x)$ with $d_X(U_{2i+1})<2^{-2i-1}$. Then by 1.2 we may choose
$V_{2i+1}, \psi_{2i+1}$ as indicated at (vi) and (viii).
On the $N$ side we use the assumption on $R$ to find $V_{2i+1}'$ and $\psi_{2i+1}'$ in
$N$ so that
\[N^{\hat{\Bbb P}_0}\models (\psi_{2i+1}', V_{2i+1}')\in {\cal B}(W_{2i+1})\]
and
\[(\pi(\psi_{2i+1}'), V_{2i+1}') R (\psi_{2i+1}, V_{2i+1}).\]
Unwinding the definitions gives
\[V^{{\Bbb P}_0}\models (y\models \varphi_0 \wedge \pi(\psi_{2i}')^{\Delta V_{2i}'})
\Rightarrow y\models ((\psi_{2i})^{\Delta V_{2i}}
\wedge \pi(\psi_{2i}')^{\Delta V_{2i}'})^{\Delta W_{2i}},\]
\[V^{{\Bbb P}_0}\models (y\models \varphi_0 \wedge (\psi_{2i})^{\Delta V_{2i}})
\Rightarrow y\models ((\psi_{2i})^{\Delta V_{2i}}
\wedge (\psi_{2i+1})^{\Delta V_{2i+1}})^{\Delta W_{2i}},\]
\[V^{{\Bbb P}_0}\models (y\models \varphi_0 \wedge (\psi_{2i+1})^{\Delta V_{2i+1}})
\Rightarrow y\models ((\psi_{2i+1})^{\Delta V_{2i+1}}
\wedge \pi(\psi_{2i+1}')^{\Delta V_{2i+1}'})^{\Delta W_{2i+1}}.\]
In particular, assuming without loss of generality that
\[(\psi_{2i+1})^{\Delta V_{2i+1}}\Rightarrow \dot{x}\in U_{2i+1}\]
we have
\[V^{{\Bbb P}_0}\models (y\models \varphi_0 \wedge \pi(\psi_{2i}')^{\Delta V_{2i}'})
\Rightarrow y\models ((\psi_{2i+1}')^{\Delta V_{2i+1}'}
\wedge \dot{x}\in U_{2i+1})^{\Delta (W_{2i})^3}.\]
Thus by elementarity of $\pi$ we may find $h'\in (W_{2i})^3\cap G_0$ so that $h'h_{2i}'\cdot \hat{x}\in
U_{2i+1}$ and
\[N[\hat{H}]\models (h'h_{2i}'\cdot \hat{x}\models (\psi_{2i+1}')^{\Delta V_{2i+1}'}).\]
Then setting $h_{2i+1}=h'h_{2i}$ completes the transition from $2i$ to $2i+1$.
The step from $2i+1$ to $2i+2$ is similar.
$\Box$
We need a fact from infinitary model theory.
{\bf 2.8 Theorem} Let $\varphi\in {\cal L}_{\omega_1,\omega}$ and suppose
\[N\models \varphi\]
and $P$ is a predicate in the language of $N$ with
\[|(P)^N|\geq \beth_{\aleph_1}.\]
Then $\varphi$ has a model with generating indiscernibles in $P$.
More precisely there is a model $M$ with language ${\cal L}^*\supset
{\cal L}(N)$, ${\cal L}^*$ having a new symbol $<$, along with new
function symbols of the form $f_{\hat{\varphi}}$ for $\hat{\varphi}$ in the
fragment of ${\cal L}(N)_{\omega_1,\omega}$ generated by $\varphi$,
and distinguished elements $(c_i)_{i\in {\Bbb N}}$,
so that:
\leftskip 0.5in
\noindent (i) $(<)^M$ linearly orders $(P)^M$;
\noindent (ii) each $f_{\hat{\varphi}}$ is a Skolem function for
$\hat{\varphi}$;
\noindent (iii) $M$ is the Skolem hull of $\{c_i:i\in{\Bbb N}\}$ (under the
functions of the form $f_{\hat{\varphi}}$);
\noindent (iv) each $c_i\in (P)^M$;
\noindent (v) for all $\psi$ in the fragment of ${\cal L}^*_{\omega_1,\omega}$
generated by $\varphi$ and $i_1<i_2<...<i_n$, $j_1<...<j_n$ in ${\Bbb N}$
\[M\models \psi(c_{i_1},c_{i_2},...,c_{i_n})\Leftrightarrow
\psi(c_{j_1},c_{j_2},...,c_{j_n});\]
\noindent (vi) $M\models \varphi$.
\leftskip 0in
See \cite{keisler}.
($\Box$)
{\bf 2.9 Theorem} Let $G$ be a Polish group for which TVC($G,\Ubf{\Sigma}^1_1$) fails.
Then $S_{\infty}$ divides $G$.
Proof. Choose some Polish $G$-space $X$ witnessing the failure of TVC($G,\Ubf{\Sigma}^1_1$). Following 1.5 we may find some $({\Bbb P}, p,\sigma)$
introducing an equivalence class as in 1.4 that may not be produced by a forcing
notion of size less than $\beth_{\aleph_1}$. Fix $\varphi_0$, ${\cal B}$,
${\cal B}_0$, $F_0$, $G_0$, and so on, as in 1.4, so that in all generic extensions $V[H]$ of $V$
\[V[H]\models p\Vdash_{\Bbb P}\forall y\in X(yE_G\sigma[\dot{G}]\Leftrightarrow
y\models \varphi_0).\]
Let $V_{\theta}$ be large enough to contain $X$, $G$, $\varphi_0$, and so on, and
satisfy ZFC$^*$. By 2.7 choose $P\subset V_{\theta}$ to be of size $\beth_{\aleph_1}$ and
$R$-discrete (or more precisely,
so for all $(\psi, V)\neq(\psi',V')\in P$ we have for any $V$-generic $H\subset$
Coll$(\omega, F_0)$ that $V[H]\models \neg ((\psi, V)R(\psi',V'))$).
Applying 2.8 to $N=(V_{\theta}; \in, P, X, G, G_0, \varphi_0,...)$
we may obtain an $\omega$-model
with indiscernibles $(\psi_q, V_q)_{q\in {\Bbb Q}}$ in $B^M$. Let $H\subset$
Coll($\omega, (F_0)^M$) be $M$-generic. Choose $x\in M[H]$ so that
\[M[H]\models (x\models \varphi_0).\]
All this granted we may define $G_1$ to be the set of $\bar{g}\in G$ so that for all $q\in
{\Bbb Q}$ there exists $r\in{\Bbb Q}$ with
\[RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_q)^{\Delta V_q})\})\bar{g}^{-1}=
RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_r)^{\Delta V_r})\})\] and for
all $q\in
{\Bbb Q}$ there exists $r\in{\Bbb Q}$ with
\[RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_q)^{\Delta V_q})\})\bar{g}=
RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_r)^{\Delta V_r})\}).\]
$G_1$ is $\Ubf{\Pi}^0_2$ in $G$, by 2.2 and since $\bar{g}$ is in $G_1$ if and only if the
following four conditions hold:
\leftskip 0.5in
\noindent (i) for all $q\in
{\Bbb Q}$ there exists $r\in{\Bbb Q}$ with
\[RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_q)^{\Delta V_q})\})\bar{g}^{-1}\cap
RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_r)^{\Delta V_r})\})\neq\emptyset,\]
\noindent (ii) for all $q,r\in
{\Bbb Q}$
\[RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_q)^{\Delta V_q})\})\bar{g}^{-1}\cap
RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_r)^{\Delta V_r})\})\neq\emptyset\]
implies
\[RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_q)^{\Delta V_q})\})\bar{g}^{-1}=
RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_r)^{\Delta V_r})\});\]
\noindent (iii) for all $q\in
{\Bbb Q}$ there exists $r\in{\Bbb Q}$ with
\[RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_q)^{\Delta V_q})\})\bar{g}\cap
RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_r)^{\Delta V_r})\})\neq\emptyset,\]
\noindent (iv) for all $q,r\in
{\Bbb Q}$
\[RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_q)^{\Delta V_q})\})\bar{g}\cap
RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_r)^{\Delta V_r})\})\neq\emptyset\]
implies
\[RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_q)^{\Delta V_q})\})\bar{g}=
RO(\{g\in G_0: M[H]\models(g\cdot x\models (\psi_r)^{\Delta V_r})\}).\]
\leftskip 0in
\noindent Since $G_1$ is a $\Ubf{\Pi}^0_2$ subgroup of $G$ it must be closed.
For $g\in G_1$ we may define the permutation $\hat{\pi}(g)$ of ${\Bbb Q}$ by the
specification that for all $q\in {\Bbb Q}$
\[(\hat{\pi}(g))(q)=r\]
if and only if $r$ is as above in the definition of $G_1$. This is well defined by
the $R$-discreteness of the set $(P)^M$.
Now let $G_2$ be the set of $g\in G_1$ such that $\hat{\pi}(g)$ defines an
automorphism of the structure $({\Bbb Q}, <)$. $G_2$ is a closed subgroup of
$G_1$ and hence $G$. Since every order preserving permutation of the indiscernibles
induces an automorphism of $M$ the map
\[\hat{\pi}:G_2\rightarrow {\rm Aut}({\Bbb Q}, <)\]
is onto by 2.3. Then by 2.5 $S_{\infty}$ divides $G$.
$\Box$
{\bf 2.10 Conjecture} Assume AD$^{L({\Bbb R})}$. Let $G$ be a Polish group,
$X$ a Polish $G$-space, $A\subset X$ in $\Ubf{\Sigma}^1_1$, and suppose in
${L({\Bbb R})}$ there is an injection
\[i:A/G\hookrightarrow 2^{<\omega_1}.\]
Then there is a Polish $S_{\infty}$-space $Y$ and a $\Ubf{\Sigma}^1_1$ set
$B\subset Y$ and a bijection
\[\pi:A/G\cong B/S_{\infty}.\]
6363 MSB
Mathematics
UCLA
CA90095-1555
[email protected]
\end{document} |
\begin{document}
\begin{abstract}
Projectively coresolved Gorenstein flat modules were introduced by Saroch and Stovicek and were shown to be Gorenstein projective. We give characterizations of Gorenstein projective, Gorenstein flat and projectively coresolved Gorenstein flat modules over a group ring $RG$, where $G$ is an $\textsc{\textbf{lh}}\mathfrak{F}$-group or a group of type $\Phi_R$ and $R$ is a commutative ring of finite Gorenstein weak global dimension. In this situation, we prove that every Gorenstein projective $RG$-module is projectively coresolved Gorenstein flat. We deduce that every Gorenstein projective $RG$-module is Gorenstein flat. The existence of weak characteristic modules for a group $G$ over a commutative ring $R$ plays a central role in our results. Furthermore, we determine the Gorenstein homological dimension of an $\textsc{\textbf{lh}}\mathfrak{F}$-group over a commutative ring of finite Gorenstein weak global dimension.
\end{abstract}
\title{Gorenstein modules and dimension over large families of infinite groups}
\section{Introduction}Gorenstein homological algebra is the relative homological algebra, which is based on Gorenstein projective, Gorenstein injective and Gorenstein flat modules. The standard reference for these modules and the relative dimensions which are based on them is \cite{H1}. Recently, Saroch and Stovicek \cite{SS} introduced the notion of projectively coresolved Gorenstein flat modules (PGF modules, for short). Over a ring $R$, these modules are the syzygies of the acyclic complexes of projective modules that remain acyclic after applying the functor $I\otimes_R \_\!\_$ for every injective module $I$. It is clear that PGF modules are Gorenstein flat. While in classical homological algebra every projective module is flat, the relation between Gorenstein projective and Gorenstein flat modules is still mysterious. As shown in \cite[Theorem 4.4]{SS}, every PGF module is Gorenstein projective. A natural question is whether the class of Gorenstein projective modules is contained in the class of PGF modules.
In this paper, we study Gorenstein projective, Gorenstein flat and PGF modules over the group algebra $RG$ of a group $G$ with coefficients in a commutative ring $R$ and give characterizations of them, in terms of the module $B(G,R)$ of all bounded $R$-valued functions on $G$. Assuming that the commutative ring $R$ is of finite Gorenstein weak global dimension and $G$ is either an $\textsc{\textbf{lh}}\mathfrak{F}$-group or of type $\Phi_R$, we prove that every weak Gorenstein flat module is Gorenstein flat and every weak Gorenstein projective module is PGF. By doing so, we conclude that every Gorenstein projective $RG$-module is Gorenstein flat. Moreover, we determine the Gorenstein homological dimension of an $\textsc{\textbf{lh}}\mathfrak{F}$-group over a commutative ring of finite Gorenstein weak global dimension. We note that the condition of the commutative ring $R$ being of finite Gorenstein weak global dimension is equivalent with the finiteness of $\textrm{sfli}R$, where $\textrm{sfli}R$ is the supremum of the flat lengths (dimensions) of injective $R$-modules (see \cite[Theorem 2.4]{CET}). Our methods are based on the notion of a weak characteristic module for $G$, i.e. an $R$-pure $RG$-monomorphism $0 \rightarrow R \rightarrow A$ where $A$ is $R$-flat and $\textrm{fd}_{RG}A<\infty$. The notion of a weak characteristic module generalizes the characteristic modules which were used to prove many properties of the Gorenstein cohomological dimension $\textrm{Gcd}_R G$ of a group $G$ (see \cite{BDT,Tal}). As shown in \cite[Theorem 5.10]{St}, for every commutative ring $R$ of finite Gorenstein weak global dimension, the existence of a weak characteristic module for a group $G$ over a commutative ring $R$ is equivalent with the finiteness of the Gorenstein homological dimension $\textrm{Ghd}_R G$ of $G$. Furthermore, we make use of the stability properties of Gorenstein flat and PGF modules established in \cite{BK} and \cite{St}. 
Finally, over an $\textsc{\textbf{lh}}\mathfrak{F}$-group, our arguments are based on transfinite induction.
In Section 2, we establish notation, terminology and preliminary results that will be used in the sequel. In Sections 3 and 4 we consider a commutative ring $R$ of finite Gorenstein weak global dimension and a group $G$ such that there exists a weak characteristic module for $G$ over $R$. By noting first that the tensor product of a weak Gorenstein flat $RG$-module and an $R$-flat module is Gorenstein flat (with diagonal action), we prove in Section 3 that the class of Gorenstein flat $RG$-modules coincides with the class of modules which are syzygies in a double infinite exact sequence of flat $RG$-modules and moreover with the class of $RG$-modules such that after being tensored with $B(G,R)$ yield a Gorenstein flat module, with diagonal action (see Theorem 3.7). In Section 4, we note first that the tensor product of a weak Gorenstein projective $RG$-module and an $R$-projective module is PGF (with diagonal action). Working in a similar way as in Section 3, we prove that the class of PGF $RG$-modules coincides with the class of modules which are syzygies in a double infinite exact sequence of projective $RG$-modules and moreover with the class of $RG$-modules which after being tensored with $B(G,R)$ yield a PGF module, with diagonal action. In this way, we infer also that the class of Gorenstein projective $RG$-modules coincides with the class of PGF $RG$-modules, and hence every Gorenstein projective $RG$-module is Gorenstein flat (see Theorem 4.7). This result is noteworthy, since it is not known whether all Gorenstein projective modules are Gorenstein flat over an arbitrary ring. Since for every group of type $\Phi_R$, the $RG$-module $B(G,R)$ is weak characteristic, we obtain in Sections 3 and 4 similar results for this class of groups.
In Sections 5 and 6, we consider a commutative ring of finite Gorenstein weak global dimension and an $\textsc{\textbf{lh}}\mathfrak{F}$-group $G$. Under these assumptions, we prove the same characterizations of Gorenstein flat, Gorenstein projective and PGF $RG$-modules, as in Sections 3 and 4. It seems that, under the assumption for the commutative ring $R$ to be of finite Gorenstein weak global dimension, the existence of a weak characteristic module for a group $G$ over $R$ is essentially equivalent with $G$ being an $\textsc{\textbf{lh}}\mathfrak{F}$-group.
In our final section, we study the Gorenstein homological dimension $\textrm{Ghd}_{R}G$ of an $\textsc{\textbf{lh}}\mathfrak{F}$-group $G$ over a commutative ring $R$ of finite Gorenstein weak global dimension and we prove that $\textrm{Ghd}_{R}G=\textrm{fd}_{RG}B(G,R)$ (see Theorem 7.7). For this purpose, we firstly prove that $\textrm{f.k}(RG)=\textrm{sfli}(RG)=\textrm{fin.f.dim}(RG)$, where we denote by $\textrm{f.k}(RG)$ the supremum of flat dimensions of $RG$ modules $M$ which have finite flat dimension over every finite subgroup of $G$ (see Corollary 7.5). The Gorenstein cohomological dimension $\textrm{Gcd}_{R}G$ of an $\textsc{\textbf{lh}}\mathfrak{F}$-group $G$ over a commutative ring $R$ of finite global dimension, was studied in \cite[Theorem 3.1]{Bis2} and \cite[Theorem A.1]{ET2}.
\noindent
{\em Terminology.}
All rings are assumed to be associative and unital and all ring homomorphisms will be unit preserving. Unless otherwise specified, all modules will be left $R$-modules.
\section{Preliminaries}
In this section, we collect certain notions and preliminary results that will be used in the sequel.
\subsection{Gorenstein projective, Gorenstein flat and PGF modules.}
An acyclic complex $\textbf{P}$ of projective modules is said to be a complete
projective resolution if the complex of abelian groups $\mbox{Hom}_R(\textbf{P},Q)$
is acyclic for every projective module $Q$. Then, a module is Gorenstein
projective if it is a syzygy of a complete projective resolution. We let ${\tt GProj}(R)$
be the class of Gorenstein projective modules. The
Gorenstein projective dimension $\mbox{Gpd}_RM$ of a module $M$ is the
length of a shortest resolution of $M$ by Gorenstein projective modules.
If no such resolution of finite length exists, then we write
$\mbox{Gpd}_RM = \infty$.
An acyclic complex $\textbf{F}$ of flat modules is said to be a complete flat
resolution if the complex of abelian groups $I \otimes_R \textbf{F}$ is acyclic
for every injective right module $I$. Then, a module is Gorenstein
flat if it is a syzygy of a complete flat resolution. We let ${\tt GFlat}(R)$
be the class of Gorenstein flat modules. The Gorenstein flat dimension
$\mbox{Gfd}_RM$ of a module $M$ is the length of a shortest resolution
of $M$ by Gorenstein flat modules. If no such resolution of finite length
exists, then we write $\mbox{Gfd}_RM = \infty$.
The projectively coresolved Gorenstein flat modules (PGF-modules, for short) were introduced by Saroch and Stovicek \cite{SS}. Such a module is a syzygy of an acyclic complex of projective modules $\textbf{P}$, which is such that the complex of abelian groups $I \otimes_R \textbf{P}$ is acyclic for every injective module $I$. It is clear that the class ${\tt PGF}(R)$ of PGF modules is contained in ${\tt GFlat}(R)$. The inclusion ${\tt PGF}(R) \subseteq {\tt GProj}(R)$ is proved in \cite[Theorem 4.4]{SS}. Moreover, the class of PGF $R$-modules, is closed under extensions, direct sums, direct summands and kernels of epimorphisms. The PGF dimension $\mbox{PGF-dim}_RM$ of a module $M$ is the length of a shortest resolution of $M$ by PGF modules. If no such resolution of finite length exists, then we write $\mbox{PGF-dim}_RM = \infty$ (see \cite{DE}).
\subsection{Group rings.}Let $R$ be a commutative ring, $G$ be a group and consider the associated group ring $RG$. The standard reference for group cohomology is \cite{Br}. Using the diagonal action of the group $G$, the tensor product $M\otimes_R N$ of two $RG$-modules is also an $RG$-module using the diagonal action of $G$; we define $g\cdot (x \otimes y)=gx \otimes gy \in M\otimes_R N$ for every $g\in G$, $x\in M$ and $y\in N$. We note that for every projective $RG$-module $M$ and every $R$-projective $RG$-module $N$, then the diagonal $RG$-module $M\otimes_R N$ is also projective. Similarly, for every flat $RG$-module $M$ and every $R$-flat $RG$-module $N$, the diagonal $RG$-module $M\otimes_R N$ is also flat. Indeed, since the class ${\tt Flat}(RG)$ of flat $RG$-modules is closed under filtered colimits and direct sums, invoking the Govorov-Lazard theorem, we may assume that $M=RG$.
\subsection{$\textsc{\textbf{lh}}\mathfrak{F}$-groups and groups of type $\Phi_R$} The class $\textsc{\textbf{h}}\mathfrak{F}$ was defined by Kropholler in \cite{Kr}. This is the smallest class of groups, which contains the class $\mathfrak{F}$ of finite groups and is such that whenever a group $G$ admits a finite dimensional
contractible $G$-CW-complex with stabilizers in $\textsc{\textbf{h}}\mathfrak{F}$, then we also have $G\in \textsc{\textbf{h}}\mathfrak{F}$. More precisely, we define $\textsc{\textbf{h}}_0\mathfrak{F}:=\mathfrak{F}$, and for every ordinal number $\alpha>0$, we say that a group $G$ belongs to the class $\textsc{\textbf{h}}_{\alpha}\mathfrak{F}$ iff there exists a finite dimensional contractible CW-complex on which $G$ acts such that every isotropy subgroup of the action belongs to $\textsc{\textbf{h}}_{\beta}\mathfrak{F}$ for some ordinal $\beta < \alpha$. A group belongs to the class $\textsc{\textbf{h}}\mathfrak{F}$, if it belongs to the class $\textsc{\textbf{h}}_{\alpha}\mathfrak{F}$ for some ordinal $\alpha$. The class $\textsc{\textbf{lh}}\mathfrak{F}$ consists of those groups, all of whose finitely generated subgroups are in $\textsc{\textbf{h}}\mathfrak{F}$. All
soluble groups, all groups of finite virtual cohomological dimension and all automorphism
groups of Noetherian modules over a commutative ring are $\textsc{\textbf{lh}}\mathfrak{F}$-groups. The class $\textsc{\textbf{lh}}\mathfrak{F}$
is closed under extensions, ascending unions, free products with amalgamation and HNN
extensions.
A group $G$ is said to be of type $\Phi_R$ if it has the property that for every $RG$-module $M$, $\textrm{pd}_{RG}M<\infty$ if and only if $\textrm{pd}_{RH}M<\infty$ for every finite subgroup $H$ of $G$. These groups were defined over $\mathbb{Z}$ in \cite{Ta}. Over a commutative ring $R$ of finite global dimension, every group of finite virtual cohomological dimension and every group which acts on a tree with finite stabilizers is of type $\Phi_R$ (see \cite[Corollary 2.6]{MS}).
Let $B(G,R)$ be the $RG$-module which consists of all functions from $G$ to $R$ whose image is a finite subset of $R$. The $RG$-module $B(G,R)$ is $R$-free and $RH$-free for every finite subgroup $H$ of $G$. For every element $\lambda \in R$, the constant function $\iota(\lambda)\in B(G,R)$ with value $\lambda$ is invariant under the action of $G$. The map $\iota: R \rightarrow B(G,R)$ which is defined in this way is then $RG$-linear and $R$-split. Indeed, for every fixed element $g\in G$, there exists an $R$-linear splitting for $\iota$ by evaluating functions at $g$. Moreover, the cokernel $\overline{B}(G,R)$ of $\iota$ is $R$-free (see \cite[Lemma 3.3]{Kr2} and \cite[Lemma 3.4]{BC}). We note that $\textrm{pd}_{RG}B(G,R)<\infty$ over any group $G$ of type $\Phi_R$. Thus, $B(G,R)$ is a (weak) characteristic module for every group $G$ of type $\Phi_R$ over any commutative ring $R$.
\subsection{Gedrich-Gruenberg invariants and Gorenstein global dimensions}The invariants $\textrm{silp}R$, $\textrm{spli}R$ were defined by Gedrich and Gruenberg in \cite{GG} as the supremum of the injective lengths (dimensions) of projective modules and the supremum of the projective lengths (dimensions) of injective modules, respectively. The invariant $\textrm{sfli}R$ is defined similarly as the supremum of the flat lengths (dimensions) of injective modules. Since projective modules are flat, the inequality $\textrm{sfli}R\leq \textrm{spli}R$ is clear. Moreover, for every commutative ring $R$ we have the inequality $\textrm{silp}R\leq\textrm{spli}R$, with equality if $\textrm{spli}R<\infty$ (see \cite[Corollary 5.4]{DE}). Thus, for every commutative ring $R$, invoking \cite[Theorem 4.1]{Emm3}, we infer that the finiteness of $\textrm{spli}R$ is equivalent to the finiteness of $\textrm{Ggl.dim}R$, and then $\textrm{Ggl.dim}R=\textrm{spli}R$. Furthermore, for every commutative ring $R$, invoking \cite[Theorem 2.4]{CET}, we infer that the finiteness of $\textrm{sfli}R$ is equivalent to the finiteness of $\textrm{Gwgl.dim}R$, and then $\textrm{Gwgl.dim}R=\textrm{sfli}R$.
\subsection{Weak Gorenstein modules}Let $R$ be a commutative ring. We denote by ${\tt WGProj}(R)$ the class of modules which are syzygies of an acyclic complex of projective modules $\textbf{P}$. We note that ${\tt GProj}(R)\subseteq {\tt WGProj}(R)$ and ${\tt PGF}(R)\subseteq {\tt WGProj}(R)$. Since the finiteness of $\textrm{sfli}R$ yields ${\tt WGProj}(R)\subseteq {\tt PGF}(R)$, we have ${\tt WGProj}(R)={\tt PGF}(R)={\tt GProj}(R)$ (see \cite[Theorem 4.4]{SS}).
Analogously, we denote by ${\tt WGFlat}(R)$ the class of modules which are syzygies of an acyclic complex of flat modules $\textbf{F}$. We note that ${\tt GFlat}(R)\subseteq {\tt WGFlat}(R)$. Moreover, the finiteness of $\textrm{sfli}R$ implies that ${\tt WGFlat}(R)\subseteq {\tt GFlat}(R)$, and hence ${\tt WGFlat}(R)={\tt GFlat}(R)$.
\subsection{Weak characteristic modules}Let $R$ be a commutative ring and $G$ be a group. We define a weak characteristic module for $G$ over $R$ as an $R$-flat $RG$-module $A$ with $\textrm{fd}_{RG}A <\infty$, which admits an $R$-pure $RG$-linear monomorphism $\jmath: R \rightarrow A$. We note that the existence of a weak characteristic module is equivalent with the existence of an $R$-projective $RG$-module $A'$ with $\textrm{fd}_{RG}A <\infty$, which admits an $R$-split $RG$-linear monomorphism $\jmath': R \rightarrow A'$ (see \cite[Theorem 5.10]{St}). If $\textrm{sfli}R<\infty$, the existence of a weak characteristic module for $G$ over $R$ is equivalent with the finiteness of $\textrm{sfli}(RG)$ (see \cite[Theorem 5.10]{St}).
\section{Gorenstein flat modules over groups with weak characteristic modules} We consider a commutative ring $R$ such that $\textrm{sfli}R<\infty$ and a group $G$ such that there exists a weak characteristic module for $G$ over $R$. Our goal in this section is to give a characterization of the class ${\tt GFlat}(RG)$, in terms of the $RG$-module $B(G,R)$. Moreover, under these conditions, we conclude that the class ${\tt GFlat}(RG)$ coincides with the class ${\tt WGFlat}(RG)$. Since for every group of type $\Phi_R$, the $RG$-module $B(G,R)$ is weak characteristic, similar results are obtained.
\begin{Proposition}\label{newlemmad}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group such that there exists a weak characteristic module for $G$ over $R$. Then for all $RG$-modules $M$, $N$ such that $M$ is weak Gorenstein flat and $N$ is $R$-flat, the $RG$-module $M\otimes_R N$ is Gorenstein flat.
\end{Proposition}
\begin{proof}Let $M$ be a weak Gorenstein flat $RG$-module and $N$ be an $R$-flat $RG$-module. Then, there exists an acyclic complex of flat $RG$-modules $$\textbf{F}=\cdots \rightarrow F_{2}\rightarrow F_1\rightarrow F_0 \rightarrow F_{-1}\rightarrow \cdots,$$ such that $M=\textrm{Im}(F_1 \rightarrow F_0)$. Since $N$ is $R$-flat, we obtain the induced complex of $RG$-flat modules (with diagonal action) $$\textbf{F}\otimes_R N = \cdots \rightarrow F_{2}\otimes_R N\rightarrow F_1\otimes_R N\rightarrow F_0\otimes_R N \rightarrow F_{-1}\otimes_R N\rightarrow \cdots,$$ where $M\otimes_R N= \textrm{Im}(F_1\otimes_R N \rightarrow F_0\otimes_R N)$. Since $\textrm{sfli}R<\infty$, the existence of a weak characteristic module is equivalent to the finiteness of $\textrm{sfli}(RG)$ by \cite[Theorem 5.10]{St}. Thus, the complex $I\otimes_{RG}(\textbf{F}\otimes_R N)$ is acyclic for every injective $RG$-module $I$. We conclude that the $RG$-module $M\otimes_R N$ is Gorenstein flat.
\end{proof}
\begin{Definition}Let $R$ be a commutative ring and $G$ be a group. We denote by $\mathscr{X}_{B,{\tt GFlat}}$ the class of $RG$-modules $\mathscr{X}_{B,{\tt GFlat}}=\{M\in \textrm{Mod}(RG): \, M\otimes_R B(G,R)\in {\tt GFlat}(RG)\}$.
\end{Definition}
\begin{Corollary}\label{coor24}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group such that there exists a weak characteristic module for $G$ over $R$. Then, ${\tt WGFlat}(RG)\subseteq\mathscr{X}_{B,{\tt GFlat}}$.
\end{Corollary}
\begin{proof}Since the $RG$-module $B(G,R)$ is $R$-free, this is an immediate consequence of Proposition \ref{newlemmad}.
\end{proof}
\begin{Proposition}\label{ppst} Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group such that there exists a weak characteristic module for $G$ over $R$. Then,
$\mathscr{X}_{B,{\tt GFlat}}\subseteq {\tt GFlat}(RG)$.
\end{Proposition}
\begin{proof}Let $B=B(G,R)$, $\overline{B}=\overline{B}(G,R)$ and consider an $RG$-module $M$ such that the $RG$-module $M\otimes_R B$ is Gorenstein flat. We also let $V_i=\overline{B}^{\otimes i}\otimes_R B$ for every $i\geq 0$, where $\overline{B}^{\otimes 0}=R$. Since the short exact sequence of $RG$-modules $0\rightarrow R \rightarrow B \rightarrow \overline{B}\rightarrow 0$ is $R$-split, we obtain for every $i\geq 0$ a short exact sequence of $RG$-modules of the form $$0\rightarrow M\otimes_R\overline{B}^{\otimes i}\rightarrow M\otimes_R V_i \rightarrow M\otimes_R \overline{B}^{\otimes i+1}\rightarrow 0.$$ Then, the splicing of the above short exact sequences for every $i\geq 0$ yields an exact sequence of the form
\begin{equation}\label{eq1}
0\rightarrow M \xrightarrow{\alpha} M\otimes_R V_0 \rightarrow M\otimes_R V_1 \rightarrow M\otimes_R V_2 \rightarrow \cdots.
\end{equation}
Since the $RG$-module $M\otimes_R B$ is Gorenstein flat and $\overline{B}$ is $R$-flat, we obtain that the $RG$-module $M\otimes_R V_i\cong (M\otimes_R B)\otimes_R \overline{B}^{\otimes i}$ is Gorenstein flat for every $i\geq 0$, by Proposition \ref{newlemmad}. We also consider an $RG$-flat resolution of $M$
\begin{equation*}
\textbf{Q}=\cdots \rightarrow Q_2 \rightarrow Q_1 \rightarrow Q_0 \xrightarrow{\beta} M \rightarrow 0.
\end{equation*}
Splicing the resolution $\textbf{Q}$ with the exact sequence (\ref{eq1}), we obtain an acyclic complex of Gorenstein flat $RG$-modules
\begin{equation*}
\mathfrak{P}=\cdots \rightarrow Q_2 \rightarrow Q_1 \rightarrow Q_0 \xrightarrow{\alpha \beta} M\otimes_R V_0 \rightarrow M\otimes_R V_1 \rightarrow M\otimes_R V_2 \rightarrow \cdots
\end{equation*}
which has syzygy the $RG$-module $M$. It suffices to prove that the complex $I\otimes_{RG}\mathfrak{P}$ is acyclic for every injective $RG$-module $I$. Using \cite[Theorem 1.2]{BK} we will then obtain that the $RG$-module $M$ is Gorenstein flat. Let $I$ be an injective $RG$-module. Then, the $R$-split short exact sequence of $RG$-modules $0\rightarrow R \rightarrow B \rightarrow \overline{B}\rightarrow 0$ yields an induced exact sequence of $RG$-modules with diagonal action $0\rightarrow I\rightarrow B \otimes_{R}I\rightarrow \overline{B}\otimes_{R} I\rightarrow 0$ which is $RG$-split. Thus, it suffices to prove that the complex $(B\otimes_{R}I)\otimes_{RG}\mathfrak{P}$ is acyclic. Since $B$ is $R$-flat, we obtain that the acyclic complex $\textbf{Q}\otimes_R B$ is a flat resolution of the Gorenstein flat $RG$-module $M\otimes_{R}B$. Hence, every syzygy module of $\textbf{Q}\otimes_R B$ is also a Gorenstein flat $RG$-module (see \cite[Lemma 2.4]{Ben}). Moreover, the $RG$-module $(M\otimes_R B)\otimes_R \overline{B}^{\otimes i}\cong (M\otimes_R \overline{B}^{\otimes i})\otimes_R B$ is Gorenstein flat for every $i\geq 0$. Consequently, every syzygy module of the acyclic complex
\begin{equation*}
\mathfrak{P}\otimes_R B =\cdots\rightarrow Q_1\otimes_R B\rightarrow Q_0\otimes_R B \rightarrow M\otimes_R V_0 \otimes_R B\rightarrow M\otimes_R V_1 \otimes_R B\rightarrow \cdots
\end{equation*} is a Gorenstein flat $RG$-module. As the functor $\textrm{Tor}^{RG}_1 (I,\_\!\_)$ vanishes on Gorenstein flat $RG$-modules, we conclude that the complex $(B\otimes_{R}I)\otimes_{RG}\mathfrak{P}\cong I\otimes_{RG}(\mathfrak{P}\otimes_{R}B)$ is acyclic, as needed.\end{proof}
\begin{Remark}\rm \label{remarkaki} A careful examination of the proof of Proposition \ref{ppst} shows that the existence of a weak characteristic module for $G$ over $R$ was only needed to ensure that the $RG$-module $(M\otimes_R B)\otimes_R \overline{B}^{\otimes i}$ is Gorenstein flat for every $i\geq 0$.
\end{Remark}
\begin{Remark}\rm Let $R$ be a commutative ring such that $\textrm{sfli}(R)<\infty$ and $G$ be a group such that there exists a weak characteristic module $A$ for $G$ over $R$. We also consider an $RG$-module $M$ such that the $RG$-module $M\otimes_R A$ is Gorenstein flat. Then, $M$ is a Gorenstein flat $RG$-module.
Indeed, there exists an $R$-pure $RG$-short exact sequence $0\rightarrow R \rightarrow A \rightarrow \overline{A}\rightarrow 0$, where the $RG$-modules $A,\overline{A}$ are $R$-flat. Following step by step the proof of Proposition \ref{ppst}, we construct an acyclic complex of Gorenstein flat modules
\begin{equation*}
\mathfrak{P'}=\cdots \rightarrow Q_2' \rightarrow Q_1' \rightarrow Q_0' \rightarrow M\otimes_R V_0' \rightarrow M\otimes_R V_1' \rightarrow M\otimes_R V_2' \rightarrow \cdots,
\end{equation*} where $V_i'={\overline{A}}^{\otimes i}\otimes_R A$, for every $i\geq 0$, and has syzygy the $RG$-module $M$. Using the $R$-pure $RG$-short exact sequence $0\rightarrow R \rightarrow A \rightarrow \overline{A} \rightarrow 0 $ and \cite[Theorem 1.2]{BK}, it suffices to show that the complex $I\otimes_{RG}(\mathfrak{P'}\otimes_{R}A)$ is acyclic for every injective $RG$-module $I$. This follows exactly as in the proof of Proposition \ref{ppst}, since every syzygy module of $\mathfrak{P'}\otimes_{R}A$ is Gorenstein flat.
\end{Remark}
\begin{Theorem}\label{the410}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group such that there exists a weak characteristic module for $G$ over $R$. Then, $\mathscr{X}_{B,{\tt GFlat}}={\tt GFlat}(RG)={\tt WGFlat}(RG)$.
\end{Theorem}
\begin{proof}Invoking Corollary \ref{coor24}, we have ${\tt WGFlat}(RG)\subseteq\mathscr{X}_{B,{\tt GFlat}}$. Moreover, Proposition \ref{ppst} yields $\mathscr{X}_{B,{\tt GFlat}}\subseteq{\tt GFlat}(RG)$ and the inclusion ${\tt GFlat}(RG)\subseteq{\tt WGFlat}(RG)$ is clear. We conclude that $\mathscr{X}_{B,{\tt GFlat}}={\tt GFlat}(RG)={\tt WGFlat}(RG)$, as needed. \end{proof}
\begin{Corollary}\label{cor212}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group. If $\textrm{fd}_{RG}B(G,R)<\infty$, then $\mathscr{X}_{B,{\tt GFlat}}={\tt GFlat}(RG)={\tt WGFlat}(RG)$.
\end{Corollary}
\begin{proof}Since $\textrm{fd}_{RG}B(G,R)<\infty$, the $RG$-module $B(G,R)$ is a weak characteristic module for $G$ over $R$. The result is now a direct consequence of Theorem \ref{the410}.\end{proof}
\begin{Corollary}\label{cor39}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group of type $\Phi_R$. Then, $\mathscr{X}_{B,{\tt GFlat}}={\tt GFlat}(RG)={\tt WGFlat}(RG)$.
\end{Corollary}
\begin{proof}Since the $RG$-module $B(G,R)$ is $RH$-free for every finite subgroup $H$ of $G$, the definition of a group of type $\Phi_R$ implies that $\textrm{fd}_{RG}B(G,R)<\infty$. The result is now an immediate consequence of Corollary \ref{cor212}.
\end{proof}
\begin{Corollary} Let $R$ be a commutative ring of finite weak global dimension and $G$ be a group of type $\Phi_R$. Then, $\mathscr{X}_{B,{\tt Flat}}={\tt GFlat}(RG)={\tt WGFlat}(RG)$, where $\mathscr{X}_{B,{\tt Flat}}=\{M\in \textrm{Mod}(RG): \, M\otimes_R B(G,R)\in {\tt Flat}(RG)\}$.
\end{Corollary}
\begin{proof}Invoking Corollary \ref{cor39}, it suffices to show that $\mathscr{X}_{B,{\tt GFlat}}\subseteq\mathscr{X}_{B,{\tt Flat}}$. Let $M\in \mathscr{X}_{B,{\tt GFlat}}$. Then, $M\in {\tt WGFlat}(RG)\subseteq {\tt WGFlat}(R)$, and hence the finiteness of $\textrm{wgl.dim}(R)$ implies that $M$ is $R$-flat. Since $\textrm{fd}_{RG} B(G,R)<\infty$, we obtain that $\textrm{fd}_{RG}M\otimes_R B(G,R)<\infty$. We conclude that $M\otimes_R B(G,R)\in {\tt GFlat}(RG)\cap \overline{{\tt Flat}}(RG)={\tt Flat}(RG)$ (see \cite[Lemma 2.4]{St}).
\end{proof}
\section{Gorenstein projective and PGF modules over groups with weak characteristic modules} We consider a commutative ring $R$ such that $\textrm{sfli}R<\infty$ and a group $G$ such that there exists a weak characteristic module for $G$ over $R$. Our goal in this section is to give a characterization of the class ${\tt GProj}(RG)$ related to the $RG$-module $B(G,R)$. Moreover, under these conditions, we conclude that the classes ${\tt GProj}(RG)$, ${\tt PGF}(RG)$ and ${\tt WGProj}(RG)$ coincide. As a result we have that, under the above conditions, every Gorenstein projective $RG$-module is Gorenstein flat. Since for every group of type $\Phi_R$, the $RG$-module $B(G,R)$ is weak characteristic, similar results are obtained.
\begin{Proposition}\label{Newlemmad}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group such that there exists a weak characteristic module for $G$ over $R$. Then for every $RG$-modules $M$, $N$ such that $M$ is weak Gorenstein projective and $N$ is $R$-projective, the $RG$-module $M\otimes_R N$ is PGF.
\end{Proposition}
\begin{proof}Let $M$ be a weak Gorenstein projective $RG$-module and $N$ be an $R$-projective $RG$-module. Then, there exists an acyclic complex of projective $RG$-modules $$\textbf{P}=\cdots \rightarrow P_{2}\rightarrow P_1\rightarrow P_0 \rightarrow P_{-1}\rightarrow \cdots,$$ such that $M=\textrm{Im}(P_1 \rightarrow P_0)$. Since $N$ is $R$-projective, we obtain the induced complex of $RG$-projective modules (with diagonal action) $$\textbf{P}\otimes_R N = \cdots \rightarrow P_{2}\otimes_R N\rightarrow P_1\otimes_R N\rightarrow P_0\otimes_R N \rightarrow P_{-1}\otimes_R N\rightarrow \cdots,$$ where $M\otimes_R N= \textrm{Im}(P_1\otimes_R N \rightarrow P_0\otimes_R N)$. Since $\textrm{sfli}R<\infty$, the existence of the weak characteristic module $A$ is equivalent to the finiteness of $\textrm{sfli}(RG)$ by \cite[Theorem 5.10]{St}. Thus, the complex $I\otimes_{RG}(\textbf{P}\otimes_R N)$ is acyclic for every injective $RG$-module $I$. We conclude that the $RG$-module $M\otimes_R N$ is PGF.
\end{proof}
\begin{Definition}Let $R$ be a commutative ring and $G$ be a group. We denote by $\mathscr{X}_{B,{\tt PGF}}$ the class of $RG$-modules $\mathscr{X}_{B,{\tt PGF}}=\{M\in \textrm{Mod}(RG): \, M\otimes_R B(G,R)\in {\tt PGF}(RG)\}$.
\end{Definition}
\begin{Corollary}\label{coor43}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group such that there exists a weak characteristic module for $G$ over $R$. Then, ${\tt WG Proj}(RG)\subseteq\mathscr{X}_{B,{\tt PGF}}$.
\end{Corollary}
\begin{proof}Since the $RG$-module $B(G,R)$ is $R$-free, this is an immediate consequence of Proposition \ref{Newlemmad}.\end{proof}
\begin{Proposition}\label{nppst} Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group such that there exists a weak characteristic module for $G$ over $R$. Then,
$\mathscr{X}_{B,{\tt PGF}}\subseteq {\tt PGF}(RG)$.
\end{Proposition}
\begin{proof}Let $B=B(G,R)$, $\overline{B}=\overline{B}(G,R)$ and consider an $RG$-module $M$ such that the $RG$-module $M\otimes_R B$ is PGF. We also let $V_i=\overline{B}^{\otimes i}\otimes_R B$ for every $i\geq 0$, where $\overline{B}^{\otimes 0}=R$. Since the short exact sequence of $RG$-modules $0\rightarrow R \rightarrow B \rightarrow \overline{B}\rightarrow 0$ is $R$-split, we obtain for every $i\geq 0$ a short exact sequence of $RG$-modules of the form $$0\rightarrow M\otimes_R\overline{B}^{\otimes i}\rightarrow M\otimes_R V_i \rightarrow M\otimes_R \overline{B}^{\otimes i+1}\rightarrow 0.$$ Then, the splicing of the above short exact sequences for every $i\geq 0$ yields an exact sequence of the form
\begin{equation}\label{Eeq1}
0\rightarrow M \xrightarrow{\alpha} M\otimes_R V_0 \rightarrow M\otimes_R V_1 \rightarrow M\otimes_R V_2 \rightarrow \cdots.
\end{equation}
Since the $RG$-module $M\otimes_R B$ is PGF and $\overline{B}$ is $R$-projective, we obtain that the $RG$-module $M\otimes_R V_i\cong (M\otimes_R B)\otimes_R \overline{B}^{\otimes i}$ is PGF for every $i\geq 0$, by Proposition \ref{Newlemmad}. We also consider an $RG$-projective resolution of $M$
\begin{equation*}
\textbf{P}=\cdots \rightarrow P_2 \rightarrow P_1 \rightarrow P_0 \xrightarrow{\beta} M \rightarrow 0.
\end{equation*}
Splicing the resolution $\textbf{P}$ with the exact sequence (\ref{Eeq1}), we obtain an acyclic complex of PGF $RG$-modules
\begin{equation*}
\mathfrak{P}=\cdots \rightarrow P_2 \rightarrow P_1 \rightarrow P_0 \xrightarrow{\alpha \beta} M\otimes_R V_0 \rightarrow M\otimes_R V_1 \rightarrow M\otimes_R V_2 \rightarrow \cdots
\end{equation*}
which has syzygy the $RG$-module $M$. It suffices to prove that the complex $I\otimes_{RG}\mathfrak{P}$ is acyclic for every injective $RG$-module $I$. Using \cite[Theorem 6.7]{St} we will then obtain that the $RG$-module $M$ is PGF. Let $I$ be an injective $RG$-module. Then, the $R$-split short exact sequence of $RG$-modules $0\rightarrow R \rightarrow B \rightarrow \overline{B}\rightarrow 0$ yields an induced exact sequence of $RG$-modules with diagonal action $0\rightarrow I\rightarrow B \otimes_{R}I\rightarrow \overline{B}\otimes_{R} I\rightarrow 0$ which is $RG$-split. Thus, it suffices to prove that the complex $(B\otimes_{R}I)\otimes_{RG}\mathfrak{P}$ is acyclic. Since $B$ is $R$-projective, we obtain that the acyclic complex $\textbf{P}\otimes_R B$ is a projective resolution of the PGF $RG$-module $M\otimes_{R}B$. Therefore, every syzygy module of $\textbf{P}\otimes_R B$ is also a PGF $RG$-module (see \cite[Proposition 2.1]{St}). Moreover, the $RG$-module $(M\otimes_R B)\otimes_R \overline{B}^{\otimes i}\cong (M\otimes_R \overline{B}^{\otimes i})\otimes_R B$ is PGF for every $i\geq 0$. Consequently, every syzygy module of the acyclic complex
\begin{equation*}
\mathfrak{P}\otimes_R B =\cdots\rightarrow P_1\otimes_R B\rightarrow P_0\otimes_R B \rightarrow M\otimes_R V_0 \otimes_R B\rightarrow M\otimes_R V_1 \otimes_R B\rightarrow \cdots
\end{equation*} is a PGF $RG$-module. As the functor $\textrm{Tor}^{RG}_1 (I,\_\!\_)$ vanishes on PGF modules, we conclude that the complex $(B\otimes_{R}I)\otimes_{RG}\mathfrak{P}\cong I\otimes_{RG}(\mathfrak{P}\otimes_{R}B)$ is acyclic, as needed.\end{proof}
\begin{Remark}\rm \label{Remarkaki} A careful examination of the proof of Proposition \ref{nppst} shows that the existence of a characteristic module for $G$ over $R$ was only needed to ensure that the $RG$-module $(M\otimes_R B)\otimes_R \overline{B}^{\otimes i}$ is PGF for every $i\geq 0$.
\end{Remark}
\begin{Remark}\label{rem46}\rm Let $R$ be a commutative ring such that $\textrm{sfli}(R)<\infty$ and $G$ be a group such that there exists a weak characteristic module $A$ for $G$ over $R$. We also consider an $RG$-module $M$ such that the $RG$-module $M\otimes_R A$ is PGF. Then, $M$ is a PGF $RG$-module.
Indeed, there exists an $R$-split $RG$-short exact sequence $0\rightarrow R \rightarrow A \rightarrow \overline{A}\rightarrow 0$, where the $RG$-modules $A,\overline{A}$ are $R$-projective (this follows from \cite[Theorem 5.10(v)]{St}). Following step by step the proof of Proposition \ref{nppst}, we construct an acyclic complex of PGF modules
\begin{equation*}
\mathfrak{P'}=\cdots \rightarrow P_2' \rightarrow P_1' \rightarrow P_0' \rightarrow M\otimes_R V_0' \rightarrow M\otimes_R V_1' \rightarrow M\otimes_R V_2' \rightarrow \cdots,
\end{equation*} where $V_i'={\overline{A}}^{\otimes i}\otimes_R A$, for every $i\geq 0$, and which has the $RG$-module $M$ as a syzygy. Using the $R$-split $RG$-short exact sequence $0\rightarrow R \rightarrow A \rightarrow \overline{A} \rightarrow 0 $ and \cite[Theorem 6.7]{St}, it suffices to show that the complex $I\otimes_{RG}(\mathfrak{P'}\otimes_{R}A)$ is acyclic for every injective $RG$-module $I$. This follows exactly as in the proof of Proposition \ref{nppst}, since every syzygy module of $\mathfrak{P'}\otimes_{R}A$ is PGF.
\end{Remark}
\begin{Theorem}\label{The410}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group such that there exists a weak characteristic module for $G$ over $R$. Then, $\mathscr{X}_{B,{\tt PGF}}={\tt PGF}(RG)={\tt WGProj}(RG)={\tt GProj}(RG)$.
\end{Theorem}
\begin{proof}Invoking Corollary \ref{coor43} and Proposition \ref{nppst}, we have the inclusions ${\tt WG Proj}(RG)\subseteq\mathscr{X}_{B,{\tt PGF}}\subseteq{\tt PGF}(RG)$. Moreover, ${\tt PGF}(RG)\subseteq {\tt GProj}(RG)$ by \cite[Theorem 4.4]{SS} and the inclusion ${\tt GProj}(RG)\subseteq{\tt WGProj}(RG)$ is clear. We conclude that $\mathscr{X}_{B,{\tt PGF}}={\tt PGF}(RG)={\tt WGProj}(RG)={\tt GProj}(RG)$, as needed.\end{proof}
\begin{Remark}\rm (i) We note that Theorem \ref{The410} implies that for every commutative ring $R$ such that $\textrm{sfli}R<\infty$ and every group $G$ such that there exists a weak characteristic module for $G$ over $R$, the class $\mathscr{X}_{B,{\tt PGF}}$ coincides with the class $\mathscr{X}_{B,{\tt GProj}}=\{M\in \textrm{Mod}(RG): \, M\otimes_R B(G,R)\in {\tt GProj}(RG)\}$.
(ii) Let $R$ be a commutative ring such that $\textrm{sfli}(R)<\infty$ and $G$ be a group such that there exists a weak characteristic module $A$ for $G$ over $R$. We also consider an $RG$-module $M$ such that the $RG$-module $M\otimes_R A$ is Gorenstein projective. Then, $M$ is a Gorenstein projective $RG$-module. This follows from Remark \ref{rem46} and Theorem \ref{The410}.
\end{Remark}
\begin{Corollary}\label{Cor212}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group. If $\textrm{fd}_{RG}B(G,R)<\infty$, then $\mathscr{X}_{B,{\tt PGF}}={\tt PGF}(RG)={\tt WGProj}(RG)={\tt GProj}(RG)$.
\end{Corollary}
\begin{proof}Since $\textrm{fd}_{RG}B(G,R)<\infty$, the $RG$-module $B(G,R)$ is a weak characteristic module for $G$ over $R$. The result is now a direct consequence of Theorem \ref{The410}.\end{proof}
\begin{Corollary}\label{cor410}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group of type $\Phi_R$. Then, $\mathscr{X}_{B,{\tt PGF}}={\tt PGF}(RG)={\tt WGProj}(RG)={\tt GProj}(RG)$.
\end{Corollary}
\begin{proof}Since the $RG$-module $B(G,R)$ is $RH$-free for every finite subgroup $H$ of $G$, the definition of a group of type $\Phi_R$ implies that $\textrm{fd}_{RG}B(G,R)<\infty$. The result is now an immediate consequence of Corollary \ref{Cor212}.
\end{proof}
\begin{Corollary}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group of type $\Phi_R$. Then, ${\tt GProj}(RG)\subseteq {\tt GFlat}(RG)$. Moreover, for every $RG$-module $M$ we have $\textrm{Gfd}_{RG}M\leq \textrm{Gpd}_{RG}M =\textrm{PGF-dim}_{RG}M$.
\end{Corollary}
\begin{proof}This is a direct consequence of Corollary \ref{cor410}, since ${\tt PGF}(RG)\subseteq {\tt GFlat}(RG)$.
\end{proof}
\begin{Corollary} Let $R$ be a commutative ring of finite weak global dimension and $G$ be a group of type $\Phi_R$. Then, $\mathscr{X}_{B,{\tt Proj}}={\tt PGF}(RG)={\tt WGProj}(RG)={\tt GProj}(RG)$, where $\mathscr{X}_{B,{\tt Proj}}=\{M\in \textrm{Mod}(RG): \, M\otimes_R B(G,R)\in {\tt Proj}(RG)\}$ is the class of Benson's cofibrants.
\end{Corollary}
\begin{proof}Invoking Corollary \ref{cor410}, it suffices to show that $\mathscr{X}_{B,{\tt PGF}}\subseteq\mathscr{X}_{B,{\tt Proj}}$. Let $M\in \mathscr{X}_{B,{\tt PGF}}$. Then, $M\in {\tt WGProj}(RG)\subseteq {\tt WGFlat}(R)$, and hence the finiteness of $\textrm{wgl.dim}(R)$ implies that $M$ is $R$-flat. Since $\textrm{fd}_{RG} B(G,R)<\infty$, we obtain that $\textrm{fd}_{RG}M\otimes_R B(G,R)<\infty$. We conclude that $M\otimes_R B(G,R)\in {\tt PGF}(RG)\cap \overline{{\tt Flat}}(RG)={\tt Proj}(RG)$ (see \cite[Lemma 5.2]{St}).
\end{proof}
\section{Gorenstein flat modules over $\textsc{\textbf{lh}}\mathfrak{F}$-groups} We consider a commutative ring $R$ such that $\textrm{sfli}R<\infty$ and an $\textsc{\textbf{lh}}\mathfrak{F}$-group $G$. Our goal in this section is to achieve the same characterization of the class ${\tt GFlat}(RG)$, in terms of the $RG$-module $B(G,R)$, as in Section 3. Firstly, we prove that the tensor product of a weak Gorenstein flat $RG$-module and an $R$-flat module (with diagonal action) is Gorenstein flat. Moreover, we obtain that the class ${\tt GFlat}(RG)$ coincides with the class ${\tt WGFlat}(RG)$. By doing so, we may replace the existence of a weak characteristic module for $G$ over $R$ with the property that $G$ is an $\textsc{\textbf{lh}}\mathfrak{F}$-group in all the previous results of Section 3.
\begin{Lemma}\label{llem45} Let $R$ be a commutative ring, $G$ be a group and $H$ be a subgroup of $G$. Then, for every Gorenstein flat $RH$-module $M$, the $RG$-module $\textrm{Ind}^G_H M$ is also Gorenstein flat.
\end{Lemma}
\begin{proof} Let $M$ be a Gorenstein flat $RH$-module. Then, there exists an acyclic complex of flat $RH$-modules $$\textbf{F}=\cdots \rightarrow F_{2}\rightarrow F_1\rightarrow F_0 \rightarrow F_{-1}\rightarrow \cdots,$$ such that $M=\textrm{Im}(F_1 \rightarrow F_0)$ and the complex $I\otimes_{RH}\textbf{F}$ is exact, whenever $I$ is an injective $RH$-module. Thus, the induced complex $$\textrm{Ind}^G_H\textbf{F}=\cdots \rightarrow\textrm{Ind}^G_H F_2 \rightarrow\textrm{Ind}^G_H F_1\rightarrow\textrm{Ind}^G_H F_0 \rightarrow\textrm{Ind}^G_H F_{-1}\rightarrow \cdots,$$ is an acyclic complex of flat $RG$-modules and has the $RG$-module $\textrm{Ind}^G_H M$ as syzygy. Since every injective $RG$-module $I$ is restricted to an injective $RH$-module, the isomorphism of complexes $I\otimes_{RG}\textrm{Ind}^G_H \textbf{F} \cong I\otimes_{RH}\textbf{F}$ implies that the $RG$-module $\textrm{Ind}^G_H M$ is Gorenstein flat.
\end{proof}
\begin{Proposition}\label{prop1}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Consider a weak Gorenstein flat $RG$-module $M$ and an $RG$-module $N$ which is flat as $R$-module. Then, $M\otimes_R N \in{\tt G Flat}(RG)$.
\end{Proposition}
\begin{proof}Let $M\in{\tt WGFlat}(RG)$ and $N\in{\tt Flat}(R)$. We will first show that $M\otimes_R N$ is Gorenstein flat as $RH$-module over any $\textsc{\textbf{h}}\mathfrak{F}$-subgroup $H$ of $G$. We use transfinite induction on the ordinal number $\alpha$, which is such that $H\in \textsc{\textbf{h}}_{\alpha}\mathfrak{F}$. If $\alpha=0$, then $H$ is finite and hence $\textrm{Ghd}_R H=0$, by \cite[Proposition 3.6]{St1}. Invoking \cite[Proposition 5.7]{St}, we obtain that $\textrm{sfli}(RH)\leq \textrm{sfli}R <\infty$. Since $M\in {\tt WGFlat}(RG)\subseteq {\tt WGFlat}(RH)$ and $N\in{\tt Flat}(R)$, we obtain that $M\otimes_R N \in {\tt WGFlat}(RH)$. Thus, the finiteness of $\textrm{sfli}(RH)$ implies that $M\otimes_R N \in {\tt GFlat}(RH)$. Now we assume that $M\otimes_R N$ is Gorenstein flat as $RH'$-module for every $\textsc{\textbf{h}}_{\beta}\mathfrak{F}$-subgroup $H'$ of $G$ and every $\beta<\alpha$. Let $H$ be an $\textsc{\textbf{h}}_{\alpha}\mathfrak{F}$-subgroup of $G$. Then, there
exists an exact sequence of $\mathbb{Z}H$-modules $$0\rightarrow C_r \rightarrow \cdots \rightarrow C_1 \rightarrow C_0 \rightarrow \mathbb{Z} \rightarrow 0,$$ where each $C_i$
is a direct sum of permutation $\mathbb{Z}H$-modules of the form $\mathbb{Z}[H/H']$, with $H'$ an $\textsc{\textbf{h}}_{\beta}\mathfrak{F}$-subgroup of $H$ for some $\beta<\alpha$. We note that the integer $r$ is the dimension of the $H$-CW-complex provided by the definition of $H$ being an $\textsc{\textbf{h}}_{\alpha}\mathfrak{F}$-group. The above exact sequence yields an exact sequence of $RH$-modules of the form
\begin{equation}\label{eqqqq}
0\rightarrow K_r \rightarrow \cdots \rightarrow K_1 \rightarrow K_0 \rightarrow M\otimes_R N \rightarrow 0,
\end{equation}
such that every $K_i$ is a direct sum of modules of the form ${\textrm{Ind}^H_{H'}}{\textrm{Res}^H_{H'}} (M \otimes_R N)$, where $H'\in \textsc{\textbf{h}}_{\beta}\mathfrak{F}$, $\beta<\alpha$ (see also \cite[Lemma 2.3]{Bis2}).
Our induction hypothesis implies that ${\textrm{Res}^H_{H'}} (M \otimes_R N)$ is a Gorenstein flat $RH'$-module. Invoking Lemma \ref{llem45}, we infer that ${\textrm{Ind}^H_{H'}}{\textrm{Res}^H_{H'}}(M \otimes_R N)$ is a Gorenstein flat $RH$-module. Since the class ${\tt GFlat}(RH)$ is closed under direct sums, we obtain that the $RH$-module $K_i$ is Gorenstein flat, for every $i=0,\dots r$. Thus, the exact sequence (\ref{eqqqq}) yields $\textrm{Gfd}_{RH}(M\otimes_R N)\leq r$. Moreover, $M\in {\tt WGFlat}(RG)$, and hence there exists an exact sequence of $RG$-modules of the form $$0\rightarrow M \rightarrow F_{r-1} \rightarrow \cdots \rightarrow F_1 \rightarrow F_0 \rightarrow M' \rightarrow 0,$$ where $F_i$ is flat for every $i=0,1,\dots,r-1$ and $M'\in {\tt WGFlat}(RG)$. Since $N$ is $R$-flat, we obtain the induced exact sequence of $RG$-modules (with diagonal action)
\begin{equation}\label{eqqq}
0\rightarrow M \otimes_R N\rightarrow F_{r-1}\otimes_R N \rightarrow \cdots \rightarrow F_0\otimes_R N \rightarrow M'\otimes_R N \rightarrow 0,
\end{equation}where $F_i \otimes_R N$ is a flat $RG$-module (and hence is flat as $RH$-module) for every $i=0,1,\dots,r-1$. The same argument as above for the $RG$-module $M' \in {\tt WGFlat}(RG)$ yields $\textrm{Gfd}_{RH}(M'\otimes_R N)\leq r$. Since every ring is ${\tt GF}$-closed, using \cite[Theorem 2.8]{Ben}, we conclude that $M\otimes_R N$ is a Gorenstein flat $RH$-module.
Let $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Then, $G$ can be expressed as the filtered union of its finitely generated subgroups $(G_{\lambda})_{\lambda}$, which are all contained in $\textsc{\textbf{h}}\mathfrak{F}$. Since $G_{\lambda}\in \textsc{\textbf{h}}\mathfrak{F}$, the $RG_{\lambda}$-module $M\otimes_R N$ is Gorenstein flat. Invoking Lemma \ref{llem45}, we obtain that the $RG$-module $\textrm{Ind}^G_{G_{\lambda}}(M\otimes_R N)$ is Gorenstein flat as well. Thus, the $RG$-module $M\otimes_R N\cong {\lim\limits_{\longrightarrow}}_{\lambda}\textrm{Ind}^G_{G_{\lambda}}(M\otimes_R N)$ is Gorenstein flat as a direct limit of Gorenstein flat modules (see \cite[Corollary 4.12]{SS}).\end{proof}
\begin{Corollary}\label{theor1}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Then, ${\tt WG Flat}(RG)\subseteq\mathscr{X}_{B,{\tt GFlat}}$.
\end{Corollary}
\begin{proof}Since the $RG$-module $B(G,R)$ is $R$-free, this is an immediate consequence of Proposition \ref{prop1}.
\end{proof}
\begin{Remark}\rm The existence of a weak characteristic module in Proposition \ref{ppst} may be replaced with the assumption that $G$ is an $\textsc{\textbf{lh}}\mathfrak{F}$-group.
\end{Remark}
\begin{Proposition}\label{theor2}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Then, $\mathscr{X}_{B,{\tt GFlat}}\subseteq{\tt GFlat}(RG)$.
\end{Proposition}
\begin{proof}Let $B=B(G,R)$, $\overline{B}=\overline{B}(G,R)$ and consider an $RG$-module $M$ such that the $RG$-module $M\otimes_R B$ is Gorenstein flat. Since the $RG$-module $\overline{B}$ is $R$-flat, we obtain that the $RG$-module $(M\otimes_R B)\otimes_R \overline{B}^{\otimes i}$ is Gorenstein flat for every $i\geq 0$, by Proposition \ref{prop1}. Given that, the proof is identical to that of Proposition \ref{ppst} (see also Remark \ref{remarkaki}).\end{proof}
\begin{Theorem}\label{cora}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Then, $\mathscr{X}_{B,{\tt GFlat}}={\tt GFlat}(RG)={\tt WGFlat}(RG)$.
\end{Theorem}
\begin{proof}Invoking Corollary \ref{theor1}, we have ${\tt WG Flat}(RG)\subseteq\mathscr{X}_{B,{\tt GFlat}}$. Moreover, Proposition \ref{theor2} yields $\mathscr{X}_{B,{\tt GFlat}}\subseteq{\tt GFlat}(RG)$ and the inclusion ${\tt GFlat}(RG)\subseteq{\tt WGFlat}(RG)$ is clear. We conclude that $\mathscr{X}_{B,{\tt GFlat}}={\tt GFlat}(RG)={\tt WGFlat}(RG)$, as needed.
\end{proof}
\section{Gorenstein projective and PGF modules over $\textsc{\textbf{lh}}\mathfrak{F}$-groups}
We consider a commutative ring $R$ such that $\textrm{sfli}R<\infty$ and an $\textsc{\textbf{lh}}\mathfrak{F}$-group $G$. Our goal in this section is to achieve the same characterization of the class ${\tt GProj}(RG)$, in terms of the $RG$-module $B(G,R)$, as in Section 4. Firstly, we prove that the tensor product of a weak Gorenstein projective $RG$-module and an $R$-projective module (with diagonal action) is PGF. Moreover, we obtain that the classes ${\tt GProj}(RG)$, ${\tt PGF}(RG)$ and ${\tt WGProj}(RG)$ coincide. As a result, we have that every Gorenstein projective $RG$-module is Gorenstein flat. By doing so, we may replace the existence of a weak characteristic module for $G$ over $R$ with the property that $G$ is an $\textsc{\textbf{lh}}\mathfrak{F}$-group in all the previous results of Section 4.
\begin{Lemma}\label{Llem45}{\rm(\cite[Lemma 2.12]{St1})} Let $R$ be a commutative ring, $G$ be a group and $H$ be a subgroup of $G$. Then, for every PGF $RH$-module $M$, the $RG$-module $\textrm{Ind}^G_H M$ is also PGF.
\end{Lemma}
\begin{Proposition}\label{Prop1}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Consider a weak Gorenstein projective $RG$-module $M$ and an $RG$-module $N$ which is projective as $R$-module. Then, $M\otimes_R N \in{\tt PGF}(RG)$.
\end{Proposition}
\begin{proof}Let $M\in{\tt WGProj}(RG)$ and $N\in{\tt Proj}(R)$. We will first show that $M\otimes_R N$ is PGF as $RH$-module over any $\textsc{\textbf{h}}\mathfrak{F}$-subgroup $H$ of $G$. We use transfinite induction on the ordinal number $\alpha$, which is such that $H\in \textsc{\textbf{h}}_{\alpha}\mathfrak{F}$. If $\alpha=0$, then $H$ is finite and hence $\textrm{Ghd}_R H=0$, by \cite[Proposition 3.6]{St1}. Invoking \cite[Proposition 5.7]{St}, we obtain that $\textrm{sfli}(RH)\leq \textrm{sfli}R <\infty$. Since $M\in {\tt WGProj}(RG)\subseteq {\tt WGProj}(RH)$ and $N\in{\tt Proj}(R)$, we have $M\otimes_R N \in {\tt WGProj}(RH)$. Thus, the finiteness of $\textrm{sfli}(RH)$ implies that $M\otimes_R N \in {\tt PGF}(RH)$. Now we assume that $M\otimes_R N$ is PGF as $RH'$-module for every $\textsc{\textbf{h}}_{\beta}\mathfrak{F}$-subgroup $H'$ of $G$ and every $\beta<\alpha$. Let $H$ be an $\textsc{\textbf{h}}_{\alpha}\mathfrak{F}$-subgroup of $G$. Then, there
exists an exact sequence of $\mathbb{Z}H$-modules $$0\rightarrow C_r \rightarrow \cdots \rightarrow C_1 \rightarrow C_0 \rightarrow \mathbb{Z} \rightarrow 0,$$ where each $C_i$
is a direct sum of permutation $\mathbb{Z}H$-modules of the form $\mathbb{Z}[H/H']$, with $H'$ an $\textsc{\textbf{h}}_{\beta}\mathfrak{F}$-subgroup of $H$ for some $\beta<\alpha$. We note that the integer $r$ is the dimension of the $H$-CW-complex provided by the definition of $H$ being an $\textsc{\textbf{h}}_{\alpha}\mathfrak{F}$-group. The above exact sequence yields an exact sequence of $RH$-modules of the form
\begin{equation}\label{Eqqqq}
0\rightarrow K_r \rightarrow \cdots \rightarrow K_1 \rightarrow K_0 \rightarrow M\otimes_R N \rightarrow 0,
\end{equation}
such that every $K_i$ is a direct sum of modules of the form ${\textrm{Ind}^H_{H'}}{\textrm{Res}^H_{H'}} (M \otimes_R N)$, where $H'\in \textsc{\textbf{h}}_{\beta}\mathfrak{F}$, $\beta<\alpha$ (see also \cite[Lemma 2.3]{Bis2}).
Our induction hypothesis implies that ${\textrm{Res}^H_{H'}} (M \otimes_R N)$ is a PGF $RH'$-module. Invoking Lemma \ref{Llem45}, we infer that ${\textrm{Ind}^H_{H'}}{\textrm{Res}^H_{H'}}(M \otimes_R N)$ is a PGF $RH$-module. The class ${\tt PGF}(RH)$ is closed under direct sums, and hence the $RH$-module $K_i$ is PGF, for every $i=0,\dots r$. Thus, the exact sequence (\ref{Eqqqq}) yields $\textrm{PGF-dim}_{RH}(M\otimes_R N)\leq r$. Moreover, $M\in {\tt WGProj}(RG)$, and hence there exists an exact sequence of $RG$-modules of the form $$0\rightarrow M \rightarrow P_{r-1} \rightarrow \cdots \rightarrow P_1 \rightarrow P_0 \rightarrow M' \rightarrow 0,$$ where $P_i$ is projective for every $i=0,1,\dots,r-1$ and $M'\in {\tt WGProj}(RG)$. As $N$ is $R$-projective, we obtain the induced exact sequence of $RG$-modules (with diagonal action)
\begin{equation}\label{Eqqq}
0\rightarrow M \otimes_R N\rightarrow P_{r-1}\otimes_R N \rightarrow \cdots \rightarrow P_0\otimes_R N \rightarrow M'\otimes_R N \rightarrow 0,
\end{equation}where $P_i \otimes_R N$ is a projective $RG$-module (and hence projective as $RH$-module) for every $i=0,1,\dots,r-1$. The same argument as above for the weak Gorenstein projective $RG$-module $M'$ shows that $\textrm{PGF-dim}_{RH}(M'\otimes_R N)\leq r$. Invoking \cite[Proposition 2.2]{DE}, we conclude that $M\otimes_R N$ is a PGF $RH$-module.
Let $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. We will proceed by induction on the cardinality of $G$. If $G$ is a
countable group, then $G$ acts on a tree with stabilizers certain finitely generated subgroups of $G$, and hence $G\in \textsc{\textbf{h}}\mathfrak{F}$. Thus, we assume that $G$ is uncountable. The group $G$ may then be expressed as a continuous ascending union of subgroups $G = \cup_{\lambda<\delta} G_{\lambda}$, for some ordinal $\delta$, where each $G_{\lambda}$ has strictly smaller cardinality than $G$. By induction we have $M\otimes_R N$ is PGF as $RG_{\lambda}$-module, for every $\lambda<\delta$. Thus, invoking \cite[Proposition 4.5]{St1}, we infer that $\textrm{PGF-dim}_{RG}(M\otimes_R N)\leq 1$. Since $M\in {\tt WGProj}(RG)$, there exists a short exact sequence of $RG$-modules of the form $$0\rightarrow M \rightarrow P \rightarrow M'' \rightarrow 0,$$ where $M''\in {\tt WGProj}(RG)$ and $P\in {\tt Proj}(RG)$. As $N$ is $R$-projective, we obtain the following short exact sequence of $RG$-modules (with diagonal action) \begin{equation}\label{equu}0\rightarrow M\otimes_R N\rightarrow P \otimes_R N\rightarrow M''\otimes_R N\rightarrow 0,
\end{equation} where the $RG$-module $P\otimes_R N$ is projective. The same argument as before for the $RG$-module $M'' \in {\tt WGProj}(RG)$ yields $\textrm{PGF-dim}_{RG}(M''\otimes_R N)\leq 1$, and hence the exact sequence (\ref{equu}) implies that the $RG$-module $M\otimes_R N$ is PGF, as needed.\end{proof}
\begin{Remark}\rm The existence of a weak characteristic module in Proposition \ref{nppst} may be replaced with the assumption that $G$ is an $\textsc{\textbf{lh}}\mathfrak{F}$-group.
\end{Remark}
\begin{Corollary}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Consider a Gorenstein projective $RG$-module $M$ and an $RG$-module $N$ which is projective as $R$-module. Then, $M\otimes_R N \in{\tt GProj}(RG)$.
\end{Corollary}
\begin{Corollary}\label{Theo1}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Then, ${\tt WG Proj}(RG)\subseteq\mathscr{X}_{B,{\tt PGF}}$.
\end{Corollary}
\begin{proof}Since the $RG$-module $B(G,R)$ is $R$-free, this is an immediate consequence of Proposition \ref{Prop1}.\end{proof}
\begin{Proposition}\label{Ppst} Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Then,
$\mathscr{X}_{B,{\tt PGF}}\subseteq {\tt PGF}(RG)$.
\end{Proposition}
\begin{proof}Let $B=B(G,R)$, $\overline{B}=\overline{B}(G,R)$ and consider an $RG$-module $M$ such that the $RG$-module $M\otimes_R B$ is PGF. Since the $RG$-module $\overline{B}$ is $R$-projective, we obtain that the $RG$-module $(M\otimes_R B)\otimes_R \overline{B}^{\otimes i}$ is PGF for every $i\geq 0$, by Proposition \ref{Prop1}. Given that, the proof is identical to that of Proposition \ref{nppst} (see also Remark \ref{Remarkaki}).
\end{proof}
\begin{Theorem}\label{Cora}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Then, $\mathscr{X}_{B,{\tt PGF}}={\tt PGF}(RG)={\tt WGProj}(RG)={\tt GProj}(RG)$.
\end{Theorem}
\begin{proof}Invoking Corollary \ref{Theo1} and Proposition \ref{Ppst}, we have the inclusions ${\tt WG Proj}(RG)\subseteq\mathscr{X}_{B,{\tt PGF}}\subseteq{\tt PGF}(RG)$. Moreover, ${\tt PGF}(RG)\subseteq {\tt GProj}(RG)$ by \cite[Theorem 4.4]{SS} and the inclusion ${\tt GProj}(RG)\subseteq{\tt WGProj}(RG)$ is clear. We conclude that $\mathscr{X}_{B,{\tt PGF}}={\tt PGF}(RG)={\tt WGProj}(RG)={\tt GProj}(RG)$, as needed.
\end{proof}
\begin{Corollary}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Then, ${\tt GProj}(RG)\subseteq {\tt GFlat}(RG)$.
\end{Corollary}
\begin{proof}This is a direct consequence of Theorem \ref{Cora}, since ${\tt PGF}(RG)\subseteq {\tt GFlat}(RG)$.
\end{proof}
\begin{Corollary}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Then, for every $RG$-module $M$ we have $\textrm{Gfd}_{RG}M\leq \textrm{Gpd}_{RG}M =\textrm{PGF-dim}_{RG}M$.
\end{Corollary}
\section{Gorenstein homological dimension of $\textsc{\textbf{lh}}\mathfrak{F}$-groups}
Our goal in this section is to determine the Gorenstein homological dimension $\textrm{Ghd}_R G$ of an $\textsc{\textbf{lh}}\mathfrak{F}$-group $G$ over a commutative ring of finite Gorenstein weak global dimension.
\begin{Definition}Let $R$ be a commutative ring and $G$ be a group.
$\textrm{f.k}(RG):=\textrm{sup}\{\textrm{fd}_{RG}M \, : \, M\in \textrm{Mod}(RG), \, \textrm{fd}_{RH}M<\infty \, \textrm{for every finite} \,\, H\leq G\}$.
$\textrm{fin.f.dim}(RG):=\textrm{sup}\{\textrm{fd}_{RG}M \, : \, M\in \textrm{Mod}(RG), \, \textrm{fd}_{RG}M<\infty\}$.
\end{Definition}
\begin{Lemma}\label{l220}Let $R$ be a commutative ring and $G$ be a group. Then, for every subgroup $H$ of $G$ we have $\textrm{fin.f.dim}(RH)\leq \textrm{fin.f.dim}(RG)$.
\end{Lemma}
\begin{proof}It suffices to assume that $\textrm{fin.f.dim}(RG)=n<\infty$. Let $M$ be an $RH$-module such that $\textrm{fd}_{RH}M=k<\infty$. Then, there exists an $RH$-flat resolution of $M$ of length $k$ $$0\rightarrow F_k \rightarrow \cdots \rightarrow F_1 \rightarrow F_0 \rightarrow M \rightarrow 0,$$ and hence we obtain an exact sequence of $RG$-modules of the form $$0\rightarrow \textrm{Ind}^G_H F_k \rightarrow \cdots \rightarrow \textrm{Ind}^G_H F_1 \rightarrow \textrm{Ind}^G_H F_0 \rightarrow \textrm{Ind}^G_H M\rightarrow 0,$$ which constitutes an $RG$-flat resolution of $\textrm{Ind}^G_H M$ of length $k$. Since $M$ is isomorphic to a direct summand of $\textrm{Res}^G_H \textrm{Ind}^G_H M$, we obtain that $\textrm{fd}_{RG}\textrm{Ind}^G_H M=k$. Thus, $\textrm{fd}_{RH}M=k\leq n$ for every $RH$-module $M$ of finite flat dimension. We conclude that $\textrm{fin.f.dim}(RH)\leq \textrm{fin.f.dim}(RG)$, as needed.
\end{proof}
\begin{Proposition}\label{prop224}Let $R$ be a commutative ring and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Then, $\textrm{f.k}(RG)\leq \textrm{fin.f.dim}(RG)$.
\end{Proposition}
\begin{proof}It suffices to assume that $\textrm{fin.f.dim}(RG)=n<\infty$. Let $M$ be an $RG$-module such that $\textrm{fd}_{RF}M<\infty$ for every finite subgroup $F$ of $G$. We will first show that $\textrm{fd}_{RH}M\leq n$ over any $\textsc{\textbf{h}}\mathfrak{F}$-subgroup $H$ of $G$. We use transfinite induction on the ordinal number $\alpha$, which is such that $H\in \textsc{\textbf{h}}_{\alpha}\mathfrak{F}$. If $\alpha=0$, then $H$ is finite and hence $\textrm{fd}_{RH}M<\infty$. Then, Lemma \ref{l220} yields $\textrm{fd}_{RH}M\leq\textrm{fin.f.dim}(RH)\leq \textrm{fin.f.dim}(RG)= n$. Now we assume that $\textrm{fd}_{RH'}M\leq n$ for every $\textsc{\textbf{h}}_{\beta}\mathfrak{F}$-subgroup $H'$ of $G$ and every $\beta<\alpha$. Let $H$ be an $\textsc{\textbf{h}}_{\alpha}\mathfrak{F}$-subgroup of $G$. Then, there
exists an exact sequence of $\mathbb{Z}H$-modules $$0\rightarrow C_r \rightarrow \cdots \rightarrow C_1 \rightarrow C_0 \rightarrow \mathbb{Z} \rightarrow 0,$$ where each $C_i$
is a direct sum of permutation $\mathbb{Z}H$-modules of the form $\mathbb{Z}[H/H']$, with $H'$ an $\textsc{\textbf{h}}_{\beta}\mathfrak{F}$-subgroup of $H$ for some $\beta<\alpha$. We note that the integer $r$ is the dimension of the $H$-CW-complex provided by the definition of $H$ being an $\textsc{\textbf{h}}_{\alpha}\mathfrak{F}$-group. The above exact sequence yields an exact sequence of $RH$-modules
\begin{equation}\label{eq14}
0\rightarrow M_r \rightarrow \cdots \rightarrow M_1 \rightarrow M_0 \rightarrow M \rightarrow 0,
\end{equation}
where each $M_i$ is a direct sum of modules of the form ${\textrm{Ind}^H_{H'}}{\textrm{Res}^H_{H'}} M$, where $H'\in \textsc{\textbf{h}}_{\beta}\mathfrak{F}$, $\beta<\alpha$ (see also \cite[Lemma 2.3]{Bis2}). Our induction hypothesis implies that $\textrm{fd}_{RH'}\textrm{Res}^H_{H'}M\leq n$, for every $H'\in \textsc{\textbf{h}}_{\beta}\mathfrak{F}$, $\beta<\alpha$, and hence we also have $\textrm{fd}_{RH}\textrm{Ind}^H_{H'}\textrm{Res}^H_{H'}M\leq n$ for every $H'\in \textsc{\textbf{h}}_{\beta}\mathfrak{F}$, $\beta<\alpha$. Consequently, $\textrm{fd}_{RH}M_i <\infty$, for every $i=0,\dots,r$, and equation (\ref{eq14}) yields $\textrm{fd}_{RH}M <\infty$. Invoking Lemma \ref{l220}, we infer that $\textrm{fd}_{RH}M\leq \textrm{fin.f.dim}(RH)\leq \textrm{fin.f.dim}(RG)= n$.
Let $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Then, $G$ can be expressed as the filtered union of its finitely generated subgroups $(G_{\lambda})_{\lambda}$, which are all contained in $\textsc{\textbf{h}}\mathfrak{F}$. Since $G_{\lambda}\in \textsc{\textbf{h}}\mathfrak{F}$, we have $\textrm{fd}_{RG_{\lambda}}M\leq n$. We consider an exact sequence of $RG$-modules
\begin{equation}\label{eq15}
0 \rightarrow K_n \rightarrow F_{n-1} \rightarrow \cdots \rightarrow F_1 \rightarrow F_0 \rightarrow M \rightarrow 0,
\end{equation}
where the $RG$-module $F_i$ is flat for every $i=0,\dots,n-1$. Then, $K_n$ is a flat $RG_{\lambda}$-module, and hence the $RG$-module $\textrm{Ind}^G_{G_{\lambda}}K_n$ is also flat for every $\lambda$. Consequently, the $RG$-module $K_n\cong {\lim\limits_{\longrightarrow}}_{\lambda}\textrm{Ind}^G_{G_{\lambda}}K_n$ is flat as direct limit of flat modules. Thus, the exact sequence (\ref{eq15}) yields $\textrm{fd}_{RG}M\leq n$. We conclude that $\textrm{f.k}(RG)\leq n$, as needed.
\end{proof}
\begin{Lemma}\label{prop225}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be a group. Then, $\textrm{sfli}(RG)\leq \textrm{f.k}(RG)$.
\end{Lemma}
\begin{proof}It suffices to assume that $\textrm{f.k}(RG)=n<\infty$. Let $I$ be an injective $RG$-module and $H$ a finite subgroup of $G$. Then, $\textrm{Ghd}_R H=0$ (see \cite[Proposition 3.6]{St1}), and hence \cite[Proposition 5.7]{St} yields $\textrm{sfli}(RH)\leq \textrm{sfli}R <\infty$. Since $I$ is injective as $RH$-module, we obtain that $\textrm{fd}_{RH}I<\infty$. It follows that $\textrm{fd}_{RG}I\leq \textrm{f.k}(RG)=n $, for every injective $RG$-module $I$. We conclude that $\textrm{sfli}(RG)\leq \textrm{f.k}(RG)$, as needed.
\end{proof}
\begin{Corollary}\label{cor226}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group. Then, $\textrm{f.k}(RG)=\textrm{sfli}(RG)=\textrm{fin.f.dim}(RG)$.
\end{Corollary}
\begin{proof}Since $RG\cong {(RG)}^{\textrm{op}}$, by \cite[Proposition 2.4(i)]{Emta} we obtain that $\textrm{fin.f.dim}(RG)\leq \textrm{sfli}(RG)$. Invoking Proposition \ref{prop224}, we have $\textrm{f.k}(RG)\leq \textrm{fin.f.dim}(RG)$. Moreover, Lemma \ref{prop225} yields $\textrm{sfli}(RG)\leq \textrm{f.k}(RG)$. We conclude that $\textrm{f.k}(RG)=\textrm{sfli}(RG)=\textrm{fin.f.dim}(RG)$, as needed.
\end{proof}
\begin{Remark}\label{rem711} \rm Since the $RG$-module $B(G,R)$ is $R$-free and admits an $R$-split $RG$-linear monomorphism $\iota: R \rightarrow B(G,R)$, we infer that $B(G,R)$ is a weak characteristic module for $G$ over $R$ if and only if $\textrm{fd}_{RG} B(G,R)<\infty$.
\end{Remark}
\begin{Theorem}\label{theo712}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and consider an $\textsc{\textbf{lh}}\mathfrak{F}$-group $G$. Then:
\begin{itemize}
\item[(i)] $B(G,R)$ is a weak characteristic module for $G$ if and only if ${{\textrm{Ghd}}_{R}G}<\infty$,
\item[(ii)] ${{\textrm{Ghd}}_{R}G}=\textrm{fd}_{RG} B(G,R)$.
\end{itemize}
\end{Theorem}
\begin{proof}(i) If $B(G,R)$ is a weak characteristic module, then \cite[Theorem 5.10]{St} implies that ${\textrm{Ghd}}_{R}G<\infty$. Conversely, we assume that ${\textrm{Ghd}}_{R}G<\infty$. Then, Corollary \ref{cor226} yields $\textrm{f.k}(RG)=\textrm{sfli}(RG)\leq {\textrm{Ghd}}_{R}G + \textrm{sfli}R <\infty$ (see \cite[Proposition 5.7]{St}). Since $B(G,R)$ is free as $RH$-module for every finite subgroup $H$ of $G$, we obtain that $\textrm{fd}_{RG}B(G,R)\leq \textrm{f.k}(RG)<\infty$. We conclude that $B(G,R)$ is a weak characteristic module for $G$ over $R$ (see Remark \ref{rem711}).
(ii) Using (i) and Remark \ref{rem711}, we have ${{\textrm{Ghd}}_{R}G}=\infty$ if and only if $\textrm{fd}_{RG}B(G,R)=\infty$. If ${{\textrm{Ghd}}_{R}G}<\infty$, then (i) implies that $B(G,R)$ is a weak characteristic module for $G$ over $R$, and hence, invoking \cite[Corollary 5.12(i),(iii)]{St}, we conclude that ${{\textrm{Ghd}}_{R}G}=\textrm{fd}_{RG} B(G,R)$.
\end{proof}
\begin{Remark}\rm \label{rem78}Let $R$ be a commutative ring and $G$ be a group such that $\textrm{fd}_{\mathbb{Z}G}B(G,\mathbb{Z})<\infty$. Then $\textrm{fd}_{RG}B(G,R)\leq \textrm{fd}_{\mathbb{Z}G}B(G,\mathbb{Z})<\infty$. Indeed, let $\textrm{fd}_{\mathbb{Z}G}B(G,\mathbb{Z})=n$ and consider a $\mathbb{Z}G$-flat resolution $$0\rightarrow F_n \rightarrow F_{n-1}\rightarrow \cdots \rightarrow F_0 \rightarrow B(G,\mathbb{Z}) \rightarrow 0,$$ of $B(G,\mathbb{Z})$. Since $B(G,\mathbb{Z})$ is $\mathbb{Z}$-free (and hence $\mathbb{Z}$-flat), the above exact sequence is $\mathbb{Z}$-pure. Thus, we obtain an exact sequence of $RG$-modules $$0\rightarrow F_n\otimes_{\mathbb{Z}}R \rightarrow F_{n-1}\otimes_{\mathbb{Z}}R \rightarrow \cdots \rightarrow F_0\otimes_{\mathbb{Z}}R \rightarrow B(G,\mathbb{Z})\otimes_{\mathbb{Z}}R =B(G,R) \rightarrow 0,$$ which constitutes an $RG$-flat resolution of $B(G,R)$, and hence $\textrm{fd}_{RG}B(G,R)\leq \textrm{fd}_{\mathbb{Z}G}B(G,\mathbb{Z})$.
\end{Remark}
\begin{Corollary}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group of type FP$_{\infty}$. Then, ${{\textrm{Ghd}}_{R}G}=\textrm{fd}_{RG} B(G,R)<\infty$.
\end{Corollary}
\begin{proof}The equality ${{\textrm{Ghd}}_{R}G}=\textrm{fd}_{RG} B(G,R)$ follows from Theorem \ref{theo712}. Since the $\textsc{\textbf{lh}}\mathfrak{F}$-group is of type FP$_{\infty}$, using \cite[Corollary B.2(2)]{Kr2}, which is also valid for $\textsc{\textbf{lh}}\mathfrak{F}$-groups, we infer that $\textrm{fd}_{\mathbb{Z}G} B(G,\mathbb{Z})<\infty$. Then, $\textrm{fd}_{RG}B(G,R)\leq \textrm{fd}_{\mathbb{Z}G}B(G,\mathbb{Z})< \infty$ (see Remark \ref{rem78}).
\end{proof}
\begin{Corollary}Let $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group of type FP$_{\infty}$. Then, ${{\textrm{Ghd}}_{\mathbb{Z}}G}=\textrm{fd}_{\mathbb{Z}G} B(G,\mathbb{Z})<\infty$.
\end{Corollary}
\begin{Corollary}Let $R$ be a commutative ring such that $\textrm{sfli}R<\infty$ and $G$ be an $\textsc{\textbf{lh}}\mathfrak{F}$-group of type FP$_{\infty}$. Then, $\textrm{f.k}(RG)=\textrm{sfli}(RG)=\textrm{fin.f.dim}(RG)<\infty$. In particular, if $M$ is an $RG$-module, then $\textrm{fd}_{RG}M<\infty$ if and only if $\textrm{fd}_{RH}M<\infty$ for every finite subgroup $H$ of $G$.
\end{Corollary}
\begin{proof}In view of Corollary \ref{cor226}, it suffices to prove that $\textrm{sfli}(RG)<\infty$. Invoking \cite[Corollary B.2(2)]{Kr2}, which is also valid for $\textsc{\textbf{lh}}\mathfrak{F}$-groups, and Remarks \ref{rem711}, \ref{rem78}, we obtain that $B(G,R)$ is a weak characteristic module for $G$ over $R$. Thus, \cite[Theorem 5.10]{St} yields $\textrm{sfli}(RG)<\infty$, as needed.\end{proof}
{\small {\sc Department of Mathematics,
University of Athens,
Athens 15784,
Greece}}
{\em E-mail address:} {\tt [email protected]}
\end{document} |
\begin{document}
\draft
\title{Efficient Raman Sideband Generation in a Coherent Atomic Medium}
\author{A.F. Huss, N. Peer, R. Lammegger, E.A. Korsunsky, and L. Windholz}
\address{Institut f\"{u}r Experimentalphysik, Technische Universit\"{a}t Graz, A-8010
Graz, Austria}
\date{\today{}}
\maketitle
\begin{abstract}
We demonstrate the efficient generation of Raman sidebands in a medium
coherently prepared in a dark state by continuous-wave low-intensity laser
radiation. Our experiment is performed in sodium vapor excited in $\Lambda $
configuration on the D$_{1}$ line by two laser fields of resonant
frequencies $\omega _{1}$ and $\omega _{2}$, and probed by a third field $
\omega _{3}$. First-order sidebands for frequencies $\omega _{1}$, $\omega
_{2}$ and up to the third-order sidebands for frequency $\omega _{3}$ are
observed. The generation starts at a power as low as 10 microwatt for each
input field. Dependencies of the intensities of both input and generated
waves on the frequency difference ($\omega _{1}-\omega _{2}$), on the
frequency $\omega _{3}$ and on the optical density are investigated.
\end{abstract}
\pacs{42.50.Gy, 32.80.Qk, 42.50.Hz}
\section{Introduction}
Nonlinear optics assisted by electromagnetically induced transparency (EIT)
\cite{harr97} has attracted a great deal of attention in recent years. The
effect of EIT is due to creation, via interference of the excitation paths,
of a coherent superposition of quantum states which does not participate in
the atom-field interaction (``dark'' state) \cite{cpt}. The preparation of
atoms in this superposition gives rise to a strongly reduced absorption and
refraction of the medium, and at the same time, it may lead to enhancement
of the nonlinear optical susceptibility \cite{harr90}. Therefore, nonlinear
optical processes are very efficient in such a coherently prepared medium
(which is sometimes called ``phaseonium'').
There are several directions in the research of EIT-assisted nonlinear
optics which are actively developed at present. One is an efficient
nonlinear frequency conversion and generation of coherent electromagnetic
radiation unattainable by conventional methods. For example, up-conversion
to VUV wavelengths outside the transparency window of most birefringent
nonlinear crystals has been experimentally demonstrated with unity
photon-conversion efficiency \cite{gener,mer99}; and a high-efficient scheme
for generation of c.w. terahertz radiation by use of EIT has been proposed
\cite{thz}. Another interesting application of the coherent medium concept
is a laser frequency modulation by parametric Raman sideband generation,
with a total bandwidth extending over the infrared, visible, and ultraviolet
spectral regions, and with a possibility of subfemtosecond pulse compression
\cite{sok,hak}. The third very promising direction is based on the fact that
the intensity ``threshold'' for EIT is given by the decay rate of the dark
state, which can be made extremely small if the dark state is a
superposition of atomic ground state sublevels. For example, a rate below 40
Hz has been observed in a cell with buffer gas \cite{wyn97}, and of order of
1 Hz in a cell with antirelaxation coating \cite{m-o1}. Then, the necessary
intensity corresponds to only a few photons per atomic cross section \cite
{harr98,harr99,luk99}. Therefore, one may expect a highly nonlinear response
of phaseonium to applied e.m. fields at very low intensity levels, even at a
few photon level. The potential of phaseonium has been demonstrated in a
recent series of exciting experiments where the group velocity for a light
pulse of the order of a few meters per second has been measured \cite
{m-o1,hau99,kash99}. Such a slow light propagation velocity is an indication
of huge optical nonlinearities necessary for strong interactions between
very weak optical fields. Recent theoretical work shows that this regime can
be used for photon switching \cite{harr98}, for quantum noise correlations
\cite{luk99}, for generation of nonclassical states of the e.m. field and
atomic ensembles, including entangled states \cite{agar93,luk00a,luk00b},
and for quantum information processing \cite{luk00a,luk00b,flei00}.
In the present paper we report on the experimental observation of an
EIT-assisted nonlinear optical process which combines the latter two
directions: generation of a broad spectrum of Raman sidebands in a medium
with small dark state decay rate, hence having a high efficiency even at low
input light intensities. The process occurs in an atomic medium interacting
with laser radiation in a $\Lambda $ scheme (Fig. 1).
\begin{figure}
\caption{\small{$\Lambda $ system in atoms used in our experiment. (a) The $
\Lambda $-medium is prepared in the dark state by the resonant
pair of fields $\omega _{1}$ and $\omega _{2}$. (b) A third field $\omega _{3}$ generates the Raman sidebands $\omega _{3}\pm n\cdot \omega _{12}$.}}
\label{fig1}
\end{figure}
In this system, a pair
of laser frequencies $\omega _{1}$ and $\omega _{2}$ resonantly excites the $
\Lambda $ system (Fig. 1(a)), which leads to creation of a dark
superposition of both ground states $\left| 1\right\rangle $ and $\left|
2\right\rangle $ and to the preparation of the atoms in this state via
optical pumping. Atoms in the dark superposition act as a local oscillator
at frequency $\omega _{12}$ of the Raman transition. When a third frequency $
\omega _{3}$ is applied to the system, it will beat against the local
oscillator to produce sum and difference frequencies $\omega _{3}\pm n\cdot
\omega _{12}$, i.e., {\it Raman sideband frequencies} (Fig. 1(b)). $\omega
_{3}$ may be either of the resonant frequencies ($\omega _{1}$ or $\omega
_{2}$) applied off-resonance to the conjugated transition ($\left|
2\right\rangle -\left| 3\right\rangle $ or $\left| 1\right\rangle -\left|
3\right\rangle $, respectively). Otherwise, it might be derived from an
independent laser and tuned either on resonance or off resonance with one of
the transitions to some additional upper state (in this case, the
interaction scheme is called a double $\Lambda $ scheme). In our experiment,
we have realized these possibilities with all pump waves $\omega _{1}$, $
\omega _{2}$ and $\omega _{3}$ propagating collinearly, and we have observed
the manifold of the Raman sidebands for all three applied frequencies.
One should note that the use of a coherently prepared medium for the
efficient generation of Raman sidebands has also been theoretically proposed
in Ref. \cite{sok}, and very recently experimentally realized \cite{hak}. In
this work, however, the medium (molecular hydrogen and deuterium) is
prepared in the dark state by far-off-resonance pulsed radiation, which
requires very high intensities. At the same time, the total electromagnetic
energy dissipation is very small by virtue of the large detuning from any
(molecular) state. Therefore, a very broad spectrum of Raman sidebands can
be generated. In our scheme, in opposite, the coherent medium is prepared by
resonant low-intensity c.w. radiation. Since in this case the preparation
relies on the dissipative process of optical pumping \cite{kor97}, a part of
the energy is lost. Nevertheless, the generation is still quite efficient
even at very low pump powers. Besides our present work, the generation of a
single Raman sideband (Stokes field for frequency $\omega _{1}$) in coherent
medium prepared by low-intensity c.w. field has been observed in Ref. \cite
{kash99} for collinear geometry, and several Raman sidebands have been seen
in the related experiment of Ref. \cite{zib99} on parametric
self-oscillation with the counterpropagating driving waves. The aim of the
present experiment is to observe the generation of a broad spectrum of Raman
sidebands in collinear geometry and to study the dependence of each of them
on input laser parameters as well as on the optical density of the medium.
\section{Experimental setup}
Our $\Lambda $ system is generated by the excitation of sodium atoms in a
vapor cell. The lower states are the two hyperfine levels $F=1$ and $F=2$,
spaced by $\omega _{12}=1771.626$ MHz, of the ground state $3^{2}S_{1/2}$,
while the upper state is the hyperfine level $F^{\prime }=2$ of the excited
state $3^{2}P_{1/2}$. The vapor cell contains condensed Na and is
additionally filled with He as buffer gas at a pressure of 6 torr (at room
temperature). It is of cubic form with the length of 10.3 mm and is made of
sapphire to avoid darkening of the cell windows. To compensate stray
magnetic fields the Na cell is placed inside an arrangement of three
mutually orthogonal Helmholtz coils. For the same reason the heating wires
of the cell oven are made of non-magnetic material (Ta) and wound
bifilarly. The cell temperature is electronically controlled and stabilized
to an absolute accuracy of better than 1 ${{}^{\circ }}$C. We have performed
experiments for temperatures ranging from 100${{}^{\circ }}$C to 230${
{}^{\circ }}$C which corresponds to the saturated Na vapor density from 3$
\cdot $10$^{9}\,$cm$^{-3}$ to 2$\cdot $10$^{13}\,$cm$^{-3}$. The optical
density $\tau $ has been determined via the absorption of a very weak laser
beam tuned on resonance with $3^{2}S_{1/2},F=2-3^{2}P_{1/2},F^{\prime }=2$
transition: at $\tau =1$, a power of this beam falls by the factor $1/e$.
\begin{figure}
\caption{\small{Scheme of the experimental setup. See description
in the text.}}
\label{fig2}
\end{figure}
The three-frequency radiation is produced by two independent Ar$^{+}$
-laser-pumped dye lasers with a linewidth of about 1 MHz (Fig. 2). The first
laser system is used to produce EIT in the medium by resonant excitation of
the $\Lambda $-system $F=1-F=2-F^{\prime }=2$. The frequency of this first
laser is stabilized to the $F=2-F^{\prime }=2$ hyperfine transition of the D$
_{1}$ line (frequency $\omega _{1}$) with a frequency accuracy of 2-3 MHz by
use of saturation spectroscopy on an external temperature-stabilized Na
cell. A part of the beam from this laser, which is linearly vertical
polarized, is split off and led through an acousto-optical modulator (AOM)
driven by a precise tunable RF-generator at 1700-1800 MHz (resolution of
less than 1Hz). The first order sideband produced by the AOM is used as
radiation with the frequency $\omega _{2}$. When an AOM modulation frequency
of 1771.626 MHz is used, the first order sideband is exactly resonant to the
$F=1-F^{\prime }=2$ transition, which corresponds to the Raman resonance
necessary for establishment of EIT. A second cw-dye laser system identical
to the first one provides the laser beam of frequency $\omega _{3}$ with
horizontal linear polarization. The frequency $\omega _{3}$ can be tuned
through the (Doppler broadened with an FWHM of the order of 1 GHz) resonance
$3^{2}S_{1/2},F=2-3^{2}P_{1/2}$ in well defined steps over a range of 32 GHz.
The three laser beams are collinearly overlapped and circularly polarized by
a quarter wave plate (QW1). Thus the frequencies $\omega _{1}$ and $\omega
_{2}$ have in the cell the same circular polarization ($\sigma ^{+}$). This
polarization configuration was found in our preliminary experiments to
produce the best EIT conditions at small intensities. The third frequency $
\omega _{3}$ is circularly polarized in the opposite direction ($\sigma ^{-}$
). The combined light beam has a Gaussian transversal profile with a waist
of 0.8 mm, and is almost parallel inside the cell. The input power can be
adjusted with a neutral-density filter, and measured before the cell by a
photo diode (not shown in Fig. 2). After passing the Na cell, the light is
again linear polarized by a second quarter-wave plate (QW2) and the beams of
opposite polarizations are separated by use of a polarizing beamsplitter
cube (PBS). Each of the beams passes an optical spectrum analyzer (scanning
Fabry-Perot interferometer with a free spectral range of 2 GHz, SA1 and SA2)
and is detected on separate photo diodes (D1 and D2) connected to a storage
oscilloscope (OS1 or OS2). The oscilloscopes are read out by a data
acquisition system on a computer (PC). This setup allows us to observe each
frequency component, both input and generated ones. The waves $\omega _{1}$
and $\omega _{2}$ and their generated Raman sidebands (having $\sigma ^{+}$
polarization in the cell) are detected by the system (SA1, D1, OS1), while $
\omega _{3}$ and its Raman sidebands (having $\sigma ^{-}$ polarization in
the cell) are detected by the second system (SA2, D2, OS2). We should note
that the Raman sidebands have been observed in a setup with all three input
frequencies having the same $\sigma ^{+}$ polarization, too. However, the
efficiency was lower because in this case part of the atomic population is
optically pumped into the state $F=2,m_{F}=+2$ which is not excited by the
applied fields. Moreover, the oscilloscope picture was overcrowded and its
analysis was much more complicated.
\section{Results}
The first step of our experiments was the measurement of the dark state
relaxation rate $\Gamma $. For this purpose, the frequency $\omega _{3}$ was
blocked, and the transmission of frequencies $\omega _{1}$ and $\omega _{2}$
has been measured as a function of the AOM modulation frequency. The result
is a typical EIT transmission peak at 1771.6 MHz, with a halfwidth given by $
\delta _{EIT}=\Gamma +C\cdot I$, where $I$ is the total intensity of the
frequencies $\omega _{1}$ and $\omega _{2}$, and $C$ is some constant \cite
{cpt}. Thus, the axis offset value of the linear fit to the measured
dependence $\delta _{EIT}(I)$ can be used as an upper limit for $\Gamma $.
In this way, we obtained that the value of $\Gamma $ in our setup is below 3
kHz, which is determined by an AOM frequency jitter and a transit time
broadening (due to finite diffusion time of atoms through the beam).
\begin{figure}
\caption{\small{Typical oscilloscope signal showing the generation
of Raman sidebands for all three input frequencies. (a)
frequencies with $\sigma ^{+}$ polarization; (b) frequencies with $\sigma ^{-}$ polarization.}}
\label{fig3}
\end{figure}
Fig. 3 shows typical oscilloscope signals demonstrating the generation of
Raman sidebands for all three input frequencies. For the frequencies $\omega
_{1}$ and $\omega _{2}$ only the first-order sidebands (Stokes and
anti-Stokes fields, respectively) have been observed, while for the field $
\omega _{3}$ up to the third-order sidebands have been seen, with the
higher-order sidebands appearing at larger input power. The intensity of the
generated sidebands grows almost linearly with the input power in the range
of up to 2 mW for each of the waves $\omega _{1}$, $\omega _{2}$ and $\omega
_{3}$. The minimum input power necessary for the generation of the
first-order sidebands was found in our experiment to be of the order of 10 $
\mu W$ for each wave (the intensity is about 2 mW/cm$^{2}$). We stress that
the generation was achieved without the use of buildup cavities. We believe
that these results can be considered as an experimental confirmation of the
possibility for nonlinear-optical generation processes with a few photons.
With a pulse of a duration of a few $\mu s$ (as used, e.g., in experiment
Ref. \cite{hau99}) the intensity of 2 mW/cm$^{2}$ would correspond to the
energy of only a few light quanta per atomic cross-section. Thus, one is
approaching the regime of nonlinear quantum optics where a large
nonlinearity of the medium is created by single photons.
\begin{figure}
\caption{\small{Dependence of the transmitted light power (plotted
on a linear scale) on the Raman detuning $\delta _{R}$.}}
\label{fig4}
\end{figure}
The nonlinear generation at such low light intensities is already
an indirect confirmation of the EIT in action. For a direct
confirmation, we measured the dependence of all transmitted
frequencies on the AOM modulation frequency (Fig. 4). One can see
that the generation of all sideband frequencies occurs only in the
narrow range of Raman detuning $\delta _{R}=(\omega _{2}-\omega
_{1})-\omega _{12}$. This is exactly the same range where input
frequencies $\omega _{1}$ and $\omega _{2}$ experience reduced
absorption (EIT). The width of the range (10 - 200 kHz depending
on the input intensity) is much narrower than the natural width of
the excited state $3^{2}P_{1/2}$ (10 MHz), and its dependence on
the intensity of the
frequencies $\omega _{1}$ and $\omega _{2}$ follows the expected dependence $
\delta _{EIT}=\Gamma +C\cdot I$. The generation peak is shifted from exact
Raman resonance due to both the buffer gas effects and the a.c. Stark shift
\cite{wyn97,wyn99}. The a.c. Stark shift is a differential shift of the
ground states $3^{2}S_{1/2},F=1$ and $F=2$ due to the off-resonance coupling
to the second excited state $3^{2}P_{1/2},F^{\prime }=1$ of the D$_{1}$ line
(which is 189 MHz apart from the $3^{2}P_{1/2},F^{\prime }=2$ state). Our
measurements show that this shift is proportional to the {\it input}
intensity of the frequencies $\omega _{1}$ and $\omega _{2}$ with a slope
depending on the optical density; for the parameters of Fig. 4 ($\tau
\approx 6$) the slope is about 0.2 kHz/(mW/cm$^{2}$). The total absorption
of $\omega _{1}$ and $\omega _{2}$ in Fig. 4 corresponds to 98\% outside the
EIT transparency window, while at resonance the absorption reduces only
moderately to 93\%. One must conclude therefore that it is not only
reduction of absorption, but also enhancement of the nonlinear
susceptibility that assist the generation.
\begin{figure}
\caption{\small{Dependence of the transmitted light power (plotted
on a linear scale) on the detuning $\Delta _{3}$ of the $\omega _{3}$ wave.}}
\label{fig5}
\end{figure}
The intensity of both generated and transmitted pump fields considerably
depends on the value of frequency $\omega _{3}$. Figure 5 shows the
dependence of the intensities on detuning $\Delta _{3}$ of the $\omega _{3}$
wave from the transition $3^{2}S_{1/2},F=2-3^{2}P_{1/2},F^{\prime }=2$ in
the range of $\pm $16 GHz. The $\omega _{3}$ wave has almost no influence on
transmission of the resonant pump waves $\omega _{1}$ and $\omega _{2}$
(Fig. 5(a)) and their Raman sidebands (Fig. 5(b)) when $\left| \Delta
_{3}\right| $ is much larger than the Doppler width $\Delta _{D}\approx $1
GHz of the D$_{1}$ line. The $\omega _{3}$ wave itself is absorbed very
weakly in this range (therefore it is not shown in Fig. 5(a)). However, as
the resonance with the D$_{1}$ line is approached, the $\Delta _{3}$
dependence becomes more and more dramatic. Starting from the value $\left|
\Delta _{3}\right| $ of about 8 GHz, one can observe weak intensity
oscillations whose amplitude drastically increases for $\Delta _{3}$ being
in the immediate range of the resonance. When $\omega _{3}$ is tuned close
to $3^{2}S_{1/2},F=2-3^{2}P_{1/2}$ ($\Delta _{3}\approx -1.0\div 0$ GHz),
transmission of the $\omega _{2}$ wave decreases, while that of the $\omega
_{1}$ wave increases (Fig. 5(a)). This is because a larger part of atomic
population is pumped into the state $3^{2}S_{1/2},F=1$. Similar process
occurs at $\omega _{3}$ being tuned close to $3^{2}S_{1/2},F=1-3^{2}P_{1/2}$
($\Delta _{3}\approx 1.7\div 2.7$ GHz), where transmission of the $\omega
_{1}$ wave decreases, while that of the $\omega _{2}$ wave increases. It is
interesting, however, that at these detunings the total transmitted
intensity of the $\omega _{1}$ and $\omega _{2}$ waves (solid curve in Fig.
5(a)) slightly increases over the ``transparency level'' in the absence of
the $\omega _{3}$ wave. Despite this fact, the intensity of generated
Stokes ($\omega _{1}-\omega _{12}$) and anti-Stokes ($\omega _{2}+\omega
_{12}$) fields diminishes sharply in the same range of detuning (Fig. 5(b)),
so that the total transmitted intensity of $\omega _{1}$, $\omega _{2}$ and
their sidebands remains approximately the same as for very large detunings.
This suggests that the absorption is not increased when the $\omega _{3}$
field is tuned on resonance, but the nonlinear susceptibility for the Stokes
and anti-Stokes fields decreases. At the same time, generation of Raman
sidebands of the $\omega _{3}$ field is maximized. All these facts indicate
that when $\omega _{3}$ is close to resonance with the D$_{1}$ line, the
processes of coherent scattering from each input wave into its Raman
sidebands become tightly coupled to one another and start to compete. Thus,
the decrease of the ($\omega _{1}-\omega _{12}$) and ($\omega _{2}+\omega
_{12}$) fields generation in the range of resonance may be explained by
direct competition of this process with the generation of ($\omega _{3}\pm
\omega _{12}$) fields. Inside the resonance range, the ($\omega _{1}-\omega
_{12}$) and ($\omega _{2}+\omega _{12}$) fields (Fig. 5(b)) reflect the
trend of the $\omega _{1}$ and $\omega _{2}$ fields (Fig. 5(a)),
respectively, i.e., increase of the $\omega _{1}$ intensity leads to
(moderate) increase of the ($\omega _{1}-\omega _{12}$) intensity, etc. On
the contrary, the behavior of the ($\omega _{3}\pm \omega _{12}$) sidebands
is more complicated: they reveal nice periodical oscillations with detuning $
\Delta _{3}$, shifted in phase with respect to each other. These
oscillations are apparently related to those observed in \cite{mer99} and
also theoretically predicted in \cite{kor99} for the generation of ($\omega
_{3}+\omega _{12}$) frequency in double $\Lambda $ atoms, which show a
sinusoidal dependence of the generated wave intensity on detuning $\Delta
_{3}$ and optical density $\tau $. So, the shift of the ($\omega _{3}+\omega
_{12}$)-wave intensity with respect to the ($\omega _{3}-\omega _{12}$) one
may be explained simply by their different detunings from the resonance.
Finally, Fig. 6 demonstrates the measurement results for the optical density
dependence of all the waves, both pump and generated ones. The measurements
have been performed by taking the oscilloscope pictures at different cell
temperatures corresponding to different optical densities. The results
presented in Fig. 6 are obtained for the particular case when $\omega _{3}$
is tuned exactly on resonance with transition $
3^{2}S_{1/2},F=2-3^{2}P_{1/2},F^{\prime }=1$. This configuration corresponds
to the all-resonant double $\Lambda $ system, where one expects generation
of the ($\omega _{4}=\omega _{3}+\omega _{12}$) wave resonant with the $
F=1-F^{\prime }=1$ transition, and propagation dynamics leading to the
matching of the field Rabi frequencies to relation $g_{1}/g_{2}=g_{3}/g_{4}$
\cite{kor99,mer00}. However, we have not observed such a matching.
\begin{figure}
\caption{\small{Optical density dependence of the transmitted
light power normalized to the input power $P_{in}$.}}
\label{fig6}
\end{figure}
One can
see from Fig. 6 that power of the pump waves $\omega _{1}$, $\omega _{2}$
and $\omega _{3}$ is attenuated monotonically with the optical density. This
attenuation occurs quite fast, with a rate being only slightly lower than
that given by Beer's exponential decay. There are at least two reasons for
this behavior. First of all, we do not have here a real all-resonant double $
\Lambda $ system since all the waves couple to all possible transitions
(which is, in fact, the reason for the broad Raman spectrum generation).
Therefore, the energy is transferred not only between the resonant fields,
but also goes to the off-resonant Raman sidebands. Second, the preparation
of the medium in the dark state relies in our case on optical pumping. In
this process, photons from the pump waves $\omega _{1}$, $\omega _{2}$ are
absorbed and then rescattered in part spontaneously to bring atomic
population in the dark state. The number of spontaneously scattered photons
is proportional to the excited state population which is in turn
proportional to the light intensity. Therefore, the pump beams experience
exponential losses during propagation in the medium. Nevertheless, the atoms
are prepared in the coherent superposition of both lower states $\left|
1\right\rangle $ and $\left| 2\right\rangle $, and as soon as they are
prepared, the generation of the Raman sidebands goes on very efficiently. As
can be seen from Fig. 6, the first-order sidebands appear already at quite
small optical densities ($\tau =1\div 2$), grow very fast and reach their
maximum at densities of the order of $\tau =2.5\div 5$. The energy
conversion efficiency is approaching the value of 5-7\% for the sidebands of
$\omega _{1}$ and $\omega _{2}$ fields, and of 3-4\% for the first-order
sidebands of $\omega _{3}$ field. At this point of maximum generation, the
intensity of the first-order sidebands is large enough to induce the
generation of the second-order sidebands. Immediately after the initial,
very fast, stage of generation, the Raman sidebands start to decay. The
resonant sideband ($\omega _{3}+\omega _{12}$) is attenuated as fast as the
pump waves, while the other Raman sidebands decay slowly due to large
detuning from any resonance and, hence, small absorption. It is interesting
that different decay rates give rise to a curious situation at larger
optical densities when the generated sidebands are more intense than the
pump fields.
\section{Conclusions}
In conclusion, we have experimentally demonstrated generation of Raman
sidebands in sodium atomic vapor excited on the D$_{1}$ line by resonant
c.w. optical fields of frequencies $\omega _{1}$ and $\omega _{2}$. The
first-order sidebands for frequencies $\omega _{1}$, $\omega _{2}$ and up to
the third-order sidebands for the probe field of frequency $\omega _{3}$
have been observed. The efficient generation takes place due to the
preparation of atoms in a dark superposition leading to reduced absorption
and enhanced nonlinear susceptibility. This has been directly confirmed by
measuring the frequency difference ($\omega _{1}-\omega _{2}$) dependence of
the transmission, which evidences that the generation of all sidebands as
well as the reduced absorption of pump fields occur only in the narrow range
around Raman resonance $\omega _{1}-\omega _{2}=\omega _{12}$. Since the
decay rate of the dark state is only a few kHz in our experiment, the
generation is efficient even at very low pump powers, with the threshold
value being about 10~$\mu$W for each input wave. Our measurements show that
the Raman sidebands are generated at any value of the frequency
$\omega _{3}$. However, the generation of Raman sidebands of the $\omega _{3}$ field is
maximized and competes with generation of the sidebands of $\omega _{1}$ and
$\omega _{2}$ fields when $\omega _{3}$ is close to resonance with the D$_{1}
$ line. Inside this resonance range, the sideband intensities reveal
periodical oscillations with $\omega _{3}$. In the present scheme, the
coherent medium is prepared with the c.w. radiation by means of the
dissipative process of optical pumping. Therefore, a large part of the
radiation energy is lost. This is reflected in the optical density
dependence exhibiting fairly fast and almost exponential decay of the
resonant pump waves. We believe that much smaller energy losses and,
correspondingly, higher conversion efficiency may be achieved by use of the
adiabatic passage technique for the preparation of phaseonium \cite{apt}.
Together with a further reduction of the dark state decay rate (which is
certainly possible by using better magnetic field shielding, stabilizing AOM
frequency and optimizing the buffer gas pressure), this should allow to
readily approach the regime of a few-photon nonlinear optics. Our
experimental results also give rise to the challenge of developing a theory
of Raman sideband generation by resonant c.w. radiation that will provide an
understanding of a complicated interplay of the participating e.m. waves.
This work was supported by the Austrian Science Foundation under project No.
P 12894.
\begin{references}
\bibitem{harr97} S.E. Harris, Physics Today {\bf 50}, No. 7, 36 (1997).
\bibitem{cpt} E. Arimondo, in {\it Progress in Optics,} ed. E. Wolf
(Elsevier, Amsterdam, 1996), vol. 35, p.257.
\bibitem{harr90} {S.E. Harris {\it et al.}}, Phys. Rev. Lett. {\bf 64},
1107 (1990).
\bibitem{gener} {G.Z. Zhang, D.W. Tokaryk, B.P. Stoicheff, and K. Hakuta,}
Phys. Rev. A {\bf 56}, 813 (1997); {M. Jain, H. Xia, G.Y. Yin, A.J. Merriam,
and S.E. Harris}, Phys. Rev. Lett. {\bf 77}, 4326 (1996); {A.J. Merriam,
S.J. Sharpe, H. Xia, D. Manuszak, G.Y. Yin, and S.E. Harris}, Opt. Lett.
{\bf 24}, 625 (1999).
\bibitem{mer99} {A.J. Merriam, S.J. Sharpe, H. Xia, D. Manuszak, G.Y. Yin,
and S.E. Harris}, {IEEE Journal of Selected Topics in Quantum Electronics}
{\bf 5}, 1502 (1999).
\bibitem{thz} E.A. Korsunsky and D.V. Kosachiov, J. Opt. Soc. Amer. B {\bf
17}, in press (2000).
\bibitem{sok} S.E. Harris and A.V. Sokolov, Phys. Rev. Lett. {\bf 81}, 2894
(1998); D.D. Yavuz, A.V. Sokolov, and S.E. Harris, Phys. Rev. Lett. {\bf 84}
, 75 (2000).
\bibitem{hak} K. Hakuta, M. Suzuki, M. Katsuragawa, and J.Z. Li, Phys. Rev.
Lett. {\bf 79}, 209 (1997); A.V. Sokolov, D.R. Walker, D.D. Yavuz, G.Y. Yin,
and S.E. Harris, unpublished; J.Q. Liang, M. Katsuragawa, Fam Le Kien, and
K. Hakuta, unpublished.
\bibitem{zib99} A.S. Zibrov, M.D. Lukin, and M.O. Scully, Phys. Rev. Lett.
{\bf 83}, 4049 (1999).
\bibitem{wyn97} {S. Brandt, A. Nagel, R. Wynands, and D. Meschede}, Phys.
Rev. A {\bf 56}, R1063 (1997).
\bibitem{m-o1} D. Budker, D.F. Kimball, S.M. Rochester, and V.V. Yashchuk,
Phys. Rev. Lett. {\bf 83}, 1767 (1999).
\bibitem{harr98} S.E. Harris and Y. Yamamoto, Phys. Rev. Lett. {\bf 81},
3611 (1998).
\bibitem{harr99} S.E. Harris and L. Hau, Phys. Rev. Lett. {\bf 82}, 4611
(1999).
\bibitem{luk99} M.D. Lukin, A.B. Matsko, M. Fleischhauer, and M.O. Scully,
Phys. Rev. Lett. {\bf 82}, 1847 (1999).
\bibitem{hau99} L.V. Hau, S.E. Harris, Z. Dutton, and C.H. Behroozi, Nature
{\bf 397}, 594 (1999).
\bibitem{kash99} M.M. Kash et al., Phys. Rev. Lett. {\bf 82}, 5229 (1999).
\bibitem{agar93} {G.S. Agarwal}, Phys. Rev. Lett. {\bf 71}, 1351 (1993).
\bibitem{luk00a} M.D. Lukin and A. Imamoglu, Phys. Rev. Lett. {\bf 84},
1419 (2000).
\bibitem{luk00b} M.D. Lukin, S.F. Yelin, and M. Fleischhauer, Phys. Rev.
Lett. {\bf 84}, 4232 (2000).
\bibitem{flei00} M. Fleischhauer and M.D. Lukin, Phys. Rev. Lett. {\bf 84},
5094 (2000).
\bibitem{kor97} {E.A. Korsunsky, W. Maichen, and L. Windholz}, Phys. Rev. A
{\bf 56}, 3908 (1997).
\bibitem{wyn99} {A. Nagel, S. Brandt, D. Meschede, and R. Wynands,}
Europhys. Lett. {\bf 48}, 385 (1999).
\bibitem{kor99} E.A. Korsunsky and D.V. Kosachiov, Phys. Rev. A {\bf 60},
4996 (1999).
\bibitem{mer00} {A.J. Merriam, S.J. Sharpe, M. Shverdin, D. Manuszak, G.Y.
Yin, and S.E. Harris}, Phys. Rev. Lett. {\bf 84}, 5308 (2000).
\bibitem{apt} K. Bergmann, H. Theuer, and B.W. Shore, Rev. Mod. Phys. {\bf
70}, 1003 (1998).
\end{references}
\end{document}
\begin{document}
\title{Semipurity of tempered Deligne cohomology}
\author{Jos\'e Ignacio Burgos Gil}
\address{Facultad de Matem\'aticas\\Universidad de Barcelona\\
Gran V\'\i{}a C. C. 585\\ Barcelona, Spain}
\thanks{Partially supported by Grants
BFM2003-02914 and MTM2006-14234-C02-01}
\begin{abstract}
In this paper we define the formal and tempered Deligne cohomology
groups, that are obtained by applying the Deligne complex functor
to the complexes of formal differential forms and tempered currents
respectively. We then prove the existence of a duality between them,
a vanishing theorem for the former and
a semipurity property for the latter. The motivation of these
results comes from the
study of covariant arithmetic Chow groups. The semi-purity property
of tempered Deligne cohomology implies, in particular, that several
definitions of covariant arithmetic Chow groups agree for
projective arithmetic varieties.
\end{abstract}
\maketitle
\tableofcontents
\section{Introduction}
\label{sec:introduction}
The aim of this note is to study some properties of formal and
tempered Deligne cohomology (with real coefficients). These cohomology
groups are defined by
applying the Deligne complex functor to the complexes of formal
differential forms and tempered currents respectively.
Let $X$ be a
complex projective manifold and let $W$ be a Zariski locally closed
subset of $X$. Let $i:W\longrightarrow X$ denote the inclusion and let
$i^{*},i^{!}, i_{\ast}, i_{!}$ be the induced functors in the derived
category of abelian sheaves. Then
the complex of formal differential forms of $W$
computes the cohomology of $W$ with compact supports. That is, it
computes the groups $H^{\ast}(X,i_{!}i^{\ast}\underline
{\mathbb{R}})$. The
complex of tempered currents on $W$ computes the cohomology of $X$
with
supports on $W$, that is, it computes the groups
$H^{\ast}(X,i_{*}i^{!}\underline {\mathbb{R}})$. Following Deligne, the
previous groups have a mixed Hodge structure, hence a Hodge
filtration that we will call the Deligne-Hodge filtration. The
complexes of formal differential forms and tempered
currents are examples of Dolbeault complexes (see
\cite{BurgosKramerKuehn:cacg}). Therefore they have a Hodge
filtration obtained from the bigrading of differential forms. In
general, this Hodge filtration does not induce the Deligne-Hodge
filtration in cohomology. Moreover, the spectral sequence associated
to this Hodge filtration does not degenerate at the $E^{1}$-term.
This implies that formal and tempered Deligne cohomology groups
with real coefficients will not have, in general, the same properties
as Deligne-Beilinson cohomology. For instance they do not need to be finite
dimensional. They have a structure of topological vector spaces,
but they may be non-separated.
Note however that, in the particular case when $W=X$, the formal and
tempered Deligne cohomology groups with real coefficients, agree with
the usual real Deligne cohomology groups.
In this note we will construct a (Poincar\'e like) duality between
formal Deligne
cohomology and tempered Deligne cohomology, that induces a perfect
pairing between the corresponding separated vector spaces. In particular,
applying this duality to the case $W=X$ we obtain an exceptional duality for
real Deligne Beilinson cohomology (corollary \ref{cor:3}) of smooth
projective varieties that, to my knowledge, is new. The shape of this
exceptional duality is very reminiscent of the functional equation of
$L$-functions. It would be interesting to know whether this duality
has any arithmetic meaning.
The second result is a vanishing result for formal Deligne cohomology.
Thanks to the previous duality, the vanishing result of formal Deligne
cohomology implies a semipurity property of
tempered Deligne cohomology (corollary
\ref{cor:1}).
The motivation for these results comes from the study of covariant
arithmetic Chow groups introduced in \cite{Burgos:acr} and
\cite{BurgosKramerKuehn:cacg}. The
covariant arithmetic Chow groups are a variant of the arithmetic Chow
groups defined by Gillet and Soul\'e, that are covariant for
arbitrary proper morphisms. By contrast, the groups defined by
Gillet and Soul\'e are only covariant for proper morphisms
between arithmetic varieties that induce smooth maps between the
corresponding complex varieties. The covariant arithmetic Chow groups
do not have a product structure, but they are a module over the
contravariant arithmetic Chow groups (see
\cite{BurgosKramerKuehn:cacg} for more details). Similar definitions
of covariant Chow groups have been given by Kawaguchi and Moriwaki
\cite{KawaguchiMoriwaki:isfav}
and by Zha \cite{zha99:_rieman_roch}. These two definitions are
equivalent except for the fact that Zha neglects the structure of real
manifold induced on the complex manifold associated to an arithmetic variety.
Although not explicitly stated, in the paper
\cite{BurgosKramerKuehn:cacg}, the covariant arithmetic
Chow groups are defined by means of tempered Deligne
cohomology. The semi-purity
property of tempered Deligne cohomology was announced and used in
\cite{BurgosKramerKuehn:cacg}. Hence this paper can be seen as a
complement of \cite{BurgosKramerKuehn:cacg}. A new consequence of the
semipurity
property is that, for an arithmetic variety that is generically
projective, the covariant Chow groups introduced in \cite{Burgos:acr}
and \cite{BurgosKramerKuehn:cacg}
are isomorphic to the covariant Chow groups introduced
by Kawaguchi and Moriwaki.
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . Acknowledgments.} In the course of preparing this manuscript,
I had many stimulating discussions with many colleagues. I would
like to thank them all. In particular, I would like to express my
gratitude to J.-B.~Bost, U.~K\"uhn, J.~Kramer, K.~K\"uhnemann, V.~Maillot,
D.~Roessler, C.~Schapira, J~Wildeshaus. Furthermore, I would
like to thank the CRM (Bellaterra, Barcelona), for partial support of
this work.
\section{Complexes of forms and currents}
\label{sec:compl-forms-curr}
By a complex algebraic manifold we will mean the analytic manifold
associated to a smooth scheme over $\mathbb{C}.$
Let $X$ be a projective complex algebraic manifold. We will consider
the following situation: let $Z\subset Y$
be closed subvarieties of $X$, let $U$ and $V$ be the open subsets
$U=X\setminus Y$, $V=X\setminus
Z$ and let $W$ be the locally closed subset $W=Y\setminus Z$.
\subsection{Flat forms and Whitney forms}
\label{sec:flat-forms-whitney}
\
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . The complex of Whitney forms.} Let
$\mathscr{E}^{\ast}_{X}$ denote the
sheaf of smooth differential forms on $X$. We will denote by $E^{\ast}(U)$ the complex
of global differential forms over $U$ and by $E^{\ast}_{c}(U)$ the complex of
differential forms with compact support.
Let
$\mathscr{E}^{\ast}_{X}(\fflat Y)$ denote the ideal sheaf of
differential forms that are flat
along $Y$. Recall that a differential form on $X$ is called flat
along $Y$ if its
Taylor expansion vanishes at all points of
$Y$. We write
\[\mathscr{E}^{\ast}_{Y^{\infty}}=
\mathscr{E}^{\ast}_{X}/\mathscr{E}^{\ast}_{X}(\fflat
Y).\]
The sections of this complex of sheaves are called Whitney forms on $Y$.
Whitney's extension theorem (\cite{Tougeron:Ifd} IV theorem 3.1),
gives us a precise description of the space of Whitney
forms in terms of jets over $Y$. For instance, if $Y$ is the smooth
subvariety of $\mathbb{C}^{n}$ defined by the equations $z_1=\dots
=z_{k}=0$, then the germ of the sheaf of Whitney functions on $Y$ at
the point $x=(0,\dots ,0)$ is
\begin{displaymath}
\mathscr{E}^{0}_{Y^{\infty},x}=
\mathscr{E}^{0}_{Y,x}[[z_{k+1},\dots ,z_{n},\bar z_{k+1},\dots ,\bar
z_{n}]].
\end{displaymath}
We will
write
\begin{displaymath}
\mathscr{E}^{\ast}_{Y^{\infty}}(\fflat Z)=
\mathscr{E}^{\ast}_{X}(\fflat Z)/\mathscr{E}^{\ast}_{X}(\fflat
Y).
\end{displaymath}
Observe that $\mathscr{E}^{\ast}_{Y^{\infty}}(\fflat Z)$ can also be
defined as the kernel of the morphism
\begin{displaymath}
\mathscr{E}^{\ast}_{Y^{\infty}}\longrightarrow
\mathscr{E}^{\ast}_{Z^{\infty}}.
\end{displaymath}
The sheaf $\mathscr{E}^{\ast}_{Y^{\infty}}(\fflat Z)$
agrees with the sheaf denoted $\mathbb{C}_{W}\overset{W}{\otimes }
\mathcal{C}_{X}^{\infty} $ in \cite{KashiwaraSchapira:mfcacs}.
The complex
$\mathscr{E}^{\ast}_{Y^{\infty}}(\fflat Z)$ is a complex of fine sheaves.
We will denote the corresponding
complex of global sections by
$E^{\ast}_{X^{\mathcal{W}}}(W):=\Gamma
(X,\mathscr{E}^{\ast}_{Y^{\infty}}(\fflat Z))$. Note that the complex
$E^{\ast}_{X^{\mathcal{W}}}(W)$ depends only on the locally closed
subspace $W\subset X$ and not on a particular choice of closed subsets $Y$ and
$Z$. Observe also that
$E^{\ast}_{X^{\mathcal{W}}}(X)=E^{\ast}(X)$ is the usual complex of smooth
differential forms on $X$.
We will denote by
$E^{\ast}_{X^{\mathcal{W}},\mathbb{R}}(W)$ the
real subcomplex underlying $E^{\ast}_{X^{\mathcal{W}}}(W)$.
By the
acyclicity of fine sheaves, there is a diagram of short exact
sequences
\begin{equation}\label{eq:1}
\xymatrix{ &&&0\ar[d]&\\
& 0 \ar[d]& 0 \ar[d]& E_{X^{\mathcal{W}}}^{\ast}(W) \ar[d]&\\
0\ar[r] &E_{X^{\mathcal{W}}}^{\ast}(U)\ar[r]\ar[d]&
E^{\ast}(X)\ar[r]\ar[d]&
E_{X^{\mathcal{W}}}^{\ast}(Y)\ar[r]\ar[d]&0\\
0\ar[r] &E_{X^{\mathcal{W}}}^{\ast}(V)\ar[r]\ar[d]&
E^{\ast}(X)\ar[r]\ar[d]&
E^{\ast}_{X^{\mathcal{W}}}(Z)\ar[r]\ar[d]&0\\
& E_{X^{\mathcal{W}}}^{\ast}(W) \ar[d]& 0 & 0 & \\
& 0 &&&
}
\end{equation}
The complex $E^{\ast}(X)$ is a topological vector space with the
$C^{\infty}$ topology. With this topology $E^{\ast}(X)$ is a Fr\'echet
topological vector space (\cite{bourbaki87:_topol_vector_spaces_chapt}
III p. 9). Moreover $E^{\ast}_{X^{\mathcal{W}}}(U)$ is a
closed subspace. In fact, by \cite{Tougeron:Ifd} V corollaire 1.6,
it is the closure of the
complex of differential forms that have compact support contained in
$U$, that we denote $E^{\ast}_{c}(U)$. More generally, all the
monomorphisms in
diagram \eqref{eq:1} are closed immersions.
The following result states that, since $U$ is an algebraic open subset of $X$,
the complex $E^{\ast}_{X^{\mathcal{W}}}(U)$
does not depend on $X$ but only on $U$.
\begin{proposition}\label{prop:3}
Let $\pi :\widetilde X\longrightarrow X$ be a proper birational
morphism with $D=\pi ^{-1}(Y)$ that induces an isomorphism between
$\widetilde X\setminus D$ and $U$. Then the natural map
\begin{displaymath}
\pi ^{\ast}:E^{\ast}(X)
\longrightarrow
E^{\ast}(\widetilde X)
\end{displaymath}
induces an isomorphism $\pi ^{\ast}:\Gamma (X,\mathscr{E}^{\ast}_{X}(\fflat Y))
\longrightarrow
\Gamma (\widetilde X,\mathscr{E}^{\ast}_{\widetilde X}(\fflat D))$.
\end{proposition}
\begin{proof}
By \cite{Poly:shcsesa} the morphism
\begin{displaymath}
\pi ^{\ast}:E^{\ast}(X)\longrightarrow E^{\ast}(\widetilde X)
\end{displaymath}
is a closed immersion. Since $\Gamma
(X,\mathscr{E}^{\ast}_{X}(\fflat Y))
$ and $
\Gamma (\widetilde X,\mathscr{E}^{\ast}_{\widetilde X}(\fflat D))$
are the closure of $E^{\ast}_{c}(U)$ in $E^{\ast}(X)$ and
$E^{\ast}(\widetilde X)$ respectively, they are identified by
$\pi ^{\ast}$.
\end{proof}
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . The cohomology of the complex of Whitney forms.} By
\cite{Poly:shcsesa} (see also \cite{BrasseletPflaum:_whitn} for a
more general statement) we have
\begin{proposition} \label{prop:4}
The complex $\mathscr{E}^{\ast}_{Y^{\infty}}$ is a resolution
of the constant
sheaf $\underline {\mathbb{C}}$ on $Y$ by fine sheaves. Therefore
\begin{displaymath}
H^{\ast}(E_{X^{\mathcal{W}}}^{\ast}(W))=H^{\ast}_{c}(W,\mathbb{C}),
\end{displaymath}
where $H^{\ast}_{c}$ denotes cohomology with compact supports.
$\square$
\end{proposition}
\subsection{Currents with support in a subvariety}
\label{sec:curr-with-supp}
\
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . The complex of currents.} We first recall the definition of the
complex of currents and we fix the sign convention and some
normalizations. We
will follow the conventions of \cite{BurgosKramerKuehn:cacg} \S 5.4
but with the homological grading.
Let $\mathscr
{D}_{n}^{X}$ be the sheaf of degree $n$ currents on $X$. That is,
for any open subset $V$ of $X$, the group $\mathscr
{D}_{n}^{X}(V)$ is the topological dual of the group of sections
with compact support $E^{n}_{c}(V)$. The
differential
\begin{displaymath}
\dd:\mathscr{D}_{n}^{X}\longrightarrow\mathscr{D}_{n-1}^{X}
\end{displaymath}
is defined by
\begin{displaymath}
\dd T(\varphi)=(-1)^{n}T(\dd\varphi);
\end{displaymath}
here $T$ is a current and $\varphi$ a test form.
Note that we are using the sign convention of, for instance
\cite{Jannsen:DcHD}, instead of the sign convention of
\cite{GriffithsHarris:pag}.
The bigrading
$\mathscr{E}^{n}_{X}=\bigoplus_{p+q=n}\mathscr{E}^{p,q}_{X}$ induces a
bigrading
$$\mathscr{D}_{n}^{X}=\bigoplus_{p+q=n}\mathscr{D}_{p,q}^{X},$$ with
$\mathscr
{D}_{p,q}^{X}(V)$ the topological dual of
$\Gamma_{c}(V,\mathscr{E}^{p,q}_{X})$.
The real structure of $\mathscr{E}^{n}_{X}$ induces a real structure
\begin{displaymath}
\mathscr{D}_{n}^{X,\mathbb{R}}\subset \mathscr{D}_{n}^{X}.
\end{displaymath}
We will denote
\begin{displaymath}
\mathscr{D}_{n}^{X,\mathbb{R}}(p)=\frac{1}{(2\pi i)^{p}}
\mathscr{D}_{n}^{X,\mathbb{R}}\subset \mathscr{D}_{n}^{X}.
\end{displaymath}
If $X$ is equidimensional of dimension $d$ we will write
\begin{equation}\label{eq:4}
\mathscr{D}^{n}_{X}=\mathscr{D}_{2d-n}^{X}, \quad
\mathscr{D}^{p,q}_{X}=\mathscr{D}_{d-p,d-q}^{X}, \quad
\text{and}\quad
\mathscr{D}^{n}_{X,\mathbb{R}}(p)=\mathscr{D}_{2d-n}^{X,\mathbb{R}}(d-p).
\end{equation}
We will use all the conventions of \cite{BurgosKramerKuehn:cacg} \S
5.4. In particular, if $y$ is an algebraic cycle of $X$ of dimension
$e$, we will write $\delta _{y}\in \mathscr{D}_{e,e}^{X}\cap
\mathscr{D}_{2e}^{X,\mathbb{R}}(e)$ for the current
\begin{displaymath}
\delta _{y}(\eta)=\frac{1}{(2\pi i)^{e}}\int_{y}\eta.
\end{displaymath}
Furthermore, there is an action
\begin{displaymath}
\begin{matrix}
\mathscr{E}^{n}_{X}\otimes\mathscr{D}_{m}^{X}&\longrightarrow &
\mathscr{D}_{m-n}^{X},\\
\omega\otimes T&\longmapsto &\omega
\land T
\end{matrix}
\end{displaymath}
where the current $\omega \land T$ is defined by
\begin{displaymath}
(\omega\land T)(\eta)=T(\eta\land\omega).
\end{displaymath}
This action induces actions
\begin{displaymath}
\mathscr{E}^{p,q}_{X}\otimes\mathscr{D}_{r,s}^{X}\longrightarrow
\mathscr{D}_{r-p,s-q}^{X}, \quad\text{and}\quad
\mathscr{E}^{n}_{X,\mathbb{R}}(p)\otimes
\mathscr{D}_{m}^{X,\mathbb{R}}(q)\longrightarrow
\mathscr{D}_{m-n}^{X,\mathbb{R}}(q-p).
\end{displaymath}
Finally, if $X$ is equidimensional of dimension $d$, there is a
fundamental current $\delta _{X}\in \mathscr{D}_{d,d}^{X}\cap
\mathscr{D}_{2d}^{X,\mathbb{R}}(d)$, and a morphism
\begin{equation}\label{eq:11}
\mathscr{E}^{\ast}_{X}\longrightarrow \mathscr{D}_{2d-\ast}^{X}=
\mathscr{D}^{\ast}_{X},\quad\omega \longmapsto [\omega ]=\omega
\land \delta _{X}.
\end{equation}
This morphism sends $\mathscr{E}^{n}_{X\mathbb{R}}(p)$ to
$\mathscr{D}_{2d-n}^{X,\mathbb{R}}(d-p)=\mathscr{D}^{n}_{X,\mathbb{R}}(p)$.
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . Currents with support on a subvariety and tempered currents.}
As in the previous section let $Z\subset Y$ denote two closed
subvarieties of $X$
and put $U=X\setminus Y$, $V=X\setminus Z$ and $W=Y\setminus Z$.
We denote by $\mathscr
{D}_{\ast}^{Y^{\infty}}$ the subcomplex of $\mathscr{D}_{\ast}^{X}$
formed by currents with support on
$Y$. In other words, for any open subset $U'$ of $X$ we have
\begin{displaymath}
\mathscr
{D}_{n}^{Y^{\infty}}(U')=\{T\in \mathscr
{D}_{n}^{X}(U')\mid T(\eta)=0,\ \forall \eta\in
\Gamma_{c}(U'\cap U,\mathscr{E}^{n}_{X})\}.
\end{displaymath}
Observe that, by continuity, the sections of
$\mathscr{D}_{n}^{Y^{\infty}}(U')$ vanish on the subgroup $\Gamma
_{c}(U',\mathscr{E}^{\ast}_{X}(\fflat Y))$.
We write $\mathscr{D}_{n}^{X/Y^{\infty}}=\mathscr
{D}_{n}^{X}\left / \mathscr
{D}_{n}^{Y^{\infty}}\right .$ and $
\mathscr{D}_{n}^{Y^{\infty}/Z^{\infty}}=
\mathscr{D}_{n}^{Y^{\infty}}/\mathscr{D}_{n}^{Z^{\infty}}.$
As in the case of differential forms, the complex $
\mathscr{D}_{n}^{Y^{\infty}/Z^{\infty}}$ can also be defined as the
kernel of the morphism
\begin{displaymath}
\mathscr{D}_{n}^{X/ Z^{\infty}} \longrightarrow
\mathscr{D}_{n}^{X/ Y^{\infty}}.
\end{displaymath}
All the above sheaves inherit a bigrading and a real structure.
Observe that, except for the fact that we are using here the
homological grading, the complex of sheaves $\mathscr{D}_{n}^{X/ Y^{\infty}}$
agrees with the complex denoted by
$\mathcal{TH}om(\mathbb{C}_{W},\mathcal{D}b_{X})$ in
\cite{KashiwaraSchapira:mfcacs}.
The complex $\mathscr{D}_{n}^{ Y^{\infty}/Z^{\infty}}$ is a complex of fine
sheaves. We will denote the complex of global sections by
$D_{\ast}^{X^{\mathcal{T}}}(W^{\infty})=\Gamma (X,
\mathscr{D}_{\ast}^{Y^{\infty}/Z^{\infty}})$. Thus the complex
$D_{\ast}^{X^{\mathcal{T}}}(W^{\infty})$ is defined for any Zariski
locally closed subset $W\subset X$. The corresponding real
complex will be denoted by $D_{\ast}^{X^{\mathcal{T}},\mathbb{R}}(W^{\infty})$.
By \cite{Poly:shcsesa}, the complex $D_{\ast}^{X^{\mathcal{T}}}(U)$ can be
identified with the
image of the morphism
\begin{displaymath}
D^{\ast}(X)\longrightarrow D^{\ast}(U).
\end{displaymath}
That is, it is the complex of currents on $U$ that can be extended to a
current on the whole $X$. The elements of $D_{\ast}^{X^{\mathcal{T}}}(U)$ will
be called tempered currents. In the literature they are also called
moderate, temperate or extendable currents. Moreover, as was the case with the complex
$E^{\ast}_{X^{\mathcal{W}}}(U)$, since $U$ is a Zariski open subset, the
complex $D_{\ast}^{X^{\mathcal{T}}}(U)$
only depends on $U$
and not on $X$.
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . The pairing between forms and currents.} We have already
introduced an action
\begin{equation}\label{eq:5}
E^{n}(X)\otimes D_{m}(X)\longrightarrow
D_{m-n}(X),\quad\omega\otimes T\longmapsto\omega
\land T,
\end{equation}
where the current $\omega \land T$ is defined by
\begin{displaymath}
(\omega\land T)(\eta)=T(\eta\land\omega).
\end{displaymath}
The subspace $D_{\ast}^{X^{\mathcal{T}}}(Y)$ is invariant under this action and
annihilates
the subspace $E_{X^{\mathcal{W}}}^{\ast}(U)$. Therefore we obtain induced actions
\begin{equation}\label{eq:6}
E^{n}_{X^{\mathcal{W}}}(Y)\otimes D^{X^{\mathcal{T}}}_{m}(Y)\longrightarrow
D^{X^{\mathcal{T}}}_{m-n}(Y),\qquad E^{n}_{X^{\mathcal{W}}}(U)\otimes
D_{m}^{X^{\mathcal{T}}}(U)\longrightarrow
D_{m-n}^{X^{\mathcal{T}}}(U)
\end{equation}
and, more generally, an action
\begin{equation}\label{eq:7}
E^{n}_{X^{\mathcal{W}}}(W)\otimes
D_{m}^{X^{\mathcal{T}}}(W)\longrightarrow
D_{m-n}^{X^{\mathcal{T}}}(W).
\end{equation}
Since $X$ is proper, there is a canonical morphism
\begin{displaymath}
\deg: D_{0}(X)\longrightarrow \mathbb{C}
\end{displaymath}
given by $\deg(T)=T(1)$. Observe that
$\deg(D_{0}^{\mathbb{R}}(X))\subset \mathbb{R}$.
Combining the degree and the above actions, we recover the pairing
\begin{displaymath}
E^{n}(X)\otimes D_{n}(X)\longrightarrow \mathbb{C},
\end{displaymath}
that identifies $D_{n}(X)$ with the topological dual of
$E^{n}(X)$. Under this identification, the subspace $E^{n}_{X^{\mathcal{W}}}(U)$
is the orthogonal to the subspace $D_{n}^{X^{\mathcal{T}}}(Y)$. Therefore
$D_{n}^{X^{\mathcal{T}}}(U)$ is the topological dual of
$E^{n}_{X^{\mathcal{W}}}(U)$
and $D_{n}^{X^{\mathcal{T}}}(Y)$ is the topological dual of
$E^{n}_{X^{\mathcal{W}}}(Y)$. More generally $D_{n}^{X^{\mathcal{T}}}(W)$ is
the topological dual of $E^{n}_{X^{\mathcal{W}}}(W)$. Note that
here, the key point is the fact that $E^{n}_{X^{\mathcal{W}}}(U)$ is the closure
of $\Gamma _{c}(U,\mathscr{E}^{n}_{X})$ and hence a closed subspace.
The above pairings induce a pairing
\begin{displaymath}
E^{n}_{\mathbb{R}}(X)(p)\otimes
D_{n}^{\mathbb{R}}(X)(p)\longrightarrow \mathbb{R},
\end{displaymath}
and similar pairings for the other complexes of forms and currents.
Finally, observe that there is a commutative diagram with exact rows
and columns
\begin{equation}\label{eq:2}
\xymatrix{ &&&0\ar[d]&\\
& 0 \ar[d]& 0 \ar[d]& D^{X^{\mathcal{T}}}_{\ast}(W) \ar[d]&\\
0\ar[r] &D_{\ast}^{X^{\mathcal{T}}}(Z)\ar[r]\ar[d]&
D_{\ast}(X)\ar[r]\ar[d]&
D_{\ast}^{X^{\mathcal{T}}}(V)\ar[r]\ar[d]&0\\
0\ar[r] &D_{\ast}^{X^{\mathcal{T}}}(Y)\ar[r]\ar[d]&
D_{\ast}(X)\ar[r]\ar[d]&
D_{\ast}^{X^{\mathcal{T}}}(U)\ar[r]\ar[d]&0\\
& D^{X^{\mathcal{T}}}_{\ast}(W) \ar[d]& 0 & 0 & \\
& 0 &&&
}
\end{equation}
that is the topological dual of the diagram \eqref{eq:1}.
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . The homology of the complexes of currents.} By \cite{Poly:shcsesa}
we have
\begin{proposition}
The homology of the complexes $D_{\ast}^{X^{\mathcal{T}}}(W)$ is
given by
\begin{displaymath}
H_{\ast}(D^{X^{\mathcal{T}}}_{\ast}(W))=H_{\ast}^{BM}(W,\mathbb{C}),
\end{displaymath}
where $H_{\ast}^{BM}$ denotes Borel-Moore homology. In particular,
since we are assuming $Y$ proper,
\begin{displaymath}
H_{\ast}(D_{\ast}^{X^{\mathcal{T}}}(Y))=H_{\ast}(Y,\mathbb{C}).
\end{displaymath}
$\square$
\end{proposition}
\subsection{Formal and tempered Deligne cohomology}
\label{sec:form-deligne-cohom}
\
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . Formal Deligne cohomology.}
The complex
$E^{\ast}_{X^{\mathcal{W}},\mathbb{R}}(W)$ is an example of a Dolbeault
algebra (see \cite{BurgosKramerKuehn:cacg}).
Recall that, following Deligne, the cohomology of any
complex variety has a mixed
Hodge structure. We will call the Hodge filtration of this mixed
Hodge structure the Deligne-Hodge filtration.
From the structure of Dolbeault algebra of
$E^{\ast}_{X^{\mathcal{W}}}(W)$ we can define a Hodge
filtration. It is the filtration associated to the bigrading.
In general, this Hodge filtration does not induce the
Deligne-Hodge filtration in cohomology. Moreover, the spectral sequence
associated to this Hodge filtration does not need to degenerate at the
$E_{1}$ term. Therefore, the Dolbeault cohomology groups
$H^{p,q}_{\overline \partial}(E^{\ast}(Y^{\infty}))$ are not, in general, direct
summands of $H^{p+q}(Y,\mathbb{C})$. In fact, they can be infinite
dimensional as can be seen in the easiest example: Put
$X=\mathbb{P}^{1}_{\mathbb{C}}$. Let $t$ be the absolute coordinate
and let $Y$ be
the point $t=0$. Then $H^{0,0}_{\bar
\partial}(E^{\ast}(Y^{\infty}))=\mathbb{C}[[t]]$, the ring of formal
power series in one variable.
Following \cite{Burgos:CDB} and \cite{BurgosKramerKuehn:cacg}, to
every Dolbeault algebra we can associate a Deligne algebra.
We
refer the reader to \cite{Burgos:CDB} and
\cite{BurgosKramerKuehn:cacg} \S 5 for the definition and properties
of Dolbeault algebras, Dolbeault complexes and the associated Deligne
complexes. We will use freely the notation therein. In particular the
Deligne algebra associated to the above Dolbeault algebra
will be denoted $\mathcal{D}^{\ast}(E^{\ast}_{X^{\mathcal{W}}}(W),\ast)$.
\begin{definition} \label{def:1} The real formal Deligne cohomology
of $W$ (with
compact supports) is defined by
\begin{align*}
H_{\mathcal{D}^{f},c}^{\ast}(W^{\infty},\mathbb{R}(p))&=
H^{\ast}(\mathcal{D}^{\ast}(E_{X^{\mathcal{W}}}(W),p)).
\end{align*}
When $W$ is proper we will just write
$H_{\mathcal{D}^{f}}^{\ast}(W^{\infty},\mathbb{R}(p))$.
\end{definition}
The notation $W^{\infty}$ is a reminder that this cohomology depends,
not only on $W$ but on an infinitesimal neighborhood of infinite
order of $W$ in $X$.
\begin{remark}
Since we are assuming that $X$ is smooth and proper, the formal Deligne
cohomology of $X$,
$H_{\mathcal{D}^{f}}^{\ast}(X^{\infty},\mathbb{R}(p))$, given in
the previous definition, agrees with the usual Deligne cohomology of $X$.
Nevertheless, by the discussion before the definition, the formal
Deligne cohomology with
compact supports of $U$ or the formal Deligne cohomology of $Y$,
do not agree, in general, with the usual Deligne-Beilinson
cohomology. For instance the groups
$H_{\mathcal{D}^{f}}^{\ast}(U,\mathbb{R}(p))$ can be infinite
dimensional.
\end{remark}
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . Homological Dolbeault complexes and homological Deligne
complexes.} In order to define formal Deligne homology we
first translate the notions of \cite{BurgosKramerKuehn:cacg}
\S 5.2 to the homological grading.
\begin{definition}
\label{def:12}
A \emph{homological Dolbeault complex} $A=(A_{\ast}^{\mathbb{R}},\dd_{A})$ is
a graded complex of real vector spaces, which is bounded from above
and equipped with a bigrading on $A^{\mathbb{C}}=A^{\mathbb{R}}
\otimes_{\mathbb{R}}{\mathbb{C}}$, i.e.,
\begin{displaymath}
A_{n}^{\mathbb{C}}=\bigoplus_{p+q=n}A_{p,q},
\end{displaymath}
satisfying the following properties:
\begin{enumerate}
\item[(i)]
The differential $\dd_{A}$ can be decomposed as the sum $\dd_{A}=
\partial+\bar{\partial}$ of operators $\partial$ of type $(-1,0)$,
resp. $\bar{\partial}$ of type $(0,-1)$.
\item[(ii)]
It satisfies the symmetry property $\overline{A_{p,q}}=A_{q,p}$,
where $\overline{\phantom{M}}$ denotes complex conjugation.
\end{enumerate}
\end{definition}
\begin{notation}
\label{def:13}
Given a homological Dolbeault complex $A=(A_{\ast}^{\mathbb{R}},\dd_{A})$, we
will use the following notations. The Hodge filtration $F$ of $A$
is the increasing filtration of $A^{\mathbb{C}}_{\ast}$ given by
\begin{displaymath}
F_{p}A_{n}=F_{p}A_{n}^{\mathbb{C}}=\bigoplus_{p'\leq p}A_{p',n-p'}.
\end{displaymath}
The filtration $\overline F$ of $A$ is the complex conjugate of $F$,
i.e.,
\begin{displaymath}
\overline{F}_{p}A_{n}=\overline{F}_{p}A_{n}^{\mathbb{C}}=\overline
{F_{p}A_{n}^{\mathbb{C}}}.
\end{displaymath}
For an element $x\in A^{\mathbb{C}}$, we write $x_{i,j}$ for its
component in $A_{i,j}$. For $k,k' \in \mathbb{Z}$, we define an operator
$F_{k,k'}:A^{\mathbb{C}}\longrightarrow A^{\mathbb{C}}$ by the
rule
\begin{displaymath}
F_{k,k'}(x):=\sum_{l\leq k,l'\leq k'}x_{l,l'}.
\end{displaymath}
We note that the operator $F_{k,k'}$ is the projection of $A^{\ast}_
{\mathbb{C}}$ onto the subspace $F_{k}A_{\ast}\cap\overline{F}_{k'}
A_{\ast}$. This subspace will be denoted $F_{k,k'}A_{\ast}$. We will
also denote by $F_{k}$ the operator $F_{k,\infty}$.
We denote by $A_{n}^{\mathbb{R}}(p)$ the subgroup $(2\pi i)^{-p}\cdot
A_{n}^{\mathbb{R}}\subseteq A_{n}^{\mathbb{C}}$, and we define the
operator
\begin{displaymath}
\pi_{p}:A^{\mathbb{C}}\longrightarrow A^{\mathbb{R}}(p)
\end{displaymath}
by setting $\pi_{p}(x):=\frac{1}{2}(x+(-1)^{p}\bar{x})$.
\end{notation}
To any homological Dolbeault complex we can associate a homological
Deligne complex.
\begin{definition}
Let $A$ be a homological Dolbeault complex. We denote by $A_{\ast}(p)^
{\mathcal{D}}$ the complex $s(A^{\mathbb{R}}(p)\oplus F_{p}A
\overset{u}{\longrightarrow}A^{\mathbb{C}})$, where $u(a,f)=
-a+f$ and $s(\ )$ denotes the simple complex of a morphism of
complexes.
\end{definition}
\begin{definition}
Let $A$ be a homological Dolbeault complex. Then, the
\emph{(homological) Deligne complex
$(\mathcal{D}_{\ast}(A,\ast),\dd_{\mathcal{D}})$ associated to $A$}
is the graded complex given by
\begin{align*}
&\mathcal{D}_{n}(A,p)=
\begin{cases}
A^{\mathbb{R}}_{n+1}(p+1)\cap F_{n-p,n-p}A_{n+1}^{\mathbb{C}},
&\qquad\text{if}\quad n\geq 2p+1, \\
A^{\mathbb{R}}_n(p)\cap F_{p,p}A_{n}^{\mathbb{C}},
&\qquad\text{if}\quad n\leq 2p,
\end{cases}
\intertext{with differential given, for $x\in\mathcal{D}_{n}(A,p)$, by}
&\dd_{\mathcal{D}}x=
\begin{cases}
-F_{n-p+1,n-p+1}\dd_{A}x,
&\qquad\text{if}\quad n>2p+1, \\
-2\partial\bar{\partial}x,
&\qquad\text{if}\quad n=2p+1, \\
\dd_{A}x,
&\qquad\text{if}\quad n\leq 2p.
\end{cases}
\end{align*}
\end{definition}
For instance, let $A$ be a Dolbeault complex satisfying $A_{p,q}=0$
for $p<0$, $q<0$, $p>n$, or $q>n$. Then, for $p\ge n$, the complex
$\mathcal{D}(A,p)$
agrees with the real complex $A_{\ast}^{\mathbb{R}}(p)$. For $0\le p<n$, we
have represented $\mathcal{D}(A,p)$ in figure \ref{fig:1}, where
the upper right square is shifted by one; this means in particular that
$A_{n,n}$ sits in degree $2n-1$ and $A_{p+1,p+1}$ sits in degree
$2p+1$. For $p<0$ the complex $\mathcal{D}(A,p)$
agrees with the real complex $A_{\ast}^{\mathbb{R}}(p+1)[1]$.
\begin{figure}
\caption{$\mathcal{D}(A,p)$}
\label{fig:1}
\end{figure}
\begin{remark}
It is clear from the definition that, for all $p\in \mathbb{Z}$, the functor $\mathcal{D}
(\cdot,p)$ is exact.
\end{remark}
The main property of the Deligne complex is expressed by the
following proposition; for a proof in the cohomological case see
\cite{Burgos:CDB}.
\begin{proposition}
\label{prop:32}
The complexes $A_{\ast}(p)^{\mathcal{D}}$ and $\mathcal{D}_{\ast}
(A,p)$ are homotopically equivalent. The homotopy equivalences
$\psi:A_{n}(p)^{\mathcal{D}}\longrightarrow\mathcal{D}_{n}(A,p)$,
and $\varphi:\mathcal{D}_{n}(A,p)\longrightarrow A_{n}(p)^{\mathcal
{D}}$ are given by
\begin{displaymath}
\psi(a,f,\omega)=
\begin{cases}
\pi(\omega),\qquad&\text{if }n\ge 2p+1, \\
F_{p,p}a+2\pi_{p}(\partial\omega_{p+1,n-p-1}),\quad&\text{if }n\le 2p,
\end{cases}
\end{displaymath}
where $\pi(\omega)=\pi_{p+1}(F_{n-p,n-p}\omega)$, i.e., $\pi$ is
the projection of $A_{\mathbb{C}}$ over the co\-kernel of $u$, and
\begin{displaymath}
\varphi(x)=
\begin{cases}
(\partial x_{p+1,n-p}-\bar{\partial}x_{n-p,p+1},2\partial
x_{p+1,n-p},x),\quad&\text{if }n\ge 2p+1, \\
(x,x,0),&\text{if }n\le 2p.
\end{cases}
\end{displaymath}
Moreover, $\psi\circ\varphi=\Id$, and $\varphi\circ\psi-\Id=\dd h+
h\dd$, where $h:A_{n}(p)^{\mathcal{D}}\longrightarrow A_{n+1}(p)^
{\mathcal{D}}$ is given by
\begin{displaymath}
h(a,f,\omega)=
\begin{cases}
(\pi_{p}(\overline{F}_{p}\omega+\overline{F}_{n-p}\omega),-2F_{p}
(\pi_{p+1}\omega),0),\quad&\text{if }n\ge 2p+1, \\
(2\pi_{p}(\overline{F}_{n-p}\omega),-F_{p,p}\omega-2F_{n-p}
(\pi_{p+1}\omega),0),\quad&\text{if }n\le 2p.
\end{cases}
\end{displaymath}
\end{proposition}
$\square$
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . }par{Tempered Deligne homology.} Applying the above discussion to the
complex of currents
$D_{\ast}^{X^{\mathcal{T}},\mathbb{R}}(W)$ we define the homological
Deligne complex
$\mathcal{D}_{\ast}(D_{\ast}^{X^{\mathcal{T}}}(W),\ast).$
\begin{definition} \label{def:2} \emph{The tempered Deligne (Borel-Moore)
homology of $W$} is defined by
\begin{displaymath}
H^{\mathcal{D}^{\mathcal{T}}}_{\ast}(W^{\infty},\mathbb{R}(p))=
H_{\ast}(\mathcal{D}_{\ast}(D^{X^{\mathcal{T}}}_{\ast}(W),p)).
\end{displaymath}
\end{definition}
\begin{remark}
\begin{enumerate}
\item Again, since $X$ is smooth and proper, the tempered Deligne
homology of
$X$ agrees with the Deligne homology of $X$. In particular,
the group
$H^{\mathcal{D}}_{n}(X,\mathbb{R}(p))$ agrees with the group denoted
${}'H^{-n}_{\mathcal{D}}(X,\mathbb{R}(-p))$ in
\cite{Jannsen:DcHD}. But, since the Hodge filtration of the complex
of currents with support on $Y$ does not induce the Deligne-Hodge
filtration in the
homology of $Y$, the tempered Deligne homology does not
agree in general with Deligne-Beilinson homology.
\item As in the case of formal cohomology, the notation
$H^{\mathcal{D}^{\mathcal{T}}}_{\ast}(W^{\infty},\mathbb{R}(p))$ reminds us
that these groups do not depend only on $W$ but on an
infinitesimal neighborhood of $W$ of infinite order.
\end{enumerate}
\end{remark}
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . }par{Equidimensional manifolds.} If $X$ is equidimensional of
dimension $d$ the morphism \eqref{eq:11} induces morphisms
\begin{equation}
\mathcal{D}^{n}(E^{\ast}(X),p)\longrightarrow
\mathcal{D}_{2d-n}(D_{\ast}(X),d-p), \quad p \in \mathbb{Z},
\end{equation}
that, in turn, induce the Poincar\'e duality isomorphisms
\begin{equation}\label{eq:8}
H^{n}_{\mathcal{D}}(X,\mathbb{R}(p))
\longrightarrow H_{2d-n}^{\mathcal{D}}(X,\mathbb{R}(d-p)), \quad n,p \in \mathbb{Z}.
\end{equation}
By analogy, we can define tempered Deligne cohomology groups as follows
\begin{align*}
H_{\mathcal{D}^{\mathcal{T}}}^{n}(U,\mathbb{R}(p))&=
H^{\mathcal{D}^{\mathcal{T}}}_{2d-n}(U,\mathbb{R}(d-p)),\\
H_{\mathcal{D}^{\mathcal{T}},W}^{n}(V,\mathbb{R}(p))&=
H^{\mathcal{D}^{\mathcal{T}}}_{2d-n}(W^{\infty},\mathbb{R}(d-p)).
\end{align*}
In general, if $X$ is a disjoint union of equidimensional algebraic
manifolds, then we define the tempered Deligne cohomology of $X$
as the direct sum of the tempered Deligne cohomology of its components.
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . }par{The module structure of tempered Deligne homology.}
The notion of Dolbeault module over a
Dolbeault algebra introduced in \cite{BurgosKramerKuehn:cacg} can be
easily modified to define homological Dolbeault modules over a
Dolbeault algebra. The actions \eqref{eq:5}, \eqref{eq:6} and
\eqref{eq:7} provide the basic examples. Modifying the construction of
\cite{BurgosKramerKuehn:cacg} 5.17 and 5.18 we obtain
\begin{proposition}
There is a
pseudo-associative action
\begin{displaymath}
\mathcal{D}^{n}(E_{X^{\mathcal{W}}}(W),p)\otimes
\mathcal{D}_{m}(D_{\ast}^{X^{\mathcal{T}}}(W),q)\longrightarrow
\mathcal{D}_{m-n}(D_{\ast}^{X^{\mathcal{T}}}(W),q-p)
\end{displaymath}
that induces an associative action
\begin{displaymath}
H_{\mathcal{D}^{f},c}^{n}(W^{\infty},\mathbb{R}(p))\otimes
H^{\mathcal{D}^{\mathcal{T}}}_{m}(W^{\infty},\mathbb{R}(q))\longrightarrow
H^{\mathcal{D}^{\mathcal{T}}}_{m-n}(W^{\infty},\mathbb{R}(q-p)).
\end{displaymath}
$\square$
\end{proposition}
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . }par{The exceptional duality.} In general, Poincar\'e duality for
Deligne cohomology is not given by a bilinear pairing, but by the
isomorphism \eqref{eq:8} between Deligne cohomology and Deligne
homology (see for instance \cite{Jannsen:DcHD}). Nevertheless, in the case
of real Deligne cohomology, there is an exceptional duality that comes
from the symmetry of the Deligne complex associated with a Dolbeault
complex. This duality can be generalized to a pairing between formal
Deligne cohomology and tempered Deligne homology.
\begin{proposition}\label{prop:5}
For every pair of integers $n,p$, there is a pairing
\begin{displaymath}
\mathcal{D}^{n}(E_{X^{\mathcal{W}}}(W),p)\otimes
\mathcal{D}_{n-1}(D^{X^{\mathcal{T}}}(W),p-1)\longrightarrow \mathbb{R}
\end{displaymath}
given by $\omega \otimes T\longmapsto T(\omega )$.
This pairing identifies $\mathcal{D}_{n-1}(D^{X^{\mathcal{T}}}(W),p-1)$
with the topological dual of
$\mathcal{D}^{n}(E_{X^{\mathcal{W}}}(W),p)$. Moreover, it is
compatible, up to the sign, with the differential in the Deligne complex:
\begin{displaymath}
T (\dd_{\mathcal{D}}\omega ) =
\begin{cases}
(-1)^{n+1}
(\dd_{\mathcal{D}}T)(\omega),& \text{ if } n\le 2p-1,\\
(-1)^{n}
(\dd_{\mathcal{D}}T)(\omega),& \text{ if } n\ge 2p.\\
\end{cases}
\end{displaymath}
It is also compatible, up to the sign, with the action of
$\mathcal{D}^{\ast}(E_{X^{\mathcal{W}}}(W),\ast)$. That is, if the
forms
$\omega \in \mathcal{D}^{n}(E_{X^{\mathcal{W}}}(W^{\infty}),p)$ and
$\eta \in \mathcal{D}^{l}(E_{X^{\mathcal{W}}}(W),r)$, and the current
$T\in \mathcal{D}_{m}(D^{X^{\mathcal{T}}}(W),q)$, with
$n-m+l=1$ and $p-q+r=1$ then
\begin{displaymath}
(\omega \bullet T)(\eta)=
\begin{cases}
(-1)^{n} T(\eta \bullet \omega), & \text{ if } m>2q,\ l\ge 2r,\\
T(\eta \bullet \omega), & \text{ if } m\le 2q,\ l< 2r,\\
(-1)^{m-1} T(\eta \bullet \omega), & \text{ if } m>2q,\ l< 2r,\\
(-1)^{l} T(\eta \bullet \omega), & \text{ if } m\le 2q,\ l\ge 2r.\\
\end{cases}
\end{displaymath}
\end{proposition}
\begin{proof}
Assume that $n<2p$. Put $q=p-1$ and
$m=n-1$. Then
\begin{align*}
&\mathcal{D}^{n}(E_{X^{\mathcal{W}}}(W),p)\\
&\phantom{A}=
E^{n-1}_{X^{\mathcal{W}},\mathbb{R}}(W)(p-1)\left /
(F^{p} E^{n-1}_{X^{\mathcal{W}}}(W) + \bar F ^{p}
E^{n-1}_{X^{\mathcal{W}}}(W))\cap
E^{n-1}_{X^{\mathcal{W}}}(W)_{\mathbb{R}}(p-1) \right.\\
&\phantom{A}= E^{n-1}_{X^{\mathcal{W}},\mathbb{R}}(W)(p-1)
\cap \bar F ^{n-p}
E^{n-1}_{X^{\mathcal{W}}}(W)\cap
F^{n-p} E^{n-1}_{X^{\mathcal{W}}}(W),\\
&\mathcal{D}_{m}(D^{X^{\mathcal{T}}}(W),q)\\
&\phantom{A}=D_{m}^{X^{\mathcal{T}},\mathbb{R}}(W^{\infty})(q)\cap
F_{q} D_{m}^{X^{\mathcal{T}}}(W) \cap \bar F _{q}
D_{m}^{X^{\mathcal{T}}}(W)\\
&\phantom{A}=D_{n-1}^{X^{\mathcal{T}},\mathbb{R}}(W)(p-1)\cap
F_{p-1} D_{n-1}^{X^{\mathcal{T}}}(W) \cap \bar F _{p-1}
D_{n-1}^{X^{\mathcal{T}}}(W).
\end{align*}
Therefore, the first statement follows from the duality between
$E_{X^{\mathcal{W}}}(W)$ and $D^{X^{\mathcal{T}}}(W)$ and the fact
that, under this duality,
$D_{n-1}^{X^{\mathcal{T}},\mathbb{R}}(W)(p-1)$ is identified
with the dual of $E^{n-1}_{X^{\mathcal{W}},\mathbb{R}}(W)(p-1)$ and
$F_{p-1} D_{n-1}^{X^{\mathcal{T}}}(W) $ is identified with the
dual of $\bar F ^{n-p}
E^{n-1}_{X^{\mathcal{W}}}(W)$.
The compatibility with the differential is a
straightforward computation using the formulas for the differential
given in \cite{Burgos:CDB} theorem 2.6. For instance,
if $\omega \in
\mathcal{D}^{n}(E_{X^{\mathcal{W}}}(W),p)$, with $n<2p-1$ and $T\in
\mathcal{D}_{m}(D^{X^{\mathcal{T}}}(W),q)$, with $m=n$ and
$q=p-1$, then we have
\begin{align*}
(\dd_{\mathcal{D}} T)(\omega )
&=(\dd T)(\omega)\\
&=(-1)^{n}T(\dd \omega)\\
&=(-1)^{n}T(F^{n-p+1,n-p+1}\dd \omega)\\
&=(-1)^{n}T(-\dd_{\mathcal{D}} \omega ).
\end{align*}
In the third equality we have used that $T\in F_{q}\cap \bar
F_{q}=F_{p-1,p-1}$, which implies that, for any form $\eta$, we have
$T(\eta)=T(F^{n-p+1,n-p+1}\eta)$. The other cases are analogous.
Similarly, the compatibility with the product follows from
\cite{Burgos:CDB} theorem 2.6. For instance,
let
$\omega \in \mathcal{D}^{n}(E_{X^{\mathcal{W}}}(W),p)$,
$T\in \mathcal{D}_{m}(D^{X^{\mathcal{T}}}(W),q)$ and
$\eta \in \mathcal{D}^{l}(E_{X^{\mathcal{W}}}(W),r)$, with
$n-m+l=1$ and $p-q+r=1$. Assume that $n<2p$, $m>2q$, $l\ge 2r$,
then
\begin{displaymath}
(\omega \bullet T)(\eta)=
((-1)^{n}r_{p}(\omega )\land T+\omega \land r_{q}(T))(\eta),
\end{displaymath}
where $r_p(\omega )=2\pi _{p}(F^{p}\dd \omega )$ and $r_q(T)=2\pi
_{q}(F_{q}\dd T )$.
But
\begin{displaymath}
(-1)^{n}r_{p}(\omega )\land T(\eta)=
(-1)^{n}T(\eta \land r_{p}(\omega )),
\end{displaymath}
and
\begin{align*}
(\omega \land r_{q}(T))(\eta)&= r_{q}(T)(\eta\land \omega )\\
&=2\pi _{q}F_{q}(\dd T)(\eta\land \omega )\\
&= 2 F_{q}(\dd T)(\eta\land \omega )\\
&= 2 \partial T_{q+1,m-q}(\eta\land \omega )\\
&= T \left( 2 (-1)^{m-1}\partial (\eta\land \omega)^{q,m-q}\right)\\
&= T \left( 2 (-1)^{n+l}\partial (\eta\land \omega)^{p+r-1,n+l-p-r}\right).
\end{align*}
On the other hand
\begin{displaymath}
T(\eta \bullet \omega )=T\left(\eta \land r_{p}(\omega )+
(-1)^{l}2 \partial (\omega \land \eta)^{p+r-1,n+l-p-r}\right).
\end{displaymath}
The other cases are analogous.
\end{proof}
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . }par{Duality.} We summarize in the next proposition the
basic properties of formal Deligne cohomology and tempered Deligne
homology that follow from the
previous discussions.
\begin{proposition}\label{prop:2} For every pair of integers $n$ and
$p$, by applying the exact functors
$\mathcal{D}^{\ast}(\underline{\phantom{A}},p)$ and
$\mathcal{D}_{\ast}(\underline{\phantom{A}},p-1)$ to the
diagrams \eqref{eq:1} and
\eqref{eq:2} respectively, we obtain the corresponding diagrams of
Deligne complexes that are the topological dual of each other. In
particular we obtain long exact sequences
\begin{multline}\label{eq:13}
H^{n}_{\mathcal{D}^{f},c}(W^{\infty},\mathbb{R}(p))
\rightarrow H^{n}_{\mathcal{D}^{f}}(Y^{\infty},\mathbb{R}(p))
\rightarrow H^{n}_{\mathcal{D}^{f}}(Z^{\infty},\mathbb{R}(p))
\rightarrow \\ H^{n+1}_{\mathcal{D}^{f},c}(W^{\infty},\mathbb{R}(p))
\rightarrow
\end{multline}
and
\begin{multline}\label{eq:12}
\leftarrow H_{n-1}^{\mathcal{D}^{\mathcal{T}}}(W^{\infty},\mathbb{R}(p-1))
\leftarrow
H_{n-1}^{\mathcal{D}^{\mathcal{T}}}(Y^{\infty},\mathbb{R}(p-1))
\leftarrow \\
H_{n-1}^{\mathcal{D}^{\mathcal{T}}}(Z^{\infty},\mathbb{R}(p-1))
\leftarrow H_{n}^{\mathcal{D}^{\mathcal{T}}}(W^{\infty},\mathbb{R}(p-1))
\end{multline}
and pairings
\begin{align*}
H^{n}_{\mathcal{D}^{f}}(Y^{\infty},\mathbb{R}(p))\otimes
H_{n-1}^{\mathcal{D}^{\mathcal{T}}}(Y^{\infty},\mathbb{R}(p-1))
&\longrightarrow \mathbb{R},\\
H^{n}_{\mathcal{D}^{f},c}(W^{\infty},\mathbb{R}(p))\otimes
H_{n-1}^{\mathcal{D}^{\mathcal{T}}}(W^{\infty},\mathbb{R}(p-1))
&\longrightarrow \mathbb{R},\\
H^{n}_{\mathcal{D}^{f}}(Z^{\infty},\mathbb{R}(p))\otimes
H_{n-1}^{\mathcal{D}^{\mathcal{T}}}(Z^{\infty},p-1)
&\longrightarrow \mathbb{R}.
\end{align*}
that are compatible with the above sequences.
Moreover, the topologies of the space of differential forms and of the
space of currents induce structures of topological vector spaces on
the real formal Deligne cohomology groups and the tempered Deligne
homology groups. The
above pairings induce a perfect pairing of the corresponding
separated vector spaces.
\end{proposition}
\begin{proof}
This is a direct consequence of the exactness of the functors
$\mathcal{D}^{\ast}(\underline{\phantom{A}},p)$ and
$\mathcal{D}_{\ast}(\underline{\phantom{A}},p-1)$ and proposition
\ref{prop:5}.
\end{proof}
The image of $\dd_{\mathcal{D}}$ in the complex
$\mathcal{D}^{{\text{\rm l,ll,a}}t}(E_{\flat}(U),p)$ does not need to be
closed. Therefore the pairing between formal cohomology and
tempered homology do
not need to be perfect. Only the induced pairing in the
corresponding separated vector spaces is perfect. Nevertheless, in
the case of a
proper algebraic complex manifold $X$, by Hodge theory, we obtain
a perfect pairing between Deligne-Beilinson cohomology and homology.
\begin{corollary}[Exceptional duality for Deligne cohomology] \label{cor:3}
Let $X$ be a proper complex algebraic manifold, equidimensional of
dimension $d$.
Then there is a
perfect duality
\begin{displaymath}
H_{\mathcal{D}}^{n}(X,\mathbb{R}(p))\otimes
H_{\mathcal{D}}^{2d-n+1}(X,\mathbb{R}(d-p+1))\longrightarrow \mathbb{R}
\end{displaymath}
which is compatible, up to a sign, with the product in Deligne
cohomology.
\end{corollary}
\begin{proof}
By Poincar\'e duality in Deligne cohomology (cf. \cite{Jannsen:DcHD}
1.5) there is a natural isomorphism
\begin{displaymath}
H_{\mathcal{D}}^{2d-n+1}(X,\mathbb{R}(d-p+1))\cong
H^{\mathcal{D}}_{n-1}(X,\mathbb{R}(p-1)).
\end{displaymath}
By Hodge theory we know that
\begin{displaymath}
H_{\mathcal{D}}^{n}(X,\mathbb{R}(p))=
\begin{cases}
H^{n-1}(X,\mathbb{R}(p-1))\cap \overline F^{n-p} \cap F^{n-p},&
\text{ if } n<2p,\\
H^{n}(X,\mathbb{R}(p))\cap \overline F^{p} \cap F^{p},&
\text{ if } n\ge 2p.
\end{cases}
\end{displaymath}
Moreover, the pairing is given, up to a sign, by the wedge product of
differential forms followed by the integral along $X$.
Therefore, by Serre's duality, the pairing of
proposition \ref{prop:2} is perfect.
\end{proof}
\subsection{Semi-purity of tempered Deligne cohomology }
\label{sec:purity-form-deligne}
\
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . }par{Vanishing theorems.} The aim of this section is to prove the
following result
\begin{theorem}[Semi-purity of tempered Deligne homology]
Let $X$ be a projective complex algebraic manifold,
$W$ a locally closed subvariety, of dimension at most $p$. Then
\begin{displaymath}
H_{n}^{\mathcal{D}^{\mathcal{T}}}(W^{\infty},\mathbb{R}(e))=0, \text{ for
all } n > \max(e+p,2p-1).
\end{displaymath}
\end{theorem}
\begin{proof}
We will prove the result by ascending induction over $p$. The
result is trivially true for $p<0$. Then, by the exact sequence
\eqref{eq:12} and
induction, one is
reduced to the case $W$ closed.
We will deduce the theorem by duality from the following proposition
\begin{proposition}\label{prop:1}
Let $Y$ be a closed subvariety of a projective complex algebraic
manifold. Let $p$ be the dimension of $Y$. Then
\begin{displaymath}
H^{n+1}_{\mathcal{D}^{f}}(Y^{\infty},\mathbb{R}(e+1))=0, \text{ for
all } n > \max(e+p,2p-1).
\end{displaymath}
\end{proposition}
\begin{proof}
Let $\mathscr{I}_{Y}$ be the ideal of holomorphic functions on $X$
vanishing at $Y$. We denote
\begin{displaymath}
\Omega ^{q}_{Y^{\infty}}= \lim_{\substack{\longleftarrow\\k}}
\Omega ^{q}_{X}\left/ \mathscr{I}_{Y}^{k}\Omega ^{q}_{X}. \right.
\end{displaymath}
By \cite{KashiwaraSchapira:mfcacs} theorem 5.12 we have
\begin{lemma} \label{lemm:1}
The complex of sheaves $\mathscr{E}^{q,\ast}_{Y,\mathbb{R}}$ is a
fine resolution of $\Omega ^{q}_{Y^{\infty}}$.
\end{lemma}
Since, by \cite{Poly:shcsesa}, the sheaf
$\mathscr{E}^{\ast}_{Y^{\infty},\mathbb{R}}$ is an acyclic
resolution of the constant sheaf $\underline{\mathbb{R}}_{Y}$,
from lemma \ref{lemm:1} and the techniques of \cite{Burgos:CDB},
we deduce that
$H^{\ast}_{\mathcal{D}^{f}}(Y^{\infty},\mathbb{R}(e+1))$ is
isomorphic to the hypercohomology of the complex of sheaves
\begin{equation}\label{eq:3}
\underline{\mathbb{R}}_{\mathcal{D}^{f},Y^{\infty}}(e):=
\underline{\mathbb{R}}_{Y}(e+1)\longrightarrow \Omega
^{0}_{Y^{\infty}}\longrightarrow \dots \longrightarrow
\Omega ^{e}_{Y^{\infty}}.
\end{equation}
\begin{lemma} \label{lemm:2} If $n> p$ then $H^{n}(Y,\Omega _{Y^{\infty}}^{q})=0$.
\end{lemma}
\begin{proof}
By \cite{hartshorne75:Rhamcag} proposition I.6.1
\begin{displaymath}
H^{n}(Y,\Omega _{Y^{\infty}}^{q})=
H^{n}(Y^{\text{{\rm alg}}},\hat \Omega _{Y}^{q}),
\end{displaymath}
where $Y^{\text{{\rm alg}}}$ is the corresponding algebraic variety and
$\hat \Omega _{Y}^{q}$ is the completion of the sheaf of
algebraic differentials. But now $Y^{\text{{\rm alg}}}$ is a noetherian
topological space of dimension $p$, hence the lemma.
\end{proof}
Using lemma \ref{lemm:2} we obtain that the $E_{1}^{s,t}$ term of the
spectral sequence of the hypercohomology of the complex
\eqref{eq:3} can be non zero only for $s=0$, $0\le t \le 2p$ and
$1\le s \le e+1$, $0\le t \le p$, which implies proposition
\ref{prop:1}.
\end{proof}
We finish now the proof of the theorem. By proposition \ref{prop:1},
for every $n > \max(p+e,2p-1)$, the morphism
\begin{displaymath}
\dd_{\mathcal{D}}^{n}: \mathcal{D}^{n}(E_{X^{\mathcal{W}}}(Y),e+1)
\longrightarrow \mathcal{D}^{n+1}(E_{X^{\mathcal{W}}}(Y),e+1)
\end{displaymath}
satisfies $\operatorname{Im}
(\dd_{\mathcal{D}}^{n})=\Ker(\dd_{\mathcal{D}}^{n+1})$, hence the
image of $\dd_{\mathcal{D}}^{n}$ is
a closed subspace. Therefore, by
\cite{bourbaki87:_topol_vector_spaces_chapt} IV.2 theorem 1, we have
that the dual morphism
\begin{displaymath}
\dd_{\mathcal{D}}:\mathcal{D}_{n}(D^{X^{\mathcal{T}}}(Y),e) \longrightarrow
\mathcal{D}_{n-1}(D^{X^{\mathcal{T}}}(Y),e)
\end{displaymath}
has closed image. This implies that, for $n\ge \max(p+e,2p-1)$, the
vector space
$H_{n}^{\mathcal{D}^{\mathcal{T}}}(Y^{\infty},\mathbb{R}(e))$ is separated.
Therefore, by proposition \ref{prop:2},
for $n>\max(p+e,2p-1)$ the pairing
\begin{displaymath}
H^{n+1}_{\mathcal{D}^{f}}(Y^{\infty},\mathbb{R}(e+1))\otimes
H_{n}^{\mathcal{D}^{\mathcal{T}}}(Y^{\infty},\mathbb{R}(e))
\longrightarrow \mathbb{R}
\end{displaymath}
is perfect. Hence by proposition \ref{prop:1} we obtain the theorem.
\end{proof}
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . }par{Semi-purity of tempered Deligne cohomology.}
The semi-purity theorem can be stated in terms of tempered Deligne
cohomology as follows.
\begin{corollary} \label{cor:1}
Let $X$ be a complex quasi-projective manifold and $Y$ a closed
subvariety of codimension at least $p$. Then
\begin{displaymath}
H^{n}_{\mathcal{D}^{\mathcal{T}}, Y}(X,\mathbb{R}(e))=0, \text{ for
all } n < \min(e+p,2p+1).
\end{displaymath}
In particular
\begin{displaymath}
H^{n}_{\mathcal{D}^{\mathcal{T}}, Y}(X,\mathbb{R}(p))=0, \text{ for
all } n < 2p.
\end{displaymath}
\end{corollary}
This is the weak purity property used in \cite{BurgosKramerKuehn:cacg} 6.4.
\section{Arithmetic Intersection Theory}
\label{sec:arithm-inters-theory}
\subsection{ Definition of Covariant arithmetic Chow groups}
\label{sec:covar-arithm-chow}
In \cite{Burgos:acr}, the author introduced a variant of the
arithmetic Chow groups that are covariant with respect to arbitrary
proper morphisms.
In the paper \cite{BurgosKramerKuehn:cacg} these groups are further
studied as an example of cohomological arithmetic Chow groups. These
groups are denoted by $\cha^{\ast}
(X,\mathcal{D}_{\text{{\rm cur}}})$. The semi-purity property (corollary
\ref{cor:1}) was announced in \cite{BurgosKramerKuehn:cacg} and has
consequences in the behavior of the covariant arithmetic Chow
groups. On the other hand, Kawaguchi and Moriwaki
\cite{KawaguchiMoriwaki:isfav} have
given another definition of covariant arithmetic Chow
groups called $D$-arithmetic Chow groups. A consequence of Corollary
\ref{cor:1} is that, when $X$ is
equidimensional and generically projective, both definitions of
covariant arithmetic Chow
groups agree. We note that Zha \cite{zha99:_rieman_roch} has also
introduced a notion of covariant arithmetic Chow
groups that only differs from the definition of
\cite{KawaguchiMoriwaki:isfav} on the fact that he neglects the
anti-linear involution $F^{\infty}$.
In this section we will summarize the properties of the
covariant arithmetic Chow groups. We will follow the notations and
terminology of
\cite{BurgosKramerKuehn:cacg}, but we will use the grading by
dimension that is more natural when dealing with covariant Chow
groups.
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . }par{Arithmetic rings and arithmetic varieties.} Let $A$ be an
arithmetic ring (see \cite{GilletSoule:ait}) with fraction field
$F$. In particular
$A$ is provided with a non empty set of complex embeddings $\Sigma $
and a conjugate linear involution $F_{\infty}$ of $\mathbb{C}^{\Sigma
}$ that commutes with the diagonal embedding of $A$ in $\mathbb{C}^{\Sigma
}$.
Since we will be working with
dimension of cycles, following \cite{GilletSoule:aRRt} we
will further impose that $A$ is equicodimensional and Jacobson.
Let $S=\Spec A$ and let $e=\dim S$.
An arithmetic variety $X$ is
a flat quasi-projective scheme over $A$, that has smooth generic fiber
$X_{F}$. To every arithmetic variety $X$ we can associate a complex
algebraic manifold $X_{\Sigma }$ and a real algebraic manifold
$X_{\mathbb{R}}=(X_{\Sigma },F_{\infty})$.
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . }par{The arithmetic complex of tempered Deligne homology.} To every
pair of integers $n,p$, and
every open Zariski subset $U$ of $X_{\mathbb{R}}$ we assign the group
\begin{displaymath}
\mathcal{D}_{n}^{\text{{\rm cur}},X}(U,p)=\mathcal{D}_{n}\left(
D_{\ast}^{X_{\Sigma }^{\mathcal{T}}}(U),p\right)^{\sigma},
\end{displaymath}
where $\sigma $ is the involution that acts as complex conjugation on
the space and on the currents. That is, if $T\in
D_{n}(X_{\mathbb{C}})$ then $\sigma
(T)=\overline {(F_{\infty})_{\ast}T}$.
And $(\phantom{A})^{\sigma}$ denote the elements that are fixed by
$\sigma $. Then $\mathcal{D}_{n}^{\text{{\rm cur}},X}(\underline{\phantom{A}},p)$ is
a totally acyclic sheaf (in the sense of
\cite{BurgosKramerKuehn:cacg}) for the real scheme underlying
$X_{\mathbb{R}}$. When $X$ is fixed, $\mathcal{D}_{\ast}^{\text{{\rm cur}},X}$ will
be denoted by $\mathcal{D}_{\ast}^{\text{{\rm cur}}}$.
If $U$ is a Zariski open subset of $X_{\mathbb{R}}$ and
$Y=X\setminus U_{\mathbb{R}}$ we write
\begin{align}
H^{\mathcal{D}^{\mathcal{T}}}_{\ast}(U,\mathbb{R}(p))&=
H_{\ast}(\mathcal{D}^{\text{{\rm cur}}}(U,p)),\\
H^{\mathcal{D}^{\mathcal{T}},Y}
_{\ast}(X_{\mathbb{R}},\mathbb{R}(p))&=
H_{\ast}(s(\mathcal{D}^{\text{{\rm cur}}}(U,p),
\mathcal{D}^{\text{{\rm cur}}}(X_{\mathbb{R}},p))),\\
\widetilde {\mathcal{D}}_{2p-1}^{\text{{\rm cur}}}(X_{\mathbb{R}},p)&=
\mathcal{D}_{2p-1}^{\text{{\rm cur}}}(X_{\mathbb{R}},p)\left/
\operatorname{Im} \dd_{\mathcal{D}},\right.\\
{\rm Z} \mathcal{D}_{2p}^{\text{{\rm cur}}}(X_{\mathbb{R}},p)&=
\Ker(\dd_{\mathcal{D}}:\mathcal{D}_{2p}^{\text{{\rm cur}}}(X_{\mathbb{R}},p)\longrightarrow
\mathcal{D}_{2p+1}^{\text{{\rm cur}}}(X_{\mathbb{R}},p)).
\end{align}
Let $\mathcal{Z}_{p}=\mathcal{Z}_{p}(X_{\mathbb{R}})$ be the set of
dimension $p$ Zariski closed
subsets of $X_{\mathbb{R}}$ ordered by
inclusion. Then we will write
\begin{align*}
\mathcal{D}_{\ast}^{\text{{\rm cur}}}(X_{\mathbb{R}}\setminus \mathcal{Z}_{p},p)
&=\lim_{\substack{\longrightarrow
\\ Y\in\mathcal{Z}_{p}}}
\mathcal{D}_{\ast}^{\text{{\rm cur}}}(X_{\mathbb{R}}\setminus Y,p),\\
\widetilde{\mathcal{D}}_{\ast}^{\text{{\rm cur}}}(X_{\mathbb{R}}\setminus
\mathcal{Z}_{p},p)
&= \mathcal{D}_{\ast}^{\text{{\rm cur}}}(X_{\mathbb{R}}\setminus
\mathcal{Z}_{p},p)\left /
\operatorname{Im} \dd_{\mathcal{D}}\right.,\\
H^{\mathcal{D}^{\mathcal{T}},\mathcal{Z}_{p}}_{\ast}
(X_{\mathbb{R}},\mathbb{R}(p))&=
H_{\ast}(s(\mathcal{D}^{\text{{\rm cur}}}(X_{\mathbb{R}}\setminus
\mathcal{Z}_{p},p),
\mathcal{D}^{\text{{\rm cur}}}(X_{\mathbb{R}},p))).
\end{align*}
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . }par{Green objects.} We recall the definition of Green object for a
cycle given in \cite{BurgosKramerKuehn:cacg} but adapted to the grading by
dimension. Let $y$ be a dimension $p$ algebraic cycle of
$X_{\mathbb{R}}$. Let $Y$ be the support of $y$. The class of $y$ in
$H^{\mathcal{D}^{\mathcal{T}},Y}_{2p}(X_{\mathbb{R}},\mathbb{R}(p))$,
denoted $\cl(y)$,
is represented by the pair $(\delta _{y},0)\in
s(\mathcal{D}^{\text{{\rm cur}}}(X_{\mathbb{R}},p),
\mathcal{D}^{\text{{\rm cur}}}(U_{\mathbb{R}},p))$.
We denote also by $\cl(y)$ the image of this class in
$H^{\mathcal{D}^{\mathcal{T}},\mathcal{Z}_{p}}_{2p}
(X_{\mathbb{R}},\mathbb{R}(p))$.
In this setting, the truncated homology classes can be written as
\begin{multline*}
\widehat {H}^{\mathcal{D}^{\mathcal{T}},\mathcal{Z}_{p}}_{\ast}
(X_{\mathbb{R}},\mathbb{R}(p))=\\ \{(\omega _{y},\widetilde g_{y})\in {\rm Z}
\mathcal{D}_{2p}^{\text{{\rm cur}}}(X,p)\oplus
\widetilde{\mathcal{D}}_{2p-1}^{\text{{\rm cur}}}(X_{\mathbb{R}}\setminus
\mathcal{Z}_{p},p)\mid \dd_{\mathcal{D}} \widetilde g_{y}=\omega
_{y}\}.
\end{multline*}
There is an obvious class map
\begin{displaymath}
\cl: \widehat {H}^{\mathcal{D}^{\mathcal{T}},\mathcal{Z}_{p}}_{\ast}
(X_{\mathbb{R}},\mathbb{R}(p))\longrightarrow
H^{\mathcal{D}^{\mathcal{T}},\mathcal{Z}_{p}}_{\ast}
(X_{\mathbb{R}},\mathbb{R}(p)).
\end{displaymath}
Then a Green object for $y$ is an element
$$\mathfrak{g}_{y}=(\omega _{y},\widetilde g_{y})\in
\widehat {H}^{\mathcal{D}^{\mathcal{T}},\mathcal{Z}_{p}}_{2p}
(X_{\mathbb{R}},\mathbb{R}(p))
$$
such that $\cl(\mathfrak{g}_{y})=\cl(y)$.
The following result follows directly from the definition
\begin{lemma} \label{lemm:3}
An element $\mathfrak{g}_{y}=(\omega _{y},\widetilde g_{y})\in
\widehat {H}^{\mathcal{D}^{\mathcal{T}},\mathcal{Z}_{p}}_{2p}
(X_{\mathbb{R}},\mathbb{R}(p))$ is a Green object for $y$ if
and only if there exists a current $\widetilde \gamma \in \widetilde
{\mathcal{D}}_{2p-1}^{\text{{\rm cur}}}(X_{\mathbb{R}},p)$ such that
\begin{align*}
\widetilde g_{y}&=\widetilde \gamma|_{X\setminus
\mathcal{Z}_{p}}\\
\dd_{\mathcal{D}} \widetilde \gamma +\delta _{y}&=\omega _{y}.
\end{align*}
\end{lemma}
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . }par{Arithmetic Chow groups.} Every dimension $p$ algebraic cycle
$y$ on $X$ defines a dimension $(p-e)$ algebraic cycle $y_{\mathbb{R}}$
on $X_{\mathbb{R}}$, where $e$ is the dimension of the base scheme $S$.
\begin{definition}
The group of arithmetic cycles of dimension $p$ is defined as
\begin{displaymath}
\za_{p}(X,\mathcal{D}^{\text{{\rm cur}}})=
\{(y,\mathfrak{g}_{y})\in {\rm Z}_{p}(X)\oplus
\widehat {H}^{\mathcal{D}^{\mathcal{T}},\mathcal{Z}_{p-e}}_{2p-2e}
(X_{\mathbb{R}},\mathbb{R}(p-e))\mid
\cl(y_{\mathbb{R}})=\cl(\mathfrak{g}_{y})\}.
\end{displaymath}
Let $W$ be a dimension $p+1$ irreducible subvariety of $X$ and $f\in
K(W)^{\ast}$ be a rational function. Let $\widetilde W_{\mathbb{R}}$
be a resolution of singularities of $W_{\mathbb{R}}$ and let
$\iota:\widetilde W_{\mathbb{R}}\longrightarrow X_{\mathbb{R}}$ be
the induced map. Then we write
\begin{displaymath}
\diva f = (\dv f, (0,\iota_{\ast}(-\frac{1}{2} \log f\bar f ))).
\end{displaymath}
The group of cycles rationally equivalent to zero is the subgroup
$$\rata_{p}(X,\mathcal{D}^{\text{{\rm cur}}})\subset \za_{p}(X,\mathcal{D}^{\text{{\rm cur}}})$$
generated by the elements
of the form $\diva f$.
The \emph{homological arithmetic Chow groups} of $X$ are defined as
\begin{displaymath}
\cha_{p}(X,\mathcal{D}^{\text{{\rm cur}}})=\za_{p}(X,\mathcal{D}^{\text{{\rm cur}}})\left /
\rata_{p}(X,\mathcal{D}^{\text{{\rm cur}}})\right.
\end{displaymath}
\end{definition}
There are well-defined maps
\begin{alignat*}{2}
\zeta&:\cha_{p}(X,\mathcal{D}^{\text{{\rm cur}}})\longrightarrow\CH_{p}(X),&&\quad
\zeta[y,\mathfrak{g}_{y}]=[y], \\
\rho&:\CH_{p,p+1}(X)\longrightarrow
H_{2p-2e+1}^{\mathcal{D}^{\mathcal{T}}}(X,p-e)
\subseteq\widetilde{\mathcal{D}}_{2p+1}^{\text{{\rm cur}}}(X,p),&&\quad\rho[f]=
\cl(f), \\
\amap&:\widetilde{\mathcal{D}}_{2p-2e+1}(X,p-e)\longrightarrow\cha_{p}
(X,\mathcal{D}^{\text{{\rm cur}}}),&&\quad\amap(\widetilde{a})=[0,\amap(\widetilde{a})], \\
\omega&:\cha_{p}(X,\mathcal{D}^{\text{{\rm cur}}})\longrightarrow{\rm
Z}\mathcal{D}_{2p-2e}^{\text{{\rm cur}}}(X,p-e),
&&\quad\omega[y,\mathfrak{g}_{y}]=\omega(\mathfrak{g}_{y}), \\
h&:{\rm Z}\mathcal{D}_{2p}^{\text{{\rm cur}}}(X,p)\longrightarrow
H_{2p}^{\mathcal{D}^{\mathcal{T}}}(X,p),
&&\quad h(\alpha)=[\alpha].
\end{alignat*}
\subsection{ Properties of Covariant arithmetic Chow groups}
\label{sec:prop-covar-arithm}
\
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . Basic properties.} Recall that in
\cite{BurgosKramerKuehn:cacg}, there are defined
contravariant arithmetic Chow groups denoted by
$\cha^{\ast}(X,\mathcal{D}_{\log})$.
The following result follows from the theory developed
\cite{BurgosKramerKuehn:cacg} and corollary \ref{cor:1} (semi-purity
property).
\begin{theorem}
\label{thm:logD}
With the above notations, we have the following statements:
\begin{enumerate}
\item[(i)] There are exact sequences
\begin{displaymath}
\CH_{p,p+1}(X)\overset{\rho}{\longrightarrow}\widetilde
{\mathcal{D}}_{2p-2e+1}^{\text{{\rm cur}}}(X,p-e)\overset{\amap}{\longrightarrow}
\cha_{p}(X,\mathcal{D}^{\text{{\rm cur}}})\overset{\zeta}{\longrightarrow}
\CH_{p}(X)\longrightarrow 0.
\end{displaymath}
\begin{align*}
&\CH_{p,p+1}(X)\overset{\rho}{\longrightarrow}H_{2p-2e+1}^{\mathcal
{D}^{\mathcal{T}}}(X_{\mathbb{R}},\mathbb{R}(p-e))\overset{\amap}{\longrightarrow}
\cha_{p}(X,\mathcal{D}^{\text{{\rm cur}}})\overset{(\zeta,-\omega)}
{\longrightarrow} \\
&\phantom{CH_{p,p+1}}\CH_{p}(X)
\oplus{\rm Z}\mathcal{D}_{2p-2e}^{\text{{\rm cur}}}(X,p-e)\overset{\cl+h}
{\longrightarrow}H_{2p-2e}^{\mathcal{D}^{f}}(X_{\mathbb{R}},\mathbb{R}(p-e))
\longrightarrow 0.
\end{align*}
In particular, if $X_{F}$ is projective, then there is an
exact sequence
\begin{align*}
&\CH_{p,p+1}(X)\overset{\rho}{\longrightarrow}H_{2p-2e+1}^{\mathcal
{D}}(X_{\mathbb{R}},\mathbb{R}(p-e))\overset{\amap}{\longrightarrow}
\cha_{p}(X,\mathcal{D}^{\text{{\rm cur}}})\overset{(\zeta,-\omega)}
{\longrightarrow} \\
&\phantom{CH_{p,p+1}}\CH_{p}(X)
\oplus{\rm Z}\mathcal{D}_{2p-2e}^{\text{{\rm cur}}}(X,p-e)\overset{\cl+h}
{\longrightarrow}H_{2p-2e}^{\mathcal{D}}(X_{\mathbb{R}},\mathbb{R}(p-e))
\longrightarrow 0.
\end{align*}
\item[(ii)]
For any regular arithmetic variety $X$ over $A$ there are defined
contravariant arithmetic Chow groups
$\cha^{p}(X,\mathcal{D}_{\log})$. Furthermore, if $X$ is
equidimensional of dimension $d$, then there is
a morphism of arithmetic Chow groups
\begin{displaymath}
\cha^{p}(X,\mathcal{D}_{\log})\longrightarrow \cha_{d-p}
(X,\mathcal{D}^{\text{{\rm cur}}}).
\end{displaymath}
When $X_{F}$ is projective this morphism is a monomorphism.
Moreover, if $X_{F}$ has dimension zero, this morphism is an
isomorphism.
\item[(iii)] \label{item:1}
For any proper morphism $f:X\longrightarrow Y$ of arithmetic
varieties over $A$, there is a
morphism of covariant arithmetic Chow groups
\begin{displaymath}
f_{\ast}:\cha_{p}(X,\mathcal{D}_{\text{{\rm cur}}})\longrightarrow\cha_{p}
(Y,\mathcal{D}_{\text{{\rm cur}}}).
\end{displaymath}
If $g:Y\longrightarrow Z$ is another such morphism, the equality
$(g\circ f)_{\ast}=g_{\ast}\circ f_{\ast}$ holds. Moreover, if $X$ and
$Y$ are regular and
$f_{F}:X_{F}\longrightarrow Y_{F}$ is
a smooth proper morphism of projective varieties, then $f_{\ast}$ is
compatible with the direct image of contravariant arithmetic Chow
groups.
\item [(iv)] If $f:X\longrightarrow Y$ is a flat morphism,
equidimensional of relative
dimension $d$, and such that $f_{F}$ is smooth, then there
is a pull-back map
\begin{displaymath}
f^{\ast}:\cha_{p}(Y,\mathcal{D}^{\text{{\rm cur}}})\longrightarrow
\cha_{p+d}(X,\mathcal{D}^{\text{{\rm cur}}}).
\end{displaymath}
If $X$ and $Y$ are regular and equidimensional, this map agrees with
the pull-back map defined on the contravariant Chow groups.
\item [(v)] Let $f:X\longrightarrow Y$ be a flat map between arithmetic
varieties, which is smooth over $F$ and let $g:P\longrightarrow Y$
be a proper map. Let $Z$ be the fiber product of $X$ and $P$ over
$Y$, with $p:Z\longrightarrow P$ and $q:Z\longrightarrow X$ the two
projections. Thus $p$ is flat and smooth over $F$ and $q$ is
proper. Then for any $x\in \cha_{\ast}(P,\mathcal{D}^{\text{{\rm cur}}})$, it holds
\begin{displaymath}
q_{\ast}p^{\ast}(x)=f^{\ast}g_{\ast}(x)\in
\cha_{\ast}(X,\mathcal{D}^{\text{{\rm cur}}}).
\end{displaymath}
\end{enumerate}
\end{theorem}
\begin{proof}
Part (i) follows from the standard exact sequences of
\cite{BurgosKramerKuehn:cacg} Theorem 4.13 adapted to the
grading by dimension and corollary \ref{cor:1}.
For (ii) we first note that, if $M$ is an equidimensional complex
algebraic
manifold, $D\subset X$ is a normal crossing divisor, $\omega$ is a
differential form with logarithmic singularities along $D$ and
$\eta$ is a form that is flat along $D$, then $\eta\wedge \omega $ is
flat along $D$. In particular, if $M$ is proper and $U=M\setminus D$,
then the associated
current $[\omega]$ belongs to $ D^{\text{{\rm extd}}}_{\ast}(U)$. Therefore, if $y$ is a
codimension $p$ cycle on $X$ then, by the assumptions on $X$ and on the
arithmetic ring, $y$ is a dimension $d-p$ algebraic
cycle. Moreover, if $(\omega_{y},\widetilde g_{y})$ is a Green form
for $y$ (i.e. a $\mathcal{D}_{\log}$-Green object for $y$) then, by
lemma \ref{lemm:3} and
\cite{BurgosKramerKuehn:cacg} Proposition 6.5 we have that $([\omega
_{y}],[\widetilde g_{y}])$ is a $\mathcal{D}^{\text{{\rm cur}}}$-Green object for
$y$. Thus we have a well defined map
\begin{displaymath}
\za^{p}(X,\mathcal{D}_{\log})\longrightarrow
\za_{d-p}(X,\mathcal{D}^{\text{{\rm cur}}}).
\end{displaymath}
By definition this map is compatible with rational equivalence,
hence we obtain a map at the level of Chow groups.
To prove (iii) we first observe that, if $Z\subset X_{\Sigma }$ is a
closed subset, then $f_{\ast} D_{\ast}^{X_{\Sigma }^{\mathcal{T}}}(Z)\subset
D^{X_{\Sigma }^{\mathcal{T}}}(f(Z))$. Therefore, the push-forward of
currents defines a
covariant $f$-morphism
\begin{displaymath}
f_{\#}:f_{\ast}\mathcal{D}_{\ast}^{\text{{\rm cur}},X} \longrightarrow
\mathcal{D}^{\text{{\rm cur}},Y}_{\ast}.
\end{displaymath}
Here we are using the terminology of \cite{BurgosKramerKuehn:cacg}
3.67 but adapted to the grading by dimension. Therefore applying
\cite{BurgosKramerKuehn:cacg} \S 4.5 we obtain the push-forward map
for covariant arithmetic Chow groups.
More concretely this map is defined as
\begin{displaymath}
f_{\ast}(y,(\omega _{y},\widetilde g_{y}))=
(f_{\ast}y,(f_{\ast} \omega
_{y},(f_{\ast}g_{y})\widetilde{\phantom A})).
\end{displaymath}
It is straightforward to check that it is compatible with the
direct image of $\mathcal{D}_{\log}$-arithmetic Chow groups when $Y$
is projective and $f_{F}$ smooth.
We now prove (iv). Since $f_{F}$ is smooth, for any Zariski closed subset
$Z\subset Y_{\mathbb{R}}$ equidimensional of dimension $p$, there is a
well defined morphism
$f^{\ast}D_{n}(Y_{\Sigma })\longrightarrow D_{n+2d}(X_{\Sigma })$
that sends $D_{n}^{Y_{\Sigma }^{\mathcal{T}}}(Z)$ to $D^{X_{\Sigma
}^{\mathcal{T}}}_{n+2d}(f^{-1}(Z))$. Therefore we obtain well
defined morphisms
\begin{displaymath}
\begin{matrix}
f^{\#}:\mathcal{D}^{\text{{\rm cur}}}_{n}(Y_{\mathbb{R}},p)&\longrightarrow&
\mathcal{D}^{\text{{\rm cur}}}_{n+2d}(X_{\mathbb{R}},p+d),\\
f^{\#}:\mathcal{D}^{\text{{\rm cur}}}_{n}(Y_{\mathbb{R}}\setminus Z,p)&\longrightarrow&
\mathcal{D}^{\text{{\rm cur}}}_{n+2d}(X_{\mathbb{R}}\setminus f^{-1}Z,p+d),
\end{matrix}
\end{displaymath}
that send $T$ to $f^{\ast}T/(2\pi i)^{d}$.
Then the proof of (iv) is straightforward using the theory of
\cite{BurgosKramerKuehn:cacg} 4.4 adapted to the grading by
dimension.
(v) Follows as \cite{GilletSoule:aRRt} Lemma 11.
\end{proof}
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . Multiplicative properties.} In the next result we state the
multiplicative properties between covariant and contravariant Chow
groups. The proofs are simple modifications of \cite{GilletSoule:aRRt}
Theorem 3. First, for a form $\eta \in
\widetilde{\mathcal{D}}^{2p-1}_{\log}(X_{\mathbb{R}},p)$ and an element
$x\in \cha_{q}(X,\mathcal{D}^{\text{{\rm cur}}})$ we define
\begin{displaymath}
\eta \cap x = \amap(\eta \bullet \omega (x))=\amap(\eta \wedge
\omega (x)).
\end{displaymath}
\begin{theorem} \label{thm:1}
Given a map $f:X\longrightarrow Y$ of arithmetic varieties,
with $Y$ regular, there is a cap product
\begin{displaymath}
\begin{matrix}
\cha^{p}(Y,\mathcal{D}_{\log})\otimes \cha_{q}(X,\mathcal{D}^{\text{{\rm cur}}})
&\longrightarrow &\cha_{q-p}(X,\mathcal{D}^{\text{{\rm cur}}})_{\mathbb{Q}}\\
y\otimes x &\longmapsto & y._{f}x
\end{matrix}
\end{displaymath}
which is also denoted $y\cap X$ if $X=Y$. This product satisfies the
following properties
\begin{enumerate}
\item $\omega (y._{f}x)=f^{\ast}\omega (y)\land \omega (x)$, and,
for any $\eta \in \widetilde
{\mathcal{D}}_{\log}^{2p-1}(Y_{\mathbb{R}},p)$, it holds
$\amap(\eta)._{f}x=\amap(f^{\ast}(\eta))\cap x$.
\item $\cha_{\ast}(X,\mathcal{D}^{\text{{\rm cur}}})_{\mathbb{Q}}$ is a graded
$\cha^{\ast}(Y,\mathcal{D}_{\log})$-module.
\item If $g:Y\longrightarrow Y'$ is a map of arithmetic varieties
with $Y'$ also regular, $y'\in \cha^{p}(Y',\mathcal{D}_{\log})$ and
$x\in \cha_{q}(X,\mathcal{D}^{\text{{\rm cur}}})$, then
$y'._{gf}x=g^{\ast}(y')._{f}x$.
\item If $h:X'\longrightarrow X$ is a projective morphism, $x'\in
\cha_{q}(X',\mathcal{D}^{\text{{\rm cur}}})$ and $y\in
\cha^{p}(Y,\mathcal{D}_{\log})$, then
$y._{f}(h_{\ast}(x'))=h_{\ast}(y._{fh} x')$.
\item If $h:X'\longrightarrow X$ is flat and smooth over $F$, $x\in
\cha_{q}(X,\mathcal{D}^{\text{{\rm cur}}})$, $y\in
\cha^{p}(Y,\mathcal{D}_{\log})$, then
$h^{{\text{\rm l,ll,a}}t}(y._{f}x)=y._{f}(h^{{\text{\rm l,ll,a}}t}(x))$.
\item Let $f:X\longrightarrow Y$ be a flat map between arithmetic
varieties, with $Y$ regular and projective, and let $g:P\longrightarrow Y$
be a proper smooth map of arithmetic varieties of relative dimension
$d$. Let $Z$ be the fiber product of $X$ and $P$ over
$Y$, with $p:Z\longrightarrow P$ and $q:Z\longrightarrow X$ the two
projections. Then, for all $x\in \cha_{p}(X,\mathcal{D}^{\text{{\rm cur}}})$ and
$\gamma \in \cha^{q}(P,\mathcal{D}_{\log})$, it holds the equality
\begin{displaymath}
q_{\ast}(\gamma ._{p}q^{\ast}(x))=g_{\ast}\gamma ._{f} x .
\end{displaymath}
\end{enumerate}
\end{theorem}
\begin{proof}
To define $y._{f}x$ we follow closely \cite{GilletSoule:aRRt}. We
may assume that $Y$ is equidimensional, that
$x=(V,\mathfrak{g}_{V})$ with $V$ a prime algebraic cycle and
$y=(W,\mathfrak{g}_{W})$ with each component of $W$ meeting $V$
properly on the generic fiber $X_{F}$. As in \cite{GilletSoule:aRRt}
we can define a cycle $[V]._{f}[W]\in CH_{q-p}(V\cap
f^{-1}(|W|))_{\mathbb{Q}}$ that gives us a well defined cycle
$([V]._{f}[W])_{F}\in {\rm Z}_{q-p}(X_{F})$. Our task now is to
construct the Green object for this cycle. Let
$\mathfrak{g}_{W}=(\omega _{W},\widetilde g_{W})$ and
$\mathfrak{g}_{V}=(\omega _{V},\widetilde g_{V})$. We write
$U_{V}=X_{\mathbb{R}}\setminus |V|$,
$U_{W}=X_{\mathbb{R}}\setminus f^{-1}|W|$ and $r=q-p$.
We now define,
in analogy with \cite{BurgosKramerKuehn:cacg} theorem 3.37,
\begin{align*}
\mathfrak{g}_{W}&\ast_{f} \mathfrak{g}_{V}=
f^{\ast}\mathfrak{g}_{W}\ast \mathfrak{g}_{V}\\
&=\left(f^{\ast}(\omega_{W})\bullet\omega_{V},
((f^{\ast}(g_{W})\bullet\omega_{V},f^{\ast}(\omega_{W})\bullet
g_{V}),-f^{\ast}(g_{W})\bullet g_
{V})^{\widetilde{\phantom{=}}}\right)\\
&=(f^{\ast}(\omega_{W})\wedge \omega_{V},
((f^{\ast}(g_{W})\wedge \omega_{V},f^{\ast}(\omega_{W})\wedge
g_{V}),\\
& \
\partial f^{\ast}(g_{W})\land
g_{V}-\bar{\partial}f^{\ast}(g_{W})\land g_{V}-
f^{\ast}(g_{W})\land\partial g_{V}+f^{\ast}(g_{W})\land\bar{\partial}g_{V}
)^{\widetilde{\phantom{=}}}\\
&\in \widehat H_{2e}(\mathcal{D}^{\text{{\rm cur}}}_{\ast}(X_{\mathbb{R}},e),
s(\mathcal{D}^{\text{{\rm cur}}}_{\ast}(U_{W},e)\oplus
\mathcal{D}^{\text{{\rm cur}}}_{\ast}(U_{V},e)\rightarrow
\mathcal{D}^{\text{{\rm cur}}}_{\ast}(U_{W}\cap U_{V},e)))\\
&\cong \widehat H_{2e}(\mathcal{D}^{\text{{\rm cur}}}_{\ast}(X_{\mathbb{R}},e),
\mathcal{D}^{\text{{\rm cur}}}_{\ast}(U_{W}\cup U_{V},e)).
\end{align*}
Now the proof follows as in \cite{GilletSoule:aRRt} Theorem 3 and
Lemma 12.
\end{proof}
\begin{remark}
\begin{enumerate}
\item The main difference between the arithmetic Chow groups
introduced here and the arithmetic Chow groups used in
\cite{GilletSoule:aRRt} is that, if $x\in
\cha_{\ast}(X,\mathcal{D}^{\text{{\rm cur}}})$ then $\omega (x)$ is an arbitrary
current instead of a smooth differential form. This allows us to
define direct images for arbitrary proper morphisms. But the price
we have to pay is that inverse images are defined only for
morphisms that are smooth over $F$.
\item The fact that
the compatibility of direct images for the covariant Chow groups
and direct images for the contravariant Chow groups in theorem \ref{thm:logD}
\label{item:2} is stated only for varieties that are generically
projective, is due to the fact that the
latter is only defined when the base is proper. There are two ways
to overcome this difficulty. One is to allow arbitrary
singularities at infinity in the spirit of
\cite{BurgosKramerKuehn:accavb} 3.5, but then, one will have to
allow also arbitrary singularities at infinity for currents. This
means that we will have to consider currents that are tempered
in some components of the boundary but are not tempered in the
other. The second option would be to use a different notion of
logarithmic singularities that has better properties with respect
to direct images.
\end{enumerate}
\end{remark}
\vskip 2mm \refstepcounter{NNN}\noindent {\bf \theNNN . Relationship with other arithmetic Chow groups.} Let us assume
now that $X_{F}$ is projective and let $\cha^{\ast}(X)$ denote the
arithmetic Chow groups introduced in \cite{GilletSoule:ait} and
$\cha_{\ast}(X)$ denote the arithmetic Chow groups introduced in
\cite{GilletSoule:aRRt}. In \cite{BurgosKramerKuehn:cacg} it is shown
that there is an isomorphism
\begin{displaymath}
\psi :\cha^{\ast}(X,\mathcal{D}_{\log})\longrightarrow \cha^{\ast}(X),
\end{displaymath}
that is compatible with products, inverse images with respect to
arbitrary morphisms and direct images with respect to proper morphism
that are smooth over $F$. We shall state the analogous result for
covariant arithmetic Chow groups.
\begin{proposition}\label{prop:6}
Let $X$ be an arithmetic variety with $X_{F}$ projective. Then there
is a short exact sequence
\begin{multline*}
0\longrightarrow \cha_{\ast}(X)\overset{\phi }{\longrightarrow }
\cha_{\ast}(X,\mathcal{D}^{\text{{\rm cur}}})\\
\longrightarrow
\bigoplus_{p}{\rm Z}\mathcal{D}_{2p}^{\text{{\rm cur}}}(X_{\mathbb{R}},p)\left/
{\rm Z} \mathcal{D}_{2p}^{\text{{\rm smooth}}}(X_{\mathbb{R}},p)\longrightarrow
0\right. ,
\end{multline*}
where $\mathcal{D}_{2p}^{\text{{\rm smooth}}}(X_{\mathbb{R}},p)$ denotes the
subspace of currents that can be represented by smooth differential
forms. Moreover $\phi $ satisfies the following properties
\begin{enumerate}
\item If $f:X\longrightarrow Y$ is a proper morphism of arithmetic
varieties that is smooth over $F$ and with $Y_{F}$ projective,
then $f_{\ast}\circ \phi =\phi
\circ f_{\ast}$.
\item If $f:X\longrightarrow Y$ is a flat morphism of arithmetic
varieties that is smooth over $F$, with $X_{F}$ and $Y_{F}$
projective, then $f^{\ast}\circ \phi =\phi
\circ f^{\ast}$.
\item If $f:X\longrightarrow Y$ is a morphism of arithmetic
varieties, with $X_{F}$ and $Y_{F}$ projective and $Y$ regular
then, for $y\in \cha^{p}(Y,\mathcal{D}_{\log})$ and $x\in
\cha_{q}(Y)$, it holds the equality
\begin{displaymath}
y._{f} \phi (x) = \psi (y)._{f} x.
\end{displaymath}
\end{enumerate}
\end{proposition}
\begin{proof}
Let $y$ be a dimension $p$ algebraic cycle of
$X$ and let $g_{y}$ be a Green current for $y$ in the sense of
\cite{GilletSoule:aRRt}. Recall that the
normalization used here for the current $\delta _{y}$ differs with
the normalization used in \cite{GilletSoule:aRRt} by a factor
$\frac{1}{(2\pi i)^{p}} $. Then, by \ref{lemm:3}, the pair
\begin{displaymath}
\left(\frac{1}{2(2\pi i)^{p+1}}g_{y}|_{X_{\mathbb{R}}\setminus
\mathcal{Z}_{p}},
\frac{1}{2(2\pi i)^{p+1}} (-2\partial\bar \partial) g_{y}+\delta
_{y} \right)
\end{displaymath}
is a $\mathcal{D}^{\text{{\rm cur}}}$-Green object for $y$. Therefore we obtain a
well defined morphism
$\za_{p}(X)\longrightarrow \za_{p}(X,\mathcal{D}^{\text{{\rm cur}}})$. It is
straightforward to check that
this map preserves rational equivalence, the exactness of the above
exact sequence and properties (i), (ii) and (iii).
\end{proof}
\begin{corollary} With the hypothesis of the proposition,
every element $x\in \cha_{p}(X,\mathcal{D}^{\text{{\rm cur}}})$ can be represented as
\begin{displaymath}
x= \phi(x_{1})+\amap(\eta)
\end{displaymath}
where $x_{1}\in \cha_{p}(X)$ and $\eta\in \widetilde
{\mathcal{D}}_{2p+1}^{\text{{\rm cur}}}(X_{\mathbb{R}},p)$. Moreover, if
\begin{displaymath}
x= \phi(x_{1})+\amap(\eta) = \phi(x'_{1})+\amap(\eta')
\end{displaymath}
are two such representations, then $\eta-\eta'\in \widetilde
{\mathcal{D}}_{2p+1}^{\text{{\rm smooth}}}(X_{\mathbb{R}},p).$
\end{corollary}
\begin{proof}
This follows from the previous proposition and the fact that the map
\begin{displaymath}
\dd_{\mathcal{D}}:\widetilde
{\mathcal{D}}_{2p+1}^{\text{{\rm cur}}}(X_{\mathbb{R}},p)\longrightarrow
{\rm Z}\mathcal{D}_{2p}^{\text{{\rm cur}}}(X_{\mathbb{R}},p)\left/
{\rm Z} \mathcal{D}_{2p}^{\text{{\rm smooth}}}(X_{\mathbb{R}},p)
\right.
\end{displaymath}
is surjective due to the projectivity of $X$. The last statement
follows from \cite{GilletSoule:ait} Theorem 1.2.2.
\end{proof}
The following result follows now easily from the previous corollary.
\begin{corollary}
Assume furthermore that $X$ is equidimensional of dimension $d$ and
let $\cha^{\ast}_{D}(X)$ denote the $D$-arithmetic Chow groups
introduced in \cite{KawaguchiMoriwaki:isfav}. Then there is a
natural isomorphism
\begin{displaymath}
\bigoplus _{p}\cha^{p}_{D}(X)\longrightarrow \bigoplus
_{p}\cha_{d-p}(X,\mathcal{D}^{\text{{\rm cur}}}).
\end{displaymath}
Moreover this isomorphism is compatible with push-forwards and
the structure of module over the contravariant arithmetic Chow
groups.
$\square$
\end{corollary}
\newcommand{\noopsort}[1]{} \newcommand{\printfirst}[2]{#1}
\newcommand{\singleletter}[1]{#1} \newcommand{\switchargs}[2]{#2#1}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}
}
\providecommand{\href}[2]{#2}
\end{document} |
\begin{document}
\title{Semi-Decentralized Approximation of Optimal Control of Distributed Systems Based on a Functional Calculus}
\author{Y.~Yakoubi and M. ~Lenczner \\ Department Time-Frequency, FEMTO-ST Institute, \\ 26, Rue de
l'Epitaphe, 25030 Besan\c{c}on, FRANCE.}
\begin{abstract}
This paper discusses a new approximation method for operators which
are solution to an operational Riccati equation (\textbf{ORE}). The
latter is derived from the theory of optimal control of linear
problems posed in Hilbert spaces. The approximation is based on the
functional calculus of self-adjoint operators and the Cauchy
formula. Under a number of assumptions the approximation is suitable
for implementation on a semi-decentralized computing architecture in
view of real-time control. Our method is particularly applicable to
problems in optimal control of systems governed by partial
differential equations with distributed observation and control.
Some relatively academic applications are presented for
illustration. More realistic examples relating to microsystem arrays
have already been published.
\end{abstract}
\maketitle
\section{Introduction}
This work is a contribution to the area of semi-decentralized
optimal control of large linear distributed systems for real-time
applications. It applies to systems modeled by linear partial
differential equations with observation and control distributed over
the whole domain. This is a strong assumption, but it does not mean
that actuators and sensors are actually continuously distributed.
The models satisfying such assumption may be derived by
homogenization of systems with periodic distribution of actuators
and sensors.
\medskip
In this paper we consider two classes of systems, those with bounded
control
and bounded observation operators as in R. Curtain and H. Zwart \cite{CurZwa}
, and those with unbounded control but bounded observation operators
as in H.T. Banks and K. Ito \cite{BanIto}. In an example, we show
how the method may also be applied to a particular boundary control
problem. We view possible applications in the field of systems
including a network of actuators and sensors, see for instance
\cite{HuiYak} dedicated to arrays of Atomic Force Microscopes.
\medskip
We consider four linear operators $A,$ $B,$ $C$, $S$, and the Linear
Quadratic Regulator (LQR) problem stated classically as a
minimization problem,
\begin{eqnarray}
\mathcal{J}\left( z_{0},\uu\right) &=& \min\limits_{u\in
U}\mathcal{J}\left(
z_{0},u\right) , \label{minimization} \\
\text{with} ~ \mathcal{J}\left( z_{0},u\right) &=& \int_{0}^{+\infty
}\left\| Cz\right\| _{Y}^{2}+\left( Su,u\right) _{U} ~ dt,
\label{functional}
\end{eqnarray}
constrained by a state equation,
\begin{equation}
\frac{dz}{dt}\left( t\right) =Az\left( t\right) +Bu\left( t\right)
\quad \text{for }t>0\quad \text{and }z\left( 0\right) =z_{0}\text{.}
\label{state equation}
\end{equation}
Under usual assumptions there exists a unique solution
$\uu=-S^{-1}B^{\ast
}Pz$, where $P$ is a solution of the\ operational Riccati equation (\textbf{
ORE}),
\begin{equation}
A^{\ast }P+PA-PBS^{-1}B^{\ast }P+C^{\ast }C=0\text{.} \label{Riccati
eq}
\end{equation}
In the framework of \cite{CurZwa}, $A:D(A)\subset Z\mapsto Z,$
$B:U\mapsto Z, $ $C:Z\mapsto Y$, $S:U\mapsto U$ and consequently
$P:Z\mapsto Z$ for some linear spaces $Z,$ $U$ and $Y$. To derive
our semi-decentralized realization
of $Pz$, we further assume that there exists a linear self-adjoint operator $
\Lambda :X\mapsto X$, three one-to-one mappings
\begin{equation}
\Phi _{Z}:X^{n_{Z}}\mapsto Z,\quad \Phi _{U}:X^{n_{U}}\mapsto U\quad \text{
and }\Phi _{Y}:X^{n_{Y}}\mapsto Y, \label{isomorphisms}
\end{equation}
with appropriate integers $n_{Z},$ $n_{U}$ and $n_{Y},$ and four
continuous matrix-valued functions $\lambda \mapsto a(\lambda ),$
$\lambda \mapsto b(\lambda )$, $\lambda \mapsto c(\lambda )$ and
$\lambda \mapsto s(\lambda )$ such that
\begin{equation}
A=\Phi _{Z}a(\Lambda )\Phi _{Z}^{-1},\quad B=\Phi _{Z}b(\Lambda
)\Phi _{U}^{-1},\quad C=\Phi _{Y}c(\Lambda )\Phi _{Z}^{-1}\quad
\text{and }S=\Phi _{U}s(\Lambda )\Phi _{U}^{-1}. \label{operator
factorizations}
\end{equation}
We notice that the functions of the self-adjoint operator $\Lambda $
used in the above formulae are defined using spectral theory of
self-adjoint operators (having a real spectrum) with compact or not
compact resolvent so that to encompass bounded and unbounded
domains. From (\ref{operator factorizations}), it follows that the
Riccati operator $P$ is factorized as
\begin{equation}
P=\Phi _{Z}p(\Lambda )\Phi _{Z}^{-1}, \label{decomposition of P}
\end{equation}
where $\lambda \mapsto p(\lambda )$ is a continuous function,
solution of the algebraic Riccati equation (\textbf{ARE})
\begin{equation}
a^{T}\left( \lambda \right) p+pa\left( \lambda \right) -pb\left(
\lambda \right) s^{-1}\left( \lambda \right) b^{T}\left( \lambda
\right) p+c^{T}\left( \lambda \right) c\left( \lambda \right)
=0\text{.} \label{algebraic Ricc equation}
\end{equation}
Our goal is reached once separate efficient semi-decentralized
approximations of $\Phi _{Z}$, $p(\Lambda )$ and $\Phi _{Z}^{-1}$
are provided for the realization of $P$ through (\ref{decomposition
of P}). This is generally not an issue for $\Phi _{Z}$ and for $\Phi
_{Z}^{-1},$ then the point is the semi-decentralized approximation
of $p(\Lambda )$. It might be
built by a polynomial approximation,
\begin{equation}
p_{N}(\Lambda )=\sum_{k=0}^{N}d_{k}\Lambda ^{k}, \label{polynomial
approximation}
\end{equation}
or a rational approximation,
\begin{equation}
\displaystyle p_{N}(\Lambda
)=\frac{\sum\limits_{k=0}^{N^{N}}d_{k}\Lambda
^{k}}{\sum\limits_{k^{\prime }=0}^{N^{D}}d_{k^{\prime }}^{\prime
}\Lambda ^{k^{\prime }}}\text{.} \label{rational approximation}
\end{equation}
Then, for practical implementations, the operator $\Lambda $ could
be replaced by a discretization $\Lambda _{h},$ with parameter $h.$
We
emphasize that the formulae (\ref{polynomial approximation}) or (\ref
{rational approximation}) yield large approximation errors, with respect to $
h,$ due to the high powers of $\Lambda _{h}$. To overcome this
defect, we use an approximation based on the Cauchy integral which
requires knowing the poles of $p$. In practice, we first approximate
the function $\lambda \mapsto p(\lambda )$ by a polynomial
approximation or a rational approximation $p_{N}(\lambda )$ with
degrees $N$ or $(N^{N},N^{D})$ sufficiently high to ensure a very
small error. Once $p_{N}$ is known, so are its poles, and we can state
the Cauchy formula for $p_{N}(\Lambda )$. This leads us to introduce
the equations of the complex function $v=v_{1}+iv_{2}$
for each input $z\in Z,$
\begin{equation}
(\xi -\Lambda )v=-i\xi ^{\prime }p_{N}\left( \xi \right) z,
\label{syst of DS}
\end{equation}
where $\xi :(0,2\pi )\rightarrow \mathbb{C}$ is the contour of the
Cauchy formula. Denoting by $v^{\ell }$ the solution corresponding
to a quadrature point $\xi _{\ell }$ of the contour and $\omega
_{\ell }$ some quadrature weights, the final approximation of
$p(\Lambda )z$ is
\begin{equation}
p_{N,M}(\Lambda )z=\frac{1}{2\pi }\sum_{\ell =1}^{M}\omega _{\ell
}v_{1}^{\ell }. \label{DS approximation}
\end{equation}
Remark that the number $M$ of quadrature points is the only
important parameter governing the approximation error. For real-time
computation, the expression of $p_{N}$ is pre-computed, so the
approximation cost is also governed by $M$ only. With this method,
we do not observe a lack of precision when $\Lambda $ is replaced by
its discretizations $\Lambda _{h}$ and $M$ is large. In the sequel,
we show that the same derivation can be
done directly for $Qz=-S^{-1}B^{\ast }Pz$ provided that the isomorphisms $
\Phi _{Z}$ and $\Phi _{U}$ are also some functions of $\Lambda .$
\medskip
This approach based on functional calculus is relatively simple, but
in each case it requires to determine the isomorphisms
(\ref{isomorphisms}). The theory has already been applied in
\cite{Yak} to a LQG control problem with a bounded operator $B$ that
is not a function of $\Lambda $. It has been shown how the control
approximation can be implemented through a distributed electronic
circuit. In \cite{LenYak1} and \cite{HuiYak} it has also been
applied to a one-dimensional array of cantilevers with regularly
spaced
actuators and sensors for which the operator $C$ is not a function of $
\Lambda $. The underlying model was derived with a multiscale
method, an implementation of the semi-decentralized control was
provided in the form of a periodic network of resistors, and the
numerical validations of the complete strategy was carried out. In
the present paper, we illustrate the theory with four simpler
examples: a simple heat equation with internal bounded
control and observation operators, a heat equation with an unbounded
control operator, a vibrating Euler-Bernoulli beam, and a heat
equation with boundary control.
\medskip
We notice that our method both improves and generalizes a
previous paper \cite{KadLenMr}. It was related to a specific
application, namely a vibration control problem for a plate with a
periodic distribution of piezoelectric actuators and sensors. There,
the general isomorphisms (\ref{isomorphisms}) and the general
factorization (\ref{operator factorizations}) were not
introduced, and $p(\Lambda )$ was approximated by a polynomial as in (\ref
{polynomial approximation}), which severely limited the
accuracy of the approximation. In both papers, the control method is
an LQR, but the theory is applicable to Riccati equations that may arise in a number of other control problems, for instance for $H_{2}$ or $H_{\infty }$ dynamic compensators. Other extensions are also possible; for instance, we may want to deal with functions of a non self-adjoint operator $\Lambda $. In such a case, another functional calculus, like those in \cite{MarSan} or in \cite{Haa}, could be used instead of the spectral theory. Other frameworks for control problems of infinite dimensional systems could also be used, for instance that of \cite{Lasiecka} for optimal control with unbounded observations and unbounded controls.
\medskip
Other techniques have already been established, see \cite{BamPag}, \cite
{PagBam}, \cite{Jov}, \cite{DanDul}, \cite{LanDAn} and the
references therein. But they are mostly focused on the infinite
length systems, see \cite{BamPag}, \cite{PagBam}, \cite{Jov} and
\cite{LanDAn} for systems governed by partial differential
equations, and \cite{DanDul} for discrete systems. Finally, in
\cite{LenMonYak} we developed another theoretical framework based on
the \textit{diffusive realization}, applicable to a broad range of linear operators on bounded or unbounded domains. In principle this approach makes it possible to cover general distributed control problems with internal or boundary control. However, in that first paper on the subject, only one-dimensional domains and linear operational equations (e.g. Lyapunov equations) are covered.
\medskip
The paper is organized as follows. Notations and basic definitions
are recalled in Section \ref{Notations}. In Section \ref{Bounded
Control operators} the abstract approximation method is stated in
the framework of bounded control and observation operators. The
framework of unbounded control operators is treated in Section
\ref{Unbounded control operators}. Some extensions are outlined in
Section \ref{Extensions}. Most proofs are concentrated in Section
\ref{Proofs}. The illustrative examples are detailed
in Section \ref{Application} and finally the paper is concluded by Section
\ref{conclusion}.
\section{Preliminary Results and Notations\label{Notations}}
The norm and the inner product of a Hilbert space $E$ are denoted by $||.||_{E}$ and $(.,.)_{E}.$ For a second Hilbert space $F,$ $\mathcal{L}(E,F)$ denotes the space of continuous linear operators from $E$ to $F.$ In addition, $\mathcal{L}(E,E)$ is denoted by $\mathcal{L}(E).$ One says that $\Phi \in \mathcal{L}(E,F)$ is an isomorphism from $E$ to $F$ if $\Phi $ is one-to-one and if its inverse is continuous.
\medskip
Since the approximation method of $P$ is based on the concept of
matrices of functions of a self-adjoint operator, this section is
devoted to their definition. Let $\Lambda $ be a self-adjoint
operator on a separable Hilbert space $X$ with domain $D(\Lambda )$,
we denote by $\sigma (\Lambda )$ its spectrum and by $I_{\sigma }=(\sigma _{\min },\sigma _{\max })\subset \mathbb{R}$ an open interval that includes $\sigma (\Lambda )$. We recall that if
$\Lambda $ is compact then $\sigma (\Lambda )$ is bounded and is
only constituted of eigenvalues $\lambda _{k}.$ They are the
solutions to the eigenvalue problem $\Lambda \phi _{k}=\lambda
_{k}\phi _{k}$ where $\phi _{k}$ is an eigenvector associated to
$\lambda _{k}$ chosen normed in $X$, i.e. such that $||\phi
_{k}||_{X}=1$. For a given real valued function $f$, continuous on
$I_{\sigma }$, $f(\Lambda )$ is the linear self-adjoint operator on
$X$ defined by
\begin{equation*}
f(\Lambda )z=\sum_{k=1}^{\infty }f(\lambda _{k})z_{k}\phi _{k}\quad \text{where }z_{k}=(z,\phi _{k})_{X},
\end{equation*}
with domain $ D(f(\Lambda ))=\{z\in X~|~\sum\limits_{k=1}^{\infty }\left\vert f(\lambda _{k})z_{k}\right\vert ^{2}~<\infty \}.$
Then, if $f$ is an $n_{1}\times n_{2}$ matrix of real valued functions $f_{ij},$ continuous on $I_{\sigma }$, $f(\Lambda )$ is a matrix of linear operators $f_{ij}(\Lambda )$ with domain
\begin{equation*}
D(f(\Lambda ))=\{z\in X^{n_{2}}~|~\sum\limits_{k=1}^{\infty }\sum\limits_{j=1}^{n_{2}}|f_{ij}(\lambda _{k})(z_{j})_{k}|^{2}~<\infty \quad \forall i=1 \ldots n_{1}\}.
\end{equation*}
\medskip
In the general case, where $\Lambda $ is not compact and where $f$
is still a continuous function, the self-adjoint operator $f(\Lambda
)$ is defined on $X$ by the Stieltjes integral
\begin{equation*}
f(\Lambda )=\int_{-\infty }^{+\infty }f(\lambda ) ~ d E_{\lambda },
\end{equation*}
and its domain is $D(f(\Lambda )) = \{z\in X ~ |~ \int_{-\infty
}^{+\infty
}|f(\lambda )|^{2}\text{ }d||E_{\lambda }z||_{X}^{2} ~ <\infty \} $ where $
E_{\lambda }$ is the spectral family associated to $\Lambda$, see \cite
{DauLio}. When $f$ is a matrix, $f(\Lambda )$ is a matrix of linear
operators with entries defined by the above formula and with domain
$$ D(f(\Lambda ))=\{z\in X^{n_{2}} ~ | ~ \int_{-\infty }^{+\infty }\sum\limits_{j=1}^{n_{2}}|f_{ij}(\lambda )|^{2} ~ d||E_{\lambda }z_{j}||_{X}^{2} ~ <\infty \quad \forall i=1 \ldots n_{1}\}.$$
\section{Bounded Control Operators\label{Bounded Control operators}}
In this section, we state the approximation result in the framework
of bounded input operators. We follow the mathematical setting
\cite{CurZwa} of the LQR problem (\ref{minimization}-\ref{state
equation}). So, $A$ is the infinitesimal generator of a continuous
semigroup on a separable Hilbert
space $Z$ with dense domain $D(A)$, $B\in \mathcal{L}(U,Z)$, $C\in \mathcal{L}(Z,Y)$ and $S\in \mathcal{L}(U,U)$ where $U$ and $Y$ are two Hilbert spaces. We assume that $(A,B)$ is stabilizable and that $(A,C)$ is detectable, in the sense that there exist $Q\in \mathcal{L}(Z,U)$ and $F\in \mathcal{L}(Y,Z)$ such that $A-BQ$ and
$A-FC$ are the infinitesimal generators of two uniformly
exponentially stable continuous semigroups. For each $z_{0}\in Z$
the LQR problem (\ref{minimization}-\ref{state equation}) admits a
unique solution $\uu=-S^{-1}B^{\ast }Pz$ where $P\in \mathcal{L}(Z)$ is the unique self-adjoint nonnegative solution of the \textbf{ORE}
\begin{equation}
\left( A^{\ast }P+PA-PBS^{-1}B^{\ast }P+C^{\ast }C\right) z=0
\label{eq.Riccati.opercontinuz}
\end{equation}
for all $z\in D(A).$ The adjoint $A^{\ast }$ of the unbounded
operator $A$ is defined from $D(A^{\ast })\subset Z$ to $Z$ by the
equality $(A^{\ast
}z,z^{\prime })_{Z}=(z,Az^{\prime })_{Z}$ for all $z\in D(A^{\ast })$ and $
z^{\prime }\in D(A)$. The adjoint $B^{\ast }\in \mathcal{L}(Z,U)$ of the bounded operator $B$ is defined by $(B^{\ast }z,u)_{U}=(z,Bu)_{Z}$, the adjoint $C^{\ast }\in \mathcal{L}(Y,Z)$ being defined similarly.
\medskip
Now, we state specific assumptions for the approximation method. Here, $\Lambda $ is a given self-adjoint operator on a separable Hilbert space $X$ which is chosen to be easily approximable on a semi-decentralized architecture. Generally, $\Lambda $ is chosen with regard to $A,$ then $\Phi _{Z}$ and $\Phi _{U}$ can be chosen so as to also have a natural semi-decentralized approximation.
\begin{assumption}[H1]
There exist three integers $n_{Z},$ $n_{U}$ and $n_{Y}\in \mathbb{N}^{\ast }$, three isomorphisms $\Phi _{Z}\in \mathcal{L}(X^{n_{Z}},Z),$ $\Phi _{U}\in \mathcal{L}(X^{n_{U}},U)$ and $\Phi _{Y}\in \mathcal{L}(X^{n_{Y}},Y)$ and four matrices of functions $a(\lambda )\in \mathbb{R}^{n_{Z}\times n_{Z}},$ $b(\lambda )\in \mathbb{R}^{n_{Z}\times n_{U}}$, $c(\lambda )\in \mathbb{R}^{n_{Y}\times n_{Z}}$ and $s(\lambda )\in \mathbb{R}^{n_{U}\times n_{U}}$ continuous on $I_{\sigma }$ such that
\begin{equation*}
A=\Phi _{Z}a(\Lambda )\Phi _{Z}^{-1}, \quad B=\Phi _{Z}b(\Lambda )\Phi _{U}^{-1}, \quad C=\Phi _{Y}c(\Lambda )\Phi _{Z}^{-1} \quad \text{and} \quad S=\Phi _{U}s(\Lambda )\Phi _{U}^{-1}.
\end{equation*}
\end{assumption}
One of the consequences of this assumption, for a system governed by a partial differential equation posed in a domain $\Omega ,$ is that both the control and the observation must be distributed throughout the domain, in conformity with what has been stated from the beginning.
\begin{remark}
$\text{ }$
\begin{enumerate}
\item In the case where all operators are functions of $\Lambda $, the isomorphisms $\Phi $ are either not needed or can be chosen as functions of $\Lambda $. In both cases $P$ is also a function $p$ of $\Lambda $.
\item Introducing the isomorphisms $\Phi _{Z}$, $\Phi _{Y}$ and $\Phi _{U}$ makes it possible to deal with problems where the operators $A$, $B$ and $C$ are not functions of $\Lambda $.
\item When control is distributed over the entire domain, Assumption (H1) is generally satisfied. In Section \ref{exemple 3}, there is an example of an observation operator $C$ that is not a function of $\Lambda $, while in the paper \cite{LenYak1} it is the case for the control operator $B$.
\item For boundary control or observation problems, it is impossible to find such isomorphisms. Nevertheless, in Subsection \ref{exemple 4} we show how to proceed to address some boundary control problems.
\item Multi-scale models with controls at the micro scale, as in \cite{LenYak1} and \cite{HuiYak}, are also possible applications.
\end{enumerate}
\end{remark}
\noindent We introduce the \textbf{ARE}
\begin{equation}
a^{T}\left( \lambda \right) p+pa\left( \lambda \right) -pb\left( \lambda \right) s^{-1}\left( \lambda \right) b^{T}\left( \lambda \right) p+c^{T}\left( \lambda \right) c\left( \lambda \right) =0\text{.} \label{eq.Riccati.algebrique}
\end{equation}
\begin{assumption}[H2]
For all $\lambda \in I_{\sigma }$, the \textbf{ARE} (\ref{eq.Riccati.algebrique}) admits a unique nonnegative symmetric solution denoted by $p(\lambda )$.
\end{assumption}
\begin{remark}
\textbf{This assumption is stronger than the typical sufficient condition for the mere existence of a solution to the Riccati equation [give ref].}
\end{remark}
We make the following choices for the inner products of $Z$, $U$ and $Y$:
\begin{equation*}
\left( z,z^{\prime }\right) _{Z}=\left( \Phi _{Z}^{-1}z,\Phi _{Z}^{-1}z^{\prime }\right) _{X^{n_{Z}}}, ~ \left( u,u^{\prime }\right) _{U}=\left( \Phi _{U}^{-1}u,\Phi _{U}^{-1}u^{\prime }\right) _{X^{n_{U}}} ~ \text{and} ~ \left( y,y^{\prime }\right) _{Y}=\left( \Phi _{Y}^{-1}y,\Phi _{Y}^{-1}y^{\prime }\right) _{X^{n_{Y}}}.
\end{equation*}
Thus $P$, $Q$ and $p$, $q$ are related as follows.
\begin{theorem}
\label{Th formul P bounded B}If (H1) and (H2) are fulfilled then
\begin{equation*}
P=\Phi _{Z}~p(\Lambda )~\Phi _{Z}^{-1}\quad \text{and}\quad \uu=-Qz
\end{equation*}
where the controller $Q$ admits the factorization $Q=\Phi _{U}q(\Lambda )\Phi _{Z}^{-1}$ with $q(\Lambda )=s^{-1}(\Lambda )b^{T}(\Lambda )p(\Lambda )\text{.}$
\end{theorem}
Now, we focus on a semi-decentralized approximation of $Q$, which reduces to providing such an approximation for $q(\Lambda )$. We restrict the presentation to the case of bounded operators $\Lambda $ since they have a bounded spectrum. This is sufficient for applications to systems governed by partial differential equations in bounded domains.
\begin{assumption}[H3]
The operator $\Lambda $ is bounded and its spectrum $\sigma (\Lambda )$ is bounded, so there exists $R>0$ with $\sigma (\Lambda )\subset (-R,R).$
\end{assumption}
This assumption can be relaxed, see Section \ref{extension}.
\begin{assumption}[H4]
The operators $\Phi _{Z}$, $\Phi _{Z}^{-1},$ $\Lambda $ and $(\xi I-\Lambda )^{-1}$ admit semi-decentralized approximations for all $\xi \in \mathbb{C}$ with $|\xi |=R$.
\end{assumption}
Now, we introduce two successive approximations $q_{N}(\Lambda )$ and $
q_{N,M}(\Lambda )$ of $q(\Lambda )$ that play a key role in our
method.
\medskip
\noindent $\vartriangleright $ \textit{The rational approximation }$q_{N}(\Lambda )$\textit{:} Since the interval $I_{\sigma }$ is bounded, each entry $q_{ij}$ of the matrix $q$ admits a rational approximation on $I_{\sigma }$. This defines a matrix of rational approximations of $q(\lambda )$,
\begin{equation}
\displaystyle q_{N}\left( \lambda \right) =\frac{\sum\limits_{k=0}^{N^{N}}d_{k}\lambda ^{k}}{\sum\limits_{k^{\prime }=0}^{N^{D}}d_{k^{\prime }}^{\prime }\lambda ^{k^{\prime }}}, \label{apprxfractional}
\end{equation}
to be understood componentwise, so each $d_{k}$, $d_{k^{\prime }}^{\prime }$ is a matrix and $N=\left( N^{N},N^{D}\right) $ is a pair of matrices of polynomial degrees. The particular case $N^{D}=0$ corresponds to a classical polynomial approximation. For any $\eta >0$ the degrees of approximations can be chosen so that the uniform estimate
\begin{equation}
\sup_{\lambda \in I_{\sigma }}\left\vert q\left( \lambda \right) -q_{N}\left( \lambda \right) \right\vert \leq C_{1}\left( q\right) \eta \label{estim1}
\end{equation}
holds.
\medskip
\noindent $\vartriangleright $ \textit{Approximation }$q_{N,M}(\Lambda )$ \textit{by quadrature of the Cauchy integral:} For any complex valued function $g(\theta )$ continuous on $[0,2\pi ],$ we introduce $I_{M}(g)=\displaystyle\sum\limits_{\ell =1}^{M}\omega _{\ell }g(\theta _{\ell })$ a quadrature rule for the integral $I\left( g\right) =\int_{0}^{2\pi }g\left( \theta \right) ~d\theta $, $(\theta _{\ell })_{\ell }$ denoting the nodes of a regular subdivision of $[0,2\pi ]$ and $\omega _{\ell }$ the associated quadrature weights. The quadrature rule is assumed to satisfy an error estimate as
\begin{equation}
\left\vert I\left( g\right) -I_{M}\left( g\right) \right\vert \leq C_{2}\left( g\right) \eta \text{.} \label{estim2}
\end{equation}
Let $z\in X^{n_{Z}}$ and let $\xi =\xi _{1}+i\xi _{2}$ run over a sufficiently regular complex contour enlacing $\sigma (\Lambda )$ and not surrounding any pole of $q_{N},$ parameterized by a parameter varying in $[0,2\pi ]$. We further introduce the solution $(v_{i})_{i=1,2}$ of the system
\begin{equation}
\left\{
\begin{array}{c}
\xi _{1}v_{1}-\xi _{2}v_{2}-\Lambda v_{1}=\Re e\left( -i\xi ^{\prime }q_{N}\left( \xi \right) \right) z, \\
\xi _{2}v_{1}+\xi _{1}v_{2}-\Lambda v_{2}=\Im m\left( -i\xi ^{\prime }q_{N}\left( \xi \right) \right) z,
\end{array}
\right. \label{transfo}
\end{equation}
and the second approximation of $q(\Lambda )$ through its realizations
\begin{equation}
q_{N,M}(\Lambda )z=\frac{1}{2\pi }\sum_{\ell =1}^{M}\omega _{\ell }v_{1}^{\ell }\text{.} \label{pNM}
\end{equation}
We notice that two approximations $p_{N}$ and $p_{N,M}$ of the
function $p$ can be constructed by following the same steps. The
next theorem states the approximations of the operators $P$ and $Q.$
\begin{theorem}
\label{Th approx P bounded B}Under the assumptions (H1-H4), $P$ and $Q$ can be approximated by one of the two semi-decentralized approximations
\begin{eqnarray*}
P_{N}=\Phi _{Z}p_{N}\left( \Lambda \right) \Phi _{Z}^{-1} &\text{ and }& \quad Q_{N}=\Phi _{U}q_{N}\left( \Lambda \right) \Phi _{Z}^{-1} \\
\text{or } P_{N,M}=\Phi _{Z}p_{N,M}\left( \Lambda \right) \Phi _{Z}^{-1} & \text{ and } & \quad Q_{N,M} =\Phi _{U}q_{N,M}\left( \Lambda \right) \Phi _{Z}^{-1}\text{.}
\end{eqnarray*}
Moreover, for any $\eta >0,$ there exist $N$ and $M$ such that
\begin{eqnarray*}
\left\| P-P_{N}\right\| _{\mathcal{L}\left( Z\right) } &\leq &C_{3}\eta \text{, } \left\| Q-Q_{N}\right\| _{\mathcal{L}\left( Z,U\right) }\leq C_{3}^{\prime }\eta \\
\text{and }\left\| P-P_{N,M}\right\| _{\mathcal{L}\left( Z\right) } &\leq &C_{4}\eta \text{, }\left\| Q-Q_{N,M}\right\| _{\mathcal{L}\left( Z,U\right) }\leq C_{4}^{\prime }\eta ,
\end{eqnarray*}
$C_{3}$, $C_{3}^{\prime }$ and $C_{4},$ $C_{4}^{\prime }$ being independent of $\eta ,$ $N$ and $M$.
\end{theorem}
\begin{remark}
In the case of a polynomial approximation, i.e. $N^{D}=0$, we can set a circle as contour $\xi (\theta )=Re^{i\theta }$. For actual rational approximations, the contour must leave the poles outside, so we choose an ellipse centered at $\frac{R_{1}}{2}$ parameterized by $\xi (\theta )=\frac{R_{1}}{2}\left( 1+\cos \left( \theta \right) \right) +iR_{2}\sin \left( \theta \right) $ where $R_{1}$ and $R_{2}$ stand for the major and minor radii and $R_{2}$ is small enough.
\end{remark}
\begin{remark}
\label{Rk KadLen}The approximation of $p$ used in \cite{KadLenMr} is based on Taylor series, so it is applicable only if the interval $I_{\sigma }$ is sufficiently small. The approximation proposed in our paper does not suffer from this drawback.
\end{remark}
\begin{remark}
\label{Rk BamPag}In the case where the solution $P$ of a Riccati equation is a kernel operator (see \cite{Lio} for optimal control of systems governed by partial differential equations), i.e. $Pz(x)=\int_{\Omega }\overline{p}(x,x^{\prime })z(x^{\prime })dx^{\prime }$, and if $\Lambda $ is a compact operator then the kernel may be decomposed on a basis of eigenvectors of $\Lambda $,
\begin{equation*}
\overline{p}(x,x^{\prime })=\sum\limits_{k=1}^{\infty }p(\lambda _{k})\phi _{k}(x)\phi _{k}(x^{\prime }).
\end{equation*}
The truncation technique used in \cite{BamPag} can be applied to build a semi-decentralized approximation of $P$. However, when the decay of $\overline{p}$ is not very fast, this technique is not efficient, see for example the case $p(\lambda )=\lambda $ that may arise from a LQR problem.
\end{remark}
For concrete real-time computations one can use either of the two formulae (
\ref{apprxfractional}) or (\ref{pNM}) given that both are
semi-decentralized, but we prefer the second since it does not make
use of powers of $\lambda .$ The reason will become clearer when
discretizing. In a real-time computation, the realization $q_{N,M}(\Lambda )z$ requires solving $M$ systems (\ref{transfo}) corresponding to $M$ complex values $\xi (\theta _{\ell })$. So the parameter $M$ is essential to evaluate the cost of our algorithm. The matrix $q_{N}$ is pre-computed off-line once and for all and we choose $N$ sufficiently large that $q_{N}$ is a very good approximation of $q$. Consequently, $M$ is the only parameter that influences the accuracy of the method, apart from the spatial discretization parameter, which is discussed now.
\medskip
The end of the section is devoted to spatial discretization. For the
sake of simplicity, the interval is meshed with regularly spaced
nodes separated by a distance $h$.
\medskip
\noindent $\vartriangleright $ \textit{Spatial discretization with polynomial approximation:} First, we introduce $\Lambda _{h}^{k}$ the finite differences discretizations of $\Lambda ^{k}$, with $k=1\cdots N$. For $N^{D}=0$, the discretization $q_{N,h}$ of $q_{N}$ in (\ref{apprxfractional}) can be written as
\begin{equation*}
q_{N,h}z_{h}=\sum_{k=0}^{N}d_{k}\Lambda _{h}^{k}z_{h},
\end{equation*}
where $z_{h}$ is the vector of nodal values of $z$. Their discretization yields very high errors because of the powers of $\Lambda $. This can be avoided by using the Cauchy formula, i.e. the equation (\ref{transfo}).
\medskip
\noindent $\vartriangleright $ \textit{Spatial discretization with Cauchy formula approximation:} For each quadrature point $\xi _{\ell }:=\xi _{1,\ell }+i\xi _{2,\ell }$, the discrete approximation $\left( v_{1,h}^{\ell },v_{2,h}^{\ell }\right) $ of $\left( v_{1}^{\ell },v_{2}^{\ell }\right) $ is the solution of the discrete set of equations
\begin{equation}
\left\{
\begin{array}{c}
\xi _{1,\ell }v_{1,h}^{\ell }-\xi _{2,\ell }v_{2,h}^{\ell }-\Lambda _{h}v_{1,h}^{\ell }=\Re e\left( -i\xi _{\ell }q_{N}\left( \xi _{\ell }\right) \right) z_{h}, \\
\xi _{2,\ell }v_{1,h}^{\ell }+\xi _{1,\ell }v_{2,h}^{\ell }-\Lambda _{h}v_{2,h}^{\ell }=\Im m\left( -i\xi _{\ell }q_{N}\left( \xi _{\ell }\right) \right) z_{h}\text{.}
\end{array}
\right. \label{Pbdiscspatiale}
\end{equation}
Thus we deduce the discretization $q_{N,M,h}$ of the approximation $q_{N,M}$ in (\ref{pNM}),
\begin{equation}
q_{N,M,h}z_{h}=\frac{1}{2\pi }\sum_{\ell =1}^{M}\omega _{\ell }v_{1,h}^{\ell }\text{.} \label{discspatiale}
\end{equation}
Under the Assumption (H4), we introduce $\Phi _{U,h}$ and $\Phi
_{Z,h}$ the semi-decentralized approximations of $\Phi _{U}$ and
$\Phi _{Z}$. So, the approximations of $\uu_{N}$ and $\uu_{N,M}$ by
a spatial discretization are
\begin{equation}
\uu_{N,h}=-\Phi _{U,h}q_{N,h}\Phi _{Z,h}^{-1}z_{h}\quad \text{and}\quad \uu_{N,M,h}=-\Phi _{U,h}q_{N,M,h}\Phi _{Z,h}^{-1}z_{h}.
\label{discspatialeu}
\end{equation}
This constitutes two different final semi-decentralized approximations of $
\uu$.
\begin{remark}
\label{remarque10} The approximations $\uu_{N,h}$ and $\uu_{N,M,h}$ are given in the general case where the isomorphisms $\Phi _{Z}$ and $\Phi _{U}$ are not functions of $\Lambda $ only. Therefore, \textbf{we use our approximation technique to represent} $q(\Lambda )$. In some cases $\Phi _{Z}$ and $\Phi _{U}$ are functions of $\Lambda $; then $Q$ is also, and the approximation is developed directly on it, which we denote by $k(\Lambda )$,
\begin{equation}
\uu_{N,h}=-k_{N,h}(\Lambda )z_{h}\quad \text{and}\quad \uu_{N,M,h}=-k_{N,M,h}(\Lambda )z_{h}\text{.} \label{discspatialeu1}
\end{equation}
In the case where $\Phi _{Z}$ and $\Phi _{U}^{-1}Q$ are functions of $\Lambda $, the approximation is developed on $\Phi _{U}^{-1}Q$; we will also denote it by $k(\Lambda )$ without risk of confusion,
\begin{equation*}
\uu_{N,h}=-\Phi _{U,h}k_{N,h}(\Lambda )z_{h}\quad \text{and}\quad \uu_{N,M,h}=-\Phi _{U,h}k_{N,M,h}(\Lambda )z_{h}.
\end{equation*}
\end{remark}
\section{Unbounded Control Operators \label{Unbounded control operators}}
When the input operator $B$ is unbounded from $U$ to $Z$ and the observation operator $C$ is bounded from $Z$ to $Y$, we use the framework of \cite{BanIto} where $V$ is another Hilbert space, $V^{\prime }$ is its dual space with respect to the pivot space $Z,$ $A\in \mathcal{L}(V,V^{\prime }),$ $B\in \mathcal{L}(U,V^{\prime })$ and $C\in \mathcal{L}(Z,Y)$. A number of other technical assumptions are not detailed here. The state equations are written in the sense of $V^{\prime }$ with $z_{0}\in Z.$ The optimal control is $\uu=-B^{\ast }Pz$ where $P$ is the unique nonnegative solution of the Riccati operatorial equation
\begin{equation}
\left( A^{\ast }P+PA-PBB^{\ast }P+C^{\ast }C\right) v=0, \label{eq.Riccatinonborne}
\end{equation}
for all $v\in V.$ The adjoint $A^{\ast }\in \mathcal{L}(V,V^{\prime })$ is defined by $\left\langle A^{\ast }v,v^{\prime }\right\rangle _{V^{\prime },V}=\left\langle v,Av^{\prime }\right\rangle _{V,V^{\prime }}$ while $B^{\ast }\in \mathcal{L}(V^{\prime },U)$ is defined as the adjoint of a bounded operator. We keep the same inner products for $Z,$ $U$ and $Y$, and those of $V$ and $V^{\prime }$ are
\begin{equation*}
\left( v,v^{\prime }\right) _{V}=\left( \Phi _{V}^{-1}v,\Phi _{V}^{-1}v^{\prime }\right) _{X^{n_{Z}}}\quad \text{and}\quad \left( v,v^{\prime }\right) _{V^{\prime }}=\left( \Phi _{V^{\prime }}^{-1}v,\Phi _{V^{\prime }}^{-1}v^{\prime }\right) _{X^{n_{Z}}}.
\end{equation*}
Moreover, we choose $J=\Phi _{V}\Phi _{V^{\prime }}^{-1}$ as the canonical isomorphism from $V^{\prime }$ to $V$ and the duality product between $V$ and $V^{\prime }$ is
\begin{equation*}
\left\langle v,v^{\prime }\right\rangle _{V,V^{\prime }}=(v,Jv^{\prime })_{V}.
\end{equation*}
\begin{assumption}[H1']
Same statement as (H1) except that
\begin{equation*}
A=\Phi _{V^{\prime }}a(\Lambda )\Phi _{V}^{-1}\quad \text{and}\quad B=\Phi _{V^{\prime }}b(\Lambda )\Phi _{U}^{-1}
\end{equation*}
where $\Phi _{V}\in \mathcal{L}(X^{n_{Z}},V)$ and $\Phi _{V^{\prime }}\in \mathcal{L}(X^{n_{Z}},V^{\prime })$ are two additional isomorphisms. Moreover,
\begin{equation*}
\Phi _{V}=\phi _{V}(\Lambda )\text{, }\quad \Phi _{Z}=\phi _{Z}(\Lambda )\quad \text{and}\quad \Phi _{V^{\prime }}=\phi _{V^{\prime }}(\Lambda )
\end{equation*}
are some functions of $\Lambda $.
\end{assumption}
Here, the \textbf{ARE} is
\begin{gather}
\phi _{V^{\prime }}\left( \lambda \right) a^{T}\left( \lambda
\right) ~ p ~ \phi _{V^{\prime }}^{-1}\left( \lambda \right) \phi
_{V}\left( \lambda \right) +\phi _{V}\left( \lambda \right) ~ p ~
a\left( \lambda \right)
\notag \\
-\phi _{V}\left( \lambda \right) ~ p ~ b\left( \lambda \right)
s^{-1}\left( \lambda \right) b^{T}\left( \lambda \right) ~ p ~ \phi
_{V^{\prime }}^{-1}\left( \lambda \right) \phi _{V}\left( \lambda
\right)
\label{eq.Riccati.algebriquenonborne} \\
+\phi _{Z}\left( \lambda \right) c^{T}\left( \lambda \right) c\left(
\lambda \right) \phi _{Z}^{-1}\left( \lambda \right) \phi _{V}\left(
\lambda \right) =0\text{.} \notag
\end{gather}
\begin{assumption}[H2']
For all $\lambda \in I_{\sigma }$, the \textbf{ARE} (\ref{eq.Riccati.algebriquenonborne}) admits a unique nonnegative solution denoted by $p(\lambda )$.
\end{assumption}
\begin{theorem}
\label{Th formul P unbounded B}If (H1',H2') are fulfilled, then
\begin{equation*}
P=\Phi _{V}~p(\Lambda )~\Phi _{V^{\prime }}^{-1}\quad \text{and}\quad \uu=-Qz
\end{equation*}
where $Q$ admits the factorization $Q=\Phi _{U}q(\Lambda )$ with $q(\Lambda )=b^{T}(\Lambda )\phi _{V^{\prime }}^{-1}\phi _{V}p(\Lambda )\phi _{V^{\prime }}^{-1}.$
\end{theorem}
The following assumptions are necessary for the semi-decentralized
approximation of $P$.
\begin{assumption}[H4']
Same statement as (H4), completed by: $\Phi _{V}$, $\Phi _{V^{\prime }}$ and $\Phi _{V^{\prime }}^{-1}$ admit a semi-decentralized approximation.
\end{assumption}
In the next statement, the approximations $q_{N}$ and $q_{N,M}$ of
$q$ are built according to the formulae (\ref{apprxfractional}) and
(\ref{pNM}).
\begin{theorem}
\label{Th Approx P unbounded B}Under the Assumptions (H1',H2',H3,H4'), $P$ and $Q$ can be approximated by one of the two semi-decentralized approximations
\begin{eqnarray*}
P_{N}=\Phi _{V}p_{N}(\Lambda )\Phi _{V^{\prime }}^{-1} & \text{and } & Q_{N}=\Phi _{U}q_{N}(\Lambda ), \\
\text{or } P_{N,M}=\Phi _{V}p_{N,M}(\Lambda )\Phi _{V^{\prime }}^{-1} & \text{and } & Q_{N,M}=\Phi _{U}q_{N,M}(\Lambda ).
\end{eqnarray*}
Moreover, for any $\eta >0,$ there exist $N$ and $M$ such that
\begin{eqnarray*}
\left\|P-P_{N}\right\|_{\mathcal{L}(V^{\prime },V)} &\leq& C_{3}\eta , \quad \left\|Q-Q_{N}\right\|_{\mathcal{L}(V^{\prime },U)} \leq C^{\prime}_{3}\eta , \\
\text{and } \left\|P-P_{N,M}\right\|_{\mathcal{L}(V^{\prime },V)}&\leq& C_{4}\eta , \quad \left\|Q-Q_{N,M}\right\|_{\mathcal{L}(V^{\prime },U)} \leq C^{\prime}_{4}\eta ,
\end{eqnarray*}
$C_{3}$, $C^{\prime}_{3}$, $C_{4}$ and $C^{\prime}_{4}$ being independent of $\eta ,$ $N$ and $M$.
\end{theorem}
\begin{remark}
An example of unbounded control operators is given in Subsection \ref{exemple 2}.
\end{remark>
The approximations of $\uu$ and $\uu_{h}$ are constructed using the
same method as in the case of bounded control operators.
\section{Extensions\label{Extensions}}
\label{extension} In this section, we mention possible extensions of
the theoretical framework presented above.
The same strategy applies directly to dynamic estimators and compensators derived from the $H_{2}$ or the $H_{\infty }$ theories. For instance, the condition $\rho \left( P\overline{P}\right) <\gamma $ on the spectral radius of the product of the solutions of the two Riccati equations can be expressed in the form of a condition on the spectral radius of the product of two parameterized matrices $\rho \left( p\left( \lambda \right) \overline{p}\left( \lambda \right) \right) <\gamma $ for all $\lambda \in I_{\sigma }$, see Lemma \ref{funct calc generalized} (6).
\medskip
The spectral theory of self-adjoint operators has been chosen for its relative simplicity. We are aware of its limitations, so we mention possible extensions based on more general functional calculi like those developed in \cite{MarSan} or \cite{Haa}, to cite only two.
\medskip
Other frameworks for the well-posedness of the LQR problem can be used. In particular, that of \cite{Lasiecka} for optimal control with unbounded observation and control may be incorporated in this approach.
\section{Proofs\label{Proofs}}
First, we remark that for $E$ and $F$ two Hilbert spaces and $\Phi $ an isomorphism from $E$ to $F,$ if $F$ is equipped with the inner product $(z,z^{\prime })_{F}=(\Phi ^{-1}z,\Phi ^{-1}z^{\prime })_{E}$ then $\Phi ^{\ast }=\Phi ^{-1}$. In the next lemma, we state a few functional calculus properties.
\begin{lemma}
\label{funct calc}For $\Lambda $ a self-adjoint operator on a separable Hilbert space $X$, and for $f$, $g$ two functions continuous on $I_{\sigma }$
\begin{enumerate}
\item $f(\Lambda )$ is self-adjoint;
\item for $\mu \in \mathbb{R}$, $(\mu f)(\Lambda )=\mu f(\Lambda )$ on $D(f(\Lambda ))$;
\item $(f+g)(\Lambda )=f(\Lambda )+g(\Lambda )$ on $D(f(\Lambda ))\cap D(g(\Lambda ))$;
\item $g(\Lambda )f(\Lambda )=(g$ $f)(\Lambda )$ when the range of $f(\Lambda )$ is included in $D(g(\Lambda ))$;
\item if $f\neq 0$ in $I_{\sigma }$ then $f(\Lambda )^{-1}$ exists and is equal to $\frac{1}{f}(\Lambda )$;
\item if $f(\lambda )\geq 0$ for all $\lambda \in I_{\sigma }$ then $f(\Lambda )\geq 0$;
\item $||f(\Lambda )x||_{X}^{2}$ $\leq $ $\sup\limits_{\lambda \in I_{\sigma }}|f(\lambda )|^{2}||x||_{X}^{2}$ for all $x\in D(f(\Lambda ))$.
\end{enumerate}
\end{lemma}
\begin{proof}
The proofs of the first five statements can be found in
\cite{DauLio}. We
prove (\emph{6}) i.e. that $\displaystyle\sum\limits_{i,j=1}^{n}(f_{ij}(
\Lambda )z_{j},z_{i})_{X}\geq 0$. First, assume that $I_{\sigma }$
is bounded. We recall that for a function $g$ continuous on
$I_{\sigma }$ and for $z\in X,$ the integral $\int_{\sigma _{\min
}}^{\sigma _{\max }}g(\lambda )dE_{\lambda }z$ is defined as the
strong limit in $X$ of the
Riemann sums, see \cite{DauLio}, $\displaystyle\sum\limits_{k=1}^{p}g(
\lambda _{k}^{\prime })(E_{\lambda _{k+1}}-E_{\lambda _{k}})z$ when $
\max\limits_{k}|\lambda _{k+1}-\lambda _{k}|$ vanishes, where
$\lambda _{k}^{\prime }\in \lbrack \lambda _{k},\lambda _{k+1}]$ and
$\sigma _{\min
}=\lambda _{1}<\lambda _{2}<\dots <\lambda _{p}=\sigma _{\max }$. When $
I_{\sigma }$ is not bounded, we use a subdivision of a bounded interval $I_{
\widetilde{\sigma }}=(\widetilde{\sigma }_{\min },\widetilde{\sigma
}_{\max })$ and the integral $\int_{\sigma _{\min }}^{\sigma _{\max
}}g(\lambda )dE_{\lambda }z$ is defined by passing to the limit in
the integral bounds.
Let us establish that the Riemann sum $\displaystyle\sum\limits_{i,j=1}^{n}
\sum\limits_{k=1}^{p}f_{ij}(\lambda _{k}^{\prime })((E_{\lambda
_{k+1}}z_{j},z_{i})-(E_{\lambda _{k}}z_{j},z_{i}))$ is nonnegative,
so the result will follow by passing to the limit. Since
$(E_{\lambda _{k+1}}z_{j},z_{i})-(E_{\lambda
_{k}}z_{j},z_{i})=((E_{\lambda
_{k+1}}-E_{\lambda _{k}})z_{j},z_{i})=(y_{j}^{k},y_{i}^{k})$ where $
y_{j}^{k}=(E_{\lambda _{k+1}}-E_{\lambda _{k}})z_{j},$ then the
Riemann sum
is the sum over $k$ of the nonnegative terms $\displaystyle
\sum\limits_{i,j=1}^{n}f_{ij}(\lambda _{k}^{\prime
})(y_{j}^{k},y_{i}^{k})$ which in turn is nonnegative.
Now we prove (\emph{7}):
\begin{eqnarray*}
||f(\Lambda )x||_{X}^{2} =\int_{\sigma _{\min }}^{\sigma _{\max
}}|f(\lambda )|^{2}\text{ }d||E_{\lambda }x||_{X}^{2} &\leq
&\sup_{\lambda \in I_{\sigma }}|f(\lambda )|^{2}\int_{\sigma _{\min
}}^{\sigma _{\max
}}d||E_{\lambda }x||_{X}^{2} \\
&\leq &\sup_{\lambda \in I_{\sigma }}|f(\lambda )|^{2}||x||_{X}^{2}.
\end{eqnarray*}
\end{proof}
For two integers $n_{E}$, $n_{F}$, a $n_{E}\times n_{F}$ matrix $f$
of functions continuous on $I_{\sigma }$ and two Hilbert spaces $E$,
$F$ isomorphic with $X^{n_{E}}$ and $X^{n_{F}}$ by $\Phi _{E}^{-1}$
and $\Phi _{F}^{-1}$ respectively, we introduce the so-called
generalized matrix of functions of $\Lambda $: $f^{\phi }(\Lambda
)=\Phi _{E}f(\Lambda )\Phi _{F}^{-1}\in \mathcal{L}(F,E)$ with
domain $D(f^{\phi }(\Lambda ))=\Phi _{F}D(f(\Lambda ))$. For the
sake of shortness, the spaces $E$ and $F$ do not appear explicitly
in the notation $f^{\phi }$, so they will be associated to each
matrix at the beginning of their use. Then, no confusion will be
possible. In the next lemma, we state some properties of generalized
matrices of functions.
\begin{lemma}
\label{funct calc generalized}For any generalized matrices of functions of $
\Lambda ,$ $f^{\phi }(\Lambda )=\Phi _{E}f(\Lambda )\Phi _{F}^{-1}$ and $
g^{\phi }(\Lambda )=\Phi _{E}g(\Lambda )\Phi _{F}^{-1}$, and any
real number $\mu $,
\begin{enumerate}
\item $(f^{\phi }(\Lambda ))^{\ast }=(f^{T})^{\phi }(\Lambda )$;
\item $\mu f^{\phi }(\Lambda )=(\mu f)^{\phi }(\Lambda )$ on $D(f^{\phi
}(\Lambda ))$;
\item $f^{\phi }(\Lambda )+g^{\phi }(\Lambda )=(f+g)^{\phi }(\Lambda )$ on $
D(f^{\phi }(\Lambda ))\cap D(g^{\phi }(\Lambda ))$;
\item for another Hilbert space $G$ and $g^{\phi }(\Lambda )=\Phi
_{F}g(\Lambda )\Phi _{G}^{-1}$, $f^{\phi }(\Lambda )g^{\phi
}(\Lambda )=(fg)^{\phi }(\Lambda )=\Phi _{E}(fg)(\Lambda )\Phi
_{G}^{-1}$ when the range $R(f^{\phi }(\Lambda ))\subset D(g^{\phi
}(\Lambda ))$;
\item when $F=E,$ if $f(\lambda )\geq 0$ for all $\lambda \in I_{\sigma }$
then $f^{\phi }(\Lambda )\geq 0$;
\item $\sigma (f^{\phi }(\Lambda ))=\sigma (f)$.
\end{enumerate}
\end{lemma}
\begin{proof}
The properties (\emph{1--4}) are direct consequences of Lemma \ref{funct calc}
. For the derivation of (\emph{5}) we remark that for $z\in
D(f^{\phi}(\Lambda ))\subset E\text{, }(f^{\phi }(\Lambda
)z,z)_{E}=(f(\Lambda )\Phi _{E}^{-1}z,\Phi _{E}^{-1}z)_{X^{n_{E}}}$
which is nonnegative if $f(\Lambda ) $ is nonnegative. The
conclusion uses Lemma \ref{funct calc} (\emph{6}). Finally, the
derivation of (\emph{6}) is a direct consequence of the definition
of the spectrum of an operator.
\end{proof}
\begin{proof}[Proof of Theorem \protect\ref{Th formul P bounded B}]
From Lemma \ref{funct calc generalized} (\emph{1}) and (\emph{4}),
\begin{equation*}
A^{\ast }=\Phi _{Z}a^{T}(\Lambda )\Phi _{Z}^{-1},\quad BB^{\ast
}=\Phi _{Z}b(\Lambda )b^{\ast }(\Lambda )\Phi _{Z}^{-1}\quad
\text{and} \quad C^{\ast }C=\Phi _{Z}c^{\ast }(\Lambda )c(\Lambda
)\Phi _{Z}^{-1}
\end{equation*}
are some generalized matrices of functions of $\Lambda $ on $Z$. We
write
\begin{equation*}
e(\lambda )=a^{T}(\lambda )~p(\lambda )+p(\lambda )~a(\lambda
)-p(\lambda )~b(\lambda )b^{T}(\lambda )~p(\lambda )+c^{T}(\lambda
)c(\lambda ),
\end{equation*}
so by construction $e(\lambda )=0$ and $e(\Lambda )=0.$ Multiplying
the last equality by $\Phi _{Z}$ to the left and by $\Phi _{Z}^{-1}$
to the right, using Lemma \ref{funct calc generalized} (\emph{3})
and (\emph{4}), and
posing $\widetilde{P}=\Phi _{Z}~p(\Lambda )~\Phi _{Z}^{-1}$ we find that $
\widetilde{P}$ satisfies the Riccati equation (\ref{eq.Riccati.opercontinuz}
). Next, the nonnegativity and symmetry of $p$ with Lemma \ref{funct
calc generalized} (\emph{1}) and (\emph{5}) yield the nonnegativity
and
self-adjointness of $\widetilde{P}$. Finally, we conclude that $P=\widetilde{
P}$ thanks to uniqueness of the solution, so $\uu=-Qz$ where
$Q=S^{-1}B^{\ast }\widetilde{P}=\Phi _{U}s^{-1}(\Lambda
)b^{T}(\Lambda )p(\Lambda )\Phi _{Z}^{-1}$.
\end{proof}
\begin{proof}[Proof of Theorem \protect\ref{Th approx P bounded B}]
The estimate $||q(\Lambda )-q_{N}(\Lambda )||_{\mathcal{L}\left(
X^{n_{Z}},X^{n_{U}}\right) }$ results from (\ref{estim1}) and Lemma \ref
{funct calc} (\emph{7}). In the following, we derive the estimate
\begin{equation*}
||q_{N}(\Lambda )-q_{N,M}(\Lambda )||_{\mathcal{L}\left(
X^{n_{Z}},X^{n_{U}}\right) }\leq C_{5}\eta .
\end{equation*}
Since $q_{N}$ is holomorphic in $\mathbb{C}$ and $\Lambda $ is a
bounded operator on $X$ with a spectrum included in $(-R,R)$,
$q_{N}(\Lambda )$ may be represented by the Cauchy formula, see
\cite{Yos},
\begin{equation*}
q_{N}(\Lambda )=\frac{1}{2i\pi }\int_{\mathcal{C}(R)}q_{N}(\xi )(\xi
I-\Lambda )^{-1}d\xi
\end{equation*}
where $\mathcal{C}(R)\subset \mathbb{C}$, provided that all its
poles are out of the contour $\mathcal{C}(R)$. By choosing $\xi $,
function of $\theta $, with $\theta \in (0,2\pi )$ as a
parametrization of $\mathcal{C}(R)$, we find
\begin{equation*}
q_{N}(\Lambda )=\frac{1}{2\pi }\int_{0}^{2\pi }-i\xi ^{\prime
}q_{N}(\xi )(\xi I-\Lambda )^{-1}d\theta .
\end{equation*}
Then, we use the quadrature formula to approximate $q_{N}(\lambda )$
by
\begin{equation*}
q_{N,M}(\lambda )=\frac{1}{2\pi }I_{M}(-i\xi ^{\prime }q_{N}(\xi
)(\xi -\lambda )^{-1}).
\end{equation*}
Combining the estimate (\ref{estim2}) and Lemma \ref{funct calc} (\emph{7})
yields the wanted estimate. The triangular inequality yields
\begin{eqnarray*}
\left\vert q(\Lambda )-q_{N,M}(\Lambda )\right\vert
_{\mathcal{L}\left( X^{n_{Z}},X^{n_{U}}\right) } &\leq &\left\vert
q(\Lambda )-q_{N}(\Lambda
)\right\vert _{\mathcal{L}\left( X^{n_{Z}},X^{n_{U}}\right) } +\left\vert q_{N}(\Lambda )-q_{N,M}(\Lambda )\right\vert _{\mathcal{L}
\left( X^{n_{Z}},X^{n_{U}}\right) } \\
&\leq &(C_{3}+C_{5})\eta = C_{4}\eta
\end{eqnarray*}
with $C_{4}=C_{3}+C_{5}$. Consequently, the expression (\ref{pNM}) of $
q_{N,M}(\Lambda )z$ is obtained by posing $v^{\ell }=-i\xi _{\ell
}^{\prime }q_{N}(\xi _{\ell })(\xi _{\ell }-\Lambda )^{-1}z$.
\end{proof}
\begin{remark}
\label{Rq ellipse}The implementation of the Cauchy integral formula
requires that the function is holomorphic inside the contour. In the
case of an unknown function like the function $q$, it is generally
difficult to determine its domain of holomorphy, so it is easier to
use a rational approximation $q_{N}$ whose poles are under control.
\end{remark}
\begin{proof}[Proof of Theorem \protect\ref{Th formul P unbounded B}]
The derivation of the expression $A^{\ast }=J^{-1}\Phi _{V}a^{\ast
}(\Lambda
)\Phi _{V^{\prime }}^{-1}J^{-1}$ is straightforward provided that $
\left\langle u,v\right\rangle _{V^{\prime
},V}=(Ju,v)_{V}=(u,J^{-1}v)_{V^{\prime }}$. Since $J=\Phi _{V}\Phi
_{V^{\prime }}^{-1}$ this expression is simplified as $A^{\ast
}=\Phi _{V^{\prime }}a^{\ast }\Phi _{V}^{-1}$. Then,
(\ref{eq.Riccatinonborne}) is equivalent to
\begin{gather*}
\left[ \phi _{V^{\prime }}(\Lambda )a^{\ast }(\Lambda )P\phi
_{V^{\prime }}^{-1}(\Lambda )\phi _{V}(\Lambda )+\phi _{V}(\Lambda
)Pa(\Lambda )\right.
\\
-\phi _{V}(\Lambda )Pb(\Lambda )b^{\ast }(\Lambda )P\phi _{V^{\prime
}}^{-1}(\Lambda )\phi _{V}(\Lambda ) \\
+\left. \phi _{Z}c^{\ast }(\Lambda )c(\Lambda )\phi _{Z}^{-1}\phi
_{V}\right] x=0.
\end{gather*}
Finally, the complete proof follows the same steps as in Theorem
\ref{Th formul P bounded B}.
\end{proof}
The proof of Theorem \ref{Th Approx P unbounded B} is similar to the
one of Theorem \ref{Th approx P bounded B}.
\section{Applications and Numerical Results}
\label{Applications}\label{Application}
We present four applications to illustrate different aspects of the
theory. In Examples 1, 3 and 4, the input operator $B$ is bounded,
whereas in Example 2 it is not. Then, we consider cases where the
operators $B$ and $C$ are functions of $\Lambda $ (Examples 1, 2 and
4), and a case where they are not (Example 3). Most examples are
devoted to internal control; nevertheless, the example of
Subsection \ref{exemple 4} shows how to tackle a boundary
control problem. In almost all cases, efficient algorithms are
described. The presentation of Examples 1, 3 and 4 follows the
same plan with three subsections. The first one includes the state
equation, the functional to be minimized and some semi-decentralized
controls resulting from our approach. Their derivation is detailed in
the second subsection. As for the third, it discusses numerical
results.
The functional analysis is carried out in Sobolev spaces defined for
any
integer $k\in \mathbb{N}^{\ast }$ and any domain $\Omega \subset \mathbb{R}
^{d}$ by
\begin{eqnarray*}
H^{k}(\Omega ) &=&\{v\in L^{2}(\Omega )\quad |\quad \nabla ^{j}v\in
L^{2}(\Omega )^{d^{j}}\quad \text{for all}\quad 1\leq j\leq k\} \\
\text{and }H_{0}^{k}(\Omega ) &=&\{v\in H^{k}(\Omega )\quad |\quad
\nabla ^{j}v=0\text{ on }\partial \Omega \quad \text{for all}\quad
0\leq j\leq k-1\}.
\end{eqnarray*}
The boundary $\partial \Omega$ of $\Omega$ is always assumed to be
sufficiently regular to avoid any singularity and thus to simplify
the
choice of the isomorphisms $\Phi $. Its outward unit normal is denoted by $
\nu $. For $N\in \mathbb{N}$, $\mathbb{P}_{N}$ represents the set of $N^{
\text{th}}$-order polynomials.
\subsection{Example 1: The heat equation with a bounded control operator}
\label{exemple 1}
In this example, observation and control operators are bounded.
\subsubsection{The state equation and a choice of semi-decentralized
controllers}
Consider a system modeled by the heat equation posed in a domain
$\Omega \subset \mathbb{R}^{d}$, with homogeneous Dirichlet boundary
conditions. The control is distributed over the whole domain, so the
state $z:=w$ is solution to the boundary value problem,
\begin{equation}
\left\{ \begin{aligned} \partial _{t} w(t,x) & = \Delta w(t,x) +
\beta u(t,x) && \text{in } \mathbb{R}^{+\ast }\times \Omega \text{,}
\\ w(t,x) & = 0 && \text{on } \mathbb{R}^{+\ast }\times \partial
\Omega , \\ w(0,x) & = w_{0} && \text{in }\Omega ,
\end{aligned}\right. \label{exemple1}
\end{equation}
and the functional $\mathcal{J}\left( w_{0};u\right)
=\int_{0}^{+\infty }\left\vert w\right\vert _{L^{2}\left( \Omega
\right) }^{2}+\left\vert \gamma u\right\vert _{L^{2}\left( \Omega
\right) }^{2}dt$ is to be minimized. Here, $\beta $ and $\gamma $
are two nonnegative continuous
functions in $\Omega $. We apply the theory with the self-adjoint operator $
\Lambda =\left( -\Delta \right) ^{-1}$, defined as the inverse of
the Laplace operator $-\Delta :H_{0}^{1}(\Omega )\cap H^{2}(\Omega
)\rightarrow L^{2}(\Omega )$.
\noindent $\vartriangleright $ \textit{Linear approximation:} The
approximation (\ref{polynomial approximation}) with a first-degree
polynomial yields
\begin{equation*}
\uu_{1}=-\frac{\beta }{\sqrt{\gamma }}(d_{0}+d_{1}\Lambda )w,
\end{equation*}
so in the special case $\gamma =\beta =1$, $\uu_{1}$ is the solution
to the boundary value problem
\begin{equation*}
-\Delta \uu_{1}=d_{0}\Delta w-d_{1}w\text{ in }\Omega, \quad \text{with}~\uu
_{1}=w=0\text{ on }\partial \Omega .
\end{equation*}
In the one-dimensional case $\Omega =]0,\pi \lbrack ,$ we apply
Algorithm 1 described hereafter to find $d_{0}=2.23\times 10^{-2}$
and $d_{1}=0.407$. Such $\uu_{1}$ constitutes a semi-decentralized
control before spatial discretization. The Laplace operator, i.e. the
second order derivative, may be approximated by a three-point
centered finite difference scheme, with
solution $(\uu_{1,j})_{j=0,\ldots,\mathcal{N}}$ that approximates the solutions $
\uu_{1}(x_{j})$ at the $(\mathcal{N}+1)$ nodes of a subdivision $
(x_{j}=jh)_{j=0,\ldots,\mathcal{N}}$ with $h=\frac{\pi }{\mathcal{N}},$
\begin{equation*}
-\left( \uu_{1,j-1}-2\uu_{1,j}+\uu_{1,j+1}\right) =d_{0}\left(
w_{j-1}-2w_{j}+w_{j+1}\right) -d_{1}h^{2}w_{j},\quad j=1,\ldots,\mathcal{N
}-1,
\end{equation*}
completed by the boundary conditions
$\uu_{1,0}=\uu_{1,\mathcal{N}}=0$. Here
$w_{j}=w(x_{j})$ for $j=0,\ldots,\mathcal{N}$ that satisfy $w_{0}=w_{\mathcal{N}
}=0$. After elimination of $\uu_{1,0}$ and $\uu_{1,\mathcal{N}},$
the scheme can be written in matrix form,
\begin{equation}
\lbrack -\Delta _{h}]\uu_{1,h}=-d_{0}[-\Delta _{h}]w_{h}-d_{1}w_{h},
\label{uNh'}
\end{equation}
where $[-\Delta _{h}]=\dfrac{1}{h^{2}}
\begin{pmatrix}
2 & -1 & & & \\
-1 & 2 & \ddots & & \\
& \ddots & \ddots & \ddots & \\
& & \ddots & 2 & -1 \\
& & & -1 & 2
\end{pmatrix}
$, $\uu_{1,h}=
\begin{bmatrix}
\uu_{1,1} \\ \vdots \\ \uu_{1,\mathcal{N}-1}
\end{bmatrix}
$ and $w_{h}=
\begin{bmatrix}
w_{1} \\ \vdots \\ w_{\mathcal{N}-1}
\end{bmatrix}
$. This is the fully discretized problem of the semi-decentralized
control approximated by a linear polynomial.
\medskip
\noindent $\vartriangleright $ \textit{Approximation through the
Cauchy formula combined with a polynomial approximation:} To build
the approximated optimal control,
\begin{equation}
\uu_{N,M,h}=-\frac{1}{2\pi }\sum_{\ell =1}^{M}\omega _{\ell }v_{1,h}^{\ell }
, \label{app2ex1}
\end{equation}
the approximation $v_{i,h}^{\ell }$ of $
\begin{bmatrix}
v_{i}^{\ell }(x_{1}) & \ldots & v_{i}^{\ell }(x_{\mathcal{N}-1})
\end{bmatrix}
^{T}$ is computed by solving the system (\ref{Pbdiscspatiale}), that
we
rewrite in the matrix form,
\begin{equation}
\begin{bmatrix}
\xi _{1}-[-\Delta _{h}]^{-1} & -\xi _{2} \\
\xi _{2} & \xi _{1}-[-\Delta _{h}]^{-1}
\end{bmatrix}
\begin{bmatrix}
v_{1,h}^{\ell } \\
v_{2,h}^{\ell }
\end{bmatrix}
=
\begin{bmatrix}
\Re e\left( -i\xi ^{\prime }p_{N}\left( \xi \right) \right) w_{h} \\
\Im m\left( -i\xi ^{\prime }p_{N}\left( \xi \right) \right) w_{h}
\end{bmatrix}
\label{vh1d}
\end{equation}
for each quadrature point $\xi :=\xi _{1,\ell }+i\xi _{2,\ell }$, where $
p_{N}(\lambda )$ is a polynomial approximation of $p(\lambda )$.
\subsubsection{Construction of the semi-decentralized controllers}
We detail the derivation of the polynomial approximation
$p_{N}(\Lambda )w$ of $Pw$ required both for the linear
approximation and in (\ref{vh1d}). We set $U=Z=L^{2}\left( \Omega
\right) $ thus $A=\Delta $ is an isomorphism from its domain
$D\left( \Delta \right) =H^{2}\left( \Omega \right) \cap
H_{0}^{1}\left( \Omega \right) $ into $Z$, see \cite{Gri}. Furthermore, $
Y=U=Z$ and $B=C=S=I$. We set $X=Z$, $\Lambda =\left[ -\Delta \right]
^{-1}$ which is compact, so it has a bounded positive spectrum with
an accumulation point at zero (but $0\not\in \sigma (\Lambda )$),
see \cite{SanSan}. Thus we can choose $\Phi _{Z}=\Phi _{Y}=\Phi
_{U}=I.$ Moreover, when $\beta =\gamma
=1$ the coefficients $a\left( \lambda \right) =-\frac{1}{\lambda }$, $
b=c=s=1 $ are continuous on $I_{\sigma }=\left( 0,\sigma _{\max
}\right] $ and the \textbf{ARE} reads
\begin{equation}
p^{2}\left( \lambda \right) +\frac{2}{\lambda }p\left( \lambda
\right) -1=0. \label{ex1.numricc}
\end{equation}
Its exact nonnegative solution, established only for the calculation
of errors, is
\begin{equation}
p(\lambda )=\frac{-1+\sqrt{1+\lambda ^{2}}}{\lambda }.
\label{ex1.solricc}
\end{equation}
We observe that $p(\lambda )$ is sufficiently regular to be
accurately
approximated by polynomials in $I_{\sigma }$. The \textbf{ARE} (\ref
{ex1.numricc}) is equivalent to the weak formulation
\begin{equation}
\int_{I_{\sigma }}\left( \lambda p^{2}+2p-\lambda \right) \eta
\left( \lambda \right) d\lambda =0\text{ for all }\eta \in
\mathcal{C}^{0}\left( I_{\sigma }\right) \label{algo1 riccati}
\end{equation}
to which we apply the spectral method with Legendre polynomials (see \cite
{BerMad1992} for instance) to find the equation satisfied by the
polynomial approximation $p_{N}$. The computation of the integral is
done exactly by using the Legendre-Gauss-Lobatto (\textbf{LGL})
quadrature formula analyzed in \cite{BerMad1991}, \cite{CroMig} and
\cite{DavRab}. The resolution of the nonlinear problem is achieved
by the iterative semi-implicit scheme described below, where
$\varepsilon $ is the stopping criterion.
\begin{algorithm}[!h]
\caption{Semi-Implicit scheme applied to (\ref{algo1 riccati})}
\begin{algorithmic} [1]
\STATE $p_N^{0}$ given
\STATE $(m+1)^{\text{th}}$ step : knowing $p_N^{m}\in
\mathbb{P}_{N}$, find $p_N^{m+1}\in \mathbb{P}_{N}$ such that
\begin{equation*}
\int_{I_{\sigma }} p_N^{m+1}\left( \lambda \right)\left( \lambda
p_N^{m}\left( \lambda \right) +2 \right)\eta \left( \lambda \right)
d\lambda = \int_{I_{\sigma }}\lambda \eta \left( \lambda \right)
d\lambda,
\quad \forall \eta \in \mathbb{P}_{N}.
\end{equation*}
\STATE If $\|p_N^{m+1}-p_N^{m}\|_{L^2\left(
I_{\sigma}\right)}\leq\varepsilon$ then terminate the algorithm else
return to Step 2.
\end{algorithmic}
\end{algorithm}
\subsubsection{Numerical results}
We analyze separately the three sources of discretization error: the
error of approximation of $p$ by a polynomial $p_{N}$, the error in
the quadrature of the Cauchy formula and the spatial discretization
error. We also discuss the convergence of Algorithm 1.
\noindent $\vartriangleright $ \textit{Polynomial approximation:}
The difference between successive iterations $\vert
p_{N}^{m+1}-p_{N}^{m}\vert _{L^{2}(I_{\sigma })}$ of Algorithm 1
decays exponentially. For $N=10$ and for the initial solution
$p_{N}^{0}=0$, the exponential decay rate is equal to $-1.80$. Let
us denote by $p_{N}$ the polynomial obtained after convergence of
$p_{N}^{m}$ by Algorithm 1, the convergence error $\vert
p_{N}^{m}-p_{N}\vert _{L^{2}(I_{\sigma })}$ is also exponentially
decaying with an exponential decay rate of $-1.82$. In addition, as
it is usual for spectral methods, the relative error
\begin{equation*}
e=\frac{\left\vert p-p_{N}\right\vert _{L^{2}\left( I_{\sigma }\right) }}{
\left\vert p\right\vert _{L^{2}\left( I_{\sigma }\right) }}
\end{equation*}
of the polynomial approximation decreases exponentially with $N$.
Here, the exponential decay rate is $-1.61$.
\medskip
\noindent $\vartriangleright $\textit{\ Approximation through the
Cauchy formula combined with a polynomial approximation:} Because of
the absence of poles in $p_{N}$, the choice of the contour of the
Cauchy formula is free of constraints as long as it surrounds
$I_{\sigma }$. We have chosen a circle parameterized by $\xi (\theta
)=Re^{i\theta }$, with $\theta \in \lbrack 0,2\pi ]$. Then, we have
set the polynomial degree sufficiently large so that the error $e$
can be neglected. The numerical integrations have been performed
with a standard trapezoidal quadrature rule. Figure \ref{choixM1}
represents the relative error
\begin{equation*}
E=\frac{\left\vert p-p_{N,M}\right\vert _{L^{2}\left( I_{\sigma }\right) }}{
\left\vert p\right\vert _{L^{2}\left( I_{\sigma }\right) }}
\end{equation*}
between $p$ and $p_{N,M}$ for various values of the radius $R$. It
converges exponentially with respect to $M$ towards $e,$ and the
exponential decay rate is a decreasing function of $R$.
\begin{figure}[htbp]
\centering
\scalebox{0.4}{\includegraphics*{Figk}}
\caption{Error $E$ in logarithmic scale as a function of $M$ for
different values of $R$ and for $N=10$} \label{choixM1}
\end{figure}
\noindent $\vartriangleright $\textit{\ Spatial discretization:}
Computations have been carried out for $\uu_{N,h}$ defined in
(\ref{uNh'}) with $N=1$ and for $\uu_{N,M,h}$ defined in
(\ref{app2ex1}) with $R=5$, $N=10$ and $M=11$ so that $e$ is in the
range of $10^{-9}$ and is negligible compared to $E$. The
approximation (\ref{app2ex1}) is obtained from the formula
(\ref{discspatialeu}) by substituting $\Phi _{U}$ and $\Phi _{Z}$ by
the identity operator and by using the centered finite difference
scheme of the second order derivative, i.e. by replacing $\Lambda $
by its discretization $\Lambda _{h}$. The spatial discretizations
are compared to the expression of the approximations $\uu_{N}(t,x)$
and $\uu_{N,M}(t,x)$ that we calculate thanks to the modal
decomposition of the operator $\partial _{xx}^{2}$ with homogeneous
Dirichlet boundary conditions. We
obtain
\begin{equation*}
\uu_{N}(t,x)=-\sum\limits_{i\in \mathbb{N}^{\ast }}w_{i}e^{-\left(
\lambda _{i}^{-1}+p_{N} \left( \lambda _{i}\right) \right)
t}p_{N}\left( \lambda _{i}\right) \phi _{i}\left( x\right) ,
\end{equation*}
where $\lambda _{i},$ $\phi _{i}$ and $w_{i}$ represent respectively the $i^{
\text{th}}$ eigenvalue, the $i^{\text{th}}$ eigenvector and the $i^{\text{th}
}$ modal coefficient of the initial condition. The same expression
holds for $\uu_{N,M}(t,x)$ after replacement of $p_{N}$ by
$p_{N,M}$. Then, the errors,
\begin{equation*}
\displaystyle\frac{\int_{0}^{T}\left(\sum_{i=0}^{\mathcal{N}}\left\vert \uu
_{N,i}-\uu_{N}(x_{i})\right\vert^{2}\right)^{\frac{1}{2}}dt}{\int_{0}^{T}\left(\sum_{i=0}^{
\mathcal{N}}\left\vert \uu_{N,i}\right\vert ^{2}\right)^{\frac{1}{2}}dt} \quad \text{and} \quad \frac{
\int_{0}^{T}\left(\sum_{i=0}^{\mathcal{N}}\left\vert \uu_{N,M,i}-\uu
_{N,M}(x_{i})\right\vert ^{2}\right)^{\frac{1}{2}}dt}{\int_{0}^{T}\left(\sum_{i=0}^{\mathcal{N}
}\left\vert \uu_{N,M}(x_{i})\right\vert ^{2}\right)^{\frac{1}{2}}dt},
\end{equation*}
are known to be theoretically quadratic with respect to $h$ the
spatial discretization step, which is confirmed by our experiments.
\subsection{Example 2: Heat equation with unbounded control operator}
\label{exemple 2}
In this example, the control operator is internal and unbounded and
the
observation operator is internal and bounded. We apply the theory of Section
\ref{Unbounded control operators} without going into as much detail
as for the other examples. We only describe the state equation and the
functional analysis framework.
\subsubsection{The state equation}
We keep the heat equation as the state equation with the same control space $
U \subset L^{2}(\Omega )$ and the same functional $\mathcal{J}$ but
the control operator is replaced by an unbounded one defined in the
distribution sense by $\left\langle Bu,v\right\rangle =-\beta
\int_{\Omega }u\beta _{1}.\nabla vdx$, where $\beta _{1}$ is a
vector of $\mathbb{R}^{d}.$
\subsubsection{The functional framework}
First, we pose $V=H_{0}^{1}(\Omega )$, so $A=\Delta $ is an
isomorphism from $V$ into $V^{\prime }$ from which we define
$J=(-A)^{-1}$. It allows us to give a precise definition of $B$: for
all $v\in V$, $\left\langle Bu,v\right\rangle _{V^{\prime
},V}=-\int_{\Omega }u\beta _{1}.\nabla v~dx$ for $u\in U$. Let us
compute $B^{\ast }$ defined by $\left( Bu,v\right) _{V^{\prime
}}=\left( u,B^{\ast }v\right) _{L^{2}(\Omega )}$ for $u\in U$ and
$v\in V^{\prime }$. Since $\left( Bu,v\right) _{V^{\prime
}}=\left\langle Bu,Jv\right\rangle _{V^{\prime },V}=-\left( u,\beta
_{1}.\nabla Jv\right) _{L^{2}(\Omega )}$ then $B^{\ast }v=-\beta
_{1}.\nabla Jv$. We introduce the kernel of $B$, $K_{B}=\{u\in
L^{2}\left( \Omega \right) ~|~u$ constant in the direction$~\beta
_{1}\},$ $U=L^{2}(\Omega )/K_{B} $ and the kernel of $B^{\ast }$,
$K_{B^{\ast }}=\{v\in V^{\prime }~|~Jv~$constant in the
direction$~\beta _{1}\}$. Since $Jv=0$ on the boundary $\partial
\Omega $ then $K_{B^{\ast }}=\{0\}$. Then by using classical
arguments, e.g. \cite{GirRav}, one deduces that $B$ is an
isomorphism from $U $ into $V^{\prime }.$ We pose also
$Y=Z=L^{2}(\Omega ),$ $S=C=I\in \mathcal{L}(Z,Y).$ Now, we introduce
$X=V^{\prime },$ and $\Lambda =J$ which is a nonnegative operator.
The fact that $\Lambda $ is self-adjoint, i.e. that $(\Lambda
v,v^{\prime })_{V^{\prime }}=(v,\Lambda v^{\prime })_{V^{\prime }}$,
comes from the equality $\left\langle \Lambda v,Jv^{\prime
}\right\rangle _{V^{\prime },V}=\left\langle Jv,\Lambda v^{\prime
}\right\rangle _{V,V^{\prime }}.$ To complete the construction, we
pose $\Phi _{U}=(\beta _{1}.\nabla )^{-1},\Phi _{V}=\Lambda ,\Phi
_{V^{\prime }}=I,\Phi _{Y}=\Phi _{Z}=\Lambda ^{1/2}$ which is an
isomorphism from $V^{\prime }$ into $L^{2}(\Omega )$, $a(\Lambda
)=I,\text{ }b(\Lambda )=I\text{ and }c(\Lambda )=I$. Finally, we
proceed as in the first example for the computation of $p_{N,M}$.
\subsection{Example 3: Beam or plate model}
\label{exemple 3}
Here, we deal with a second order problem in time with distributed
internal bounded observation and control.
\subsubsection{The state equation and a choice of semi-decentralized
controllers}
The model under consideration is a fourth order equation posed in a domain $
\Omega \subset \mathbb{R}^{d}$ which may correspond to a
Euler-Bernoulli clamped beam equation when $d=1$ or to a
Love-Kirchhoff clamped plate equation when $d=2$. The control is
still distributed over the whole domain
and the state is $z:=
\begin{bmatrix}
w & \partial _{t}w
\end{bmatrix}
^{T}$ where $w$ is solution to the boundary value problem
\begin{eqnarray}
\partial _{tt}^{2}w &=&-\Delta ^{2}w+\beta u\text{ in }\mathbb{R}^{+\ast
}\times \Omega , \label{eq:etat3} \\
w &=&\nabla w.\nu=0\text{ on }\mathbb{R}^{+\ast }\times \partial
\Omega ,
\label{cL1} \\
w &=&w_{0}\text{ and }\partial _{t}w=w_{1}\text{ in }\Omega \text{
at }t=0, \label{CI1}
\end{eqnarray}
for a given function $\beta $ and given initial conditions $w_{0}$ and $
w_{1} $ all defined in $\Omega .$ Choosing the cost functional $\mathcal{J}
\left( w_{0},w_{1};u\right) =\int_{0}^{+\infty }\left\vert \Delta
w\right\vert _{L^{2}\left( \Omega \right) }^{2}+\left\vert \gamma
u\right\vert _{L^{2}\left( \Omega \right) }^{2}dt$, we pose $\Lambda
=\left( \Delta ^{2}\right) ^{-1},$ defined as the inverse of the
biharmonic operator
$\Delta ^{2}:H_{0}^{2}(\Omega )\cap H^{4}(\Omega )\rightarrow L^{2}(\Omega )$
. The method can handle the general case, however in the special case $
\gamma =\beta =1$, we show in the following Section that the optimal
control
$\uu$ may be approached by
\begin{equation}
\uu_{N,M,h}=-\frac{1}{2\pi }\sum_{\ell =1}^{M}\omega _{\ell }(\vv
_{1,h}^{\ell }+\overline{\vv}_{1,h}^{\ell }), \label{app2ex3}
\end{equation}
where $(\vv_{1,h}^{\ell },\overline{\vv}_{1,h}^{\ell })$ are
solution to
\begin{equation}
\begin{aligned} \begin{bmatrix} \xi _{1}-\Lambda_{h} & -\xi _{2} \\ \xi _{2}
& \xi _{1}-\Lambda_{h} \end{bmatrix} \begin{bmatrix} \vv_{1,h}^{\ell} \\
\vv_{2,h}^{\ell} \end{bmatrix} &= \begin{bmatrix} \Re e\left( -i\xi
^{\prime }k_{1,N}\left( \xi \right) \right) w_{h} \\ \Im m\left(
-i\xi ^{\prime }k_{1,N}\left( \xi \right) \right) w_{h}
\end{bmatrix}, \\ \begin{bmatrix} \xi _{1}-\Lambda_{h} & -\xi _{2}
\\ \xi _{2} & \xi _{1}-\Lambda_{h}
\end{bmatrix} \begin{bmatrix} \overline{\vv}_{1,h}^{\ell} \\
\overline{\vv}_{2,h}^{\ell} \end{bmatrix} &= \begin{bmatrix} \Re
e\left( -i\xi ^{\prime }k_{2,N}\left( \xi \right) \right)
\partial_tw_{h} \\ \Im m\left( -i\xi ^{\prime }k_{2,N}\left( \xi
\right) \right) \partial_tw_{h}
\end{bmatrix}, \end{aligned}
\end{equation}
for each quadrature point $\xi _{\ell }:=\xi _{1,\ell }+i\xi
_{2,\ell }$, and
\begin{equation}
\Lambda _{h}^{-1}=\dfrac{1}{h^{4}}
\begin{bmatrix}
2h^{3} & -\frac{1}{2}h^{3} & & & & & \\
-4 & 6 & -4 & 1 & & & \\
1 & \ddots & 6 & \ddots & \ddots & & \\
& \ddots & \ddots & \ddots & \ddots & \ddots & \\
& & \ddots & \ddots & 6 & \ddots & 1 \\
& & & 1 & -4 & 6 & -4 \\
& & & & & -\frac{1}{2}h^{3} & 2h^{3}
\end{bmatrix}
, \label{Delta^2h}
\end{equation}
the vectors $\vv_{i,h}^{T}$, $\overline{\vv}_{i,h}^{T}$,
$w_{h}^{T}$, $\partial _{t}w_{h}^{T} $ being the approximations of $
\begin{pmatrix}
\vv_{i}(x_{1}) & \ldots & \vv_{i}(x_{\mathcal{N}-1})
\end{pmatrix}
$, $
\begin{pmatrix}
\overline{\vv}_{i}(x_{1}) & \ldots & \overline{\vv}_{i}(x_{\mathcal{N}-1})
\end{pmatrix}
$, $
\begin{pmatrix}
w(x_{1}) & \ldots & w(x_{\mathcal{N}-1})
\end{pmatrix}
$, $
\begin{pmatrix}
\partial _{t}w(x_{1}) & \ldots & \partial _{t}w(x_{\mathcal{N}-1})
\end{pmatrix}
$ and $k_{i,N}$ being defined in the following section.
\subsubsection{Construction and study of the semi-decentralized controllers}
Firstly, the plate equation must be formulated under the form of a
first
order system. We set $z^{T}=
\begin{bmatrix}
w & \partial _{t}w
\end{bmatrix}
$, so we find that $A=
\begin{bmatrix}
0 & I \\
-\Delta ^{2} & 0
\end{bmatrix}
$, the operators $B^{T}=
\begin{bmatrix}
0 & I
\end{bmatrix}
$, $C=
\begin{bmatrix}
\Delta & 0
\end{bmatrix}
,\text{ }S=I$ and the functional spaces $U=L^{2}\left( \Omega
\right) $, $Y\subset L^{2}\left( \Omega \right) $. The usual state
space is $ Z=H_{0}^{2}\left( \Omega \right) \times L^{2}\left(
\Omega \right) $ thus $B$ and $C$ are bounded. We pose
$X=L^{2}\left( \Omega \right) $, $\Lambda =\left( \Delta ^{2}\right)
^{-1}$ an isomorphism from $X$ into $H^{4}\left(
\Omega \right) \cap H_{0}^{2}\left( \Omega \right) $, $\Phi _{Z}=
\begin{bmatrix}
\Lambda ^{\frac{1}{2}} & 0 \\
0 & I
\end{bmatrix}
$, $\Phi _{U}=I$ and $\Phi _{Y}=\Delta \Lambda ^{1/2}$, so $Y=\Delta
\Lambda ^{\frac{1}{2}}L^{2}\left( \Omega \right) =\Delta
H_{0}^{2}\left( \Omega \right) $ and $a\left( \lambda \right) =
\begin{bmatrix}
0 & \lambda ^{-1/2} \\
-\lambda ^{-1/2} & 0
\end{bmatrix}
$, $b^{T}\left( \lambda \right) =
\begin{bmatrix}
0 & 1
\end{bmatrix}
$, $c\left( \lambda \right) =
\begin{bmatrix}
1 & 0
\end{bmatrix}
$ and $s\left( \lambda \right) =1.$
\begin{remark}
\label{isoexp3} $\text{ }$
\begin{enumerate}
\item We indicate how isomorphisms $\Phi _{Y}$ and $\Phi _{Z}$ have been
chosen. The choice of $\Phi _{Z}$ directly comes from the expression
of the inner product $\left( z,z^{\prime }\right) _{Z}=\left( \Phi
_{Z}^{-1}z,\Phi _{Z}^{-1}z^{\prime }\right) _{X^{2}}$ and from
$\left( z_{1},z_{1}^{\prime }\right) _{H_{0}^{2}\left( \Omega
\right) }=\left( \left( \Delta ^{2}\right)
^{\frac{1}{2}}z_{1},\left( \Delta ^{2}\right)
^{\frac{1}{2}}z_{1}^{\prime
}\right) _{L^{2}\left( \Omega \right) }$. For $\Phi _{Y}$, we start from $
C=\Phi _{Y}c(\Lambda )\Phi _{Z}^{-1}$ and from the relation $\left(
y,y^{\prime }\right) _{Y}=\left( \Phi _{Y}^{-1}y,\Phi
_{Y}^{-1}y^{\prime
}\right) _{X}$ which imply that $\Delta =\Phi _{Y}c_{1}\Lambda^{-\frac{1}{2}
} $. The expression of $\Phi _{Y}$ follows.
\item The isomorphisms $\Phi _{Z}$ and $\Phi _{U}$ are some matrices of
functions of $\Lambda $, and so $Q$ is also. Thus, the approximation
is
directly developed on $Q=k(\Lambda )$.
\end{enumerate}
\end{remark}
The controller $Q$ is a $1\times 2$ matrix of operators $k=
\begin{bmatrix}
k_{1} & k_{2}
{{\mathrm m}athrm e}nd{bmatrix}
$, with $k_{1}=p_{21}\Lambda ^{-{\mathrm f}rac{1}{2}}$ and $k_{2}=p_{22}$. So
$\left( k_{i}\right) _{i=1,2}$ is solution to the system
\begin{equation}
\lambda k_{1}^{2}+2k_{1}-1=0 \quad \text{and} \quad
2k_{1}-k_{2}^{2}=0. \label{ex3.numricc}
{{\mathrm m}athrm e}nd{equation}
As in the first example, a nonnegative exact solution
\begin{equation*}
k_{1}(\lambda )={\mathrm f}rac{-1+\sqrt{1+\lambda }}{\lambda } \quad
\text{and} \quad k_{2}(\lambda )=\sqrt{2{\mathrm f}rac{-1+\sqrt{1+\lambda
}}{\lambda }}
{{\mathrm m}athrm e}nd{equation*}
can be exhibited, so it is used to discuss numerical validation.
Again, the functions $k_{i}(\lambda )$ are sufficiently regular to
be accurately approximated by polynomials which computation is done
using the spectral method with Legendre polynomials and the
\textbf{LGL} quadrature formulae.
The weak formulation equivalent to (\ref{ex3.numricc}) is
\begin{equation}
\int_{I_{\sigma }}\left( \lambda k_{1}^{2}+2k_{1}-1\right) \eta
_{1}(\lambda )\text{ }d\lambda =0 \quad \text{and} \quad
\int_{I_{\sigma }}\left( 2k_{1}-k_{2}^{2}\right) \eta _{2}(\lambda
)\text{ }d\lambda =0 \label{WeakFormEx3}
\end{equation}
for all $\eta _{1},$ $\eta _{2}\in \mathcal{C}^{0}\left( I_{\sigma
}\right) $; it is solved by the semi-implicit Algorithm 2 below.
\begin{algorithm}[!h]
\caption{Semi-Implicit scheme for (\ref{WeakFormEx3})}
\begin{algorithmic} [1]
\STATE $ k_{1,N_1}^{0}$, $k_{2,N_2}^{0}$ are given.
\STATE $(m+1)^{\text{th}}$ step: Knowing
$(k_{1,N_{1}}^{m},k_{2,N_{2}}^{m})\in
\mathbb{P}_{N_{1}}\times \mathbb{P}_{N_{2}}$, find $
(k_{1,N_{1}}^{m+1},k_{2,N_{2}}^{m+1})\in \mathbb{P}_{N_{1}}\times \mathbb{P}
_{N_{2}}$ such that $\forall \left( \eta _{1},\eta _{2}\right) \in \mathbb{P}
_{N_{1}}\times \mathbb{P}_{N_{2}}$,
\begin{gather*}
\int_{I_{\sigma}}k_{1,N_1}^{m+1}\left( \lambda \right) \left(
\lambda k_{1,N_1}^{m}\left( \lambda \right) +2\right) \eta
_{1}\left( \lambda \right) d\lambda
=\int_{I_{\sigma}}\eta_{1}\left( \lambda
\right) d\lambda , \\
\int_{I_{\sigma}}k_{2,N_2}^{m+1}\left( \lambda \right) \left(
k_{2,N_2}^{m}\left( \lambda \right) +1\right) \eta_{2}\left( \lambda
\right) d\lambda =\int_{I_{\sigma}}\left( 2k_{1,N_1}^{m+1}\left(
\lambda \right) +k_{2,N_2}^{m}\left( \lambda \right) \right)
\eta_{2}\left( \lambda \right) d\lambda.
\end{gather*}
\STATE If $\|k_{i,N_i}^{m+1}-k_{i,N_i}^{m}\|_{L^2\left(
I_{\sigma}\right)}\leq\varepsilon_i$ then terminate the algorithm
else return to Step 2
\end{algorithmic}
\end{algorithm}
\subsubsection{Numerical results}
The simulations are conducted for an Euler--Bernoulli beam model with length $
L=4.73\,\mathrm{m}$ so that all eigenvalues of $\Lambda $ are included in
$I_{\sigma }=\left( 0,1\right) $.
\noindent $\vartriangleright $ \textit{Polynomial approximation:}
Numerical
tests show an exponential convergence of the Algorithm 2. For $
N_{1}=N_{2}=10 $ and for null initial conditions$,$ the exponential
decay rate is about $-1.80$ and this of the differences of
successive iterates $(\left\|
k_{i,N_{i}}^{m+1}-k_{i,N_{i}}^{m}\right\| _{L^{2}\left( I_{\sigma
}\right) })_{i=1,2}$ is about $-1.83$. The two polynomial
approximation errors
\begin{equation*}
e_{i}=\frac{\Vert k_{i,N_{i}}-k_{i}\Vert _{L^{2}(I_{\sigma
})}}{\Vert k_{i}\Vert _{L^{2}(I_{\sigma })}}
\end{equation*}
are of the order of $10^{-10}$ and $10^{-11}$.
\noindent $\vartriangleright $ \textit{Approximation through the
Cauchy formula combined with a polynomial approximation:} The
numerical integrations, carried out as in the first example, yield
relative errors
\begin{equation*}
E_{i}=\frac{\Vert k_{i,N_{i},M}-k_{i}\Vert _{L^{2}(I_{\sigma
})}}{\Vert k_{i}\Vert _{L^{2}(I_{\sigma })}}
\end{equation*}
parameterized by the number $M$ of integration nodes. They decrease
exponentially with respect to $M$ as shown on Figure
\ref{choixMRex3} where both $M$ are varying from $11$ to $30$.
Several values of the radius $R$ have been tested showing that the
convergence rate is increasing with $R$.
\begin{figure}[!h]
\centering
\subfigure[]{\label{microcantilever1} \includegraphics[width=7cm]{Figk1.eps}}
\subfigure[]{\label{microcantilever2} \includegraphics[width=7cm]{Figk2.eps}}
\caption{ Relative errors \subref{microcantilever1} $E_{1}$ and
\subref{microcantilever2} $E_{2}$ in logarithmic scale with respect
to $M_{i}$ for different values of $R$ and for $N_{1}=N_{2}=10$}
\label{choixMRex3}
\end{figure}
\noindent $\vartriangleright $ \textit{Spatial discretization:}
Taking the same notation as in Example 1, the finite difference
discretization of the
one-dimensional fourth order boundary value problem
\begin{equation}
\Delta ^{2}\vv=f~\text{in }\Omega ,\quad \vv=\nabla \vv\cdot\nu=0~\text{on }
\partial \Omega , \label{BilaplacienDirichlet}
\end{equation}
is
\begin{equation*}
\frac{1}{h^{4}}\left( \vv_{i-2}-4\vv_{i-1}+6\vv_{i}-4\vv_{i+1}+\vv
_{i+2}\right) =f({x_{i}}),\quad \text{for }i=2,\ldots,\mathcal{N}-2
\end{equation*}
for the equation in $\Omega ,$ and $\vv_{0}=0$,
$\vv_{\mathcal{N}}=0$ for the boundary conditions on $\vv$. This
scheme is consistent at order 2. To avoid deteriorating the error,
we use a second order scheme for the
boundary conditions on $\partial _{x}$. From Taylor's Theorem, $\vv(x_{1})=
\vv(0)+h\partial _{x}\vv(0)+\frac{h^{2}}{2}\partial _{xx}^{2}\vv(0)+\mathcal{
O}\left( h^{3}\right) $ and $\vv(x_{2})=\vv(0)+2h\partial _{x}\vv
(0)+2h^{2}\partial _{xx}^{2}\vv(0)+\mathcal{O}\left( h^{3}\right) $.
By
eliminating the term in $\partial _{xx}^{2}\vv(0)$ we obtain $\partial _{x}\vv
(0)=\frac{-3\vv(0)+4\vv(x_{1})-\vv(x_{2})}{2h}+\mathcal{O}\left(
h^{2}\right) \text{.}$ The same is done for $\partial _{x}\vv(L)$; we find $
\partial _{x}\vv(L)=\frac{-3\vv(x_{\mathcal{N}})+4\vv(x_{\mathcal{N}-1})-\vv
(x_{\mathcal{N}-2})}{2h}+\mathcal{O}\left( h^{2}\right) \text{.}$ In
total, the discretization of the problem
(\ref{BilaplacienDirichlet}) after
elimination of $\vv_{0}$ and $\vv_{\mathcal{N}}$ is written in matrix form $
[\Delta _{h}^{2}]\vv_{h}=f_{h},$ where $\vv_{h}^{T}=
\begin{bmatrix}
\vv_{1} & \ldots & \vv_{\mathcal{N}-1}
\end{bmatrix}
$, $f_{h}^{T}=
\begin{bmatrix}
f(x_{1}) & \ldots & f(x_{\mathcal{N}-1})
\end{bmatrix}
$ and $[\Delta _{h}^{2}]$ is the matrix in (\ref{Delta^2h}). The
full optimal control approximation (\ref{app2ex3}) is obtained by
using the formulae (\ref{Pbdiscspatiale}-\ref{discspatiale}) and the
formula of $\uu_{N,M,h}$ in (\ref{discspatialeu1}) with $\Lambda
_{h}=[\Delta _{h}^{2}]^{-1}$. To validate this full strategy, we
have carried a computation with $R=5$, $M=11$, $10^{2}$ points in
the mesh of $\Omega $ and for the time $t\in (0,T)$ with $T=15 s$.
The spatial discretization is compared to the expression of the
approximation $\uu_{N,M}(t,x)$ that we calculate thanks to the modal
decomposition of the operator $\partial _{xxxx}^{4}$ with
homogeneous Dirichlet boundary conditions. Its expression is too big
to be presented, it
has been detailed in \cite{Yak}. Denoting by $\uu_{N,M,i}=\left( \uu
_{N,M,h}\right) _{i}$ the discrete values of the control, the
spatial discretization relative error
\begin{equation*}
\frac{\int_{0}^{T}\left(\sum_{i=0}^{\mathcal{N}}\left| \uu_{N,M,i}(t)-\uu
_{N,M}(x_{i},t)\right| ^{2}\right)^{\frac{1}{2}}dt}{\int_{0}^{T}\left(\sum_{i=0}^{\mathcal{N}
}\left\vert \uu_{N,M}(x_{i},t)\right\vert
^{2}\right)^{\frac{1}{2}}dt}
\end{equation*}
between $\uu_{N,M}$ and $\uu_{N,M,h}$ is equal to $1.10 \times 10^{-4}$.
\subsection{Example 4: Two-dimensional heat equation with a boundary control}
\label{exemple 4}This example deals with a special case of boundary
control.
\subsubsection{The state equation and a choice of semi-decentralized
controller}
Let $\Omega $ be the rectangle $\left( 0,1\right) \times \left(
0,\pi \right) \subset {\mathrm m}athbb{R}^{2}$ and $\Gamma _{0}=\left\{
\left( 0,y\right) :0<y<\pi \right\} $ a part of its boundary. Let us
consider the heat equation with a control $v(t,y)$ applied to the
boundary $\Gamma _{0}$,
\begin{equation*}
\left\{ \begin{aligned} \partial _{t}w\left( t,x,y\right)
-\partial_{xx}^2 w\left( t,x,y\right) -\partial _{yy}^2 w\left(
t,x,y\right) &=0 && \text{in }\mathbb{R}^{+\ast }\times \Omega ,
\\ w\left( t,0,y\right) &=v\left(
t,y\right) && \text{on }\mathbb{R}^{+\ast }\times \Gamma _{0}, \\
w\left( t,x,y\right) &=0 && \text{on }\mathbb{R}^{+\ast }\times
\partial \Omega \backslash \Gamma _{0}, \\ w\left(
0,x,y\right)&=w_{0}\left( x,y\right) && \text{in }\Omega \text{.}
\end{aligned}\right.
\end{equation*}
Since our method is not directly applicable, we reduce the problem
to an internal control problem. We introduce $\overline{w}\left(
t,x,y\right) =w\left( t,x,y\right) -\left( 1-x\right) v\left(
t,y\right) $ solution to the heat equation with homogeneous boundary
conditions,
\begin{equation*}
\left\{ \begin{aligned} \partial _{t}\overline{w}\left( t,x,y\right)
& =\partial _{xx}^2 \overline{w}\left( t,x,y\right) +\partial
_{yy}^2 \overline{w}\left( t,x,y\right) -\left( 1-x\right) u\left(
t,y\right) && \text{in }\mathbb{R}^{+\ast }\times \Omega, \\
\overline{w}\left( t,x,y\right) & = 0 && \text{on }\mathbb{R}^{+\ast
}\times \partial \Omega,
\\ \overline{w}\left( 0,x,y\right) & = \overline{w}_{0}\left( x,y\right)
=w_{0}\left( x,y\right) -\left( 1-x\right) w_{0}\left( 0,y\right) &&
\text{in }\Omega , \end{aligned}\right.
\end{equation*}
with $u\left( t,y\right) =\partial _{t}v\left( t,y\right) -\partial
_{yy}^{2}v\left( t,y\right) $ that allows for easy computation of $v$ once $
u $ is known. For simplicity, we define the cost function and the
control space with $u$ instead of $v$. So, we choose the control
space $U\subset
L^{2}\left( \Gamma _{0}\right) $ and the cost functional
\begin{equation}
\mathcal{J}\left( \overline{w}_{0};u\right) =\int_{0}^{+\infty
}\left\Vert \overline{w}\left( t,x,y\right) \right\Vert
_{L^{2}\left( \Omega \right) }^{2}+\left\Vert u\left( t,y\right)
\right\Vert _{L^{2}\left( \Gamma _{0}\right) }^{2}\text{ }dt\text{.}
\label{fonctionnelle4}
\end{equation}
Then, the approximation of the control is done by using $J$ terms in
a modal decomposition of $\partial _{xx}^{2}$. Without entering into
much details, that are given in sub-Section \ref{ex4 Appli}, the
state vector is comprised
with $J$ components $\overline{w}_{j}\left( t,y\right) =\sqrt{2}\int_{0}^{1}
\overline{w}\left( t,x,y\right) \sin \left( j\pi x\right) $ $dx$ and
the associated control is therefore $\uu=-k(\Lambda )z$ where $k$ is
a $J$-row vector of functions and $\Lambda $ is the isomorphism
$(-\partial _{yy}^{2})^{-1}:L^{2}(\Gamma _{0})\rightarrow
H^{2}(\Gamma _{0})\cap H_{0}^{1}(\Gamma _{0})$. A semi-decentralized
control is built from a rational approximation $k_{j,N_{j}}$ of each
component $k_{j}$ and from a
quadrature rule in the Cauchy formula,
\begin{equation}
\uu_{N,M,h}=-\frac{1}{2\pi }\sum_{\ell =1}^{M}\omega _{\ell }\sum_{j=1}^{J}
\vv_{1,h}^{\ell ,j}, \label{app4ex4}
\end{equation}
where each $\vv_{1,h}^{\ell ,j}$ is solution to a system like
(\ref{vh1d})
with $\overline{w}_{j,h}$ instead of $w_{h}$ and $k_{j,N_{j}}$ instead of $
p_{N}.$
\subsubsection{Construction and study of the semi-decentralized controller}
\label{ex4 Appli}
We start with projecting the model on the $J$ first components of
the orthonormal basis $\psi _{j}\left( x\right) =\sqrt{2}\sin \left(
j\pi x\right) $ in $L^{2}(0,1)$. Since $\int_{0}^{1}\left(
1-x\right) \psi _{j}(x)~dx=\frac{\sqrt{2}}{j\pi }$ the components
$\overline{w}_{j}\left( t,y\right) $ are solution to the equations
posed on $\Gamma _{0}$,
\begin{equation}
\left\{ \begin{aligned} \partial _{t}\overline{w}_{j}\left(
t,y\right) & = -j^{2}\pi ^{2}\overline{w}_{j}\left( t,y\right)
+\partial _{yy}^{2}\overline{w}_{j}\left( t,y\right)
-\frac{\sqrt{2}}{j\pi }u\left(
t,y\right) && \text{in }\mathbb{R}^{+\ast }\times \Gamma_0 , \\
\overline{w}_{j}\left( t,0\right) &=\overline{w}_{j}\left(
t,\pi\right)=0 && \text{in }\mathbb{R}^{+\ast } , \\
\overline{w}_{j}\left( 0,y\right) &=\overline{w}_{j,0}\left(
y\right) =\int_{0}^{1}\overline{w}_{0}\left( x,y\right) \psi
_{j}\left( x\right) dx && \text{in }\Gamma_0 \text{.}
\end{aligned}\right. \label{Etat4}
\end{equation}
This is the system of state equations coupled by a common internal control $
u\left( t,y\right) $. The cost functional (\ref{fonctionnelle4}) is
reduced to
\begin{equation*}
\mathcal{J}\left( \overline{w}_{0};u\right) \simeq \mathcal{J}\left(
\overline{w}_{.,0};u\right) =\int_{0}^{+\infty }\sum_{j=1}^{J}\|\overline{w}
_{j}\left( t,y\right) \|_{L^{2}\left( \Gamma _{0}\right)
}^{2}+\|u\left( t,y\right) \|_{L^{2}\left( \Gamma _{0}\right)
}^{2}~dt.
\end{equation*}
Then, the state variable is $z^{T}=
\begin{bmatrix}
\overline{w}_{1} & \ldots & \overline{w}_{J}
\end{bmatrix}
$, $A=-\mathrm{diag}[(j^{2}\pi ^{2}+\Lambda ^{-1})_{j=1\ldots J}]$, $B^{T}=\frac{\sqrt{2}
}{\pi }
\begin{bmatrix}
\frac{I}{1} & \ldots & \frac{I}{J}
\end{bmatrix}
$, and $C$ is the identity operator. The control and the observation
spaces are $U=L^{2}\left( \Gamma _{0}\right) $ and $Y=\left(
L^{2}\left( \Gamma _{0}\right) \right) ^{J}$. In addition we pose
$X=L^{2}\left( \Gamma _{0}\right) $ and the state space $Z=\left(
L^{2}\left( \Gamma _{0}\right) \right) ^{J}$ thus $B$ and $C$ are
bounded. Thus $\Phi _{Z}=\Phi _{Y}=I_{J\times J}$, $\Phi _{U}=I$ and
$a\left( \lambda \right) =-\mathrm{diag}\left[ (j^{2}\pi
^{2}+\frac{1}{\lambda })_{j=1\ldots J}\right] $, $b^{T}\left(
\lambda
\right) =\frac{\sqrt{2}}{\pi }
\begin{bmatrix}
\frac{1}{1} & \ldots & \frac{1}{J}
\end{bmatrix}
$ and $c\left( \text{.}\right) $ is the identity operator on $\mathbb{R}
^{J}$. Since $\Phi _{Z}$ and $\Phi _{U}$ are the identity operators,
the approximation is developed on $Q=k(\Lambda )$ with $k(\Lambda )=q(\Lambda )$
, and the exact optimal control is $\uu=-k(\Lambda )z$.
To build a rational interpolation $k_{N}\left( \lambda \right) $ of
the form (\ref{apprxfractional}) the interval $I_{\sigma }=(0,1]$ is
meshed with $L+1$ distinct nodes $\lambda _{0},\ldots ,\lambda _{L}$
and each $p\left( \lambda _{n}\right) $, solution to the
\textbf{ARE}, is accurately computed with a standard solver. The
exact expression of $k\left( \lambda _{n}\right) =b^{T}p\left(
\lambda _{n}\right) $ follows and the coefficients of the
rational approximation are solution to the $L+1$ equations $
k_{j,N_{j}}\left( \lambda _{n}\right) =k_{j}\left( \lambda
_{n}\right) $ i.e. to
\begin{equation*}
\sum\limits_{m=0}^{N_{j}^{N}}d_{m}\lambda _{n}^{m}-k_{j}\left(
\lambda _{n}\right) \sum_{m^{\prime }=0}^{N_{j}^{D}}d_{m^{\prime
}}^{\prime }\lambda _{n}^{m^{\prime }}=0\text{ for }n=0,\ldots ,L.
\end{equation*}
The number $L$ of equations is taken sufficiently large so that the
system with $N^{N}+N^{D}+2$ unknowns is over-determined and is
solved in the mean square sense by using the singular value
decomposition.
\subsubsection{Numerical results}
The simulations have been conducted with four modes, i.e. for $J=4$.
The shapes
of the four first functions $k_{j}(\lambda )$ are represented in Figure \ref
{interpo-fraction4} which shows that they exhibit a singular
behavior at the origin. Thus, they cannot be accurately
approximated by polynomials but may be by rational functions.
\begin{figure}[h]
\begin{center}
{\normalsize
\scalebox{0.7}{\includegraphics*{exemple4-interpo-fractionbis.eps}}
}
\end{center}
\caption{Shapes of the Spectral Functions $k$}
\label{interpo-fraction4}
\end{figure}
\medskip
\noindent $\vartriangleright $ \textit{Rational approximation:} In
order to get an accurate approximation, we choose a logarithmic
distribution of $100$ nodes in $(10^{-2},1)$, which corresponds to a
truncation of high frequencies. In Table \ref{ErPNMex4}, we report
the relative errors in the discrete $\ell ^{2}$-norm on the other
set $\{\lambda_{n}\}_{n=0 \ldots 200}$
\begin{equation*}
e_{j}=\frac{\left(\sum_{n=0}^{200} \left| k_{j,N_{j}}\left(
\lambda_{n} \right)-k_{j}\left( \lambda_{n}
\right)\right|^2\right)^{\frac{1}{2}}}{\left(\sum_{n=0}^{200}\left|
k_{j}\left( \lambda_{n} \right)\right|^2\right)^{\frac{1}{2}}},
\text{ with } \lambda_{n} = 10^{-2 + \frac{n}{100}} \quad \text{for
} j=1,\ldots,4 ,
\end{equation*}
between the exact function $k_{j}$ and its rational approximation
$k_{j,N_{j}}$ for special values of numerator's and denominator's
polynomial degrees $N_{j}=\left( N_{j}^{N},N_{j}^{D}\right) $
\begin{table}[h!]
\begin{center}
\caption{Errors of the rational approximations with numerator's and
denominator's degrees $N_{j}=\left( N_{j}^{N},N_{j}^{D}\right) $}
\begin{tabular}{| c | c | c | c | c |}
\hline
$j$ & $1 $ & $2 $ & $3 $ & $4 $ \\
\hline
$N_{j}$ & $\left( 19,3\right) $ & $\left( 18,3\right) $ & $ \left(17,1\right) $ & $\left( 20,2\right) $ \\
\hline
$e_{j}\times 10^{-10}$ & $0.003$ & $0.013$ & $1.003$ & $0.182$ \\
\hline
\end{tabular}
\label{ErPNMex4}
\end{center}
\end{table}
\noindent $\vartriangleright $ \textit{The Cauchy formula combined
with rational approximations:} Then, according to Remark \ref{Rq
ellipse}, numerical integrations are performed with a standard
trapezoidal quadrature rule along the ellipse defined by the two
radii in the real and imaginary directions $R_{1}=1.02$ and $R_{2} =
0.07$. The relative errors
\begin{equation*}
E_{j}=\frac{\left(\sum_{n=0}^{200} \left| k_{j,N_{j},M}\left(
\lambda_{n} \right)-k_{j}\left( \lambda_{n}
\right)\right|^2\right)^{\frac{1}{2}}}{\left(\sum_{n=0}^{200}\left|
k_{j}\left( \lambda_{n} \right)\right|^2\right)^{\frac{1}{2}}}
\text{ with } \lambda_{n} = 10^{-2 + \frac{n}{100}} \quad \text{for
} j=1,\ldots,4,
\end{equation*}
between the exact functions and final approximations are plotted in
logarithmic scale in Figure \ref{ellipse4} for $M$ varying from $10$
to $5 \times 10^{2}$. The errors converge exponentially with an
exponential decay rate given in Figure \ref{ellipse4}. Note that the
parameters $R_1$ and $R_2$ of the ellipse affect the rate of
convergence of the errors, which is confirmed by our numerical calculation.
\begin{figure}[h]
\begin{center}
{\normalsize
\scalebox{0.6}{\includegraphics*{exemple4-ellipsebisbis.eps}}}
\end{center}
\caption{Errors between $k$ and $k_{N,M}$} \label{ellipse4}
\end{figure}
\medskip
\noindent $\vartriangleright $ \textit{Spatial discretization:} The
approximation (\ref{app4ex4}) is obtained from the formula of
$\uu_{N,M,h}$ in (\ref{discspatialeu1}) and by using the centered
finite difference scheme
of the second order derivative $\partial _{yy}^{2}$. The expression of $
\Lambda _{h}$ is the same as in Example 1 and the error between
$\uu_{N,M,h}$ and $\uu$ is quadratic in the space step $h$.
\section{Conclusion\label{conclusion}}
\noindent We have proposed a method to compute distributed control
applied to linear distributed systems with a control operator that
is bounded or not. It has been conceived for architectures of
semi-decentralized processors. Its construction uses a functional
calculus for matrices of functions of an operator, based on spectral
theory and Cauchy formula. In the case of polynomial approximation
of $k$, we have noticed that the numerical integration needs few
integration points, and that the radius of the contour affects the
accuracy of the numerical integration of the Cauchy formula. If the
approximation is rational, we have concluded that numerical
integration requires more integration points in the ellipse, whose
parameters have been chosen heuristically. We think that the
performance of the method could be further improved by finding
optimal contour parameters depending on the number of quadrature
nodes following the ideas in J. A. C. Weideman and L. N. Trefethen
\cite{WeiTre}. Finally, the method can be extended to other
frameworks for distributed control and for functional calculus.
\end{document}
\begin{document}
\begin{abstract}
Given a complete Gromov hyperbolic space $X$ that is roughly starlike from a point $\omega$ in its Gromov boundary $\partial_{G}X$, we use a Busemann function based at $\omega$ to construct an incomplete unbounded uniform metric space $X_{\varepsilon}$ whose boundary $\partial X_{\varepsilon}$ can be canonically identified with the Gromov boundary $\partial_{\omega}X$ of $X$ relative to $\omega$. This uniformization construction generalizes the procedure used to obtain the Euclidean upper half plane from the hyperbolic plane. Furthermore we show, for an arbitrary metric space $Z$, that there is a hyperbolic filling $X$ of $Z$ that can be uniformized in such a way that the boundary $\partial X_{\varepsilon}$ has a biLipschitz identification with the completion $\bar{Z}$ of $Z$. We also prove that this uniformization procedure can be done at an exponent that is often optimal in the case of CAT$(-1)$ spaces.
\end{abstract}
\title{Uniformizing Gromov hyperbolic spaces with Busemann functions}
\section{Introduction}
The goal of this paper is to construct an unbounded analogue of the uniformizations of Gromov hyperbolic spaces built by Bonk, Heinonen and Koskela in their extensive study of a number of problems in conformal analysis \cite{BHK}. The most familiar special case of our procedure is the construction of the upper half-space $\{(x,y):y > 0\}$ in $\mathbb R^{2}$ from the hyperbolic plane $\mathbb{H}^{2}$, which is discussed in Example \ref{hyperbolic plane}. The guiding example in \cite{BHK}, by comparison, is the relationship between $\mathbb{H}^{2}$ and the Euclidean unit disk $\{(x,y):x^{2}+y^{2} < 1\}$. As can be seen from these examples, the input for uniformization is a geodesic Gromov hyperbolic space $X$ and the output is an incomplete metric space $\Omega$, obtained from a conformal deformation of $X$, that is \emph{uniform} in the sense of Definition \ref{def:uniform} below. The density used for uniformizing a Gromov hyperbolic space $X$ in \cite{BHK} is exponential in the distance to a fixed point $z$ of $X$. In contrast we will be using a density that is exponential in a \emph{Busemann function} associated to a particular point of the Gromov boundary of $X$. This choice of density is natural as Busemann functions are often interpreted as distance functions ``from infinity'' and can themselves be used to define a boundary of the space $X$ \cite[$\S$3]{BGS}. Unlike in \cite{BHK}, we will not assume any local compactness properties on $X$, so specializing our results back to their setting yields a small generalization of their results as well.
Our principal application of this uniformization construction will be to \emph{hyperbolic fillings} $X$ of a metric space $Z$, with a particular focus on the case in which $Z$ is unbounded. When $Z$ is bounded a hyperbolic filling $X$ of $Z$ can be thought of as a Gromov hyperbolic graph whose Gromov boundary can be canonically identified with $Z$; in the case that $Z$ is unbounded there are some additional subtleties to this notion owing to the fact that the Gromov boundary of a Gromov hyperbolic space is always bounded. We refer to the discussion prior to Theorem \ref{filling theorem} for further information on this, as well as the contents of Section \ref{sec:filling}. Our use of Busemann functions in this setting is inspired by the hyperbolic filling construction of Buyalo and Schroeder \cite[Chapter 6]{BS07} for arbitrary metric spaces $Z$.
Our uniformization construction for hyperbolic fillings is used in a followup work \cite{Bu23} in order to establish a correspondence between Newton-Sobolev classes of functions on the hyperbolic filling of $Z$ and Besov classes of functions on $Z$ in the special case that $Z$ carries a doubling measure. This is heavily inspired by work of A. Bj\"orn, J. Bj\"orn, and Shanmugalingam \cite{BBS21} that establishes the corresponding result in the case that $Z$ is bounded. In a closely related work \cite{Bu22} we also generalize to our setting their results \cite{BBS20} on how local Poincar\'e inequalities transform under the uniformization in \cite{BHK}. This yields some interesting new examples of uniform metric spaces satisfying Poincar\'e inequalities. There are a number of known variants on the correspondence between function spaces on the hyperbolic filling and function spaces on $Z$, see for instance \cite{BS18}, \cite{BSS18}, \cite{BP03}. Such correspondences were one of the original motivating factors in the use of hyperbolic fillings in analysis on metric spaces. For applications to trace theorems on Ahlfors regular metric spaces that demonstrate the power of these correspondences we refer to \cite{SS17}.
Lastly we remark that the idea of uniformizing Gromov hyperbolic spaces using Busemann functions has been developed independently by Zhou \cite{Z20} for the purpose of an entirely different set of applications, including a study of Teichm\"uller's displacement problem for quasi-isometries of Gromov hyperbolic spaces. The work \cite{Z20} in particular gives alternative proofs of the main uniformization theorems (Theorems \ref{unbounded uniformization} and \ref{identification theorem}) restricted to the case of proper Gromov hyperbolic spaces and the original range $0 < \varepsilon \leq \varepsilon_{0}$ of exponents considered by Bonk-Heinonen-Koskela (see Theorem \ref{Gehring-Hayman} below). Our applications to CAT$(-1)$ spaces and hyperbolic fillings require us to consider exponents outside this range however; this is a key point of departure from \cite{Z20}.
Stating our main theorems requires some preliminary definitions. We opt to give precise definitions in the corresponding sections throughout the paper, while mostly only outlining the necessary definitions here in the introduction. For a metric space $(X,d)$ and a curve $\gamma: I \rightarrow X$, $I \subset \mathbb R$ a subinterval, we write $\ell(\gamma)$ for the length of $\gamma$ measured in $X$. We will follow the standard practice of using $\gamma$ to denote both the parametrization of the curve and the image of the curve in $X$. The curve $\gamma$ is a \emph{geodesic} if it is isometric as a mapping of $I$ into $X$. We say that $X$ is \emph{geodesic} if any two points can be joined by a geodesic. We will use the following distance notation for distance from a point $x$ to a set $E$ in any metric space $(X,d)$,
\[
\mathrm{dist}(x,E) = \inf_{y \in E} d(x,y),
\]
and in particular will write $\mathrm{dist}(x,\gamma)$ for the distance of a point $x \in X$ to (the image of) a curve $\gamma$.
We now define uniform metric spaces. We start with an \emph{incomplete} metric space $(\Omega,d)$. We denote the boundary of $\Omega$ in its completion $\bar{\Omega}$ by $\partial \Omega = \bar{\Omega} \backslash \Omega$. For $x \in \Omega$ we write $d_{\Omega}(x):=\mathrm{dist}(x,\partial \Omega)$ for the distance from $x$ to the boundary $\partial \Omega$. For the definition below we allow $I = [a,b] \subset \mathbb R$ to be any closed interval, and for a curve $\gamma: I \rightarrow X$ we denote its endpoints by $\gamma_{-} := \gamma(a)$ and $\gamma_{+} := \gamma(b)$. For such an interval $I \subset \mathbb R$ we write $I_{\leq t} = \{s \in I: s\leq t\}$ and $I_{\geq t} = \{s \in I: s\geq t\}$.
\begin{defn}\label{def:uniform}For a constant $A \geq 1$ and a closed interval $I \subset \mathbb R$, a curve $\gamma: I \rightarrow \Omega$ is \emph{$A$-uniform} if
\begin{equation}\label{uniform one}
\ell(\gamma) \leq Ad(\gamma_{-},\gamma_{+}),
\end{equation}
and if for every $t \in I$ we have
\begin{equation}\label{uniform two}
\min\{\ell(\gamma|_{I_{\leq t}}),\ell(\gamma|_{I_{\geq t}})\} \leq A d_{\Omega}(\gamma(t)).
\end{equation}
We say that the metric space $\Omega$ is \emph{$A$-uniform} if any two points in $\Omega$ can be joined by an $A$-uniform curve.
\end{defn}
Many reasonable domains in Euclidean space such as the unit ball or upper half-space provide natural examples of uniform metric spaces when they are equipped with the Euclidean metric. The first requirement \eqref{uniform one} implies that $A$-uniform curves minimize the distance between their endpoints up to the multiplicative constant $A$. The second requirement \eqref{uniform two} implies that if we cut $\gamma$ at any point $\gamma(t)$ then at least one of the two subcurves $\gamma|_{I_{\leq t}}$ or $\gamma|_{I_{\geq t}}$ must have length controlled by the distance $d_{\Omega}(\gamma(t))$ of $\gamma(t)$ to $\partial \Omega$. We note that it is easily verified from the definitions that the property of a curve $\gamma$ being $A$-uniform is independent of the choice of parametrization of $\gamma$. For the purpose of formulating our theorems it is convenient to extend the definition of $A$-uniform curves to allow for arbitrary subintervals $I \subset \mathbb R$ and to allow the possibility $\ell(\gamma) = \infty$; as this extension is somewhat technical we refer to Definition \ref{def:extend uniform} for the exact details.
\begin{rem}\label{no local compact} The definition of uniform metric spaces advanced in \cite{BHK} also requires local compactness. We follow V\"ais\"al\"a \cite{V99} in dropping the local compactness requirement, as the output of our uniformization procedure need not be locally compact in many cases of interest.
\end{rem}
For a continuous function $\rho: X \rightarrow (0,\infty)$ we write
\[
\ell_{\rho}(\gamma) = \int_{\gamma} \rho \, ds,
\]
for the line integral of $\rho$ along $\gamma$. We refer to \cite[Appendix]{BHK} for a detailed discussion of line integrals in our context. We will often refer to such a positive continuous function $\rho$ as a \emph{density} on $X$. The following definition plays a key role in the statement of our main theorems.
\begin{defn}\label{conformal factor}
Let $(X,d)$ be a geodesic metric space and let $\rho: X \rightarrow (0,\infty)$ be a density on $X$. The \emph{conformal deformation of $X$ with conformal factor $\rho$} is the metric space $X_{\rho} = (X,d_{\rho})$ with metric
\[
d_{\rho}(x,y) = \inf \ell_{\rho}(\gamma),
\]
with the infimum taken over all curves $\gamma$ joining $x$ to $y$. We say that the density $\rho$ is a \emph{Gehring-Hayman density} (abbreviated as a \emph{GH-density}) if there is a constant $M \geq 1$ such that for any $x,y \in X$ and any geodesic $\gamma$ joining $x$ to $y$ we have
\begin{equation}\label{first GH}
\ell_{\rho}(\gamma) \leq M d_{\rho}(x,y).
\end{equation}
\end{defn}
We will refer to the inequality \eqref{first GH} as the \emph{GH-inequality} and will sometimes refer to the constant $M$ as the \emph{$GH$-constant}. The terminology here is inspired by the work of Gehring-Hayman \cite{GH62}, which shows that in a simply connected hyperbolic domain $\Omega$ in the complex plane the hyperbolic geodesics minimize Euclidean length among all curves in the domain with the same end points, up to a universal multiplicative constant. Here the density $\rho$ is given by the conformal change of metric relating the Euclidean metric on $\Omega$ to the hyperbolic metric. Note that if $X$ is a tree then the GH-inequality holds for any density $\rho$ with $M = 1$ since any path joining two points in a tree must contain the geodesic joining those points.
We next discuss the notions we will need regarding Gromov hyperbolic spaces. Most formal definitions regarding Gromov hyperbolicity and the Gromov boundary are postponed to Section \ref{sec:hyperbolic}, as they can be found in any standard reference such as \cite{BS07}, \cite{GdH90}. A geodesic metric space $X$ is \emph{Gromov hyperbolic} if there is a $\delta \geq 0$ such that all geodesic triangles are \emph{$\delta$-thin}, meaning that for any geodesic triangle $\Delta$ each edge of $\Delta$ is contained in a $\delta$-neighborhood of the other two edges of $\Delta$. In this case we will also say that $X$ is \emph{$\delta$-hyperbolic}. We write $\partial_{G}X$ for the Gromov boundary of $X$, to be defined in Section \ref{sec:hyperbolic}; for now we note that a geodesic ray $\gamma:[0,\infty) \rightarrow X$ can always be identified with an equivalence class $[\gamma] \in \partial_{G}X$, but in general not every point in $\partial_{G}X$ can be realized in this way.
We consider a complete geodesic $\delta$-hyperbolic space $X$ and a geodesic ray $\gamma:[0,\infty) \rightarrow X$. The \emph{Busemann function} $b_{\gamma}: X \rightarrow \mathbb R$ associated to $\gamma$ is defined by the limit
\begin{equation}\label{first busemann definition}
b_{\gamma}(x) = \lim_{t \rightarrow \infty} d(\gamma(t),x)-t.
\end{equation}
Using the triangle inequality and the fact that $d(\gamma(t),\gamma(0)) = t$, it's easy to check that the right side is nonincreasing in $t$ and bounded below by $-d(\gamma(0),x)$, so this limit exists. It's also easily verified that $b_{\gamma}$ is 1-Lipschitz, thus in particular is continuous. As is customary when considering Busemann functions, we will refer to any translate $b = b_{\gamma} + s$ of $b_{\gamma}$ for a constant $s \in \mathbb R$ as a Busemann function as well. We write
\begin{equation}\label{extension busemann definition}
\mathcal{B}(X) = \{b_{\gamma}+s: \text{$\gamma$ a geodesic ray in $X$, $s \in \mathbb R$}\},
\end{equation}
for the set of all Busemann functions on $X$. Given a Busemann function $b = b_{\gamma} + s$ we will write $\omega_{b} = [\gamma] \in \partial_{G}X$ for the point in the Gromov boundary determined by the geodesic ray $\gamma$. We will refer to $\omega_{b}$ as the \emph{basepoint} of $b$ and say that $b$ is \emph{based at $\omega_{b}$}.
To state our theorems in their appropriate generality it is useful to augment the set $\mathcal{B}(X)$ with the distance functions on $X$: for $z \in X$ we write $b_{z}(x) = d(x,z)$ for the distance function to $z$ and write
\begin{equation}\label{extension distance definition}
\mathcal{D}(X) = \{b_{z}+s: \text{$z \in X$, $s \in \mathbb R$}\},
\end{equation}
for the set of all translates of distance functions on $X$. We then write $\hat{\mathcal{B}}(X) = \mathcal{B}(X) \cup \mathcal{D}(X)$. In the case $b = b_{z} + s$ we write $\omega_{b} = z$ and refer to $\omega_{b}$ as the basepoint of $b$ as well. The defining formula \eqref{first busemann definition} for Busemann functions shows that any $b \in \mathcal{B}(X)$ can be realized as a pointwise limit of functions $b_{t} \in \mathcal{D}(X)$ defined by $b_{t}(x) = d(\gamma(t),x)-t$.
Given $b \in \hat{\mathcal{B}}(X)$ and $\varepsilon > 0$ we define a density $\rho_{\varepsilon,b}$ on $X$ by
\[
\rho_{\varepsilon,b}(x) = e^{-\varepsilon b(x)}.
\]
We write $X_{\varepsilon,b} = (X,d_{\varepsilon,b})$ for the conformal deformation of $X$ with conformal factor $\rho_{\varepsilon,b}$. In the theorem below we will be assuming that $X$ is \emph{$K$-roughly starlike} from the basepoint $\omega_{b}$ of $b$. This is a technical condition on geodesics starting from $\omega_{b}$ that is described in Definition \ref{def:rough star}. The main purpose of this hypothesis is to rule out cases such as trees that have arbitrarily long finite branches. This $K$-rough starlikeness condition will be satisfied with $K = \frac{1}{2}$ in our application of Theorem \ref{unbounded uniformization} to hyperbolic fillings in Theorem \ref{filling theorem}.
\begin{thm}\label{unbounded uniformization}
Let $X$ be a complete geodesic $\delta$-hyperbolic space and let $b \in \hat{\mathcal{B}}(X)$ be given. We suppose that $X$ is $K$-roughly starlike from $\omega_{b}$ and that $\varepsilon > 0$ is given such that $\rho_{\varepsilon,b}$ is a GH-density with constant $M$.
Then geodesics in $X$ are $A$-uniform curves in $X_{\varepsilon,b}$, with $A = A(\delta,K,\varepsilon,M)$. Consequently $X_{\varepsilon,b}$ is an $A$-uniform metric space. Furthermore $X_{\varepsilon,b}$ is bounded if and only if $b \in \mathcal{D}(X)$.
\end{thm}
In this statement and all subsequent ones the notation $A = A(\delta,K,\varepsilon,M)$ is used to indicate that a particular constant depends on the indicated parameters. We refer to Definition \ref{def:extend uniform} for the extension of the definition of $A$-uniform curves that is necessary to cover the case of an arbitrary geodesic in $X$; Definition \ref{def:uniform} only covers the case of geodesics defined on closed intervals. The claim that $X_{\varepsilon,b}$ is bounded if and only if $b \in \mathcal{D}(X)$ does not require either the rough starlikeness hypothesis or the assumption that $\rho_{\varepsilon,b}$ is a GH-density, see Proposition \ref{bounded equivalence}.
We describe the motivating example for Theorem \ref{unbounded uniformization} in the case $b \in \mathcal{B}(X)$ below.
\begin{ex}\label{hyperbolic plane}
Let $\mathbb{U}^{2} = \{(x,y) \in \mathbb R^{2}: y > 0\}$ be the upper half space in $\mathbb R^{2}$ equipped with the Euclidean metric, which is easily seen to be a uniform metric space. Let $\mathbb{H}^{2}$ denote the upper half plane model of the hyperbolic plane, which is $\mathbb{U}^{2}$ equipped with the Riemannian metric $ds^{2} = \frac{dx^{2}+dy^{2}}{y^{2}}$. Define $\gamma: [0,\infty) \rightarrow \mathbb{H}^{2}$ by $\gamma(t) = (0,e^{t})$. Then $\gamma$ is a geodesic ray in $\mathbb{H}^{2}$.
From explicit formulas for the hyperbolic distance in this model (see for instance \cite[A.3]{BS07}) it is straightforward to calculate that the associated Busemann function is given by $b_{\gamma}(x,y) = -\log y$. Setting $\varepsilon = 1$, the density $\rho_{1,b_{\gamma}}$ is thus simply given by $\rho_{1,b_{\gamma}}(x,y) = y$. Therefore the uniformized metric space $\mathbb{H}^{2}_{1,b_{\gamma}}$ is isometric to $\mathbb{U}^{2}$. We also remark that the GH-inequality \eqref{first GH} for the density $\rho_{1,b_{\gamma}}$ can easily be verified using the standard representation of geodesics in the upper half-plane model for $\mathbb{H}^{2}$ as subsegments of semicircles or vertical lines orthogonal to the horizontal line $\{y = 0\}$ in $\mathbb R^{2}$.
\end{ex}
A metric space $(X,d)$ is \emph{proper} if its closed balls are compact. In the case $b \in \mathcal{D}(X)$, Theorem \ref{unbounded uniformization} generalizes \cite[Proposition 4.5]{BHK} as it does not require $X$ to be proper and allows for a potentially larger range of values for the parameter $\varepsilon$; this larger range will be relevant to Theorem \ref{CAT theorem}.
For a $\delta$-hyperbolic space $X$ and a point $\omega \in \partial_{G}X$ we write $\partial_{\omega} X = \partial_{G}X \backslash \{\omega\}$ for the complement of $\omega$ in the Gromov boundary of $X$. We will refer to $\partial_{\omega} X$ as the \emph{Gromov boundary relative to $\omega$} for reasons that will be explained prior to Proposition \ref{convergence Busemann}. We formally extend this definition to $\omega \in X$ by defining $\partial_{\omega}X = \partial_{G}X$; in this case we will still refer to $\partial_{\omega} X$ as the Gromov boundary relative to $\omega$, with the understanding that this simply coincides with the standard Gromov boundary for $\omega \in X$. As part of the proof of Theorem \ref{unbounded uniformization}, we will show that there is a canonical identification $\varphi_{\varepsilon,b}: \partial_{\omega_{b}} X \rightarrow \partial X_{\varepsilon,b}$ between the Gromov boundary of $X$ relative to $\omega_{b}$ and the boundary of $X_{\varepsilon,b}$ in its completion. The most important property of this identification is summarized in Theorem \ref{identification theorem} below.
A function $b \in \hat{\mathcal{B}}(X)$ can be used to define a natural class of metrics on $\partial_{\omega_{b}}X$ known as \emph{visual metrics based at $\omega_{b}$} (see \cite[Chapter 3]{BS07} as well as Section \ref{subsec:visual}). These visual metrics have an associated parameter $\varepsilon > 0$ and a comparison constant $L$ to a specific model quasi-metric $\theta_{\varepsilon,b}$ on $\partial_{\omega_{b}}X$ defined in \eqref{visual quasi}. We continue to write $d_{\varepsilon,b}$ for the canonical extension of the metric on the uniformization $X_{\varepsilon,b}$ to its completion $\bar{X}_{\varepsilon,b}$.
\begin{thm}\label{identification theorem}
Let $X$ be a complete geodesic $\delta$-hyperbolic space and let $b \in \hat{\mathcal{B}}(X)$ be such that $X$ is $K$-roughly starlike from the basepoint $\omega$ of $b$. Let $\varepsilon > 0$ be given such that $\rho_{\varepsilon,b}$ is a GH-density with constant $M$. Then there is a canonical identification $\varphi_{\varepsilon,b}: \partial_{\omega}X \rightarrow \partial X_{\varepsilon,b}$ under which the restriction of $d_{\varepsilon,b}$ to $\partial X_{\varepsilon,b}$ defines a visual metric on $\partial_{\omega}X$ based at $\omega$ with parameter $\varepsilon$ and comparison constant $L = L(\delta,K,\varepsilon,M)$.
\end{thm}
For a precise description of the identification $\varphi_{\varepsilon,b}$ we refer to the discussion after \eqref{construct identify}.
\begin{rem}\label{flexibility}
It is useful to allow some additional flexibility in the choice of function $b$ in Theorems \ref{unbounded uniformization} and \ref{identification theorem}. This flexibility will be used in Theorem \ref{filling theorem}. For a continuous function $b: X \rightarrow \mathbb R$ and a constant $\kappa \geq 0$ we write $b \in \hat{\mathcal{B}}_{\kappa}(X)$ if there is some $b' \in \hat{\mathcal{B}}(X)$ such that $|b(x)-b'(x)| \leq \kappa$ for all $x \in X$; we write $b \in \mathcal{D}_{\kappa}(X)$ if $b' \in \mathcal{D}(X)$ and $b \in \mathcal{B}_{\kappa}(X)$ if $b' \in \mathcal{B}(X)$. We define the basepoint of $b$ to be the basepoint of $b'$, $\omega_{b} = \omega_{b'}$; while this definition may be ambiguous in the case $b \in \mathcal{D}_{\kappa}(X)$, this ambiguity does not matter in the context of our theorems. Then $X_{\varepsilon,b}$ is $e^{\varepsilon \kappa}$-biLipschitz to $X_{\varepsilon,b'}$ via the identity map on $X$ and $\rho_{\varepsilon,b'}$ will be a GH-density with constant $e^{2\varepsilon \kappa}M$ if $\rho_{\varepsilon,b}$ is a GH-density with constant $M$. It then easily follows that Theorems \ref{unbounded uniformization} and \ref{identification theorem} hold for $b \in \hat{\mathcal{B}}_{\kappa}(X)$ as well, with the uniformity parameter $A$ in Theorem \ref{unbounded uniformization} and the comparison constant $L$ in Theorem \ref{identification theorem} depending additionally on $\kappa$.
\end{rem}
\begin{rem}\label{generalize to Banach}
Since we do not assume that the Gromov hyperbolic space $X$ in our theorems is proper, it is an interesting question whether our theorems can be applied to the ``free quasiworld'' considered by V\"ais\"al\"a \cite{V99}. The Gromov hyperbolic spaces that arise in this context are domains in Banach spaces equipped with hyperbolic metrics. However in this setting the hypothesis that $X$ is geodesic is too strong \cite[Remark 3.5]{V99}. The best one can assume is that $X$ is a \emph{length space}, i.e., that the distance between two points of $X$ is equal to the infimum of the lengths of all curves joining them. Thus one would need to generalize Theorems \ref{unbounded uniformization} and \ref{identification theorem} to Gromov hyperbolic spaces that are not necessarily geodesic, but are still length spaces. We believe that such a generalization is possible, but since it is unnecessary for our applications we will not pursue it here.
\end{rem}
Let's now discuss when the hypotheses of Theorems \ref{unbounded uniformization} and \ref{identification theorem} are satisfied in practice. The two key hypotheses are the rough starlikeness hypothesis from the basepoint $\omega_{b}$ of $b$ and the assumption that $\rho_{\varepsilon,b}$ is a GH-density on $X$. The rough starlikeness hypothesis is always easily verified in applications of interest, so as a consequence it is typically not a concern when trying to apply these theorems. Thus the main hypothesis to verify is that of $\rho_{\varepsilon,b}$ being a GH-density. The most general result available regarding verifying this condition is the following theorem of Bonk-Heinonen-Koskela.
\begin{thm}\label{Gehring-Hayman}\cite[Theorem 5.1]{BHK}
Let $(X,d)$ be a geodesic $\delta$-hyperbolic space. There is $\varepsilon_{0} = \varepsilon_{0}(\delta) > 0$ depending only on $\delta$ such that if a density $\rho:X \rightarrow (0,\infty)$ satisfies for all $x,y \in X$ and some fixed $0 < \varepsilon \leq \varepsilon_{0}$,
\begin{equation}\label{proto Harnack}
e^{-\varepsilon d(x,y)} \leq \frac{\rho(x)}{\rho(y)} \leq e^{\varepsilon d(x,y)},
\end{equation}
then $\rho$ is a GH-density with constant $M = 20$.
\end{thm}
The inequality \eqref{proto Harnack} is satisfied for $\rho_{\varepsilon,b}$ for any $b \in \hat{\mathcal{B}}(X)$ since all functions in $\hat{\mathcal{B}}(X)$ are $1$-Lipschitz. Theorem \ref{Gehring-Hayman} builds on a number of previous works that are summarized at the beginning of \cite[Chapter 5]{BHK}. Thus if one is not concerned about obtaining Theorems \ref{unbounded uniformization} and \ref{identification theorem} for a specific value of $\varepsilon$ then it is always possible to assume that $\rho_{\varepsilon,b}$ is a GH-density with constant $M = 20$ by taking $\varepsilon$ sufficiently small.
In general one wants to establish that $\rho_{\varepsilon,b}$ is a GH-density for as large a value of $\varepsilon$ as possible, as this property is then inherited for smaller values of $\varepsilon$ by Proposition \ref{inheritance}. This is particularly important for applications in which there is a preferred visual metric on $\partial_{\omega}X$, such as Theorems \ref{CAT theorem} and \ref{filling theorem} below.
CAT$(-1)$ spaces are geodesic metric spaces in which geodesic triangles are thinner than corresponding comparison geodesic triangles in the hyperbolic plane $\mathbb{H}^{2}$. We refer to \cite[Definition 3.2.1]{DSU17} for a precise definition; since the proper definition is somewhat lengthy to state and we will only be using easily stated consequences of the CAT$(-1)$ property, we omit a full description of the definition here. These spaces encompass many natural examples such as trees and simply connected Riemannian manifolds with sectional curvatures $\leq -1$. A CAT$(-1)$ space is $\delta$-hyperbolic with the same hyperbolicity constant $\delta = \delta(\mathbb{H}^{2})$ as the hyperbolic plane, for which the optimal constant can be computed explicitly to be $\delta = \log(1+\sqrt{2})$.
For a CAT$(-1)$ space $X$ and a function $b \in \hat{\mathcal{B}}(X)$ with basepoint $\omega$ the model quasi-metric $\theta_{1,b}$ on $\partial_{\omega}X$ in fact defines a distinguished choice of visual metric on $\partial_{\omega}X$ with parameter $\varepsilon = 1$. This metric is known as the \emph{Bourdon metric} on $\partial_{\omega}X = \partial X$ when $b \in \mathcal{D}(X)$ and the \emph{Hamenst\"adt metric} on $\partial_{\omega}X$ when $b \in \mathcal{B}(X)$. For further details we refer to Remark \ref{CAT visual}. Our next theorem applies Theorems \ref{unbounded uniformization} and \ref{identification theorem} to the special case of CAT$(-1)$ spaces at the special value $\varepsilon = 1$.
\begin{thm}\label{CAT theorem}
Let $X$ be a complete CAT$(-1)$ space and let $b \in \hat{\mathcal{B}}(X)$ be given with basepoint $\omega$. Then there is a universal constant $M \geq 1$ such that $\rho_{1,b}$ is a GH-density with constant $M$. If, furthermore, $X$ is $K$-roughly starlike from the basepoint $\omega$ of $b$ for some $K \geq 0$ then the conclusions of Theorems \ref{unbounded uniformization} and \ref{identification theorem} hold for $X_{1,b}$ with constants $A = A(K)$ and $L = L(K)$ depending only on $K$. In particular the restriction of $d_{1,b}$ to $\partial X_{1,b}$ is $L$-biLipschitz to $\theta_{1,b}$.
\end{thm}
The constant $M$ in Theorem \ref{CAT theorem} is universal in the sense that it is the same for any CAT$(-1)$ space $X$ and any $b \in \hat{\mathcal{B}}(X)$. The dependence of $A$ on $K$ can be removed when $b \in \mathcal{D}(X)$ by mimicking the arguments of \cite[Proposition 4.5]{BHK}; this same comment applies to Theorem \ref{unbounded uniformization} as well. The conclusions of Theorem \ref{CAT theorem} show in particular that the boundary $\partial X_{1,b}$ of the uniformization has a canonical biLipschitz identification with the Gromov boundary $\partial_{\omega}X$ relative to $\omega$ equipped with the distinguished visual metric $\theta_{1,b}$.
\begin{rem}\label{sharp} Theorem \ref{identification theorem} produces an obstruction for $\rho_{\varepsilon,b}$ to be a GH-density: the Gromov boundary $\partial_{\omega}X$ must admit a visual metric based at the basepoint $\omega$ of $b$ with parameter $\varepsilon$. In the case $b = b_{z}$ for some $z \in X$ (i.e., if $b$ is a distance function) then this shows in particular that $\varepsilon \leq -K_{u}(X)$, where $K_{u}(X)$ is the asymptotic upper curvature bound defined by Bonk and Foertsch (see \cite[Theorem 1.5]{BF06}). In the case of the $n$-dimensional hyperbolic space $\mathbb{H}^{n}$ of constant negative curvature $-1$ ($n \geq 2$) we have $K_{u}(\mathbb{H}^{n}) = -1$ by \cite[Proposition 1.4]{BF06}. Hence $\rho_{\varepsilon,b}$ cannot be a GH-density for any $\varepsilon > 1$ when $b \in \mathcal{D}(\mathbb{H}^{n})$. This shows in particular that the value $\varepsilon = 1$ for $\rho_{1,b}$ to be a GH-density in Theorem \ref{CAT theorem} is sharp in certain cases. We can in fact extend these conclusions to observe that $\rho_{\varepsilon,b}$ cannot be a GH-density for any $\varepsilon > 1$ when $b \in \mathcal{B}(\mathbb{H}^{n})$ as well by observing that, for a fixed $\omega \in \partial\mathbb{H}^{n}$, any visual metric based at $\omega$ on $\partial_{\omega}\mathbb{H}^{n}$ with parameter $\varepsilon > 1$ would give rise to a visual metric on $\partial \mathbb{H}^{n}$ with parameter $\varepsilon > 1$ by a M\"obius inversion of $\partial_{\omega}\mathbb{H}^{n}$ centered at the point $\omega$.
\end{rem}
We will also apply our uniformization results to hyperbolic fillings of an arbitrary metric space $(Z,d)$. We briefly describe the construction of the hyperbolic filling here, with further details in Section \ref{sec:filling}, including proofs for the claims made here. Our construction will depend in part on two parameters $\alpha > 1$ and $\tau > 1$. For an $r > 0$ we say that a subset $S \subset Z$ is \emph{$r$-separated} if for each $x,y \in S$ we have $d(x,y) \geq r$. Given a parameter $\alpha > 1$, we choose for each $n \in \mathbb Z$ a maximal $\alpha^{-n}$-separated subset $S_{n}$ of $Z$. For $n \in \mathbb Z$ we write $V_{n} = \{(z,n): z \in S_{n}\}$ and set $V = \bigcup_{n \in \mathbb Z} V_{n}$. The set $V$ will serve as the vertex set for $X$. We define the \emph{height function} $h: V \rightarrow \mathbb Z$ on this vertex set by $h(v) = n$ for $v = (z,n) \in V_{n}$.
We associate to each vertex $v = (z,n)\in V$ the ball $B(v) = B(z,\tau \alpha^{-n})$ of radius $\tau \alpha^{-n}$ centered at $z$. We place an edge between vertices $v,w \in V$ if and only if their heights satisfy $|h(v)-h(w)| \leq 1$ and their associated balls satisfy $B(v) \cap B(w) \neq \emptyset$. We write $X$ for the resulting graph and call this a \emph{hyperbolic filling} of $Z$. If $\tau$ is sufficiently large (see inequality \eqref{tau requirement}) then $X$ will be a connected graph by Proposition \ref{connected filling}. We make $X$ into a geodesic metric space by declaring all edges to have unit length. We extend the height function $h$ to a $1$-Lipschitz function $h: X \rightarrow \mathbb R$ by linearly interpolating the values of $h$ from the vertices to the edges of $X$.
As a metric space $X$ is $\delta$-hyperbolic with $\delta = \delta(\alpha,\tau)$ depending only on the parameters $\alpha$ and $\tau$. There is a distinguished point $\omega \in \partial_{G}X$ in the Gromov boundary that can be thought of as an ideal point at infinity for $Z$. We have an identification $\partial_{\omega}X \cong \bar{Z}$ of the Gromov boundary relative to $\omega$ with the completion $\bar{Z}$ of $Z$. Under this identification the extension of the metric $d$ to $\bar{Z}$ defines a visual metric on $\partial_{\omega}X$ with parameter $\varepsilon = \log \alpha$. All of the results mentioned here are proved in Section \ref{sec:filling}.
We define a density $\rho$ on $X$ by $\rho(x) = \alpha^{-h(x)}$ for $x \in X$. We write $X_{\rho}$ for the conformal deformation of $X$ with conformal factor $\rho$. By Lemma \ref{height busemann} there is a Busemann function $b$ based at $\omega$ such that $|h(x)-b(x)| \leq 3$ for all $x \in X$ and therefore $h \in \mathcal{B}_{3}(X)$ in the notation of Remark \ref{flexibility}. For such a Busemann function $b \in \mathcal{B}(X)$ we have that the density $\rho$ is uniformly comparable to the density $\rho_{\varepsilon,b}$ with $\varepsilon = \log \alpha$.
\begin{thm}\label{filling theorem}
Let $Z$ be a metric space and let $X$ be a hyperbolic filling of $Z$ with parameters $\alpha > 1$ and $\tau > \min\{3,\alpha/(\alpha-1)\}$. Then $X$ is $\frac{1}{2}$-roughly starlike from $\omega$ and $\rho$ is a GH-density with constant $M = M(\alpha,\tau)$.
Thus the conclusions of Theorems \ref{unbounded uniformization} and \ref{identification theorem} hold for $X_{\rho}$. In particular we have a canonical $L$-biLipschitz identification of $\partial X_{\rho}$ and $\bar{Z}$, with $L = L(\alpha,\tau)$.
\end{thm}
We compare our results to those of \cite{BBS21} in Remark \ref{bounded filling}. Another notable predecessor to Theorem \ref{filling theorem} in the case that $Z$ is compact is the work of Piaggio \cite[Section 2]{P13}.
We provide here an outline of the contents of the rest of the paper. In Section \ref{sec:hyperbolic} we review several key notions in the setting of Gromov hyperbolic spaces. Section \ref{sec:tripod} establishes some basic properties of geodesic triangles in Gromov hyperbolic spaces with vertices on the Gromov boundary and gives a rough formula for evaluating certain distance functions and Busemann functions on their edges. We then use these results in Section \ref{sec:uniformize} to obtain estimates for the uniformized distance and prove Theorems \ref{unbounded uniformization}, \ref{identification theorem}, and \ref{CAT theorem}. In Section \ref{sec:filling} we construct the hyperbolic fillings of metric spaces that we use in Theorem \ref{filling theorem} and establish their basic properties. Lastly Theorem \ref{filling theorem} is proved in Section \ref{sec:uniform filling}.
We are very grateful to Nageswari Shanmugalingam for providing multiple drafts of the work \cite{BBS21} on which a significant part of this paper is based. We also thank Tushar Das for making us aware of the results of \cite{DSU17} that are used to prove Theorem \ref{CAT theorem}.
\section{Hyperbolic metric spaces}\label{sec:hyperbolic}
\subsection{Definitions}\label{subsec:Def} Let $X$ be a set and let $f$, $g$ be real-valued functions defined on $X$. For $c \geq 0$ we will write $f \doteq_{c} g$ if
\[
|f(x)-g(x)| \leq c,
\]
for all $x \in X$. If the exact value of the constant $c$ is not important or implied by context we will often just write $f \doteq g$. We will sometimes refer to the relation $f \doteq g$ as a \emph{rough equality} between $f$ and $g$.
If $C \geq 1$ and $f$ and $g$ both take values in $(0,\infty)$, we will write $f \asymp_{C} g$ if
\[
C^{-1}g(x) \leq f(x) \leq Cg(x).
\]
We will similarly write $f \asymp g$ if the value of $C$ is not important or implied by context. Note that if $f \doteq_{c} g$ then $e^{f} \asymp_{e^{c}} e^{g}$, and similarly if $f \asymp_{C} g$ then $\log f \doteq_{\log C} \log g$. We will stick to a convention of using lowercase $c$ for additive constants and uppercase $C$ for multiplicative constants. When this additive constant $c$ is determined by other parameters $\delta$, $K$, etc. under discussion we will write $c = c(\delta,K)$, while continuing to use the shorthand $c$ where it is not ambiguous (and the same for multiplicative constants $C$).
For a metric space $(X,d)$ we write $B(x,r) = \{y:d(x,y) < r\}$ for the open ball of radius $r > 0$ centered at $x$. A map $f:(X,d) \rightarrow (X',d')$ between metric spaces $X$ and $X'$ is \emph{isometric} if $d'(f(x),f(y)) = d(x,y)$ for $x$, $y\in X$. If furthermore $f$ is surjective then we say that it is an \emph{isometry} and that $X$ and $X'$ are \emph{isometric}. For a constant $c \geq 0$ a map $f:X \rightarrow X'$ is defined to be \emph{$c$-roughly isometric} if $d'(f(x),f(y)) \doteq_{c} d(x,y)$. The map $f$ is \emph{$L$-Lipschitz} for a constant $L \geq 0$ if $d'(f(x),f(y)) \leq Ld(x,y)$, and it is \emph{$L$-biLipschitz} for a constant $L \geq 1$ if $d'(f(x),f(y)) \asymp_{L} d(x,y)$. As usual we will not mention the exact value of the constants if they are unimportant.
When dealing with Gromov hyperbolic spaces $X$ in this paper we will use the generic distance notation $|xy|:=d(x,y)$ for the distance between $x$ and $y$ in $X$, except for cases where this could cause confusion. We will often use the generic notation $xy$ for a geodesic connecting two points $x,y \in X$, even when this geodesic is not unique; in these cases there will be no ambiguity regarding the geodesic that we are referring to. A \emph{geodesic triangle} $\Delta$ in $X$ is a collection of three points $x,y,z \in X$ together with geodesics $xy$, $xz$, and $yz$ joining these points, which we will refer to as the \emph{edges} of $\Delta$. We will also alternatively write $xyz = \Delta$ for a geodesic triangle with vertices $x$, $y$ and $z$.
For $x,y,z \in X$ the \emph{Gromov product} of $x$ and $y$ based at $z$ is defined by
\begin{equation}\label{Gromov product}
(x|y)_{z} = \frac{1}{2}(|xz|+|yz|-|xy|).
\end{equation}
We note the basepoint change inequality for $x,y,z,p \in X$,
\begin{equation}\label{basepoint change}
|(x|y)_{z}-(x|y)_{p}| \leq |zp|,
\end{equation}
which follows from the triangle inequality.
By \cite[Chapitre 2, Proposition 21]{GdH90} we have two key consequences of $\delta$-hyperbolicity for a metric space $X$ regarding Gromov products. The first is that for every $x,y,z,p \in X$ we have
\begin{equation}\label{delta inequality}
(x|z)_{p} \geq \min \{(x|y)_{p},(y|z)_{p}\} - 4\delta.
\end{equation}
We refer to \eqref{delta inequality} as the \emph{$4\delta$-inequality}.
The second is that for any geodesic triangle $xyz$ in $X$ we have that if $p \in xy$, $q \in xz$ are points with $|xp| = |xq| \leq (y|z)_{x}$ then $|pq| \leq 4\delta$. Here $xy$ and $xz$ are referring to the corresponding geodesics in the triangle $\Delta$. We will refer to this as the \emph{$4\delta$-tripod condition}.
Both inequality \eqref{delta inequality} and the tripod condition can be taken as equivalent definitions of hyperbolicity. By \cite[Chapitre 2, Proposition 21]{GdH90} all of these definitions are equivalent up to a factor of $4$. We note that the definition using inequality \eqref{delta inequality} does not use the fact that $X$ is geodesic, and is therefore used as a definition of $\delta$-hyperbolicity for general metric spaces. We will be citing several basic results from \cite{BS07} in which inequality \eqref{delta inequality} is used as the definition of $\delta$-hyperbolicity (with $\delta$ in place of $4\delta$). Wherever necessary we have multiplied the constants used in their results by $4$ in order to account for this discrepancy.
Let $X$ be a geodesic Gromov hyperbolic space and fix $p \in X$. A sequence $\{x_{n}\}$ \emph{converges to infinity} if we have $(x_{n}|x_{m})_{p} \rightarrow \infty$ as $m,n \rightarrow \infty$. The \emph{Gromov boundary} $\partial_{G} X$ of a Gromov hyperbolic space $X$ is defined to be the set of all equivalence classes of sequences $\{x_{n}\} \subset X$ converging to infinity, with the equivalence relation $\{x_{n}\} \sim \{y_{n}\}$ if $(x_{n}|y_{n})_{p} \rightarrow \infty$ as $n \rightarrow \infty$. Inequality \eqref{basepoint change} shows that these notions do not depend on the choice of basepoint $p$.
A second boundary that we can associate to $X$ is the \emph{geodesic boundary} $\partial^{g}X$, which is defined as equivalence classes of geodesic rays $\gamma: [0,\infty) \rightarrow X$, with two geodesic rays $\gamma$ and $\sigma$ being equivalent if there is a constant $c \geq 0$ such that $|\gamma(t)\sigma(t)| \leq c$ for $t \geq 0$. There is a natural inclusion $\partial^{g}X \subseteq \partial_{G}X$ given by sending a geodesic ray $\gamma$ to the sequence $\{\gamma(n)\}_{n \in \mathbb N}$. This inclusion need not be surjective in general. However, it is always surjective if $X$ is \emph{proper}, meaning that closed balls in $X$ are compact.
For a point $\omega \in \partial_{G}X$ and a sequence $\{x_{n}\}$ converging to infinity we will write $\{x_{n}\} \in \omega$ or $x_{n} \rightarrow \omega$ if $\{x_{n}\}$ belongs to the equivalence class of $\omega$. For a geodesic ray $\gamma:[a,\infty) \rightarrow X$, $a \in \mathbb R$, and a point $\omega \in \partial_{G}X$ we will write $\gamma \in \omega$ if $\{\gamma(n)\}_{n \geq a} \in \omega$, $n \in \mathbb N$. We will sometimes also consider geodesic rays $\gamma: (-\infty,a] \rightarrow X$ with a reversely oriented parametrization, for which we write $\gamma \in \omega$ if $\{\gamma(-n)\}_{n \geq -a} \in \omega$.
For the rest of this paper we will be using the standard notation $\partial X:=\partial_{G} X$ for the Gromov boundary of a Gromov hyperbolic space $X$. While this notation does conflict with the notation $\partial \Omega = \bar{\Omega} \backslash \Omega$ introduced prior to Definition \ref{def:uniform}, the meaning of the notation will always be clear from context since we will never use it in the sense of Definition \ref{def:uniform} in the context of Gromov hyperbolic spaces.
We now extend some notions regarding geodesic triangles to the Gromov boundary. For a point $x \in X$ and a point $\xi \in \partial X$ we will often write $x\xi$ for a geodesic ray $\gamma: [0,\infty) \rightarrow X$ with $\gamma(0)= x$ and $\gamma \in \xi$, provided such a geodesic ray exists. Similarly, for $\zeta,\xi \in \partial X$ we will write $\zeta\xi$ for a geodesic line $\gamma: \mathbb R \rightarrow X$ with $\gamma|_{(-\infty,0]} \in \zeta$ and $\gamma|_{[0,\infty)} \in \xi$, provided such a geodesic line exists. Such geodesic lines and rays always exist when $X$ is proper, but not necessarily in general. We extend the definition of geodesic triangles $\Delta$ in $X$ to allow for vertices in $\partial X$: a geodesic triangle $xyz = \Delta$ in $X$ is a collection of three points $x,y,z \in X \cup \partial X$ together with geodesics $xy$, $xz$, $yz$ connecting them in the sense described above.
\begin{rem}\label{distinct}
It is easy to see from the definitions that there is no geodesic $\gamma: \mathbb R \rightarrow X$ such that $\gamma|_{[0,\infty)}$ and $\gamma|_{(-\infty,0]}$ belong to the same equivalence class in the Gromov boundary $\partial X$. Hence, for a geodesic triangle $\Delta$, all vertices of $\Delta$ on $\partial X$ must be distinct.
\end{rem}
Gromov products based at points $p \in X$ can be extended to points of $\partial X$ by defining the Gromov product of equivalence classes $\xi$, $\zeta \in \partial X$ based at $p$ to be
\[
(\xi |\zeta)_{p} = \inf \liminf_{n \rightarrow \infty}(x_{n}|y_{n})_{p},
\]
with the infimum taken over all sequences $\{x_{n}\} \in \xi$, $\{y_{n}\} \in \zeta$. By \cite[Lemma 2.2.2]{BS07}, if $X$ is $\delta$-hyperbolic then for any choices of sequences $\{x_{n}\} \in \xi$, $\{y_{n}\} \in \zeta$ we have
\begin{equation}\label{sequence approximation}
(\xi |\zeta)_{p} \leq \liminf_{n \rightarrow \infty}(x_{n}|y_{n})_{p} \leq \limsup_{n \rightarrow \infty}(x_{n}|y_{n})_{p} \leq (\xi |\zeta)_{p} + 8\delta.
\end{equation}
We also have the $4\delta$-inequality for $\xi$, $\zeta$, $\omega \in \partial X$ and $p \in X$,
\begin{equation}\label{boundary delta inequality}
(\xi |\omega)_{p} \geq \min \{(\xi | \zeta)_{p},(\zeta | \omega)_{p}\} - 4\delta.
\end{equation}
For $x \in X$, $\xi \in \partial X$ the Gromov product is defined analogously as
\[
(x |\xi)_{p} = \inf \liminf_{n \rightarrow \infty}(x|x_{n})_{p},
\]
with the infimum taken over $\{x_{n}\} \in \xi$, and the analogues of \eqref{sequence approximation} and \eqref{boundary delta inequality} hold as well.
We next observe that geodesic triangles $\Delta$ with vertices in $X \cup \partial X$ are $10\delta$-thin, in the precise sense that if $u \in \Delta$ is any given point then there is a point $v \in \Delta$ satisfying $|uv| \leq 10\delta$ that does not belong to the same edge of $\Delta$ as $u$. When $X$ is proper this can be easily deduced from the $\delta$-thin triangles property for triangles in $X$ by a limiting argument. Without the properness hypothesis this result can also be obtained with a larger thinness constant $200\delta$ as a consequence of work of V\"ais\"al\"a \cite[Theorem 6.24]{V05}; we note that he uses \eqref{delta inequality} as the definition of $\delta$-hyperbolicity so we have to multiply the constant he obtains by $4$. As V\"ais\"al\"a works in the more general context of Gromov hyperbolic spaces that are not necessarily geodesic (which greatly complicates the proofs), we prefer to give a simpler direct proof of $10\delta$-thinness here.
\begin{lem}\label{infinite thin}
Let $\Delta$ be a geodesic triangle in $X$ with vertices in $X \cup \partial X$. Then $\Delta$ is $10\delta$-thin.
\end{lem}
\begin{proof}
Let $x,y,z \in X \cup \partial X$ be the vertices of $\Delta$. Let $u \in \Delta$ be given. Since $X$ has $\delta$-thin triangles, we may assume that $\Delta$ has at least one vertex on $\partial X$. We first consider the case in which $\Delta$ has exactly one vertex on $\partial X$, which by relabeling we can assume is $z$. We first assume that $u \in xy$. Let $\{z_{n}\} \subset xz$ and $\{z_{n}'\} \subset yz$ be sequences such that $z_{n} \rightarrow z$ and $z_{n}' \rightarrow z$. For each $n$ we let $\Delta_{n} = xyz_{n}$ be the geodesic triangle sharing the edge $xy$ with $\Delta$, having a second edge be the subsegment $xz_{n}$ of $xz$, and having a third edge be any choice of geodesic $yz_{n}$. Then $\Delta_{n}$ is $\delta$-thin, so we have for each $n$ that either $\mathrm{dist}(u,xz_{n}) \leq \delta$ or $\mathrm{dist}(u,yz_{n}) \leq \delta$ (or both). In the first case we are done since $xz_{n} \subset xz$, so we can assume that $\mathrm{dist}(u,yz_{n}) \leq \delta$. Let $v_{n} \in yz_{n}$ be such that $|uv_{n}| \leq \delta$. Then $|v_{n}y| \leq \delta + |uy|$.
Since both $z_{n}$ and $z_{n}'$ converge to $z$, for sufficiently large $n$ we will have $(z_{n}|z_{n}')_{y} \geq \delta + |uy|$, which implies in particular that $|z_{n}'y| \geq |v_{n}y|$. The $4\delta$-tripod condition applied to $y$, $z_{n}$, and $z_{n}'$ then implies that if $w_{n} \in yz_{n}'$ is the unique point such that $|yw_{n}| = |yv_{n}|$ then $|v_{n}w_{n}| \leq 4\delta$, from which it follows that $|uw_{n}| \leq 5\delta$ for all sufficiently large $n$. Since $w_{n} \in yz$ this completes the proof of this case.
The other cases are $u \in xz$ and $u \in yz$. By symmetry it suffices to prove the case $u \in xz$. We define the sequences $\{z_{n}\}$ and $\{z_{n}'\}$ and the triangle $\Delta_{n}$ as before. As in the case $u \in xy$ we can assume that $\mathrm{dist}(u,yz_{n}) \leq \delta$ for all $n$, as otherwise by the $\delta$-thin triangles property we have $\mathrm{dist}(u,xy) \leq \delta$ and we are done. We let $v_{n} \in yz_{n}$ be such that $|uv_{n}| \leq \delta$, note that $|v_{n}y| \leq \delta + |uy|$ as before, and choose $n$ large enough that $(z_{n}|z_{n}')_{y} \geq \delta + |uy|$. As before the $4\delta$-tripod condition then supplies a point $w_{n} \in yz_{n}'$ such that $|w_{n}v_{n}| \leq 4\delta$ and we conclude that $\mathrm{dist}(u,yz) \leq 5\delta$.
We can now handle the case in which potentially two or three vertices of $\Delta$ belong to $\partial X$. By symmetry it suffices to show for a point $u \in xy$ that $u$ is $10\delta$-close to either $xz$ or $yz$. Let $\{x_{n}\} \subset xy$ and $\{y_{n}\} \subset xy$ be sequences such that $x_{n} \rightarrow x$ and $y_{n} \rightarrow y$; if $x \in X$ then we set $x_{n} = x$ for all $n$ and similarly if $y \in X$ then we set $y_{n} = y$ for all $n$. Let $\Delta_{n} = x_{n}y_{n}z$ be a geodesic triangle with one edge the subsegment $x_{n}y_{n}$ of $xy$. Then $\Delta_{n}$ has at most one vertex $z$ on $\partial X$. We conclude from the previous case that $u$ is $5\delta$-close to either $x_{n}z$ or $y_{n}z$. By switching the roles of $x$ and $y$ if necessary, we can then assume that there is $v \in x_{n}z$ such that $|uv| \leq 5\delta$. If $x \in X$ then $x_{n} = x$ and we are done. Thus we can assume that $x \in \partial X$.
Fix any point $w \in xz$ and let $x_{n}' \in wx$ be defined such that $|wx_{n}'| = |ux_{n}|$. Since the geodesic rays $wx$ and $ux$ define the same point $x$ of the Gromov boundary, there is a constant $c \geq 0$ such that $|x_{n}x_{n}'| \leq c$ for all $n$. We apply the previous case again to a triangle $\Delta_{n}' = x_{n}x_{n}'z$ with edges the segment $x_{n}'z$, the segment $x_{n}z$, and a choice of geodesic $x_{n}x_{n}'$, obtaining that $v$ is $5\delta$-close to either $x_{n}'z$ or $x_{n}x_{n}'$. If $v$ is $5\delta$-close to $x_{n}x_{n}'$ for all $n$ then
\[
|x_{n}u| \leq |uv| + \mathrm{dist}(v,x_{n}x_{n}') + |x_{n}x_{n}'| \leq 10\delta + c,
\]
contradicting that $|x_{n}u| \rightarrow \infty$ as $n \rightarrow \infty$. We conclude that $v$ is $5\delta$-close to $x_{n}'z \subset xz$ for all sufficiently large $n$, which implies that $\mathrm{dist}(u,xz) \leq 10\delta$ as desired.
\end{proof}
We can also now formally define rough starlikeness from points of $X \cup \partial X$. We recall that for $\omega \in \partial X$ we write $\partial_{\omega} X = \partial X \backslash \{\omega\}$ for the Gromov boundary of $X$ relative to $\omega$. The definition is slightly different for points of $X$ and points of $\partial X$, so we handle these two cases separately.
\begin{defn}\label{def:rough star}
Let $X$ be a geodesic Gromov hyperbolic space. Let $z \in X$ and $K \geq 0$ be given. We say that $X$ is \emph{$K$-roughly starlike} from $z$ if
\begin{enumerate}
\item For each $x \in X$ there is a geodesic ray $\gamma: [0,\infty) \rightarrow X$ such that $\gamma(0) = z$ and $\mathrm{dist}(x,\gamma) \leq K$.
\item For each $\xi \in \partial X$ there is a geodesic ray $\gamma: [0,\infty) \rightarrow X$ such that $\gamma(0) = z$ and $\gamma \in \xi$.
\end{enumerate}
For a point $\omega \in \partial X$ we say that $X$ is \emph{$K$-roughly starlike} from $\omega$ if
\begin{enumerate}
\item For each $x \in X$ there is a geodesic line $\gamma: \mathbb R \rightarrow X$ such that $\mathrm{dist}(x,\gamma) \leq K$ and $\gamma|_{(-\infty,0]}\in \omega$.
\item For each $\xi \in \partial_{\omega} X$ there is a geodesic line $\gamma: \mathbb R \rightarrow X$ such that $\gamma|_{[0,\infty)} \in \xi$ and $\gamma|_{(-\infty,0]} \in \omega$.
\end{enumerate}
\end{defn}
Part (2) of Definition \ref{def:rough star} implies in both cases that $\partial^{g}X = \partial X$, i.e., the geodesic boundary and the Gromov boundary coincide. It will be used as a replacement for the properness hypothesis in the main theorem of \cite{BHK}. We note that Property (2) of Definition \ref{def:rough star} automatically holds for any $\omega \in X \cup \partial X$ when $X$ is proper, since in this case any two points of $X \cup \partial X$ can be joined by a geodesic. We also remark that if $\partial X$ consists of a single point $\omega$ then $X$ cannot be roughly starlike from $\omega$, since no geodesic line $\gamma: \mathbb R \rightarrow X$ can exist in this case. Similarly if $\partial X$ is empty then $X$ cannot be roughly starlike from any of its points.
\subsection{Busemann functions}
In this section we closely follow \cite[Chapter 3]{BS07}. Throughout much of the paper we will need to work with Gromov products based at a point $\omega \in \partial X$. These will be defined through the use of Busemann functions. In order to use the results from \cite[Chapter 3]{BS07} we have to show, for a geodesic ray $\gamma \in \omega$, that $b_{\gamma}$ is a Busemann function based at $\omega$ in their sense. The definition of a Busemann function given there starts with the function
\begin{equation}\label{bs busemann}
b_{\omega,p}(x) = (\omega|p)_{x}-(\omega|x)_{p},
\end{equation}
for $x,p \in X$ and $\omega \in \partial X$ and defines a Busemann function based at $\omega$ to be any function $b: X \rightarrow \mathbb R$ satisfying $b \doteq_{8\delta} b_{\omega,p}+s$ for some $p \in X$ and $s \in \mathbb R$ (recall that we are multiplying all of their constants by $4$ due to differing definitions of hyperbolicity). Note that this alternative definition \eqref{bs busemann} makes sense even for points in the Gromov boundary that do not belong to the geodesic boundary.
\begin{lem}\label{equivalence busemann}
Let $\omega \in \partial X$, let $p \in X$, and let $\gamma \in \omega$ be a geodesic ray with $\gamma(0) = p$. Then we have $b_{\omega,p} \doteq_{24\delta} b_{\gamma}$.
\end{lem}
\begin{proof}
By \cite[Example 3.1.4]{BS07} we have for all $x \in X$ that
\[
b_{\omega,p}(x) \doteq_{8\delta} |xp|-(\omega|x)_{p}.
\]
By inequality \eqref{sequence approximation} we have $(\gamma(n)|x)_{p} \doteq_{8\delta} (\omega|x)_{p}$ for $n \in \mathbb N$ sufficiently large. Since $p = \gamma(0)$ we have
\[
(\gamma(n)|x)_{p} = \frac{1}{2}(n + |xp|-|\gamma(n)x|).
\]
Then
\begin{equation}\label{final line equivalence}
|xp|-2(\omega|x)_{p} \doteq_{16\delta} |\gamma(n)x|-n.
\end{equation}
Since the right side converges to $b_{\gamma}(x)$ as $n \rightarrow \infty$, the result follows.
\end{proof}
We recall our definition of a Busemann function from \eqref{extension busemann definition}: a Busemann function $b \in \mathcal{B}(X)$ is any function $b: X \rightarrow \mathbb R$ such that $b = b_{\gamma}+s$ for some geodesic ray $\gamma$ in $X$ and some $s \in \mathbb R$. For such a Busemann function $b$ we let $\omega = \omega_{b} = [\gamma]$ denote its basepoint in $\partial X$. Then by Lemma \ref{equivalence busemann} $b$ is a Busemann function based at $\omega$ in the sense of \cite[Chapter 3]{BS07} as well, provided that we use a cutoff of $b \doteq_{24\delta} b_{\omega,p} + s$ instead of the $8\delta$-cutoff used there. This only has the effect of further multiplying constants by 3 in the claims of that chapter. An easy consequence of Lemma \ref{equivalence busemann} is the following.
\begin{lem}\label{identify busemann}
Let $\omega \in \partial X$ and let $\gamma,\sigma:[0,\infty) \rightarrow X$ be geodesic rays with $\gamma,\sigma \in \omega$. Then there is a constant $s \in \mathbb R$ such that $b_{\sigma} \doteq_{72\delta} b_{\gamma} + s$. The constant $s$ depends only on the starting points $\gamma(0)$ and $\sigma(0)$ of the rays and satisfies $s = 0$ if $\gamma(0) = \sigma(0)$.
Consequently if $b$ is any Busemann function based at $\omega$ and $\sigma \in \omega$ is any geodesic ray then there is a constant $s \in \mathbb R$ such that $b \doteq_{72\delta} b_{\sigma}+s$.
\end{lem}
\begin{proof}
By \cite[Lemma 3.1.2]{BS07}, for each $p,q, x \in X$ we have
\[
b_{\omega,p}(x) \doteq_{24\delta} b_{\omega,q}(x)+b_{\omega,q}(p).
\]
Setting $p = \gamma(0)$, $q = \sigma(0)$, and applying Lemma \ref{equivalence busemann} gives
\[
b_{\gamma}(x) \doteq_{72\delta} b_{\sigma}(x) + b_{\omega,\sigma(0)}(\gamma(0)).
\]
This gives the first claim of the lemma with $s = b_{\omega,\sigma(0)}(\gamma(0))$. The claim that $s = 0$ if $\gamma(0) = \sigma(0)$ follows from the fact that $b_{\omega,p}(p) = 0$ for any $p \in X$. The final claim follows immediately since for any Busemann function $b$ based at $\omega$ there is some geodesic ray $\gamma \in \omega$ such that $b = b_{\gamma}+s'$ for some $s' \in \mathbb R$.
\end{proof}
We will usually use the following lemma to perform computations with Busemann functions in practice. Note that the geodesics are parametrized as starting from the basepoint $\omega \in \partial X$ instead of ending there. The notation $(-\infty,a]$ below should be interpreted as $(-\infty,a] = \mathbb R$ when $a = \infty$.
\begin{lem}\label{geodesic busemann}
Let $b$ be a Busemann function on $X$ based at $\omega \in \partial X$. Let $a \in \mathbb R \cup \{\infty\}$ and let $\gamma:(-\infty,a] \rightarrow X$ be a geodesic with $\gamma(t) \rightarrow \omega$ as $t \rightarrow -\infty$.
\begin{enumerate}
\item For any $s, t \in (-\infty,a]$ (or any $s,t \in \mathbb R$ in the case $a = \infty$) we have
\begin{equation}\label{geodesic busemann equation}
b(\gamma(t)) - b(\gamma(s)) \doteq_{144\delta} t - s.
\end{equation}
\item For any constant $u \in \mathbb R$ there is an arclength reparametrization $\tilde{\gamma}: (-\infty,\tilde{a}] \rightarrow X$ of $\gamma$ such that $b(\tilde{\gamma}(t)) \doteq_{144\delta} t+u$ for $t \in (-\infty,\tilde{a}]$.
\end{enumerate}
\end{lem}
\begin{proof}
Let $s \in (-\infty,a]$ be given and let $\sigma_{s}:[s-a,\infty) \rightarrow X$ be defined by $\sigma_{s}(t) = \gamma(s-t)$. It's easily checked from the definition \eqref{first busemann definition} that $b_{\sigma_{s}}(\sigma_{s}(t)) = -t$ for $t \in [s-a,\infty)$. Lemma \ref{identify busemann} shows that there is a constant $c \in \mathbb R$ such that $b \doteq_{72\delta} b_{\sigma_{s}} + c$. It follows that for any $t \in (-\infty,a]$,
\[
b(\gamma(t))-b(\gamma(s)) \doteq_{144\delta} b_{\sigma_{s}}(\sigma_{s}(s-t))-b_{\sigma_{s}}(\sigma_{s}(0)) = t-s,
\]
for $t \in [-a,\infty)$. This proves (1).
For the second claim we fix an $s \in (-\infty,a)$ and define $\tilde{\gamma}(t) = \gamma(t-b(\gamma(s))+s+u)$ for $t \in (-\infty,\tilde{a}]$, $\tilde{a} = a-s+b(\gamma(s))-u$ (if $a = \infty$ we take $\tilde{a} = \infty$). Then by \eqref{geodesic busemann equation},
\begin{align*}
b(\tilde{\gamma}(t)) &= b(\gamma(t-b(\gamma(s))+s+u)) \\
&\doteq_{144\delta} (t-b(\gamma(s))+s+u)-s+b(\gamma(s)) \\
&= t+u.
\end{align*}
\end{proof}
For $x$, $y \in X$ and $b \in \hat{\mathcal{B}}(X)$ the Gromov product based at $b$ is defined by
\[
(x|y)_{b} = \frac{1}{2}(b(x) + b(y) - |xy|).
\]
Since $b$ is $1$-Lipschitz we have the useful inequality
\begin{equation}\label{both busemann}
(x|y)_{b} \leq \min\{b(x),b(y)\}.
\end{equation}
For $b \in \mathcal{D}(X)$ this notion essentially reduces to the standard Gromov product: if $b(x) = |xp| +s$ for some $p \in X$ and $s \in \mathbb R$ then $(x|y)_{b} = (x|y)_{p} +s$. The analogues of all the results below then follow from the discussion in the previous section. We will thus focus on the case of Busemann functions $b \in \mathcal{B}(X)$.
Let $b \in \mathcal{B}(X)$ and let $\omega = \omega_{b}$ be its basepoint. The Gromov product based at $b$ is extended to $\partial X$ by, for $(\xi,\zeta) \neq (\omega,\omega)$,
\[
(\xi | \zeta)_{b} = \inf \liminf_{n \rightarrow \infty}(x_{n}|y_{n})_{b}
\]
with the infimum taken over $\{x_{n}\} \in \xi$, $\{y_{n}\} \in \zeta$ as before, and similarly for $x \in X$ and $\xi \in \partial X$ we define
\[
(x| \xi)_{b} = \inf \liminf_{n \rightarrow \infty}(x | x_{n})_{b},
\]
with the infimum taken over $\{x_{n}\} \in \xi$. The next lemma extends the $4\delta$-inequality to Gromov products based at $b$. It follows from \cite[Lemma 3.2.4]{BS07}. Recall that we have multiplied their additive constants by a total of $12$ due to the differing definition of hyperbolicity and larger cutoff in defining Busemann functions; we then round up to $600\delta$ afterward. The corresponding additive constant in \cite[Lemma 3.2.4]{BS07} below is $44\delta$.
\begin{lem}\label{busemann inequality}
Let $b$ be a Busemann function based at $\omega \in \partial X$. Then
\begin{enumerate}
\item For any $\xi$, $\zeta \in \partial X \backslash \{\omega\}$ and any $\{x_{n}\} \in \xi$, $\{y_{n}\} \in \zeta$ we have
\[
(\xi |\zeta)_{b} \leq \liminf_{n \rightarrow \infty}(x_{n}|y_{n})_{b} \leq \limsup_{n \rightarrow \infty}(x_{n}|y_{n})_{b} \leq (\xi |\zeta)_{b} + 600\delta,
\]
and the same holds if we replace $\zeta$ with $x \in X$.
\item For any $\xi,\zeta,\lambda \in X \cup \partial_{\omega}X$ we have
\[
(\xi |\lambda)_{b} \geq \min \{(\xi | \zeta)_{b},(\zeta | \lambda)_{b}\} - 600\delta.
\]
\end{enumerate}
\end{lem}
Combining (1) of Lemma \ref{busemann inequality} with inequality \eqref{both busemann} gives for all $x,y \in X \cup \partial X$ with $(x,y) \neq (\omega,\omega)$,
\begin{equation}\label{both busemann boundary}
(x|y)_{b} \leq \min\{b(x),b(y)\}+600\delta,
\end{equation}
where we set $b(\omega) = -\infty$ and $b(\xi) = \infty$ for $\xi \in \partial_{\omega} X$.
For a point $\omega \in \partial^{g} X$ belonging to the geodesic boundary, a sequence $\{x_{n}\}$ \emph{converges to infinity with respect to $\omega$} if for some Busemann function $b$ based at $\omega$ we have $(x_{m}|x_{n})_{b} \rightarrow \infty$ as $m,n \rightarrow \infty$. Two sequences $\{x_{n}\}$, $\{y_{n}\}$ converging to infinity with respect to $\omega$ are \emph{equivalent with respect to $\omega$} if $(x_{n}|y_{n})_{b} \rightarrow \infty$. These notions do not depend on the choice of Busemann function $b$ based at $\omega$ by Lemma \ref{identify busemann}. One then defines the \emph{Gromov boundary relative to $\omega$} as the set of all equivalence classes of sequences converging to infinity with respect to $\omega$. We will denote this by $\partial_{\omega} X$. As our past use of the notation $\partial_{\omega}X = \partial X \backslash \{\omega\}$ suggests we have the following, which is \cite[Proposition 3.4.1]{BS07}.
\begin{prop}\label{convergence Busemann}
A sequence $\{x_{n}\}$ converges to infinity with respect to $\omega$ if and only if it converges to a point $\xi \in \partial X \backslash \{\omega\}$. This correspondence defines a canonical identification of $\partial_{\omega} X$ and $\partial X \backslash \{\omega\}$.
\end{prop}
We recall that for $\omega \in X$ we will often abuse terminology and also refer to $\partial_{\omega}X = \partial X$ as the Gromov boundary relative to $\omega$.
\subsection{Visual metrics}\label{subsec:visual} Let $K \geq 1$ and let $Z$ be a set. A function $\theta: Z \times Z \rightarrow [0,\infty)$ is a \emph{$K$-quasi-metric} if the following holds for any $z,z',z'' \in Z$,
\begin{enumerate}
\item $\theta(z,z') = 0$ if and only if $z = z'$,
\item $\theta(z,z')= \theta(z',z)$,
\item $\theta(z,z'') \leq K \max\{\theta(z,z'),\theta(z',z'')\}$.
\end{enumerate}
By a standard construction (see \cite[Lemma 2.2.5]{BS07}) a $K$-quasi-metric with $K \leq 2$ is always $4$-biLipschitz to a metric on $Z$. Since for $\varepsilon > 0$ we have that $\theta^{\varepsilon}$ is a $K^{\varepsilon}$ quasi-metric if $\theta$ is a $K$-quasi-metric, for any quasi-metric $\theta$ we always have that $\theta^{\varepsilon}$ is $4$-biLipschitz to a metric $d$ on $Z$ (by the identity map on $Z$) whenever $\varepsilon$ is small enough that $K^{\varepsilon} \leq 2$.
Let $X$ be a geodesic $\delta$-hyperbolic space. For $x \in X$ and $\varepsilon > 0$ we define for $\xi$, $\zeta \in \partial X$,
\begin{equation}\label{visual base}
\theta_{\varepsilon,x}(\xi,\zeta) = e^{-\varepsilon (\xi|\zeta)_{x}},
\end{equation}
with the understanding that $e^{-\infty} = 0$. By \eqref{boundary delta inequality} the function $\theta_{\varepsilon,x}$ defines an $e^{8\delta \varepsilon}$-quasi-metric on $\partial X$. We refer to any metric $\theta$ on $\partial X$ that is $L$-biLipschitz to $\theta_{\varepsilon,x}$ as a \emph{visual metric} on $\partial X$ based at $x$ with parameter $\varepsilon$; we call $L$ the \emph{comparison constant} to the model quasi-metric $\theta_{\varepsilon,x}$. A visual metric always exists once $\varepsilon$ is small enough that $e^{8\delta \varepsilon} \leq 2$. We give $\partial X$ the topology induced by any visual metric. Equipped with a visual metric with respect to any basepoint $x \in X$ and any parameter $\varepsilon > 0$ the set $\partial X$ is a complete bounded metric space. The basepoint change inequality \eqref{basepoint change} combined with inequality \eqref{sequence approximation} shows that the notion of a visual metric does not actually depend on the choice of basepoint $x \in X$, however the comparison constant to the quasi-metric \eqref{visual base} will depend on the basepoint. For $b \in \mathcal{D}(X)$ of the form $b(y) = d(x,y) + s$ for some $s \in \mathbb R$ we then define
\begin{equation}\label{D extension}
\theta_{\varepsilon,b}(\xi,\zeta) = e^{-\varepsilon (\xi|\zeta)_{b}} = e^{-\varepsilon s}\theta_{\varepsilon,x}(\xi,\zeta).
\end{equation}
Let $\omega \in \partial^{g} X$ be a point of the geodesic boundary and let $b \in \mathcal{B}(X)$ be a Busemann function based at $\omega$. We define for $\varepsilon > 0$ and $\xi, \zeta \in \partial_{\omega} X$,
\begin{equation}\label{visual quasi}
\theta_{\varepsilon,b}(\xi,\zeta) = e^{-\varepsilon (\xi|\zeta)_{b}}.
\end{equation}
Then $\theta_{\varepsilon,b}$ defines an $e^{600\delta \varepsilon}$-quasi-metric on $\partial_{\omega} X$ by Lemma \ref{busemann inequality}. A \emph{visual metric} based at $\omega$ with parameter $\varepsilon$ is defined to be any metric $\theta$ on $\partial_{\omega} X$ that is $L$-biLipschitz to $\theta_{\varepsilon,b}$, and as before we will call $L$ the comparison constant to the model quasi-metric $\theta_{\varepsilon,b}$. Since all Busemann functions associated to $\omega$ differ from each other by a constant, up to a bounded error (by Lemma \ref{identify busemann}), the notion of a visual metric based at $\omega$ does not depend on the choice of Busemann function $b$ based at $\omega$. Equipped with any visual metric based at $\omega$ the metric space $\partial_{\omega}X$ is complete. It is bounded if and only if $\omega$ is an isolated point in $\partial X$.
\begin{rem}\label{CAT visual}
For CAT$(-1)$ spaces the quasi-metric $\theta_{1,b}$ for $b \in \hat{\mathcal{B}}(X)$ with basepoint $\omega$ defines a distinguished visual metric on $\partial_{\omega} X$ with parameter $\varepsilon = 1$. This metric is known as a \emph{Bourdon metric} when $b \in \mathcal{D}(X)$ and a \emph{Hamenst\"adt metric} when $b \in \mathcal{B}(X)$. The basic properties of the Bourdon metric for CAT$(-1)$ spaces were established by Bourdon in \cite{Bou95}. The Hamenst\"adt metric was introduced by Hamenst\"adt in the setting of Hadamard manifolds with sectional curvatures $\leq -1$ in \cite{Ham89} through a slightly different construction. The formulation for CAT$(-1)$ spaces using Gromov products based at $b$ is due to Foertsch-Schroeder \cite{FS11}.
\end{rem}
\section{Tripod maps and Busemann functions}\label{sec:tripod}
In this section we let $X$ be a geodesic $\delta$-hyperbolic space for a given parameter $\delta \geq 0$. We will be establishing some standard claims regarding geodesic triangles in $X$ that have vertices on the Gromov boundary $\partial X$. We will then use these claims regarding geodesic triangles in $X$ to evaluate Busemann functions on geodesics in $X$ in Proposition \ref{compute Busemann} and Lemma \ref{star parametrize}. When $X$ is proper these claims can be obtained via limiting arguments from the corresponding claims for geodesic triangles in \cite[Chapitre 2]{GdH90}. Without the properness hypothesis they may be obtained (with larger constants) by careful examination and specialization of the results of V\"ais\"al\"a on roads and biroads in $\delta$-hyperbolic space \cite[Section 6]{V05}. We will provide more direct proofs of these results here, as we will also need to use some particular corollaries of the proofs that cannot be found in \cite{V05}. Providing our own proofs also allows us to organize the results in a manner that is convenient for our applications.
\subsection{Tripod maps} We start with a definition. The terminology is taken from \cite[Chapter 2]{BS07}. Compare \cite[Chapitre 2, D\'efinition 18]{GdH90}.
\begin{defn}\label{equiradial definition}
Let $\Delta$ be a geodesic triangle in $X$ with vertices $x,y,z \in X \cup \partial X$ and let $\chi \geq 0$ be given. A collection of points $\hat{x} \in yz$, $\hat{y} \in xz$, $\hat{z} \in xy$ is \emph{$\chi$-equiradial} if
\[
\mathrm{diam}\{\hat{x},\hat{y},\hat{z}\} = \max\{|\hat{x}\hat{y}|,|\hat{y}\hat{z}|,|\hat{x}\hat{z}|\} \leq \chi.
\]
We then refer to $\hat{x}$, $\hat{y}$, $\hat{z}$ as \emph{$\chi$-equiradial points} for $\Delta$.
\end{defn}
\begin{rem}\label{alternate}
For $x,y,z \in X$ Definition \ref{equiradial definition} makes sense in any geodesic metric space $X$. Taking $\chi = \delta$ gives yet another quantitatively equivalent definition of $\delta$-hyperbolicity for $X$. See \cite[Chapitre 2, Proposition 21]{GdH90}.
\end{rem}
When $x,y,z \in X$, the $4\delta$-tripod condition directly provides us with a set of $4\delta$-equiradial points $\hat{x}$, $\hat{y}$, $\hat{z}$ defined by the system of equalities $|x\hat{y}| = |x\hat{z}| = (y|z)_{x}$, $|y\hat{x}| = |y\hat{z}| = (x|z)_{y}$, and $|z\hat{x}| = |z\hat{y}| = (x|y)_{z}$. We will often refer to these points as the \emph{canonical equiradial points} for $\Delta$, since they are uniquely determined. The following definition encodes a convenient hypothesis to make on equiradial points of a geodesic triangle $\Delta$ that partially generalizes the notion of canonical equiradial points to the case that some of the vertices of $\Delta$ belong to $\partial X$.
We adopt the notation convention for $x,y \in X \cup \partial X$ that $|xy| = \infty$ if $x \neq y$ and one of $x$ or $y$ belongs to $\partial X$ and $|xy| = 0$ if $x = y$.
\begin{defn}\label{calibrated}
Let $\Delta$ be a geodesic triangle in $X$ with vertices $x,y,z \in X \cup \partial X$, let $\chi \geq 0$ be given, and let $(\hat{x},\hat{y},\hat{z})$ be a collection of $\chi$-equiradial points for $\Delta$. We say that this collection is \emph{calibrated} if we have $|\hat{x}z| = |\hat{y}z|$, $|\hat{y}x| = |\hat{z}x|$, and $|\hat{z}y| = |\hat{x}y|$.
\end{defn}
This condition is trivially satisfied when all vertices of $\Delta$ belong to $\partial X$, since all of the subsegments involved have infinite length.
We let $\Upsilon$ be the tripod geodesic metric space composed of three copies $L_{1}$, $L_{2}$, and $L_{3}$ of the closed half-line $[0,\infty)$ identified at $0$. This identification point will be denoted by $o$ and will be referred to as the \emph{core} of the tripod $\Upsilon$. The space $\Upsilon$ is clearly $0$-hyperbolic. The Gromov boundary $\partial \Upsilon$ consists of three points $\zeta_{i}$, $i = 1,2,3$, corresponding to the half-lines $L_{i}$ thought of as geodesic rays starting from the core $o$.
For a geodesic triangle $\Delta$ with a calibrated ordered triple of $\chi$-equiradial points $(\hat{x}, \hat{y}, \hat{z})$ as in Definitions \ref{equiradial definition} and \ref{calibrated}, we define the associated \emph{tripod map} $T: \Delta \rightarrow \Upsilon$ to be the map that sends the sides $xz$, $yz$, and $xy$ isometrically into $L_{1} \cup L_{3}$, $L_{2} \cup L_{3}$, and $L_{1} \cup L_{2}$ respectively in the unique way that satisfies $T(x) \in L_{1} \cup \{\zeta_{1}\}$, $T(y) \in L_{2} \cup \{\zeta_{2}\}$, $T(z) \in L_{3} \cup \{\zeta_{3}\}$, and $T(\hat{x}) = T(\hat{y}) = T(\hat{z}) = o$. To be more precise for boundary points, when $x \in \partial X$ we mean here that $T(x) = \zeta_{1}$, i.e., $T$ maps the geodesic rays $\hat{y}x$ and $\hat{z}x$ isometrically onto $L_{1}$. A choice of ordering of the equiradial points is required to define the map $T$ but is not important, as changing the ordering simply corresponds to permuting the rays $L_{i}$ in $\Upsilon$ while keeping the core $o$ fixed.
We first obtain the following direct consequence of Lemma \ref{infinite thin}.
\begin{lem}\ellambdabel{infinite equiradial}
Let $\Delta$ be a geodesic triangle with vertices $x,y,z \in X \cup \partial X$. Then there is a calibrated $60\deltalta$-equiradial collection of points $\hat{x} \in yz$, $\hat{y} \in xz$, $\hat{z} \in xy$.
\varepsilonnd{lem}
\begin{proof}
If all vertices of $\Delta$ belong to $X$ then the canonical equiradial points give a calibrated $4\deltalta$-equiradial collection for $\Delta$, so we can assume that at least one vertex of $\Delta$ belongs to $\partial X$. Thus we can assume without loss of generality that $z \in \partial X$.
Parametrize the side $xy$ by arclength as $\gammaamma: I \rightarrow X$ for an interval $I \subset \mathbb R$, oriented from $x$ to $y$. Let $E_{x} \subset I$ be the collection of times $t$ such that $\mathrm{dist}(\gammaamma(t),xz) \elleq 10\deltalta$ and $E_{y} \subset I$ the collection of times $t$ such that $\mathrm{dist}(\gammaamma(t),yz) \elleq 10\deltalta$. Each of the sets $E_{x}$ and $E_{y}$ are closed and we have $E_{x} \cup E_{y} = I$ by Lemma \ref{infinite thin}. We claim that both $E_{x}$ and $E_{y}$ are always nonempty. For this we can assume without loss of generality that $E_{x}$ is nonempty since $E_{x} \cup E_{y} = I$.
If $E_{y} = \varepsilonmptyset$ then $E_{x} = I$. For each $t \in I$ we let $x_{t} \in xz$ be a point such that $|x_{t}\gammaamma(t)| \elleq 10\deltalta$. For $n \in \mathbb N$ the sequence $\{\gammaamma(n)\}$ converges to $y$, which implies that the sequence $\{x_{n}\}$ converges to $y$ since these sequences are a bounded distance from one another. However any sequence of points converging to infinity in $xz$ can only possibly converge to $x$ or $z$, which is a contradiction. Thus $E_{y}$ must also be nonempty.
By the connectedness of $I$ we then conclude that $E_{x} \cap E_{y} \neq \emptyset$. Letting $s \in E_{x} \cap E_{y}$, setting $w:=\gamma(s)$, and selecting points $u \in xz$, $v \in yz$ such that $|wu| \leq 10\delta$ and $|wv| \leq 10\delta$, we conclude that $\{w,u,v\}$ is a $20\delta$-equiradial collection of points for $\Delta$.
Lastly we need to produce a calibrated collection of equiradial points from the collection $\{w,u,v\}$. If all vertices of $\Delta$ belong to $\partial X$ then the collection is trivially calibrated, so we can assume at least one vertex of $\Delta$ belongs to $X$. By relabeling the vertices we can then assume that either $x \in X$ and $y \in \partial X$ or $x \in X$ and $y \in X$. In both cases we can find $u' \in xz$ such that $|xu'| = |xw|$ since $|xz| = \infty$. Then
\begin{equation}\label{u u}
|uu'| = ||xu|-|xu'|| = ||xu|-|xw|| \leq |uw| \leq 20\delta.
\end{equation}
It follows that the collection $\{w,u',v\}$ is $40\delta$-equiradial. If $y \in \partial X$ then this collection is also calibrated and we are done.
If $y \in X$ then we repeat this argument again by using the fact that $|yz| = \infty$ to find $v' \in yz$ such that $|yv'| = |yw|$. The calculation \eqref{u u} then shows that $|vv'| \leq 20\delta$ as well. We can then conclude that the collection $\{w,u',v'\}$ is calibrated and $60\delta$-equiradial, as desired.
\end{proof}
Our next goal will be to prove that the tripod map $T: \Delta \rightarrow \Upsilon$ associated to the calibrated collection of equiradial points produced by Lemma \ref{infinite equiradial} is roughly isometric. We will require the following simple lemma.
\begin{lem}\label{simple}
Let $X$ be a metric space and let $x,y,z \in X$ with $|xz| \leq |yz|$. Suppose that we are given geodesics $xz$ and $yz$ joining $x$ to $z$ and $y$ to $z$ respectively. Let $u \in xz$, $v \in yz$ be given points that satisfy $|xu| = |yv|$ and let $w \in yz$ be the unique point satisfying $|wz| = |uz|$. Then $w \in vz$ and $|wv| \leq |xy|$.
\end{lem}
\begin{proof}
The point $w$ must belong to the subsegment $vz$ of $yz$, as if $w \in yv$ and $w \neq v$ then
\begin{align*}
|yz| &= |yv| + |wz| - |wv| \\
&= |xu| + |uz| - |wv| \\
&= |xz|-|wv| \\
& < |xz|,
\end{align*}
contradicting that $|yz| \geq |xz|$. Since $w \in vz$ we then have
\begin{align*}
|yz| &= |yv| + |vw| + |wz| \\
&= |xu| + |vw| + |uz| \\
&= |xz| + |vw|,
\end{align*}
which implies by the triangle inequality that $|vw| \leq |xy|$.
\end{proof}
We now apply Lemma \ref{simple} to the setting of a $\delta$-hyperbolic space $X$.
\begin{lem}\label{infinite triangle}
Let $x,y \in X$, let $z \in X \cup \partial X$, and let $\bar{x} \in xz$, $\bar{y} \in yz$ satisfy $|x\bar{x}| = |y\bar{y}|$. Then we have
\begin{equation}\label{combined inequality}
|\bar{x}\bar{y}| \leq 3|xy|+8\delta.
\end{equation}
\end{lem}
\begin{proof}
Set $t = |x\bar{x}| = |y\bar{y}|$. If $t \leq |xy|$ then
\[
|\bar{x}\bar{y}| \leq |\bar{x}x| + |xy| + |\bar{y}y| \leq 3|xy|,
\]
which verifies inequality \eqref{combined inequality}. We can thus assume that $t > |xy|$.
We first assume that $z \in X$. We can then assume without loss of generality that $|xz|\leq |yz|$. We consider a geodesic triangle $\Delta = xyz$ with sides the given geodesics $xz$ and $yz$, as well as a geodesic $xy$ from $x$ to $y$. Let $w \in yz$ be the unique point such that $|wz| = |\bar{x}z|$. Lemma \ref{simple} shows that $w \in \bar{y}z$ and $|w\bar{y}| \leq |xy|$.
Let $x' \in xz$ and $y' \in yz$ be the canonical equiradial points for $\Delta$ on these edges. These points must satisfy $\max\{|x'x|,|y'y|\} \leq |xy|$ since $xy$ is an edge of $\Delta$. The assumption $t > |xy|$ then implies that $\bar{x} \in x'z$ and $\bar{y} \in y'z$. Thus $w \in y'z$. The $4\delta$-tripod condition then implies that $|w\bar{x}| \leq 4\delta$, from which it follows that $|\bar{x}\bar{y}| \leq |xy| + 4\delta$. This proves \eqref{combined inequality} in this case.
We now consider the case $z \in \partial X$. For each $s \geq 0$ we define $x_{s} \in xz$, $y_{s} \in yz$ to be the points such that $|x x_{s}| = s$ and $|yy_{s}| = s$. Since the geodesics $xz$ and $yz$ have the same endpoint $z \in \partial X$, we must have $(x_{s}|y_{s})_{x} \rightarrow \infty$ as $s \rightarrow \infty$ and the same for $(x_{s}|y_{s})_{y}$. We choose $s$ large enough that $(x_{s}|y_{s})_{x} \geq t$ and $(x_{s}|y_{s})_{y} \geq t$. We consider a geodesic triangle $\Delta_{1} = xx_{s}y_{s}$ with edges the subsegment $xx_{s}$ of the given geodesic $xz$ as well as geodesics $x_{s}y_{s}$ and $xy_{s}$, and a triangle $\Delta_{2} = xy y_{s}$ with edges the subsegment $yy_{s}$ of the given geodesic $yz$, the edge $xy_{s}$ of $\Delta_{1}$, and a geodesic $xy$. Then $\bar{x} \in xx_{s}$ and $\bar{y} \in yy_{s}$ by our choice of $s$.
Since $(x_{s}|y_{s})_{x} \geq t$, we must have $|xy_{s}| \geq t$. Therefore there is a unique point $w \in xy_{s}$ such that $|xw| = |x\bar{x}| = t$. The $4\delta$-tripod condition applied to the triangle $\Delta_{1}$ then implies that $|\bar{x}w| \leq 4\delta$. If $|xy_{s}| \leq |yy_{s}|$ then we let $u \in yy_{s}$ be the unique point such that $|uy_{s}| = |wy_{s}|$. By applying Lemma \ref{simple} we then conclude that $u \in \bar{y}y_{s}$ and $|u\bar{y}| \leq |xy|$. Since $xy$ is an edge of the triangle $\Delta_{2}$, the canonical equiradial points of this triangle on the edges $xy_{s}$ and $yy_{s}$ can be at most a distance $|xy| \leq t$ from the vertices $x$ and $y$ respectively. We thus conclude from the $4\delta$-tripod condition that $|uw| \leq 4\delta$. Combining these inequalities together gives
\begin{equation}\label{calculate}
|\bar{x}\bar{y}| \leq |\bar{x}w| + |wu| + |u\bar{y}| \leq |xy| + 8\delta,
\end{equation}
which proves \eqref{combined inequality}. The case $|xy_{s}| \geq |yy_{s}|$ is similar: we let $v \in xy_{s}$ be the point such that $|vy_{s}| = |\bar{y}y_{s}|$, apply Lemma \ref{simple} to obtain $|vw| \leq |xy|$ and $v \in wy_{s}$, then apply the $4\delta$-tripod condition to obtain $|v\bar{y}| \leq 4\delta$. This gives inequality \eqref{combined inequality} through the same calculation as \eqref{calculate}.
\end{proof}
\begin{rem}\label{sharper}
The proof of Lemma \ref{infinite triangle} shows that we have the sharper inequality $|\bar{x}\bar{y}| \leq |xy| + 8\delta$ when $|x\bar{x}| = |y\bar{y}| > |xy|$.
\end{rem}
We will use Lemma \ref{infinite triangle} to show that the tripod map associated to a collection of calibrated equiradial points for a geodesic triangle $\Delta$ is roughly isometric.
\begin{prop}\label{rough tripod}
Let $x,y,z \in X \cup \partial X$ be given vertices of a geodesic triangle $\Delta$ in $X$. Let $\hat{x} \in yz$, $\hat{y} \in xz$, $\hat{z} \in xy$ be points such that $(\hat{x},\hat{y},\hat{z})$ is a calibrated ordered triple of $\chi$-equiradial points for $\Delta$ for a given $\chi \geq 0$. Let $T: \Delta \rightarrow \Upsilon$ be the tripod map associated to this triple. Then $T$ is $(6\chi+16\delta)$-roughly isometric.
In particular if $(\hat{x},\hat{y},\hat{z})$ is the calibrated $60\delta$-equiradial triple produced in Lemma \ref{infinite equiradial} then $T$ is $400\delta$-roughly isometric.
\end{prop}
\begin{proof}
By symmetry (permuting the vertices $x$, $y$, and $z$), to estimate $|T(p)T(q)|$ for $p,q \in \Delta$ it suffices to restrict to the case $p \in \hat{y}z$ and then consider the possible locations of $q$. By construction we have $|T(p)T(q)| = |pq|$ if $p$ and $q$ belong to the same edge of $\Delta$, since the tripod map is isometric on the edges of $\Delta$. This handles the case that $q$ belongs to the same edge as $p$, i.e., that $q \in xz$.
We next consider the case $q \in yz$. Since $|\hat{y}z| = |\hat{x}z|$, we can find a point $u \in \hat{x}z$ such that $|\hat{x}u| = |\hat{y}p|$. Then $|T(p)T(q)| = |uq|$. Applying Lemma \ref{infinite triangle} yields
\[
|up| \leq 3|\hat{y}\hat{x}| + 8\delta \leq 3\chi + 8\delta,
\]
so that
\[
||uq|-|pq|| \leq |up| \leq 3\chi + 8\delta,
\]
which gives the desired estimate in this case.
Lastly we must consider the case $q \in xy$, which we subdivide into the cases $q \in x\hat{z}$ and $q \in \hat{z}y$. When $q \in x\hat{z}$ we can use the condition $|x\hat{z}| = |x\hat{y}|$ to find a point $v \in x\hat{y}$ such that $|q\hat{z}| = |v\hat{y}|$. Then $|T(p)T(q)| = |vp|$. Similarly to the previous case, Lemma \ref{infinite triangle} gives us the estimate $|vq| \leq 3\chi + 8\delta$ which implies that
\[
||vp|-|pq|| \leq |vq| \leq 3\chi + 8\delta,
\]
as desired. When $q \in \hat{z}y$ we use the equality $|\hat{z}y| = |\hat{x}y|$ to find $w \in \hat{x}y$ such that $|\hat{z}q| = |\hat{x}w|$, and we use the equality $|\hat{x}z| = |\hat{y}z|$ to find $s \in \hat{x}z$ such that $|s\hat{x}| = |p\hat{y}|$. Then $|T(p)T(q)| = |sw|$. Lemma \ref{infinite triangle} gives us the estimate
\[
\max\{|sp|,|wq|\} \leq 3\chi + 8\delta,
\]
which implies by the triangle inequality that
\[
||sw|-|pq|| \leq ||sw|-|wp|| + ||wp|-|qp|| \leq |sp| + |wq| \leq 6\chi + 16\delta.
\]
This completes the proof of the main claim. The final assertion follows by substituting $\chi = 60\delta$ and rounding up.
\end{proof}
\begin{rem}\label{shorthand}
Throughout this paper we will often suppress the exact choice of calibrated equiradial points used to define a tripod map $T: \Delta \rightarrow \Upsilon$. To make this more formal, for a geodesic triangle $\Delta$ in $X$ we will refer to a tripod map $T: \Delta \rightarrow \Upsilon$ associated to $\Delta$ as being any tripod map $T$ for $\Delta$ associated to an ordered triple $(\hat{x},\hat{y},\hat{z})$ of calibrated $60\delta$-equiradial points for $\Delta$ obtained from Lemma \ref{infinite equiradial}. We will also abuse terminology and say that $\hat{x}$, $\hat{y}$ and $\hat{z}$ are equiradial points for the tripod map $T$ (as opposed to for the triangle $\Delta$).
\end{rem}
\subsection{Calculating Busemann functions} We recall that the Gromov boundary $\partial \Upsilon$ of the tripod $\Upsilon$ is a disjoint union of three points $\zeta_{i}$, $i=1,2,3$, corresponding to the geodesic rays $\gamma_{i}:[0,\infty) \rightarrow \Upsilon$ that parametrize the half-lines $L_{i}$ starting from $o$ for $i =1,2,3$. Set $b_{\Upsilon}:=b_{\gamma_{1}}$ to be the Busemann function associated to the geodesic ray $\gamma_{1}$. A straightforward calculation shows that $b_{\Upsilon}$ is given by $b_{\Upsilon}(s) = -s$ for $s \in L_{1}$ and $b_{\Upsilon}(s) = s$ for $s \in L_{2}$ or $s \in L_{3}$, when we consider each of these rays as identified with $[0,\infty)$.
In this next proposition we consider a geodesic triangle $\Delta$ in $X$ with a distinguished vertex $\omega \in \partial X$ together with a Busemann function $b$ based at $\omega$. We will not keep track of exact constants in the proof of this lemma so we will not produce an explicit value for $\kappa = \kappa(\delta)$ below. If one does careful bookkeeping in the proof it is possible to show that $\kappa = 2000\delta$ works.
\begin{prop}\label{compute Busemann}
Let $\Delta = \omega xy$ be a geodesic triangle in $X$ with $\omega \in \partial X$ and $x,y\in X \cup \partial_{\omega} X$. There is a constant $\kappa = \kappa(\delta)$ such that the following holds: let $\hat{\omega} \in xy$, $\hat{x} \in \omega y$, and $\hat{y} \in \omega x$ be a calibrated set of $60\delta$-equiradial points on $\Delta$ provided by Lemma \ref{infinite equiradial} and let $b$ be a Busemann function based at $\omega$. Let $T: \Delta \rightarrow \Upsilon$ be the tripod map associated to the triple $(\hat{\omega},\hat{x},\hat{y})$. Then for each $p \in \Delta$ we have
\begin{equation}\label{tripod image}
b(p) \doteq_{\kappa} b_{\Upsilon}(T(p)) + (x|y)_{b}.
\end{equation}
Consequently we have $b(p) \doteq_{\kappa} (x|y)_{b}$ for $p \in \{\hat{\omega},\hat{x},\hat{y}\}$ and
\begin{equation}\label{tripod minimum}
(x|y)_{b} \doteq_{\kappa} \inf_{p \in xy} b(p).
\end{equation}
\end{prop}
\begin{proof}
Since we will not be keeping track of the exact value of the final constant $\kappa = \kappa(\delta)$ in the proof, we will let $\doteq$ denote any equality up to an additive error depending only on $\delta$. Set $u = b(\hat{\omega})$. Then $u \doteq b(\hat{x})$ and $u \doteq b(\hat{y})$ since $b$ is 1-Lipschitz. We will prove the rough equality \eqref{tripod image} with $u$ in place of $(x|y)_{b}$ and use this to deduce that $u \doteq (x|y)_{b}$. Thus we will first show that for $p \in \Delta$ we have
\begin{equation}\label{modified tripod}
b(p) \doteq b_{\Upsilon}(T(p)) + u.
\end{equation}
We first handle the case in which $p \in \omega x$ or $p \in \omega y$. Since the roles of $x$ and $y$ are symmetric, we can assume without loss of generality that $p \in \omega x$. Let $\gamma: (-\infty,a] \rightarrow X$ be an arclength parametrization of $\omega x$ with $\gamma(0) = \hat{y}$ and $\gamma(t) \rightarrow \omega$ as $t \rightarrow -\infty$. If we define $s \in (-\infty,a]$ such that $\gamma(s) = p$ then it follows from the construction of the tripod map that $b_{\Upsilon}(T(\gamma(s))) = s$. Applying Lemma \ref{geodesic busemann} gives
\[
b(p) - b(\hat{y}) \doteq s = b_{\Upsilon}(T(\gamma(s))),
\]
which gives \eqref{modified tripod} since $b(\hat{y}) \doteq u$.
The remaining case is when $p \in xy$. By the symmetric roles of $x$ and $y$ we can assume that $p \in x\hat{\omega}$. As in the proof of Proposition \ref{rough tripod}, since $|x\hat{\omega}| = |x\hat{y}|$ we can find $q \in x\hat{y}$ such that $|p\hat{\omega}| = |q\hat{y}|$. Then by Lemma \ref{infinite triangle} we have
\[
|pq| \leq 3|\hat{y}\hat{\omega}| + 10\delta \leq 190\delta.
\]
Thus $b(p) \doteq b(q)$ since $b$ is 1-Lipschitz. It then follows, from the rough equality \eqref{modified tripod} for $q \in \omega x$ that we established above, that
\[
b(p) \doteq b(q) \doteq |q\hat{y}| + u = |p\hat{\omega}| + u,
\]
which gives \eqref{modified tripod} in this case.
We next show that $u \doteq (x|y)_{b}$. By Lemma \ref{busemann inequality} we have for any sequences $x_{n} \rightarrow x$ and $y_{n} \rightarrow y$ that $(x_{n}|y_{n})_{b} \doteq_{600\delta} (x|y)_{b}$ for sufficiently large $n$; if $x \in X$ then we can just set $x_{n} = x$ for all $n$ and the same goes for $y$. We choose sequences $\{x_{n}\}$ and $\{y_{n}\}$ that belong to $xy$ and consider only those $n$ large enough that $(x_{n}|y_{n})_{b} \doteq_{600\delta} (x|y)_{b}$ and $x_{n} \in \hat{\omega} x$, $y_{n} \in \hat{\omega} y$. Then applying \eqref{modified tripod},
\begin{align*}
(x|y)_{b}&\doteq (x_{n}|y_{n})_{b} \\
&= \frac{1}{2}(b(x_{n}) + b(y_{n}) - |x_{n}y_{n}|) \\
&\doteq \frac{1}{2}(|x_{n}\hat{\omega}|+ |y_{n}\hat{\omega}| + 2u - |x_{n}y_{n}|) \\
&= u.
\end{align*}
Thus we can substitute in $(x|y)_{b}$ for $u$ in \eqref{modified tripod} at the cost of an additional additive constant depending only on $\delta$. The main claim \eqref{tripod image} follows. The assertion that $b(p) \doteq_{\kappa} (x|y)_{b}$ for $p \in \{\hat{\omega},\hat{x},\hat{y}\}$ follows from \eqref{tripod image} since each point of $\{\hat{\omega},\hat{x},\hat{y}\}$ has image $o \in \Upsilon$ under the tripod map $T$ and $b_{\Upsilon}(o) = 0$. The rough equality \eqref{tripod minimum} also follows directly from \eqref{tripod image} since the image of $xy$ under $T$ is contained in $L_{2} \cup L_{3}$ and $b_{\Upsilon}$ is nonnegative on this subset of $\Upsilon$.
\end{proof}
Proposition \ref{compute Busemann} leads to the following important definition, which is useful for calculations. We recall our convention that $\partial_{\omega} X = \partial X$ when $\omega \in X$.
\begin{defn}\label{adapted busemann}
Let $X$ be a geodesic $\delta$-hyperbolic space and let $b \in \hat{\mathcal{B}}(X)$ be given with basepoint $\omega$. Let $x,y \in X \cup \partial_{\omega} X$ and let $c \geq 0$ be a given constant. Suppose that $xy$ is a geodesic joining $x$ to $y$. We say that a parametrization $\gamma:I \rightarrow X$, $I \subseteq \mathbb R$, of $xy$ by arclength is \emph{$c$-adapted to $b$} if $0 \in I$ and
\begin{equation}\label{adapted equation}
b(\gamma(t)) \doteq_{c} |t| + (x|y)_{b},
\end{equation}
for $t \in I$.
\end{defn}
The inclusion of $0$ in the domain of $\gamma$ will be vital for our applications. When the value of $c$ is implied by context we will often shorten the terminology to just saying that the parametrization $\gamma$ is adapted to $b$.
For $b \in \hat{\mathcal{B}}(X)$ with basepoint $\omega$ we will construct adapted parametrizations for geodesics joining any two points $x,y \in X \cup \partial_{\omega} X$ under an assumption similar to the rough starlikeness hypothesis of Theorem \ref{unbounded uniformization}. We emphasize that the points $x$ and $y$ in the lemma need not always be the vertices of a geodesic triangle $\Delta$ with a third vertex at $\omega$.
\begin{lem}\label{star parametrize}
Let $X$ be a geodesic $\delta$-hyperbolic space and let $b \in \hat{\mathcal{B}}(X)$ be given with basepoint $\omega$. Let $x,y \in X \cup \partial_{\omega} X$ and let $xy$ be a given geodesic from $x$ to $y$. Suppose that we are given $K \geq 0$ and points $x',y' \in X \cup \partial_{\omega}X$ and geodesics $\omega x',\omega y'$ joining $\omega$ to $x'$ and $y'$ respectively such that $\max\{|xx'|,|yy'|\} \leq K$. Then there is a constant $c = c(\delta,K)$ depending only on $\delta$ and $K$ such that there is a parametrization $\gamma: I \rightarrow X$ of $xy$ that is $c$-adapted to $b$.
\end{lem}
\begin{proof}
We first consider the case that $x = x'$ and $y = y'$, so that we can take $K = 0$. Let $\Delta = \omega x y$ be the geodesic triangle formed by the geodesics $\omega x$, $\omega y$, and $xy$. Let $T: \Delta \rightarrow \Upsilon$ be a $400\delta$-roughly isometric tripod map associated to $\Delta$ such that $T(\omega) \in L_{1} \cup\{\zeta_{1}\}$, $T(x) \in L_{2} \cup \{\zeta_{2}\}$, and $T(y) \in L_{3} \cup \{\zeta_{3}\}$, as given by Proposition \ref{rough tripod}. We identify the union $L_{2} \cup L_{3}$ of geodesic rays in $\Upsilon$ with $\mathbb R$ by identifying $L_{2}$ with $(-\infty,0]$ and $L_{3}$ with $[0,\infty)$, sending the core $o$ of $\Upsilon$ to the origin in $\mathbb R$. We let $I = T(xy) \subset L_{2} \cup L_{3}$ denote the image of $xy$ under $T$ and consider $I$ as a subinterval $I \subset \mathbb R$ under the identification of $L_{2} \cup L_{3}$ with $\mathbb R$. Since the tripod map $T$ is isometric when restricted to $xy$, we can then construct an arclength parametrization $\gamma: I \rightarrow X$ of $xy$ by inverting the restriction of $T$ to $xy$. By the construction of $T$ we have $0 \in I$ since the core $o$ is contained in the image $T(xy)$ of $xy$.
When $b \in \mathcal{B}(X)$ the rough equality \eqref{tripod image} directly implies the $c$-adapted condition \eqref{adapted equation} for $\gamma$ with $c = c(\delta)$, since for $z \in L_{2} \cup L_{3}$ we have $b_{\Upsilon}(z) = |zo|$. For $b \in \mathcal{D}(X)$ it is easy to see that it suffices to verify \eqref{adapted equation} for $b$ of the form $b(x) = |x\omega|$, $\omega \in X$. We then have to show that there is a constant $c = c(\delta)$ such that for $t \in I$ we have
\begin{equation}\label{distance adapated}
d(\gamma(t),\omega) \doteq_{c} |t| + (x|y)_{\omega}.
\end{equation}
By \eqref{sequence approximation} we can find points $\bar{x} \in \omega x$, $\bar{y} \in \omega y$ such that $(\bar{x}|\bar{y})_{\omega} \doteq_{c(\delta)} (x|y)_{\omega}$ and $T(\bar{x}) \in L_{2}$, $T(\bar{y}) \in L_{3}$. Then $(T(\bar{x})|T(\bar{y}))_{T(\omega)} \doteq_{c(\delta)} (\bar{x}|\bar{y})_{\omega}$ since $T$ is $c(\delta)$-roughly isometric. Since $T(\bar{x}) \in L_{2}$ and $T(\bar{y}) \in L_{3}$, a quick calculation then shows that
\[
(T(\bar{x})|T(\bar{y}))_{T(\omega)} = |T(\omega)o|,
\]
and therefore $(x|y)_{\omega} \doteq_{c(\delta)} |T(\omega)o|$. Thus for $t \in I$ we have
\[
|t| + (x|y)_{\omega} \doteq_{c(\delta)} |t| + |T(\omega)o| = |T(\omega)T(\gamma(t))|,
\]
with the second equality following from the construction of $\gamma$. The rough equality \eqref{distance adapated} with $c = c(\delta)$ then directly follows from the fact that $T$ is $c(\delta)$-roughly isometric.
We now consider the general case in which we are given points $x',y' \in X \cup \partial_{\omega}X$ and $K \geq 0$ such that $\max\{|xx'|,|yy'|\} \leq K$. If $x$ and $y$ both belong to $\partial_{\omega} X$ then our conventions imply that $x = x'$ and $y = y'$, hence this case reduces to the case $K = 0$ considered previously.
If $x$ and $y$ both belong to $X$ then we apply the $K = 0$ case to the points $x'$ and $y'$ to obtain a $c'$-adapted parametrization $\sigma: I' \rightarrow X$ of $x'y'$ oriented from $x'$ to $y'$, $I' = [t_{-}',t_{+}']$ with $c' = c'(\delta)$. Since $0 \in I'$ we have $t_{-}' \leq 0$ and $t_{+}' \geq 0$. Let $\eta: I \rightarrow X$, $I = [t_{-}',t_{+}]$, be the unique arclength parametrization of $xy$ that is oriented from $x$ to $y$ and starts from the same time parameter $t_{-}'$ as $\sigma$. The piecewise geodesic curve $xx' \cup x'y' \cup yy'$ joining $x$ to $y$ can be parametrized as a $4K$-roughly isometric map $\beta: J \rightarrow X$ for an appropriate interval $J \subset \mathbb R$. By the stability of geodesics in Gromov hyperbolic spaces \cite[Theorem 1.3.2]{BS07} this implies that there is a constant $c_{0} = c_{0}(\delta,K)$ such that the given geodesic $xy$ is contained in a $c_{0}$-neighborhood of the curve $\beta$. Since the segments $xx'$ and $yy'$ of $\beta$ are each contained in a $K$-neighborhood of $\sigma$, by increasing $c_{0}$ by an amount depending only on $K$ we can assume that $xy$ is contained in a $c_{0}$-neighborhood of $\sigma$.
Now let $t \in I$ be given and let $s \in I'$ be such that $|\eta(t)\sigma(s)| \leq c_{0}$. Since $b$ is 1-Lipschitz it follows that
\[
b(\eta(t)) \doteq_{c_{0}} b(\sigma(s)) \doteq_{c'} |s|+(x|y)_{b}.
\]
Thus it suffices to show that $t \doteq_{c''} s$ for a constant $c'' = c''(\delta,K)$. Since $t-t_{-}' = |\eta(t)x|$ and $s-t_{-}' = |\sigma(s)x'|$, we have
\begin{align*}
|t-s| &= |(t-t_{-}')-(s-t_{-}')| \\
&= ||\eta(t)x|-|\sigma(s)x'|| \\
&\leq ||\eta(t)x|-|\eta(t)x'|| + ||\eta(t)x'|-|\sigma(s)x'|| \\
&\leq |xx'| + |\eta(t)\sigma(s)| \\
&\leq K+ c_{0},
\end{align*}
so that we can set $c'' = K+c_{0}$. It follows that $\eta$ satisfies \eqref{adapted equation} with constant $c = c(\delta,K)$ depending only on $\delta$ and $K$.
If $0 \in I$ then $\eta$ gives a parametrization of $xy$ that is $c$-adapted to $b$ and we are done. We can therefore assume that $0 \notin I$ which implies that $t_{+} < 0$ since $t_{-}' \leq 0$. We then note that $|x'y'| \doteq_{2K} |xy|$ and $t_{+}'-t_{-}' = |x'y'|$, $t_{+}-t_{-}' = |xy|$, which implies that $t_{+} \doteq_{2K} t_{+}'$. Since $t_{+}' \geq 0$ and $t_{+} \leq 0$, we conclude that $|t_{+}| \leq 2K$. We set $I'' = [t_{-}'-t_{+},0]$ and set $\gamma(t) = \eta(t+t_{+})$ for $t \in I''$. Then $0 \in I''$ by construction and this arclength parametrization $\gamma$ of $xy$ still satisfies \eqref{adapted equation} with $c= c(\delta,K)$ since $b$ is $1$-Lipschitz and $|t_{+}| \leq 2K$. Thus $\gamma$ gives the desired adapted parametrization.
Lastly we consider the case in which one of $x$ or $y$ belong to $\partial_{\omega} X$, but not both. Without loss of generality we can assume that $x \in X$ and $y \in \partial_{\omega} X$. Let $\{y_{n}\} \subset xy$ be the sequence of points with $|xy_{n}| = n$ for each $n \in \mathbb N$. Let $\eta_{n}: I_{n} \rightarrow X$ be the arclength parametrizations of $xy_{n}$ for each $n$ that were constructed in the previous case, $I_{n} = [s_{n},t_{n}]$. Since $0 \in I_{n}$ for each $n$ we have $s_{n} \leq 0$ for each $n$. Since $\eta_{n}(s_{n}) = x$ for each $n$, we have from the condition that $\eta_{n}$ is $c$-adapted to $b$,
\[
b(x) \doteq_{c} |s_{n}|+(x|y)_{b} = -s_{n} + (x|y)_{b}
\]
with $c = c(\delta,K)$. It follows that $s_{m} \doteq_{c} s_{n}$ for each $m, n \in \mathbb N$. Thus, by replacing $\eta_{n}$ with the parametrization $\gamma_{n}$ defined by $\gamma_{n}(t) = \eta_{n}(t-s_{1}+s_{n})$ on the domain $I_{n}' = [s_{1},t_{n}+s_{1}-s_{n}]$, we can assume that $s_{n} = s_{1}:= s$ for all $n \in \mathbb N$. Note also that, since $t_{n} \rightarrow \infty$ as $n \rightarrow \infty$ and $s \leq 0$, we have $0 \in I_{n}$ for all large enough $n$. It follows that the resulting parametrization $\gamma_{n}$ will be $c$-adapted to $b$ for $n$ large enough that $t_{n} \geq 0$ since $b$ is 1-Lipschitz, with $c = c(\delta,K)$.
With these modifications the parametrizations $\gamma_{n}$ now have the same starting point $s \leq 0$. Since these are parametrizations of $xy_{n}$ by arclength and the sequence $\{y_{n}\}$ defines progressively longer subsegments $xy_{n}$ of $xy$ that exhaust $xy$, the maps $\gamma_{n}$ coincide wherever their domains overlap and can therefore be used to define a parametrization $\gamma: [s,\infty) \rightarrow X$ of $xy$ that is $c$-adapted to $b$ by construction.
\end{proof}
\section{Uniformization}\label{sec:uniformize}
Our task in this section will be to prove Theorems \ref{unbounded uniformization}, \ref{identification theorem}, and \ref{CAT theorem}. Section \ref{subsec:estimate uniform} establishes some general estimates for the uniformized distances $d_{\varepsilon,b}$. Section \ref{subsec:busemann uniformize} proves the theorems in the case that $b \in \mathcal{B}(X)$ (i.e., $b$ is a Busemann function). Section \ref{subsec:distance uniformize} then uses a special construction (Definition \ref{ray augment}) to deduce the case $b \in \mathcal{D}(X)$ from the case $b \in \mathcal{B}(X)$. Since Theorem \ref{CAT theorem} follows from Theorems \ref{unbounded uniformization} and \ref{identification theorem} once we've shown that $\rho_{1,b}$ is a GH-density in this case in Proposition \ref{strong hyperbolic admissible}, we will focus our efforts on proving Theorems \ref{unbounded uniformization} and \ref{identification theorem} after that point.
\subsection{Estimates for the uniformized distance}\label{subsec:estimate uniform} In this section we will derive some estimates for the conformal deformation $(X_{\varepsilon,b},d_{\varepsilon,b})$ of a geodesic $\delta$-hyperbolic space $X$ by the density $\rho_{\varepsilon,b}(x) = e^{-\varepsilon b(x)}$ for a given $\varepsilon > 0$ and $b \in \hat{\mathcal{B}}(X)$ using the tripod maps we built in Section \ref{sec:tripod}. For now we will not be assuming that $\rho_{\varepsilon,b}$ is a GH-density (using the terminology of Definition \ref{conformal factor}). Hence we can use these results to establish that $\rho_{\varepsilon,b}$ is a GH-density in certain important cases. To simplify notation we will drop the function $b$ from the notation for objects associated to the conformal deformation and write $\rho_{\varepsilon}:=\rho_{\varepsilon,b}$, $X_{\varepsilon} :=X_{\varepsilon,b}$, etc. For a curve $\gamma: I \rightarrow X_{\varepsilon}$ we will write $\ell_{\varepsilon}(\gamma):=\ell_{\rho_{\varepsilon}}(\gamma)$ for its length measured in the metric $d_{\varepsilon}$. We let $\ell(\gamma)$ denote the length of $\gamma$ measured in $X$ instead.
\begin{rem}\label{appendix interlude} Throughout the rest of this paper we will be using \cite[Proposition A.7]{BHK}, which for a geodesic metric space $X$ and a continuous function $\rho:X \rightarrow (0,\infty)$ allows us to compute the lengths $\ell_{\rho}(\gamma)$ in the conformal deformation $X_{\rho}$ of curves $\gamma: I \rightarrow X$ parametrized by arclength in $X$ as
\begin{equation}\label{appendix prop}
\ell_{\rho}(\gamma) = \int_{I}\rho \circ \gamma \, ds,
\end{equation}
with $ds$ denoting the standard length element in $\mathbb R$.
\end{rem}
Since $b$ is $1$-Lipschitz we have the \emph{Harnack type inequality} for $x,y \in X$,
\begin{equation}\label{Harnack}
e^{-\varepsilon|xy|} \leq \frac{\rho_{\varepsilon}(x)}{\rho_{\varepsilon}(y)} \leq e^{\varepsilon|xy|},
\end{equation}
which made its first appearance in the statement of Theorem \ref{Gehring-Hayman} earlier.
The metric spaces $X_{\varepsilon}$ and $X$ are biLipschitz equivalent on bounded subsets of $X$ by inequality \eqref{Harnack}. A more precise estimate for this is given in the lemma below.
\begin{lem}\label{arc Harnack}
For any $x,y \in X$ we have
\[
\rho_{\varepsilon}(x)\varepsilon^{-1}(1-e^{-\varepsilon |xy|}) \leq d_{\varepsilon}(x,y) \leq \rho_{\varepsilon}(x)\varepsilon^{-1}(e^{\varepsilon |xy|}-1).
\]
\end{lem}
\begin{proof}
For the upper bound we let $xy$ be a geodesic joining $x$ to $y$. Then, using \eqref{Harnack},
\begin{align*}
d_{\varepsilon}(x,y) &\leq \int_{xy} \rho_{\varepsilon}\, ds \\
&\leq \rho_{\varepsilon}(x)\int_{0}^{|xy|} e^{\varepsilon t}\, dt \\
&= \rho_{\varepsilon}(x)\varepsilon^{-1}(e^{\varepsilon |xy|}-1).
\end{align*}
For the lower bound we consider a rectifiable curve $\gamma$ joining $x$ to $y$ in $X$, which we can assume is parametrized by arclength as $\gamma:[0,\ell(\gamma)] \rightarrow X$ with $\ell(\gamma)$ denoting the length of $\gamma$ in $X$. With this parametrization $\gamma$ defines a $1$-Lipschitz function from $[0,\ell(\gamma)]$ to $X$, so that in particular we have $|x\gamma(t)| \leq t$ for each $t \in [0,\ell(\gamma)]$. Then by \eqref{Harnack},
\begin{align*}
\ell_{\varepsilon}(\gamma) &\geq \rho_{\varepsilon}(x)\int_{0}^{\ell(\gamma)} e^{-\varepsilon |x\gamma(t)|}\, dt \\
&\geq \rho_{\varepsilon}(x)\int_{0}^{\ell(\gamma)} e^{-\varepsilon t}\, dt \\
&= \rho_{\varepsilon}(x)\varepsilon^{-1}(1-e^{-\varepsilon \ell(\gamma)}) \\
&\geq \rho_{\varepsilon}(x)\varepsilon^{-1}(1-e^{-\varepsilon |xy|}),
\end{align*}
where in the final line we used that $\ell(\gamma) \geq |xy|$.
\end{proof}
Lemma \ref{arc Harnack} can be rewritten in the following useful form when $|xy| \leq 1$.
\begin{lem}\label{rephrased arc Harnack}
For any $x,y \in X$ with $|xy| \leq 1$ we have
\begin{equation}\label{rephrased inequality}
d_{\varepsilon}(x,y) \asymp_{C(\varepsilon)} e^{-\varepsilon(x|y)_{b}}|xy|.
\end{equation}
\end{lem}
\begin{proof}
For $0 \leq t \leq 1$ we have the inequalities
\[
1-e^{-\varepsilon t} \geq \varepsilon e^{-\varepsilon}t,
\]
and
\[
e^{\varepsilon t}-1 \leq \varepsilon e^{\varepsilon} t,
\]
as can be verified by noting that equality holds at $t = 0$ and differentiating each side. Thus for $|xy| \leq 1$ the inequality of Lemma \ref{arc Harnack} implies that
\begin{equation}\label{unbounded inequality}
d_{\varepsilon}(x,y) \asymp_{C(\varepsilon)} \rho_{\varepsilon}(x)|xy| \asymp_{C(\varepsilon)} e^{-\varepsilon(x|y)_{b}}|xy|,
\end{equation}
with the final estimate following from
\[
\rho_{\varepsilon}(x) = e^{-\varepsilon b(x)} \asymp_{e^{\varepsilon}} e^{-\varepsilon(x|y)_{b}},
\]
since $|xy| \leq 1$ and $b$ is $1$-Lipschitz.
\end{proof}
The comparison \eqref{unbounded inequality} in Lemma \ref{rephrased arc Harnack} has the following important consequence, which proves the last claim of Theorem \ref{unbounded uniformization}.
\begin{prop}\label{bounded equivalence}
$X_{\varepsilon}$ is bounded if and only if $b \in \mathcal{D}(X)$.
\end{prop}
\begin{proof}
We first suppose that $b \in \mathcal{D}(X)$. For $x \in X$ we can then write $b(x) = |xz| + s$ for some $z \in X$ and $s \in \mathbb R$. We let $x \in X$ be given and let $\gamma$ be a geodesic joining $z$ to $x$. Then
\begin{align*}
d_{\varepsilon}(x,z) &\leq \int_{\gamma} \rho_{\varepsilon}\, dt \\
&= e^{-\varepsilon s}\int_{0}^{|xz|}e^{-\varepsilon t}\,dt \\
&=\varepsilon^{-1}e^{-\varepsilon s}(1-e^{-\varepsilon |xz|}) \\
&\leq \varepsilon^{-1}e^{-\varepsilon s}.
\end{align*}
It follows that $X_{\varepsilon}$ is bounded with $\mathrm{diam} \, X_{\varepsilon} \leq 2\varepsilon^{-1}e^{-\varepsilon s}$.
Now suppose that $b \in \mathcal{B}(X)$. Then we can find a geodesic ray $\gamma:[0,\infty) \rightarrow X$ and a constant $s \in \mathbb R$ such that $b = b_{\gamma}+s$. For each $t \geq 0$ we apply the comparison \eqref{unbounded inequality} with $x = \gamma(t)$ and $y = \gamma(t+1)$ to obtain
\[
d_{\varepsilon}(\gamma(t),\gamma(t+1)) \asymp_{C(\varepsilon)} e^{-\varepsilon b(\gamma(t))} = e^{\varepsilon(t-s)},
\]
since $b(\gamma(t)) = -t+s$. Thus as $t \rightarrow \infty$ we have $d_{\varepsilon}(\gamma(t),\gamma(t+1)) \rightarrow \infty$. It follows that $X_{\varepsilon}$ is unbounded.
\end{proof}
We next use adapted parametrizations to estimate the length in $X_{\varepsilon}$ of geodesics in $X$. Below we write $\omega = \omega_{b}$ for the basepoint of $b$.
\begin{lem}\label{epsilon geodesic}
Let $x,y \in X$ be given and let $\gamma$ be a geodesic in $X$ joining $x$ to $y$. Suppose that we are given $K \geq 0$ and points $x',y' \in X$ and geodesics $\omega x',\omega y'$ joining $\omega$ to $x'$ and $y'$ such that $\max\{|xx'|,|yy'|\} \leq K$. Then
\begin{equation}\label{epsilon geodesic estimate}
\ell_{\varepsilon}(\gamma) \asymp_{C(\delta,K,\varepsilon)} e^{-\varepsilon(x|y)_{b}}\min\{1,|xy|\}.
\end{equation}
\end{lem}
\begin{proof}
Throughout this proof all additive constants $c \geq 0$ and multiplicative constants $C \geq 1$ will depend only on $\delta$, $K$, and $\varepsilon$; we write $\doteq$ and $\asymp$ for $\doteq_{c}$ and $\asymp_{C}$ respectively. We consider an arclength parametrization $\gamma: I \rightarrow X$ of $\gamma$ that is $c$-adapted to $b$ with $c = c(\delta,K)$ as constructed in Lemma \ref{star parametrize}. We assume that $\gamma$ is oriented from $x$ to $y$ and set $w = \gamma(0)$. By \eqref{adapted equation} we then have $b(w) \doteq (x|y)_{b}$.
When $|xy| \leq 1$ we observe that $|zw| \leq |xy| \leq 1$ for all $z \in xy$. Inequality \eqref{Harnack} then implies that
\begin{equation}\label{small comparison}
\rho_{\varepsilon}(z) \asymp_{e^{\varepsilon}} \rho_{\varepsilon}(w).
\end{equation}
By integrating the comparison \eqref{small comparison} over $\gamma$ we obtain
\[
\ell_{\varepsilon}(\gamma) \asymp \rho_{\varepsilon}(w)|xy| \asymp e^{-\varepsilon (x|y)_{b}}|xy|.
\]
This gives the estimate \eqref{epsilon geodesic estimate} when $|xy| \leq 1$.
We now suppose that $|xy| \geq 1$. Let $\gamma_{1}:[-|xw|,0] \rightarrow X$ and $\gamma_{2}:[0,|yw|] \rightarrow X$ denote the parametrizations of the subsegments of $\gamma$ from $x$ to $w$ and from $w$ to $y$ respectively. Then, using \eqref{adapted equation} and $b(w) \doteq (x|y)_{b}$, we have
\begin{align*}
\ell_{\varepsilon}(\gamma) &= \ell_{\varepsilon}(\gamma_{1}) + \ell_{\varepsilon}(\gamma_{2}) \\
&= \int_{\gamma_{1}}\rho_{\varepsilon} \, dt + \int_{\gamma_{2}}\rho_{\varepsilon} \, dt \\
&\asymp e^{-\varepsilon (x|y)_{b}} \left(\int_{0}^{|xw|}e^{-\varepsilon t}\, dt + \int_{0}^{|yw|}e^{-\varepsilon t}\, dt\right) \\
&= \varepsilon^{-1}e^{-\varepsilon (x|y)_{b}}(2-e^{-\varepsilon |xw|} - e^{-\varepsilon |yw|}).
\end{align*}
It follows immediately that
\[
\ell_{\varepsilon}(\gamma) \leq Ce^{-\varepsilon (x|y)_{b}},
\]
with $C = C(\delta,K,\varepsilon)$. This gives the upper bound in \eqref{epsilon geodesic estimate} when $|xy| \geq 1$. For the lower bound we note that since $|xy| \geq 1$ and $|xw| + |yw| = |xy|$, we must have $\max\{|xw|,|yw|\} \geq \frac{1}{2}$. Therefore
\[
\varepsilon^{-1}e^{-\varepsilon (x|y)_{b}}(2-e^{-\varepsilon |xw|} - e^{-\varepsilon |yw|}) \geq \varepsilon^{-1}e^{-\varepsilon(x|y)_{b}}(1-e^{-\frac{\varepsilon}{2}}).
\]
This gives the lower bound in \eqref{epsilon geodesic estimate} when $|xy| \geq 1$.
In connection with Lemma \ref{epsilon geodesic} it is helpful to formulate the following definition.
\begin{defn}\label{roughly geodesic}
Let $\omega \in X \cup \partial X$ be given. For a constant $K \geq 0$ we say that $X$ is \emph{$K$-roughly geodesic from $\omega$} if for each $x \in X$ there exists $x' \in X$ and a geodesic $\omega x'$ joining $\omega$ to $x'$ such that $|x x'| \leq K$.
\end{defn}
When $X$ is $K$-roughly geodesic from $\omega$ we can apply Lemma \ref{epsilon geodesic} freely to any geodesic $\gamma$ in $X$ with this constant $K$. If $\omega \in X$ then $X$ is $0$-roughly geodesic from $\omega$ since $X$ is geodesic. For the case $\omega \in \partial X$ we note that if $X$ is $K$-roughly starlike from $\omega$ then $X$ is clearly also $K$-roughly geodesic from $\omega$. Note however that $X$ can be roughly geodesic from $\omega \in \partial X$ without being roughly starlike from $\omega$; this happens for instance when $X$ is a tree with arbitrarily long finite branches. We also remark that $X$ is always $0$-roughly geodesic from any point of $\partial X$ when it is proper.
When $\rho_{\varepsilon}$ is a GH-density we thus obtain the following corollary of Lemma \ref{epsilon geodesic} using the GH-inequality \eqref{first GH}.
\begin{lem}\label{lem:proto estimate both}
Suppose that $X$ is $K$-roughly geodesic from $\omega$ and that $\rho_{\varepsilon}$ is a GH-density with constant $M$. Then for any $x,y \in X$ we have
\begin{equation}\label{proto estimate both}
d_{\varepsilon}(x,y) \asymp_{C(\delta,K,\varepsilon,M)} e^{-\varepsilon (x|y)_{b}}\min\{1,|xy|\}.
\end{equation}
\end{lem}
Lemma \ref{epsilon geodesic} has the following immediate corollary when combined with Lemma \ref{rephrased arc Harnack}.
\begin{cor}\label{criterion}
Suppose that $X$ is $K$-roughly geodesic from $\omega$ and that there is a constant $M_{0} \geq 1$ such that for any $x,y \in X$ with $|xy| > 1$ we have
\begin{equation}\label{criterion estimate}
e^{-\varepsilon(x|y)_{b}} \leq M_{0}d_{\varepsilon}(x,y).
\end{equation}
Then $\rho_{\varepsilon}$ is a GH-density with constant $M = M(\delta,K,\varepsilon,M_{0})$.
\end{cor}
\begin{proof}
If $x,y \in X$ with $|xy| \leq 1$ then combining Lemmas \ref{rephrased arc Harnack} and \ref{epsilon geodesic} establishes the GH-inequality \eqref{first GH} with $M = M(\delta,K,\varepsilon)$. If we instead have $|xy| > 1$ then the inequality \eqref{criterion estimate} implies the GH-inequality \eqref{first GH} with $M = M(\delta,K,\varepsilon,M_{0})$ by the estimate \eqref{epsilon geodesic estimate} and the fact that $d_{\varepsilon}(x,y) \leq \ell_{\varepsilon}(\gamma)$ for any geodesic $\gamma$ joining $x$ to $y$.
\end{proof}
We will use Corollary \ref{criterion} to show for a CAT$(-1)$ space $X$ that $\rho_{1} = \rho_{1,b}$ is a GH-density for any $b \in \hat{\mathcal{B}}(X)$ with a universal constant $M \geq 1$. Our proof will be based on the following four point inequality for CAT$(-1)$ spaces.
\begin{prop}\label{prop:strong hyp}\cite[Proposition 3.3.4]{DSU17}
Let $X$ be a CAT$(-1)$ space. Then for any four points $x,y,z,w \in X$ we have
\begin{equation}\label{strong hyp inequality}
e^{-(x|z)_{w}} \leq e^{-(x|y)_{w}} + e^{-(y|z)_{w}}.
\end{equation}
\end{prop}
Metric spaces satisfying the inequality \eqref{strong hyp inequality} are called \emph{strongly hyperbolic} in \cite{DSU17}.
The inequality \eqref{strong hyp inequality} can easily be improved to hold for Gromov products based at any function $b \in \hat{\mathcal{B}}(X)$.
\begin{lem}\label{improve strong hyp}
Let $X$ be a CAT$(-1)$ space. Then for any $x,y,z \in X$ and $b \in \hat{\mathcal{B}}(X)$ we have
\begin{equation}\label{improve strong inequality}
e^{-(x|z)_{b}} \leq e^{-(x|y)_{b}} + e^{-(y|z)_{b}}.
\end{equation}
\end{lem}
\begin{proof}
If $b \in \mathcal{D}(X)$ has the form $b(x) = d(x,w)$ for some $w \in X$ then \eqref{improve strong inequality} is just a restatement of \eqref{strong hyp inequality}. The inequality for $b$ of the form $b(x) = d(x,w) + s$ for some $w \in X$ and $s \in \mathbb R$ can then be obtained by multiplying the inequality \eqref{strong hyp inequality} through by $e^{-s}$.
Now suppose that $b \in \mathcal{B}(X)$. Then we can find a geodesic ray $\gamma:[0,\infty) \rightarrow X$ and an $s \in \mathbb R$ such that $b = b_{\gamma} + s$. By multiplying the target inequality \eqref{improve strong inequality} through by $e^{s}$ we see that it suffices to consider the case that $b = b_{\gamma}$. Let $x,y,z \in X$ be given. For each $t \in [0,\infty)$ we have from \eqref{strong hyp inequality} that
\[
e^{-(x|z)_{\gamma(t)}} \leq e^{-(x|y)_{\gamma(t)}} + e^{-(y|z)_{\gamma(t)}}.
\]
After multiplying each side by $e^{t}$ and expanding the Gromov products we obtain that
\[
e^{-\frac{1}{2}(|x\gamma(t)|-t + |z\gamma(t)|-t -|xz|)} \leq e^{-\frac{1}{2}(|x\gamma(t)|-t + |y\gamma(t)|-t -|xy|)} + e^{-\frac{1}{2}(|y\gamma(t)|-t + |z\gamma(t)|-t -|yz|)}.
\]
Letting $t \rightarrow \infty$ then gives inequality \eqref{improve strong inequality}.
\end{proof}
By combining Lemma \ref{improve strong hyp} with Corollary \ref{criterion} we can show that the density $\rho_{1}$ for $b \in \hat{\mathcal{B}}(X)$ is a GH-density when $X$ is a complete CAT$(-1)$ space. The completeness hypothesis is only used in the case $b \in \mathcal{B}(X)$.
\begin{prop}\label{strong hyperbolic admissible}
There is a constant $M \geq 1$ such that for any complete CAT$(-1)$ space $X$ and any $b \in \hat{\mathcal{B}}(X)$ we have that $\rho_{1} = \rho_{1,b}$ is a GH-density with constant $M$.
\end{prop}
\begin{proof}
We first observe that $X$ is always $0$-roughly geodesic from $\omega$. For $\omega \in X$ this is trivial, while for $\omega \in \partial X$ this can be deduced from the completeness hypothesis together with the CAT$(-1)$ condition on $X$ \cite[Proposition 4.4.4]{DSU17}. Thus we can apply Corollary \ref{criterion} with $K = 0$. Let $x,y \in X$ be given and let $\omega \in X \cup \partial X$ denote the basepoint of $b$. Since $X$ is $\delta$-hyperbolic with $\delta = \delta(\mathbb{H}^{2})$ and we are restricting to the case $\varepsilon = 1$, it suffices by Corollary \ref{criterion} to produce a universal constant $M_{0} \geq 1$ such that for any $x,y \in X$ with $|xy| > 1$ we have
\begin{equation}\label{admissible target}
e^{-(x|y)_{b}} \leq M_{0}d_{1}(x,y).
\end{equation}
We consider a rectifiable curve $\eta: I \rightarrow X$ joining $x$ to $y$ that is parametrized by arclength (in $X$) and oriented from $x$ to $y$. We define a finite sequence of points $t_{0},\dots,t_{n}$ in $I$ inductively as follows: we set $t_{0}$ to be the left endpoint of $I$ and for each applicable $k > 0$ we set $t_{k}$ to be the supremum of all points $t \in I$ such that $t \geq t_{k-1}$ and $|\eta(s)\eta(t_{k-1})| < 1$ for each $t_{k-1} \leq s \leq t$. The finite length of $\eta$ in $X$ ensures that this sequence is finite, so that we have as a consequence that this process terminates at the right endpoint $t_{n}$ of $I$. The assumption $|xy| > 1$ implies that $n \geq 2$. By construction we then have $|\eta(t_{k})\eta(t_{k+1})| = 1$ for $0 \leq k \leq n-2$ and $|\eta(t_{n-1})\eta(t_{n})| \leq 1$. Since $b$ is $1$-Lipschitz it follows that
\[
e^{-(x|y)_{b}} \leq Ce^{-(\eta(t_{0})|\eta(t_{n-1}))_{b}},
\]
with $C = e$, recalling that $\eta(t_{0}) = x$. Hence to prove \eqref{admissible target} it suffices to show for any rectifiable curve $\eta$ joining $x$ to $y$ that we have
\begin{equation}\label{reduce target}
e^{-(\eta(t_{0})|\eta(t_{n-1}))_{b}} \leq C\ell_{1}(\eta),
\end{equation}
for some constant $C \geq 1$. Repeated application of the inequality \eqref{improve strong inequality} based at $b$ gives
\[
e^{-(\eta(t_{0})|\eta(t_{n-1}))_{b}} \leq \sum_{k=0}^{n-2}e^{-(\eta(t_{k})|\eta(t_{k+1}))_{b}} \leq C\sum_{k=0}^{n-2}e^{-b(\eta(t_{k}))},
\]
with the second inequality holding for $C = e$ since $b$ is $1$-Lipschitz and $|\eta(t_{k})\eta(t_{k+1})| = 1$. For $0 \leq k \leq n-2$ we have by construction that any $t \in [t_{k},t_{k+1}]$ satisfies $|\eta(t_{k})\eta(t)| \leq 1$. Hence the Harnack inequality \eqref{Harnack} implies that
\[
\ell_{1}(\eta|_{[t_{k},t_{k+1}]}) \asymp_{e} e^{-b(\eta(t_{k}))}\ell(\eta|_{[t_{k},t_{k+1}]}) \geq e^{-b(\eta(t_{k}))},
\]
since the length of $\eta|_{[t_{k},t_{k+1}]}$ in $X$ is at least the distance $1$ between its endpoints. This shows that
\[
\sum_{k=0}^{n-2}e^{-b(\eta(t_{k}))} \leq C\sum_{k=0}^{n-2}\ell_{1}(\eta|_{[t_{k},t_{k+1}]}) \leq C\ell_{1}(\eta),
\]
with $C = e$. We conclude that the desired estimate \eqref{reduce target} holds, so that as a consequence $\rho_{1}$ is a GH-density with a universal constant $M \geq 1$.
\end{proof}
By repurposing the proof of Proposition \ref{strong hyperbolic admissible} we are also able to show that $\rho_{\varepsilon}$ being a GH-density implies that $\rho_{\varepsilon'}$ is also a GH-density for each $0 < \varepsilon' \leq \varepsilon$, with a constant independent of $\varepsilon'$.
\begin{prop}\label{inheritance}
Suppose that $X$ is $K$-roughly geodesic from $\omega$ and that $\rho_{\varepsilon}$ is a GH-density with constant $M$. Then there is a constant $M' = M'(\delta,K,\varepsilon,M)$ such that $\rho_{\varepsilon'}$ is a GH-density with constant $M'$ for any $0 < \varepsilon' \leq \varepsilon$.
\end{prop}
\begin{proof}
Let $\varepsilon_{0} = \varepsilon_{0}(\delta)$ be the threshold determined by the Harnack inequality \eqref{Harnack} and Theorem \ref{Gehring-Hayman} such that $\rho_{\varepsilon}$ is a GH-density with constant $M = 20$ for $0 < \varepsilon \leq \varepsilon_{0}$. For the purpose of proving the proposition we can then assume that $\varepsilon > \varepsilon_{0}$ and $\varepsilon' \in [\varepsilon_{0},\varepsilon]$. We will first produce a constant $M_{0} = M_{0}(\delta,K,\varepsilon,\varepsilon',M)$ such that for any $x,y \in X$ with $|xy| > 1$ we have
\begin{equation}\label{admissible second target}
e^{-\varepsilon'(x|y)_{b}} \leq M_{0}d_{\varepsilon'}(x,y).
\end{equation}
As in the proof of Proposition \ref{strong hyperbolic admissible}, we let $\eta: I \rightarrow X$ be a rectifiable curve joining $x$ to $y$ that is parametrized by arclength (in $X$) and oriented from $x$ to $y$. We then construct the finite sequence of points $t_{0},\dots,t_{n}$ in $I$ exactly as in the proof of Proposition \ref{strong hyperbolic admissible}, with $n \geq 2$ since $|xy| > 1$. Since $b$ is $1$-Lipschitz, we conclude as in that proof that it suffices to establish the estimate
\begin{equation}\label{reduce second target}
e^{-\varepsilon'(\eta(t_{0})|\eta(t_{n-1}))_{b}} \leq M_{1}\ell_{\varepsilon'}(\eta),
\end{equation}
for any rectifiable curve $\eta$ joining $x$ to $y$, with $M_{1} = M_{1}(\delta,K,\varepsilon,\varepsilon',M)$.
We set $\beta = \varepsilon'/\varepsilon$ and observe that $\varepsilon_{0}/\varepsilon \leq \beta \leq 1$ by hypothesis. By the triangle inequality for $d_{\varepsilon}$ we have
\[
d_{\varepsilon}(\eta(t_{0}),\eta(t_{n-1})) \leq \sum_{k=0}^{n-2} d_{\varepsilon}(\eta(t_{k}),\eta(t_{k+1})),
\]
which implies that
\[
d_{\varepsilon}(\eta(t_{0}),\eta(t_{n-1}))^{\beta} \leq \sum_{k=0}^{n-2} d_{\varepsilon}(\eta(t_{k}),\eta(t_{k+1}))^{\beta},
\]
since $0 < \beta \leq 1$. By Lemma \ref{lem:proto estimate both} we then conclude that
\begin{align*}
e^{-\varepsilon'(\eta(t_{0})|\eta(t_{n-1}))_{b}} &\leq C\sum_{k=0}^{n-2}e^{-\varepsilon'(\eta(t_{k})|\eta(t_{k+1}))_{b}} \\
&\leq C\sum_{k=0}^{n-2}e^{-\varepsilon'b(\eta(t_{k}))},
\end{align*}
with $C = C(\delta,K,\varepsilon,\varepsilon',M)$, where the second inequality follows from the fact that $|\eta(t_{k})\eta(t_{k+1})| = 1$ by construction. Since $|\eta(t_{k})\eta(t)| \leq 1$ for each $t \in [t_{k},t_{k+1}]$, we have by the Harnack inequality \eqref{Harnack},
\[
\ell_{\varepsilon'}(\eta|_{[t_{k},t_{k+1}]}) \asymp_{e^{\varepsilon'}} e^{-\varepsilon'b(\eta(t_{k}))}\ell(\eta|_{[t_{k},t_{k+1}]}) \geq e^{-\varepsilon'b(\eta(t_{k}))},
\]
since the length of $\eta|_{[t_{k},t_{k+1}]}$ in $X$ is at least the distance $1$ between its endpoints. It follows that
\[
\sum_{k=0}^{n-2}e^{-\varepsilon'b(\eta(t_{k}))} \leq C(\varepsilon')\sum_{k=0}^{n-2}\ell_{\varepsilon'}(\eta|_{[t_{k},t_{k+1}]}) \leq C(\varepsilon')\ell_{\varepsilon'}(\eta),
\]
with $C(\varepsilon') = e^{\varepsilon'}$. This proves the desired inequality \eqref{reduce second target}.
By direct inspection of the calculations in this proof, as well as in the proofs of our previous lemmas in this section, one can verify that if $\varepsilon' \in [\varepsilon_{0},\varepsilon]$ then the constants can always be chosen to depend only on $\varepsilon_{0}$ and $\varepsilon$. This shows that the GH-constant $M'$ for $\rho_{\varepsilon'}$ can be chosen such that $M' = M'(\delta,K,\varepsilon,\varepsilon_{0},M)$. Since $\varepsilon_{0} = \varepsilon_{0}(\delta)$ we in fact obtain that $M' = M'(\delta,K,\varepsilon,M)$, as desired.
\end{proof}
We recall that $\omega = \omega_{b}$ denotes the basepoint of $b$. We end this section by using Lemma \ref{epsilon geodesic} to construct a map
\begin{equation}\label{construct identify}
\varphi_{\varepsilon} = \varphi_{\varepsilon,b}: \partial_{\omega} X \rightarrow \partial X_{\varepsilon},
\end{equation}
when $X$ is roughly geodesic from $\omega$ and $\partial_{\omega} X \neq \emptyset$. We will be using the following corollary of the estimate \eqref{epsilon geodesic estimate} for $x,y \in X$,
\begin{equation}\label{epsilon geodesic corollary}
d_{\varepsilon}(x,y) \leq C(\delta,\varepsilon,K) e^{-\varepsilon(x|y)_{b}}\min\{1,|xy|\}.
\end{equation}
The inequality \eqref{epsilon geodesic corollary} implies that if $\{x_{n}\}$ is a sequence in $X$ that converges to infinity with respect to $\omega$ (recall this means that $(x_{m}|x_{n})_{b} \rightarrow \infty$ as $m,n \rightarrow \infty$) then $\{x_{n}\}$ is a Cauchy sequence in $X_{\varepsilon}$. Since the metrics of $X$ and $X_{\varepsilon}$ are biLipschitz to one another on bounded subsets of $X$ by Lemma \ref{arc Harnack}, it is easy to see that $\{x_{n}\}$ cannot converge to a point of $X_{\varepsilon}$. It follows that $X_{\varepsilon}$ is incomplete as long as $\partial_{\omega}X$ contains at least one point; the only exceptions are when either $X$ is bounded or $\omega$ is the only point of $\partial X$. Furthermore a second application of \eqref{epsilon geodesic corollary} shows that sequences $\{x_{n}\}$ and $\{y_{n}\}$ converging to infinity with respect to $\omega$ that are equivalent with respect to $\omega$ are equivalent as Cauchy sequences in $X_{\varepsilon}$, i.e., $d_{\varepsilon}(x_{n},y_{n}) \rightarrow 0$ as $n \rightarrow \infty$. Setting $\partial X_{\varepsilon} = \bar{X}_{\varepsilon} \backslash X_{\varepsilon}$ to be the complement of $X_{\varepsilon}$ in its completion, we thus have a well-defined map $\varphi_{\varepsilon}: \partial_{\omega} X \rightarrow \partial X_{\varepsilon}$ given by sending a sequence $\{x_{n}\}$ converging to infinity with respect to $\omega$ to its limit in $\partial X_{\varepsilon}$ inside of $\bar{X}_{\varepsilon}$. In the next section we will show that $\varphi_{\varepsilon}$ is a bijection when $X$ is roughly starlike from $\omega$ and $\rho_{\varepsilon}$ is a GH-density. We remark that the map $\varphi_{\varepsilon}$ need not be a bijection in general, see \cite[Proposition 4.1]{BBS21}.
\subsection{Uniformizing by Busemann functions}\label{subsec:busemann uniformize} For this section we let $X$ be a complete geodesic $\delta$-hyperbolic space. We let $b \in \mathcal{B}(X)$ be given with basepoint $\omega \in \partial X$ and suppose that $X$ is $K$-roughly starlike from $\omega$. For a given $\varepsilon > 0$ we suppose $\rho_{\varepsilon,b}$ is a GH-density with constant $M$. As in Section \ref{subsec:estimate uniform}, to simplify notation we will drop $b$ from the notation for objects associated to the uniformization and write $\rho_{\varepsilon}:=\rho_{\varepsilon,b}$, $X_{\varepsilon} :=X_{\varepsilon,b}$, etc. As before, for a curve $\eta: I \rightarrow X_{\varepsilon}$ we will write $\ell_{\varepsilon}(\eta):=\ell_{\rho_{\varepsilon}}(\eta)$ for its length measured in the metric $d_{\varepsilon}$. For brevity, throughout this section we write $\doteq$ for equality up to an additive constant that depends only on $\delta$, $K$, $\varepsilon$, and $M$, and write $\asymp$ for equality up to a multiplicative constant that depends only on those same parameters. We write $c \geq 0$ and $C \geq 1$ for additive and multiplicative constants depending only on these parameters.
The rough starlikeness of $X$ from $\omega$ implies that $\partial_{\omega}X$ contains at least one point, so as a consequence the space $X_{\varepsilon}$ is incomplete by the discussion at the conclusion of the previous section. As before we write $\bar{X}_{\varepsilon}$ for the completion of the uniformization $X_{\varepsilon}$ and $\partial X_{\varepsilon} = \bar{X}_{\varepsilon} \backslash X_{\varepsilon}$ for the boundary of $X_{\varepsilon}$ inside its completion. We will continue to write $d_{\varepsilon}$ for the canonical extension of this metric on $X_{\varepsilon}$ to the completion $\bar{X}_{\varepsilon}$. We write $d_{\varepsilon}(x) = \mathrm{dist}(x,\partial X_{\varepsilon})$ for the distance to the boundary in $X_{\varepsilon}$. We let $\varphi_{\varepsilon}: \partial_{\omega}X \rightarrow \partial X_{\varepsilon}$ be the map constructed in \eqref{construct identify} at the end of the previous section by sending a sequence $\{x_{n}\}$ converging to infinity with respect to $\omega$ to its limit in $\partial X_{\varepsilon}$ as a Cauchy sequence in $X_{\varepsilon}$. We formally extend the distance function $d_{\varepsilon}$ to $\partial_{\omega}X$ by setting $d_{\varepsilon}(x,y):=d_{\varepsilon}(\varphi_{\varepsilon}(x),\varphi_{\varepsilon}(y))$ for $x,y \in X \cup \partial_{\omega}X$, where we set $\varphi_{\varepsilon}(x) = x$ for $x \in X$.
Our first lemma extends the estimate \eqref{proto estimate both} to hold for $x,y \in X \cup \partial_{\omega} X$ with our formal extension of $d_{\varepsilon}$ to $\partial_{\omega}X$. We recall our convention that for $x,y \in X \cup \partial X$ we define $|xy| = \infty$ if $x \neq y$ and either $x \in \partial X$ or $y \in \partial X$, and define $|xy| = 0$ if $x = y \in \partial X$.
\begin{lem}\label{lem:estimate both}
For any $x,y \in X \cup \partial_{\omega} X$ we have
\begin{equation}\label{estimate both}
d_{\varepsilon}(x,y) \asymp e^{-\varepsilon (x|y)_{b}}\min\{1,|xy|\}.
\end{equation}
\end{lem}
\begin{proof}
The case in which $x,y \in X$ is an immediate consequence of Lemma \ref{lem:proto estimate both} since $X$ is $K$-roughly geodesic from $\omega$ (because $X$ is $K$-roughly starlike from $\omega$). We thus only need to consider the case in which at least one of the points belongs to $\partial_{\omega}X$. Since \eqref{estimate both} holds trivially when $x = y$ we can assume that $x \neq y$. We can then assume without loss of generality that $x \in \partial_{\omega}X$. We then need to show that $d_{\varepsilon}(x,y) \asymp e^{-\varepsilon (x|y)_{b}}$. We let $\{x_{n}\}$ and $\{y_{n}\}$ be sequences converging to infinity with respect to $\omega$ in $X$ that represent the points $x$ and $y$ respectively; if $y \in X$ then we instead set $y_{n} = y$ for all $n$. Our definition of the extension of $d_{\varepsilon}$ to $\partial_{\omega}X$ then implies that we have $\lim_{n \rightarrow \infty} d_{\varepsilon}(x_{n},y_{n}) = d_{\varepsilon}(x,y)$. For $n$ sufficiently large we will have $(x|y)_{b} \doteq_{600\delta} (x_{n}|y_{n})_{b}$ by Lemma \ref{busemann inequality} and we will have $|x_{n}y_{n}| \geq 1$. The comparison \eqref{estimate both} then follows from the corresponding comparison for $x_{n}$ and $y_{n}$ for sufficiently large $n$.
\end{proof}
By using Lemma \ref{lem:estimate both} we are able to show that $\varphi_{\varepsilon}$ is a bijection.
\begin{lem}\label{boundary identification}
The map $\varphi_{\varepsilon}: \partial_{\omega} X \rightarrow \partial X_{\varepsilon}$ is a bijection.
\end{lem}
\begin{proof}
The injectivity of $\varphi_{\varepsilon}$ follows immediately from Lemma \ref{lem:estimate both} applied to $x \neq y \in \partial_{\omega}X$. Thus our focus will be on showing that $\varphi_{\varepsilon}$ is surjective. Let $\{x_{n}\}$ be a Cauchy sequence in $X_{\varepsilon}$ that converges to a point $z \in \partial X_{\varepsilon}$. We claim that the sequence $\{x_{n}\}$ cannot belong to a bounded subset of $X$. If it did then for a fixed $p \in X$ there would be an $r > 0$ such that $\{x_{n}\} \subset B(p,r)$ for all $n$, with $B(p,r)$ denoting the ball of radius $r$ centered at $p$ in $X$. Lemma \ref{arc Harnack} shows that the metrics on $X$ and $X_{\varepsilon}$ are biLipschitz to one another on $B(p,2r)$, which implies that $\{x_{n}\}$ is also a Cauchy sequence in $X$. Since $X$ is complete this Cauchy sequence must converge in $X$ to a point $y \in B(p,2r)$. However this means that $\{x_{n}\}$ also converges to $y$ in $X_{\varepsilon}$, contradicting that $\{x_{n}\}$ converges to a point of $\partial X_{\varepsilon}$.
Thus, by passing to a subsequence if necessary, we can assume that $|x_{m}x_{n}| \geq 1$ for $m \neq n$. It then follows from Lemma \ref{lem:estimate both} that for $m \neq n$,
\[
d_{\varepsilon}(x_{m},x_{n}) \asymp e^{-\varepsilon (x_{m}|x_{n})_{b}}.
\]
Since $d_{\varepsilon}(x_{n},x_{m}) \rightarrow 0$ as $m,n \rightarrow \infty$, we conclude that $(x_{m}|x_{n})_{b} \rightarrow \infty$ as $m,n \rightarrow \infty$. Thus $\{x_{n}\}$ converges to infinity with respect to $\omega$. Letting $\xi \in \partial_{\omega}X$ denote the point in the Gromov boundary relative to $\omega$ represented by the sequence $\{x_{n}\}$, the construction of $\varphi_{\varepsilon}$ then shows that $\varphi_{\varepsilon}(\xi) = z$. We conclude that $\varphi_{\varepsilon}$ is surjective.
\end{proof}
We can now prove Theorem \ref{identification theorem} in the case $b \in \mathcal{B}(X)$. We recall the definition \eqref{visual quasi} of the model visual quasi-metric $\theta_{\varepsilon,b}$ on $\partial_{\omega}X$.
\begin{proof}[Proof of Theorem \ref{identification theorem}]
The fact that $\varphi_{\varepsilon}$ is a bijection follows from Lemma \ref{boundary identification}. To complete the proof of Theorem \ref{identification theorem} it then suffices to show that for any $\xi,\zeta \in \partial_{\omega}X$ there is a constant $L = L(\delta,K,\varepsilon,M)$ such that
\[
d_{\varepsilon}(\xi,\zeta) \asymp_{L} \theta_{\varepsilon,b}(\xi,\zeta) = e^{-\varepsilon(\xi|\zeta)_{b}}.
\]
This desired comparison then follows from Lemma \ref{lem:estimate both}.
\end{proof}
The next lemma shows that the distance to $\partial X_{\varepsilon}$ can be computed in terms of the density $\rho_{\varepsilon}$.
\begin{lem}\label{compute distance}
For $x \in X$ we have
\begin{equation}\label{compute distance inequality}
d_{\varepsilon}(x) \asymp \rho_{\varepsilon}(x).
\end{equation}
\end{lem}
\begin{proof}
Let $x \in X$ be given. We first compute the upper bound in \eqref{compute distance inequality}. By the rough-starlikeness condition we can find a geodesic line $\gamma: \mathbb R \rightarrow X$ starting at $\omega$ and ending at some $\xi \in \partial X$ with $\mathrm{dist}(x,\gamma) \leq K$. Using Lemma \ref{geodesic busemann} we can consider $\gamma$ as parametrized by arclength with $b(\gamma(t)) \doteq_{144\delta} t$ for each $t \in \mathbb R$. We let $s \in \mathbb R$ be such that $|x\gamma(s)| \leq K$. We then compute,
\begin{align*}
\ell_{\varepsilon}(\gamma|_{[s,\infty)}) &= \int_{s}^{\infty}e^{-\varepsilon b(\gamma(t))}\,dt \\
& \asymp \int_{s}^{\infty}e^{-\varepsilon t}\,dt \\
&= \varepsilon^{-1}e^{-\varepsilon s}.
\end{align*}
By Lemma \ref{arc Harnack} we then have
\begin{align*}
d_{\varepsilon}(x,\xi) &\leq d_{\varepsilon}(x,\gamma(s)) + d_{\varepsilon}(\gamma(s),\xi) \\
&\leq \varepsilon^{-1}\rho_{\varepsilon}(x)(e^{\varepsilon |x\gamma(s)|}-1) + \ell_{\varepsilon}(\gamma|_{[s,\infty)}) \\
&\leq C\rho_{\varepsilon}(x).
\end{align*}
Since $d_{\varepsilon}(x) \leq d_{\varepsilon}(x,\xi)$ the upper bound follows.
For the lower bound we let $\xi \in \partial X_{\varepsilon}$ be a given point, which we can think of as a point in $\partial_{\omega}X$ using Lemma \ref{boundary identification}. By rough starlikeness we can then find a geodesic line $\gamma: \mathbb R \rightarrow X$ starting at $\omega$ and ending at $\xi$. For $n \in \mathbb N$ we note that $|x\gamma(n)| \rightarrow \infty$ as $n \rightarrow \infty$, so we will have $|x\gamma(n)| \geq 1$ for all sufficiently large $n$. For sufficiently large $n$ we can then apply \eqref{estimate both} and Lemma \ref{busemann inequality} to obtain
\[
d_{\varepsilon}(x,\gamma(n)) \asymp e^{-\varepsilon (x|\gamma(n))_{b}} \asymp e^{-\varepsilon (x|\xi)_{b}}.
\]
By \eqref{both busemann boundary} we have $(x|\xi)_{b} \leq b(x) + 600\delta$. By combining this with the above we obtain that
\[
d_{\varepsilon}(x,\gamma(n)) \geq C^{-1}\rho_{\varepsilon}(x).
\]
This gives the lower bound since $d_{\varepsilon}(x,\gamma(n)) \rightarrow d_{\varepsilon}(x,\xi)$ as $n \rightarrow \infty$.
\end{proof}
We can now complete the proof of Theorem \ref{unbounded uniformization} in the case $b \in \mathcal{B}(X)$. Since we have already shown that $X_{\varepsilon}$ is unbounded in Proposition \ref{bounded equivalence}, to prove Theorem \ref{unbounded uniformization} we only need to show that geodesics in $X$ are uniform curves in $X_{\varepsilon}$. For this we need to extend the definition of uniform curves to cover curves defined on arbitrary subintervals $I \subset \mathbb R$.
As in Definition \ref{def:uniform}, we let $(\Omega,d)$ be an incomplete metric space, set $\partial \Omega = \bar{\Omega} \backslash \Omega$, and set $d_{\Omega}(x)=\mathrm{dist}(x,\partial \Omega)$. We consider a curve $\gamma:I \rightarrow \Omega$ defined on an arbitrary subinterval $I \subset \mathbb R$; we write $t_{-} \in [-\infty,\infty)$ and $t_{+} \in (-\infty,\infty]$ for the endpoints of $I$. If $\gamma$ has finite length, $\ell(\gamma) < \infty$, then $\gamma$ has well-defined endpoints $\gamma_{-},\gamma_{+} \in \bar{\Omega}$ defined by the limits $\gamma(t_{-}) = \lim_{t \rightarrow t_{-}} \gamma(t)$ and $\gamma(t_{+}) = \lim_{t \rightarrow t_{+}} \gamma(t)$ in $\bar{\Omega}$, which exist because $\ell(\gamma) < \infty$.
\begin{defn}\label{def:extend uniform}For a constant $A \geq 1$ and an interval $I \subset \mathbb R$, a curve $\gamma: I \rightarrow \Omega$ with $\ell(\gamma) < \infty$ is \emph{$A$-uniform} if
\begin{equation}\label{extend uniform one}
\ell(\gamma) \leq Ad(\gamma_{-},\gamma_{+}),
\end{equation}
and if for every $t \in I$ we have
\begin{equation}\label{extend uniform two}
\min\{\ell(\gamma|_{I_{\leq t}}),\ell(\gamma|_{I_{\geq t}})\} \leq A d_{\Omega}(\gamma(t)).
\end{equation}
If $\ell(\gamma) = \infty$ then we instead define $\gamma$ to be $A$-uniform if \eqref{extend uniform two} holds and if $d(\gamma(s),\gamma(t)) \rightarrow \infty$ as $s \rightarrow t_{-}$ and $t \rightarrow t_{+}$.
\end{defn}
\begin{prop}\label{uniformization prop}
There is an $A = A(\delta,K,\varepsilon,M)$ such that any geodesic in $X$ is an $A$-uniform curve in $X_{\varepsilon}$. Consequently $X_{\varepsilon}$ is $A$-uniform.
\end{prop}
\begin{proof}
We first consider the case of a geodesic $xy$ in $X$ joining two points $x,y \in X \cup \partial_{\omega} X$. Let $\gamma:I \rightarrow X$ be a parametrization of $xy$ that is $c$-adapted to $b$, $c = c(\delta,K)$, as in Lemma \ref{star parametrize}; we can always find points $x', y' \in X \cup \partial_{\omega}X$ satisfying the hypotheses of the lemma by the $K$-rough starlikeness hypothesis from $\omega$. Let $t_{-} \in [-\infty,\infty)$, $t_{+} \in (-\infty,\infty]$ be the endpoints of $I$. Let $\{s_{n}\},\{t_{n}\} \subset I$ be sequences such that $s_{n} \rightarrow t_{-}$, $t_{n} \rightarrow t_{+}$, and $s_{n} < t_{n}$ for each $n$. Applying inequality \eqref{first GH} to the geodesic $\gamma|_{[s_{n},t_{n}]}$ joining $\gamma(s_{n})$ to $\gamma(t_{n})$ gives
\begin{equation}\label{sequential GH}
\ell_{\varepsilon}(\gamma|_{[s_{n},t_{n}]}) \leq Md_{\varepsilon}(\gamma(s_{n}),\gamma(t_{n})).
\end{equation}
Letting $n \rightarrow \infty$, the left side of \eqref{sequential GH} converges to $\ell_{\varepsilon}(\gamma)$. If $x \in X$ then the sequence $\{\gamma(s_{n})\}$ converges to $x$ in $X$, while if $x \in \partial_{\omega}X$ then the sequence $\{\gamma(s_{n})\}$ belongs to the equivalence class of $x$ with respect to $\omega$. In both cases we then have that $\{\gamma(s_{n})\}$ converges to $x$ in $\bar{X}_{\varepsilon}$, with the second case following from the construction of the identification $\varphi_{\varepsilon}: \partial_{\omega}X \rightarrow \partial X_{\varepsilon}$. The same discussion applies to the sequence $\{\gamma(t_{n})\}$ in relation to $y$. It follows that $d_{\varepsilon}(\gamma(s_{n}),\gamma(t_{n})) \rightarrow d_{\varepsilon}(x,y)$ as $n \rightarrow \infty$. Consequently $\gamma$ has finite length in $X_{\varepsilon}$ with endpoints $\gamma_{-} = x$ and $\gamma_{+} = y$. The inequality \eqref{extend uniform one} then follows by letting $n \rightarrow \infty$ in \eqref{sequential GH}.
We next verify inequality \eqref{extend uniform two}. It suffices to verify this inequality in the case that $s \in I_{\geq 0}$, since we can deduce the case $s \in I_{\leq 0}$ from this by reversing the roles of $x$ and $y$. We thus assume that $s \in I_{\geq 0}$. A straightforward calculation with \eqref{adapted equation} gives us that
\begin{equation}\label{near uniform}
\ell_{\varepsilon}(\gamma|_{I_{\geq s}}) \asymp e^{-\varepsilon (x|y)_{b}}\int_{I_{\geq s}} e^{-\varepsilon t} \,dt \leq \varepsilon^{-1} e^{-\varepsilon (x|y)_{b}} e^{-\varepsilon s}.
\end{equation}
Since $s + (x|y)_{b} \doteq b(\gamma(s))$, it then follows from \eqref{near uniform} and Lemma \ref{compute distance} that
\[
\ell_{\varepsilon}(\gamma|_{I_{\geq s}}) \leq C \rho_{\varepsilon}(\gamma(s)) \leq C d_{\varepsilon}(\gamma(s)),
\]
with $C = C(\delta,K,\varepsilon,M)$. We conclude that $\gamma$ is an $A$-uniform curve in $X_{\varepsilon}$ with $A = A(\delta,K,\varepsilon,M)$. Since any two points $x,y \in X$ can be joined by a geodesic $xy$ in $X$, this implies that the metric space $X_{\varepsilon}$ is $A$-uniform.
It remains to treat the case of a geodesic $\omega x$ joining the basepoint $\omega \in \partial X$ of $b$ to a point $x \in X \cup \partial_{\omega}X$. By applying (2) of Lemma \ref{geodesic busemann} with $u = 0$ we can find an arclength parametrization $\gamma:(-\infty,a] \rightarrow X$ of $\omega x$, $a \in (-\infty,\infty]$, with $b(\gamma(t)) \doteq_{144\delta} t$ for $t \in (-\infty,a]$. For $s \leq t \in (-\infty,a]$ we then have, by a computation similar to the one done in Lemma \ref{compute distance},
\begin{equation}\label{two endpoint}
\ell_{\varepsilon}(\gamma|_{[s,t]}) \asymp e^{-\varepsilon s} - e^{-\varepsilon t}.
\end{equation}
By letting $s \rightarrow -\infty$ we conclude that $\ell_{\varepsilon}(\gamma) = \infty$. The comparison \eqref{two endpoint} together with the GH-inequality \eqref{first GH} then implies that $d_{\varepsilon}(\gamma(s),\gamma(t)) \rightarrow \infty$ as $s \rightarrow -\infty$ and $t \rightarrow a$. Thus our replacement condition in the infinite length case for \eqref{extend uniform one} is satisfied. Combining \eqref{two endpoint} and Lemma \ref{compute distance} also implies for each $s \in (-\infty,a]$ that
\[
\ell_{\varepsilon}(\gamma|_{I_{\geq s}}) \leq Ce^{-\varepsilon s} \leq C\rho_{\varepsilon}(\gamma(s)) \leq Cd_{\varepsilon}(\gamma(s)),
\]
with $C = C(\delta,K,\varepsilon,M)$ in each inequality. Thus \eqref{extend uniform two} also holds for $\gamma$ with $A = A(\delta,K,\varepsilon,M)$. We conclude that $\gamma$ is an $A$-uniform curve in $X_{\varepsilon}$ in this case as well.
\end{proof}
\subsection{Uniformizing by distance functions}\label{subsec:distance uniformize}For this section we assume the same setup as in Section \ref{subsec:busemann uniformize}, with the exception that we will be assuming instead that $b \in \mathcal{D}(X)$ with basepoint $z \in X$. We will reduce Theorems \ref{unbounded uniformization} and \ref{identification theorem} in this case to the case considered in Section \ref{subsec:busemann uniformize} using the following general construction.
\begin{defn}\label{ray augment} For a metric space $(X,d_{X})$ and a point $z \in X$ we let $Y = X \cup_{z \sim 0} [0,\infty)$ be the metric space obtained by gluing the half-line $[0,\infty)$ to $X$ by identifying the point $z$ with $0 \in [0,\infty)$. The metric $d_{Y}$ on $Y$ is defined by setting $d_{Y}(x,y) = d_{X}(x,y)$ for $x,y \in X$, $d_{Y}(s,t) = |s-t|$ for $s,t \in [0,\infty)$, and $d_{Y}(x,s) = d_{X}(x,z) + s$ for $x \in X$, $s \in [0,\infty)$. The space $X$ then has a canonical isometric embedding into $Y$. We refer to the metric space $(Y,d_{Y})$ as the \emph{ray augmentation of $X$ based at $z$}.
\end{defn}
The following trick will be the basis of many of the results we prove regarding the ray augmentation.
\begin{lem}\label{shorten}
Let $(X,d_{X})$ be a metric space. Let $(Y,d_{Y})$ be the ray augmentation of $X$ based at $z \in X$. Then for each curve $\gamma:I \rightarrow Y$ in $Y$ there is a curve $\sigma:I \rightarrow X$ such that $\sigma(t) = \gamma(t)$ when $\gamma(t) \in X$ and $\sigma(t) = z$ when $\gamma(t) \notin X$.
\end{lem}
\begin{proof}
Consider the retraction $r: Y \rightarrow X$ given by setting $r(x) = x$ for $x \in X$ and $r(t) = z$ for $t \in [0,\infty)$. It is easy to see that $r$ is $1$-Lipschitz; in particular $r$ is continuous. For a given curve $\gamma:I \rightarrow Y$ the curve $\sigma = r \circ \gamma:I \rightarrow X$ then has the desired properties.
\end{proof}
We refer to the curve $\sigma$ constructed from $\gamma$ in Lemma \ref{shorten} as the \emph{shortening} of $\gamma$ to $X$. We apply Lemma \ref{shorten} to conformal deformations of the ray augmentation.
\begin{lem}\label{transfer admissible}
Let $(X,d_{X})$ be a geodesic metric space and let $(Y,d_{Y})$ be the ray augmentation of $X$ based at $z \in X$. Let $\rho: X \rightarrow (0,\infty)$ be a continuous density on $X$ and let $\tilde{\rho}:Y \rightarrow (0,\infty)$ be a continuous density on $Y$ with $\tilde{\rho}|_{X} = \rho$. Then the isometric embedding $X \rightarrow Y$ induces an isometric embedding $X_{\rho} \rightarrow Y_{\tilde{\rho}}$ of the corresponding conformal deformations. Furthermore $\rho$ is a GH-density with constant $M \geq 1$ if and only if $\tilde{\rho}$ is a GH-density with the same constant $M$.
\end{lem}
\begin{proof}
We write $d_{\rho}$ for the metric on $X_{\rho}$ and $d_{\tilde{\rho}}$ for the metric on $Y_{\tilde{\rho}}$. Let $x,y \in X$ be given. Let $\gamma:I \rightarrow Y$ be a rectifiable curve joining them and let $\sigma:I \rightarrow X$ be the shortening of $\gamma$ to $X$. Since $\tilde{\rho}|_{X} = \rho$ we clearly have
\[
\int_{\sigma} \tilde{\rho}\, ds = \int_{\sigma} \rho\, ds = \int_{\gamma^{-1}(X)} \rho\, ds \leq \int_{\gamma} \tilde{\rho}\, ds.
\]
It follows that $\ell_{\tilde{\rho}}(\sigma) \leq \ell_{\tilde{\rho}}(\gamma)$. Thus in computing $d_{\tilde{\rho}}(x,y)$ it suffices to minimize $\ell_{\tilde{\rho}}(\gamma)$ over curves $\gamma: I \rightarrow X$ taking values only in $X$. Since for such curves we have $\ell_{\tilde{\rho}}(\gamma) = \ell_{\rho}(\gamma)$, it immediately follows that $d_{\tilde{\rho}}(x,y) = d_{\rho}(x,y)$. We conclude that the embedding $X_{\rho} \rightarrow Y_{\tilde{\rho}}$ is an isometry.
It is obvious that $\rho$ is a GH-density with constant $M$ if $\tilde{\rho}$ is a GH-density with constant $M$, since $\tilde{\rho}$ restricts to $\rho$ on $X$ and geodesics in $X$ are also geodesics in $Y$. For the converse we assume that $\rho$ is a GH-density with constant $M$ and let $x,y \in Y$ be given points. We need to prove the GH-inequality \eqref{first GH} for any geodesic $\gamma$ joining $x$ to $y$ in $Y$.
If $x,y \in X$ then any geodesic $\gamma$ joining $x$ to $y$ in $Y$ is in fact a geodesic joining $x$ to $y$ in $X$. The inequality \eqref{first GH} for $\tilde{\rho}$ then follows from the corresponding inequality for $\rho$. If $x,y \in [0,\infty) = Y \backslash (X \backslash \{z\})$ then there is only one geodesic $\gamma$ from $x$ to $y$ in $Y$, which is simply the interval connecting them in $[0,\infty)$. Since any curve $\sigma$ joining $x$ to $y$ in $Y$ must contain this interval we in fact have $\ell_{\tilde{\rho}}(\gamma) = d_{\tilde{\rho}}(x,y)$. Thus inequality \eqref{first GH} holds in this case as well.
The final case is that in which $x \in X$ and $y \in [0,\infty)$. Let $\gamma$ be a geodesic joining $x$ to $y$ in $Y$. Then we can write $\gamma = \sigma \cup \eta$, where $\sigma$ is a geodesic in $X$ joining $x$ to $z$ and $\eta$ is the geodesic in $[0,\infty)$ joining $0$ to $y$, which is just the interval connecting these points. Given a rectifiable curve $\alpha: I\rightarrow Y$ joining $x$ to $y$ we let $\beta$ be the shortening of $\alpha$ to $X$. Then by the inequality \eqref{first GH} for $\rho$ we have $\ell_{\rho}(\sigma) \leq M\ell_{\rho}(\beta)$. Since the intersection $\alpha(I) \cap [0,\infty)$ must contain the interval $\eta$ joining $0$ to $y$, we deduce from this that
\begin{align*}
\ell_{\tilde{\rho}}(\gamma) &= \ell_{\tilde{\rho}}(\sigma) + \ell_{\tilde{\rho}}(\eta) \\
&\leq M\left(\ell_{\tilde{\rho}}(\beta) + \int_{\alpha^{-1}([0,\infty))} \tilde{\rho}\, ds\right) \\
&= M \ell_{\tilde{\rho}}(\alpha).
\end{align*}
Minimizing over all rectifiable curves $\alpha$ joining $x$ to $y$ then gives inequality \eqref{first GH}.
\end{proof}
We assume now that $X$ is a geodesic $\delta$-hyperbolic space. We let $Y$ be the ray augmentation of $X$ based at some point $z \in X$. It is an easy exercise to see that $Y$ is also $\delta$-hyperbolic; recall that we have defined $\delta$-hyperbolicity using $\delta$-thin triangles. We also note that $Y$ is complete if $X$ is complete. We will continue to use the generic distance notation $|xy|$ for the distance between $x,y \in Y$, noting that there is no conflict with the distance notation for $X$ since $X$ sits isometrically inside of $Y$.
By definition the \emph{canonical ray} in $Y$ is the geodesic ray $\gamma:[0,\infty) \rightarrow Y$ corresponding to the canonical parametrization of the copy of $[0,\infty)$ that we glued onto $X$. The key property of the ray augmentation is that the Busemann function $b_{\gamma}$ associated to the canonical ray restricts on $X$ to the distance from the distinguished point $z$.
\begin{lem}\label{augment}
Let $\gamma$ be the canonical ray in $Y$. Then for $x \in X$ we have $b_{\gamma}(x) = |xz|$.
\end{lem}
\begin{proof}
For $x \in X$ and $t \geq 0$ we have
\[
|\gamma(t)x|-t = t+|xz|-t = |xz|,
\]
which implies upon taking $t \rightarrow \infty$ that $b_{\gamma}(x) = |xz|$.
\end{proof}
Let $\omega \in \partial Y$ be the point in the Gromov boundary defined by the canonical ray. We next show that $\partial_{\omega}Y = \partial Y \backslash \{\omega\}$ identifies canonically with $\partial X$ and rough starlikeness from $z$ in $X$ passes over to rough starlikeness from $\omega$ in $Y$.
\begin{lem}\label{ray boundary}
We have $\partial_{\omega} Y = \partial X$. Furthermore any visual metric on $\partial X$ based at $z$ with parameter $\varepsilon > 0$ also defines a visual metric on $\partial_{\omega} Y$ based at $\omega$ with parameter $\varepsilon$, and the converse holds as well. If $X$ is $K$-roughly starlike from $z$ then $Y$ is $K$-roughly starlike from $\omega$.
\end{lem}
\begin{proof}
For the first assertion it suffices to show that if $\{x_{n}\}$ is a sequence converging to infinity in $Y$ then there is an $N \in \mathbb N$ such that for $n \geq N$ the points $x_{n}$ either all belong to $X$ or all belong to $[0,\infty)$. Recall that $\{x_{n}\}$ converges to infinity if we have $(x_{n}|x_{m})_{z} \rightarrow \infty$ as $m,n \rightarrow \infty$. If our assertion did not hold then we could find subsequences $\{y_{n}\}$ and $\{z_{n}\}$ of the sequence $\{x_{n}\}$ such that $\{y_{n}\} \subset X$, $\{z_{n}\} \subset [0,\infty)$, and $(y_{n}|z_{n})_{z} \rightarrow \infty$ as $n \rightarrow \infty$. But then $|y_{n}z_{n}| = |y_{n}z| + |z_{n}z|$ and therefore
\[
(y_{n}|z_{n})_{z} = \frac{1}{2}(|y_{n}z|+|z_{n}z|-|y_{n}z_{n}|) = 0,
\]
contradicting our assumption that $(y_{n}|z_{n})_{z} \rightarrow \infty$. Since all sequences $\{x_{n}\}$ converging to infinity in $Y$ that belong exclusively to $[0,\infty)$ must converge to $\omega$, it follows that we have a canonical identification of $\partial X$ with $\partial_{\omega} Y$.
Let $b_{\gamma}$ be the Busemann function associated to the canonical ray $\gamma$ as in Lemma \ref{augment}. We observe that $b_{\gamma}(x) = |xz|$ for $x \in X$ implies that $(x|y)_{b_{\gamma}} = (x|y)_{z}$ for $x,y \in X$. Since any sequence $\{x_{n}\}$ converging to infinity in $Y$ that does not converge to $\omega$ must eventually stay within $X$, it follows that $(\xi|\zeta)_{b_{\gamma}} = (\xi|\zeta)_{z}$ for $\xi,\zeta \in \partial X$. Thus through our identification $\partial_{\omega} Y = \partial X$ we have a canonical identification between visual metrics on $\partial_{\omega} Y$ based at $\omega$ and visual metrics on $\partial X$ based at $z$ for any parameter $\varepsilon > 0$.
Observe that if $\sigma: [0,\infty) \rightarrow X$ is a geodesic ray starting at $z$ then the map $\tilde{\sigma}: \mathbb R \rightarrow Y$ defined by $\tilde{\sigma}(t) = \sigma(t)$ for $t \geq 0$ and $\tilde{\sigma}(t) = -t \in [0,\infty)$ for $t \leq 0$ defines a geodesic line in $Y$ that begins at $\omega$, coincides with $\sigma$ inside of $X$, and has the same endpoint in $\partial X = \partial_{\omega} Y$ as $\sigma$. This implies that if $X$ is $K$-roughly starlike from $z$ for some $K \geq 0$ then $Y$ is $K$-roughly starlike from $\omega$.
\end{proof}
We can now complete the proofs of Theorems \ref{unbounded uniformization} and \ref{identification theorem} by showing that they also hold for $b \in \mathcal{D}(X)$.
\begin{prop}\label{bound from unbound}
Theorems \ref{unbounded uniformization} and \ref{identification theorem} hold for $b \in \mathcal{D}(X)$.
\end{prop}
\begin{proof}
Let $X$ be a complete geodesic $\delta$-hyperbolic space that is $K$-roughly starlike from $z \in X$ and let $b \in \mathcal{D}(X)$ have the form $b(x) = |xz| + s$ for some $s \in \mathbb R$. We assume that $\varepsilon > 0$ is such that $\rho_{\varepsilon,b}$ is a GH-density with constant $M$. We let $Y = X \cup_{z \sim 0} [0,\infty)$ be the ray augmentation of $X$ based at $z$, let $\gamma$ be the canonical ray in $Y$, and set $\tilde{b} = b_{\gamma}+s$. Then $Y$ is a complete geodesic $\delta$-hyperbolic space that is $K$-roughly starlike from the endpoint $\omega \in \partial Y$ of the canonical ray by Lemma \ref{ray boundary}. We have $\tilde{b}|_{X} = b$ by Lemma \ref{augment}. Thus by Lemma \ref{transfer admissible} the embedding $X_{\varepsilon,b} \rightarrow Y_{\varepsilon,\tilde{b}}$ is isometric and $\rho_{\varepsilon,\tilde{b}}$ is a GH-density with the same constant $M$. We can then apply Theorems \ref{unbounded uniformization} and \ref{identification theorem} to $Y$ equipped with the Busemann function $\tilde{b}$.
For $t \in [0,\infty) \subset Y$ we have $\tilde{b}(t) = -t+s$. Thus a straightforward calculation shows that $\ell_{\varepsilon}(\gamma) = \infty$. It follows from this and the GH-inequality \eqref{first GH} that the only boundary points of the metric space $Y_{\varepsilon,\tilde{b}}$ are the boundary points of $X_{\varepsilon,b}$, i.e., $\partial X_{\varepsilon,b} = \partial Y_{\varepsilon,\tilde{b}}$. By applying Theorem \ref{identification theorem} to $Y$ and then using Lemma \ref{ray boundary}, we conclude that we have a canonical identification $\varphi_{\varepsilon}:\partial X \rightarrow \partial X_{\varepsilon,b}$. Since visual metrics on $\partial X$ based at $z$ with parameter $\varepsilon$ correspond to visual metrics on $\partial_{\omega}Y$ based at $\omega$ with the same parameter $\varepsilon$, it then follows from Theorem \ref{identification theorem} that the restriction of $d_{\varepsilon,b}$ to $\partial X_{\varepsilon,b}$ defines a visual metric on $\partial X$ with parameter $\varepsilon$ and comparison constant $L = L(\delta,K,\varepsilon,M)$. This completes the proof of Theorem \ref{identification theorem}.
For Theorem \ref{unbounded uniformization} we observe that any geodesic $\sigma$ in $X$ is also a geodesic in $Y$. Since $X_{\varepsilon,b}$ sits isometrically inside of $Y_{\varepsilon,\tilde{b}}$ and $\partial X_{\varepsilon,b} = \partial Y_{\varepsilon,\tilde{b}}$, by applying Theorem \ref{unbounded uniformization} to $Y$ we conclude that $\sigma$ is an $A$-uniform curve in $Y_{\varepsilon,\tilde{b}}$ and therefore also an $A$-uniform curve in $X_{\varepsilon,b}$. This completes the proof of Theorem \ref{unbounded uniformization} in this case.
\end{proof}
\begin{rem}\label{transfer estimates}
The proof of Proposition \ref{bound from unbound} also shows that the estimates of Lemmas \ref{lem:estimate both} and \ref{compute distance} hold for $b \in \mathcal{D}(X)$ as well. In the notation of the proof, this is because $\tilde{b}|_{X} = b$, $X_{\varepsilon,b}$ isometrically embeds into $Y_{\varepsilon,\tilde{b}}$, and we have an identification of boundaries $\partial X_{\varepsilon,b} = \partial Y_{\varepsilon,\tilde{b}}$.
\end{rem}
\section{Hyperbolic fillings}\label{sec:filling}
Let $(Z,d)$ be a metric space and let $\alpha,\tau > 1$ be given parameters. We recall the construction of a hyperbolic filling $X$ of $Z$ with these parameters described prior to Theorem \ref{filling theorem}. For each $n \in \mathbb Z$ we select a maximal $\alpha^{-n}$-separated subset $S_{n}$ of $Z$. The existence of such a set is guaranteed by a standard application of Zorn's lemma. Then for each $n \in \mathbb Z$ the balls $B(z,\alpha^{-n})$, $z \in S_{n}$, cover $Z$.
The vertex set of $X$ has the form
\[
V = \bigcup_{n \in \mathbb Z} V_{n}, \;\;\; V_{n} = \{(x,n):x \in S_{n}\}.
\]
To each vertex $v = (x,n)$ we associate the dilated ball $B(v) = B(x,\tau \alpha^{-n})$. We will often use $v$ to denote both a vertex in $X$ and its associated point in $Z$. We also define the \emph{height function} $h: V \rightarrow \mathbb Z$ by $h(x,n) = n$. By construction for each $z \in Z$ there is a $v \in V_{n}$ such that $d(v,z) < \alpha^{-n}$.
We place an edge in $X$ between distinct vertices $v$ and $w$ if and only if $|h(v)-h(w)| \leq 1$ and $B(v) \cap B(w) \neq \emptyset$. Thus there is an edge between vertices if and only if they are of the same or adjacent height and there is a nonempty intersection of their associated balls. For vertices $v,w$ we write $v \sim w$ if there is an edge between $v$ and $w$. Edges between vertices of the same height are referred to as \emph{horizontal}, and edges between vertices of different heights are called \emph{vertical}. We say that an edge path between two vertices is \emph{vertical} if it is composed exclusively of vertical edges.
While we will allow any choice of $\alpha > 1$, we will need to place some constraints on the values of the parameter $\tau$ based on $\alpha$. We will require that
\begin{equation}\label{tau requirement}
\tau > \max\left\{3,\frac{\alpha}{\alpha-1}\right\}.
\end{equation}
We give each connected component of $X$ the unique geodesic metric in which all edges have unit length. The restriction \eqref{tau requirement} will be used in Proposition \ref{connected filling} to show that $X$ is actually connected and is therefore a geodesic metric space itself. For applications a standard choice of parameters satisfying \eqref{tau requirement} is given by $\alpha = 2$ and $\tau = 4$.
\begin{rem}\label{idk}We do not know whether the constraint \eqref{tau requirement} can be relaxed while preserving the properties of $X$ described below. In particular we do not know whether $X$ is always Gromov hyperbolic or even connected for all $\tau > 1$. However, by applying Lemma \ref{height connection} below it is easy to see that $X$ is connected for any $\tau > 1$ when $Z$ is bounded. We note that one cannot take $\tau = 1$ in the construction as it is possible for the resulting graph to fail to be Gromov hyperbolic even in the bounded case \cite[Example 8.8]{BBS21}.
\end{rem}
Since edges can only connect vertices of the same or adjacent heights, all vertical edge paths are geodesics in $X$. We will refer to these vertical paths as \emph{vertical geodesics}. We will use the generic distance notation $|xy|$ for the distance between $x,y \in X$. Thus for $v = (x,n),w=(y,n) \in V$ we will denote their distance in $X$ by $|vw|$ and their distance in $Z$ by $d(v,w):=d(x,y)$. Identifying an edge $g$ from a vertex $v$ to a vertex $w$ isometrically with $[0,1]$, we extend the height function $h$ to $g$ by $h(s) = sh(v)+(1-s)h(w)$. Then $h$ defines a function $h: X \rightarrow \mathbb R$ that is $1$-Lipschitz on the connected components of $X$.
We begin with a simple lemma.
\begin{lem}\label{height connection}
Let $v,w \in V$ with $h(v) \neq h(w)$ and $B(v) \cap B(w) \neq \emptyset$. Then there is a vertical edge path from $v$ to $w$.
\end{lem}
\begin{proof}
Let $v = (x,m)$, $w = (y,n)$, and let $z \in B(v) \cap B(w)$. We can assume without loss of generality that $m < n$. For each integer $m \leq k \leq n$ we can find a vertex $v_{k} \in V_{k}$ with $z \in B(v_{k})$; we set $v_{m} = v$ and $v_{n} = w$. Then $v_{k} \sim v_{k+1}$ for each $m \leq k < n$ by the construction of the graph $X$. It follows that $v$ is connected to $w$ by a vertical edge path passing through the vertices $v_{k}$.
\end{proof}
The next lemma estimates the distance in $Z$ between vertices in $X$ that are connected by a vertical edge path.
\begin{lem}\label{geometric series}
Let $v, w \in V$. Suppose that $v$ is joined to $w$ by a vertical edge path and $h(v) \leq h(w)$. Then
\[
d(v,w) \leq \frac{2\tau \alpha^{-h(v)+1}}{\alpha-1}.
\]
\end{lem}
\begin{proof}
We first derive a sharper inequality in the case $h(w) = h(v)+1$. Set $h(v) = m$. Let $x \in B(v) \cap B(w)$. Then
\[
d(v,w) \leq d(x,v) + d(x,w) < \tau \alpha^{-m} + \tau \alpha^{-m-1} < 2\tau \alpha^{-m}.
\]
Now let $h(v) = m$, $h(w) = n$. For each $m \leq k \leq n$ we let $v_{k} \in V_{k}$ be the vertex at this height in the vertical edge path joining $v$ to $w$. Then by the ``$h(w) = h(v) + 1$" case we have
\[
d(v,w) \leq \sum_{k=m}^{n-1}d(v_{k},v_{k+1}) \leq 2\tau \alpha^{-m}\sum_{k=0}^{n-m-1}\alpha^{-k} \leq \frac{2\tau \alpha^{-m+1}}{\alpha-1},
\]
with the final inequality following by summing the geometric series in $\alpha^{-1}$.
\end{proof}
Following the hyperbolic filling construction in \cite{BS07}, we define a \emph{cone point} $u \in V$ for a pair of vertices $\{v,w\} \subseteq V$ to be a vertex that can be joined to both $v$ and $w$ by vertical geodesics and that satisfies $h(u) \leq \min\{h(v),h(w)\}$. A \emph{branch point} for $\{v,w\}$ is defined to be a cone point of maximal height. A branch point for $\{v,w\}$ always exists as long as there is at least one cone point for $\{v,w\}$. When $v = w$ the vertex $v$ is trivially a branch point for the set $\{v\}$.
\begin{lem}\label{cone adjacent}
Let $v,w \in V_{n}$ be distinct vertices with $v \sim w$. Then there is a branch point $u \in V_{n-1}$ for the set $\{v,w\}$.
\end{lem}
\begin{proof}
The assumptions imply that $B(v) \cap B(w) \neq \emptyset$. Let $z \in B(v)\cap B(w)$ be a point in this intersection. Since $V_{n-1}$ is a maximal $\alpha^{-n+1}$-separated set in $Z$ we can find $u \in V_{n-1}$ such that $d(u,z) < \alpha^{-n+1}$. We compute
\[
d(v,u) \leq d(v,z) + d(z,u) < \tau \alpha^{-n} + \alpha^{-n+1} < \tau \alpha^{-n+1},
\]
by inequality \eqref{tau requirement}, noting that the final inequality here is equivalent to
\[
\tau + \alpha < \tau \alpha,
\]
which is implied by \eqref{tau requirement}. It follows that $v \in B(u)$ and therefore $B(v) \cap B(u) \neq \emptyset$. Thus $v$ is joined to $u$ by a vertical edge. Since the roles of $v$ and $w$ are symmetric, we conclude by the same calculation that $B(w) \cap B(u) \neq \emptyset$, i.e., $w$ is also joined to $u$ by a vertical edge. Thus $u$ is a cone point for $\{v,w\}$. Since a cone point for a pair of distinct vertices on an adjacent level is trivially maximal, we conclude that $u$ is a branch point for $\{v,w\}$.
\end{proof}
We can now show that the graph $X$ is connected.
\begin{prop}\label{connected filling}
For each $v,w \in V$ there is a branch point $u$ for the set $\{v,w\}$ that satisfies
\begin{equation}\label{branch comparison}
\alpha^{-h(u)} \asymp_{C(\alpha,\tau)} d(v,w) + \alpha^{-\min\{h(v),h(w)\}}.
\end{equation}
Consequently the graph $X$ is connected.
\end{prop}
\begin{proof}
Let $v \in V_{m}$, $w \in V_{n}$ be given. We can assume without loss of generality that $m \elleq n$. If $v = w$ then $v$ is a branch point for the set $\{v\}$ and the comparison \varepsilonqref{branch comparison} holds trivially. We can thus assume for the rest of the proof that $v \neq w$. We let $k \in \mathbb Z$ be any integer satisfying $\alpha^{-k} > d(v,w)$ and $k\elleq m$; note that such an integer always exists since $\alpha^{-k} \rightarrow \infty$ as $k \rightarrow -\infty$. Let $p \in V_{k}$ be a vertex such that $d(v,p) < \alpha^{-k}$ and let $q \in V_{k}$ be a vertex such that $d(q,w) < \alpha^{-k}$. Then
\[
d(p,q) \elleq d(v,p) + d(v,w) + d(w,q) < 3\alpha^{-k} < \tildeau \alpha^{-k},
\]
by \varepsilonqref{tau requirement}. Thus $q \in B(p)$, so $B(p) \cap B(q) \neq \varepsilonmptyset$. We conclude that $p \sim q$. By Lemma \ref{cone adjacent} we can then find a branch point $x \in V_{k-1}$ for the set $\{p,q\}$. Since $B(p) \cap B(v) \neq \varepsilonmptyset$ and $B(q) \cap B(w) \neq \varepsilonmptyset$, Lemma \ref{height connection} shows that $p$ and $q$ are connected to $v$ and $w$ respectively by vertical edge paths, and the requirement $k \elleq m$ implies that $\max\{h(p),h(q)\} \elleq \min\{h(v),h(w)\}$. Since $p$ and $q$ are each connected to $x$ by a vertical edge, we conclude that $x$ is a cone point for the set $\{v,w\}$.
It follows that there is a branch point $u$ for the set $\{v,w\}$. Since $u$ is joined to $v$ and $w$ by vertical edge paths, the triangle inequality and Lemma \ref{geometric series} implies that
\[
d(v,w) \elleq 2\max\{d(v,u),d(w,u)\} \elleq C(\alpha,\tildeau)\alpha^{-h(u)}.
\]
Since $h(u) \elleq m$, we have $\alpha^{-m} \elleq \alpha^{-h(u)}$ and therefore
\[
d(v,w) + \alpha^{-m} \elleq C(\alpha,\tildeau)\alpha^{-h(u)} + \alpha^{-m} \elleq C(\alpha,\tildeau)\alpha^{-h(u)},
\]
which gives the lower bound in the comparison \varepsilonqref{branch comparison}.
For the upper bound in \eqref{branch comparison} we split into two cases. The first case is that in which $B(v) \cap B(w) \neq \emptyset$. If $h(v) = h(w)$ then this implies that $v \sim w$ and Lemma \ref{cone adjacent} implies that $h(u) = h(v)-1$. The upper bound follows immediately from this, as we then have
\[
d(v,w) + \alpha^{-\min\{h(v),h(w)\}} \geq \alpha^{-h(v)} = \alpha^{-1}\alpha^{-h(u)}.
\]
If $h(v) \neq h(w)$ then by Lemma \ref{height connection} $v$ can be joined to $w$ by a vertical edge path. In this case $v$ is a branch point for the set $\{v,w\}$ and the inequality
\[
d(v,w) +\alpha^{-h(v)} \geq \alpha^{-h(u)}
\]
holds trivially for $u = v$.
The second case is that in which we have $B(v) \cap B(w) = \emptyset$. This implies in particular that we must have $w \notin B(v)$. Thus $d(v,w) \geq \tau \alpha^{-m} > 0$. Let $k \in \mathbb Z$ be the maximal integer such that $k \leq m$ and $\alpha^{-k} > d(v,w)$. Then either $k = m$ or $d(v,w) \geq \alpha^{-k-1}$. Since $\alpha^{-k} > d(v,w)$ and $d(v,w) \geq \tau \alpha^{-m}$, we conclude in both cases that $d(v,w) \asymp_{C(\alpha,\tau)} \alpha^{-k}$. Making this choice of $k$ in the construction of $x$ above, we can thus construct a cone point $x$ for the set $\{v,w\}$ with $h(x) = k-1$ and therefore
\[
\alpha^{-h(x)} \asymp_{C(\alpha,\tau)} d(v,w).
\]
Since the branch point $u$ satisfies $h(u) \geq h(x)$ it follows that
\[
\alpha^{-h(u)} \leq C(\alpha,\tau)d(v,w) \leq C(\alpha,\tau) (d(v,w) + \alpha^{-m}).
\]
The upper bound in \eqref{branch comparison} follows.
Lastly, since we can connect $v$ to $w$ through the branch point $u$, it follows that $v$ and $w$ can be connected by an edge path in the graph $X$. Since $v$ and $w$ were arbitrary we conclude that $X$ is connected.
\end{proof}
Now that we have shown $X$ is connected, the metrics we put on its connected components give it the structure of a geodesic metric space in which all edges of $X$ have unit length. The height function then defines a $1$-Lipschitz function $h: X \rightarrow \mathbb R$. We formally define the Gromov product based at $h$ by, for $x,y \in X$,
\[
(x|y)_{h} = \frac{1}{2}(h(x)+h(y)-|xy|).
\]
Since $h$ is $1$-Lipschitz we have
\begin{equation}\label{lip height}
(x|y)_{h} \leq \min\{h(x),h(y)\}.
\end{equation}
Our next lemma gives a key relation of the Gromov product based at $h$ to branch points.
\begin{lem}\label{branch estimate}
Let $v,w \in V$ and let $u$ be a branch point for $\{v,w\}$. Then
\[
h(u) \doteq_{c(\alpha,\tau)} (v|w)_{h},
\]
and therefore
\[
\alpha^{-(v|w)_{h}} \asymp_{C(\alpha,\tau)} d(v,w)+\alpha^{-\min\{h(v),h(w)\}}.
\]
\end{lem}
\begin{proof}
Since the claims of the lemma hold trivially when $v = w$ we can assume that $v \neq w$. Proposition \ref{connected filling} gives the existence of a branch point $u$ for $\{v,w\}$ satisfying \eqref{branch comparison}. The vertical edge path from $v$ to $u$ followed by the vertical edge path from $u$ to $w$ gives an edge path from $v$ to $w$, which shows that
\[
|vw| \leq |vu| + |uw| = h(v)-h(u) + h(w)-h(u) = h(v)+h(w)-2h(u).
\]
Rearranging this we obtain
\[
h(u) \leq \frac{1}{2}(h(v)+h(w)-|vw|) = (v|w)_{h}.
\]
To get a bound in the other direction, let $v = v_{0},v_{1},\dots,v_{k} = w$ be a sequence of vertices joined by edges that gives a geodesic $\gamma$ from $v$ to $w$. Then $|vw| = k$ and $k \geq 1$ since $v \neq w$. For $1 \leq i \leq k$ we have $B(v_{i-1}) \cap B(v_{i}) \neq \emptyset$ and therefore, using $|h(v_{i-1})-h(v_{i})| \leq 1$,
\[
d(v_{i-1},v_{i}) < 2\tau \alpha^{-\min\{h(v_{i-1}),h(v_{i})\}} \leq 2\tau \alpha^{-h(v_{i-1})+1}.
\]
We can run the same argument viewing $\gamma$ as a geodesic from $w$ to $v$ instead, setting $w_{i} = v_{k-i}$ for $0 \leq i \leq k$. We see from this that we also have
\[
d(w_{i-1},w_{i}) < 2\tau \alpha^{-h(w_{i-1})+1},
\]
for $1 \leq i \leq k$. For each $1 \leq l \leq k$ we thus obtain an estimate (using $h(v_{i-1}) \geq h(v)-i+1$ and $h(w_{i-1}) \geq h(w)- i+ 1$),
\begin{align*}
d(v,w) &\leq
\sum_{i=1}^{k}d(v_{i-1},v_{i}) \\
&\leq \sum_{i=1}^{l}d(v_{i-1},v_{i}) + \sum_{i=1}^{k-l+1}d(w_{i-1},w_{i}) \\
&< 2\tau \alpha^{-h(v)}\sum_{i=1}^{l} \alpha^{i} + 2\tau \alpha^{-h(w)}\sum_{i=1}^{k-l+1} \alpha^{i} \\
&\leq \frac{2\tau \alpha}{\alpha-1}(\alpha^{-h(v)}(\alpha^{l}-1) + \alpha^{-h(w)}(\alpha^{k-l+1}-1)) \\
&\leq \frac{2\tau \alpha}{\alpha-1}(\alpha^{l-h(v)}+ \alpha^{k-l+1-h(w)}).
\end{align*}
We set $l = \lceil\frac{1}{2}(k-h(w)+h(v)) \rceil$ (the least integer greater than or equal to this quantity), observing that $1 \leq l \leq k$ since $|h(v)-h(w)| \leq k$. This gives, after some simplification,
\[
d(v,w) \leq C(\alpha,\tau)\alpha^{-\frac{1}{2}(h(v)+h(w)-k)} = C(\alpha,\tau) \alpha^{-(v|w)_{h}},
\]
recalling that $|vw| = k$. By Proposition \ref{connected filling} and inequality \eqref{lip height}, we then have
\[
\alpha^{-h(u)} \leq C(\alpha,\tau) \alpha^{-(v|w)_{h}},
\]
which implies upon taking logarithms that
\[
h(u) \geq (v|w)_{h}-c(\alpha,\tau).
\]
This gives the desired lower bound of the first approximate equality of the lemma. The second comparison inequality follows by using Proposition \ref{connected filling} again.
\end{proof}
We now prove an inequality similar to the $4\delta$-inequality \eqref{delta inequality} for our formal Gromov products based at $h$.
\begin{lem}\label{delta inequality filling}
Let $u,v,w \in V$. Then
\[
(u|w)_{h} \geq \min\{(u|v)_{h},(v|w)_{h}\} - c(\alpha,\tau).
\]
\end{lem}
\begin{proof}
Let $u,v,w \in V$ be vertices. By the triangle inequality in $Z$ we have
\[
d(u,w) + \alpha^{-\min\{h(u),h(w)\}} \leq d(u,v) + \alpha^{-\min\{h(u),h(v)\}} + d(v,w) + \alpha^{-\min\{h(v),h(w)\}},
\]
which becomes, upon applying Lemma \ref{branch estimate},
\begin{align*}
\alpha^{-(u|w)_{h}} &\leq C(\alpha,\tau) (\alpha^{-(u|v)_{h}} + \alpha^{-(v|w)_{h}}) \\
&\leq C(\alpha,\tau)\alpha^{-\min\{(u|v)_{h},(v|w)_{h}\}}.
\end{align*}
Taking logarithms of each side gives the desired inequality.
\end{proof}
We can now show that $X$ is Gromov hyperbolic. For this we use some terminology from \cite[Chapter 2]{BS07}: a \emph{$\delta$-triple} for $\delta \geq 0$ is a triple $(a,b,c)$ of real numbers $a,b,c$ such that the two smallest numbers differ by at most $\delta$. Observe that $(a,b,c)$ is a $\delta$-triple if and only if the inequality
\begin{equation}\label{basic delta}
c \geq \min\{a,b\} - \delta
\end{equation}
holds for all permutations of the roles of $a$, $b$, and $c$. We will also need the following standard claim \cite[Lemma 2.1.4]{BS07} which is called the \emph{Tetrahedron lemma}.
\begin{lem}\label{tetrahedron}
Let $d_{12}$, $d_{13}$, $d_{14}$, $d_{23}$, $d_{24}$, $d_{34}$ be six numbers such that the four triples $(d_{23},d_{24},d_{34})$, $(d_{13},d_{14},d_{34})$, $(d_{12},d_{14},d_{24})$, and $(d_{12},d_{13},d_{23})$ are $\delta$-triples. Then
\[
(d_{12}+d_{34},d_{13}+d_{24},d_{14}+d_{23})
\]
is a $2\delta$-triple.
\end{lem}
\begin{prop}\label{hyperbolicity filling}
The space $X$ is $\delta$-hyperbolic with $\delta = \delta(\alpha,\tau)$.
\end{prop}
\begin{proof}
We will use the \emph{cross-difference triple} defined in \cite[Chapter 2.4]{BS07}. For a quadruple of points $Q=(x,y,z,u) \in X$ and a fixed basepoint $o \in X$ this triple is defined by
\[
A_{o}(Q)= ((x|y)_{o} + (z|u)_{o},(x|z)_{o}+(y|u)_{o},(x|u)_{o}+(y|z)_{o}).
\]
The triple $A_{o}(Q)$ has the same differences among its members as the triple
\[
A_{h}(Q) = ((x|y)_{h} + (z|u)_{h}, (x|z)_{h}+(y|u)_{h}, (x|u)_{h} + (y|z)_{h}),
\]
as a routine calculation shows for instance that
\[
(x|y)_{o} + (z|u)_{o} - (x|z)_{o}-(y|u)_{o} = (x|y)_{h} + (z|u)_{h} - (x|z)_{h}-(y|u)_{h},
\]
with both expressions being equal to
\[
\frac{1}{2}(-|xy|-|zu|+|xz|+|yu|).
\]
Similar calculations give equality for the other differences. Thus $A_{o}(Q)$ is a $\delta$-triple for a given $\delta \geq 0$ if and only if $A_{h}(Q)$ is a $\delta$-triple.
Using Lemma \ref{delta inequality filling} we conclude that the six numbers $(x|y)_{h}$, $(z|u)_{h}$, $(x|z)_{h}$, $(y|u)_{h}$, $(x|u)_{h}$, $(y|z)_{h}$ together satisfy the hypotheses of Lemma \ref{tetrahedron} with parameter $\delta = \delta(\alpha,\tau)$. This implies that $A_{h}(Q)$ is a $2\delta$-triple and therefore that $A_{o}(Q)$ is a $2\delta$-triple. By \cite[Proposition 2.4.1]{BS07} this implies that inequality \eqref{delta inequality} holds for Gromov products based at $o$ in $X$ (with $2\delta$ replacing $4\delta$). By \cite[Chapitre 2, Proposition 21]{GdH90} this implies that geodesic triangles in $X$ are $8\delta$-thin, i.e., $X$ is $8\delta$-hyperbolic.
\end{proof}
We next show that any vertex in $V$ is part of a vertical geodesic line. We will in fact show something stronger. We let $\bar{Z}$ denote the completion of $Z$, and continue to write $d$ for the canonical extension of the metric on $Z$ to its completion. For $r > 0$ and a point $z \in \bar{Z}$ we will write $B'(z,r)$ for the open ball of radius $r$ centered at $z$ in the completion $\bar{Z}$.
\begin{lem}\label{second height connection}
Let $z \in \bar{Z}$. Then there is a vertical geodesic $\gamma: \mathbb R \rightarrow X$ with $h(\gamma(t)) = t$ for $t \in \mathbb R$ such that, writing $\gamma(n) = (z_{n},n)$ for $n \in \mathbb Z$, we have $z \in B'(z_{n},\frac{\tau}{3}\alpha^{-n})$ for each $n \in \mathbb Z$. Furthermore if $v = (z,m)$ is a given vertex of $V$ then we can construct $\gamma$ such that $\gamma(m) = v$.
\end{lem}
\begin{proof}
Since $\frac{\tau}{3} > 1$ by \eqref{tau requirement} and since for each $n \in \mathbb Z$ the balls $B(y,\alpha^{-n})$ cover $Z$ for $y \in S_{n}$, it follows from the fact that $Z$ is dense in $\bar{Z}$ that the balls $B'(y,\frac{\tau}{3}\alpha^{-n})$ for $y \in S_{n}$ cover $\bar{Z}$. Thus, given $z \in \bar{Z}$, for each $n \in \mathbb Z$ we can find $z_{n} \in S_{n}$ such that $z \in B'(z_{n},\frac{\tau}{3}\alpha^{-n})$.
Let $v_{n} = (z_{n},n)$ be the associated vertex in $V$. We claim that for each $n \in \mathbb Z$ we have $B(v_{n}) \cap B(v_{n+1}) \neq \emptyset$. Since $Z$ is dense in $\bar{Z}$ we can find $y \in Z$ such that $d(y,z) < \frac{\tau}{3}\alpha^{-n-1}$. Then
\[
d(y,z_{n+1}) \leq d(y,z) + d(z,z_{n+1}) < \frac{\tau}{3}\alpha^{-n-1} + \frac{\tau}{3}\alpha^{-n-1} < \tau \alpha^{-n-1},
\]
which implies that $y \in B(v_{n+1})$. A similar calculation shows that $y \in B(v_{n})$ since $\alpha^{-n-1} < \alpha^{-n}$. Thus $B(v_{n}) \cap B(v_{n+1}) \neq \emptyset$ and therefore $v_{n} \sim v_{n+1}$. We can then find a vertical geodesic $\gamma: \mathbb R \rightarrow X$ through the sequence of vertices $\{v_{n}\}_{n \in \mathbb Z}$, which can be parametrized such that $h(\gamma(t)) = t$ for $t \in \mathbb R$. Finally, if $v = (z,m)$ is a vertex of $V$ then we can choose $z_{m} = z$ in our construction since we trivially have $z \in B'(z,\frac{\tau}{3}\alpha^{-m})$.
\end{proof}
A \emph{descending geodesic ray} $\gamma:[0,\infty) \rightarrow X$ is a vertical geodesic ray such that $h(\gamma(t))$ is strictly decreasing as a function of $t$. In this case we have $h(\gamma(t)) = h(\gamma(0)) - t$ for each $t \geq 0$. Similarly an \emph{ascending geodesic ray} $\gamma:[0,\infty) \rightarrow X$ is a vertical geodesic ray such that $h(\gamma(t))$ is strictly increasing as a function of $t$. In this case we instead have that $h(\gamma(t)) = h(\gamma(0)) + t$ for each $t \geq 0$. A vertical geodesic $\gamma$ is \emph{anchored} at a point $z \in \bar{Z}$ if for each vertex $(z_{m},m)$ belonging to $\gamma$ we have $z \in B'(z_{m},\frac{\tau}{3}\alpha^{-m})$; when the point $z$ does not need to be referenced we will just say that $\gamma$ is \emph{anchored}. Lemma \ref{second height connection} gives the existence of ascending and descending geodesic rays in $X$ anchored at any point $z \in \bar{Z}$.
We will next show that all anchored descending vertical geodesic rays in $X$ define the same point in the Gromov boundary $\partial X$.
\begin{lem}\label{bounded distance vertical}
Let $\gamma$, $\sigma: [0,\infty) \rightarrow X$ be two descending geodesic rays in $X$ starting at vertices $v = \gamma(0)$ and $w = \sigma(0)$ of $X$ respectively and anchored at $y,z \in \bar{Z}$ respectively. Let $k \in \mathbb Z$ be such that $k \leq \min\{h(v),h(w)\}$ and $\frac{\tau}{3}\alpha^{-k} > d(y,z)$. Let $v_{k} \in \gamma \cap V_{k}$, $w_{k} \in \sigma \cap V_{k}$ be the vertices on these geodesics at the height $k$. Then $|v_{k}w_{k}| \leq 1$.
\end{lem}
\begin{proof}
By the anchoring condition we have $d(v_{k},y) < \frac{\tau}{3}\alpha^{-k}$ and $d(w_{k},z) < \frac{\tau}{3}\alpha^{-k}$. Hence
\[
d(v_{k},w_{k}) \leq d(v_{k},y) + d(y,z) + d(z,w_{k}) < \tau \alpha^{-k}.
\]
Thus $w_{k} \in B(v_{k})$ and therefore $B(v_{k}) \cap B(w_{k}) \neq \emptyset$, which implies that either $v_{k} = w_{k}$ or $v_{k} \sim w_{k}$. In both cases we conclude that $|v_{k}w_{k}| \leq 1$.
\end{proof}
The Busemann functions associated to anchored descending geodesic rays have a particularly simple form.
\begin{lem}\label{height busemann}
Let $\gamma$ be an anchored descending geodesic ray in $X$ starting from a vertex $v \in V$. Then for all $x \in X$ we have
\begin{equation}\label{height busemann equation}
b_{\gamma}(x) \doteq_{3} h(x)-h(\gamma(0)).
\end{equation}
\end{lem}
\begin{proof}
Since both $b_{\gamma}$ and $h$ are $1$-Lipschitz and the edges of $X$ have unit length, it suffices to prove the estimate \eqref{height busemann equation} on the vertices of $X$ with the constant $1$ instead of $3$. Let $z \in \bar{Z}$ be the anchoring point for $\gamma$. Let $w \in V$ be an arbitrary vertex and let $\sigma: [0,\infty) \rightarrow X$ be an anchored descending geodesic ray in $X$ starting at $w$ and anchored at the point $y \in Z$ associated to $w$, as constructed in Lemma \ref{second height connection}. The sequence of vertices $\{\gamma(n)\}_{n=0}^{\infty}$ on $\gamma$ satisfies $h(\gamma(n)) = h(\gamma(0)) - n$ since $\gamma$ is a descending geodesic ray, and the same holds for $\sigma$. We let $n$ be any integer large enough that $h(\gamma(n)) \leq h(w)$ and $\frac{\tau}{3}\alpha^{-h(\gamma(n))} > d(y,z)$ and observe that if we define $k_{n} = h(w)-h(\gamma(n))$ then $h(\sigma(k_{n})) = h(\gamma(n))$. Then by Lemma \ref{bounded distance vertical} we have $|\gamma(n)\sigma(k_{n})| \leq 1$. Since $\sigma(k_{n})$ is joined to $w$ by a vertical geodesic of length $k_{n}$, it follows immediately that
\[
h(w)-h(\gamma(n)) \leq |\gamma(n)w| \leq h(w)-h(\gamma(n))+1,
\]
and therefore, since $h(\gamma(n)) = h(\gamma(0)) -n$,
\[
|\gamma(n)w| \doteq_{1} h(w)+ n - h(\gamma(0)).
\]
This implies that
\[
|\gamma(n)w| - n \doteq_{1} h(w)-h(\gamma(0)).
\]
By letting $n \rightarrow \infty$ we conclude that
\[
b_{\gamma}(w) \doteq_{1} h(w) - h(\gamma(0)),
\]
which gives the desired result.
\end{proof}
In particular, for an anchored descending geodesic ray $\gamma$ with $h(\gamma(0)) = 0$, Lemma \ref{height busemann} shows that $b_{\gamma} \doteq_{3} h$. We fix such a descending geodesic ray $\gamma$ for the remainder of this section and write $b:=b_{\gamma}$ for the associated Busemann function. Let $\omega \in \partial X$ be the point corresponding to the equivalence class of $\gamma$ in the Gromov boundary of $X$; note that Lemma \ref{bounded distance vertical} shows that all anchored descending geodesic rays belong to the equivalence class $\omega$ defined by $\gamma$. Our final goal in this section is to show that the boundary $\partial_{\omega}X$ of $X$ relative to $\omega$ can be canonically identified with the completion $\bar{Z}$ of $Z$ in such a way that the extension of the metric $d$ to $\bar{Z}$ is a visual metric on $\partial_{\omega}X$ based at $\omega$ with parameter $\log \alpha$.
We remark that the rough equality $b \doteq_{3} h$ implies that $(x|y)_{b} \doteq_{3} (x|y)_{h}$ for all $x,y \in X$ as well, so that in particular the conclusions of Lemma \ref{branch estimate} hold with $b$ replacing $h$ and $(v|w)_{b}$ replacing $(v|w)_{h}$ everywhere, at the cost of adding $6$ to the constant $c(\alpha,\tau)$ there and multiplying the constant $C(\alpha,\tau)$ by $\alpha^{6}$. We will use this observation without further comment below.
For each point $z \in \bar{Z}$ we fix an ascending geodesic ray $\gamma_{z}:[0,\infty) \rightarrow X$ anchored at $z$, as given by Lemma \ref{second height connection}. We define a map $\psi: \bar{Z} \rightarrow \partial_{\omega} X$ by setting $\psi(z) = [\gamma_{z}]$, i.e., $\psi(z)$ is the equivalence class in $\partial_{\omega}X$ defined by the geodesic ray $\gamma_{z}$. Implicit in this definition is the fact that we cannot have $[\gamma_{z}] = \omega$ for any $z \in \bar{Z}$, as it is easy to see from the fact that $h$ is $1$-Lipschitz that ascending geodesic rays cannot be at bounded distance from descending geodesic rays. We also note that if $\gamma$ is any other ascending geodesic ray anchored at $z \in \bar{Z}$ then we must have $[\gamma] = [\gamma_{z}]$: for $k \in \mathbb Z$ sufficiently large the unique vertices $v_{k} \in \gamma \cap V_{k}$ and $w_{k} \in \gamma_{z} \cap V_{k}$ must satisfy $|v_{k}w_{k}| \leq 1$ since $z \in B(v_{k}) \cap B(w_{k})$, hence the geodesic rays $\gamma$ and $\gamma_{z}$ are at a bounded distance from one another. The map $\psi$ can thus equivalently be thought of as sending $z \in \bar{Z}$ to the equivalence class of all ascending geodesic rays anchored at $z$.
\begin{prop}\label{identify boundary}
The map $\psi: \bar{Z} \rightarrow \partial_{\omega}X$ defines an identification of $\bar{Z}$ with $\partial_{\omega}X$. Under this identification the metric $d$ on $\bar{Z}$ defines a visual metric on $\partial_{\omega}X$ with parameter $\log \alpha$ and comparison constant depending only on $\alpha$ and $\tau$.
\end{prop}
\begin{proof}
Let $x,y \in Z$ be given and let $\gamma_{x}$ and $\gamma_{y}$ be ascending geodesic rays anchored at $x$ and $y$ respectively. For $n \in \mathbb Z$ sufficiently large we let $x_{n} \in \gamma_{x} \cap V_{n}$ and $y_{n} \in \gamma_{y} \cap V_{n}$ be the unique vertices on these rays at height $n$. By Lemma \ref{branch estimate} we have
\begin{equation}\label{branch use}
\alpha^{-(x_{n}|y_{n})_{b}} \asymp_{C(\alpha,\tau)} d(x_{n},y_{n}) + \alpha^{-n}.
\end{equation}
Since $x \in B(x_{n})$ and $y \in B(y_{n})$ we have $d(x,x_{n}) < \tau \alpha^{-n}$ and $d(y,y_{n}) < \tau \alpha^{-n}$. Hence, by letting $n \rightarrow \infty$ in \eqref{branch use} and using Lemma \ref{busemann inequality}, we conclude that
\begin{equation}\label{phi visual}
\alpha^{-(\psi(x)|\psi(y))_{b}} \asymp_{C(\alpha,\tau)} d(x,y).
\end{equation}
It follows immediately that $\psi: \bar{Z} \rightarrow \partial_{\omega}X$ is injective. To complete the proof of the proposition it suffices to show that $\psi$ is surjective, as the estimate \eqref{phi visual} then shows that the metric $d$ on $\bar{Z}$ defines a visual metric on $\partial_{\omega}X$ with parameter $\log \alpha$ and comparison constant depending only on $\alpha$ and $\tau$ when we use $\psi$ to identify $\bar{Z}$ with $\partial_{\omega}X$.
We recall from Proposition \ref{convergence Busemann} that $\partial_{\omega}X$ can be defined as equivalence classes of sequences $\{x_{n}\}$ in $X$ such that $(x_{m}|x_{n})_{b} \rightarrow \infty$ as $m,n \rightarrow \infty$, with two sequences $\{x_{n}\}$, $\{y_{n}\}$ being equivalent if $(x_{n}|y_{n})_{b} \rightarrow \infty$ as $n \rightarrow \infty$. Since $b$ is $1$-Lipschitz we can always choose these sequences to consist of vertices in $X$ by replacing $x_{n}$ with a nearest vertex $v_{n}$.
Thus let $\{v_{n}\}$ be a sequence of vertices defining a point $\xi$ of $\partial_{\omega}X$. Let $\{z_{n}\}$ be the associated sequence of points in $Z$. By Lemma \ref{branch estimate} we have
\[
\alpha^{-(v_{n}|v_{m})_{b}} \asymp_{C(\alpha,\tau)} d(z_{n},z_{m}) + \alpha^{-\min\{b(v_{n}),b(v_{m})\}}.
\]
Since $(v_{n}|v_{m})_{b} \rightarrow \infty$ and $b(v_{n}) \rightarrow \infty$, it follows immediately that $\{z_{n}\}$ is a Cauchy sequence in $Z$ and therefore converges to a point $z \in \bar{Z}$. We claim that $\psi(z) = \xi$.
Let $\gamma_{z}$ be an ascending geodesic ray anchored at $z$ and let $\{w_{n}\}$ be the sequence of vertices on $\gamma_{z}$ starting from its initial point. When considered as points of $Z$ this sequence of vertices must satisfy $d(w_{n},z) \rightarrow 0$ since $\gamma_{z}$ is ascending and anchored at $z$. This implies that $d(w_{n},z_{n}) \rightarrow 0$. Since by Lemma \ref{branch estimate} we have
\[
\alpha^{-(v_{n}|w_{n})_{b}} \asymp_{C(\alpha,\tau)} d(z_{n},w_{n}) + \alpha^{-\min\{b(v_{n}),b(w_{n})\}},
\]
we conclude that $(v_{n}|w_{n})_{b} \rightarrow \infty$ as $n \rightarrow \infty$. Hence $\{v_{n}\}$ and $\{w_{n}\}$ define the same point of $\partial_{\omega}X$, i.e., $\psi(z) = \xi$. We conclude that $\psi$ is surjective.
\end{proof}
\section{Uniformizing the hyperbolic filling}\label{sec:uniform filling}
This final section is devoted to proving Theorem \ref{filling theorem}. We retain all hypotheses and notation from the previous section. In particular we let $(Z,d)$ be a metric space and let $X$ be a hyperbolic filling of $Z$ with parameters $\alpha > 1$ and $\tau > \max\{3,\alpha/(\alpha-1)\}$ as in the previous section. We let $h: X \rightarrow \mathbb R$ be the height function and set $\rho(x) = \alpha^{-h(x)}$. We write $X_{\rho}$ for the conformal deformation of $X$ with conformal factor $\rho$, $d_{\rho}$ for the metric on $X_{\rho}$, and $\ell_{\rho}$ for lengths of curves measured in the metric $d_{\rho}$. Since $h$ is $1$-Lipschitz the density $\rho$ satisfies the Harnack inequality \eqref{Harnack} with $\varepsilon = \log \alpha$.
In the notation of Remark \ref{flexibility}, Lemma \ref{height busemann} shows that we have $h \in \mathcal{B}_{3}(X)$. Clearly $X$ is geodesic and complete, and Proposition \ref{hyperbolicity filling} shows that $X$ is $\delta$-hyperbolic with $\delta = \delta(\alpha,\tau)$. Thus to prove Theorem \ref{filling theorem} it suffices to show that $X$ is $\frac{1}{2}$-roughly starlike from $\omega$ and that $\rho$ is a GH-density with constant $M = M(\alpha,\tau)$, as it then follows for a Busemann function $b \in \mathcal{B}(X)$ with $b \doteq_{3} h$ that $\rho_{\varepsilon,b}$ is a GH-density with constant $M = M(\alpha,\tau)$ as well, where $\varepsilon = \log \alpha$. The conclusions of Theorem \ref{filling theorem} can then be derived from the fact that the metrics on $X_{\varepsilon,b}$ and $X_{\rho}$ are $\alpha^{3}$-biLipschitz to one another by the identity map on $X$.
We first look at rough starlikeness from the distinguished point $\omega \in \partial X$ corresponding to the equivalence class of all anchored descending geodesic rays in $X$.
\begin{lem}\label{starlike filling}
The hyperbolic filling $X$ is $\frac{1}{2}$-roughly starlike from $\omega$.
\end{lem}
\begin{proof}
Let $v \in V$ be a vertex of $X$ with associated point $z \in Z$. Let $\gamma:\mathbb R \rightarrow X$ be an ascending vertical geodesic line through $v$ that is anchored at $z$, as constructed in Lemma \ref{second height connection}, parametrized by arclength such that $\gamma(0) = v$. We put $\bar{\gamma}(t) = \gamma(-t)$ for $t \in \mathbb R$. Then $\bar{\gamma}|_{[0,\infty)}$ is an anchored descending geodesic ray and therefore belongs to the equivalence class $\omega$ by Lemma \ref{bounded distance vertical}. This shows that any vertex of $X$ lies on a geodesic line starting at $\omega$. Since any point in $X$ is within distance $\frac{1}{2}$ of some vertex, condition (1) of Definition \ref{def:rough star} follows.
For condition (2) we use the identification of $\partial_{\omega} X$ with $\bar{Z}$ from Proposition \ref{identify boundary}. Let $z \in \bar{Z}$ be given and let $\gamma: \mathbb R \rightarrow X$ be a vertical geodesic line anchored at $z$ and parametrized such that $h(\gamma(t)) = t$, as constructed in Lemma \ref{second height connection}. By the construction of the identification $\psi$ in Proposition \ref{identify boundary} the geodesic ray $\gamma|_{[0,\infty)}$ then belongs to the equivalence class of $z$ when $z$ is considered as a point of $\partial_{\omega}X$. Putting $\bar{\gamma}(t) = \gamma(-t)$ as above, we also have that $\bar{\gamma}|_{[0,\infty)}$ is a descending geodesic ray anchored at $z$ and therefore belongs to the equivalence class of $\omega$ by Lemma \ref{bounded distance vertical}. Since $z \in \bar{Z}$ was arbitrary, condition (2) follows.
\end{proof}
We will now show that $\rho$ is a GH-density with constant $M = M(\alpha,\tau)$ depending only on $\alpha$ and $\tau$. We will do this by estimating the distance $d_{\rho}$ between points at sufficiently large scales in $X$ and then using Corollary \ref{criterion}.
\begin{lem}\label{large distance}
Let $x,y \in X$ with $|xy| \geq 1$. Then we have
\begin{equation}\label{filling large}
d_{\rho}(x,y) \asymp_{C(\alpha,\tau)} \alpha^{-(x|y)_{h}}.
\end{equation}
Consequently $\rho$ is a GH-density with constant $M = M(\alpha,\tau)$.
\end{lem}
\begin{proof}
The bound on $d_{\rho}(x,y)$ from above follows from Lemma \ref{epsilon geodesic} applied with $\varepsilon = \log \alpha$, since there is a Busemann function $b \in \mathcal{B}(X)$ such that $b \doteq_{3} h$. Hence it suffices to establish the lower bound. Observe that for an edge $g$ of $X$, considered as a path between its endpoints $v$ and $w$ and assuming the orientation in which $h(v) \leq h(w)$, when $h(v) = h(w) = k$ we have
\begin{equation}\label{no telescope}
\ell_{\rho}(g) = \alpha^{-k}.
\end{equation}
On the other hand, since $B(v) \cap B(w) \neq \emptyset$ we have $d(v,w) < 2\tau \alpha^{-k}$. It follows that
\[
\ell_{\rho}(g) > C(\alpha,\tau)^{-1}d(v,w).
\]
Similarly, when $h(v) = k$ and $h(w) = k+1$ we have
\begin{equation}\label{telescope}
\ell_{\rho}(g) = \frac{1}{\log \alpha}(\alpha^{-k}-\alpha^{-k-1}) = \frac{1-\alpha^{-1}}{\log \alpha}\alpha^{- k},
\end{equation}
while $B(v) \cap B(w) \neq \emptyset$ implies again that $d(v,w) < 2\tau \alpha^{-k}$. Thus in this case we also have
\[
\ell_{\rho}(g) > C(\alpha,\tau)^{-1}d(v,w).
\]
Now let $\gamma$ be a rectifiable curve joining $x$ to $y$. Let $v$ be the first vertex on $\gamma$ met traveling from $x$ to $y$ and let $w$ be the first vertex on $\gamma$ met traveling from $y$ to $x$. We first suppose that $v \neq w$. We then let $\sigma$ be the subcurve of $\gamma$ from $v$ to $w$ starting from this first occurrence of $v$ and ending at this last occurrence of $w$. Let $\{v_{i}\}_{i=0}^{l}$ be the sequence of vertices encountered along the path $\sigma$, noting that by assumption we have $l \geq 1$. Then from our calculations above we have
\begin{equation}\label{one bound}
\ell_{\rho}(\sigma) \geq C(\alpha,\tau)^{-1}\sum_{i=0}^{l-1}d(v_{i},v_{i+1}) \geq C(\alpha,\tau)^{-1}d(v,w).
\end{equation}
On the other hand, since $v \neq w$ the curve $\sigma$ must contain at least one full edge of $X$ with one vertex being $v$ and at least one full edge with one vertex being $w$ (these may be the same edge). Then it follows from \eqref{no telescope} and \eqref{telescope} applied to those edges that
\begin{equation}\label{two bound}
\ell_{\rho}(\sigma) \geq C(\alpha,\tau)^{-1}\alpha^{-\min\{h(v),h(w)\}}.
\end{equation}
By combining \eqref{one bound} and \eqref{two bound} and then using Lemma \ref{branch estimate}, we conclude that
\[
\ell_{\rho}(\sigma) \geq C(\alpha,\tau)^{-1}\alpha^{-(v|w)_{h}}.
\]
Since $\sigma$ is a subcurve of $\gamma$ it follows that this inequality holds for $\gamma$ as well.
Now suppose that $v = w$. Then, since $|xy| \geq 1$, either the initial segment of $\gamma$ from $x$ to $v$ or the final segment of $\gamma$ from $w$ to $y$ has length at least $\frac{1}{2}$ in $X$. By reversing the roles of $x$ and $y$ if necessary we can assume that the initial segment $\eta$ of $\gamma$ from $x$ to $v$ has length at least $\frac{1}{2}$ in $X$. Since $\eta$ is contained entirely in a single edge of $X$ that has $v$ as a vertex, it follows that
\[
\ell_{\rho}(\eta) \geq \alpha^{-h(v)-1}\ell(\eta) \geq \frac{1}{2}\alpha^{-h(v)-1},
\]
with $\ell(\eta)$ denoting the length of $\eta$ in $X$. Hence
\[
\ell_{\rho}(\gamma) \geq \ell_{\rho}(\eta) \geq C(\alpha)^{-1}\alpha^{-(v|w)_{h}},
\]
using that $(v|w)_{h} = h(v)$ since $v = w$. This gives a similar lower bound on $\ell_{\rho}(\gamma)$ in this case as well. Minimizing over all rectifiable paths $\gamma$ from $x$ to $y$ then gives in both cases that
\[
d_{\rho}(x,y) \geq C(\alpha,\tau)^{-1}\alpha^{-(v|w)_{h}} \geq C(\alpha,\tau)^{-1}\alpha^{-(x|y)_{h}},
\]
with the second inequality following from the fact that $h$ is $1$-Lipschitz and $|xv| \leq 1$, $|yw| \leq 1$.
To conclude that $\rho$ is a GH-density we let $b$ be a Busemann function on $X$ such that $b \doteq_{3} h$ as in Lemma \ref{height busemann}. We let $d_{\varepsilon,b}$ be the distance obtained on $X$ through conformal deformation with the conformal factor
\[
\rho_{\varepsilon,b}(x) = e^{-\varepsilon b(x)} = \alpha^{-b(x)},
\]
where $\varepsilon = \log \alpha$. Then $d_{\varepsilon,b}$ is $\alpha^{3}$-biLipschitz to $d_{\rho}$ by the identity map on $X$. Consequently the comparison \ref{filling large} holds with $d_{\varepsilon,b}$ replacing $d_{\rho}$. We can then apply Corollary \ref{criterion} to conclude that $\rho_{\varepsilon,b}$ is a GH-density with constant $M = M(\alpha,\tau)$, noting that $X$ is $\delta$-hyperbolic with $\delta = \delta(\alpha,\tau)$ and $\frac{1}{2}$-roughly starlike from $\omega$. The GH-inequality \eqref{first GH} for $\rho$ then follows immediately from the $\alpha^{3}$-biLipschitz comparison of $d_{\varepsilon,b}$ to $d_{\rho}$.
\end{proof}
This completes the proof of Theorem \ref{filling theorem}, aside from the final assertion that the identification of $\partial X_{\rho}$ with $\bar{Z}$, given by the combination of Lemma \ref{boundary identification} and Proposition \ref{identify boundary}, is biLipschitz. This is shown below.
\begin{prop}\label{bilip boundary}
The identification $\partial X_{\rho} \cong \bar{Z}$ is biLipschitz with biLipschitz constant $L = L(\alpha,\tau)$ depending only on $\alpha$ and $\tau$.
\end{prop}
\begin{proof}
We consider $\partial_{\omega} X$ as equipped with the visual metric with parameter $\log \alpha$ defined by Proposition \ref{identify boundary}, which coincides with the extension of the metric $d$ on $Z$ to the completion $\bar{Z}$ under the identification $\psi: \bar{Z} \rightarrow \partial_{\omega}X$ of that proposition. By Theorem \ref{identification theorem} applied with this visual metric and $\varepsilon = \log \alpha$, the identification $\varphi:\partial_{\omega}X \rightarrow \partial X_{\rho}$ is biLipschitz. Hence the induced identification $\bar{Z} \cong \partial X_{\varepsilon}$ given by $\varphi \circ \psi$ is also biLipschitz. Furthermore all of the parameters involved in the biLipschitz constant can be taken to depend only on $\alpha$ and $\tau$ by the results of this section.
\end{proof}
\begin{rem}\label{bounded filling}
For $k \in \mathbb Z$ there is a canonical correspondence between hyperbolic fillings with fixed parameters $\alpha,\tau > 1$ of the metric spaces $(Z,d)$ and $(Z,\alpha^{-k}d)$ given by considering $\alpha^{-n}$-separated sets in $(Z,d)$ as $\alpha^{-n-k}$-separated sets in $(Z,\alpha^{-k}d)$. Thus when $Z$ is bounded there is no harm in assuming that $\mathrm{diam}\, Z < 1$ by multiplying the metric by $\alpha^{-k}$ for $k$ sufficiently large. The hyperbolic filling $X$ can then be written as $X = X_{\geq 0} \cup X_{\leq 0}$, where $X_{\geq 0} = h^{-1}([0,\infty))$ is the set of all points of nonnegative height and $X_{\leq 0} = h^{-1}((-\infty,0])$ is the set of all points of nonpositive height. The condition $\mathrm{diam}\, Z < 1$ implies that the vertex sets $V_{n} = \{v_{n}\}$ for $n \leq 0$ consist only of a single point, and in particular $X_{\leq 0}$ is simply a descending geodesic ray starting from $v_{0}$. The space $X$ is isometric to the ray augmentation of $X_{\geq 0}$ based at $v_{0}$, in the language of Definition \ref{ray augment}.
The graph $X_{\geq 0}$ is essentially the hyperbolic filling of $Z$ constructed in \cite{BBS21}, with the exceptions that they have a stricter condition for the placement of vertical edges and that they require an additional nesting condition $S_{m} \subset S_{n}$ for $m \leq n$. They uniformize this filling for \emph{all} $\tau > 1$ using the density $\rho_{\varepsilon,v_{0}}(x) = e^{-\varepsilon |xv_{0}|}$ for $0 < \varepsilon \leq \log \alpha$, for which it is easy to see that $\rho_{\varepsilon,v_{0}} = \rho_{\varepsilon}|_{X_{\geq 0}}$, where $\rho_{\varepsilon}(x) = e^{-\varepsilon h(x)}$ and $h: X \rightarrow \mathbb R$ is the height function. When $\tau$ satisfies \eqref{tau requirement} we can use Theorem \ref{filling theorem} to deduce their results from ours, up to some minor differences in the definition of the hyperbolic filling. When $\tau$ is close to $1$ it is possible to realize trees as hyperbolic fillings \cite[Theorem 7.1]{BBS21}, whereas when $\tau$ satisfies \eqref{tau requirement} the hyperbolic filling is only a tree if $Z$ consists of a single point (by Lemma \ref{cone adjacent}).
\end{rem}
\end{document}
\begin{document}
\title{Jucys-Murphy elements, orthogonal matrix integrals, and
Jack measures}
\begin{abstract}
We study symmetric polynomials whose variables are
odd-numbered Jucys-Murphy elements.
They define elements of the Hecke algebra associated to
the Gelfand pair of the symmetric group with the hyperoctahedral group.
We evaluate their expansions in zonal spherical functions and
in double coset sums.
These evaluations are related to integrals of polynomial functions
over orthogonal groups.
Furthermore, we give an extension of them, based on Jack polynomials.
\noindent
\emph{2000 Mathematics Subject Classification:}
05E15,
20C08,
05E05,
20C35.
\end{abstract}
\section{Introduction}
Let $S_n$ be the symmetric group.
Jucys-Murphy elements are formal sums in the group algebra $\mathbb{C}[S_{n}]$ defined by
$$
J_1=0, \qquad J_k=\sum_{i=1}^{k-1} (i \ k), \qquad k=2,3,\dots,n,
$$
where $(i \ k)$ is the transposition between $i$ and $k$.
Every two of them commute with each other.
Jucys \cite{Jucys} studied symmetric polynomials in variables $J_1,J_2,\dots,J_n$
and proved that they are central elements in $\mathbb{C}[S_n]$.
More precisely, given a symmetric function $F$, we have
$F(J_1,J_2,\dots,J_n) \in \mcal{Z}_n$, where $\mcal{Z}_n$ is the center of
the group algebra $\mathbb{C}[S_n]$.
For a given element $F(J_1,J_2,\dots,J_n) \in \mcal{Z}_n$,
it is natural to ask for its character expansion and class expansion.
Let $\chi^\lambda$ be the irreducible character of $S_n$ associated to
Young diagram $\lambda$
and $f^\lambda$ the dimension of its corresponding representation.
Jucys \cite{Jucys} obtained the character expansion
$$
F(J_1,J_2, \dots,J_n)= \sum_{\lambda: |\lambda|=n} F(A_\lambda) \frac{f^\lambda}{n!} \chi^\lambda,
$$
where $|\lambda|$ is the number of boxes in the Young diagram $\lambda$
and $A_\lambda$ is the alphabet of contents $j-i$ of boxes $(i,j)$ in $\lambda$.
The computation for the class expansion of $F(J_1,J_2,\dots,J_n)$ is a more difficult problem.
Let $\mf{c}_\mu(n)$ be the sum of all permutations in $S_n$ of reduced cycle-type $\mu$.
For example, $\mf{c}_{(0)}(n)$ is the identity permutation and $\mf{c}_{(1)}(n)$
is the sum of all transpositions.
Then $\{\mf{c}_\mu(n) \ | \ |\mu|+\ell(\mu) \le n\}$ is a basis of $\mcal{Z}_n$,
where $\ell(\mu)$ is the number of rows of the diagram $\mu$.
The question is to evaluate coefficients $\mathcal{A}_\mu(F,n)$ in
$$
F(J_1,J_2,\dots,J_n) = \sum_{\mu: |\mu|+\ell(\mu) \le n} \mathcal{A}_\mu(F,n) \mf{c}_\mu(n).
$$
The $\mathcal{A}_\mu(F,n)$ are zero unless $\deg F \ge |\mu|$ and
polynomials in $n$.
If $\deg F=|\mu|$, then $\mathcal{A}_\mu(F,n)$ is independent of $n$.
Lascoux and Thibon \cite{LT} studied the coefficients for power-sum symmetric functions $F=p_k$,
and Fujii et al. \cite{FKMO} expressed $\mathcal{A}_{(0)}(p_k,n)$ as an explicit polynomial in
binomial coefficients $\binom{n}{m}$.
Matsumoto and Novak \cite{MN} gave a combinatorial explicit expression of
$\mathcal{A}_\mu(m_\lambda,n)$ with $|\lambda|=|\mu|$,
where $m_\lambda$ is the monomial symmetric function.
The coefficients $\mathcal{A}_\mu(h_k,n)$ for complete symmetric functions $h_k$ are more interesting.
They appear in expansions of unitary matrix integrals.
Let $U(N)$ be the group of $N \times N$ unitary matrices $g=(g_{ij})_{1 \le i,j \le N}$,
equipped with its normalized Haar measure $d g$.
Consider integrals of the form
$$
\int_{g \in U(N)} g_{i_1 j_1} g_{i_2 j_2} \cdots g_{i_n j_n}
\overline{g_{i_1' j_1'} g_{i_2' j_2'} \cdots g_{i_n' j_n'}} dg
$$
where $i_k, i_k',j_k,j_k'$ are in $\{1,2,\dots, N\}$
and $N \ge n$.
The Weingarten calculus for unitary groups
developed in \cite{W,C,CS} states that
those integrals are given by a sum of Weingarten functions
$$
\mr{Wg}^{U(N)}_n(\sigma)= \int_{g \in U(N)} \prod_{k=1}^n g_{kk} \overline{g_{k \sigma(k)}} d g,
\qquad \sigma \in S_{n}.
$$
In \cite{Novak} (see also \cite{MN}),
a remarkable connection between $\mr{Wg}^{U(N)}_n$ and Jucys-Murphy elements was
discovered.
Specifically, the Weingarten function is given as a generating function of $h_k(J_1,\dots,J_n)$:
$$
\sum_{\sigma \in S_n} \mr{Wg}^{U(N)}_n(\sigma) \sigma = \sum_{k=0}^\infty (-1)^k N^{-n-k} h_k(J_1,J_2,\dots,J_n),
$$
or equivalently
$$
\mr{Wg}^{U(N)}_n(\sigma) = \sum_{k=0}^\infty (-1)^k N^{-n-k} \mathcal{A}_\mu(h_k,n),
$$
where $\mu$ is the reduced cycle-type of $\sigma$.
Thus unitary matrix integrals are evaluated by observing symmetric functions in Jucys-Murphy elements.
The main purpose of the present paper is to study their analogues for orthogonal groups.
In the orthogonal group case, the elements
$F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n$ are needed instead of $F(J_1,J_2,\dots,J_n)$.
Here $P_n=\sum_{\zeta \in H_n} \zeta$ is an element of $\mathbb{C}[S_{2n}]$
and $H_n$ is the hyperoctahedral group realized in $S_{2n}$.
We will prove first that $F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n$ belongs to
the Hecke algebra $\mcal{H}_n$ associated with the Gelfand pair $(S_{2n},H_n)$.
The Hecke algebra $\mcal{H}_n$ has two natural bases
$\{\omega^\lambda \ | \ |\lambda|=n\}$ and $\{\psi_\mu(n) \ | \ |\mu|+\ell(\mu) \le n \}$,
where the $\omega^\lambda$ are zonal spherical functions and
the $\psi_\mu(n)$ are sums over double cosets of the form $H_n \sigma H_n$.
As in the unitary group case,
it is natural to ask for expansions of $F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n$
in zonal spherical functions $\omega^\lambda$ or in double coset sums $\psi_\mu(n)$.
We will prove that the expansion in $\omega^\lambda$ is given by
$$
F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n=
\sum_{\lambda: |\lambda|=n} F(A_\lambda') \frac{f^{2\lambda}}{(2n-1)!} \omega^\lambda,
$$
where $A_\lambda'$ is the alphabet of modified contents $2j-i-1$.
Our main purpose is to obtain some properties for coefficients $\mathcal{A}'_\mu(F,n)$ defined via
$$
F(J_1,J_3,\dots,J_{2n-1} ) \cdot P_n =
\sum_{\mu: |\mu|+\ell(\mu) \le n} \mathcal{A}_\mu'(F,n) \mf{\psi}_\mu(n).
$$
In general, the $\mathcal{A}_\mu'(F,n)$ are different from
$\mathcal{A}_\mu(F,n)$.
For example, $\mathcal{A}_{(1)}(h_3,n)= \tfrac{1}{2}n^2+\tfrac{3}{2}n-4$ but
$\mathcal{A}'_{(1)}(h_3,n)=n^2+3n-7$.
However, by observing deep combinatorics of perfect matchings,
we will prove that,
if $\deg F=|\mu|$, they coincide as $\mathcal{A}_\mu(F,n)=\mathcal{A}_\mu'(F,n)$,
and are independent of $n$.
Like in the unitary group case,
coefficients $\mathcal{A}_\mu'(h_k,n)$ are involved in orthogonal matrix integrals.
Let $O(N)$ be the orthogonal group of degree $N$ and
$d g$ its normalized Haar measure.
Then, for example, we obtain
$$
\int_{g \in O(N)} g_{11}^2 g_{22}^2 \cdots g_{nn}^2 d g =
\sum_{k=0}^\infty (-1)^{k} N^{-n-k} \mathcal{A}_{(0)}'(h_k,n).
$$
Via the Weingarten calculus for orthogonal groups developed in \cite{CS,CM,ZJ},
we establish the connection between orthogonal matrix integrals and Jucys-Murphy elements.
Furthermore, we introduce an $\alpha$-extension of $\mathcal{A}_\mu(F,n)$ and $\mathcal{A}'_\mu(F,n)$.
Let $\alpha$ be a positive real number.
We define the value $\mathcal{A}_\mu^{(\alpha)}(F,n)$ as an average with respect to
the Jack measure.
The Jack measure is a probability measure on Young diagrams
and is a deformation of the Plancherel measure for symmetric groups.
Its definition is based on Jack polynomial theory, and
its connections with random matrix theory have been much studied; see \cite{Mat, BO} and the references therein.
From symmetric function theory, we can see $\mathcal{A}_\mu(F,n)=\mathcal{A}^{(1)}_\mu(F,n)$
and $\mathcal{A}_\mu'(F,n)= \mathcal{A}^{(2)}_\mu(F,n)$ for any symmetric function $F$ and partition $\mu$.
Also $\mathcal{A}_\mu^{(1/2)}(F,n)$ are important and related to a twisted Gelfand pair.
We evaluate $\mathcal{A}_\mu^{(\alpha)}(e_k,n)$ for elementary symmetric functions $e_k$.
Also, by applying shifted symmetric function theory developed in
\cite{KOO,LassalleSomeIdentities, LassalleCumulant, Olshanski},
we prove that
the $\mathcal{A}^{(\alpha)}_\mu(F,n)$ are polynomials in $n$.
We could not obtain any strong results for $\mathcal{A}_\mu^{(\alpha)}(F,n)$.
Our approach is experimental, but the author believes that it is fascinating and
applicable in future research.
The present paper is constructed as follows:
In Section 2 we review necessary notations and fundamental properties.
In Section 3, we evaluate $e_k(J_1,J_3,\dots,J_{2n-1}) \cdot P_n$ explicitly and
prove that $F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n$ is an element of the Hecke algebra $\mcal{H}_n$.
In Section 4, we give the expansion of $F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n$
in zonal spherical functions.
In Sections 5 and 6, we give some properties of $\mathcal{A}_\mu'(F,n)$.
Specifically, we prove that $\mathcal{A}_\mu'(F,n)$ coincides with
$\mathcal{A}_\mu(F,n)$ if $|\mu|= \deg F$.
As we mentioned, such an equality does not hold for $|\mu|< \deg F$.
In Section 7, we see the connection to orthogonal matrix integrals.
In Section 8, we study Jack's $\alpha$-deformations $\mathcal{A}_\mu^{(\alpha)}(F,n)$.
In the final section, Section 9,
we give some examples and suggest four conjectures.
\begin{remark}
Since a preliminary version of this paper was released,
all of our four conjectures given in Subsection \ref{subsec:Open}
have been actively studied by other researchers.
Their proofs should appear very soon.
\end{remark}
\section{Preparations} \label{Sec:Preparations}
We use the notations of Macdonald.
See Chapter I and VII in his book \cite{Mac}.
\subsection{Partitions and contents} \label{subsec:partitions}
A {\it partition} $\lambda=(\lambda_1,\lambda_2,\dots)$ is a weakly decreasing sequence of
nonnegative integers such that
its {\it length} $\ell(\lambda):=|\{ i \ge 1 \ | \ \lambda_i >0\}|$ is finite.
We write the {\it size} of $\lambda$ by $|\lambda|:= \sum_{i \ge 1} \lambda_i$.
If $|\lambda|=n$, we say that $\lambda$ is a partition of $n$ and write $\lambda \vdash n$.
We often identify $\lambda$ with its {\it Young diagram}
$Y(\lambda):=\{\square =(i,j) \in \mathbb{Z}^2 \ | \ 1 \le i \le \ell(\lambda), \ 1 \le j \le \lambda_i \}$.
If $\square =(i,j) \in Y(\lambda)$, we say that $\square$ is a {\it box} of $\lambda$ and write
$\square \in \lambda$ shortly.
The {\it content} of $\square=(i,j) \in \lambda$ is defined by $c(\square):=j-i$.
Also we use its analogy $c'(\square):=2j-i-1$.
Let $A_\lambda$ and $A_\lambda'$ be the alphabet with $|\lambda|$ elements given by
$$
A_\lambda=\{c(\square) \ | \ \square \in \lambda\}, \qquad
A_\lambda'=\{c'(\square) \ | \ \square \in \lambda\}.
$$
For example, $A_{(2,2,1)}=\{1,0,0,-1,-2\}$ and $A_{(2,2,1)}'=\{2,1,0,-1,-2\}$.
For each $i \ge 1$, we write the multiplicity of $i$ in $\lambda$ by
$m_i(\lambda)=|\{j \ge 1 \ | \ \lambda_j=i\}|$.
We sometimes write $\lambda$ as
$(\dots,3^{m_3(\lambda)}, 2^{m_2(\lambda)},1^{m_1(\lambda)})$.
For example, $\lambda=(2,1,1,1)= (2,1^3)$.
Define
\begin{equation} \label{eq:zlambda}
z_\lambda= \prod_{i \ge 1} i^{m_i(\lambda)} m_i(\lambda)!.
\end{equation}
Let $\lambda,\mu$ be partitions.
We define $\lambda+\mu$ to be the sequence of $\lambda_i+\mu_i$:
$(\lambda+\mu)_i=\lambda_i+\mu_i$.
Also we define $\lambda \cup \mu$ to be the partition whose parts are those of $\lambda$
and $\mu$, arranged in decreasing order.
In general, given partitions $\lambda^{(1)},\lambda^{(2)},\dots,\lambda^{(k)}$,
we define $\lambda^{(1)} \cup \lambda^{(2)} \cup \dots \cup \lambda^{(k)}$ in the same way.
For a partition $\lambda$ with $\ell(\lambda)=l$,
we define its {\it reduction} $\tilde{\lambda}$ by
$\tilde{\lambda}=(\lambda_1-1, \lambda_2-1,\dots,\lambda_{l}-1)$.
For each $n \ge 1$, the map $\lambda \mapsto \tilde{\lambda}$ gives a bijection
from the set
$\{\lambda \ | \ |\lambda|= n\}$ to
$\{ \mu \ | \ |\mu|+\ell(\mu) \le n \}$.
Indeed, its inverse map is given by $\mu
\mapsto \mu+(1^{n-|\mu|})=:\lambda$.
Then $|\lambda|-\ell(\lambda)=|\mu|$.
\subsection{Symmetric functions}
\label{subsec:SymmetricFunctions}
Let $x=(x_1,x_2,\dots)$ be an infinite sequence of indeterminates,
and $\mbb{S}$ the algebra of symmetric functions with complex coefficients in variables $x$.
Given a partition $\lambda$, the \emph{monomial symmetric function} $m_\lambda$
is defined by
$$
m_\lambda(x)= \sum_{(\alpha_1,\alpha_2,\dots)} x_1^{\alpha_1}
x_2^{\alpha_2}\cdots ,
$$
summed over all distinct permutations $(\alpha_1,\alpha_2,\dots)$ of
$(\lambda_1,\lambda_2,\dots)$.
Denote by $e_k, p_k$, and $h_k$ the
\emph{elementary}, \emph{power-sum}, and \emph{complete symmetric functions},
respectively.
Namely,
\begin{align*}
e_k(x)=& m_{(1^k)}(x)= \sum_{i_1 < i_2 < \cdots <i_k} x_{i_1} x_{i_2} \cdots x_{i_k}, \\
p_k(x)=&m_{(k)}(x)= x_1^k+x_2^k+\cdots, \\
h_k(x)=& \sum_{\lambda \vdash k} m_\lambda(x)= \sum_{ i_1 \le i_2 \le \cdots \le i_k}x_{i_1} x_{i_2} \cdots x_{i_k}.
\end{align*}
Also we put
$e_{\lambda}=\prod_{i=1}^{\ell(\lambda)} e_{\lambda_i}$,
and similarly for $p_\lambda$ and $h_\lambda$.
For convenience, we set $m_{(0)}=e_0=h_0=1$.
For finite variables $(x_1,x_2,\dots,x_n)$,
the monomial symmetric function (or polynomial) $m_\lambda(x_1,x_2,\dots)$ is zero
unless $\ell(\lambda) \le n$.
The degree of $m_\lambda$ is naturally defined to be $\deg m_\lambda= |\lambda|$.
The fundamental theorem for symmetric functions says that
any symmetric function $F$ is given by a polynomial in
$e_1,e_2,\dots$ and that the $e_k$ are algebraically independent.
We can replace $e_k$ by $p_k$ in this statement.
\subsection{Hyperoctahedral groups} \label{subsec:Hyperoctahedral}
We recall the Gelfand pair $(S_{2n},H_n)$. Details can be found in \cite[VII.2]{Mac}.
Let $S_{n}$ be the symmetric group on $\{1,2,\dots, n\}$.
Let $\mathbb{C}[S_{n}]$ denote the algebra of all complex-valued functions $f$
on $S_n$ under convolution
$(f_1 \cdot f_2)(\sigma)= \sum_{\tau \in S_{n}} f_1(\sigma \tau^{-1}) f_2(\tau)$.
This is identified with the algebra of formal $\mathbb{C}$-linear sums of
permutations with multiplication
$(\sum_{\sigma} f_1(\sigma) \sigma)(\sum_{\tau} f_2(\tau) \tau) =
\sum_{\pi} \left( \sum_{\sigma} f_1(\sigma) f_2(\sigma^{-1} \pi) \right) \pi$.
A permutation $\sigma$ in $S_n$ is regarded as a permutation in $S_{n+1}$ fixing
the letter $n+1$. Thus $\mathbb{C}[S_{n}] \subset \mathbb{C}[S_{n+1}]$.
Let $H_n$ be the \emph{hyperoctahedral group}, which
is a subgroup of $S_{2n}$ generated by
transpositions $(2i-1 \ 2i)$, $(1 \le i \le n)$, and by
double transpositions $(2i-1 \ 2j-1) (2i \ 2j)$, $(1 \le i<j \le n)$.
Equivalently, $H_n$ is the centralizer of $(1 \ 2)(3 \ 4) \cdots (2n-1 \ 2n)$ in $S_{2n}$.
Then the pair $(S_{2n},H_n)$ is a Gelfand pair.
Let $P_n$ be the sum of all elements of $H_n$ in $\mathbb{C}[S_{2n}]$:
$$
P_n=\sum_{\zeta \in H_n}\zeta.
$$
Consider the double cosets $H_n \sigma H_n$ in $S_{2n}$.
These cosets are indexed by partitions of $n$, that is,
\begin{equation} \label{eq:bothsidedecomposition}
S_{2n} = \bigsqcup_{\rho \vdash n} H_{\rho},
\end{equation}
where each $H_\rho$ is a double coset.
The permutation $\sigma \in S_{2n}$ is said to be of {\it coset-type} $\rho$
and written as $\Xi_n(\sigma)= \rho$
if $\sigma \in H_\rho$.
Also the partition $\Xi_n(\sigma)$ is graphically defined as follows.
Consider the graph $\Gamma(\sigma)$ whose vertex set is $\{1,2,\dots,2n\}$ and whose
edge set consists of $\{2i-1,2i\}$ and $\{\sigma(2i-1),\sigma(2i)\}$, $1 \le i \le n$.
We think of the edges $\{\sigma(2i-1),\sigma(2i)\}$ as blue, and the others as red.
Then $\Gamma(\sigma)$ has some connected components of even lengths
$2 \rho_1 \ge 2 \rho_2 \ge \cdots$.
Thus $\sigma$ determines a partition $\rho=(\rho_1,\rho_2,\dots)$ of $n$.
The $\rho$ is nothing but the coset-type $\Xi_n(\sigma)$.
Two permutations $\sigma_1,\sigma_2 \in S_{2n}$ have the same coset-type if
and only if $H_n \sigma_1 H_n= H_n \sigma_2 H_n$.
The set $H_\rho$ consists of permutations in $S_{2n}$ of coset-type $\rho$.
Given $\sigma \in S_{2n}$, we define $\nu_n(\sigma)$ to be the length of
the partition $\Xi_n(\sigma)$: $\nu_n(\sigma) = \ell(\Xi_n(\sigma))$.
\begin{example} \label{ex:GraphGamma}
For $\sigma= \left( \begin{smallmatrix} 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 \\
5 & 1 & 4 & 10 & 3 & 9 & 7 & 6 & 2 & 8 \end{smallmatrix} \right) \in S_{10}$,
its graph $\Gamma(\sigma)$ has two connected components $\Gamma^{(1)}$ and $\Gamma^{(2)}$:
$$
\Gamma^{(1)}: 1 \Longleftrightarrow 5 \longleftrightarrow 6
\Longleftrightarrow 7 \longleftrightarrow 8 \Longleftrightarrow 2
\longleftrightarrow 1, \qquad
\Gamma^{(2)}: 3 \Longleftrightarrow 9 \longleftrightarrow 10 \Longleftrightarrow 4
\longleftrightarrow 3.
$$
Here ``$i \Longleftrightarrow j$'' means that
a blue edge connects
the $i$-th vertex with the $j$-th vertex,
whereas ``$p \longleftrightarrow q$'' means that
a red edge connects
the $p$-th vertex with the $q$-th vertex.
Equivalently, there exists an integer $k$ such that
$\{i,j\}=\{\sigma(2k-1), \sigma(2k)\}$ (resp. $\{p,q\}=\{2k-1,2k\}$).
In this example, the components $\Gamma^{(1)}$
and $\Gamma^{(2)}$ have $6$ and $4$ vertices, respectively,
and hence $\Xi_5(\sigma)=(3,2)$ and $\nu_5(\sigma)=2$.
\end{example}
\subsection{Perfect matchings} \label{subsec:PM}
Let $\mcal{M}(2n)$ be the set of all \emph{perfect matchings} on $\{1,2,\dots,2n\}$.
Each perfect matching $\mf{m}$ in $\mcal{M}(2n)$ is uniquely expressed by the form
\begin{equation} \label{eq:ExpressionPairPartition}
\left\{ \{\mf{m}(1),\mf{m}(2) \}, \{\mf{m}(3),\mf{m}(4) \}, \dots, \{\mf{m}(2n-1),\mf{m}(2n) \}
\right\}
\end{equation}
with $\mf{m}(2i-1) < \mf{m}(2i)$ for $1 \le i \le n$ and with
$\mf{m}(1) <\mf{m}(3) < \cdots < \mf{m}(2n-1)$.
We call each $\{\mf{m}(2i-1),\mf{m}(2i)\}$ a \emph{block} of $\mf{m}$.
A block of the form $\{2i-1,2i\}$ is said to be \emph{trivial}.
We embed the set $\mcal{M}(2n)$ into $S_{2n}$ via the mapping
\begin{equation} \label{eq:identificationPM}
\mcal{M}(2n) \ni \mf{m} \mapsto
\begin{pmatrix}
1 & 2 & 3 & 4 & \cdots & 2n \\
\mf{m}(1) & \mf{m}(2) & \mf{m}(3) & \mf{m}(4) & \cdots & \mf{m}(2n)
\end{pmatrix} \in S_{2n}.
\end{equation}
In particular, the graph $\Gamma(\mf{m})$, the coset-type $\Xi_n(\mf{m})$,
and the value $\nu_n(\mf{m})$ are defined.
Note that $\Gamma(\mf{m})=\Gamma(\mf{n})$ if and only if
$\mf{m}=\mf{n}$.
A perfect matching $\mf{n}$ in $\mcal{M}(2n-2)$ is regarded as an element of $\mcal{M}(2n)$ by
adding the trivial block $\{2n-1,2n\}$:
$$
\mcal{M}(2n-2) \ni \mf{n} \mapsto \mf{n} \sqcup \{\{2n-1,2n\}\} \in \mcal{M}(2n).
$$
Thus we think as $\mcal{M}(2n-2) \subset \mcal{M}(2n)$.
It is well known that
$\mcal{M}(2n)$ is the set of all representatives of the right cosets $\sigma H_n$
of $H_n$ in $S_{2n}$:
\begin{equation} \label{eq:rightcosets}
S_{2n} = \bigsqcup_{\mf{m} \in \mcal{M}(2n)} \mf{m} H_n.
\end{equation}
\subsection{Characters and zonal spherical functions} \label{subsec:CharZonal}
Given a partition $\lambda \vdash n$, we denote by $\chi^\lambda$ the irreducible character
of $S_{n}$. The set $\{\chi^\lambda \ | \ \lambda \vdash n \}$ is a basis of the center of the group algebra $\mathbb{C}[S_{n}]$.
Let $\mr{id}_n$ denote the identity permutation in $S_n$ and let
$f^\lambda:= \chi^\lambda(\mr{id}_n)$.
Thus the number $f^\lambda$ is the dimension of the irreducible representation
of $S_{n}$ with character $\chi^\lambda$.
Equivalently, $f^\lambda$ is the number of standard Young tableaux of shape $\lambda$,
see e.g. \cite{Sagan}.
For each partition $\lambda$ of $n$, we define the {\it zonal spherical function} of the
Gelfand pair $(S_{2n},H_n)$ by
\begin{equation}
\omega^\lambda (\sigma) = \frac{1}{2^n n!} \sum_{\zeta \in H_n} \chi^{2\lambda}(\sigma \zeta),
\qquad \sigma \in S_{2n},
\end{equation}
where $2\lambda=\lambda+\lambda=(2\lambda_1,2\lambda_2,\dots)$.
If we regard $\omega^\lambda$ as an element of $\mathbb{C}[S_{2n}]$,
we can express $\omega^\lambda= \frac{1}{2^n n!} \chi^{2\lambda} P_n=
\frac{1}{2^n n!} P_n \chi^{2\lambda}$.
These functions are $H_n$-biinvariant functions on $S_{2n}$ and
constant on each double coset $H_\rho$.
Denote by $\omega^\lambda_\rho$ the value of $\omega^\lambda$ at $H_\rho$.
Let $\mcal{H}_n$ be the Hecke algebra associated with the Gelfand pair $(S_{2n},H_n)$:
$$
\mcal{H}_n =\{ f : S_{2n} \to \mathbb{C} \ | \ \text{$f$ is constant on each $H_\rho$ ($\rho \vdash n$)} \}.
$$
Since $(S_{2n},H_n)$ is a Gelfand pair,
this algebra is commutative with respect to the convolution product.
We often regard $\mcal{H}_n$ as a subspace of $\mathbb{C}[S_{2n}]$.
There are two natural bases of $\mcal{H}_n$:
one is $\{\omega^\lambda \ | \ \lambda \vdash n\}$ and the other is
$\{\phi_\rho \ | \ \rho \vdash n\}$.
Here the $\phi_\rho$ are double-coset sum functions
$$
\phi_\rho= \sum_{\sigma \in H_\rho} \sigma.
$$
Note that $\phi_{(1^n)}= P_n$.
\section{Analogue of Jucys' result}
Define the Jucys-Murphy elements $J_k$.
They are commuting elements in group algebras of symmetric groups, given by
$J_1=0$ and by
$$
J_k=(1 \ k) +(2 \ k)+ \cdots +(k-1 \ k) \qquad \text{for $k = 2,3,\dots$}.
$$
Jucys \cite{Jucys} obtained an exact expression for
$e_k(J_1,J_2,\dots,J_n)$,
where $e_k$ is the elementary symmetric function.
His result is the following identity:
$$
e_k(J_1,J_2,\dots,J_n)= \sum_{\pi} \pi
$$
summed over all permutations $\pi$ in $S_{n}$ with exactly $n-k$ cycles (including trivial cycles).
The following proposition is an analogue of this identity, and was
essentially obtained by Zinn-Justin \cite{ZJ} very recently.
Our proof is an analogue of Jucys' proof in \cite{Jucys}.
\begin{prop} \label{prop-Jucys}
For any $k$ and $n$, we have
\begin{equation} \label{eq:ElementaryJM1}
e_k(J_1,J_3,\dots,J_{2n-1}) \cdot P_n =
\sum_{\begin{subarray}{c} \mf{m} \in \mcal{M}(2n) \\
\nu_n(\mf{m})= n-k \end{subarray}} \mf{m} P_n.
\end{equation}
\end{prop}
\begin{proof}
First observe that \eqref{eq:ElementaryJM1} holds true when $k=0$
because $\mf{m}=\{\{1,2\},\{3,4\},\dots,\{2n-1,2n\}\}$ is
the unique element in $\mcal{M}(2n)$ satisfying $\nu_n(\mf{m})=n$.
Also, when $k \ge n$, both sides are zero.
We proceed by induction on $n$.
Let $n >1$ and suppose that the claim holds for
$e_k(J_1,J_3,\dots,J_{2n-3}) \cdot P_{n-1}$ with any $k \ge 0$.
Using identities
$e_{k}(x_1,x_2,\dots,x_n)= e_{k}(x_1,\dots,x_{n-1})
+ x_n e_{k-1}(x_1,\dots,x_{n-1})$ and
$P_{n-1} P_n =|H_{n-1}| P_n$,
we see that
\begin{align*}
& e_k(J_1,J_3,\dots,J_{2n-1}) P_{n} \\
=& e_k (J_1,J_3,\dots,J_{2n-3}) P_n + J_{2n-1} e_{k-1}(J_1,J_3,\dots,J_{2n-3})P_n \\
=& \frac{1}{|H_{n-1}|} e_{k}(J_1,J_3,\dots,J_{2n-3}) P_{n-1} P_n
+ \frac{1}{|H_{n-1}|} J_{2n-1} e_{k-1} (J_1,J_3,\dots,J_{2n-3}) P_{n-1} P_n.
\end{align*}
The induction assumption gives
\begin{equation} \label{eq:InductionElementary}
e_k(J_1,J_3,\dots,J_{2n-1}) P_{n}=
\sum_{\begin{subarray}{c} \mf{n} \in \mcal{M}(2n-2) \\
\nu_{n-1} (\mf{n})= n-1-k \end{subarray}} \mf{n} P_n
+ \sum_{t=1}^{2n-2} \sum_{\begin{subarray}{c} \mf{n} \in \mcal{M}(2n-2) \\
\nu_{n-1}(\mf{n})= n-k \end{subarray}} (t \ 2n-1) \mf{n}P_n.
\end{equation}
Recall the natural inclusion
$\mcal{M}(2n-2) \ni \mf{n} \mapsto \mf{n} \sqcup \{\{2n-1, 2n\}\} \in \mcal{M}(2n)$.
We have $\nu_n(\mf{n} \sqcup \{\{2n-1, 2n\}\} )=
\nu_{n-1} (\mf{n})+1$,
and hence the first summand on the right hand side of \eqref{eq:InductionElementary}
coincides with $\sum_{\mf{m}} \mf{m} P_n$,
summed over $\mf{m} \in \mcal{M}(2n)$ having the block $\{2n-1,2n\}$ with
$\nu_{n}(\mf{m})=n-k$.
Next, let us see $ (t \ 2n-1) \mf{n}P_n$ for $\mf{n} \in \mcal{M}(2n-2)$ and
$1 \le t \le 2n-2$.
Denote by $t_{\mf{n}}$ the index in $\{1,2,\dots,2n-2\}$,
determined by $\{t_{\mf{n}},t\} \in \mf{n}$.
We define an element $\mf{n}_t$ in $\mcal{M}(2n)$
by removing $\{t_{\mf{n}},t\}$ from $\mf{n}$ and by
adding two new blocks $\{t_{\mf{n}},2n-1\}$ and $\{t,2n\}$:
$$
\mf{n}_t = (\mf{n} \setminus \{\{t_{\mf{n}},t\}\}) \cup \{ \{ t_{\mf{n}},2n-1\},
\{t,2n\} \}.
$$
Then it is easy to see $\mf{n}_t H_n =
(t \ 2n-1)\mf{n}H_n$, and therefore
$$
\mf{n}_t P_n= (t \ 2n-1) \mf{n} P_n.
$$
Moreover, we can see
$$
\nu_{n-1}(\mf{n})= \nu_n(\mf{n}_t).
$$
In fact, consider graphs $\Gamma(\mf{n}), \Gamma(\mf{n}_t)$
defined in Subsection \ref{subsec:Hyperoctahedral}.
We use the notation of Example \ref{ex:GraphGamma}.
The graph $\Gamma(\mf{n}_t)$ can be obtained from $\Gamma(\mf{n})$ if we replace
an edge $t_{\mf{n}} \Longleftrightarrow t$ in $\Gamma(\mf{n})$
by the path $t_{\mf{n}} \Longleftrightarrow 2n-1 \longleftrightarrow
2n \Longleftrightarrow t$.
This means that the numbers of components of $\Gamma(\mf{n})$ and $\Gamma(\mf{n}_t)$
coincide, i.e., $\nu_{n-1}(\mf{n})= \nu_n(\mf{n}_t)$.
The observation in the previous paragraph implies that
for each $t$
the sum $\sum_{\mf{n}} (t \ 2n-1) \mf{n}P_n$
on \eqref{eq:InductionElementary}
coincides with $\sum_{\mf{m}} \mf{m} P_n$,
summed over $\mf{m} \in \mcal{M}(2n)$ having the block $\{t,2n\}$ with
$\nu_{n}(\mf{m})=n-k$.
It follows that the expression \eqref{eq:InductionElementary}
is $\sum_{\mf{m}} \mf{m} P_n$,
summed over all $\mf{m} \in \mcal{M}(2n)$ with
$\nu_{n}(\mf{m})=n-k$.
Thus \eqref{eq:ElementaryJM1} is proved.
\end{proof}
\iffalse
\begin{proof}
First observe that \eqref{eq:ElementaryJM1} holds true when $k=0$
because $\mf{m}=\{\{1,2\},\{3,4\},\dots,\{2n-1,2n\}\}$ is
the unique element of $\mcal{M}(2n)$ satisfying $\nu_n(\mf{m})=n$.
Also, when $k \ge n$, both sides are zero.
We proceed by induction on $n$.
Let $n >1$ and suppose that the claim holds for
$e_k(J_1,J_3,\dots,J_{2n-3}) \cdot P_{n-1}$ with any $0<k<n$.
We define the map $\mf{m} \mapsto D_{n}(\mf{m})$ from $\mcal{M}(2n)$ onto
$\mcal{M}(2n-2)$ as follows.
If $\mf{m}$ has the block $\{2n-1,2n\}$,
then we define a new perfect matching $D_n(\mf{m}) $
by removing the block $\{2n-1,2n\}$ from $\mf{m}$.
Otherwise, $\mf{m}$ has two blocks $\{s,2n-1\}$ and $\{t,2n\}$
with $1 \le s \not= t \le 2n-2$. We define a new perfect matching
$D_n(\mf{m}) $ by removing these two blocks and by adding
the new one $\{s,t\}$.
Let $\mf{n} \in \mcal{M}(2n-2)$. The inverse image
$(D_n)^{-1} (\mf{n})$ consists of $2n-1$ perfect matchings
\begin{align*}
\mf{n}_n:=& \mf{n} \cup \{ \{2n-1,2n\} \}, \\
\mf{n}_i':=& (\mf{n} \setminus \{\{\mf{n}(2i-1),\mf{n}(2i)\}\}) \cup \{ \{ \mf{n}(2i-1),2n-1\},
\{\mf{n}(2i),2n\} \},\\
\mf{n}_i'':=& (\mf{n} \setminus \{\{\mf{n}(2i-1),\mf{n}(2i)\}\}) \cup \{ \{ \mf{n}(2i),2n-1\},
\{\mf{n}(2i-1),2n\} \},
\end{align*}
where $i=1,2,\dots,n-1$.
It is immediate to see that
$$
\nu_n(\mf{n}_n)=\nu_{n-1}(\mf{n})+1, \qquad
\nu_n(\mf{n}_i')=\nu_n(\mf{n}_i'')=\nu_{n-1}(\mf{n}) \ (i=1,2,\dots,n-1).
$$
Also we obtain
$$
\mf{n}_n P_n= \mf{n} P_n, \qquad \mf{n}_i' P_n= \mf{n} (2i \ 2n-1) P_n,
\qquad \mf{n}_i'' P_n= \mf{n} (2i-1 \ 2n-1)P_n.
$$
Here $\mf{n}$ is regarded as a permutation in $S_{2n}$ via inclusions
$\mcal{M}(2n-2) \subset \mcal{M}(2n) \subset S_{2n}$ (or $\mcal{M}(2n-2) \subset S_{2n-2}
\subset S_{2n})$.
In fact, since
$$
\left( \begin{smallmatrix}
1 & \cdots & 2i-1 & 2i & \cdots & 2n-2 \\
\mf{n}(1) & \cdots & \mf{n}(2i-1) & \mf{n}(2i) & \cdots & \mf{n}(2n-2)
\end{smallmatrix} \right) \cdot
(2i \ 2n-1) =
\left( \begin{smallmatrix}
1 & \cdots & 2i-1 & 2i & \cdots & 2n-2 & 2n-1 & 2n \\
\mf{n}(1) & \cdots & \mf{n}(2i-1) & 2n-1 & \cdots & \mf{n}(2n-2)
& \mf{n}(2i) & 2n
\end{smallmatrix} \right),
$$
we have $\mf{n} \cdot (2i \ 2n-1) H_n = \mf{n}_i' H_n$ as $H_n$-right cosets.
Similarly for others.
Now we prove \eqref{eq:ElementaryJM1} for $n$ by induction.
By the previous paragraph,
the right hand side on \eqref{eq:ElementaryJM1} equals
\begin{align*}
&\sum_{\mf{n} \in \mcal{M}(2n-2)}
\sum_{\begin{subarray}{c} \mf{m} \in (D_n)^{-1}(\mf{n}) \\
\nu_n(\mf{m})=n-k \end{subarray}} \mf{m} P_n \\
=& \sum_{\begin{subarray}{c} \mf{n} \in \mcal{M}(2n-2)\\
\nu_{n-1}(\mf{n})=n-1-k \end{subarray}}\mf{n}_n P_n
+ \sum_{\begin{subarray}{c} \mf{n} \in \mcal{M}(2n-2)\\
\nu_{n-1}(\mf{n})=n-k \end{subarray}}\sum_{i=1}^{n-1} (\mf{n}_i'P_n +\mf{n}_i'' P_n) \\
=& \sum_{\begin{subarray}{c} \mf{n} \in \mcal{M}(2n-2)\\
\nu_{n-1}(\mf{n})=n-1-k \end{subarray}}\mf{n} P_n
+ \sum_{\begin{subarray}{c} \mf{n} \in \mcal{M}(2n-2)\\
\nu_{n-1}(\mf{n})=n-k \end{subarray}}\sum_{j=1}^{2n-2} \mf{n} \cdot (j \ 2n-1) P_n \\
=& \frac{1}{|H_{n-1}|}\sum_{\begin{subarray}{c} \mf{n} \in \mcal{M}(2n-2)\\
\nu_{n-1}(\mf{n})=n-1-k \end{subarray}}\mf{n} P_{n-1} P_n
+ \frac{1}{|H_{n-1}|}\sum_{\begin{subarray}{c} \mf{n} \in \mcal{M}(2n-2)\\
\nu_{n-1}(\mf{n})=n-k \end{subarray}} \mf{n} \cdot P_{n-1}J_{2n-1} P_n.
\end{align*}
Here we used equations
\begin{equation} \label{eq:lemComJn-1}
P_{n-1} P_n = |H_{n-1}| P_n \qquad \text{and} \qquad
P_{n-1}J_{2n-1}=J_{2n-1} P_{n-1}
\end{equation}
at the last equality.
By the induction hypothesis and by using \eqref{eq:lemComJn-1} again,
we continue the calculation
\begin{align*}
=& \frac{1}{|H_{n-1}|} e_{k}(J_1,J_3,\dots,J_{2n-3}) P_{n-1} P_n
+ \frac{1}{|H_{n-1}|} e_{k-1} (J_1,J_3,\dots,J_{2n-3}) P_{n-1} J_{2n-1} P_n \\
=& \left ( e_k(J_1,J_3,\dots,J_{2n-3}) +
e_{k-1}(J_1,J_3,\dots,J_{2n-3}) J_{2n-1} \right) P_n.
\end{align*}
Therefore the result follows from the recurrence
$$e_{k}(x_1,x_2,\dots,x_n)= e_{k}(x_1,\dots,x_{n-1})
+ e_{k-1}(x_1,\dots,x_{n-1})x_n.
$$
\end{proof}
\fi
Recall that
$H_\rho$ is the double coset $H_n \sigma H_n$ of permutations
of coset-type $\rho$, and that $\phi_\rho$
is the formal sum over $H_\rho$ in $\mathbb{C}[S_{2n}]$.
\begin{cor} \label{cor1-Jucys}
For each $0 \le k <n$, we have
\begin{equation} \label{eq:ElementaryJM2}
e_k(J_1,J_3,\dots,J_{2n-1}) \cdot P_n
= \sum_{\begin{subarray}{c} \rho \vdash n \\
\ell(\rho)= n-k \end{subarray}} \phi_\rho.
\end{equation}
This belongs to $\mcal{H}_n$.
Moreover,
$e_k(J_1,J_3,\dots,J_{2n-1}) \cdot P_n
= P_n \cdot e_k(J_1,J_3,\dots,J_{2n-1})$.
\end{cor}
\begin{proof}
By decompositions \eqref{eq:rightcosets} and \eqref{eq:bothsidedecomposition},
the right hand side on \eqref{eq:ElementaryJM1} equals
$$
\sum_{\begin{subarray}{c} \mf{m} \in \mcal{M}(2n) \\
\nu_n(\mf{m})= n-k \end{subarray}} \sum_{\zeta \in H_n} \mf{m} \zeta
=\sum_{\begin{subarray}{c} \sigma \in S_{2n} \\
\nu_n(\sigma)= n-k \end{subarray}} \sigma
=\sum_{\begin{subarray}{c} \rho \vdash n \\
\ell(\rho)= n-k \end{subarray}} \sum_{\sigma \in H_\rho} \sigma,
$$
which implies \eqref{eq:ElementaryJM2}. Let $\iota: \mathbb{C}[S_{2n}] \to \mathbb{C}[S_{2n}]$
be the linear extension of the anti-isomorphism $S_{2n} \ni\sigma \mapsto \sigma^{-1} \in S_{2n}$.
By \eqref{eq:ElementaryJM2}
and by the fact that $H_n \sigma H_n=H_n \sigma^{-1} H_n$ for any $\sigma \in S_{2n}$,
the element $e_k(J_1,J_3,\dots,J_{2n-1}) \cdot P_n$
is invariant under $\iota$.
However, $\iota (e_k(J_1,J_3,\dots,J_{2n-1}) \cdot P_n) =\iota (P_n) \cdot \iota(e_k(J_1,J_3,\dots,J_{2n-1})) = P_n \cdot e_k(J_1,J_3,\dots,J_{2n-1})$.
\end{proof}
Now the fundamental theorem on symmetric polynomials gives
\begin{cor} \label{cor:SymPolyHecke}
For any symmetric function $F$ and positive integer $n$,
$$
F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n = P_n \cdot F(J_1,J_3,\dots,J_{2n-1}),
$$
which belongs to $\mcal{H}_n$.
\end{cor}
We are interested in the expansion of $F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n$
with respect to the bases $\{\omega^\lambda\}$ and $\{\phi_\rho\}$ of $\mcal{H}_n$.
\section{Spherical expansion}
Our purpose in this section is to obtain the expansion of $F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n$
in zonal spherical functions $\omega^\lambda$.
Given $F \in \mbb{S}$ and $\lambda \vdash n$, we put
$$
F(A_\lambda') := F(x_1,x_2,\dots,x_n,0,0,\dots)|_{\{x_1,x_2,\dots,x_n\} =A_\lambda'}
$$
where $A_\lambda'$ was defined in Subsection \ref{subsec:partitions}.
\begin{thm} \label{thm:fOmegaContent}
For any $\lambda \vdash n$ and symmetric function $F$,
$$
F(J_1,J_3,\dots,J_{2n-1}) \cdot \omega^\lambda =
\omega^\lambda \cdot F(J_1,J_3,\dots,J_{2n-1}) = F(A_\lambda') \omega^\lambda.
$$
\end{thm}
\begin{proof}
In this proof, we suppose readers are familiar with standard Young tableaux, see e.g. \cite{Sagan}.
For a partition $\mu$, denote by $\mr{SYT}(\mu)$
the set of all standard Young tableaux of shape $\mu$.
For each standard Young tableau $T \in \mr{SYT}(\mu)$,
let $e_T \in \mathbb{C}[S_{|\mu|}]$ be Young's orthogonal idempotent.
Their definition and properties are seen in \cite{Garsia}.
We use well-known identities
$$
J_k \cdot e_T= e_T \cdot J_k=c(T_k) e_T, \qquad
\sum_{T \in \mr{SYT}(\mu)} e_T = \frac{f^\mu}{|\mu|!} \chi^\mu.
$$
Here $T_k$ is the box $\square=(i_k,j_k)$ in $T$ labelled by $k$
and $c(T_k)$ is its content $j_k-i_k$.
Note $f^\mu = |\mr{SYT}(\mu)|$.
These identities imply that, for each $\mu \vdash 2n$,
\begin{equation} \label{eq:YngIdenF}
F(J_1,J_3,\dots,J_{2n-1}) \cdot \chi^\mu=
\frac{(2n)!}{f^\mu} \sum_{T \in \mr{SYT}(\mu)} F(c(T_1),c(T_3),\dots,c(T_{2n-1}))e_T.
\end{equation}
Let $\lambda \vdash n$.
Given $S =(S[i,j])_{(i,j) \in \lambda} \in \mr{SYT}(\lambda)$, we define the standard Young tableau $S'=(S'[i,j])_{(i,j) \in 2\lambda} \in \mr{SYT}(2\lambda)$ by
$$
S'[i,2j-1]= 2S[i,j]-1, \qquad S'[i,2j]= 2S[i,j], \qquad (i,j) \in \lambda.
$$
Here $S[i,j]$ stands for the entry of $S$ in the box $(i,j)$.
For example, given $S={\footnotesize \young(13,24)} \in \mr{SYT}((2,2))$,
we have $S'={\footnotesize \young(1256,3478)} \in \mr{SYT}((4,4))$.
Proposition 4 in \cite{ZJ} claims that,
given a standard Young tableau $T$ with $2n$ boxes,
$P_n e_T$ is zero unless there is a standard tableau $S$ with $n$ boxes such that
$T=S'$.
We have $c(S'_{2k-1})=c'(S_k)$
by the construction of $S'$.
Hence it follows by \eqref{eq:YngIdenF} that
\begin{align*}
&F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n \cdot \chi^{2\lambda}
= P_n \cdot F(J_1,J_3,\dots,J_{2n-1}) \cdot \chi^{2\lambda} \\
=& \frac{(2n)!}{f^{2\lambda}} \sum_{T \in \mr{SYT}(2\lambda)}
F(c(T_1),c(T_3),\dots,c(T_{2n-1})) P_n \cdot e_T \\
=& \frac{(2n)!}{f^{2\lambda}} \sum_{S \in \mr{SYT}(\lambda)}
F(c'(S_1),c'(S_2),\dots,c'(S_n)) P_n \cdot e_{S'} \\
=& \frac{(2n)!}{f^{2\lambda}} F(A_\lambda') \sum_{S \in \mr{SYT}(\lambda)} P_n \cdot e_{S'}
= \frac{(2n)!}{f^{2\lambda}} F(A_\lambda') \sum_{T \in \mr{SYT}(2\lambda)} P_n \cdot e_{T}
= F(A_\lambda') P_n \cdot \chi^{2\lambda}.
\end{align*}
Hence we obtain the desired formula because of
$\omega^\lambda =(2^n n!)^{-1} P_n \cdot \chi^{2\lambda}$.
\end{proof}
Now we give the explicit expansion of $F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n$
with respect to $\omega^\lambda$.
\begin{cor} \label{cor:SPexp}
For any symmetric function $F$, we have
$$
F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n =
\frac{1}{(2n-1)!!} \sum_{\lambda \vdash n} f^{2\lambda} F(A'_{\lambda}) \omega^\lambda.
$$
In particular, the multiplicity of the identity $\mr{id}_{2n}$ in $F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n$
equals
\begin{equation} \label{eq:ContentEva}
\frac{1}{(2n-1)!!} \sum_{\lambda \vdash n} f^{2\lambda} F(A'_{\lambda}).
\end{equation}
\end{cor}
\begin{proof}
Recall (see (4.8) in \cite{CM})
$$
P_n =\frac{1}{(2n-1)!!} \sum_{\lambda \vdash n} f^{2\lambda} \omega^\lambda.
$$
The claim follows from this identity and Theorem \ref{thm:fOmegaContent} immediately.
\end{proof}
\section{Double coset expansion}
\subsection{Class expansion for $m_\lambda(J_1,J_2,\dots,J_n)$}
\label{subsec:ClassExp}
In this subsection,
we review some results in \cite{MN}.
These should be compared with theorems given in the next subsection.
A permutation $\pi$ in $S_n$ is of \emph{reduced cycle-type} $\mu$ if
$\pi$ is of the (ordinary) cycle-type $\lambda$ and
$\mu=\tilde{\lambda}$.
Here, as defined, $\tilde{\lambda}$ is the reduction of $\lambda$.
Let $\mf{c}_\mu(n)$ be the sum of permutations in $S_{n}$ whose reduced cycle-types are $\mu$.
The element $\mf{c}_\mu(n)$ in $\mathbb{C}[S_n]$ is zero unless $|\mu|+\ell(\mu) \le n$,
and $\{\mf{c}_\mu(n) \ | \ |\mu|+\ell(\mu) \le n\}$ is a basis of the center of $\mathbb{C}[S_n]$.
It is well known that,
for any $F \in \mbb{S}$,
$F(J_1,J_2,\dots,J_n)$ is an element of the center of the group algebra
in $\mathbb{C}[S_n]$,
see e.g. \cite{Jucys,MN}.
We define coefficients $L^\lambda_\mu(n)$
for the monomial symmetric function $m_\lambda$
via
\begin{equation} \label{eq:defL}
m_\lambda(J_1,J_2,\dots,J_n)= \sum_{\mu:|\mu|+\ell(\mu) \le n} L^\lambda_\mu(n) \mf{c}_\mu(n).
\end{equation}
We define a number
$$
\mr{RC}(\lambda)= \frac{|\lambda|!}{(|\lambda|-\ell(\lambda)+1)! \prod_{i \ge 1} m_i(\lambda)!}.
$$
For convenience, we put $\mr{RC}(0)=1$ for the zero partition $(0)$.
We call this the \emph{refined Catalan number}, see \cite[\S 5.1]{MN}.
It is known that $\mr{RC}(\lambda)$ is a positive integer for any $\lambda$.
\begin{thm}[\cite{MN}] \label{thm:MN1}
Let $\lambda,\mu$ be partitions.
Then the following statements hold.
\begin{enumerate}
\item $L^\lambda_\mu(n)$ is a polynomial in $n$.
\item $L^\lambda_\mu(n)$ is zero unless $|\lambda| \ge |\mu|$ and $|\lambda| \equiv |\mu| \pmod{2}$.
\item If $|\lambda|=|\mu|$, then $L^\lambda_\mu=L^\lambda_\mu(n)$ is independent of $n$, and given by
\begin{equation}
L^\lambda_\mu = \sum_{(\lambda^{(1)},\lambda^{(2)},\dots)} \mr{RC}(\lambda^{(1)})
\mr{RC}(\lambda^{(2)}) \cdots
\end{equation}
summed over all sequences of partitions such that
$$
\lambda^{(i)} \vdash \mu_i \ (i \ge 1) \qquad \text{and} \qquad \lambda=\lambda^{(1)} \cup \lambda^{(2)} \cup
\cdots.
$$
\end{enumerate}
\end{thm}
Define coefficients $F^k_\mu(n)$ via
\begin{equation} \label{eq:defF}
h_k(J_1,J_2,\dots,J_n)= \sum_{\mu:|\mu|+\ell(\mu) \le n} F^k_\mu(n) \mf{c}_\mu(n),
\end{equation}
where $h_k$ is the complete symmetric function of degree $k$.
Since $h_k=\sum_{\lambda \vdash k} m_\lambda$, we have
$$
F^k_\mu(n)=\sum_{\lambda \vdash k} L^\lambda_\mu(n).
$$
\begin{thm}[\cite{MN}] \label{thm:MN2}
For $\mu \vdash k$ we have
$$
F^k_\mu(n) = \prod_{i=1}^{\ell(\mu)} \mr{Cat}_{\mu_i}.
$$
Here $\mr{Cat}_k=\frac{1}{k+1} \binom{2k}{k}$ is the Catalan number.
\end{thm}
\subsection{Double coset expansion for $m_\lambda(J_1,J_3,\dots,J_{2n-1}) \cdot P_n$}
As in the case of reduced cycle-types,
we prefer reduced coset-types to ordinary coset-types.
A permutation $\sigma \in S_{2n}$ is of \emph{reduced coset-type} $\mu$
if $\mu=\tilde{\lambda}$ and the ordinary coset-type of $\sigma$ is $\lambda \vdash n$,
i.e., $\Xi_n(\sigma)=\lambda$.
In particular, elements in $H_n$ are of reduced coset-type $(0)$.
If $\mu$ is the reduced coset-type of $\sigma$, we write as $\xi(\sigma)=\mu$.
Define $\psi_\mu(n)$ to be the sum of permutations in $S_{2n}$
whose reduced coset-types are $\mu$. Note that
$\phi_\lambda= \psi_\mu(n)$ if $\lambda \vdash n$ and $\mu= \tilde{\lambda}$,
where $\phi_\lambda$ is defined in Subsection \ref{subsec:CharZonal}.
We have $\psi_\mu(n)=0$ unless $|\mu|+\ell(\mu) \le n$.
The set $\{\psi_\mu(n) \ | \ |\mu|+\ell(\mu) \le n\}$ forms a basis of the Hecke algebra $\mcal{H}_n$.
Corollary \ref{cor1-Jucys} can be rewritten as
\begin{equation}
e_k(J_1,J_3,\dots,J_{2n-1}) \cdot P_n = \sum_{\mu \vdash k} \psi_\mu(n).
\end{equation}
We would like to generalize this formula to any monomial symmetric function $m_\lambda$.
Define coefficients $M^\lambda_\mu(n)$ by
\begin{equation} \label{eq:defM}
m_\lambda(J_1,J_3,\dots,J_{2n-1}) \cdot P_n = \sum_\mu M^\lambda_\mu(n) \psi_\mu(n)
\end{equation}
summed over $\mu$ such that $|\mu|+\ell(\mu) \le n$.
Note that,
by Corollary \ref{cor:SPexp}, the coefficient $M^\lambda_\mu(n)$ is given by
\begin{equation} \label{eq:Msum}
M^\lambda_\mu(n) = \frac{1}{(2n-1)!!} \sum_{\rho \vdash n} f^{2\rho}
\omega^\rho_{\mu+(1^{n-|\mu|})} m_\lambda(A_\rho'),
\end{equation}
where $\omega^\rho_\nu$ is a value of a zonal spherical function
defined in Subsection \ref{subsec:CharZonal}.
The following theorem is our main result for $M^\lambda_\mu(n)$.
\begin{thm} \label{thm:coefM}
Let $\lambda,\mu$ be partitions. Then
\begin{enumerate}
\item $M^\lambda_\mu(n)$ is a polynomial in $n$.
\item We have the inequality
\begin{equation} \label{eq:MLinequality}
M^\lambda_\mu(n) \ge L^\lambda_\mu(n).
\end{equation}
\item $M^\lambda_\mu(n)$ is zero unless $|\lambda| \ge |\mu|$.
\item If $|\lambda|=|\mu|$, then we have $M^\lambda_\mu(n)=L^\lambda_\mu$.
Here $L^\lambda_\mu$ is given in Theorem \ref{thm:MN1}.
In particular, $M^\lambda_\mu(n)$ is independent of $n$ in this case.
\end{enumerate}
\end{thm}
Define coefficients $G^k_\mu(n)$ via
\begin{equation} \label{eq:defG}
h_k(J_1,J_3,\dots,J_{2n-1})\cdot P_n= \sum_{\mu:|\mu|+\ell(\mu) \le n} G^k_\mu(n) \psi_\mu(n),
\end{equation}
or
$$
G^k_\mu(n)=\sum_{\lambda \vdash k} M^\lambda_\mu(n).
$$
\begin{thm} \label{thm:coefG}
For $\mu \vdash k$, we have $G^k_\mu(n)=F^k_\mu(n)=\prod_{i=1}^{\ell(\mu)} \mr{Cat}_{\mu_i}$.
\end{thm}
We will prove these theorems except
part 1 of Theorem \ref{thm:coefM} in the coming section.
The remaining statement will be proved in Section \ref{sec:JackDeform}
by applying the theory of shifted symmetric functions.
\section{Proof of Theorem \ref{thm:coefM} and Theorem \ref{thm:coefG}}
\subsection{Proof of part 2 of Theorem \ref{thm:coefM}}
\label{subsec:Proof2}
Recall that quantities $L^\lambda_\mu(n)$ and $M^\lambda_\mu(n)$ are
defined by \eqref{eq:defL} and \eqref{eq:defM}, respectively.
Let $\mu$ be a partition and let $n$ be a positive integer such that $n \ge |\mu|+\ell(\mu)$.
We define a permutation $\pi_\mu \in S_{n}$ and
a pair partition $\mf{m}_\mu \in \mcal{M}(2n)$ as follows.
\begin{align*}
\pi_\mu=& (1 \ 2 \ \dots \ \mu_1+1)(\mu_1+2 \ \mu_1+3 \ \dots \ \mu_1+\mu_2+2) \cdots, \\
\mf{m}_\mu=&\{ \{1, 2\mu_1+2\}, \{2,3\},\dots,\{2\mu_1,2\mu_1+1\}, \\
& \quad \{2\mu_1+3,2(\mu_1+\mu_2)+4\}, \{2\mu_1+4,2\mu_1+5\},\dots,\{2(\mu_1+\mu_2)+2,2(\mu_1+\mu_2)+3\},\dots \}.
\end{align*}
For example, if $\mu=(2,1)$, we have
\begin{align*}
\pi_{(2,1)}=&(1 \ 2 \ 3)(4 \ 5)(6)(7) \cdots(n) \\
\mf{m}_{(2,1)}=&\{\{1,6\},\{2,3\},\{4,5\},\{7,10\},\{8,9\},
\{11,12\},\dots,\{2n-1,2n\} \}.
\end{align*}
By construction, the reduced cycle-type of $\pi_\mu$ is $\mu$,
and the reduced coset-type of $\mf{m}_\mu$ is also $\mu$: $\xi(\mf{m}_\mu)=\mu$.
Note that $\mf{m}_{(0)}=\{\{1,2\},\{3,4\},\dots,\{2n-1,2n\}\}$.
Define the action $\mf{L}$ of $S_{2n}$ on $\mcal{M}(2n)$ by
$$
\mf{L}(\sigma) \mf{m}= \left\{
\{\sigma(\mf{m}(1)), \sigma(\mf{m}(2))\}, \dots \dots,
\{\sigma(\mf{m}(2n-1)), \sigma(\mf{m}(2n))\}
\right\}, \qquad (\sigma \in S_{2n}, \ \mf{m} \in \mcal{M}(2n)).
$$
Note $\mf{L}(\sigma)\mf{m}_{(0)} = \mf{m}$ if and only if $\sigma \in \mf{m} H_n$.
\begin{lem} \label{lem:NumberMdef}
Let $\lambda, \mu$ be partitions and let $n \ge |\mu|+\ell(\mu)$.
Fix $\mf{l} \in \mcal{M}(2n)$ of reduced coset-type $\mu$.
(In particular, we may take $\mf{l}=\mf{m}_\mu$.)
Then we have
$$
M^\lambda_\mu(n)= \sum_{
\begin{subarray}{c} \sigma \in S_{2n} \\ \mf{L}(\sigma) \mf{m}_{(0)}=\mf{l}
\end{subarray}} [\sigma] m_\lambda(J_1,J_3,\dots,J_{2n-1}),
$$
where $[\sigma] w$ denotes
the multiplicity of $\sigma$ in $w \in \mathbb{C}[S_{2n}]$.
In particular, $M^\lambda_\mu(n)$ is a non-negative integer.
\end{lem}
\begin{proof}
From the coset decomposition \eqref{eq:rightcosets}, we have
\begin{align*}
m_\lambda(J_1,J_3,\dots,J_{2n-1}) \cdot P_n
=& \sum_{\sigma \in S_{2n}}
([\sigma]m_\lambda(J_1,J_3,\dots,J_{2n-1})) \sigma P_n \\
=& \sum_{ \mf{m} \in \mcal{M}(2n)} \sum_{\sigma \in \mf{m} H_n}
([\sigma]m_\lambda(J_1,J_3,\dots,J_{2n-1})) \mf{m} P_n \\
=& \sum_\mu \sum_{\begin{subarray}{c} \mf{m} \in \mcal{M}(2n) \\ \xi(\mf{m})=\mu
\end{subarray}}
\sum_{\sigma \in \mf{m} H_n} ([\sigma] m_\lambda(J_1,J_3,\dots,J_{2n-1}))\mf{m} P_n.
\end{align*}
Since
$$
\psi_\mu(n) = \sum_{\begin{subarray}{c} \sigma \in S_{2n} \\
\xi(\sigma)=\mu \end{subarray}}\sigma=
\sum_{\begin{subarray}{c} \mf{m} \in \mcal{M}(2n) \\ \xi(\mf{m})=\mu \end{subarray}}
\mf{m}P_n,
$$
it follows from \eqref{eq:defM} that for each $\mu$,
$$
\sum_{\begin{subarray}{c} \mf{m} \in \mcal{M}(2n) \\ \xi(\mf{m})=\mu
\end{subarray}}
\sum_{\sigma \in \mf{m} H_n} ([\sigma] m_\lambda(J_1,J_3,\dots,J_{2n-1}))\mf{m} P_n
= M^\lambda_\mu(n)
\sum_{\begin{subarray}{c} \mf{m} \in \mcal{M}(2n) \\ \xi(\mf{m})=\mu \end{subarray}}
\mf{m}P_n
$$
so that
$M^\lambda_\mu(n)= \sum_{\sigma \in \mf{l} H_n} [\sigma] m_\lambda(J_1,J_3,\dots,J_{2n-1})$. This implies the desired claim.
\end{proof}
Let $(i_1,\dots,i_k)$ be a weakly increasing sequence of $k$ positive integers.
The sequence $(i_1,\dots,i_k)$ is of \emph{type} $\lambda \vdash k$ if $\lambda=(\lambda_1,\lambda_2,\dots)$
is a permutation of $(b_1,b_2,\dots)$,
where $b_p$ is the multiplicity of $p$ in $(i_1,\dots,i_k)$.
For $\lambda \vdash k$, the monomial symmetric polynomial is expressed as
$$
m_\lambda(x_1,x_2,\dots,x_n)= \sum_{\begin{subarray}{c} 1 \le t_1 \le t_2 \le \dots \le t_k \le n \\
(t_1,t_2, \dots,t_k): \text{type $\lambda$} \end{subarray}}
x_{t_k}\cdots x_{t_2} x_{t_1}.
$$
Given partitions $\lambda,\mu$ with $|\lambda|=k$, we denote by $A_n(\lambda,\mu)$
the set of
sequences $(u_1,v_1,u_2,v_2,\dots,u_k,v_k)$ of positive integers, satisfying
the following conditions:
\begin{itemize}
\item $(v_1,v_2,\dots,v_k)$ is of type $\lambda$ and $2 \le v_1 \le v_2 \le \dots \le v_k \le n$;
\item $u_i <v_i$ for all $1 \le i \le k$;
\item The product of transpositions $(u_k \ v_k) \cdots (u_2 \ v_2) (u_1 \ v_1)$
coincides with $\pi_\mu$.
\end{itemize}
We also denote by $B_n(\lambda,\mu)$ the set of the sequences $(s_1,t_1,\dots,s_k,t_k)$ satisfying
\begin{itemize}
\item $(t_1,t_2,\dots,t_k)$ consists of odd numbers and is
of type $\lambda$, and $3 \le t_1 \le t_2 \le \dots \le t_k \le 2n-1$;
\item $s_i <t_i$ for all $1 \le i \le k$;
\item $\mf{L}((s_k \ t_k) \cdots (s_2 \ t_2) (s_1 \ t_1)) \mf{m}_{(0)}=\mf{m}_\mu$.
\end{itemize}
By the definitions of $L^\lambda_\mu(n)$ and Lemma \ref{lem:NumberMdef}, we have
$$
L^\lambda_\mu(n)=|A_n(\lambda,\mu)|, \qquad M^\lambda_\mu(n)=|B_n(\lambda,\mu)|.
$$
Now the map
$$
(u_1,v_1,u_2,v_2,\dots,u_k,v_k) \mapsto (2u_1-1, 2v_1-1, 2u_2-1,2v_2-1,\dots, 2u_k-1,2v_k-1)
$$
gives an injection from $A_n(\lambda,\mu)$ to $B_n(\lambda,\mu)$.
Indeed, suppose $(u_1,v_1,\dots,u_k,v_k)$ is an element of $A_n(\lambda,\mu)$.
Then $\sigma:=(2u_k-1 \ 2v_k-1) \cdots (2u_1-1 \ 2v_1-1)$ permutes only odd-numbered letters,
and
$\sigma (2j-1) = 2 \pi_\mu(j)-1$ for any $j$.
Since $\pi_\mu$ has the cycle $(1 \ 2 \ \dots \ \mu_1+1)$,
the perfect matching $\mf{L}(\sigma) \mf{m}_{(0)}$ has blocks
$\{3,2\},\{5,4\},\dots,\{2\mu_1+1,2\mu_1\}$ and $\{1,2(\mu_1+1)\}$,
which are the first $\mu_1+1$ blocks of $\mf{m}_\mu$.
Thus, we obtain $\mf{L}(\sigma) \mf{m}_{(0)}=\mf{m}_\mu$.
This injection gives $|A_n(\lambda,\mu)| \le |B_n(\lambda,\mu)|$, that is,
$L^\lambda_\mu(n) \le M^\lambda_\mu(n)$.
\subsection{Proof of part 3 of Theorem \ref{thm:coefM}} \label{subsec:Proof3}
The discussion in this subsection is parallel to \cite[\S 5.3]{MN}.
From now on, we suppose that $n$ is sufficiently large.
In Subsection \ref{subsec:PM}, we consider the inclusion $\mcal{M}(2n-2) \subset \mcal{M}(2n)$.
Under this inclusion, the reduced coset-types are invariant.
Given $\mf{m} \in \mcal{M}(2n)$, we define the set $\mcal{S}(\mf{m})$ by
$$
\mcal{S}(\mf{m}) =\big\{ j \in \{1,2,\dots,n\} \ \big| \
\text{$\{\mf{m}(2j-1),\mf{m}(2j)\} \not= \{2k-1,2k\}$ for any $k \ge 1$} \big\}.
$$
If the reduced coset-type of $\mf{m}$ is $\mu$, then
$|\mcal{S}(\mf{m})|=|\mu|+\ell(\mu)$.
For a real number $x$, we put $\lceil x \rceil= \min \{n \in \mathbb{Z} \ | \ x \le n \}$.
Given a positive integer $s$, define $s^{\circ}$ by
$$
s^{\circ} = \begin{cases}
s+1 & \text{if $s$ is odd}, \\
s-1 & \text{if $s$ is even}.
\end{cases}
$$
Equivalently, $s^{\circ}$ is the unique integer satisfying
$\{s,s^{\circ}\} \in \mf{m}_{(0)}$. We have $s=t^{\circ}$ if and only if $t=s^{\circ}$.
We use the notations in Example \ref{ex:GraphGamma}.
Given $\mf{m} \in \mcal{M}(2n)$ and integers $1 \le i<j \le 2n$,
the symbol $i \Leftrightarrow j$ stands for $\{i,j\} \in \mf{m}$.
Also, $i \leftrightarrow j$ stands for $j=i^{\circ}$.
A part of a component of the graph $\Gamma(\mf{m})$
$$
i_1 \leftrightarrow i_2 \Leftrightarrow i_3 \leftrightarrow \dots
$$
is called a \emph{piece} of $\Gamma(\mf{m})$.
For example, $1 \leftrightarrow 2 \Leftrightarrow 6 \leftrightarrow 5$ is a piece of $\Gamma(\{\{1,4\},\{2,6\},\{3,5\}\})$.
If $A$ is an empty piece, the piece $ i \Leftrightarrow A \Leftrightarrow j $
stands for the piece $i \Leftrightarrow j$ simply.
\begin{lem}
Given an $\mf{m} \in \mcal{M}(2n)$ and transposition $(s\ t)$,
let $\mf{n}=\mf{L}((s \ t)) \mf{m}$.
Suppose that $\lambda=(\lambda_1,\lambda_2,\dots)$ and
$\mu=(\mu_1,\mu_2,\dots)$ are the reduced coset-types of $\mf{m}$ and $\mf{n}$,
respectively.
Then either $|\mu|= |\lambda|-1$, $|\mu|= |\lambda|+1$, or $\mu=\lambda$ holds.
Furthermore, if $|\mu|=|\lambda|+1$, then
$\mcal{S}(\mf{n}) = \mcal{S}(\mf{m}) \cup \{ \lceil \tfrac{s}{2} \rceil,
\lceil \tfrac{t}{2} \rceil \}$, and
vertices $s,t$ belong to the same component of $\Gamma(\mf{n})$.
\end{lem}
\begin{proof}
First, suppose $\lceil \tfrac{s}{2} \rceil=\lceil \tfrac{t}{2} \rceil$, i.e.,
$t=s^{\circ}$.
There exist (possibly empty) pieces $A, B$ such that
$A \Leftrightarrow s \leftrightarrow t \Leftrightarrow B$ is a piece of $\Gamma(\mf{m})$,
and then $\Gamma(\mf{n})$ has the piece
$A \Leftrightarrow t \leftrightarrow s \Leftrightarrow B$.
Therefore we have $\lambda=\mu$.
From now, we suppose $\lceil \tfrac{s}{2} \rceil\not=\lceil \tfrac{t}{2} \rceil$,
and so $s,s^{\circ},t$, and $t^{\circ}$ are distinct.
Then the following five cases may occur:
\begin{itemize}
\item[(i)] $|\mcal{S}(\mf{m}) \cap \{\lceil \tfrac{s}{2} \rceil, \lceil \tfrac{t}{2} \rceil \}|=0$.
\item[(ii)] $|\mcal{S}(\mf{m}) \cap \{\lceil \tfrac{s}{2} \rceil, \lceil \tfrac{t}{2} \rceil \}|=1$.
\item[(iii)] $\{\lceil \tfrac{s}{2} \rceil, \lceil \tfrac{t}{2} \rceil \} \subset \mcal{S}(\mf{m})$
and $s,t$ belong to different components of $\Gamma(\mf{m})$.
\item[(iv)] $\Gamma(\mf{m})$ has a component of the form
\begin{equation} \label{eq:componentst1}
s \leftrightarrow s^{\circ} \Leftrightarrow A \Leftrightarrow t \leftrightarrow t^{\circ} \Leftrightarrow B \Leftrightarrow s.
\end{equation}
\item[(v)] $\Gamma(\mf{m})$ has a component of the form
\begin{equation} \label{eq:componentst2}
s \leftrightarrow s^{\circ} \Leftrightarrow C \Leftrightarrow t^{\circ} \leftrightarrow t \Leftrightarrow D \Leftrightarrow s.
\end{equation}
\end{itemize}
Here $A, B, C$ and $D$ are possibly empty pieces.
For each case, we shall compare $\Gamma(\mf{n})=\Gamma(\mf{L}((s \ t)) \mf{m})$
with $\Gamma(\mf{m})$.
In the case (i),
the graph $\Gamma(\mf{m})$ has components $s\leftrightarrow s^{\circ} \Leftrightarrow s$ and
$t \leftrightarrow t^{\circ} \Leftrightarrow t$, and
the graph $\Gamma(\mf{n})$ has the new component
$s \leftrightarrow s^{\circ} \Leftrightarrow t \leftrightarrow t^{\circ} \Leftrightarrow s$.
Thus $\mu = \lambda \cup (1)$.
In the case (ii), we may suppose $\mcal{S}(\mf{m}) \cap \{\lceil \tfrac{s}{2} \rceil, \lceil \tfrac{t}{2} \rceil \}=\{ \lceil \tfrac{s}{2} \rceil\}$.
A piece $A \Leftrightarrow s \leftrightarrow s^{\circ} \Leftrightarrow B$ of $\Gamma(\mf{m})$ with some pieces $A,B$
becomes
the piece $A \Leftrightarrow t \leftrightarrow t^{\circ} \Leftrightarrow s \leftrightarrow s^{\circ} \Leftrightarrow B$ of $\Gamma(\mf{n})$.
Therefore $\mu$ has a part equal to $\lambda_j+1$.
In particular, $\mcal{S}(\mf{n}) = \mcal{S}(\mf{m}) \cup \{ \lceil \tfrac{s}{2} \rceil,
\lceil \tfrac{t}{2} \rceil \}$.
In case (iii), $\Gamma(\mf{m})$ has two components of the forms
$s \leftrightarrow s^{\circ} \Leftrightarrow A \Leftrightarrow s$ and
$t \leftrightarrow t^{\circ} \Leftrightarrow B \Leftrightarrow t$,
where $A,B$ are non-empty pieces.
Then $\Gamma(\mf{n})$ has the combined component
$$
s \leftrightarrow s^{\circ} \Leftrightarrow A \Leftrightarrow t \leftrightarrow t^{\circ} \Leftrightarrow B \Leftrightarrow s.
$$
Therefore a certain part $\mu_m$ of $\mu$ equals $\lambda_i+\lambda_j+1$
for some $1 \le i<j \le \ell(\lambda)$.
We also see that $\{\lceil \tfrac{s}{2} \rceil, \lceil \tfrac{t}{2} \rceil \} \subset \mcal{S}(\mf{m}) =\mcal{S}(\mf{n})$.
In case (iv), $\Gamma(\mf{n})$ has divided components
$$
s \leftrightarrow s^{\circ} \Leftrightarrow A \Leftrightarrow s \qquad \text{and} \qquad
t \leftrightarrow t^{\circ} \Leftrightarrow B \Leftrightarrow t.
$$
Therefore there are $\mu_i$ and $\mu_j$ equal to $r-1$ and $\lambda_m-r$
for some $\lambda_m$ and $1 \le r \le \lambda_m$.
In particular, $|\mu|=|\lambda|-1$.
In case (v),
$\Gamma(\mf{n})$ has the component
$$
s \leftrightarrow s^{\circ} \Leftrightarrow C \Leftrightarrow t^{\circ} \leftrightarrow t \Leftrightarrow D^\vee \Leftrightarrow s.
$$
Here, if $D$ is the piece $i_1 \leftrightarrow i_2 \Leftrightarrow \cdots \leftrightarrow i_{2p}$ then
$D^\vee$ is the piece $i_{2p} \leftrightarrow \cdots \Leftrightarrow i_2 \leftrightarrow i_1$.
In this case, $\lambda=\mu$.
Note that $|\mu|=|\lambda|+1$ holds exactly in the cases (i), (ii), and (iii).
The remaining claims have been verified in the discussion above.
\end{proof}
\begin{cor} \label{cor:numbertrans}
Let $\mu$ be the reduced coset-type of $\mf{m} \in \mcal{M}(2n)$.
Suppose that there exist $p$ transpositions $(s_1 \ t_1), \dots, (s_p \ t_p)$
satisfying
$\mf{L}( (s_p\ t_p) \cdots (s_1 \ t_1) ) \mf{m}_{(0)} = \mf{m}$.
Then $|\mu| \le p$.
\end{cor}
\begin{cor} \label{cor:BestTrans}
Let $\mu \vdash p$ and let $\mf{m} \in \mcal{M}(2n)$ be of reduced coset-type $\mu$.
Suppose that there exist $p$ transpositions $(s_1 \ t_1), \dots, (s_p \ t_p)$
satisfying
$\mf{L}( (s_p \ t_p) \cdots (s_1 \ t_1) ) \mf{m}_{(0)} = \mf{m}$.
Then $\mcal{S}(\mf{m})=\{
\lceil \tfrac{s_1}{2} \rceil, \lceil \tfrac{t_1}{2} \rceil,
\dots, \lceil \tfrac{s_p}{2} \rceil, \lceil \tfrac{t_p}{2} \rceil \}$.
Furthermore, for each $i$, the vertices $s_i,t_i$ belong to the same component of
$\Gamma(\mf{m})$.
\end{cor}
Since $m_\lambda(J_1,J_3,\dots,J_{2n-1})$ is
a sum of products of $|\lambda|$ transpositions,
part 3 of Theorem \ref{thm:coefM} follows from Corollary \ref{cor:numbertrans}
together with Lemma \ref{lem:NumberMdef}.
\subsection{Proof of Theorem \ref{thm:coefG}}
Recall that quantities $F^k_\mu(n)$ and $G^k_\mu(n)$ are
defined by \eqref{eq:defF} and \eqref{eq:defG}, respectively.
Let $\mf{m}, \mf{n} \in \mcal{M}(2n)$ and
suppose $\mcal{S}(\mf{m}) \cap \mcal{S}(\mf{n}) =\emptyset$.
Denote by $\tilde{\mf{m}}$ the perfect matching on
$\bigsqcup_{i \in \mcal{S}(\mf{m})} \{2i-1,2i\}$ obtained as
the union of
non-trivial blocks of $\mf{m}$.
Clearly, $\mcal{S}(\mf{m})=\mcal{S}(\tilde{\mf{m}})$.
We define the new perfect matching $\mf{m} \cup \mf{n} \in \mcal{M}(2n)$ by
$$
\mf{m} \cup \mf{n} = \tilde{\mf{m}} \sqcup \tilde{\mf{n}} \sqcup
\{\{ 2i-1,2i \} \ | \ i \not\in \mcal{S}(\mf{m}) \sqcup \mcal{S}(\mf{n}) \}.
$$
If $\lambda$ and $\mu$ are the reduced coset-types of $\mf{m}$ and $\mf{n}$, respectively,
then the reduced coset-type of $\mf{m} \cup \mf{n}$ is $\lambda \cup \mu$.
\begin{example}
For
\begin{align*}
\mf{m}=&\{ \{1,5\},\{3,4\},\{2,6\}, \{7,8\},\{9,10\},\{11,12\},\dots,\{2n-1,2n\}\}, \\
\mf{n}=&\{\{1,2\},\{3,4\},\{5,6\}, \{7,10\},\{8,9\},\{11,12\},\dots,\{2n-1,2n\}\},
\end{align*}
we have
$$
\mf{m} \cup \mf{n}= \{\{1,5\},\{2,6\},\{3,4\},\{7,10\},\{8,9\},\{11,12\},\dots,\{2n-1,2n\} \}.
$$
The reduced coset-types of $\mf{m}$, $\mf{n}$, and $\mf{m} \cup \mf{n}$
are $(1)$, $(1)$, and $(1,1)$, respectively.
\end{example}
\begin{lem} \label{lem:PMdecomCoe}
Let $\mf{n}^{(1)}, \mf{n}^{(2)}\in \mcal{M}(2n)$
such that $k<l$ for all $k \in \mcal{S}(\mf{n}^{(1)})$ and
$l \in \mcal{S}(\mf{n}^{(2)})$.
Suppose that the reduced coset-types of $\mf{n}^{(i)}$ have
sizes $r_i$ \ ($i=1,2$).
Also, there exist $r$ transpositions $(s_1 \ t_1),\dots, (s_r \ t_r)$
satisfying
$\mf{L}((s_r \ t_r) \cdots (s_1 \ t_1)) \mf{m}_{(0)} =\mf{n}^{(1)} \cup \mf{n}^{(2)}$,
where $r=r_1+r_2, s_i<t_i \ (1 \le i \le r)$, and
$t_r \ge \cdots \ge t_1$.
Then
$$
\mf{n}^{(1)}= \mf{L}( (s_{r_1} \ t_{r_1}) \cdots (s_1 \ t_1)) \mf{m}_{(0)},
\qquad \mf{n}^{(2)}= \mf{L}( (s_{r} \ t_{r}) \cdots (s_{r_1+1} \ t_{r_1+1})) \mf{m}_{(0)}
$$
and
$$
\mcal{S}(\mf{n}^{(1)}) = \{
\lceil \tfrac{s_1}{2} \rceil, \lceil \tfrac{t_1}{2} \rceil,
\dots, \lceil \tfrac{s_{r_1}}{2} \rceil, \lceil \tfrac{t_{r_1}}{2} \rceil \},
\qquad
\mcal{S}(\mf{n}^{(2)})=\{
\lceil \tfrac{s_{r_1+1}}{2} \rceil, \lceil \tfrac{t_{r_1+1}}{2} \rceil,
\dots, \lceil \tfrac{s_r}{2} \rceil, \lceil \tfrac{t_r}{2} \rceil \}.
$$
\end{lem}
\begin{proof}
By Corollary \ref{cor:BestTrans}, we see
$\mcal{S}(\mf{n}^{(1)}) \sqcup \mcal{S}(\mf{n}^{(2)}) =
\mcal{S}(\mf{n}^{(1)} \cup \mf{n}^{(2)})=\{
\lceil \tfrac{s_1}{2} \rceil, \lceil \tfrac{t_1}{2} \rceil,
\dots, \lceil \tfrac{s_r}{2} \rceil, \lceil \tfrac{t_r}{2} \rceil \}$.
Since $t_i$ are not decreasing, there exists an integer $p$ such that
$\{\lceil \tfrac{t_1}{2} \rceil,
\dots, \lceil \tfrac{t_p}{2} \rceil \} \subset \mcal{S}(\mf{n}^{(1)})$
and $\{\lceil \tfrac{t_{p+1}}{2} \rceil,
\dots, \lceil \tfrac{t_r}{2} \rceil \} \subset \mcal{S}(\mf{n}^{(2)})$.
Furthermore, applying Corollary \ref{cor:BestTrans} again,
we see that $s_i,t_i$ belong to the same component of
$\Gamma(\mf{n}^{(1)} \cup \mf{n}^{(2)})$,
and so that $\mcal{S}(\mf{n}^{(1)})=\{
\lceil \tfrac{s_1}{2} \rceil, \lceil \tfrac{t_1}{2} \rceil,
\dots, \lceil \tfrac{s_p}{2} \rceil, \lceil \tfrac{t_p}{2} \rceil \}$
and
$\mcal{S}(\mf{n}^{(2)})=\{
\lceil \tfrac{s_{p+1}}{2} \rceil, \lceil \tfrac{t_{p+1}}{2} \rceil,
\dots, \lceil \tfrac{s_r}{2} \rceil, \lceil \tfrac{t_r}{2} \rceil \}$.
Put $\rho^{(1)}= (s_p \ t_p) \cdots (s_1 \ t_1)$ and
$\rho^{(2)}= (s_r \ t_r) \cdots (s_{p+1} \ t_{p+1})$.
Since $\{s_1,t_1,\dots,s_p,t_p\} \cap \{s_{p+1},t_{p+1},\dots,s_r,t_r\}=\emptyset$,
we have
$\mf{n}^{(1)} \cup \mf{n}^{(2)} =\mf{L}(\rho^{(2)} \rho^{(1)})\mf{m}_{(0)} =
\mf{L}(\rho^{(1)})\mf{m}_{(0)}
\cup \mf{L}(\rho^{(2)})\mf{m}_{(0)}$
and
\begin{align*}
\mcal{S}(\mf{L}(\rho^{(1)})\mf{m}_{(0)}) =& \{
\lceil \tfrac{s_1}{2} \rceil, \lceil \tfrac{t_1}{2} \rceil,
\dots, \lceil \tfrac{s_p}{2} \rceil, \lceil \tfrac{t_p}{2} \rceil \}
=\mcal{S}(\mf{n}^{(1)}), \\
\mcal{S}(\mf{L}(\rho^{(2)})\mf{m}_{(0)}) =& \{
\lceil \tfrac{s_{p+1}}{2} \rceil, \lceil \tfrac{t_{p+1}}{2} \rceil,
\dots, \lceil \tfrac{s_r}{2} \rceil, \lceil \tfrac{t_r}{2} \rceil \}
=\mcal{S}(\mf{n}^{(2)}).
\end{align*}
Hence $\mf{n}^{(i)}= \mf{L}(\rho^{(i)})\mf{m}_{(0)}$ \ $(i=1,2)$.
In particular, the size of the reduced coset-type of $\mf{L}(\rho^{(i)})\mf{m}_{(0)}$
is $r_i$ \ $(i=1,2)$.
On the other hand, Corollary \ref{cor:numbertrans} and the definition of $\rho^{(i)}$ imply that
$r_1 \le p$ and $r_2 \le r-p$.
But $r=r_1+r_2$ so that $p=r_1$.
\end{proof}
Given a positive integer $k$ and a perfect matching $\mf{l} \in \mcal{M}(2n)$,
we define $\mcal{B}_n(k,\mf{l})$ by
the set of all sequences $(s_1,t_1,s_2,t_2,\dots,s_k,t_k)$ of positive integers,
satisfying the
following conditions:
\begin{itemize}
\item All of $t_i$ are odd and $3 \le t_1 \le \cdots \le t_{k} \le 2n-1$;
\item $s_i <t_i$ for all $i$;
\item $\mf{L}((s_k \ t_k) \cdots (s_1 \ t_1))\mf{m}_{(0)}= \mf{l}$.
\end{itemize}
Remark that the set $\mcal{B}_n(k,\mf{m}_\mu)$ coincides with the union
$\bigsqcup_{\lambda \vdash k} B_n(\lambda,\mu)$,
where $B_n(\lambda,\mu)$ was defined in Subsection \ref{subsec:Proof2}.
Also we define the set $\mcal{B}(k,\mf{l})$
as the subset of $\mcal{B}_n(k,\mf{l})$ which consists of
sequences satisfying
$$
\{\lceil \tfrac{t_1}{2} \rceil, \dots, \lceil \tfrac{t_k}{2} \rceil \} \subset \mcal{S}(\mf{l})
\qquad \text{and} \qquad t_k= 2 a-1,
$$
where $a$ is the maximum of $\mcal{S}(\mf{l})$.
\begin{lem} \label{lem:coeGn}
Let $\mu \vdash k$ and let $\mf{l}$ be a perfect matching of reduced coset-type $\mu$.
Then $\mcal{B}_n(k,\mf{l}) = \mcal{B}(k,\mf{l})$ and
$G^k_\mu(n)= |\mcal{B}(k,\mf{l})|$.
In particular, both $\mcal{B}_n(|\mu|,\mf{l})$ and $G^k_\mu(n)$ are independent of $n$.
\end{lem}
\begin{proof}
Let $(s_1,t_1,\dots,s_k,t_k)$ be an element in $\mcal{B}_n(k,\mf{l})$.
Then by Corollary \ref{cor:BestTrans}, we have
$$
\{\lceil \tfrac{s_1}{2} \rceil, \lceil \tfrac{t_1}{2} \rceil,
\dots, \lceil \tfrac{s_k}{2} \rceil, \lceil \tfrac{t_k}{2} \rceil
\}=\mcal{S}(\mf{l}).
$$
Therefore $t_k=2 a-1$ with $a= \max \mcal{S}(\mf{l})$.
Hence $(s_1,t_1,\dots,s_k,t_k) \in \mcal{B}(k,\mf{l})$, and so
$\mcal{B}(k,\mf{l})=\mcal{B}_n(k,\mf{l})$.
Also we have $G^k_\mu(n)=|\mcal{B}_n(k,\mf{l})|$
from Lemma \ref{lem:NumberMdef}.
\end{proof}
\begin{lem} \label{lem:Gdecomp}
Let $\mu \vdash k$.
Then
$G^k_\mu(n)= \prod_{i=1}^{\ell(\mu)} G^{\mu_i}_{(\mu_i)}(n)$.
\end{lem}
\begin{proof}
We proceed by induction on $\ell(\mu)=l$.
If $l=1$, then our claim is trivial. Assume $l>1$.
The perfect matching $\mf{m}_\mu$ may be uniquely expressed as $\mf{m}_\mu=\mf{m}_{\nu}
\cup \mf{n}$,
where $\nu=(\mu_1,\mu_2,\dots,\mu_{l-1})$, and
$\mf{n}$ is the perfect matching such that
$\mcal{S}(\mf{n})=\{\mu_1+\cdots+\mu_{l-1}+l+j \ | \ 0 \le j \le \mu_l\}$.
Let $(s_1,t_1,\dots,s_k,t_k)$ be a sequence in $\mcal{B}(k,\mf{m}_\mu)$.
By Lemma \ref{lem:PMdecomCoe},
this sequence satisfies
$$
\mf{L}( (s_{k-\mu_l} \ t_{k-\mu_l}) \dots (s_1 \ t_1)) = \mf{m}_{\nu}, \qquad
\mf{L}( (s_{k} \ t_{k}) \dots (s_{k-\mu_l+1} \ t_{k-\mu_l+1})) = \mf{n},
$$
and
\begin{gather*}
3 \le t_1 \le \cdots \le t_{k-\mu_l} = 2(\mu_1+\cdots+\mu_{l-1}+l-1)-1, \\
2(\mu_1+\cdots+\mu_{l-1}+l)+1 \le t_{k-\mu_l+1} \le \cdots \le t_k = 2(k+l)-1.
\end{gather*}
Therefore $(s_1,t_1,\dots,s_{k-\mu_l},t_{k-\mu_l})$ belongs to $\mcal{B}(k-\mu_l,\mf{m}_\nu)$, and
$(s_{k-\mu_l+1}, t_{k-\mu_l+1}, \dots, t_{k})$
belongs to $\mcal{B}(\mu_l,\mf{n})$.
This gives a bijection between
$\mcal{B}(k,\mf{m}_\mu)$ and $\mcal{B}(k-\mu_l,\mf{m}_\nu) \times \mcal{B} (\mu_l,\mf{n})$.
Hence the claim follows from Lemma \ref{lem:coeGn} and the assumption of the induction.
\end{proof}
\begin{lem}
$G^{k}_{(k)}(n)= \mr{Cat}_k$.
\end{lem}
\begin{proof}
We prove by induction on $k$.
Assume that for any $0 \le q <k$ it holds that $G^q_{(q)}(n)=\mr{Cat}_q$.
Let $(s_1,t_1,\dots,s_k,t_k)$ be an element of $\mcal{B}(k,\mf{m}_{(k)})$.
Then $t_k=2k+1$.
Put $p=s_k$ and $\mf{n}=\mf{L} ((s_{k-1} \ t_{k-1}) \cdots (s_1 \ t_1)) \mf{m}_{(0)}$.
Note that $\mf{n}= \mf{L}( (p \ 2k+1))\mf{m}_{(k)}$
and that the reduced coset-type of $\mf{n}$
is of size $k-1$.
Suppose $p$ is even, say $p=2q$ with $1 \le q \le k$.
Since $\mf{m}_{(k)}=\{\{1,2k+2\},\{2,3\},\dots,\{2k,2k+1\}\}$,
the graph $\Gamma( \mf{L}( (2q \ 2k+1))\mf{m}_{(k)})$ has
only one non-trivial component
$$
1 \leftrightarrow 2 \Leftrightarrow 3 \leftrightarrow 4 \Leftrightarrow \dots \leftrightarrow 2q \Leftrightarrow 2k \leftrightarrow 2k-1 \Leftrightarrow 2k-2 \leftrightarrow \dots
\leftrightarrow 2q+1 \Leftrightarrow 2k+1 \leftrightarrow 2k+2 \Leftrightarrow 1.
$$
Therefore the reduced coset-type of $\mf{n}$ is $(k)$, which contradicts the fact that it has size $k-1$.
Hence $p=s_k$ must be odd.
Write as $s_k=p=2q-1$ with $1 \le q \le k$.
The perfect matching $\mf{n}$ can be expressed as $\mf{n}=\mf{n}_q' \cup \mf{n}_q''$,
where $\mf{n}_q'$ and $\mf{n}_q''$ are perfect matchings in $\mcal{M}(2n)$ such that
\begin{align*}
\widetilde{\mf{n}_q'}=& \{\{1,2k+2\},\{2,3\},\{4,5\},\dots,\{2q-4,2q-3\},\{2q-2,2k+1\}\}, \\
\widetilde{\mf{n}_q''}=& \{\{2q-1,2k\},\{2q,2q+1\}, \{2q+2,2q+3\}, \dots,\{2k-2,2k-1\} \}.
\end{align*}
The reduced coset-type of $\mf{n}$ is either $(q-1,k-q)$ or $(k-q,q-1)$.
Therefore the sequence $(s_1,t_1,\dots, s_{k-1},t_{k-1})$ belongs to $\mcal{B}_n(k-1,\mf{n})$.
Conversely, if $(s_1',t_1',\dots,s_{k-1}',t_{k-1}')$ is an element of
$\mcal{B}_n(k-1,\mf{n}_q' \cup \mf{n}_q'')$,
then $(s_1',t_1',\dots,s_{k-1}',t_{k-1}', 2q-1, 2k+1)$ belongs to $\mcal{B}_n(k,\mf{m}_{(k)})$.
Therefore we have the identity
$$
G_{(k)}^k(n)=|\mcal{B}_n(k,\mf{m}_{(k)})|= \sum_{q=1}^k |\mcal{B}_n(k-1,\mf{n}_q' \cup \mf{n}_q'')|.
$$
It follows from Lemma \ref{lem:coeGn}, Lemma \ref{lem:Gdecomp}, and the induction assumption that
$$
|\mcal{B}_n(k-1,\mf{n}_q' \cup \mf{n}_q'')| =
G^{q-1}_{(q-1)}(n)G^{k-q}_{(k-q)}(n) =
\mr{Cat}_{q-1} \mr{Cat}_{k-q}.
$$
Hence the well-known recurrence formula for Catalan numbers gives
$G_{(k)}^k(n)=\sum_{q=1}^k \mr{Cat}_{q-1} \mr{Cat}_{k-q}=\mr{Cat}_k$.
\end{proof}
This completes the proof of Theorem \ref{thm:coefG}.
\subsection{Proof of part 4 of Theorem \ref{thm:coefM}}
Let $\mu \vdash k$.
By Theorem \ref{thm:MN2}, part 2 of Theorem \ref{thm:coefM}, and Theorem \ref{thm:coefG},
we see
$$
\prod_{i=1}^{\ell(\mu)}\mr{Cat}_{\mu_i}
=F^k_\mu(n)= \sum_{\lambda \vdash k} L^\lambda_\mu(n) \le
\sum_{\lambda \vdash k} M^\lambda_\mu(n) = G^k_\mu(n) = \prod_{i=1}^{\ell(\mu)}\mr{Cat}_{\mu_i}
$$
so that $M^\lambda_\mu(n)= L^\lambda_\mu(n)$ for all $\lambda \vdash k$.
\section{Weingarten functions for the orthogonal group}
\label{section:WgOrtho}
Fix positive integers $N,n$ and assume $N \ge n$.
We define the Weingarten function for the orthogonal group $O(N)$ by
\begin{equation} \label{eq:WgDefinition}
\mr{Wg}^{O(N)}_{n}= \frac{1}{(2n-1)!!} \sum_{\lambda \vdash n} \frac{f^{2\lambda}}
{\prod_{\square \in \lambda} (N+c'(\square))} \omega^\lambda,
\end{equation}
which is an element of the Hecke algebra $\mcal{H}_n$ of the Gelfand pair $(S_{2n},H_n)$.
Here $f^{2\lambda}$ and $c'(\square)$ were defined in Section \ref{Sec:Preparations}.
As proved in \cite{CM},
this Weingarten function plays an important role in calculations of integrals
of polynomial functions over the orthogonal group $O(N)$.
\begin{prop}[\cite{CM}, see also \cite{CS}] \label{prop:CM}
Suppose $N \ge n$.
Let $g=(g_{ij})_{1 \le i,j \le N}$ be a Haar-distributed random matrix from $O(N)$ and
let $dg$ be the normalized Haar measure on $O(N)$.
Given two functions $\bm{i},\bm{j}$ from $\{1,2,\dots,2n\}$ to $\{1,2,\dots, N\}$,
we have
\begin{align*}
& \int_{g \in O(N)} g_{\bm{i}(1) \bm{j}(1)} g_{\bm{i}(2) \bm{j}(2)} \cdots g_{\bm{i}(2n) \bm{j}(2n)} d g \\
=& \sum_{\mf{m},\mf{n} \in \mcal{M}(2n)} \mr{Wg}^{O(N)}_{n}(\mf{m}^{-1} \mf{n})
\prod_{k=1}^n \delta_{\bm{i}(\mf{m}(2k-1)),\bm{i}(\mf{m}(2k))}
\delta_{\bm{j}(\mf{m}(2k-1)),\bm{j}(\mf{m}(2k))}.
\end{align*}
Here we regard $\mcal{M}(2n)$ as a subset of $S_{2n}$.
\end{prop}
As a special case of Proposition \ref{prop:CM}, we obtain an integral expression for $\mr{Wg}^{O(N)}_n(\sigma)$:
$$
\mr{Wg}^{O(N)}_n(\sigma) = \int_{g \in O(N)} g_{1 j_1} g_{1 j_2} g_{2 j_3} g_{2 j_4}
\cdots g_{n j_{2n-1}} g_{n j_{2n}} dg, \qquad
\sigma \in S_{2n},
$$
with
$$
(j_1,j_2,\dots,j_{2n})= \left( \left\lceil \tfrac{\sigma(1)}{2} \right\rceil,
\left\lceil \tfrac{\sigma(2)}{2} \right\rceil, \dots, \left\lceil \tfrac{\sigma(2n)}{2} \right\rceil \right).
$$
\begin{remark}
In Proposition \ref{prop:CM}, we can remove the assumption $N \ge n$.
In fact, when $N <n$, it is enough to replace the range of the sum in \eqref{eq:WgDefinition}
by partitions $\lambda \vdash n$ such that $\ell(\lambda) \le N $.
See \cite{CM} for details.
\end{remark}
Recall that the generating function for complete symmetric polynomials $h_k$ is
$$
\sum_{k=0}^\infty h_k(x_1,x_2,\dots,x_n) u^k = \prod_{i =1}^n \frac{1}{1-x_i u}.
$$
\begin{thm} \label{thm:OrthoWgJM}
Suppose $N \ge 2n-1$. Then
$$
\mr{Wg}^{O(N)}_{n}= \sum_{k =0}^\infty (-1)^k N^{-n-k} h_k(J_1,J_3,\dots,J_{2n-1}) \cdot P_n.
$$
\end{thm}
\begin{proof}
We have
\begin{align*}
\mr{Wg}_n^{O(N)} =& \frac{1}{(2n-1)!!}\sum_{\lambda \vdash n}
f^{2\lambda} \left( \prod_{\square \in \lambda} (N+c'(\square))^{-1} \right) \omega^\lambda \\
=& \frac{1}{(2n-1)!!}\sum_{\lambda \vdash n}
f^{2\lambda} \left( \sum_{k =0}^\infty (-1)^k N^{-n-k} h_k(A_\lambda') \right) \omega^\lambda \\
=& \frac{1}{(2n-1)!!}\sum_{k =0}^\infty (-1)^k N^{-n-k} \sum_{\lambda \vdash n}
f^{2\lambda} h_k(A_\lambda') \omega^\lambda \\
=& \sum_{k =0}^\infty(-1)^k N^{-n-k} h_k(J_1,J_3,\dots,J_{2n-1}) \cdot P_n.
\end{align*}
Here the second equality follows since $|c'(\square)| \le 2n-2 <N$
for all $\square \in \lambda \vdash n$, and the fourth equality follows by Corollary \ref{cor:SPexp}.
\end{proof}
Recall that $G_\mu^k(n)$ are coefficients in
$$
h_k(J_1,J_3,\dots,J_{2n-1}) \cdot P_n = \sum_\mu G_\mu^k (n) \psi_\mu(n).
$$
These coefficients appear in the asymptotic expansion of
$\mr{Wg}^{O(N)}_n(\sigma)$ with respect to $\tfrac{1}{N}$.
\begin{thm} \label{thm:ExpansionWg}
Let $\mu$ be a partition and let $N,n,k$ be positive integers. Suppose
$N \ge 2n-1$ and $n \ge |\mu|+\ell(\mu)$.
For any permutation $\sigma$ in $S_{2n}$ of reduced coset-type $\mu$,
we have
\begin{align}
\mr{Wg}^{O(N)}_{n}(\sigma)=&
\sum_{g=0}^\infty (-1)^{|\mu|+g} G_\mu^{|\mu|+g}(n) N^{-n-|\mu|-g} \label{eq:AsymExpWg}\\
=& (-1)^{|\mu|} \prod_{i = 1}^{\ell(\mu)} \mr{Cat}_{\mu_i} \cdot N^{-n-|\mu|}
+ (-1)^{|\mu|+1} G^{|\mu|+1}_\mu(n) \cdot N^{-n-|\mu|-1}
+\cdots.
\end{align}
\end{thm}
\begin{proof}
Theorem \ref{thm:OrthoWgJM} and the definition of $G^k_\mu(n)$ imply
$$
\mr{Wg}^{O(N)}_{n}(\sigma)= \sum_{k=0}^\infty (-1)^k G_\mu^k(n) N^{-n-k}.
$$
It follows from Theorem \ref{thm:coefM} and Theorem \ref{thm:coefG} that
$G^{k}_\mu(n)=\sum_{\lambda \vdash k} M^\lambda_\mu(n)$ is zero unless $k \ge |\mu|$ and
that $G^{|\mu|}_{\mu}(n)=\prod_{i=1}^{\ell(\mu)} \mr{Cat}_{\mu_i}$.
\end{proof}
The unitary group version of results in this section is seen in \cite{MN}.
Collins and \'{S}niady \cite{CS} obtained
$$
\mr{Wg}^{O(N)}_{n}(\sigma)= (-1)^{|\mu|} \prod_{i \ge 1} \mr{Cat}_{\mu_i} \cdot N^{-n-|\mu|} +
\mr{O} (N^{-n-|\mu|-1}), \qquad N \to \infty,
$$
where $\sigma$ is a permutation in $S_{2n}$ of reduced coset-type $\mu$.
Our result is a refinement of theirs.
We will observe the subleading coefficient $G^{|\mu|+1}_\mu(n)$ later,
see Subsection \ref{subsec:Open}.
\section{Jack deformations} \label{sec:JackDeform}
The purpose of this section is to connect $M^\lambda_\mu(n)$ with
$L^\lambda_\mu(n)$.
They have been defined via symmetric functions in Jucys-Murphy elements.
We define their $\alpha$-extension based on the theory of Jack polynomials.
\subsection{Jack-Plancherel measures}
Let $\alpha>0$ be a positive real number.
For each $\lambda \vdash n$, we put
$$
j_{\lambda}^{(\alpha)}= \prod_{(i,j) \in \lambda} \left\{(\alpha(\lambda_i-j) + \lambda_j'-i +1)
(\alpha(\lambda_i-j) + \lambda_j'-i +\alpha) \right\},
$$
where $\lambda'=(\lambda_1',\lambda_2',\dots)$ is the conjugate partition of $\lambda$.
Here the Young diagram of $\lambda'$ is, by definition,
the transpose of the Young diagram $\lambda$.
Define
\begin{equation}
\mathbb{P}_n^{(\alpha)} (\lambda)= \frac{\alpha^n n!}{j_\lambda^{(\alpha)}}.
\end{equation}
This gives a probability measure on partitions of $n$ and is called the {\it Jack-Plancherel measure} or \emph{Jack measure} shortly.
When $\alpha=1$,
\begin{equation}
\mathbb{P}_n^{(1)} (\lambda)= \frac{n!}{(H_\lambda)^2} = \frac{(f^\lambda)^2}{n!},
\end{equation}
where $H_\lambda= \sqrt{j_\lambda^{(1)}}$ is the product of hook-lengths of $\lambda$,
and the well-known hook-length formula gives $f^\lambda = \frac{n!}{H_\lambda}$.
The probability measure $\mathbb{P}^{(1)}_n$ is known as the {\it Plancherel measure}
for the symmetric group $S_n$.
Also, it is easy to see that
$$
\mathbb{P}_n^{(2)} (\lambda)= \frac{f^{2\lambda}}{(2n-1)!!}, \qquad
\mathbb{P}^{(1/2)}_n(\lambda)= \frac{f^{\lambda \cup \lambda}}{(2n-1)!!}.
$$
\begin{example}
$$
\mathbb{P}_3^{(\alpha)} ((3))=\frac{1}{(1+\alpha)(1+2\alpha)}, \quad
\mathbb{P}_3^{(\alpha)} ((2,1))=\frac{6\alpha}{(2+\alpha)(1+2\alpha)}, \quad
\mathbb{P}_3^{(\alpha)} ((1^3))=\frac{\alpha^2}{(1+\alpha)(2+\alpha)}.
$$
\end{example}
The Jack-Plancherel measure has the duality relation:
$$
\mathbb{P}_n^{(\alpha)}(\lambda) = \mathbb{P}_n^{(\alpha^{-1})}(\lambda'),
$$
which follows from $j_\lambda^{(\alpha)}= \alpha^{2|\lambda|} j_{\lambda'}^{(\alpha^{-1})}$.
Some asymptotic properties of the random variables $\lambda_1,\lambda_2,\dots$
with respect to Jack-Plancherel measures as $n \to \infty$ have been studied;
see \cite{Mat} and its references.
\subsection{Jack symmetric functions}
Recall the fundamental properties for Jack symmetric functions $J^{(\alpha)}_\lambda$.
For details, see \cite[VI-10]{Mac}.
Consider a scalar product on
the algebra $\mathbb{S}$ of symmetric functions given by
$$
\langle p_\lambda, p_\mu \rangle_\alpha = \delta_{\lambda, \mu} \alpha^{\ell(\lambda)}
z_\lambda
$$
where $z_\lambda$ is defined in \eqref{eq:zlambda}.
The Jack functions $\{J_\lambda^{(\alpha)} \ | \ \text{$\lambda$: partitions} \}$ are
the unique family satisfying the following two conditions:
\begin{itemize}
\item They are of the form
$J_\lambda^{(\alpha)} = \sum_{\mu \le \lambda} u_{\lambda \mu}^{(\alpha)} m_\mu$,
where each coefficient $u^{(\alpha)}_{\lambda \mu}$ is a rational function in $\alpha$,
and where $\mu \le \lambda$ stands for
the dominance ordering: $|\mu|=|\lambda|$ and $\mu_1+\cdots+\mu_i \le \lambda_1+\cdots+\lambda_i$
for any $i \ge 1$.
\item (orthogonality) $\langle J_\lambda^{(\alpha)}, J^{(\alpha)}_\mu \rangle_\alpha= \delta_{\lambda,\mu}
j_\lambda^{(\alpha)}$ for any $\lambda,\mu$.
\end{itemize}
We note $J^{(1)}_\lambda=H_\lambda s_\lambda$ and $J^{(2)}_\lambda=Z_\lambda$,
where $s_\lambda$ is a Schur function and $Z_\lambda$ is a zonal polynomial.
Let $\theta^\lambda_\rho(\alpha)$ be the coefficient of $p_\rho$ in $J_\lambda^{(\alpha)}$:
$$
J_\lambda^{(\alpha)}=\sum_{\rho :|\rho|=|\lambda|} \theta^\lambda_\rho(\alpha) p_\rho.
$$
By orthogonality relations for Jack and power-sum functions,
we have its dual identity
\begin{equation} \label{eq:PowerSumJack}
p_\rho= \alpha^{\ell(\rho)} z_\rho \sum_{\lambda: |\lambda|=|\rho|} \frac{\theta^\lambda_\rho(\alpha)}{j_\lambda^{(\alpha)}} J^{(\alpha)}_\lambda
\end{equation}
and the orthogonality relation for $\theta^\lambda_\rho(\alpha)$
\begin{equation} \label{eq:OrthoTheta}
\sum_{\lambda \vdash n} \theta^\lambda_\rho(\alpha) \theta^\lambda_\pi (\alpha) \mathbb{P}_n^{(\alpha)}(\lambda)=
\delta_{\rho \pi} \frac{\alpha^{n-\ell(\rho)} n!}{z_\rho}.
\end{equation}
We set $\theta^\lambda_{\mu+(1^{n-|\mu|})}(\alpha)=0$ unless $|\mu|+\ell(\mu) \le n$.
Note $\theta^{\lambda}_{(1^{|\lambda|})}(\alpha)=1$.
Let $X$ be an indeterminate.
Let $\epsilon_X$ be the algebra homomorphism from
$\mathbb{S}$ to $\mathbb{C}[X]$,
defined by
$\epsilon_X(p_r)=X$ for all $r \ge 1$.
Then we have (\cite[VI (10.25)]{Mac})
\begin{equation} \label{eq:JackSpecial}
\epsilon_X (J_\lambda^{(\alpha)}) = \prod_{(i,j) \in \lambda} (X+\alpha(j-1)-(i-1)).
\end{equation}
\subsection{Jack-Plancherel averages}
Let $A_\lambda^{(\alpha)}$ be the alphabet
$$
A_\lambda^{(\alpha)} = \{ (j-1)-(i-1)/\alpha \ | \ (i,j) \in \lambda \}.
$$
For example, $A_{(2,2)}^{(\alpha)}=\{1, 0,-1/\alpha,1-1/\alpha\}$.
Note that $A_\lambda=A^{(1)}_\lambda$ and $A_\lambda'=\{2 z \ | \ z \in A^{(2)}_\lambda\}$,
which are defined in Subsection \ref{subsec:partitions}.
Given a symmetric function $F$, we define
$$
\mathcal{A}_0^{(\alpha)}(F,n)=
\alpha^{\deg F}\sum_{\lambda \vdash n} F(A_\lambda^{(\alpha)}) \mathbb{P}^{(\alpha)}_n(\lambda).
$$
More generally, for a partition $\mu$, we define
$$
\mathcal{A}_\mu^{(\alpha)}(F,n)=
\frac{\alpha^{\deg F-|\mu|}z_{\mu+(1^{n-|\mu|})}}{ n!}\sum_{\lambda \vdash n}
F(A_\lambda^{(\alpha)}) \mathbb{P}^{(\alpha)}_n(\lambda) \theta^\lambda_{\mu+(1^{n-|\mu|})}(\alpha).
$$
Note that $\mathcal{A}_0^{(\alpha)}(F,n)= \mathcal{A}_{\mu}^{(\alpha)}(F,n)$ with $\mu=(0)$.
If $F$ is homogeneous, then $F(A_\lambda^{(\alpha)})= (-\alpha)^{-\deg F}
F(A_{\lambda'}^{(\alpha^{-1})})$.
The coefficient $\theta^\lambda_{\mu+(1^{|\lambda|-|\mu|})}(\alpha)$ satisfies the duality relation
$\theta^\lambda_{\mu+(1^{|\lambda|-|\mu|})}(\alpha)= (-\alpha)^{|\mu|}
\theta^{\lambda'}_{\mu+(1^{|\lambda|-|\mu|})}(\alpha^{-1})$ (\cite[VI (10.30)]{Mac}).
Hence we have the duality relation for $\mathcal{A}_\mu^{(\alpha)}(F,n)$ with a homogeneous symmetric
function $F$:
\begin{equation} \label{eq:dualityAValpha}
\mathcal{A}_\mu^{(\alpha)}(F,n)= (-\alpha)^{\deg F-|\mu|} \mathcal{A}_\mu^{(\alpha^{-1})}(F,n).
\end{equation}
The following two examples give the connection to Jucys-Murphy elements.
The average $\mathcal{A}_\mu^{(\alpha)}(F,n)$ with $\alpha>0$ is
a generalization of coefficients
$L^\lambda_\mu(n), M^\lambda_\mu(n), F^k_\mu(n)$, and $G^k_\mu(n)$,
which are studied in \cite{MN} and in the first half of the present paper.
\begin{example}[$\alpha=1$] \label{ex:alpha1AV}
From $J^{(1)}_\lambda=H_\lambda s_\lambda$ and from
the Frobenius formula $s_\lambda= \sum_\rho z_\rho^{-1} \chi^\lambda_\rho p_\rho$,
we have
$\theta^\lambda_\rho(1)= z_\rho^{-1} H_{\lambda} \chi^\lambda_\rho$,
and hence
$$
\mathcal{A}_\mu^{(1)}(F,n)= \sum_{\lambda \vdash n} F(A_\lambda^{(1)}) \mathbb{P}^{(1)}_n(\lambda)
\frac{H_\lambda \chi^\lambda_{\mu+(1^{n-|\mu|})}}{ n!} =
\sum_{\lambda \vdash n} F(A_\lambda) \frac{f^\lambda \chi^\lambda_{\mu+(1^{n-|\mu|})}}{n!}.
$$
In particular, we have
$\mathcal{A}_0^{(1)}(F,n)=
\sum_{\lambda \vdash n} F(A_\lambda^{(1)}) \mathbb{P}^{(1)}_n(\lambda)$,
the average of $F(A_\lambda)$ with respect to the Plancherel measure $\mathbb{P}_n^{(1)}$.
By results in \cite{MN} (see also Subsection \ref{subsec:ClassExp} in the present paper),
we obtain the identity
\begin{equation}
F(J_1,J_2,\dots,J_n) = \sum_\mu \mathcal{A}_\mu^{(1)}(F,n) \mf{c}_\mu(n).
\end{equation}
In particular, $L^\lambda_\mu(n)=\mathcal{A}_\mu^{(1)}(m_\lambda,n)$ and
$F^k_\mu(n)=\mathcal{A}_\mu^{(1)}(h_k,n)$.
\end{example}
\begin{example}[$\alpha=2$] \label{ex:alpha2AV}
Since $J^{(2)}_\lambda=Z_\lambda$, or since $\theta^\lambda_\rho(2)= 2^{|\rho|-\ell(\rho)} |\rho|! z_{\rho}^{-1} \omega^\lambda_\rho$, we have
$$
\mathcal{A}_\mu^{(2)}(F,n)=2^{\deg F}\sum_{\lambda \vdash n} F(A_\lambda^{(2)}) \mathbb{P}^{(2)}_n(\lambda)
\omega^\lambda_{\mu+(1^{n-|\mu|})} =
\sum_{\lambda \vdash n} F(A_\lambda') \frac{f^{2\lambda}\omega^\lambda_{\mu+(1^{n-|\mu|})}}{(2n-1)!!}.
$$
Now the equation \eqref{eq:Msum} implies
$M^\lambda_\mu(n)=\mathcal{A}_\mu^{(2)}(m_\lambda,n)$ and
$G^k_\mu(n)=\mathcal{A}_\mu^{(2)}(h_k,n)$.
More generally,
\begin{equation}
F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n = \sum_\mu \mathcal{A}_\mu^{(2)}(F,n) \psi_\mu(n).
\end{equation}
\end{example}
\subsection{The $\alpha=1/2$ case}
We construct the $\alpha=1/2$ version of Example \ref{ex:alpha1AV} and Example \ref{ex:alpha2AV}.
We refer to \cite[VII.2, Example 6,7]{Mac}.
Let $\epsilon$ denote the sign character of $S_{2n}$, and
let $\epsilon_n$ denote its restriction to $H_n$:
$\epsilon_n= \epsilon \downarrow^{S_{2n}}_{H_n}$.
Then $(S_{2n},H_n, \epsilon_n)$ is a twisted Gelfand pair
in the sense of \cite[VII.1, Example 10]{Mac}.
The corresponding Hecke algebra is
$$
\mcal{H}_n^{\epsilon}=\{ f:S_{2n} \to \mathbb{C} \ | \
f(\zeta \sigma)= f(\sigma \zeta) = \epsilon_n(\zeta) f(\sigma) \quad
(\sigma \in S_{2n}, \ \zeta \in H_n)\}.
$$
This algebra is commutative.
For each partition $\lambda \vdash n$,
the \emph{$\epsilon$-spherical function} $\pi^{\lambda}$ is defined by
$$
\pi^{\lambda}= (2^n n!)^{-1}\chi^{\lambda \cup \lambda} \cdot P_n^{\epsilon}
=(2^n n!)^{-1} P_n^{\epsilon} \cdot \chi^{\lambda \cup \lambda},
$$
where $P_n^{\epsilon}=\sum_{\zeta \in H_n} \epsilon_n(\zeta )\zeta$.
For each $f \in \mcal{H}_n$, let $f^{\epsilon}$ be the function on $S_{2n}$
defined by $f^{\epsilon} (\sigma)=\epsilon(\sigma) f(\sigma)$.
Then the map $f \mapsto f^{\epsilon}$ is an isomorphism of $\mcal{H}_n$ to
$\mcal{H}_n^{\epsilon}$.
Under this isomorphism, $P_n$, $\omega^\lambda$, and $\psi_\mu(n)$ are mapped to
$P_n^{\epsilon}$, $\pi^{\lambda'}$, and $\psi_\mu^{\epsilon}(n)=
\sum_{\sigma \in H_{\mu+(1^{n-|\mu|})}} \mathrm{sgn}(\sigma) \sigma$, respectively.
Furthermore, for any homogeneous symmetric function $F$,
the element $F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n \in \mcal{H}_n$ is mapped to
$(-1)^{\deg F} F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n^{\epsilon}$,
and we have $F(A_{\lambda}')= (-1)^{\deg F} F(A_{\lambda'}^{(1/2)})$.
Therefore we can obtain the following statements from facts for
$F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n$.
\begin{thm}
\begin{enumerate}
\item For each $0 \le k< n$, we have
$$
e_k(J_1,J_3,\dots,J_{2n-1}) \cdot P_n^{\epsilon} = (-1)^k \sum_{\mu \vdash n}
\psi_\mu^{\epsilon}(n).
$$
\item For any symmetric function $F$ and partition $\lambda$ of $n$,
$$
F(J_1,J_3,\dots,J_{2n-1}) \cdot \pi^\lambda= \pi^\lambda \cdot
F(J_1,J_3,\dots,J_{2n-1}) = F(A_{\lambda}^{(1/2)}) \pi^\lambda.
$$
This belongs to $\mcal{H}_n^{\epsilon}$.
\item For any symmetric function $F$,
$$
F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n^{\epsilon} = \frac{1}{(2n-1)!!} \sum_{\lambda \vdash n}
f^{\lambda \cup \lambda} F(A^{(1/2)}_\lambda) \pi^\lambda.
$$
Furthermore, if $F$ is homogeneous, then
\begin{align*}
F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n^{\epsilon}
=& (-1)^{\deg F} \sum_{|\mu|+\ell(\mu) \le n} \mathcal{A}_\mu^{(2)}(F,n) \psi_\mu^{\epsilon}(n) \\
=& \sum_{|\mu|+\ell(\mu) \le n} (-1)^{|\mu|}2^{\deg F-|\mu|}
\mathcal{A}_\mu^{(1/2)}(F,n) \psi_\mu^{\epsilon}(n).
\end{align*}
\item For any homogeneous symmetric function $F$,
$$
\mathcal{A}_\mu^{(1/2)}(F,n)= \frac{(-1)^{|\mu|}}{2^{\deg F-|\mu|}}
\sum_{\lambda \vdash n} F(A^{(1/2)}_\lambda)
\frac{f^{\lambda \cup \lambda} \omega^{\lambda'}_{\mu+(1^{n-|\mu|})}}{(2n-1)!!}.
$$
\end{enumerate}
\end{thm}
In a similar way to Section \ref{section:WgOrtho},
we can observe the deep connection between $F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n^{\epsilon}$
and integrals over symplectic groups.
That connection will be presented in a forthcoming paper.
\subsection{Some properties}
\begin{lem} \label{lem:EvaJackContent}
Let $F$ be a symmetric function and $n$ a positive integer.
Assume that there exist complex numbers $\{a(\mu) \ | \ \text{$\mu$ is a partition}\}$
such that
$$
F(A_\lambda^{(\alpha)})= \sum_{\mu} a(\mu) \theta^\lambda_{\mu+(1^{|\lambda|-|\mu|})}(\alpha)
$$
for any partition $\lambda$.
Then $\mathcal{A}_\mu^{(\alpha)}(F,n)= \alpha^{\deg F} a(\mu)$ for each $\mu$.
\end{lem}
\begin{proof}
We have
$$
\mathcal{A}_\mu^{(\alpha)}(F,n) =
\frac{\alpha^{\deg F- |\mu|}z_{\mu+(1^{n-|\mu|})}}{ n!}
\sum_{\nu} a(\nu)
\sum_{\lambda \vdash n}
\mathbb{P}^{(\alpha)}_n(\lambda) \theta^\lambda_{\mu+(1^{n-|\mu|})}(\alpha)
\theta^\lambda_{\nu+(1^{n-|\nu|})}(\alpha).
$$
The claim follows from the orthogonality relation \eqref{eq:OrthoTheta}.
\end{proof}
The following proposition is a Jack deformation of
Jucys' result \cite{Jucys} and its analogue, Corollary \ref{cor1-Jucys}.
\begin{prop} \label{prop:JucysAlpha}
$$
\mathcal{A}^{(\alpha)}_\mu(e_k,n)
=\begin{cases} 1 & \text{if $|\mu|=k$}, \\
0 & \text{otherwise}.
\end{cases}
$$
\end{prop}
\begin{proof}
Let $X$ be an indeterminate and let $\lambda \vdash n$.
It follows from \eqref{eq:JackSpecial} that
\begin{align*}
& \sum_{k=0}^n e_k(A_\lambda^{(\alpha)}) X^k =
(X/\alpha)^n \prod_{(i,j) \in \lambda} (\alpha /X +\alpha(j-1)-(i-1))
= (X/\alpha)^n \epsilon_{\alpha/X} (J_\lambda^{(\alpha)}) \\
=& (X/\alpha)^n \sum_{\rho \vdash n} \theta^\lambda_\rho(\alpha)
\epsilon_{\alpha/X} (p_\rho)
= \sum_{\rho \vdash n} \theta^\lambda_\rho(\alpha) (X/\alpha)^{n-\ell(\rho)}
= \sum_{k=0}^n \alpha^{-k}
\sum_{\nu \vdash k} \theta^\lambda_{\nu+(1^{n-k})}(\alpha) X^{k},
\end{align*}
which gives
\begin{equation} \label{eq:La98}
e_k(A_\lambda^{(\alpha)}) =\alpha^{-k} \sum_{\nu \vdash k}
\theta^\lambda_{\nu+(1^{|\lambda|-k})}(\alpha).
\end{equation}
The claim follows from this identity and Lemma \ref{lem:EvaJackContent}.
\end{proof}
\begin{remark}
The equation \eqref{eq:La98} is seen in \cite[Theorem 5.4]{Lassalle98}.
\end{remark}
\begin{thm} \label{thm:AVpolynomial}
Let $F$ be any symmetric function and $\mu$ a partition.
Then
$\mathcal{A}_\mu^{(\alpha)}(F,n)$ is a polynomial in $n$.
If the expansion of $F$ in $p_\rho$ is given by
$F=\sum_\rho a(\rho) p_\rho$, then
the degree of $\mathcal{A}_\mu^{(\alpha)}(F,n)$ in $n$ is at most
$$
\max_{a(\rho) \not=0} (|\rho|+\ell(\rho)) - (|\mu|+\ell(\mu)).
$$
\end{thm}
This theorem at $\alpha=2$ with Example \ref{ex:alpha2AV} implies
part 1 of Theorem \ref{thm:coefM}.
The proof is given in the next subsection by applying shifted symmetric function theory.
\begin{example}
Since the monomial symmetric function is expanded as
$$
m_\lambda= p_\lambda +\sum_{\rho > \lambda} a(\rho) p_\rho,
$$
the degree of the polynomial $\mathcal{A}^{(\alpha)}_\mu(m_\lambda,n)$ is
at most $|\lambda|+\ell(\lambda) -(|\mu|+\ell(\mu))$.
But this evaluation is not sharp.
Indeed, as we will observe below,
the degree of $\mathcal{A}_{(0)}^{(\alpha)}(m_{(3)},n)=\alpha(\alpha-1) \binom{n}{2}$ is $2$
but $(|\lambda|+\ell(\lambda))-(|\mu|+\ell(\mu))=4$ with $\lambda=(3)$ and $\mu=(0)$.
\end{example}
\subsection{Shifted symmetric functions and proof of Theorem \ref{thm:AVpolynomial}}
Following \cite{KOO, LassalleSomeIdentities},
we review the theory of shifted symmetric functions related to
Jack functions.
A polynomial in $n$ variables $x_1,x_2,\dots,x_n$ is said to be
\emph{shifted-symmetric} if it is symmetric in the variables
$y_i:=x_i-i/\alpha$.
Denote by $\mathbb{S}^*_\alpha(n)$ the subalgebra of
shifted-symmetric functions in $\mathbb{C}[x_1,x_2,\dots,x_n]$.
Consider an infinite alphabet $x=(x_1,x_2,\dots)$
and consider the morphism $F(x_1,x_2,\dots,x_n,x_{n+1}) \mapsto
F(x_1,x_2,\dots,x_{n},0)$ from $\mathbb{S}^*_\alpha(n+1)$ to $\mathbb{S}^*_\alpha(n)$.
As in the definition of $\mathbb{S}$,
we can define the algebra $\mathbb{S}^*_\alpha$ as the
projective limit of the sequence $(\mathbb{S}_\alpha^*(n))_{n \ge 1}$.
Elements of $\mathbb{S}_\alpha^*$ are called \emph{shifted-symmetric functions}
and written as $F(x)=F(x_1,x_2,\dots)$ using infinite variables.
Denote by $\deg F$ the degree of $F$.
For each $F \in \mathbb{S}^*_\alpha$, we may evaluate at
partitions:
$F(\lambda)=F(\lambda_1,\lambda_2,\dots)$.
We denote by $[F] \in \mathbb{S}$ the homogeneous component of $F$ of degree $\deg F$.
We call $[F]$ the \emph{leading symmetric term} of $F$.
The map $F \mapsto [F]$ provides a canonical isomorphism of the graded algebra associated
to the filtered algebra $\mathbb{S}^*_\alpha$ onto $\mathbb{S}$.
If the leading terms $[F_1], [F_2],\dots$ of a sequence $F_1,F_2,\dots$
in $\mathbb{S}^*_\alpha$ generate the algebra $\mathbb{S}$,
then the sequence itself generates $\mathbb{S}^*_\alpha$.
For each integer $k \ge 1$, consider a polynomial
$$
p^*_k(x;\alpha)= \sum_{i \ge 1}
\left( (x_i-(i-1)/\alpha)^{\downarrow k}- (-(i-1)/\alpha)^{\downarrow k} \right)
$$
with $a^{\downarrow k}=a(a-1) \cdots (a-k+1)$.
Then these polynomials are shifted-symmetric.
Since $[p^*_k(\cdot;\alpha)]=p_k$
and since the $p_k$ generate $\mathbb{S}$,
they generate $\mathbb{S}^*_\alpha$.
For $F \in \mathbb{S}$ and a partition $\lambda$,
we put $H_F^{(\alpha)}(\lambda)=F(A_\lambda^{(\alpha)})$.
\begin{lem}[Lemma 7.1 in \cite{LassalleSomeIdentities}]
For any integer $k \ge 1$, the function $\lambda \mapsto H_{p_k}^{(\alpha)}(\lambda)
=p_k(A^{(\alpha)}_\lambda)$
defines a shifted symmetric function of degree $\deg H_{p_k}^{(\alpha)}=k+1$.
Specifically,
$$
H_{p_k}^{(\alpha)}(\lambda)= \sum_{m=1}^k S(k,m) \frac{p^*_{m+1}(\lambda;\alpha)}{m+1}.
$$
Here $S(k,m)$ are the Stirling numbers of the second kind,
defined via $u^k=\sum_{m=1}^k S(k,m) u^{\downarrow m}$.
\end{lem}
Since $p_k$ generate $\mathbb{S}$, we have the following corollary.
\begin{cor} \label{cor:DegreeHF}
For any $F \in \mathbb{S}$, the function $\lambda \mapsto H_F^{(\alpha)}(\lambda)$
defines a shifted symmetric function.
Furthermore, if the expansion of $F$ in $p_\rho$ is given by
$F=\sum_\rho a(\rho) p_\rho$, then the degree of $H_F^{(\alpha)}$ is
$\max_{a(\rho)\not=0} (|\rho|+\ell(\rho))$.
\end{cor}
Now we define \emph{shifted Jack functions} $J_\mu^*(x;\alpha)$.
They are defined by
$$
J^*_\mu(\lambda;\alpha)= \frac{|\lambda|^{\downarrow |\mu|}
\langle p_1^{|\lambda|-|\mu|} J_\mu^{(\alpha)}, J_\lambda^{(\alpha)} \rangle_\alpha}
{\alpha^{|\lambda|} |\lambda|!}.
$$
\begin{lem}[\cite{KOO}] \label{lem:PropertySJack}
The $J^*_\mu(x;\alpha)$ are shifted-symmetric and satisfy the following properties.
\begin{enumerate}
\item $[J^*_\mu(\cdot;\alpha)]=J^{(\alpha)}_\mu$.
Hence the $J^*_\mu(x;\alpha)$ form a basis of $\mathbb{S}^*_\alpha$.
\item $J_\mu^*(\lambda;\alpha)=0$ unless $\mu_i \le \lambda_i$ for all $i \ge 1$.
\item $J_\mu^*(\mu;\alpha)= \alpha^{-|\mu|} j_\mu^{(\alpha)}$.
\end{enumerate}
\end{lem}
The following proposition is a slight extension of Theorem 5.5 in \cite{Olshanski}.
\begin{prop} \label{prop:AVsJack}
Let $\mu,\nu$ be partitions.
If $|\nu| \ge |\mu|+\ell(\mu)$, then we have
$$
\frac{z_{\mu+(1^{n-|\mu|})}}{ n!}\sum_{\lambda \vdash n}
J_\nu^*(\lambda;\alpha) \mathbb{P}^{(\alpha)}_n(\lambda) \theta^\lambda_{\mu+(1^{n-|\mu|})}(\alpha)=
\binom{n-|\mu|-\ell(\mu)}{|\nu|-|\mu|-\ell(\mu)} z_{\mu+(1^{|\nu|-|\mu|})}
\theta^\nu_{\mu+(1^{|\nu|-|\mu|})}(\alpha),
$$
which is a polynomial in $n$ of degree $|\nu|-|\mu|-\ell(\mu)$.
Otherwise, both sides are zero.
\end{prop}
\begin{proof}
Put $m=|\nu|$.
If $n <m$, then both sides vanish by part 2 of Lemma \ref{lem:PropertySJack},
and so we may assume $n \ge m$.
We have
\begin{align*}
& \frac{z_{\mu+(1^{n-|\mu|})}}{ n!}\sum_{\lambda \vdash n}
J_\nu^*(\lambda;\alpha) \mathbb{P}^{(\alpha)}_n(\lambda) \theta^\lambda_{\mu+(1^{n-|\mu|})}(\alpha) \\
=& \frac{z_{\mu+(1^{n-|\mu|})}}{ n!}\sum_{\lambda \vdash n}
\frac{n^{\downarrow m}
\langle p_1^{n-m} J_\nu^{(\alpha)}, J_\lambda^{(\alpha)} \rangle_\alpha}
{j_\lambda^{(\alpha)} }
\theta^\lambda_{\mu+(1^{n-|\mu|})}(\alpha) \\
=& \frac{n^{\downarrow m}}{\alpha^{n-|\mu|} n!}
\left\langle p_1^{n-m} J_\nu^{(\alpha)},
\sum_{\lambda \vdash n}
\frac{\alpha^{n-|\mu|} z_{\mu+(1^{n-|\mu|})}}{j_\lambda^{(\alpha)}}
\theta^\lambda_{\mu+(1^{n-|\mu|})} J_\lambda^{(\alpha)} \right\rangle_\alpha \\
=& \frac{n^{\downarrow m}}{\alpha^{n-|\mu|} n!}
\left\langle p_1^{n-m} J_\nu^{(\alpha)}, p_{\mu+(1^{n-|\mu|})} \right\rangle_\alpha
\qquad \text{by \eqref{eq:PowerSumJack}}.
\end{align*}
Using the fact that the adjoint to the multiplication by $p_1$
with respect to $\langle \cdot, \cdot \rangle_\alpha$ is
$\alpha \frac{\partial}{\partial p_1}$, we have
\begin{equation} \label{eq:Polynomiality1}
=\frac{n^{\downarrow m}}{\alpha^{n-|\mu|} n!}
\left\langle J_\nu^{(\alpha)}, \alpha^{n-m} \left( \frac{\partial}{\partial p_1} \right)^{n-m}
p_{\mu+(1^{n-|\mu|})} \right\rangle_\alpha.
\end{equation}
Since $m_1(\mu+(1^{n-|\mu|}))=n-|\mu|-\ell(\mu)$,
the symmetric function $ \left( \tfrac{\partial}{\partial p_1} \right)^{n-m} p_{\mu+(1^{n-|\mu|})}$
vanishes unless $m \ge |\mu|+\ell(\mu)$.
If $n \ge m \ge |\mu|+\ell(\mu)$, then \eqref{eq:Polynomiality1} equals
\begin{align*}
=& \frac{n^{\downarrow m}}{\alpha^{m-|\mu|} n!} (n-|\mu|-\ell(\mu))^{\downarrow (n-m)}
\langle J_\nu^{(\alpha)}, p_{\mu+(1^{m-|\mu|})} \rangle_\alpha \\
=& \binom{n-|\mu|-\ell(\mu)}{m-|\mu|-\ell(\mu)} z_{\mu+(1^{m-|\mu|})}
\theta^\nu_{\mu+(1^{m-|\mu|})}(\alpha).
\end{align*}
\end{proof}
\begin{remark}
Proposition \ref{prop:AVsJack} can be rewritten as follows:
for partitions $\nu,\mu$ such that $|\nu| \ge |\mu|+\ell(\mu)$ and for any $n \ge 0$,
$$
\sum_{\lambda \vdash n}
J_\nu^*(\lambda;\alpha) \mathbb{P}^{(\alpha)}_n(\lambda) \theta^\lambda_{\mu+(1^{n-|\mu|})}(\alpha)=
n^{\downarrow |\nu|}\theta^\nu_{\mu+(1^{|\nu|-|\mu|})}(\alpha).
$$
In particular, we obtain a simple identity
$$
\sum_{\lambda \vdash n}
J_\nu^*(\lambda;\alpha) \mathbb{P}^{(\alpha)}_n(\lambda)=
n^{\downarrow |\nu|},
$$
which is seen in \cite[Theorem 5.5]{Olshanski}.
Lassalle obtained a similar identity.
Specifically, Equation (3.3) in \cite{Lassalle08} implies that,
for partitions $\nu$ and $\mu$ such that $|\nu| \ge |\mu|+\ell(\mu)$
and for any $m \ge 0$,
$$
\sum_{\rho \vdash m} J^*_\rho(\nu;\alpha)
\mathbb{P}_m^{(\alpha)}(\rho) \theta^\rho_{\mu +(1^{m-|\mu|})}(\alpha)=
\frac{(|\nu|-|\mu|-\ell(\mu))! \, m!}{(|\nu|-m)! \, (m-|\mu|-\ell(\mu))!}
\theta^\nu_{\mu+(1^{|\nu|-|\mu|})}(\alpha).
$$
\end{remark}
\begin{proof}[Proof of Theorem \ref{thm:AVpolynomial}
and part 1 of Theorem \ref{thm:coefM}]
The statement follows from Corollary \ref{cor:DegreeHF}, part 1 of Lemma \ref{lem:PropertySJack},
and Proposition \ref{prop:AVsJack}.
\end{proof}
\section{Examples and open problems}
\subsection{Examples of $\mathcal{A}_\mu^{(\alpha)}(F,n)$}
\label{subsec:Examples}
We give examples of $\mathcal{A}_\mu^{(\alpha)}(m_\lambda,n)$ and
$\mathcal{A}_\mu^{(\alpha)}(h_k,n)$, studied in the previous section.
\noindent
$|\lambda|=0,1$.
$$
\mathcal{A}_{\mu}^{(\alpha)}(m_{(0)},n)= \delta_{\mu,(0)}. \qquad
\mathcal{A}_{\mu}^{(\alpha)}(m_{(1)},n)= \delta_{\mu,(1)}.
$$
\noindent
$|\lambda|=2$.
\begin{align*}
\mathcal{A}_\mu^{(\alpha)}(m_{(2)},n)=&\delta_{\mu,(2)}+(\alpha-1) \delta_{\mu,(1)} +\alpha \binom{n}{2}
\delta_{\mu,(0)}. \\
\mathcal{A}_\mu^{(\alpha)}(m_{(1^2)},n)=&\delta_{\mu,(2)}+\delta_{\mu,(1^2)}. \\
\mathcal{A}_\mu^{(\alpha)}(h_2,n)=&2\delta_{\mu,(2)}+\delta_{\mu,(1^2)}
+(\alpha-1) \delta_{\mu,(1)} +\alpha \binom{n}{2}
\delta_{\mu,(0)}.
\end{align*}
$|\lambda|=3$.
\begin{align*}
\mathcal{A}_\mu^{(\alpha)}(m_{(3)},n)=&
\delta_{\mu,(3)}+
3(\alpha-1)\delta_{\mu,(2)}+
(2\alpha n +\alpha^2-5\alpha+1)\delta_{\mu,(1)}
+\alpha(\alpha-1) \binom{n}{2}\delta_{\mu,(0)}. \\
\mathcal{A}_\mu^{(\alpha)}(m_{(2,1)},n)=&
3\delta_{\mu,(3)}+ \delta_{\mu,(2,1)}
+3(\alpha-1)\delta_{\mu,(2)}+ 2(\alpha-1) \delta_{\mu,(1^2)}
+\alpha \left( \binom{n}{2}-1 \right)\delta_{\mu,(1)}. \\
\mathcal{A}_\mu^{(\alpha)}(m_{(1^3)},n)=&
\delta_{\mu,(3)}+\delta_{\mu,(2,1)}+\delta_{\mu,(1^3)}. \\
\mathcal{A}_\mu^{(\alpha)}(h_3,n)=&
5\delta_{\mu,(3)}+2\delta_{\mu,(2,1)}+\delta_{\mu,(1^3)}+
6(\alpha-1)\delta_{\mu,(2)}+2(\alpha-1)\delta_{\mu,(1^2)} \\
&\qquad +
\left( \frac{1}{2} \alpha n^2 + \frac{3}{2} \alpha n+\alpha^2-6\alpha+1 \right)\delta_{\mu,(1)}
+\alpha(\alpha-1) \binom{n}{2}\delta_{\mu,(0)}.
\end{align*}
In fact,
the identities for $m_{(1^k)}$ are given by Proposition \ref{prop:JucysAlpha}.
Lassalle \cite{LassalleCumulant, LassalleHP} (see also \cite[Conjecture 8.1]{Lassalle98}) gives the expansion of
$\theta^\lambda_{\mu+(1^{|\lambda|-|\mu|})}(\alpha)$ with respect
to $p_\rho(A^{(\alpha)}_\lambda)$:
Letting
$\hat{\theta}_\mu(\lambda)=\theta^\lambda_{\mu+(1^{|\lambda|-|\mu|})}(\alpha)$ and
$\hat{p}_\rho(\lambda)=p_\rho(A^{(\alpha)}_\lambda)$,
\begin{align*}
\hat{\theta}_{(1)}(\lambda)=& \alpha \hat{p}_{(1)}(\lambda), \\
\hat{\theta}_{(2)}(\lambda)=& \alpha^2 \hat{p}_{(2)}(\lambda)
-\alpha(\alpha-1) \hat{p}_{(1)} (\lambda) - \alpha \binom{|\lambda|}{2},\\
\hat{\theta}_{(1^2)}(\lambda)=& -\tfrac{3}{2} \alpha^2 \hat{p}_{(2)}(\lambda)
+\tfrac{1}{2} \alpha^2 \hat{p}_{(1^2)} (\lambda)
+\alpha(\alpha-1) \hat{p}_{(1)}(\lambda) +\alpha \binom{|\lambda|}{2}, \\
\hat{\theta}_{(3)}(\lambda)=&
\alpha^3 \hat{p}_{(3)}(\lambda) -3\alpha^2 (\alpha-1) \hat{p}_{(2)}(\lambda) +
\alpha (-2\alpha |\lambda|+2\alpha^2-\alpha+2) \hat{p}_{(1)}(\lambda)
+2\alpha(\alpha-1) \binom{|\lambda|}{2}, \\
\hat{\theta}_{(2,1)}(\lambda)=&
-4 \alpha^3 \hat{p}_{(3)}(\lambda) +\alpha^3 \hat{p}_{(2,1)}(\lambda)
+9\alpha^2 (\alpha-1) \hat{p}_{(2)} (\lambda)-\alpha^2 (\alpha-1) \hat{p}_{(1^2)}(\lambda) \\
& +\alpha(-\tfrac{\alpha}{2} |\lambda|^2 +\tfrac{13 \alpha}{2}|\lambda|-5\alpha^2+2\alpha-5) \hat{p}_{(1)}(\lambda)
-5\alpha(\alpha-1) \binom{|\lambda|}{2}.
\end{align*}
Using these, we can express $p_\rho(A^{(\alpha)}_\lambda)$
in terms of $\hat{\theta}_\mu(\lambda)$ and therefore also
$m_\nu(A^{(\alpha)}_\lambda)$.
Hence our above examples follow by Lemma \ref{lem:EvaJackContent}.
These examples contain the following quantities, studied in earlier literature, as typical special cases:
\begin{align*}
L^\lambda_\mu(n)=&\mathcal{A}_\mu^{(1)}(m_\lambda,n), &
F^k_\mu(n)=&\mathcal{A}_\mu^{(1)}(h_k,n), \\
M^\lambda_\mu(n)=&\mathcal{A}_\mu^{(2)}(m_\lambda,n), &
G^k_\mu(n)=&\mathcal{A}_\mu^{(2)}(h_k,n).
\end{align*}
For example, we obtain
$$
\mathcal{A}_\mu^{(2)}(h_3,n)=
5\delta_{\mu,(3)}+2\delta_{\mu,(2,1)}+\delta_{\mu,(1^3)}+
6\delta_{\mu,(2)}+2\delta_{\mu,(1^2)}
+(n^2 +3 n-7)\delta_{\mu,(1)}
+n(n-1)\delta_{\mu,(0)},
$$
or, by Example \ref{ex:alpha2AV},
\begin{align*}
h_{3}(J_1,J_3,\dots,J_{2n-1}) \cdot P_n =& 5\psi_{(3)}(n)+2\psi_{(2,1)}(n)+\psi_{(1^3)}(n)+ 6\psi_{(2)}(n) +2 \psi_{(1^2)}(n)& \\
&\qquad +(n^2+3n-7)\psi_{(1)}(n)+n(n-1) \psi_{(0)}(n).
\end{align*}
See also the $\alpha=1$ cases in \cite[\S 8.1]{MN}.
We remark that
a conjecture for $\mathcal{A}_0^{(\alpha)}(p_\lambda,n)$ is given by
\cite[Conjecture 12.1]{Lassalle98}.
\subsection{Table of asymptotic expansions of $\mr{Wg}^{O(N)}_{n}$} \label{subsec:tableAsymWg}
We give some examples of the expansion \eqref{eq:AsymExpWg},
by using coefficients appearing in Subsection \ref{subsec:Examples}.
Given a partition $\mu$,
we define
$\mr{Wg}^{O(N)}(\mu;n)=\mr{Wg}^{O(N)}_n(\sigma)$,
where $\sigma$ is a permutation in $S_{2n}$ of reduced coset-type $\mu$.
For example,
$$
\mr{Wg}^{O(N)}((0);n)=\mr{Wg}^{O(N)}_n(\mr{id}_{2n}), \qquad
\mr{Wg}^{O(N)}((1);n)=\mr{Wg}^{O(N)}_n \left( \left( \begin{smallmatrix}
1 & 2 & 3 & 4 & 5 & 6 & \dots & 2n-1 & 2n \\
1 & 4 & 2 & 3 & 5 & 6 & \dots & 2n-1 & 2n
\end{smallmatrix} \right) \right).
$$
Theorem \ref{thm:ExpansionWg} and examples in the previous subsection with $\alpha=2$
give the following
asymptotic expansions.
As $N \to \infty$,
\begin{align*}
\mr{Wg}^{O(N)}((0);n)=&
N^{-n} +n(n-1)N^{-n-2} -n(n-1)N^{-n-3} + \mr{O}(N^{-n-4}). \\
\mr{Wg}^{O(N)}((1);n)=&
-N^{-n-1} +N^{-n-2} -(n^2+3n-7)N^{-n-3} + \mr{O}(N^{-n-4}). \\
\mr{Wg}^{O(N)}((2);n)=&
2N^{-n-2}-6N^{-n-3}+ \mr{O}(N^{-n-4}).\\
\mr{Wg}^{O(N)}((1^2);n)=&
N^{-n-2}-2 N^{-n-3}+ \mr{O}(N^{-n-4}).
\end{align*}
On the other hand, in \cite{CM},
the explicit values of $\mr{Wg}^{O(N)}_{n}(\sigma)$ for $n \le 6$ are given.
We remark that in \cite{CM} ordinary coset-types were used, not reduced ones.
Using a computer with the table in \cite{CM}, we obtain the following expansions.
\begin{align*}
\mr{Wg}^{O(N)}((0);2)=& N^{-2} - 0 N^{-3}+2 N^{-4}-2 N^{-5}+6 N^{-6} -10 N^{-7} +22 N^{-8} -\cdots. \\
\mr{Wg}^{O(N)}((0);3)=& N^{-3}- 0 N^{-4} +6 N^{-5}-6 N^{-6}+50 N^{-7} -126 N^{-8} +610 N^{-9} -\cdots.\\
\mr{Wg}^{O(N)}((0);4)=& N^{-4}- 0 N^{-5} +12 N^{-6}-12 N^{-7}+176 N^{-8} -468 N^{-9} +3544 N^{-10}
-\cdots.\\
\mr{Wg}^{O(N)}((0);5)=& N^{-5} - 0 N^{-6}+20 N^{-7}-20 N^{-8}+440 N^{-9} -1180 N^{-10} +12480 N^{-11}
-\cdots. \\
\mr{Wg}^{O(N)}((0);6)=& N^{-6}- 0 N^{-7} +30 N^{-8}-30 N^{-9}+910 N^{-10} -2430 N^{-11} +33710 N^{-12}
-\cdots.
\end{align*}
\begin{align*}
\mr{Wg}^{O(N)}((1);2)=& -N^{-3} +N^{-4} -3 N^{-5}+5N^{-6} -11 N^{-7} +21N^{-8}-43N^{-9}+ \cdots.\\
\mr{Wg}^{O(N)}((1);3)=& -N^{-4} +N^{-5} -11 N^{-6}+29N^{-7} -147 N^{-8} +525 N^{-9}
-2227N^{-10}+ \cdots. \\
\mr{Wg}^{O(N)}((1);4)=& -N^{-5} +N^{-6} -21 N^{-7}+57N^{-8} -489 N^{-9} +2157 N^{-10}
-14077N^{-11}+ \cdots. \\
\mr{Wg}^{O(N)}((1);5)=& -N^{-6} +N^{-7} -33 N^{-8}+89N^{-9} -1117 N^{-10} +5237 N^{-11}
-45881N^{-12}+ \cdots.\\
\mr{Wg}^{O(N)}((1);6)=& -N^{-7} +N^{-8} -47 N^{-9}+125N^{-10} -2123 N^{-11} +10121 N^{-12}
-112551N^{-13}+ \cdots.
\end{align*}
\begin{align*}
\mr{Wg}^{O(N)}((2);3)=& 2N^{-5} -6N^{-6} +34 N^{-7}-126N^{-8} +546 N^{-9} -2142N^{-10}+ \cdots. \\
\mr{Wg}^{O(N)}((2);4)=& 2N^{-6} -6N^{-7} +64 N^{-8}-300N^{-9} +2094 N^{-10} -11682N^{-11}+ \cdots.\\
\mr{Wg}^{O(N)}((2);5)=& 2N^{-7} -6N^{-8} +98 N^{-9}-490N^{-10} +4694 N^{-11} -30382N^{-12}+ \cdots.\\
\mr{Wg}^{O(N)}((2);6)=& 2N^{-8} -6N^{-9} +136 N^{-10}-696N^{-11} +8590 N^{-12} -59850N^{-13}+ \cdots.
\end{align*}
\begin{align*}
\mr{Wg}^{O(N)}((1^2);4)=& N^{-6} -2N^{-7} +43 N^{-8}-216N^{-9} +1737 N^{-10} -10254N^{-11}+ \cdots. \\
\mr{Wg}^{O(N)}((1^2);5)=& N^{-7} -2N^{-8} +59 N^{-9}-280N^{-10} +3257 N^{-11} -21934N^{-12}+ \cdots. \\
\mr{Wg}^{O(N)}((1^2);6)=& N^{-8} -2N^{-9} +77 N^{-10}-350N^{-11} +5385 N^{-12} -37498N^{-13}+ \cdots.
\end{align*}
\begin{align*}
\mr{Wg}^{O(N)}((3);4)=& -5N^{-7}+29N^{-8} -258 N^{-9} +1590 N^{-10} -10695 N^{-11} + \cdots. \\
\mr{Wg}^{O(N)}((3);5)=& -5N^{-8}+29N^{-9} -370 N^{-10} +2630 N^{-11} -23815 N^{-12} + \cdots.\\
\mr{Wg}^{O(N)}((3);6)=& -5N^{-9}+29N^{-10}-492 N^{-11} +3738 N^{-12} -42019 N^{-13} + \cdots.
\end{align*}
\begin{align*}
\mr{Wg}^{O(N)}((2,1);5)=& -2N^{-8}+8N^{-9}-190N^{-10}+1460 N^{-11} - 15994 N^{-12}+ \cdots. \\
\mr{Wg}^{O(N)}((2,1);6)=& -2N^{-9}+8N^{-10}-236N^{-11}+1760 N^{-12} - 24254 N^{-13}+ \cdots.
\end{align*}
$$
\mr{Wg}^{O(N)}((1^3);6)= -N^{-9}+3N^{-10}-120N^{-11}+742N^{-12}-13023N^{-13}+\cdots.
$$
\begin{align*}
\mr{Wg}^{O(N)}((4);5)=& 14N^{-9}-130N^{-10}+1640 N^{-11} - 14740 N^{-12}+138578N^{-13}-\cdots. \\
\mr{Wg}^{O(N)}((4);6)=& 14N^{-10}-130N^{-11}+2060 N^{-12} - 20360 N^{-13}+232838N^{-14}-\cdots.
\end{align*}
$$
\mr{Wg}^{O(N)}((3,1);6)= 5N^{-10}-34N^{-11}+862N^{-12}-9096N^{-13}+126523N^{-14}+\cdots.
$$
$$
\mr{Wg}^{O(N)}((2,2);6)= 4N^{-10}-24N^{-11}+772N^{-12}-8436N^{-13}+121936N^{-14}+\cdots.
$$
$$
\mr{Wg}^{O(N)}((5);6)= -42N^{-11}+562N^{-12}-9426N^{-13}+114478N^{-14}-\cdots.
$$
\subsection{Open questions} \label{subsec:Open}
\begin{enumerate}
\item (cf. Corollary \ref{cor:SymPolyHecke}.)
It is known that the set $\{F(J_1,J_2,\dots,J_n) \ | \ F \in \mbb{S} \}$
coincides with the center $\mcal{Z}_n$ of the group algebra $\mathbb{C}[S_n]$.
Thus symmetric functions in Jucys-Murphy elements generate $\mcal{Z}_n$.
Now the following conjecture is natural.
\begin{conj}
The set $\{F(J_1,J_3,\dots,J_{2n-1}) \cdot P_n\ | \ F \in
\mbb{S} \}$ coincides with the Hecke algebra $\mcal{H}_n$.
\end{conj}
\item (cf. part 4 of Theorem \ref{thm:coefM}
and Examples in Subsection \ref{subsec:Examples}.)
We suggest the following conjecture.
\begin{conj}
Let $F$ be a symmetric function of degree $k$ and
let $\alpha$ be a positive real number.
Then, for each partition $\mu \vdash k$,
$\mathcal{A}^{(\alpha)}_\mu(F,n)$ is independent of both $\alpha$ and $n$.
In particular, for $\lambda,\mu \vdash k$,
$\mathcal{A}^{(\alpha)}_\mu(m_\lambda,n)=L^\lambda_\mu$
and $\mathcal{A}^{(\alpha)}_\mu(h_k,n)= \prod_{i=1}^{\ell(\mu)} \mr{Cat}_{\mu_i}$.
\end{conj}
\item (cf. Examples in Subsection \ref{subsec:Examples}.)
We suggest the following conjecture.
\begin{conj}\label{conj:SecondOrder}
Let $F$ be a homogeneous symmetric function of degree $k$ and
let $\alpha$ be a positive real number.
Then, for each partition $\mu \vdash k-1$,
$\mathcal{A}^{(\alpha)}_\mu(F,n)$ is independent of $n$
(but depends on $\alpha$).
\end{conj}
\item (cf. Theorem \ref{thm:ExpansionWg} and Subsection \ref{subsec:tableAsymWg}.)
Conjecture \ref{conj:SecondOrder} implies that $G^{|\mu|+1}_\mu(n)=\mathcal{A}^{(2)}_\mu(h_{|\mu|+1},n)$
is independent of $n$.
Can you evaluate the $G^{|\mu|+1}_\mu=G^{|\mu|+1}_\mu(n)$ explicitly?
From identities in Subsection \ref{subsec:tableAsymWg},
we can obtain
$$
G^{1}_{(0)} =0, \qquad G^{2}_{(1)}=1, \qquad
G^{3}_{(2)} =6, \qquad G^{3}_{(1^2)} =2,
$$
and conjecture
\begin{gather*}
G^{4}_{(3)}\stackrel{?}{=} 29, \qquad
G^{4}_{(2,1)}\stackrel{?}{=} 8,\qquad
G^{4}_{(1^3)}\stackrel{?}{=} 3,\\
G^{5}_{(4)}\stackrel{?}{=} 130, \qquad
G^{5}_{(3,1)}\stackrel{?}{=} 34, \qquad
G^{5}_{(2^2)}\stackrel{?}{=} 24, \qquad
G^6_{(5)} \stackrel{?}{=} 562.
\end{gather*}
Recall that the $n$-independent number $G^{|\mu|}_\mu=G^{|\mu|}_\mu(n)$
is the product of Catalan numbers.
How about $G^{|\mu|+1}_\mu$?
We could expect that $G^{|\mu|+1}_\mu$ has a good combinatorial interpretation.
For one-row partitions, we suggest the following conjecture.
\begin{conj}
Let $n,k$ be nonnegative integers such that $n>k$.
Then $G^{k+1}_{(k)}(n)$ is independent of $n$ and equal to
$$
4^k - \binom{2k+1}{k}.
$$
Equivalently,
$$
\mr{Wg}^{O(N)}((k);n) \stackrel{?}{=}
(-1)^k \mr{Cat}_k N^{-n-k} +(-1)^{k+1} \left( 4^k-\binom{2k+1}{k} \right) N^{-n-k-1} +\mr{O}(N^{-n-k-2}).
$$
\end{conj}
The number $4^k - \binom{2k+1}{k}$ is called
{\it the area of Catalan paths} of length $k$,
see \cite{CEF}.
Define the set $\mf{E}(k)$ by
$$
\mf{E}(k)=\left\{(i_1,i_2,\dots,i_k) \in \mathbb{Z}^k \ \Bigm| \
\begin{array}{l}
1 \le i_1 \le i_2 \le \cdots \le i_k \le k, \\
i_p \ge p \ (1 \le p \le k)
\end{array}
\right\}.
$$
It is known that
$$
\mr{Cat}_k = |\mf{E}(k)|, \qquad 4^k-\binom{2k+1}{k}
= \sum_{(i_1,i_2,\dots,i_k) \in \mf{E}(k)}
\sum_{p=1}^k (2(i_p-p)+1).
$$
\end{enumerate}
\end{document} |
\begin{document}
\title{On Homogeneous Landsberg Surfaces}
\begin{abstract}
In this paper, we prove that every homogeneous Landsberg surface has isotropic flag curvature. Using this special form of the flag curvature, we prove a rigidity result on homogeneous Landsberg surfaces. Indeed, we prove that every homogeneous Landsberg surface is Riemannian or locally Minkowskian. This gives a positive answer to Xu-Deng's well-known conjecture for 2-dimensional homogeneous Finsler manifolds.\\\\
{\bf {Keywords}}: Homogeneous Finsler surface, Landsberg metric, Berwald metric, flag curvature.\footnote{ 2000 Mathematics subject Classification: 53B40, 53C60.}
\end{abstract}
\section{Introduction}
Let $(M, F)$ be a Finsler manifold and $c: [a, b]\rightarrow M$ be a piecewise $C^\infty$ curve from $c(a)=p$ to $c(b)=q$. For every $u\in T_pM$, let us define $P_c:T_pM\rightarrow T_qM$ by $P_c(u):=U(b)$, where $U=U(t)$ is the parallel vector field along $c$ such that
$U(a)=u$. $P_c$ is called the parallel translation along $c$. In \cite{I}, Ichijy\={o} showed that if $F$ is a Berwald metric, then all tangent
spaces $(T_xM, F_x)$ are linearly isometric to each other. Let us consider the Riemannian metric ${\hat g}_x$ on $T_xM_0:=T_xM-\{0\}$ which is defined by
${\hat g}_x:=g_{ij}(x, y)\delta y^i\otimes \delta y^j$, where $g_{ij}:={1}/{2}[F^2]_{y^iy^j}$ is the fundamental tensor of $F$ and $\{\delta y^i:= dy^i+N^i_j dx^j\}$ is the natural coframe on $T_xM$ associated with the natural basis $\{{{\partial}rtial}/{{\partial}rtial x^i}|_x\}$ for $T_xM$. If $F$ is a Landsberg metric, then for any $C^\infty$ curve $c$, $P_c$ preserves the induced Riemannian metrics on the tangent spaces, i.e., $P_c:(T_pM, {\hat g}_p)\rightarrow (T_qM, {\hat g}_q)$ is an isometry. By definition, every Berwald metric is a Landsberg metric, but the converse may not hold.
In 1996, Matsumoto found a list of rigidity results which almost suggest that such a pure Landsberg metric (non-Berwaldian metric) does not exist \cite{Mat96}. In 2003, Matsumoto emphasized this problem again and looked at it as the most important open problem in Finsler geometry.
It is a long-existing open problem in Finsler geometry to find Landsberg metrics which are not Berwaldian. Bao called such metrics unicorns in
Finsler geometry, mythical single-horned horse-like creatures that exist in legend but have never been seen by human beings \cite{Bao2}. There are a lot of unsuccessful attempts
to find explicit examples of unicorns. In \cite{Szabo2}, Szab\'{o} made an argument to prove that any regular Landsberg metric must be of Berwald type. But unfortunately, there is a little gap in Szab\'{o}'s argument. As pointed out in Szab\'{o}'s correction to \cite{Szabo2}, his argument only applies to the so-called dual Landsberg spaces. Hence, the unicorn problem remains open in Finsler geometry. Taking into account of so many unsuccessful efforts of
many researchers, one can conclude that unicorn problem is becoming more and more puzzling.
The unicorn problem in Finsler geometry is well-studied. However, up to now, very little attention has been paid to the subject of homogeneous Finsler metrics. A Finsler manifold $(M, F)$ is said to be homogeneous if its group of isometries acts transitively on $M$. In \cite{TN1}, the authors consider the unicorn problem in the class of homogeneous $(\alpha, \beta)$-metrics. We proved that every homogeneous $(\alpha, \beta)$-metric is a stretch metric if and only if it is a Berwald metric. In \cite{XD}, Xu-Deng introduced a generalization of $(\alpha,\beta)$-metrics,
called $(\alpha_1,\alpha_2)$-metrics. Let $(M, \alpha)$ be an $n$-dimensional Riemannian manifold. Then one can define an $\alpha$-orthogonal decomposition of the tangent bundle by $TM=\mathcal{V}_1\oplus\mathcal{V}_2$,
where $\mathcal{V}_1$ and $\mathcal{V}_2$ are two linear subbundles with dimensions $n_1$ and $n_2$ respectively, and
$\alpha_i=\alpha|_{\mathcal{V}_i}$ $i=1,2$ are naturally viewed as functions on $TM$.
An $(\alpha_1,\alpha_2)$-metric on $M$ is a Finsler metric $F$ which can be written as
$F=\sqrt{L(\alpha_1^2,\alpha_2^2)}$. An $(\alpha_1,\alpha_2)$-metric can also be represented as
$F=\alpha\phi(\alpha_2/\alpha)=\alpha\psi(\alpha_1/\alpha)$, in which
$\phi(s)=\psi(\sqrt{1-s^2})$. They proved that every Landsberg $(\alpha_1,\alpha_2)$-metric reduces to a Berwald metric. This result shows that the search for a unicorn cannot succeed even in the very broad class of $(\alpha_1,\alpha_2)$-metrics. Then, Xu-Deng conjectured the following:
\begin{con}{\rm (\cite{XD})}
A homogeneous Landsberg space must be a Berwald space.
\end{con}
Taking a look at the rigidity theorems in Finsler geometry, one can find that results of this type in dimension two differ from those in higher dimensions. For example, in \cite{Sz} Szab\'{o} proved that any connected Berwald surface is locally Minkowskian or Riemannian. In \cite{BCS}, Bao-Chern-Shen proved a rigidity result for compact Landsberg surfaces: they showed that a compact Landsberg surface with non-positive flag curvature is locally Minkowskian or Riemannian. Therefore, we consider the issue of unicorns for homogeneous Finsler surfaces. In this paper, we prove the following rigidity result.
\begin{thm}\label{MainTHM1}
Any homogeneous Landsberg surface is Riemannian or locally Minkowskian.
\end{thm}
This result tells unicorn hunters that they cannot hope to see such a creature in the jungle of homogeneous Finsler surfaces.
In order to prove Theorem \ref{MainTHM1}, we consider the flag curvature of Landsberg surface and prove the following rigidity result.
\begin{thm}\label{MainTHM2}
Every homogeneous Landsberg surface has isotropic flag curvature.
\end{thm}
\section{Preliminaries}\label{sectionP}
Let $(M, F)$ be an $n$-dimensional Finsler manifold, and $TM$ be its tangent bundle. We denote the slit tangent bundle of $M$ by $TM_0$, i.e., $T_xM_0=T_xM-\{0\}$ at every $x\in M$. The fundamental tensor $\textbf{g}_y:T_xM\times
T_xM\rightarrow \mathbb{R}$ of $F$ is defined by following
\[
\textbf{g}_{y}(u,v):={1 \over 2}\frac{{\partial} ^2}{{\partial} s {\partial} t} \Big[ F^2 (y+su+tv)\Big]|_{s,t=0}, \ \
u,v\in T_xM.
\]
Let $x\in M$ and $F_x:=F|_{T_xM}$. To measure the
non-Euclidean feature of $F_x$, define ${\bf C}_y:T_xM\times T_xM\times
T_xM\rightarrow \mathbb{R}$ by
\[
{\bf C}_{y}(u,v,w):={1 \over 2} \frac{d}{dt}\Big[\textbf{g}_{y+tw}(u,v)
\Big]|_{t=0}, \ \ u,v,w\in T_xM.
\]
The family ${\bf C}:=\{{\bf C}_y\}_{y\in TM_0}$ is called the Cartan torsion. By definition, ${\bf C}_y$ is a symmetric trilinear form on $T_xM$. It is well known that ${\bf{C}}=0$ if and only if $F$ is Riemannian.
Let $(M, F)$ be a Finsler manifold. For $y\in T_x M_0$, define ${\bf I}_y:T_xM\rightarrow \mathbb{R}$
by
\[
{\bf I}_y(u)=\sum^n_{i=1}g^{ij}(y) {\bf C}_y(u, \partial_i, \partial_j),
\]
where $\{\partial_i\}$ is a basis for $T_xM$ at $x\in M$. The family
${\bf I}:=\{{\bf I}_y\}_{y\in TM_0}$ is called the mean Cartan torsion. By definition, ${\bf I}_y(u):=I_i(y)u^i$, where $I_i:=g^{jk}C_{ijk}$. By Deicke's theorem, every positive-definite Finsler metric
$F$ is Riemannian if and only if ${\bf I}=0$.
Given a Finsler manifold $(M, F)$, a global vector field ${\bf G}$ is induced by $F$ on $TM_0$, which in a standard coordinate system $(x^i,y^i)$ for $TM_0$ is given by ${\bf G}=y^i {\partial / \partial x^i}-2G^i(x,y){\partial/ \partial y^i}$, where $G^i=G^i(x, y)$ are scalar functions on $TM_0$ given by
\[
G^i:=\frac{1}{4}g^{ij}\Bigg\{\frac{\partial^2[F^2]}{\partial x^k
\partial y^j}y^k-\frac{\partial[F^2]}{\partial x^j}\Bigg\},\ \
y\in T_xM.\label{G}
\]
The vector field ${\bf G}$ is called the spray associated with $(M, F)$.
For $y \in T_xM_0$, define ${\bf B}_y:T_xM\times T_xM \times T_xM\rightarrow T_xM$ by ${\bf B}_y(u, v, w):=B^i_{\ jkl}(y)u^jv^kw^l{{\partial } \over {\partial x^i}}|_x$ where
\[
B^i_{\ jkl}:={{\partial^3 G^i} \over {\partial y^j \partial y^k \partial y^l}}.
\]
The quantity $\bf B$ is called the Berwald curvature of the Finsler metric $F$. We call a Finsler metric $F$ a Berwald metric, if ${\bf{B}}=0$.
Define the mean of Berwald curvature by ${\bf E}_y:T_xM\times T_xM \rightarrow \mathbb{R}$, where
\[
{\bf E}_y (u, v) := {1\over 2} \sum_{i=1}^n g^{ij}(y) g_y \Big ( {\bf B}_y (u, v, e_i ) , e_j \Big ).
\]
The family ${\bf E}=\{ {\bf E}_y \}_{y\in TM\setminus\{0\}}$ is called the {\it mean Berwald curvature} or {\it E-curvature}.
In local coordinates, ${\bf E}_y(u, v):=E_{ij}(y)u^iv^j$, where
\[
E_{ij}:=\frac{1}{2}B^m_{\ mij}.
\]
The quantity $\bf E$ is called the mean
Berwald curvature. $F$ is called a weakly Berwald metric if ${\bf{E}}=0$. Also, define ${\bf H}_y:T_xM\otimes T_xM \rightarrow \mathbb{R}$ by ${\bf H}_y(u,v):=H_{ij}(y)u^iv^j$, where
\[
H_{ij}:= E_{ij|s} y^s.
\]
Then ${\bf H}_y$ is defined as the covariant derivative of ${\bf E}$ along geodesics.
For a non-zero vector $y \in T_xM_0$, define ${\bf D}_y:T_xM\otimes T_xM \otimes T_xM\rightarrow T_xM$
by ${\bf D}_y(u,v,w):=D^i_{\ jkl}(y)u^jv^kw^l\frac{\partial}{\partial x^i}|_{x}$, where
\[
D^i_{\ jkl}:=\frac{\partial^3}{\partial y^j\partial y^k\partial y^l}\Bigg[G^i-\frac{2}{n+1}\frac{\partial G^m}{\partial y^m} y^i\Bigg].\label{Douglas1}
\]
$\bf D$ is called the Douglas curvature. $F$ is called a Douglas metric if $\bf{D}=0$. According to the definition, the Douglas tensor can be written as follows
\[
D^i_{\ jkl}=B^i_{\ jkl}-\frac{2}{n+1}\Big\{E_{jk}\delta^i_{\ l}+E_{kl}\delta^i_{\ j}+E_{lj}\delta^i_{\ k}+E_{jk,l}y^i\Big\}.
\]
For $y\in T_xM$, define the Landsberg curvature ${\bf L}_y:T_xM\times T_xM \times T_xM\rightarrow \mathbb{R}$ by
\[
{\bf L}_y(u, v,w):=-\frac{1}{2}{\bf g}_y\big({\bf B}_y(u, v, w), y\big).
\]
$F$ is called a Landsberg metric if ${\bf L}_y=0$. By definition, every Berwald metric is a Landsberg metric.
Let $(M, F)$ be a Finsler manifold. For $y\in T_x M_0$, define ${\bf J}_y:T_xM\rightarrow \mathbb{R}$
by
\[
{\bf J}_y(u)=\sum^n_{i=1}g^{ij}(y) {\bf L}_y(u, \partial_i, \partial_j).
\]
The quantity $\bf J$ is called the mean Landsberg curvature or J-curvature of the Finsler metric $F$.
A Finsler metric $F$ is called a weakly Landsberg metric if ${\bf J}_y=0$. By definition, every Landsberg metric is a weakly Landsberg metric.
The mean Landsberg curvature can also be defined as follows:
\[
J_i: = y^m {{\partial} I_i \over {\partial} x^m} -I_m {{\partial} G^m\over {\partial} y^i} - 2 G^m {{\partial} I_i \over {\partial} y^m}.
\]
By definition, we get
\begin{eqnarray*}
{\bf J}_y (u):= {d\over dt} \Big [ {\bf I}_{\dot{\sigma}(t) } \big ( U(t) \big )\Big ]_{t=0},
\end{eqnarray*}
where $y\in T_xM$, $\sigma=\sigma(t)$ is the geodesic with $\sigma(0)=x$, $\dot{\sigma}(0)=y$, and $U(t)$ is a linearly parallel vector field along $\sigma$ with
$U(0)=u$. The mean Landsberg curvature ${\bf J}_y$ is the rate of change of ${\bf I}_y$ along geodesics
for any $y\in T_xM_0$.
For an arbitrary non-zero vector $y \in T_xM_{0}$, the Riemann curvature is a linear
transformation $\textbf{R}_y: T_xM \rightarrow T_xM$ with homogeneity ${\bf R}_{\lambda y}=\lambda^2 {\bf R}_y$,
$\forall \lambda>0$, which is defined by
$\textbf{R}_y(u):=R^i_{k}(y)u^k {\partial / \partial x^i}$, where
\begin{equation}
R^i_{k}(y)=2{\partial G^i \over \partial x^k}-{\partial^2 G^i \over
\partial x^j \partial y^k}y^j+2G^j{\partial^2 G^i \over
\partial y^j \partial y^k}-{\partial G^i \over \partial
y^j}{\partial G^j \over \partial y^k}.\label{Riemannx}
\end{equation}
The family $\textbf{R}:=\{\textbf{R}_y\}_{y\in TM_0}$ is called the Riemann curvature of the Finsler manifold $(M, F)$.
For a flag $P:={\rm span}\{y, u\} \subset T_xM$ with flagpole $y$, the flag curvature ${\bf
K}={\bf K}(x, y, P)$ is defined by
\begin{equation}
{\bf K}(x, y, P):= {{\bf g}_y \big(u, {\bf R}_y(u)\big) \over {\bf g}_y(y, y) {\bf g}_y(u,u)
-{\bf g}_y(y, u)^2 }.\label{FC0}
\end{equation}
The flag curvature ${\bf K}(x, y, P)$ is a function of tangent planes $P={\rm span}\{ y, v\}\subset T_xM$. This quantity tells us how curved space is at a point. A Finsler metric $F$ is of scalar flag curvature if $\textbf{K} = \textbf{K}(x, y, P)$ is independent of flags $P$ containing $y\in T_xM_0$.
\section{Proof of Theorems}
In this section, we are going to prove Theorems \ref{MainTHM1} and \ref{MainTHM2}. In order to prove Theorem \ref{MainTHM1}, first we consider the flag curvature of homogeneous Landsberg surface. More precisely, we prove Theorem \ref{MainTHM2}. For this aim, we need some useful Lemmas as follows.
In \cite{LR}, Latifi-Razavi proved that every homogeneous Finsler manifold is forward geodesically complete. In \cite{TN1}, Tayebi-Najafi improved their result and proved the following.
\begin{lem}{\rm (\cite{TN2})}\label{lem1}
Every homogeneous Finsler manifold is complete.
\end{lem}
By definition, every two points of a homogeneous Finsler manifold $(M, F)$ are mapped to each other by an isometry. Hence the norm of any tensor invariant under the isometries of a homogeneous Finsler manifold is a constant function on $M$ and, consequently, bounded. Then, we conclude the following.
\begin{lem}{\rm (\cite{TN1})}\label{lem2}
Let $(M, F)$ be a homogeneous Finsler manifold. Then, every invariant tensor under the isometries of $F$ has a bounded norm with respect to $F$.
\end{lem}
\noindent
{\bf Proof of Theorem \ref{MainTHM2}:} We first deal with Finsler surfaces. The special and useful Berwald frame was introduced and developed by Berwald \cite{B}. Let $(M, F)$ be a two-dimensional Finsler manifold. One can define a local field of orthonormal frames $(\ell^i,m^i)$, called the Berwald frame, where $\ell^i=y^i/F$, $m^i$ is the unit vector with $\ell_i m^i=0$, $\ell_i=g_{ij}\ell^j$, and $g_{ij}$ is given by $g_{ij}=\ell_i\ell_j+m_im_j$. In \cite{BM}, it is proved that the Douglas curvature of the Finsler surface $(M, F)$ is given by
\[
D^i_{\ jkl} = -\frac{1}{ 3F^2}\Big(6I_{,1}+ I_{2|2} + 2II_2\Big)m_jm_km_ly^i.
\]
We rewrite this equivalently as
\begin{equation}
{\bf D}_y(u,v,w)={\bf T}(u, v, w) y\label{GDW1}
\end{equation}
where ${\bf T}(u, v, w):=T_{ijk}u^iv^jw^k$ and $T_{ijk}:=-{1}/(3F^2)(6I_{,1}+ I_{2|2} + 2II_2)m_im_jm_k$. It is easy to see that ${\bf T}$ is a symmetric Finslerian tensor field satisfying
\[
{\bf T}(y, v, w)=0.
\]
Let us denote the Berwald connection of $F$ by $D$. The horizontal and vertical derivatives of a Finsler tensor field are denoted by ``$D_{u}$'' and ``$D_{\dot u}$'' respectively. Taking a horizontal derivative of \eqref{GDW1} along Finslerian geodesics implies that
\begin{equation}
D_0{\bf D}_y(u,v,w)=D_0{\bf T}(u, v, w)y,\label{GDW1.5}
\end{equation}
where $D_0:=D_iy^i$. Let us define ${\bf h}_y:T_xM\to T_xM$ by
\[
{\bf h}_y(u)=u-{1 \over F^2 }{\bf g}_y(u,y)y.
\]
Since ${\bf h}_y(y)=0$, it follows from \eqref{GDW1.5} that
\begin{equation}
{\bf h}_y\big(D_0{\bf D}_y(u,v,w)\big)=0.\label{GDW2}
\end{equation}
On the other hand, the Douglas tensor of $F$ is given by
\begin{equation}
{\bf D}_y(u,v,w)={\bf B}_y(u,v,w)-\frac{2}{3}\Big\{{\bf E}_y(v, w)u+{\bf E}_y(w, u)v+{\bf E}_y(u, v)w+(D_{\dot u}{\bf E}_y)(v,w)y\Big\}.\label{GD2}
\end{equation}
Then
\begin{equation}
{\bf h}_y\big(D_0{\bf D}_y(u, v, w)\big)={\bf h}_y\big(D_0{\bf B}_y(u, v, w)\big)-\frac{2}{3}\Big\{{\bf H}_y(u,v) {\bf h}_y(w)+{\bf H}_y(v, w) {\bf h}_y(u)+{\bf H}_y(w, u) {\bf h}_y(v)\Big\}.\label{GD3}
\end{equation}
Let us define
\[
\tilde{\bf B}_y:=D_0{\bf B}_y.
\]
Indeed, $\tilde{\bf B}_y$ is the horizontal derivative of Berwald curvature along Finsler geodesics. By (\ref{GDW2}) and (\ref{GD2}), we get
\begin{equation}
{\bf h}_y\big(\tilde{\bf B}_y(u, v, w)\big)=\frac{2}{3}\Big\{{\bf H}_y(u,v) {\bf h}_y(w)+{\bf H}_y(v, w) {\bf h}_y(u)+{\bf H}_y(w, u) {\bf h}_y(v)\Big\}.\label{GD4a}
\end{equation}
Using $D_i{\bf h}=0$ yields
\begin{equation}
{\bf h}_y\big(D_i\tilde{\bf B}_y(u, v, w)\big)=\frac{2}{3}\Big\{D_i{\bf H}_y(u,v) {\bf h}_y(w)+D_i{\bf H}_y(v, w) {\bf h}_y(u)+D_i{\bf H}_y(w, u) {\bf h}_y(v)\Big\}.\label{GD5}
\end{equation}
Using $g_y({\bf B}_y(u,v,w),y)=-2{\bf L}_y(u,v,w)$, we get
\begin{eqnarray}\label{GD7}
D_i\big( {\bf h}_y\tilde{\bf B}_y(u, v, w)\big)\!\!\!\!&=&\!\!\!\! {\bf h}_y\big(D_i\tilde{\bf B}_y(u, v, w)\big)\nonumber\\
\!\!\!\!&=&\!\!\!\! D_iD_0\big( {\bf h}_y{\bf B}_y(u, v, w)\big)\nonumber\\
\!\!\!\!&=&\!\!\!\! D_iD_0\Big ({\bf B}_y(u, v, w)-{1 \over F^2}{\bf g}_y\big({\bf B}_y(u, v, w), y\big)y\Big )\nonumber\\
\!\!\!\!&=&\!\!\!\! D_i\tilde{\bf B}_y(u, v, w)+{2 \over F^2}D_iD_0{\bf L}_y(u,v,w)y.
\end{eqnarray}
By (\ref{GD5}), (\ref{GD7}), and ${\bf L}=0$, we obtain
\begin{equation}
D_i\tilde{\bf B}_y(u, v, w)=\frac{2}{3}\Big\{D_i{\bf H}_y(u,v) {\bf h}_y(w)+D_i{\bf H}_y(v, w) {\bf h}_y(u)+D_i{\bf H}_y(w, u) {\bf h}_y(v)\Big\}.\label{GD1}
\end{equation}
The relation \eqref{GD1} yields
\begin{eqnarray}
D_h\tilde{\bf B}_y(u, v, \partial_k)-D_k\tilde{\bf B}_y(u, v, \partial_h)&=&\frac{2}{3}\Big\{D_h{\bf H}_y(u,v) {\bf h}_y(\partial_k)-D_k{\bf H}_y(u,v) {\bf h}_y(\partial_h)\Big\}\nonumber\\
&+&\frac{2}{3}\Big\{\big(D_h{\bf H}_y(v, \partial_k)-D_k{\bf H}_y(v, \partial_h) \big) {\bf h}_y(u)\Big\}\nonumber\\
&+&\frac{2}{3}\Big\{\big(D_h{\bf H}_y(\partial_k, u)-D_k{\bf H}_y(\partial_h, u)\big) {\bf h}_y(v)\Big\}.\label{GD1b}
\end{eqnarray}
By definition, we have $tr(\tilde{\bf B})=2{\bf H}$ and $tr({\bf h})=1$.
Then, (\ref{GD1b}) implies that
\begin{eqnarray}
D_h{\bf H}_y(u,\partial_k)-D_k{\bf H}_y(u,\partial_h)=2\Big\{D_h{\bf H}_y(u,\partial_k)-D_k{\bf H}_y(u,\partial_h)\Big\},\label{GD11}
\end{eqnarray}
which yields
\begin{equation}
D_h{\bf H}_y(u,\partial_k)=D_k{\bf H}_y(u,\partial_h).\label{GD12}
\end{equation}
Contracting (\ref{GD12}) with $y^h$ and using $D_k{\bf H}_y(u,y)=0$, we get
\begin{equation}
D_0{\bf H}_y(u,w)=0.\label{GD13}
\end{equation}
Take an arbitrary unit vector $y\in T_xM$ and an arbitrary vector $v\in T_xM$. Let $c(t)$ be the geodesic with $\dot c(0)=y$ and $U=U(t)$ the parallel vector field along $c$ with $U(0)=v$. In order to avoid clutter, we put
\begin{equation}
{\bf E}(t)={\bf E}_{\dot c}(U(t),U(t)), \ \ \ \ \ {\bf H}(t)={\bf H}_{\dot c}(U(t),U(t)).\label{GD14}
\end{equation}
From the definition of ${\bf H}_y$, we have
\begin{equation}
{\bf H}(t)={\bf E}^{'}(t).\label{GD15x}
\end{equation}
By (\ref{GD13}) we have ${\bf H}^{'}(t)=0$ which implies that
\begin{equation}
{\bf H}(t)={\bf H}(0).\label{GD15xx}
\end{equation}
Then by (\ref{GD15x}) and (\ref{GD15xx}), we get
\begin{equation}
{\bf E}(t)={\bf H}(0)t+{\bf E}(0).\label{GD10}
\end{equation}
Since ${\bf E}(t)$ is a bounded function on $[0,\infty)$, then letting $ t\rightarrow +\infty $ or $ t\rightarrow -\infty $ implies that
\[
{\bf H}_y(v,v)={\bf H}(0)=0.
\]
Therefore ${\bf H}=0$. According to Akbar-Zadeh's theorem every Finsler metric $F=F(x, y)$ of scalar flag curvature ${\bf K}={\bf K}(x, y)$
on an $n$-dimensional manifold $M$ has isotropic flag curvature ${\bf K}={\bf K}(x)$ if and only if $\textbf{H}=0$ \cite{AZ}. Every Finsler surface has scalar flag curvature ${\bf K}={\bf K}(x, y)$. Then by Akbar-Zadeh theorem, we get ${\bf K}={\bf K}(x)$.
\qed
Now, we can prove Theorem \ref{MainTHM1}.
\noindent
{\bf Proof of Theorem \ref{MainTHM1}:} Let $(M, F)$ be a homogeneous Landsberg surface and fix a point $x\in M$. Suppose that $y=y(t)$ is a unit speed parametrization of indicatrix of $M$ at $x$. We know that the curvature along $y(t)$ is completely determined by the Cartan scalar of $F$, i.e., we have
\[
{\bf K}(t)={\bf K}(0)\ e^{\int_0^tI(s)ds}.
\]
Thus either ${\bf K}(t)$ vanishes everywhere or it is non-zero everywhere and ${\bf K}(t)$ has the same sign as the sign of ${\bf K}(0)$. On the other hand, for homogeneous Finsler surfaces the flag curvature is a bounded scalar function on $SM$. Suppose that $\lambda_1\leq {\bf K}(t)\leq \lambda_2$. In this case, we have
\[
e^{\lambda_1 t}\leq C(0)e^{\int_0^t {\bf K}(s)ds}\leq e^{\lambda_2 t}.
\]
Suppose that $C(0)\neq 0$. Then we consider the following two cases:\\\\
{\bf Case 1:} If $\lambda_1$ and $\lambda_2$ are positive, then letting $t\to \infty$ implies that $C(t)$ is unbounded, which is a contradiction.\\\\
{\bf Case 2:} If $\lambda_1$ and $\lambda_2$ are negative, then letting $t\to -\infty$ implies that $C(t)$ is unbounded, which is a contradiction.\\\\
Thus, every homogeneous Landsberg surface is Riemannian or flat. On the other hand, by Akbar-Zadeh's theorem any positively complete Finsler metric with zero flag curvature must be locally Minkowskian if the first and second Cartan torsions are bounded \cite{AZ}. For the homogeneous Finsler metrics, the first and second Cartan torsions are bounded. Then in this case, $F$ reduces to a locally Minkowskian metric. This completes the proof.
\qed
It is worth mentioning that, in general, every Landsberg metric of non-zero scalar flag curvature is Riemannian, provided that its dimension is greater than two. Theorem \ref{MainTHM1} is a Numata-type theorem for homogeneous Finsler surfaces.
\begin{cor}
Let $(M, F)$ be a homogeneous Finsler surface of non-positive flag curvature. Then $F$ is a Landsberg metric if and only if it has isotropic flag curvature. In this case, $F$ is Riemannian or locally Minkowskian.
\end{cor}
\begin{proof}
According to Theorem 8.1 of \cite{BCS}, every geodesically complete Finsler surface of non-positive isotropic flag curvature ${\bf K}(x)\leq 0$ and bounded Cartan scalar is a Landsberg metric. Then, by Theorem \ref{MainTHM1}, we get the proof.
\end{proof}
In \cite{1924}, L. Berwald introduced a non-Riemannian curvature so-called stretch curvature and denoted it by ${\bf \Sigma}_y$. He showed that this tensor vanishes if and only if the length of a vector remains unchanged under the parallel displacement along an infinitesimal parallelogram.
\begin{cor}
Every homogeneous stretch surface is Riemannian or locally Minkowskian.
\end{cor}
\begin{proof}
Every Landsberg metric is a stretch metric. In \cite{TN1}, it is proved that every homogeneous stretch metric is a Landsberg metric. Then, by Theorem \ref{MainTHM1}, we get the proof.
\end{proof}
In \cite{BF}, Bejancu-Farran introduced a new class of Finsler metrics, called generalized Landsberg metrics. This class of Finsler metrics contains the class of Landsberg metrics as a special case. A Finsler metric $F$ on a manifold $M$ is called a generalized Landsberg metric if the Riemannian curvature tensors of the Berwald and Chern connections coincide.
\begin{cor}
Every homogeneous generalized Landsberg surface is Riemannian or locally Minkowskian.
\end{cor}
\begin{proof}
By definition, we have
\begin{equation}
L^i_{\ jl|k}-L^i_{\ jk|l}+L^i_{\ sk}L^s_{\ jl}-L^i_{\ sl}L^s_{\ jk}=0,\label{GL}
\end{equation}
where ``$|$" denotes the horizontal derivative with respect to the Berwald connection of $F$. By \eqref{GL}, we get
\begin{eqnarray}
&&L_{isk}L^s_{\ jl}-L_{isl}L^s_{\ jk}=0,\label{GL4} \\
&&L_{ijl|k}-L_{ijk|l}=0.\label{GL5}
\end{eqnarray}
The Landsberg curvature of a Finsler surface satisfies
\begin{equation}\label{B3b}
L_{jkl}+\mu FC_{jkl}=0,
\end{equation}
where $\mu:=-{4I_{,1}}/{I}$. By \eqref{GL4} and \eqref{B3b}, we get
\begin{eqnarray}
\mu F\Big\{C_{isk}C^s_{\ jl}-C_{isl}C^s_{\ jk}\Big\}=0.\label{GL6}
\end{eqnarray}
We have two cases: If $C_{isk}C^s_{\ jl}-C_{isl}C^s_{\ jk}=0$, then the vv-curvature vanishes. In \cite{Sc}, Schneider proved that the vv-curvature vanishes if and only if $F$ is Riemannian. If $\mu=0$, then by \eqref{B3b} it follows that $F$ is a Landsberg metric. By Theorem \ref{MainTHM1}, we get the proof.
\end{proof}
Let us define ${\bf \tilde J}= {\tilde J}_{ij}dx^i\otimes dx^j$ by
\begin{equation}
{\bf \tilde J}:=\big(J_{i,j}+J_{j,i}\big)_{|m}y^m.\label{X1}
\end{equation}
In \cite{X}, Xia proved that every $n$-dimensional compact Finsler manifold with ${\bf \tilde J}=2{\bf \tilde H}$ is a weakly Landsberg metric. Here, we prove the following.
\begin{cor}\label{Prop1}
Every homogeneous Finsler surface satisfying ${\bf \tilde J}=2{\bf \tilde H}$ is Riemannian or locally Minkowskian.
\end{cor}
\begin{proof}
The following Bianchi identity holds:
\begin{equation}
H_{ij}:=\frac{1}{2}\big(J_{i,j}+J_{j,i}-(I_{i,j})_{|p}y^p\big)_{|m}y^m.\label{X2}
\end{equation}
See \cite{X}. By \eqref{X1} and \eqref{X2}, we get $(I_{i,j})_{|p}y^p=0$ and contracting it with $y^j$ yields
\begin{equation}
J_{i|p}y^p=0.\label{X3}
\end{equation}
For any geodesic $c=c(t)$ and any parallel vector field $U=U(t)$ along $c$, let us put
\[
{\bf I}(t)={\bf I}_{\dot c}\big(U(t),U(t), U(t)\big), \ \ \ \ {\bf J}(t)={\bf J}_{\dot c}\big(U(t),U(t), U(t)\big).
\]
Thus, we have
\begin{equation}
{\bf J}(t)={\bf I}^{'}(t).\label{GD15}
\end{equation}
Integrating \eqref{X3} implies that
\[
{\bf I}(t)={\bf J}(0)t+{\bf I}(0).
\]
Every homogeneous manifold $ M $ is complete and the parameter $t$ takes all values in $ (-\infty,+\infty) $. Letting $ t\rightarrow +\infty $ or $ t\rightarrow -\infty $, we see that $ \textbf{I}(t) $ would be unbounded, which is a contradiction. Therefore $ \textbf{J}(0)={\bf J}(t)=0 $. On the other hand, every Finsler surface is C-reducible:
\begin{equation}
{\bf C}_y(u, v, w)= \frac{1}{3}\Big\{{\bf I}_y(u){\bf h}_y(v, w)+{\bf I}_y(v){\bf h}_y(u, w)+{\bf I}_y(w){\bf h}_y(u, v) \Big\}.\label{CR}
\end{equation}
Taking a horizontal derivative of \eqref{CR} yields
\begin{equation}
{\bf L}_y(u, v, w)= \frac{1}{3}\Big\{{\bf J}_y(u){\bf h}_y(v, w)+{\bf J}_y(v){\bf h}_y(u, w)+{\bf J}_y(w){\bf h}_y(u, v) \Big\}.\label{LR}
\end{equation}
Putting ${\bf J}=0$ in \eqref{LR} implies that ${\bf L}=0$. By Theorem \ref{MainTHM1}, we get the proof.
\end{proof}
\begin{thebibliography}{99}
\bibitem{An} P. L. Antonelli, {\it Handbook of Finsler Geometry}, Kluwer Academic Publishers, 2005.
\bibitem{AZ} H. Akbar-Zadeh, {\it Sur les espaces de Finsler \'{a} courbures sectionnelles constantes}, Bull. Acad. Roy. Bel. Cl, Sci, 5e S\'{e}rie
- Tome LXXXIV (1988), 281-322.
\bibitem{BM} S. B\'{a}cs\'{o} and M. Matsumoto, {\it On Finsler spaces of Douglas type, A generalization of notion of Berwald space}, Publ. Math.
Debrecen. \textbf{51}(1997), 385-406.
\bibitem{Bao2} D. Bao, {\it On two curvature-driven problems in Riemann-Finsler geometry}, Adv. Stud. Pure. Math. {\bf 48}(2007), 19-71.
\bibitem{BCS} D. Bao, S. S. Chern and Z. Shen, {\it Rigidity issues on Finsler surfaces}, Rev. Roumaine Math. Pures Appl. {\bf 42}(1997), 707-735.
\bibitem{BF} A. Bejancu and H. Farran, {\it Generalized Landsberg manifolds of scalar curvature}, Bull. Korean. Math. Soc. \textbf{37}(2000), 543-550.
\bibitem{B} L. Berwald, {\it On Cartan and Finsler Geometries, III, Two Dimensional Finsler Spaces with Rectilinear Extremal}, Ann. of Math., {\bf 42} No. 2 (1941), 84--112.
\bibitem{1924} L. Berwald, {\it \"{U}ber Parallel\"{u}bertragung in R\"{a}umen mit allgemeiner Massbestimmung}, Jber. Deutsch. Math.-Verein. {\bf 34}(1925), 213-220.
\bibitem{ChSh3} X. Cheng and Z. Shen, {\it A class of Finsler metrics with isotropic S-curvature}, Israel J. Math. {\bf 169}(2009), 317-340.
\bibitem{DH1} S. Deng and Z. Hu, {\it Homogeneous Finsler spaces of negative curvature}, J. Geom. Phys, {\bf 57}(2007), 657-664.
\bibitem{I} Y. Ichijy\={o}, {\it Finsler spaces modeled on a Minkowski space}, J. Math. Kyoto. Univ. {\bf 16}(1976), 639-652.
\bibitem{HD} Z. Hu and S. Deng, {\it Homogeneous Randers spaces with isotropic S-curvature and positive flag curvature}, Math. Z.
{\bf 270}(2012), 989-1009.
\bibitem{LR} D. Latifi and A. Razavi, {\it On homogeneous Finsler spaces}, Rep. Math. Phys. {\bf 57}(2006), 357-366.
\bibitem{Mat96} M. Matsumoto, {\it Remarks on Berwald and Landsberg spaces}, Contemp. Math. {\bf 196}(1996), 79-82.
\bibitem{Szabo2} Z. I. Szab\'{o}, {\it All regular Landsberg metrics are Berwald}, Ann. Glob. Anal. Geom. {\bf 34}(2008), 381-386; correction, ibid,
{\bf 35}(2009), 227-230.
\bibitem{Sz} Z. I. Szab\'{o}, {\it Positive definite Berwald spaces. Structure theorems on Berwald spaces}, Tensor (N.S.), {\bf 35}(1981), 25-39.
\bibitem{TN1} A. Tayebi and B. Najafi, {\it A class of homogeneous Finsler metrics}, J. Geom. Phys, {\bf 140}(2019), 265-270.
\bibitem{TN2} A. Tayebi and B. Najafi, {\it On homogeneous isotropic Berwald metrics}, European J Math, https://doi.org/10.1007/s40879-020-00401-4.
\bibitem {Sc} R. Schneider, {\it \"{U}ber die Finsler\"{a}ume mit $S_{ijkl} =0$}, Arch. Math. {\bf 19} (1968), 656-658.
\bibitem {X} Q. Xia, {\it Some results on the non-Riemannian quantity ${\bf H}$ of a Finsler metric}, Int. J. Math. {\bf 22}(2011), 925-936.
\bibitem{XD} M. Xu and S. Deng, {\it The Landsberg equation of a Finsler space}, Annali della Scuola Normale Superiore di Pisa, Classe di Scienze, DOI: 10.2422/2036-2145.201809$\_$015.
\bibitem {XDn} M. Xu and S. Deng, {\it Normal homogeneous Finsler spaces}, Transform. Groups. {\bf 22}(2017), 1143-1183.
\bibitem {XDr} M. Xu and S. Deng, {\it $(\alpha_1, \alpha_2)$-spaces and Clifford-Wolf homogeneity}, arXiv:1401.0472.
\end{thebibliography}
\noindent
Akbar Tayebi\\
Department of Mathematics, Faculty of Science\\
University of Qom \\
Qom. Iran\\
Email:\ [email protected]
\noindent
Behzad Najafi\\
Department of Mathematics and Computer Sciences\\
Amirkabir University (Tehran Polytechnic)\\
Hafez Ave.\\
Tehran. Iran\\
Email:\ [email protected]
\end{document}
\begin{document}
\title{TRAIL: Near-Optimal Imitation Learning with Suboptimal Data}
\vspace{-3em}
\begin{abstract}
The aim in imitation learning is to learn effective policies by utilizing near-optimal expert demonstrations. However, high-quality demonstrations from human experts can be expensive to obtain in large number. On the other hand, it is often much easier to obtain large quantities of suboptimal or task-agnostic trajectories, which are not useful for direct imitation, but can nevertheless provide insight into the dynamical structure of the environment, showing what \emph{could} be done in the environment even if not what \emph{should} be done. We ask the question, is it possible to utilize such suboptimal offline datasets to facilitate \emph{provably} improved downstream imitation learning? In this work, we answer this question affirmatively and present training objectives that use offline datasets to learn a \emph{factored} transition model whose structure enables the extraction of a \emph{latent action space}. Our theoretical analysis shows that the learned latent action space can boost the sample-efficiency of downstream imitation learning, effectively reducing the need for large near-optimal expert datasets through the use of auxiliary non-expert data. To learn the latent action space in practice, we propose TRAIL\xspace (Transition-Reparametrized Actions for Imitation Learning), an algorithm that learns an energy-based transition model contrastively, and uses the transition model to reparametrize the action space for sample-efficient imitation learning. We evaluate the practicality of our objective through experiments on a set of navigation and locomotion tasks. Our results verify the benefits suggested by our theory and show that TRAIL\xspace is able to improve baseline imitation learning by up to 4x in performance.\footnote{Find experimental code at~\url{https://github.com/google-research/google-research/tree/master/rl_repr}.}
\end{abstract}
\setlength{\abovedisplayskip}{2pt}
\setlength{\abovedisplayshortskip}{2pt}
\setlength{\belowdisplayskip}{2pt}
\setlength{\belowdisplayshortskip}{2pt}
\setlength{\jot}{2pt}
\setlength{\floatsep}{2ex}
\setlength{\textfloatsep}{2ex}
\setlength{\parskip}{0.1em}
\titlespacing\section{0pt}{10pt plus 2pt minus 2pt}{0pt plus 2pt minus 2pt}
\titlespacing\subsection{2pt}{10pt plus 2pt minus 2pt}{2pt plus 2pt minus 2pt}
\section{Introduction}\label{sec:intro}
Imitation learning uses expert demonstration data to learn sequential decision making policies~\citep{schaal1999imitation}. Such demonstrations, often produced by human experts, can be costly to obtain in large numbers. On the other hand, practical application domains, such as recommendation~\citep{afsar2021reinforcement} and dialogue~\citep{jiang2021towards} systems, provide large quantities of offline data generated by suboptimal agents. Since the offline data is suboptimal in performance, using it directly for imitation learning is infeasible. While some prior works have proposed using suboptimal offline data for offline reinforcement learning (RL)~\citep{kumar2019stabilizing,wu2019behavior,levine2020offline}, this would require reward information, which may be unavailable or infeasible to compute from suboptimal data~\citep{abbeel2004apprenticeship}.
Nevertheless, conceptually, suboptimal offline datasets should contain useful information about the environment, if only we could distill that information into a useful form that can aid downstream imitation learning.
One approach to leveraging suboptimal offline
datasets is to use the offline data to extract a lower-dimensional \emph{latent action space}, and then perform imitation learning on an expert dataset using this latent action space. If the latent action space is learned properly, one may hope that performing imitation learning in the latent space can reduce the need for large quantities of expert data.
While a number of prior works have studied similar approaches in the context of hierarchical imitation and RL setting~\citep{parr1998reinforcement,dietterich1998maxq,sutton1999between,kulkarni2016hierarchical,vezhnevets2017feudal,nachum2018data,ajay2020opal,pertsch2020accelerating,hakhamaneshi2021hierarchical}, such methods typically focus on the theoretical and practical benefits of \emph{temporal abstraction} by extracting temporally extended skills from data or experience. That is, the main benefit of these approaches is that the latent action space operates at a lower temporal frequency than the original environment action space. We instead focus directly on the question of \emph{action representation}: instead of learning skills that provide for temporal abstraction, we aim to directly reparameterize the action space in a way that provides for more sample-efficient downstream imitation without the need to reduce control frequency. Unlike learning temporal abstractions, action reparametrization does not have to rely on any hierarchical structures in the offline data, and can therefore utilize highly suboptimal datasets (e.g., with random actions).
Aiming for a provably-efficient approach to utilizing highly suboptimal offline datasets, we use first principles to derive an upper bound on the quality of an imitation learned policy involving three terms corresponding to (\ref{eq:tabular-rep}) action representation and (\ref{eq:tabular-dec}) action decoder learning on a suboptimal offline dataset, and finally, (\ref{eq:tabular-bc}) behavioral cloning (i.e., max-likelihood learning of latent actions) on an expert demonstration dataset.
The first term in our bound immediately suggests a practical offline training objective based on a transition dynamics loss using a \emph{factored} transition model. We show that under specific factorizations (e.g., low-dimensional or linear), one can guarantee improved sample efficiency on the expert dataset. Crucially, our mathematical results avoid the potential shortcomings of temporal skill extraction, as our bound is guaranteed to hold even when there is no temporal abstraction in the latent action space.
\begin{figure}
\caption{The TRAIL framework. Pretraining learns a factored transition model $\mathcal{T}_\Zset \circ \phi$ together with an action decoder $\pi_{\alpha}$ from suboptimal offline data; downstream imitation learning reparametrizes expert actions via $\phi$ and learns a latent policy $\pi_{\Zset}$ by behavioral cloning.}
\label{fig:framework}
\end{figure}
We translate these mathematical results into an algorithm that we call \emph{Transition-Reparametrized Actions for Imitation Learning} (TRAIL\xspace). As shown in~\Figref{fig:framework}, TRAIL\xspace consists of a pretraining stage (corresponding to the first two terms in our bound) and a downstream imitation learning stage (corresponding to the last term in our bound). During the pretraining stage, TRAIL\xspace uses an offline dataset to learn a factored transition model and a paired action decoder. During the downstream imitation learning stage, TRAIL\xspace first reparametrizes expert actions into the latent action space according to the learned transition model, and then learns a latent policy via behavioral cloning in the latent action space. During inference, TRAIL\xspace uses the imitation learned latent policy and action decoder in conjunction to act in the environment. In practice, TRAIL\xspace parametrizes the transition model as an energy-based model (EBM) for flexibility and trains the EBM with a contrastive loss. The EBM enables the low-dimensional factored transition model referenced by our theory, and we also show that one can recover the \emph{linear} transition model in our theory by approximating the EBM with random Fourier features~\citep{rahimi2007random}.
To summarize, our contributions include (i) a provably beneficial objective for learning action representations without temporal abstraction and (ii) a practical algorithm for optimizing the proposed objective by learning an EBM or linear transition model.
An extensive evaluation on a set of navigation and locomotion tasks demonstrates the effectiveness of the proposed objective.
TRAIL\xspace's empirical success compared to a variety of existing methods suggests that the benefit of learning \emph{single-step} action representations has been overlooked by previous temporal skill extraction methods. Additionally, TRAIL\xspace significantly improves behavioral cloning even when the offline dataset is unimodal or highly suboptimal (e.g., obtained from a random policy), whereas temporal skill extraction methods lead to \emph{degraded} performance in these scenarios. Lastly, we show that TRAIL\xspace, without using reward labels, can perform similarly or better than offline reinforcement learning (RL) with orders of magnitude less expert data, suggesting new ways for offline learning of sequential decision making policies.
\section{Related Work}
Learning action abstractions is a long standing topic in the hierarchical RL literature~\citep{parr1998reinforcement,dietterich1998maxq,sutton1999between,kulkarni2016hierarchical,nachum2018data}. A large body of work focusing on \emph{online skill discovery} have been proposed as a means to improve exploration and sample complexity in online RL. For instance, \citet{eysenbach2018diversity,sharma2019dynamics,gregor2016variational,warde2018unsupervised,liu2021learn} propose to learn a diverse set of skills by maximizing an information theoretic objective. Online skill discovery is also commonly seen in a hierarchical framework that learns a continuous space~\citep{vezhnevets2017feudal,hausman2018learning,nachum2018data,nachum2019multi} or a discrete set of lower-level policies~\citep{bacon2017option,stolle2002learning,peng2019mcp}, upon which higher-level policies are trained to solve specific tasks. Different from these works, we focus on learning action representations \emph{offline} from a fixed suboptimal dataset to accelerate imitation learning.
Aside from online skill discovery, \emph{offline skill extraction} focuses on learning temporally extended action abstractions from a fixed offline dataset. Methods for offline skill extraction generally involve maximum likelihood training of some latent variable models on the offline data, followed by downstream planning~\citep{lynch2020learning}, imitation learning~\citep{kipf2019compile,ajay2020opal,hakhamaneshi2021hierarchical}, offline RL~\citep{ajay2020opal}, or online RL~\citep{fox2017multi,krishnan2017ddco,shankar2020learning,shankar2019discovering,singh2020parrot,pertsch2020accelerating,pertsch2021guided,wang2021skill} in the induced latent action space. Among these works, those that provide a theoretical analysis attribute the benefit of skill extraction predominantly to increased temporal abstraction as opposed to the learned action space being any ``easier'' to learn from than the raw action space~\citep{ajay2020opal,nachum2018near}. Unlike these methods, our analysis focuses on the advantage of a lower-dimensional reparametrized action space agnostic to temporal abstraction. Our method also applies to offline data that is highly suboptimal (e.g., contains random actions) and potentially unimodal (e.g., without diverse skills to be extracted),
which have been considered challenging by previous work~\citep{ajay2020opal}.
While we focus on reducing the complexity of the action space through the lens of action representation learning,
there exists a disjoint set of work that focuses on accelerating RL with \emph{state} representation learning~\citep{singh1995reinforcement,ren2002state,castro2010using,gelada2019deepmdp,zhang2020learning,arora2020provable,nachum2021provable}, some of which have proposed to extract a latent state space from a learned dynamics model. Analogous to our own derivations, these works attribute the benefit of representation learning to a smaller latent state space reduced from a high-dimensional input state space (e.g., images).
\section{Preliminaries}
In this section, we introduce the problem statements for imitation learning and learning-based control, and define relevant notations.
\paragraph{Markov decision process.} Consider an MDP~\citep{puterman1994markov} $\mathcal{M}:= \langle S, A, \mathbb{R}eward, \mathcal{T}, \mu, \gamma\rangle$, consisting of a state space $S$, an action space $A$, a reward function $\mathbb{R}eward:S\times A\to\mathbb{R}$, a transition function $\mathcal{T}:S\times A\to\Delta(S)$\footnote{$\Delta({\mathcal{X}})$ denotes the simplex over a set ${\mathcal{X}}$.}, an initial state distribution $\mu\in\Delta(S)$, and a discount factor $\gamma \in [0, 1)$.
A policy $\pi:S\to\Delta(A)$ interacts with the environment starting at an initial state $s_0 \sim \mu$. An action $a_t\sim\pi(s_t)$ is sampled and applied to the environment at each step $t \ge 0$. The environment produces a scalar reward $\mathbb{R}eward(s_t,a_t)$
and transitions into the next state $s_{t+1}\sim\mathcal{T}(s_t,a_t)$. Note that we are specifically interested in the imitation learning setting, where the rewards produced by $\mathbb{R}eward$ are unobserved by the learner.
The state visitation distribution ${\bm{i}}sitpi(s)$ induced by a policy $\pi$ is defined as ${\bm{i}}sitpi(s) := (1-\gamma)\sum_{t=0}^\infty\gamma^t\cdot\Pr\left[s_t=s|\pi,\mathcal{M}\right]$. We relax the notation and use $(s,a)\sim d^\pi$ to denote $s\sim d^\pi, a\sim\pi(s)$.
\paragraph{Learning goal.} Imitation learning aims to recover an \emph{expert policy} $\pi_*$ with access to only a fixed set of samples from the expert:
$\mathcal{D}^{\pitarget}=\{(s_i,a_i)\}_{i=1}^n$ with $s_i\sim d^{\pi_*}$ and $a_i\sim\pi_*(s_i)$. One approach to imitation learning is to learn a policy $\pi$ that minimizes some discrepancy between $\pi$ and $\pi_*$. In our analysis, we will use the total variation (TV) divergence in state visitation distributions,
\begin{equation}
\mathrm{Diff}(\pi,\pi_*) = D_\mathrm{TV}(d^\pi\|d^{\pi_*}),\nonumber
\end{equation}
as the way to measure the discrepancy between $\pi$ and $\pi_*$. Our bounds can be easily modified to apply to other divergence measures such as the Kullback–Leibler (KL) divergence or difference in expected future returns. \emph{Behavioral cloning} (BC)~\citep{pomerleau1989alvinn} solves the imitation learning problem by learning $\pi$ from $\mathcal{D}^{\pitarget}$ via a maximum likelihood objective
\begin{equation}
J_\mathrm{BC}(\pi) := \mathbb{E}_{(s,a)\sim({\bm{i}}sittarget,\pi_*)}[-\log\pi(a|s)],\nonumber
\end{equation}
which optimizes an upper bound of $\mathrm{Diff}(\pi,\pi_*)$ defined above~\citep{ross2010efficient,nachum2021provable}:
\begin{equation}
\small
\mathrm{Diff}(\pi, \pi_*) \le \frac{\gamma}{1-\gamma}\sqrt{\frac{1}{2}\mathbb{E}_{{\bm{i}}sittarget}[D_\mathrm{KL}(\pi_*(s)\|\pi(s))]} =\frac{\gamma}{1-\gamma}\sqrt{\mathrm{const}(\pi_*) + \frac{1}{2}J_\mathrm{BC}(\pi)}.\nonumber
\end{equation}
\paragraph{BC with suboptimal offline data.} The standard BC objective (i.e., direct max-likelihood on $\mathcal{D}^{\pitarget}$) can struggle to attain good performance when the amount of expert demonstrations is limited~\citep{ross2011reduction,tu2021closing}. We assume access to an additional \emph{suboptimal} offline dataset
$\mathcal{D}^\mathrm{off}=\{(s_i,a_i,s_i^\prime)\}_{i=1}^m$, where the suboptimality is a result of (i) suboptimal action samples $a_i\sim \mathrm{Unif}_{\Aset}$
and (ii) lack of reward labels. We use $(s,a,s')\sim{\bm{i}}sitrb$ as a shorthand for simulating finite sampling from $\mathcal{D}^\mathrm{off}$ via $s_i\sim{\bm{i}}sitrb, a_i\sim \mathrm{Unif}_{\Aset},s_i'\sim\mathcal{T}(s_i,a_i),$ where ${\bm{i}}sitrb$ is an \tilde{p}h{unknown} offline state distribution. We assume ${\bm{i}}sitrb$ sufficiently covers the expert distribution; i.e., ${\bm{i}}sittarget(s) > 0 \mathbb{R}ightarrow {\bm{i}}sitrb(s) > 0$ for all $s\in S$.
The uniform sampling of actions in $\mathcal{D}^\mathrm{off}$ is largely for mathematical convenience, and in theory can be replaced with any distribution uniformly bounded from below by $\eta>0$, and our derived bounds will be scaled by $\frac{1}{|A| \eta}$ as a result. This work focuses on how to utilize such a suboptimal $\mathcal{D}^\mathrm{off}$ to provably accelerate BC.
\section{Near-Optimal Imitation Learning with Reparametrized Actions}
In this section, we provide a provably-efficient objective for learning action representations from suboptimal data. Our initial derivations (Theorem~\ref{thm:tabular}) apply to general policies and latent action spaces, while our subsequent result (Theorem~\ref{thm:linear}) provides improved bounds for specialized settings with continuous latent action spaces. Finally, we present our practical method TRAIL\xspace for action representation learning and downstream imitation learning.
\subsection{Performance Bound with Reparametrized Actions}
Despite $\mathcal{D}^\mathrm{off}$ being highly suboptimal (e.g., with random actions), the large set of $(s,a,s')$ tuples from $\mathcal{D}^\mathrm{off}$ reveals the transition dynamics of the environment, which a latent action space should support. Under this motivation, we propose to learn a \emph{factored} transition model $\overline{\mathcal{T}}:= \mathcal{T}_\Zset \circ \phi$ from the offline dataset $\mathcal{D}^\mathrm{off}$, where $\phi:S\times A\to Z$ is an action representation function and $\mathcal{T}_\Zset:S\times Z\to\Delta(S)$ is a latent transition model. Intuitively, good action representations should enable good imitation learning.
We formalize this intuition in the theorem below by establishing a bound on the quality of a learned policy based on (\ref{eq:tabular-rep}) an offline pretraining objective for learning $\phi$ and $\mathcal{T}_\Zset$, (\ref{eq:tabular-dec}) an offline decoding objective for learning an action decoder $\pi_{\alpha}$, and (\ref{eq:tabular-bc}) a downstream imitation learning objective for learning a latent policy $\pi_Z$ with respect to latent actions determined by $\phi$.
\begin{theorem}
\label{thm:tabular}
Consider an action representation function $\phi:S\times A\to Z$, a factored transition model $\mathcal{T}_\Zset:S\times Z\to\Delta(S)$, an action decoder $\pi_{\alpha}:S\times Z\to\Delta(A)$, and a tabular latent policy $\pi_{\Zset}:S\to\Delta(Z)$. Define the transition representation error as
\begin{align}
J_\mathrm{T}(\mathcal{T}_\Zset, \phi) &:= \mathbb{E}_{(s,a)\sim{\bm{i}}sitrb}\left[D_\mathrm{KL}(\mathcal{T}(s,a)\|\mathcal{T}_\Zset(s, \phi(s,a)))\right],\nonumber
\end{align}
the action decoding error as
\begin{equation}
J_\mathrm{BC}dec(\pi_{\alpha},\phi) := \mathbb{E}_{(s,a)\sim{\bm{i}}sitrb}[-\log\pi_{\alpha}(a|s, \phi(s, a))],\nonumber
\end{equation}
and the latent behavioral cloning error as
\begin{equation*}
J_\mathrm{BC}rep(\pi_{\Zset}) := \mathbb{E}_{(s,a)\sim({\bm{i}}sittarget,\pi_*)}[-\log\pi_{\Zset}(\phi(s,a)|s)].\nonumber
\end{equation*}
Then the TV divergence between the state visitation distributions of $\pi_{\alpha}\circ\pi_{\Zset}:S\to\Delta(A)$ and $\pi_*$ can be bounded as
\begin{minipage}{\textwidth}
\begin{center}
\begin{equation*}
\hspace{-25em}\mathrm{Diff}(\pi_{\alpha}\circ\pi_{\Zset},\pi_*) \leq
\end{equation*}
\begin{empheq}[left={\text{Pretraining}\empheqlbrace}]{align}
& C_1 \cdot\sqrtexplained{\frac{1}{2}\underbrace{\mathbb{E}_{(s,a)\sim{\bm{i}}sitrb}\left[D_\mathrm{KL}(\mathcal{T}(s,a)\|\mathcal{T}_\Zset(s, \phi(s,a)))\right]}_{\displaystyle={\color{blue}J_\mathrm{T}(\mathcal{T}_\Zset, \phi)}}}\label{eq:tabular-rep}
\\
+& C_2 \cdot\sqrtexplained{\frac{1}{2}\underbrace{\mathbb{E}_{s\sim{\bm{i}}sitrb}[
\max_{z\inZ}
D_\mathrm{KL}(\pi_{\alpha^*}(s,z)\|\pi_{\alpha}(s,z))]}_{\displaystyle\approx~\mathrm{const}({\bm{i}}sitrb,\phi) + {\color{blue}J_\mathrm{BC}dec(\pi_{\alpha},\phi)}}} \label{eq:tabular-dec}
\end{empheq}
\begin{empheq}[left={\hspace{-1.8cm}\parbox{1.8cm}{\text{Downstream} \\ \text{Imitation}}\empheqlbrace}]{align}
+& C_3\cdot\sqrtexplained{\frac{1}{2}\underbrace{\mathbb{E}_{s\sim {\bm{i}}sittarget}[D_\mathrm{KL}(\pi_{*,Z}(s)\|\pi_{\Zset}(s))]}_{\displaystyle =~\mathrm{const}(\pi_*,\phi) + {\color{red}J_\mathrm{BC}rep(\pi_{\Zset})}}},\label{eq:tabular-bc}
\end{empheq}
\end{center}
\end{minipage}
where $C_1 = \gamma|A|(1-\gamma)^{-1}(1+D_\mathrm{\chi^2}({\bm{i}}sittarget\|{\bm{i}}sitrb)^{\frac{1}{2}})$, $C_2=\gamma(1-\gamma)^{-1}(1+D_\mathrm{\chi^2}({\bm{i}}sittarget\|{\bm{i}}sitrb)^{\frac{1}{2}})$, $C_3=\gamma(1-\gamma)^{-1}$, $\pi_{\alpha^*}$ is the optimal action decoder for a specific data distribution ${\bm{i}}sitrb$ and a specific $\phi$:
\begin{equation*}
\pi_{\alpha^*}(a|s,z) = \frac{{\bm{i}}sitrb(s,a)\cdot\mathbbm{1}[z=\phi(s,a)]}{\sum_{a'\inA }{\bm{i}}sitrb(s,a')\cdot\mathbbm{1}[z=\phi(s,a')]},
\end{equation*}
and $\pi_{*,Z}$ is the marginalization of $\pi_*$ onto $Z$ according to $\phi$:
\begin{equation*}
\pi_{*,Z}(z|s) := \sum_{a\inA, z=\phi(s,a)}\pi_*(a|s).
\end{equation*}
\end{theorem}
Theorem~\ref{thm:tabular} essentially decomposes the imitation learning error into (\ref{eq:tabular-rep}) a transition-based representation error $J_\mathrm{T}$, (\ref{eq:tabular-dec}) an action decoding error $J_\mathrm{BC}dec$, and (\ref{eq:tabular-bc}) a latent behavioral cloning error $J_\mathrm{BC}rep$. Notice that only (\ref{eq:tabular-bc}) requires expert data $\mathcal{D}^{\pitarget}$; (\ref{eq:tabular-rep}) and (\ref{eq:tabular-dec}) are trained on the large offline data $\mathcal{D}^\mathrm{off}$. By choosing $|Z|$ that is smaller than $|A|$, fewer demonstrations are needed to achieve small error in $J_\mathrm{BC}rep$ compared to vanilla BC with $J_\mathrm{BC}$. The Pearson $\chi^2$ divergence term $D_\mathrm{\chi^2}({\bm{i}}sittarget\|{\bm{i}}sitrb)$ in $C_1$ and $C_2$ accounts for the difference in state visitation between the expert and offline data. In the case where ${\bm{i}}sittarget$ differs too much from ${\bm{i}}sitrb$, known as the distribution shift problem in offline RL~\citep{levine2020offline}, the errors from $J_\mathrm{T}$ and $J_\mathrm{BC}dec$ are amplified and the terms (\ref{eq:tabular-rep}) and (\ref{eq:tabular-dec}) in Theorem~\ref{thm:tabular} dominate. Otherwise, as $J_\mathrm{T}\rightarrow0$ and $\pi_{\alpha},\phi\rightarrow\argmin J_\mathrm{BC}dec$, optimizing $\pi_{\Zset}$ in the latent action space is guaranteed to optimize $\pi$ in the original action space.
\paragraph{Sample Complexity}
To formalize the intuition that a smaller latent action space $|Z|<|A|$ leads to more sample efficient downstream behavioral cloning, we provide the following theorem in the tabular action setting. First, assume access to an oracle latent action representation function $\phi_{opt}:=\mathcal{OPT}_\phi(\mathcal{D}^\mathrm{off})$ which yields pretraining errors (\ref{eq:tabular-rep})$(\phi_{opt})$ and (\ref{eq:tabular-dec})$(\phi_{opt})$ in Theorem~\ref{thm:tabular}. For downstream behavioral cloning, we consider learning a tabular $\pi_{\Zset}$ on $\mathcal{D}^{\pitarget}$ with $n$ expert samples. We can bound the expected difference between a latent policy $\pi_{opt,Z}$ with respect to $\phi_{opt}$ and $\pi_*$ as follows.
\begin{theorem}
\label{thm:sample}
Let $\phi_{opt}:= \mathcal{OPT}_\phi(\mathcal{D}^\mathrm{off})$ and $\pi_{opt,Z}$ be the latent BC policy with respect to $\phi_{opt}$. We have,
\begin{equation*}
\mathbb{E}_{\mathcal{D}^{\pitarget}}[\mathrm{Diff}(\pi_{opt,Z}, \pi_*)] \le (\ref{eq:tabular-rep})(\phi_{opt}) + (\ref{eq:tabular-dec})(\phi_{opt}) + C_3\cdot\sqrt{\frac{|Z||S|}{n}},
\end{equation*}
where $C_3$ is the same as in Theorem~\ref{thm:tabular}.
\end{theorem}
We can contrast this bound to its form in the vanilla BC setting, for which $|Z|=|A|$ and both (\ref{eq:tabular-rep})$(\phi_{opt})$ and (\ref{eq:tabular-dec})$(\phi_{opt})$ are zero. We can expect an improvement in sample complexity from reparametrized actions when the errors in (\ref{eq:tabular-rep}) and (\ref{eq:tabular-dec}) are small and $|Z| < |A|$.
\subsection{Linear Transition Models with Deterministic Latent Policy}\label{sec:linear}
Theorem~\ref{thm:tabular} has introduced the notion of a latent expert policy $\pi_{*,Z}$, and minimizes the KL divergence between $\pi_{*,Z}$ and a \emph{tabular} latent policy $\pi_{\Zset}$. However, it is not immediately clear, in the case of continuous actions, how to ensure that the latent policy $\pi_{\Zset}$ is expressive enough to capture any $\pi_{*,Z}$. In this section, we provide guarantees for recovering stochastic expert policies with continuous action space under a linear transition model.
Consider a \emph{continuous} latent space $Z\subset\mathbb{R}^d$ and a \emph{deterministic} latent policy $\pi_\theta(s)=\theta_s$ for some $\theta\in\mathbb{R}^{d\times|S|}$. While a deterministic $\theta$ in general cannot capture a stochastic $\pi_*$, we show that under a linear transition model $\mathcal{T}_\Zset(s'|s,\phi(s,a))=w(s')^\top \phi(s,a)$, there always exists a deterministic policy $\theta:S\to\mathbb{R}^d$, such that $\theta_s = \pi_{*,Z}(s),\,\forall s\in S$. This means that our scheme for offline pretraining paired with downstream imitation learning can \emph{provably} recover any expert policy $\pi_*$ from a deterministic $\pi_\theta$, regardless of whether $\pi_*$ is stochastic.
\begin{theorem}
\label{thm:linear}
Let $\phi:S\times A \to Z$ for some $Z\subset\mathbb{R}^d$ and suppose there exists $w:S\to\mathbb{R}^d$ such that $\mathcal{T}_\Zset(s'|s,\phi(s,a))=w(s')^\top \phi(s,a)$ for all $s,s'\in S,a\in A$.
Let $\pi_{\alpha}:S\times Z\to\Delta(A)$ be an action decoder, $\pi:S\to\Delta(A)$ be any policy in $\mathcal{M}$ and $\pi_\theta:S\to\mathbb{R}^d$ be a deterministic latent policy for some $\theta\in\mathbb{R}^{d\times|S|}$.
Then,
\begin{minipage}{\textwidth}
\begin{center}
\begin{equation*}
\hspace{-15em}
\mathrm{Diff}(\pi_{\alpha}\circ\pi_\theta,\pi_*) \leq (\ref{eq:tabular-rep})(\mathcal{T}_\Zset,\phi) + (\ref{eq:tabular-dec})(\pi_{\alpha},\phi)
\end{equation*}
\begin{empheq}[left={\hspace{-2cm}\parbox{1.8cm}{\text{Downstream} \\ \text{Imitation}}\empheqlbrace}]{align}
&+ C_4 \cdot \left\|\frac{\partial}{\partial\theta} \mathbb{E}_{s\sim d^{\pi_*}, a\sim\pi_*(s)}[(\theta_s - \phi(s,a))^2]\right\|_1,\label{eq:linear-bc}
\end{empheq}
\end{center}
\end{minipage}
where $C_4=\frac{1}{4}|S|\|w\|_\infty$, and (\ref{eq:tabular-rep}) and (\ref{eq:tabular-dec}) correspond to the first and second terms in the bound in Theorem~\ref{thm:tabular}.
\end{theorem}
By replacing term (\ref{eq:tabular-bc}) in Theorem~\ref{thm:tabular} that corresponds to behavioral cloning in the latent action space by term (\ref{eq:linear-bc}) in Theorem~\ref{thm:linear} that is a convex function unbounded in all directions, we are guaranteed that $\pi_\theta$ is provably optimal regardless of the form of $\pi_*$ and $\pi_{*,Z}$.
Note that the downstream imitation learning objective implied by term (\ref{eq:linear-bc}) is simply the mean squared error between actions $\theta_s$ chosen by $\pi_\theta$ and reparameterized actions $\phi(s,a)$ appearing in the expert dataset.
\subsection{TRAIL\xspace: Reparametrized Actions and Imitation Learning in Practice}\label{sec:learning}
In this section, we describe our learning framework, Transition-Reparametrized Actions for Imitation Learning (TRAIL\xspace). TRAIL\xspace consists of two training stages: pretraining and downstream behavioral cloning. During pretraining, TRAIL\xspace learns $\mathcal{T}_\Zset$ and $\phi$ by minimizing $J_\mathrm{T}(\mathcal{T}_\Zset,\phi)=\mathbb{E}_{(s,a)\sim d^{\mathrm{off}}}\left[D_\mathrm{KL}(\mathcal{T}(s,a)\|\mathcal{T}_\Zset(s, \phi(s,a)))\right]$. Also during pretraining, TRAIL\xspace learns $\pi_{\alpha}$ and $\phi$ by minimizing $J_{\mathrm{BC,dec}}(\pi_{\alpha},\phi) := \mathbb{E}_{(s,a)\sim d^{\mathrm{off}}}[-\log\pi_{\alpha}(a|s, \phi(s, a))]$.
TRAIL\xspace parametrizes $\pi_{\alpha}$ as a multivariate Gaussian distribution. Depending on whether $\mathcal{T}_\Zset$ is defined according to Theorem~{\textnormal{e}}f{thm:tabular} or Theorem~{\textnormal{e}}f{thm:linear}, we have either TRAIL\xspace EBM or TRAIL\xspace linear.
\textbf{TRAIL\xspace EBM for Theorem~\ref{thm:tabular}.} In the tabular action setting that corresponds to Theorem~\ref{thm:tabular}, to ensure that the factored transition model $\mathcal{T}_\Zset$ is flexible enough to capture any complex (e.g., multi-modal) transitions in the offline dataset, we propose to use an energy-based model (EBM) to parametrize $\mathcal{T}_\Zset(s'|s,\phi(s,a))$,
\begin{equation}
\mathcal{T}_\Zset(s'|s, \phi(s,a)) \propto \rho(s')\exp(-\|\phi(s,a) - \psi(s')\|^2),
\end{equation}
where $\rho$ is a fixed distribution over $S$. In our implementation we set $\rho$ to be the distribution of $s'$ in $d^{\mathrm{off}}$, which enables a practical learning objective for $\mathcal{T}_\Zset$ by minimizing $\mathbb{E}_{(s,a)\sim d^{\mathrm{off}}}\left[D_\mathrm{KL}(\mathcal{T}(s,a)\|\mathcal{T}_\Zset(s, \phi(s,a)))\right]$ in Theorem~\ref{thm:tabular} using a contrastive loss:
\begin{multline*}
\mathbb{E}_{d^{\mathrm{off}}}[-\log\mathcal{T}_\Zset(s'|s, \phi(s, a))] = \mathrm{const}(d^{\mathrm{off}}) + \frac{1}{2}\mathbb{E}_{d^{\mathrm{off}}}[\|\phi(s, a) - \psi(s')\|^2] \\
+ \log\mathbb{E}_{\tilde{s}'\sim\rho}[\exp\{-\frac{1}{2}\|\phi(s, a) - \psi(\tilde{s}')\|^2\}].
\end{multline*}
During downstream behavioral cloning, TRAIL\xspace EBM learns a latent Gaussian policy $\pi_{\Zset}$ by minimizing $J_{\mathrm{BC,rep}}(\pi_{\Zset})=\mathbb{E}_{(s,a)\sim(d^{\pi_*},\pi_*)}[-\log\pi_{\Zset}(\phi(s,a)|s)]$ with $\phi$ fixed. During inference, TRAIL\xspace EBM first samples a latent action according to $z\sim\pi_{\Zset}(s)$, and decodes the latent action using $a\sim\pi_{\alpha}(s,z)$ to act in an environment.~\Figref{fig:framework} describes this process pictorially.
\textbf{TRAIL\xspace Linear for Theorem~\ref{thm:linear}.} In the continuous action setting that corresponds to Theorem~\ref{thm:linear}, we propose TRAIL\xspace linear, an approximation of TRAIL\xspace EBM, to enable learning \emph{linear} transition models required by Theorem~\ref{thm:linear}. Specifically, we first learn $f,g$ that parameterize an energy-based transition model $\overline{\mathcal{T}}(s'|s,a)\propto \rho(s')\exp\{-\|f(s, a) - g(s')\|^2/2\}$ using the same contrastive loss as above (replacing $\phi$ and $\psi$ by $f$ and $g$), and then apply random Fourier features~\citep{rahimi2007random} to recover $\bar\phi(s,a)=\cos(Wf(s,a)+b)$, where $W$ and $b$ are implemented as an untrainable neural network layer on top of $f$. This results in an approximate linear transition model,
\begin{equation*}
\overline{\mathcal{T}}(s'|s,a)\propto\rho(s')\exp\{-\|f(s,a) - g(s')\|^2/2\} \propto \bar\psi(s')^\top\bar\phi(s,a).
\end{equation*}
During downstream behavioral cloning, TRAIL\xspace linear learns a deterministic policy $\pi_\theta$ in the continuous latent action space determined by $\bar{\phi}$ via minimizing $\left\|\frac{\partial}{\partial\theta} \mathbb{E}_{s\sim d^{\pi_*}, a\sim\pi_*(s)}[(\theta_s - \bar\phi(s,a))^2]\right\|_1$ with $\bar\phi$ fixed.
During inference, TRAIL\xspace linear first determines the latent action according to $z = \pi_\theta(s)$, and decodes the latent action using $a\sim\pi_{\alpha}(s,z)$ to act in an environment.
\section{Experimental Evaluation}
\begin{figure}
\caption{Tasks for our empirical evaluation. We include the challenging AntMaze navigation tasks from D4RL~\citep{fu2020d4rl}.}
\label{fig:tasks}
\end{figure}
We now evaluate TRAIL\xspace on a set of navigation and locomotion tasks (\Figref{fig:tasks}).
Our evaluation is designed to study how well TRAIL\xspace can improve imitation learning with limited expert data by leveraging available suboptimal offline data. We evaluate the improvement attained by TRAIL\xspace over vanilla BC, and additionally compare TRAIL\xspace to previously proposed temporal skill extraction methods. Since there is no existing benchmark for imitation learning with suboptimal offline data, we adapt existing datasets for offline RL, which contain suboptimal data, and augment them with a small amount of expert data for downstream imitation learning.
\subsection{Evaluating Navigation without Temporal Abstraction}
\begin{figure}
\caption{Average success rate ($\%$) over $4$ seeds of TRAIL\xspace EBM (Theorem~\ref{thm:tabular}).}
\label{fig:antmaze}
\end{figure}
\paragraph{Description and Baselines.} We start our evaluation on the AntMaze task from D4RL~\citep{fu2020d4rl}, which has been used as a testbed by recent works on temporal skill extraction for few-shot imitation~\citep{ajay2020opal} and RL~\citep{ajay2020opal,pertsch2020accelerating,pertsch2021guided}.
We compare TRAIL\xspace to OPAL~\citep{ajay2020opal}, SkiLD~\citep{pertsch2021guided}, and SPiRL~\citep{pertsch2020accelerating}, all of which use an offline dataset to extract temporally extended (length $t=10$) skills to form a latent action space for downstream learning. SkiLD and SPiRL are originally designed only for downstream RL, so we modify them to support downstream imitation learning as described in Appendix~\ref{app:exp}. While a number of other works have also proposed to learn primitives for hierarchical imitation~\citep{kipf2019compile,hakhamaneshi2021hierarchical} and RL~\citep{fox2017multi,krishnan2017ddco,shankar2019discovering,shankar2020learning,singh2020parrot}, we chose OPAL, SkiLD, and SPiRL for comparison because they are the most recent works in this area with reported results that suggest these methods are state-of-the-art, especially in learning from \emph{suboptimal} offline data based on D4RL.
To construct the suboptimal and expert datasets,
we follow the protocol in~\citet{ajay2020opal}, which uses the full \texttt{diverse} or \texttt{play} D4RL AntMaze datasets as the suboptimal offline data, while using a set of $n=10$ expert trajectories (navigating from one corner of the maze to the opposite corner) as the expert data. The \texttt{diverse} and \texttt{play} datasets are suboptimal in the corner-to-corner navigation task, as they only contain data that navigates to random or fixed locations different from task evaluation.
\paragraph{Implementation Details.} For TRAIL\xspace, we parameterize $\phi(s,a)$ and $\psi(s')$ using separate feed-forward neural networks (see details in Appendix~\ref{app:exp}) and train the transition EBM via the contrastive objective described in~\Secref{sec:learning}. We parametrize both the action decoder $\pi_{\alpha}$ and the latent $\pi_{\Zset}$ using multivariate Gaussian distributions with neural-network approximated mean and variance. For the temporal skill extraction methods, we implement the trajectory encoder using a bidirectional RNN and parametrize the skill prior, latent policy, and action decoder as Gaussians following~\citet{ajay2020opal}. We adapt SPiRL and SkiLD
for imitation learning by including the KL divergence term between the latent policy and the skill prior during downstream behavioral cloning (see details in Appendix~\ref{app:exp}). We do a search on the extent of temporal abstraction, and found $t=10$ to work the best, as reported in these papers' maze experiments. We also experimented with a version of vanilla BC pretrained on the suboptimal data and fine-tuned on expert data for fair comparison, which did not show a significant difference from directly training vanilla BC on expert data.
\paragraph{Results.} \Figref{fig:antmaze} shows the average performance of TRAIL\xspace in terms of task success rate (out of 100\%) compared to the prior methods. Since all of the prior methods are proposed in terms of temporal abstraction, we evaluate them both with the default temporal abstraction, $t=10$, as well as without temporal abstraction, corresponding to $t=1$.
Note that TRAIL\xspace uses \tilde{p}h{no} temporal abstraction.
We find that on the simpler \texttt{antmaze-medium} task, TRAIL\xspace trained on
a single-step transition model performs similarly to the set of temporal skill extraction methods with $t=10$. However, these skill extraction methods experience a degradation in performance when temporal abstraction is removed ($t=1$). This corroborates the existing theory in these works~\citep{ajay2020opal}, which attributes their benefits predominantly to temporal abstraction rather than producing a latent action space that is ``easier'' to learn. Meanwhile, TRAIL\xspace is able to excel without any temporal abstraction.
These differences become even more pronounced on the harder \texttt{antmaze-large} tasks. We see that TRAIL\xspace maintains significant improvements over vanilla BC, whereas temporal skill extraction fails to achieve good performance even with $t=10$.
These results suggest that TRAIL\xspace attains significant improvement specifically from utilizing the suboptimal data for learning suitable action representations, rather than simply from providing temporal abstraction. Of course, this does not mean that temporal abstraction is never helpful. Rather, our results serve as evidence that suboptimal data can be useful for imitation learning not just by providing temporally extended skills, but by actually reformulating the action space to make imitation learning easier and more efficient.
\subsection{Evaluating Locomotion with Highly Suboptimal Offline Data}
\begin{figure}
\caption{Average rewards (over $4$ seeds) of TRAIL\xspace EBM (Theorem~\ref{thm:tabular}).}
\label{fig:ant}
\end{figure}
\paragraph{Description.} The performance of TRAIL\xspace trained on a \emph{single-step} transition model in the previous section suggests that learning single-step latent action representations can benefit a broader set of tasks for which temporal abstraction may not be helpful, e.g., when the offline data is highly suboptimal (with near-random actions) or unimodal (collected by a single stationary policy).
In this section, we
consider a Gym-MuJoCo task from D4RL using the same 8-DoF quadruped ant robot as the previously evaluated navigation task. We first learn action representations from the \texttt{medium}, \texttt{medium-replay}, or \texttt{random} datasets, and imitate from $1\%$ or $2.5\%$ of the \texttt{expert} datasets from D4RL. The \texttt{medium} dataset represents data collected from a mediocre stationary policy (exhibiting unimodal behavior), and the \texttt{random} dataset is collected by a randomly initialized policy and is hence highly suboptimal.
\paragraph{Implementation Details.} For this task, we additionally train a linear version of TRAIL\xspace by approximating the transition EBM using random Fourier features~\citep{rahimi2007random} and learn a \emph{deterministic} latent policy following Theorem~\ref{thm:linear}. Specifically, we use separate feed-forward networks to parameterize $f(s, a)$ and $g(s')$, and extract action representations using $\phi(s,a) = \cos(W f(s,a) + b)$, where $W,b$ are untrainable randomly initialized variables as described in~\Secref{sec:learning}. Different from TRAIL\xspace EBM which parametrizes $\pi_{\Zset}$ as a Gaussian, TRAIL\xspace linear parametrizes the \emph{deterministic} $\pi_\theta$ using a feed-forward neural network.
\paragraph{Results.} Our results are shown in \Figref{fig:ant}. Both the EBM
and linear versions of TRAIL\xspace consistently improve over baseline BC, whereas temporal skill extraction methods generally lead to worse performance regardless of the extent of abstraction, likely due to the degenerate effect (i.e., latent skills being ignored by a flexible action decoder) resulting from unimodal offline datasets as discussed in~\citet{ajay2020opal}.
Surprisingly, TRAIL\xspace achieves a significant performance boost even when latent actions are learned from the \texttt{random} dataset, suggesting the benefit of learning action representations from transition models when the offline data is highly suboptimal.
Additionally, the linear variant of TRAIL\xspace performs slightly better than the EBM variant when the expert sample size is small (i.e., $10$k), suggesting
the benefit of learning deterministic latent policies from Theorem~\ref{thm:linear} when the environment is effectively approximated by a linear transition model.
\subsection{Evaluation on DeepMind Control Suite}
\begin{figure}
\caption{Average task rewards (over $4$ seeds) of TRAIL\xspace EBM (Theorem~\ref{thm:tabular}).}
\label{fig:rlu}
\end{figure}
\paragraph{Description.} Having witnessed the improvement TRAIL\xspace brings to behavioral cloning on AntMaze and MuJoCo Ant, we wonder how TRAIL\xspace performs on a wider spectrum of locomotion tasks with various degrees of freedom.
We consider $6$ locomotion tasks from the DeepMind Control Suite~\citep{tassa2018deepmind} ranging from simple (e.g., 1-DoF \texttt{cartpole-swingup}) to complex (e.g., 21-DoF \texttt{humanoid-run}) tasks. Following the setup in~\citet{zolna2020offline}, we take $\frac{1}{10}$ of the trajectories whose episodic reward is among the top $20\%$ of the open source RL Unplugged datasets~\citep{gulcehre2020rl} as expert demonstrations (see numbers of expert trajectories in Appendix~\ref{app:exp}), and the bottom $80\%$ of RL Unplugged as the suboptimal offline data. For completeness, we additionally include comparison to Critic Regularized Regression (CRR)~\citep{wang2020critic}, an offline RL method with competitive performance on these tasks.
CRR is trained on the full RL Unplugged datasets (i.e., combined suboptimal and expert datasets) with reward labels.
\paragraph{Results.}\Figref{fig:rlu} shows the comparison results. TRAIL\xspace outperforms temporal extraction methods on both low-dimensional (e.g., \texttt{cartpole-swingup}) and high-dimensional (\texttt{humanoid-run}) tasks. Additionally, TRAIL\xspace performs similarly to or better than CRR on $4$ out of the $6$ tasks despite not using any reward labels, and only slightly worse on \texttt{humanoid-run} and \texttt{walker-walk}. To test the robustness of TRAIL\xspace when the offline data is highly suboptimal, we further reduce the size and quality of the offline data to the bottom $5\%$ of the original RL Unplugged datasets. As shown in~\Figref{fig:rlu5} in Appendix~\ref{app:results}, the performance of temporal skill extraction declines in \texttt{fish-swim}, \texttt{walker-stand}, and \texttt{walker-walk} due to this change in offline data quality, whereas TRAIL\xspace maintains the same performance as when the bottom $80\%$ data was used, suggesting that TRAIL\xspace is more robust to low-quality offline data.
This set of results suggests a promising direction for offline learning of sequential decision making policies, namely to learn latent actions from abundant low-quality data and behavioral cloning in the latent action space on scarce high-quality data. Notably, compared to offline RL, this approach is applicable to settings where data quality cannot be easily expressed through a scalar reward.
\section{Conclusion}
We have derived a near-optimal objective for learning a latent action space from suboptimal offline data that provably accelerates downstream imitation learning. To learn this objective in practice, we propose transition-reparametrized actions for imitation learning (TRAIL), a two-stage framework that first pretrains a factored transition model from offline data, and then uses the transition model to reparametrize the action space prior to behavioral cloning. Our empirical results suggest that TRAIL can improve imitation learning drastically, even when pretrained on highly suboptimal data (e.g., data from a random policy), providing a new approach to imitation learning through a combination of pretraining on task-agnostic or suboptimal data and behavioral cloning on limited expert datasets. That said, our approach to action representation learning is not necessarily specific to imitation learning, and insofar as the reparameterized action space simplifies downstream control problems, it could also be combined with reinforcement learning in future work. More broadly, studying how learned action reparameterization can accelerate various facets of learning-based control represents an exciting future direction, and we hope that our results provide initial evidence of such a potential.
\subsubsection*{Acknowledgments}
We thank Dale Schuurmans and Bo Dai for valuable discussions. We thank Justin Fu, Anurag Ajay, and Konrad Zolna for assistance in setting up evaluation tasks.
\appendix
\begin{center}
{\huge Appendix}
\end{center}
\section{Proofs for Foundational Lemmas}
\begin{lemma}
\label{lem:performance}
If $\pi_1$ and $\pi_2$ are two policies in $\mathcal{M}$, let $d^{\pi_1}(s)$ and $d^{\pi_2}(s)$ be the state visitation distributions induced by policies $\pi_1$ and $\pi_2$, where $d^{\pi}(s) := (1-\gamma)\sum_{t=0}^{\infty}\gamma^t\cdot\Pr\left[s_t=s|\pi,\mathcal{M}\right]$. Define $\mathrm{Diff}(\pi_2,\pi_1) = D_\mathrm{TV}(d^{\pi_2}\|d^{\pi_1})$; then
\begin{equation}
\label{eq:perf-diff-bound}
\mathrm{Diff}(\pi_2, \pi_1) \leq \frac{\gamma}{1-\gamma}
\mathrm{Err}_{d^{\pi_1}}(\pi_1,\pi_2,\mathcal{T}),
\end{equation}
where
\begin{equation}
\mathrm{Err}_{d^{\pi_1}}(\pi_1,\pi_2,\mathcal{T}) := \frac{1}{2}\sum_{s'\in S} \left|\mathbb{E}_{s\sim d^{\pi_1},a_1\sim\pi_1(s),a_2\sim\pi_2(s)}[\mathcal{T}(s'|s,a_1) - \mathcal{T}(s'|s,a_2)]\right|
\end{equation}
is the TV-divergence between $\mathcal{T}\circ\pi_1\circ d^{\pi_1}$ and $\mathcal{T}\circ\pi_2\circ d^{\pi_1}$.
\end{lemma}
\begin{proof}
Following similar derivations in~\cite{achiam2017constrained,nachum2018near}, we express $D_\mathrm{TV}(d^{\pi_2}\|d^{\pi_1})$ in linear operator notation:
\begin{equation}
\mathrm{Diff}(\pi_2,\pi_1) = D_\mathrm{TV}(d^{\pi_2}\|d^{\pi_1}) = \frac{1}{2}\mathbf{1}|(1-\gamma)(I - \gamma \mathcal{T}\Pi_2)^{-1}\mu - (1-\gamma)(I - \gamma \mathcal{T}\Pi_1)^{-1}\mu|,
\end{equation}
where $\Pi_1,\Pi_2$ are linear operators $S\toS\timesA$ such that $\Pi_i \nu(s,a) = \pi_i(a|s)\nu(s)$ and $\mathbf{1}$ is an all ones row vector of size $|S|$.
Notice that $d^{\pi_1}$ may be expressed in this notation as $(1-\gamma)(I - \gamma \mathcal{T}\Pi_1)^{-1}\mu$.
We may re-write the above term as
\begin{align}
&\frac{1}{2}\mathbf{1}|(1-\gamma)(I - \gamma \mathcal{T}\Pi_2)^{-1}((I - \gamma\mathcal{T}\Pi_1) - (I - \gamma\mathcal{T}\Pi_2))(I - \gamma \mathcal{T}\Pi_1)^{-1}\mu| \nonumber\\
=& \gamma\cdot\frac{1}{2}\mathbf{1}|(I - \gamma \mathcal{T}\Pi_2)^{-1}(\mathcal{T}\Pi_2 - \mathcal{T}\Pi_1) d^{\pi_1}|.
\end{align}
Using matrix norm inequalities, we bound the above by
\begin{equation}
\gamma\cdot\frac{1}{2} \|(I - \gamma \mathcal{T}\Pi_2)^{-1}\|_{1,\infty}\cdot \mathbf{1}|(\mathcal{T}\Pi_2 - \mathcal{T}\Pi_1) d^{\pi_1}|.
\end{equation}
Since $\mathcal{T}\Pi_2$ is a stochastic matrix, $\|(I - \gamma \mathcal{T}\Pi_2)^{-1}\|_{1,\infty} \le \sum_{t=0}^\infty \gamma^t\|\mathcal{T}\Pi_2\|_{1,\infty} = (1-\gamma)^{-1}$. Thus, we bound the above by
\begin{equation}
\frac{\gamma}{2(1-\gamma)}\mathbf{1}|(\mathcal{T}\Pi_2 - \mathcal{T}\Pi_1) d^{\pi_1}| = \frac{\gamma}{1-\gamma} \mathrm{Err}_{d^{\pi_1}}(\pi_1,\pi_2,\mathcal{T}),
\end{equation}
and so we immediately achieve the desired bound in~\eqref{eq:perf-diff-bound}.
\end{proof}
The divergence bound above relies on the true transition model $\mathcal{T}$ which is not available to us. We now introduce an approximate transition model $\overline{\mathcal{T}}$ to proxy $\mathrm{Err}_{d^{\pi_1}}(\pi_1,\pi_2,\mathcal{T})$.
\begin{lemma}
\label{lem:model1}
For $\pi_1$ and $\pi_2$ two policies in $\mathcal{M}$ and any transition model $\overline{\mathcal{T}}(\cdot|s, a)$ we have,
\begin{align}
\mathrm{Err}_{d^{\pi_1}}(\pi_1,\pi_2,\mathcal{T}) &\le |A|\mathbb{E}_{(s,a)\sim (d^{\pi_1}, \mathrm{Unif}_{\Aset})}[D_\mathrm{TV}(\mathcal{T}(s,a)\|\overline{\mathcal{T}}(s,a))] +
\mathrm{Err}_{d^{\pi_1}}(\pi_1,\pi_2,\overline{\mathcal{T}}).
\end{align}
\end{lemma}
\begin{proof}
\begin{align}
\mathrm{Err}_{d^{\pi_1}}(\pi_1,\pi_2,\mathcal{T}) &= \frac{1}{2}\sum_{s'\in S}\left|\mathbb{E}_{s\sim d^{\pi_1},a_1\sim\pi_1(s),a_2\sim\pi_2(s)}[\mathcal{T}(s'|s,a_1) - \mathcal{T}(s'|s,a_2)]\right|
\end{align}
\begin{align}
&= \frac{1}{2}\sum_{s'\in S}\left|\sum_{a\in A}\mathbb{E}_{s\sim d^{\pi_1}}[\mathcal{T}(s'|s,a)\pi_1(a|s) - \mathcal{T}(s'|s,a)\pi_2(a|s)]\right| \\
&= \frac{1}{2}\sum_{s'\in S}\left|\sum_{a\in A}\mathbb{E}_{s\sim d^{\pi_1}}[(\mathcal{T}(s'|s,a) - \overline{\mathcal{T}}(s'|s,a))(\pi_1(a|s) - \pi_2(a|s)) + \overline{\mathcal{T}}(s'|s,a)(\pi_1(a|s) - \pi_2(a|s))]\right| \\
&\le \frac{1}{2}\sum_{s'\in S}\left|\sum_{a\in A}\mathbb{E}_{s\sim d^{\pi_1}}[(\mathcal{T}(s'|s,a) - \overline{\mathcal{T}}(s'|s,a))(\pi_1(a|s) - \pi_2(a|s))]\right| + \mathrm{Err}_{d^{\pi_1}}(\pi_1,\pi_2,\overline{\mathcal{T}}) \\
&\le \frac{1}{2}\sum_{s'\in S}\sum_{a\in A}\mathbb{E}_{s\sim d^{\pi_1}}[\left|(\mathcal{T}(s'|s,a) - \overline{\mathcal{T}}(s'|s,a))(\pi_1(a|s) - \pi_2(a|s))\right|] + \mathrm{Err}_{d^{\pi_1}}(\pi_1,\pi_2,\overline{\mathcal{T}}) \\
&\le |A|\mathbb{E}_{(s,a)\sim (d^{\pi_1}, \mathrm{Unif}_{\Aset})}[D_\mathrm{TV}(\mathcal{T}(s,a)\|\overline{\mathcal{T}}(s,a))] + \mathrm{Err}_{d^{\pi_1}}(\pi_1,\pi_2,\overline{\mathcal{T}}),
\end{align}
and we arrive at the inequality as desired where the last step comes from $D_\mathrm{TV}(\mathcal{T}(s,a)\|\overline{\mathcal{T}}(s,a)) = \frac{1}{2}\sum_{s'\inS}|\mathcal{T}(s'|s,a)-\overline{\mathcal{T}}(s'|s,a)|$.
\end{proof}
Now we introduce a representation function $\phi:S\times A\to Z$ and show how the error above may be reduced when $\overline{\mathcal{T}}(s,a)=\mathcal{T}_\Zset(s, \phi(s, a))$:
\begin{lemma}
\label{lem:bottleneck}
Let $\phi:S\times A\to Z$ for some space $Z$ and suppose there exists $\mathcal{T}_\Zset:S\times Z\to\Delta(S)$ such that $\overline{\mathcal{T}}(s,a)=\mathcal{T}_\Zset(s,\phi(s,a))$ for all $s\in S,a\in A$.
Then for any policies $\pi_1,\pi_2$,
\begin{align}
\mathrm{Err}_{d^{\pi_1}}(\pi_1,\pi_2,\overline{\mathcal{T}}) &\le \mathbb{E}_{s\sim d^{\pi_1}}[D_\mathrm{TV}(\pi_{1,Z}\|\pi_{2,Z})],
\end{align}
where $\pi_{k,Z}(z|s)$ is the marginalization of $\pi_k$ onto $Z$:
\begin{align}
\pi_{k,Z}(z|s) &:= \sum_{a\in A,\, z=\phi(s,a)}\pi_k(a|s)
\end{align}
for all $z\in Z, k\in\{1,2\}$.
\end{lemma}
\begin{proof}
\begin{align}
& \frac{1}{2}\sum_{s'\in S}\left|\mathbb{E}_{s\sim d^{\pi_1},a_1\sim\pi_1(s),a_2\sim\pi_2(s)}[\overline{\mathcal{T}}(s'|s,a_1) - \overline{\mathcal{T}}(s'|s,a_2)]\right| \\
&= \frac{1}{2}\sum_{s'\in S}\left|\sum_{s\in S,a\in A}\mathcal{T}_\Zset(s'|s,\phi(s,a))\pi_1(a|s)d^{\pi_1}(s) - \sum_{s\in S,a\in A}\mathcal{T}_\Zset(s'|s,\phi(s,a))\pi_2(a|s)d^{\pi_1}(s)\right| \nonumber \\
&=\frac{1}{2}\sum_{s'\in S}\left|\sum_{s\in S,z\in Z}\mathcal{T}_\Zset(s'|s,z)\!\!\!\sum_{\substack{a\in A,\\\phi(s,a)=z}}\pi_1(a|s)d^{\pi_1}(s) - \sum_{s\in S,z\in Z}\mathcal{T}_\Zset(s'|s,z)\sum_{\substack{a\in A,\\\phi(s,a)=z}}\pi_2(a|s)d^{\pi_1}(s)\right| \nonumber \\
&=\frac{1}{2}\sum_{s'\in S}\left|\sum_{s\in S,z\in Z}\mathcal{T}_\Zset(s'|s,z)\pi_{1,Z}(z|s) d^{\pi_1}(s) - \sum_{s\in S,z\in Z}\mathcal{T}_\Zset(s'|s,z)\pi_{2,Z}(z|s)d^{\pi_1}(s)\right| \nonumber \\
&=\frac{1}{2}\sum_{s'\in S}\left|\mathbb{E}_{s\sim d^{\pi_1}}\left[\sum_{z\in Z}\mathcal{T}_\Zset(s'|s,z)(\pi_{1,Z}(z|s) - \pi_{2,Z}(z|s))\right]\right| \\
&\le \frac{1}{2}\mathbb{E}_{s\sim d^{\pi_1}}\left[\sum_{z\in Z}\sum_{s'\in S}\mathcal{T}_\Zset(s'|s,z)\left|\pi_{1,Z}(z|s) - \pi_{2,Z}(z|s)\right|\right] \\
&= \frac{1}{2}\mathbb{E}_{s\sim d^{\pi_1}}\left[\sum_{z\in Z}\left|\pi_{1,Z}(z|s) - \pi_{2,Z}(z|s)\right|\right] \\
&=\mathbb{E}_{s\sim d^{\pi_1}}\left[D_\mathrm{TV}(\pi_{1,Z}\|\pi_{2,Z})\right],
\end{align}
and we arrive at the inequality as desired.
\end{proof}
\begin{lemma}\label{lem:marginal-opt}
Let $d\in\Delta(S,A)$ be some state-action distribution, $\phi:S\times A\to Z$, and $\pi_{\Zset}:S\to\Delta(Z)$.
Denote $\pi_{\alpha^*}$ as the optimal action decoder for $d,\phi$:
\begin{equation*}
\pi_{\alpha^*}(a|s,z) = \frac{d(s,a)\cdot\mathbbm{1}[z=\phi(s,a)]}{\sum_{a'\in A}d(s,a')\cdot\mathbbm{1}[z=\phi(s,a')]},
\end{equation*}
and $\pi_{\alpha^*,Z}$ as the marginalization of $\pi_{\alpha^*}\circ\pi_{\Zset}$ onto $Z$:
\begin{equation*}
\pi_{\alpha^*,Z}(z|s) := \sum_{a\in A,\, z=\phi(s,a)} (\pi_{\alpha^*}\circ\pi_{\Zset})(a|s) = \sum_{a\in A,\, z=\phi(s,a)}\sum_{\tilde z\in Z}\pi_{\alpha^*}(a|s, \tilde z)\pi_{\Zset}(\tilde z|s).
\end{equation*}
Then we have
\begin{equation}
\pi_{\alpha^*,Z}(z|s) = \pi_{\Zset}(z|s)
\end{equation}
for all $z\in Z$ and $s\in S$.
\begin{proof}
\begin{align}
\pi_{\alpha^*,Z}(z|s)
&= \sum_{a\in A,\, z=\phi(s,a)}\sum_{\tilde z\in Z}\pi_{\alpha^*}(a|s, \tilde z)\pi_{\Zset}(\tilde z|s) \\
&= \sum_{a\in A,\, z=\phi(s,a)}\sum_{\tilde z\in Z}\frac{d(s,a)\cdot\mathbbm{1}[\tilde z=\phi(s,a)]}{\sum_{a'\in A}d(s,a')\cdot\mathbbm{1}[\tilde z=\phi(s,a')]}\pi_{\Zset}(\tilde z|s) \\
&= \sum_{a\in A,\, z=\phi(s,a)}\frac{d(s,a)\cdot\mathbbm{1}[z=\phi(s,a)]}{\sum_{a'\in A}d(s,a')\cdot\mathbbm{1}[z=\phi(s,a')]}\pi_{\Zset}(z|s) \\
&= \pi_{\Zset}(z|s) \sum_{a\in A,\, z=\phi(s,a)}\frac{d(s,a)\cdot\mathbbm{1}[z=\phi(s,a)]}{\sum_{a'\in A}d(s,a')\cdot\mathbbm{1}[z=\phi(s,a')]} \\
&= \pi_{\Zset}(z|s),
\end{align}
and we have the desired equality.
\end{proof}
\end{lemma}
\begin{lemma}
\label{lem:decode}
Let $\pi_{\Zset}:S\to\Delta(Z)$ be a latent policy in $Z$ and $\pi_{\alpha}:S\times Z\to\Delta(A)$ be an action decoder, and let $\pi_{\alpha,Z}$ be the marginalization of $\pi_{\alpha}\circ\pi_{\Zset}$ onto $Z$:
\begin{equation*}
\pi_{\alpha,Z}(z|s) := \sum_{a\in A,\, z=\phi(s,a)} (\pi_{\alpha}\circ\pi_{\Zset})(a|s) = \sum_{a\in A,\, z=\phi(s,a)}\sum_{\tilde z\in Z}\pi_{\alpha}(a|s, \tilde z)\pi_{\Zset}(\tilde z|s).
\end{equation*}
Then for any $s\inS$ we have
\begin{align}
D_\mathrm{TV}(\pi_{\Zset}(s)\|\pi_{\alpha,Z}(s)) \le \max_{z\inZ}D_\mathrm{TV}(\pi_{\alpha^*}(s, z)\|\pi_{\alpha}(s, z)),
\end{align}
where $\pi_{\alpha^*}$ is the optimal action decoder defined in Lemma~{\textnormal{e}}f{lem:marginal-opt} (and this holds for any choice of $d$ from Lemma~{\textnormal{e}}f{lem:marginal-opt}).
\begin{proof}
\begin{align}
&D_\mathrm{TV}(\pi_{\Zset}(s)\|\pi_{\alpha,Z}(s)) \\
=& \frac{1}{2}\sum_{z\inZ}\left|\pi_{\Zset}(z|s) - \pi_{\alpha,Z}(z|s){\textnormal{i}}ght|\\
=& \frac{1}{2}\sum_{z\inZ}\left|\pi_{\Zset}(z|s) - \sum_{a\inA, z=\phi(s,a)}\sum_{\tilde z\inZ}\pi_{\alpha}(a|s, \tilde z)\pi_{\Zset}(\tilde z|s){\textnormal{i}}ght|\\
=& \frac{1}{2}\sum_{z\inZ}\left|\pi_{\Zset}(z|s) - \sum_{a\inA, z=\phi(s,a)}\sum_{\tilde z\inZ}\left(\pi_{\alpha}(a|s, \tilde z) - \pi_{\alpha^*}(a|s, \tilde z) + \pi_{\alpha^*}(a|s, \tilde z){\textnormal{i}}ght)\pi_{\Zset}(\tilde z|s){\textnormal{i}}ght|\\
=& \frac{1}{2}\sum_{z\inZ}\left|\sum_{a\inA, z=\phi(s,a)}\sum_{\tilde z\inZ}\left(\pi_{\alpha}(a|s, \tilde z) - \pi_{\alpha^*}(a|s, \tilde z){\textnormal{i}}ght)\pi_{\Zset}(\tilde z|s){\textnormal{i}}ght| ~~~~~~~~~~~~~\text{(by Lemma~{\textnormal{e}}f{lem:marginal-opt})}\\
\le& \frac{1}{2}\mathbb{E}_{\tilde z\sim\pi_{\Zset}(s)}\left[\sum_{z\inZ}\sum_{a\inA, z=\phi(s,a)}\left|\pi_{\alpha}(a|s, \tilde z) - \pi_{\alpha^*}(a|s, \tilde z){\textnormal{i}}ght|{\textnormal{i}}ght]\\
=& \frac{1}{2}\mathbb{E}_{\tilde z\sim\pi_{\Zset}(s)}\left[\sum_{a\inA}\left|\pi_{\alpha}(a|s, \tilde z) - \pi_{\alpha^*}(a|s, \tilde z){\textnormal{i}}ght|{\textnormal{i}}ght]\\
=& \mathbb{E}_{\tilde z\sim\pi_{\Zset}(s)}\left[D_\mathrm{TV}(\pi_{\alpha}(s,\tilde z)\|\pi_{\alpha^*}(s, \tilde z))\right]\\
\le& \max_{z\in Z}D_\mathrm{TV}(\pi_{\alpha}(s,z)\|\pi_{\alpha^*}(s, z)),
\end{align}
and we have the desired inequality.
\end{proof}
\end{lemma}
\begin{lemma}
\label{lem:tvs}
Let $\pi_{1,Z}$ be the marginalization of $\pi_1$ onto $Z$ as defined in Lemma~{\textnormal{e}}f{lem:bottleneck}, and let $\pi_{\Zset}$, $\pi_{\alpha}$, $\pi_{\alpha,Z}$ be as defined in Lemma~{\textnormal{e}}f{lem:decode}, and let $\pi_{\alpha^*,Z}$ be as defined in Lemma~{\textnormal{e}}f{lem:marginal-opt}. For any $s\inS$ we have
\begin{equation}
D_\mathrm{TV}(\pi_{1, Z}(s)\|\pi_{\alpha, Z}(s)) \le \max_{z\inZ}D_\mathrm{TV}(\pi_{\alpha}(s,z)\|\pi_{\alpha^*}(s, z)) + D_\mathrm{TV}(\pi_{1, Z}(s)\|\pi_{\Zset}(s)).
\end{equation}
\begin{proof}
The desired inequality is achieved by plugging the inequality from Lemma~{\textnormal{e}}f{lem:decode} into the following triangle inequality:
\begin{equation}
D_\mathrm{TV}(\pi_{1, Z}(s)\|\pi_{\alpha, Z}(s)) \le D_\mathrm{TV}(\pi_{\Zset}(s)\|\pi_{\alpha,Z}(s)) + D_\mathrm{TV}(\pi_{1, Z}(s)\|\pi_{\Zset}(s)).
\end{equation}
\end{proof}
\end{lemma}
Our final lemma will be used to translate on-policy bounds to off-policy.
\begin{lemma}
\label{lem:off-policy}
For two distributions $\rho_1,\rho_2\in\Delta(S)$ such that $\rho_1(s)>0 \Rightarrow \rho_2(s) > 0$ for all $s\in S$, and any real-valued function $h$ on $S$, we have
\begin{equation}
\mathbb{E}_{\rho_1}[h(s)] \le (1 + D_\mathrm{\chi^2}(\rho_1\|\rho_2)^\frac{1}{2}) \sqrt{\mathbb{E}_{\rho_2}[h(s)^2]}.
\end{equation}
\end{lemma}
\begin{proof}
The lemma is a straightforward consequence of the Cauchy--Schwarz inequality:
\begin{align}
\mathbb{E}_{\rho_1}[h(s)] &= \mathbb{E}_{\rho_2}[h(s)] + (\mathbb{E}_{\rho_1}[h(s)] - \mathbb{E}_{\rho_2}[h(s)]) \\
&= \mathbb{E}_{\rho_2}[h(s)] + \sum_{s\in S}\frac{\rho_1(s) - \rho_2(s)}{\rho_2(s)^{\frac{1}{2}}}\cdot \rho_2(s)^{\frac{1}{2}} h(s) \\
&\le \mathbb{E}_{\rho_2}[h(s)] + \left(\sum_{s\in S}\frac{(\rho_1(s) - \rho_2(s))^2}{\rho_2(s)}\right)^{\frac{1}{2}}\cdot \left(\sum_{s\in S}\rho_2(s) h(s)^2\right)^{\frac{1}{2}} \\
&= \mathbb{E}_{\rho_2}[h(s)] + D_\mathrm{\chi^2}(\rho_1\|\rho_2)^{\frac{1}{2}}\cdot \sqrt{\mathbb{E}_{\rho_2}[h(s)^2]}.
\end{align}
Finally, to get the desired bound, we simply note that the concavity of the square-root function implies $\mathbb{E}_{\rho_2}[h(s)] \le \mathbb{E}_{\rho_2}[\sqrt{h(s)^2}] \le \sqrt{\mathbb{E}_{\rho_2}[h(s)^2]}$.
\end{proof}
\section{Proofs for Major Theorems}
\subsection{Proof of Theorem~{\textnormal{e}}f{thm:tabular}}
\begin{proof}
Let $\pi_2 := \pi_{\alpha}\circ\pi_{\Zset}$, so that $\pi_{2,Z}(z|s) = \pi_{\alpha,Z}(z|s) = \sum_{a\in A,\phi(s,a)=z} (\pi_{\alpha}\circ\pi_{\Zset})(a|s)$. By plugging the result of Lemma~\ref{lem:tvs} into Lemma~\ref{lem:bottleneck}, we have
\begin{equation}
\mathrm{Err}_{d^{\pi_1}}(\pi_1,\pi_2,\overline{\mathcal{T}}) \le \mathbb{E}_{s\sim d^{\pi_1}}\left[\max_{z\in Z}D_\mathrm{TV}(\pi_{\alpha^*}(s, z)\|\pi_{\alpha}(s,z)) + D_\mathrm{TV}(\pi_{1, Z}(s)\|\pi_{\Zset}(s))\right].
\end{equation}
By plugging this result into Lemma~{\textnormal{e}}f{lem:model1}, we have
\begin{align}
\mathrm{Err}_{d^{\pi_1}}(\pi_1,\pi_2,\mathcal{T}) &\le |A|\mathbb{E}_{(s,a)\sim (d^{\pi_1}, \mathrm{Unif}_{\Aset})}[D_\mathrm{TV}(\mathcal{T}(s,a)\|\overline{\mathcal{T}}(s,a))]\\
&+ \mathbb{E}_{s\sim d^{\pi_1}}\left[\max_{z\inZ}D_\mathrm{TV}(\pi_{\alpha^*}(s, z)\|\pi_{\alpha}(s,z)){\textnormal{i}}ght]\\
&+ \mathbb{E}_{s\sim d^{\pi_1}}\left[D_\mathrm{TV}(\pi_{1, Z}(s)\|\pi_{\Zset}(s)){\textnormal{i}}ght].
\end{align}
By further plugging this result into Lemma~\ref{lem:performance} and setting $\pi_1 = \pi_*$, we have:
\begin{align}\label{eq:onpolicy}
\mathrm{Diff}(\pi_{\alpha}\circ\pi_{\Zset},\pi_*) &\leq
\frac{\gamma|A|}{1-\gamma}\cdot\mathbb{E}_{(s,a)\sim (d^{\pi_1}, \mathrm{Unif}_{\Aset})}[D_\mathrm{TV}(\mathcal{T}(s,a)\|\mathcal{T}_\Zset(s,\phi(s,a)))] \nonumber\\
&+ \frac{\gamma}{1-\gamma} \cdot \mathbb{E}_{s\sim{\bm{i}}sittarget}[\max_{z\in Z}D_\mathrm{TV}(\pi_{\alpha^*}(s,z)\|\pi_{\alpha}(s,z))] \nonumber\\
& + \frac{\gamma}{1-\gamma} \cdot\mathbb{E}_{s\sim {\bm{i}}sittarget}[D_\mathrm{TV}(\pi_{*,Z}(s)\|\pi_{\Zset}(s))].
\end{align}
Finally, by plugging in the off-policy results of Lemma~{\textnormal{e}}f{lem:off-policy} to the bound in~\mathbb{E}qref{eq:onpolicy} and by applying Pinsker's inequality $D_\mathrm{TV}(\mathcal{T}(s,a)\|\mathcal{T}_\Zset(s,\phi(s,a)))^2\le \frac{1}{2}D_\mathrm{KL}(\mathcal{T}(s,a)\|\mathcal{T}_\Zset(s,\phi(s,a)))$, we have
\begin{align}
\mathrm{Diff}(\pi_{\alpha}\circ\pi_{\Zset},\pi_*) &\leq C_1 \cdot\sqrtexplained{\frac{1}{2}\underbrace{\mathbb{E}_{(s,a)\sim{\bm{i}}sitrb}\left[D_\mathrm{KL}(\mathcal{T}(s,a)\|\mathcal{T}_\Zset(s, \phi(s,a))){\textnormal{i}}ght]}_{\displaystyle=J_\mathrm{T}(\mathcal{T}_\Zset, \phi)}}\nonumber\\
&+ C_2 \cdot\sqrtexplained{\frac{1}{2}\underbrace{\mathbb{E}_{s\sim{\bm{i}}sitrb}[\max_{z\inZ}
D_\mathrm{KL}(\pi_{\alpha^*}(s,z)\|\pi_{\alpha}(s,z))]}_{\displaystyle\approx~\mathrm{const}({\bm{i}}sitrb,\phi) + J_\mathrm{BC}dec(\pi_{\alpha},\phi)}} \nonumber\\
& + C_3\cdot\sqrtexplained{\frac{1}{2}\underbrace{\mathbb{E}_{s\sim {\bm{i}}sittarget}[D_\mathrm{KL}(\pi_{*,Z}(s)\|\pi_{\Zset}(s))]}_{\displaystyle =~\mathrm{const}(\pi_*,\phi) + J_\mathrm{BC}rep(\pi_{\Zset})}},
\end{align}
where $C_1 = \gamma|A|(1-\gamma)^{-1}(1+D_\mathrm{\chi^2}({\bm{i}}sittarget\|{\bm{i}}sitrb)^{\frac{1}{2}})$, $C_2=\gamma(1-\gamma)^{-1}(1+D_\mathrm{\chi^2}({\bm{i}}sittarget\|{\bm{i}}sitrb)^{\frac{1}{2}})$, and $C_3=\gamma(1-\gamma)^{-1}$. Since the $\max_{z\inZ}$ is not tractable in practice, we approximate $\mathbb{E}_{s\sim{\bm{i}}sitrb}[\max_{z\inZ}
D_\mathrm{KL}(\pi_{\alpha^*}(s,z)\|\pi_{\alpha}(s,z))]$ using $\mathbb{E}_{(s,a)\sim{\bm{i}}sitrb}[
D_\mathrm{KL}(\pi_{\alpha^*}(s,\phi(s,a))\|\pi_{\alpha}(s,\phi(s,a)))]$, which reduces to $J_\mathrm{BC}dec(\pi_{\alpha},\phi)$ with additional constants. We now arrive at
the desired off-policy bound in Theorem~{\textnormal{e}}f{thm:tabular}.
\end{proof}
\subsection{Proof of Theorem~{\textnormal{e}}f{thm:sample}}
\begin{lemma}
\label{lem:empirical-tv}
Let ${\textnormal{h}}o\in\Delta(\{1,\dots,k\})$ be a distribution with finite support. Let $\hat{{\textnormal{h}}o}_n$ denote the empirical estimate of ${\textnormal{h}}o$ from $n$ i.i.d. samples $X\sim{\textnormal{h}}o$. Then,
\begin{equation}
\mathbb{E}_n[D_\mathrm{TV}({\textnormal{h}}o\|\hat{{\textnormal{h}}o}_n)] \le \frac{1}{2}\cdot\frac{1}{\sqrt{n}}\sum_{i=1}^k \sqrt{{\textnormal{h}}o(i)} \le \frac{1}{2}\cdot\sqrt{\frac{k}{n}}.
\end{equation}
\end{lemma}
\begin{proof}
The first inequality is Lemma 8 in~\cite{berend2012convergence} while the second inequality is due to the concavity of the square root function.
\end{proof}
\begin{lemma}
\label{lem:tv-bc}
Let $\mathcal{D}:=\{(s_i,a_i)\}_{i=1}^n$ be i.i.d. samples from a factored distribution $x(s,a):={\textnormal{h}}o(s)\pi(a|s)$ for ${\textnormal{h}}o\in\Delta(S),\pi:S\to\Delta(A)$. Let $\hat{{\textnormal{h}}o}$ be the empirical estimate of ${\textnormal{h}}o$ in $\mathcal{D}$ and $\hat{\pi}$ be the empirical estimate of $\pi$ in $\mathcal{D}$.
Then,
\begin{equation}
\mathbb{E}_{\mathcal{D}}[\mathbb{E}_{s\sim{\textnormal{h}}o}[D_\mathrm{TV}(\pi(s)\|\hat{\pi}(s))]] \le \sqrt{\frac{|S||A|}{n}}.
\end{equation}
\end{lemma}
\begin{proof}
Let $\hat{x}$ be the empirical estimate of $x$ in $\mathcal{D}$. We have,
\begin{align}
\mathbb{E}_{s\sim{\textnormal{h}}o}[D_\mathrm{TV}(\pi(s)\|\hat{\pi}(s))] &= \frac{1}{2}\sum_{s,a} {\textnormal{h}}o(s)\cdot |\pi(a|s) - \hat{\pi}(a|s)| \\
&=\frac{1}{2}\sum_{s,a} {\textnormal{h}}o(s)\cdot \left|\frac{x(s,a)}{{\textnormal{h}}o(s)} - \frac{\hat{x}(s,a)}{\hat{{\textnormal{h}}o}(s)}{\textnormal{i}}ght| \\
&\le \frac{1}{2}\sum_{s,a} {\textnormal{h}}o(s)\cdot \left|\frac{\hat{x}(s,a)}{{\textnormal{h}}o(s)} - \frac{\hat{x}(s,a)}{\hat{{\textnormal{h}}o}(s)}{\textnormal{i}}ght| + \frac{1}{2}\sum_{s,a} {\textnormal{h}}o(s)\cdot \left|\frac{\hat{x}(s,a)}{{\textnormal{h}}o(s)} - \frac{x(s,a)}{{\textnormal{h}}o(s)}{\textnormal{i}}ght| \\
&= \frac{1}{2}\sum_{s,a} {\textnormal{h}}o(s)\cdot \left|\frac{\hat{x}(s,a)}{{\textnormal{h}}o(s)} - \frac{\hat{x}(s,a)}{\hat{{\textnormal{h}}o}(s)}{\textnormal{i}}ght| + D_\mathrm{TV}(x\|\hat{x}) \\
&= \frac{1}{2}\sum_{s} {\textnormal{h}}o(s)\cdot\left|\frac{1}{{\textnormal{h}}o(s)}-\frac{1}{\hat{{\textnormal{h}}o}(s)}{\textnormal{i}}ght| \left(\sum_{a} \hat{x}(s,a){\textnormal{i}}ght) + D_\mathrm{TV}(x\|\hat{x}) \\
&= \frac{1}{2}\sum_{s} {\textnormal{h}}o(s)\cdot\left|\frac{1}{{\textnormal{h}}o(s)}-\frac{1}{\hat{{\textnormal{h}}o}(s)}{\textnormal{i}}ght| \cdot\hat{{\textnormal{h}}o}(s) + D_\mathrm{TV}(x\|\hat{x}) \\
&=D_\mathrm{TV}({\textnormal{h}}o\|\hat{{\textnormal{h}}o}) + D_\mathrm{TV}(x\|\hat{x}).
\end{align}
Finally, the bound in the lemma is achieved by application of Lemma~{\textnormal{e}}f{lem:empirical-tv} to each of the TV divergences.
\end{proof}
To prove Theorem~\ref{thm:sample}, we first rewrite Theorem~\ref{thm:tabular} as
\begin{equation}
\mathrm{Diff}(\pi_{\alpha}\circ\pi_{\Zset},\pi_*) \leq
(\ref{eq:tabular-rep})(\phi) + (\ref{eq:tabular-dec})(\phi)
+ C_3\cdot\mathbb{E}_{s\sim {\bm{i}}sittarget}[D_\mathrm{TV}(\pi_{*,Z}(s)\|\pi_{\Zset}(s))],
\end{equation}
where (\ref{eq:tabular-rep}) and (\ref{eq:tabular-dec}) denote the first two terms in the bound of Theorem~\ref{thm:tabular}, and $C_3=\frac{\gamma}{1-\gamma}$.
The result in Theorem~\ref{thm:sample} then follows by setting $\phi = \phi_{opt}$ and $\pi_{\Zset}:=\pi_{opt,Z}$ and using the result of Lemma~\ref{lem:tv-bc}.
\subsection{Proof of Theorem~{\textnormal{e}}f{thm:linear}}
\begin{proof}
The gradient term in Theorem~{\textnormal{e}}f{thm:linear} with respect to a specific column $\theta_s$ of $\theta$ may be expressed as
\begin{align}
&\frac{\partial}{\partial\theta_s} \mathbb{E}_{\tilde{s}\sim d^{\pi}, a\sim\pi(\tilde{s})}[(\theta_{\tilde{s}} - \phi(\tilde{s},a))^2] \nonumber \\
&= -2\mathbb{E}_{a\sim\pi(s)}[{\bm{i}}sitpi(s)\phi(s,a)] + 2{\bm{i}}sitpi(s)\theta_s \nonumber \\
&= -2\mathbb{E}_{a\sim\pi(s)}[{\bm{i}}sitpi(s)\phi(s,a)] + 2\mathbb{E}_{z=\theta_s}[{\bm{i}}sitpi(s)\cdot z],
\end{align}
and so,
\begin{align}
&w(s')^\top \frac{\partial}{\partial\theta_s} \mathbb{E}_{\tilde{s}\sim d^{\pi}, a\sim\pi(\tilde{s})}[(\theta_{\tilde{s}} - \phi(\tilde{s},a))^2] \nonumber\\
&= -2\mathbb{E}_{a\sim\pi(s)}[{\bm{i}}sitpi(s)\overline{\mathcal{T}}(s'|s,a)] + 2\mathbb{E}_{z=\theta_s}[{\bm{i}}sitpi(s)w(s')^\top z].
\end{align}
Summing over $s\inS$, we have:
\begin{align}
\sum_{s\inS}w(s')^\top \frac{\partial}{\partial\theta_s} \mathbb{E}_{\tilde{s}\sim d^{\pi}, a\sim\pi(\tilde{s})}[(\theta_{\tilde{s}} - \phi(\tilde{s},a))^2]\nonumber\\ = 2\mathbb{E}_{s\sim{\bm{i}}sitpi,a\sim\pi(s),z=\theta_s}[-\overline{\mathcal{T}}(s'|s,a)+\mathcal{T}_\Zset(s'|s,z)]
\end{align}
Thus, we have:
\begin{align}\label{eq:linear-model}
\mathrm{Err}_{d^{\pi}}(\pi,\pi_\theta,\overline{\mathcal{T}}) &=\frac{1}{2}\sum_{s'\inS} \left| \mathbb{E}_{s\sim{\bm{i}}sitpi, a\sim\pi(s),z=\theta_s}[-\overline{\mathcal{T}}(s'|s,a) + \mathcal{T}_\Zset(s'|s, z)] {\textnormal{i}}ght| \nonumber\\
&=\frac{1}{4}\sum_{s'\inS}\left|\sum_{s\inS}w(s')^\top \frac{\partial}{\partial\theta_s} \mathbb{E}_{\tilde{s}\sim d^{\pi}, a\sim\pi(\tilde{s})}[(\theta_{\tilde{s}} - \phi(\tilde{s},a))^2] {\textnormal{i}}ght| \nonumber\\
&\le \frac{1}{4}|S|\|w\|_\infty \cdot \left\|\frac{\partial}{\partial\theta} \mathbb{E}_{s\sim d^{\pi}, a\sim\pi(s)}[(\theta_s - \phi(s,a))^2]
{\textnormal{i}}ght\|_1.
\end{align}
Then by combining Lemmas~{\textnormal{e}}f{lem:performance},~{\textnormal{e}}f{lem:model1},~{\textnormal{e}}f{lem:off-policy}, and apply~\mathbb{E}qref{eq:linear-model} (as opposed to Lemma~{\textnormal{e}}f{lem:bottleneck} as in the tabular case), we arrive at the desired bound in Theorem~{\textnormal{e}}f{thm:linear}.
\end{proof}
\section{Experiment Details}
\label{app:exp}
\subsection{Architecture}
We parametrize $\phi$ as a two-hidden layer fully connected neural network with $256$ units per layer. A Swish~\citep{ramachandran2017searching} activation function is applied to the output of each hidden layer. We use embedding size $64$ for AntMaze and $256$ for Ant and all DeepMind Control Suite (DMC) tasks after sweeping values of $64$, $256$, and $512$, though we found TRAIL\xspace to be relatively robust to the latent dimension size as long as it is not too small (i.e., $\ge64$). The latent skills in temporal skill extraction require a much smaller dimension size, e.g., $8$ or $10$ as reported by \citet{ajay2020opal,pertsch2021guided}. We tried increasing the latent skill size for these work during evaluation, but found the reported value $8$ to work the best. We additionally experimented with different extend of skill extraction, but found the previously reported $t=10$ to also work the best. We implement the trajectory encoder in OPAL, SkiLD, and SPiRL using a bidirectional LSTM with hidden dimension $256$. We use $\beta = 0.1$ for the KL regularization term in the $\beta$ VAE of OPAL (as reported). We also use $0.1$ as the weight for SPiRL and SkiLD's KL divergence terms.
\subsection{Training and Evaluation}
During pretraining, we use the Adam optimizer with learning rate $0.0003$ for $200$k iterations with batch size $256$ for all methods that require pretraining. During downstream behavioral cloning, learned action representations are fixed, but the action decoder is fine-tuned on the expert data as suggested by~\citet{ajay2020opal}. Behavioral cloning for all methods including vanilla BC is trained with learning rate $0.0001$ for $1$M iterations. We experimented with learning rate decay of downstream BC by a factor of $3$ at the $200$k boundary for all methods. We found that when the expert sample size is small, decaying learning rate can prevent overfitting for all methods. The reported results are with learning rate decay on AntMaze and without learning rate decay on other environments for all methods. During the downstream behavioral cloning stage, we evaluate the latent policy combined with the action decoder every $10$k steps by executing $\pi_{\alpha}\circ\pi_{\Zset}$ in the environment for $10$ episodes and compute the average total return. Each method is run with $4$ seeds where each seed corresponds to one set of action representations and downstream imitation learning result on that set of representations. We report the mean and standard error for all methods in the bar and line figures.
\subsection{Modification to SkiLD and SPiRL}
Since SkiLD~\citep{pertsch2021guided} and SPiRL~\citep{pertsch2020accelerating} are originally designed for RL as opposed to imitation learning, we replace the downstream RL algorithms of SkiLD and SPiRL by behavioral cloning with regularization (but keep skill extraction the same as the original methods). Specifically, for SkILD, we apply a KL regularization term between the latent policy and the learned skill prior in the suboptimal offline dataset during pretraining, and another KL regularization term between the latent policy and a learn ``skill posterior'' on the expert data as done in the original paper during downstream behavioral cloning. We do not need to train the binary classifier that SkiLD trains to decide which regularizer to apply because we know which set of actions are expert versus suboptimal in the imitation learning setting. For SPiRL, we apply the KL divergence between latent policy and skill prior extracted from offline data (i.e., using the red term in Algorithm 1 of~\citet{pertsch2020accelerating}) as an additional term to latent behavioral cloning.
\subsection{Dataset Details}
\paragraph{AntMaze.} For the expert data in AntMaze, we use the goal-reaching expert policies trained by~\citet{ajay2020opal} (expert means that the agent is trained to navigate from the one corner of the maze to the opposite corner) to collect $n=10$ trajectories. For the suboptimal data in AntMaze, we use the full D4RL datasets \texttt{antmaze-large-diverse-v0}, \texttt{antmaze-medium-play-v0}, \texttt{antmaze-medium-diverse-v0}, and \texttt{antmaze-medium-play-v0}.
\paragraph{Ant.} For the expert data in Ant, we use a small set of expert trajectories selected by taking either the first $10$k or $25$k transitions from \texttt{ant-expert-v0} in D4RL, corresponding to about $10$ and $25$ expert trajectories, respectively. For the suboptimal data in Ant, we use the full D4RL datasets \texttt{ant-medium-v0}, \texttt{ant-medium-replay-v0}, and \texttt{ant-random-v0}.
\paragraph{RL Unplugged.}
For DeepMind Control Suite~\citep{tassa2018deepmind} set of tasks, we use the RL Unplugged~\citep{gulcehre2020rl} dataset. For the expert data, we take $\frac{1}{10}$ of the trajectories whose episodic reward is among the top $20\%$ of the open source RL Unplugged datasets following the setup in~\citet{zolna2020offline}. For the suboptimal data, we use the bottom $80\%$ of the RL Unplugged dataset. Table~{\textnormal{e}}f{tab:rlu} records the total number of trajectories available in RL Unplugged for each task ($80\%$ of which are used as suboptimal data), and the number of expert trajectories used in our evaluation.
\begin{table}[h]
\centering
\begin{tabular}{l|r|r}\toprule
Task & \# Total & \# $\mathcal{D}^{\pitarget}$ \\\hline
cartpole-swingup & $40$ & $2$ \\
cheetah-run & $300$ & $3$ \\
fish-swim & $200$ & $1$ \\
humanoid-run & $3000$ & $53$ \\
walker-stand & $200$ & $4$ \\
walker-walk & $200$ & $6$ \\\bottomrule
\end{tabular}
\caption{Total number of trajectories from RL Unplugged~\citep{gulcehre2020rl} locomotion tasks used to train CRR~\citep{wang2020critic} and the number of expert trajectories used to train TRAIL\xspace. The bottom $80\%$ of \# Total is used to learn action representations by TRAIL\xspace.}
\label{tab:rlu}
\end{table}
\newpage
\section{Additional Empirical Results}
\label{app:results}
\begin{figure}
\caption{Average task rewards (over $4$ seeds) of TRAIL\xspace EBM (Theorem~\ref{thm:linear}).}
\label{fig:rlu5}
\end{figure}
\end{document} |
\begin{document}
\title{Some applications of smooth bilinear forms with Kloosterman
sums}
\author{Valentin Blomer}
\address{Mathematisches Institut, Universit\"at G\"ottingen,
Bunsenstr. 3-5, 37073 G\"ottingen, Germany} \email{[email protected]}
\author{\'Etienne Fouvry}
\address{Laboratoire de Math\'ematiques d'Orsay, Universit\' e Paris--Saclay \\
91405 Orsay \\France}
\email{[email protected]}
\author{Emmanuel Kowalski}
\address{ETH Z\"urich -- D-MATH\\
R\"amistrasse 101\\
CH-8092 Z\"urich\\
Switzerland} \email{[email protected]}
\author{Philippe Michel} \address{EPF Lausanne, Chaire TAN, Station 8, CH-1015
Lausanne, Switzerland } \email{[email protected]}
\author{Djordje Mili\'cevi\'c}
\address{Department of Mathematics,
Bryn Mawr College,
101 North Merion Avenue,
Bryn Mawr, PA 19010-2899, U.S.A.}
\curraddr{Max-Planck-Institut f\"ur Mathematik, Vivatsgasse 7, D-53111 Bonn, Germany}
\email{[email protected]}
\thanks{V.\ B.\ was partially supported by the
Volkswagen Foundation. \'E.\ F.\ thanks ETH Z\"urich and EPF Lausanne
for financial
support. Ph. M. was partially supported by the SNF (grant
200021-137488) and the ERC (Advanced Research Grant 228304). V.\ B.,
Ph.\ M.\ and E.\ K.\ were also partially supported by a DFG-SNF lead
agency program grant (grant 200021L\_153647). D. M. was partially
supported by the NSF (Grant
DMS-1503629) and ARC (through Grant DP130100674).}
\subjclass[2010]{11M06, 11F11, 11L05, 11L40, 11F72, 11T23}
\keywords{$L$-functions, modular forms, shifted convolution sums,
Kloosterman sums, incomplete exponential sums}
\begin{abstract}
We revisit a recent bound of I. Shparlinski and T. P. Zhang on
bilinear forms with Kloosterman sums, and prove an extension for
correlation sums of Kloosterman sums against Fourier coefficients of
modular forms. We use these bounds to improve on earlier results on
sums of Kloosterman sums along the primes and on the error term of
the fourth moment of Dirichlet $L$-functions.
\end{abstract}
\maketitle
\setcounter{tocdepth}{1}
\section{Statement of results}\label{intro}
\subsection{Preliminaries}
This note is motivated by a recent result of I. E. Shparlinski and
T. P. Zhang \cite{SZ} concerning bilinear forms with Kloosterman
sums. Given a prime $q$ and $m\in{\mathbf{F}_q}$, let
\[
\Kl(m;q):=\frac{1}{\sqrt{q}}\sum_{\substack{x,y\in\mathbf{F}_q^\times\\ xy=1}}e_q(y+mx)
\]
denote the normalized Kloosterman sum, where
$e_q (x) =\exp( 2 \pi i x /q)$. Shparlinski and Zhang (\cite [Theorem
3.1]{SZ}) proved the following theorem.
\begin{theorem}[Shparlinski--Zhang]\label{thmSZ}
Let $q$ be a prime number and let $\mathcal{M},\mathcal{N}\subset [1,q-1]$ be
intervals of lengths $M,N\geqslant 1$. Then we have
\begin{equation}\label{SZbound1}
\mathop{\sum\sum}\limits_{m\in{\mathcal{M}},n\in\mathcal{N}}\Kl(mn;q)\ll_{\varepsilon} q^{\varepsilon}\Bigl( q^{1/2}+\frac{MN}{q^{1/2}} \Bigr)
\end{equation}
for any $\varepsilon > 0$, where the implied constant depends only on $\varepsilon$.
\end{theorem}
In light of the Weil bound for Kloosterman sums $|\Kl(m;q)|\leqslant 2$, the
estimate \eqref{SZbound1} is non-trivial as long as $MN$ is a bit larger than
$q^{1/2}$. On the other hand, if $M$ or $N$ is close to $q$, other
methods (e.g.\ the completion method) become more efficient. In particular,
the restriction that $M$ and $N$ are $\leqslant q$ is not really
restrictive for applications.
The aim of this paper is two-fold. On the one hand, we put Theorem \ref{thmSZ}
into a slightly more general context in Propositions~\ref{propSZ} and \ref{propIScuspidal}; viewing it as a correlation estimate for Kloosterman sums and a divisor function (which itself is a Fourier coefficient of an Eisenstein series), it turns out to be a consequence of a version of the Voronoi summation formula. On the other hand, we give two applications of independent interest to the fourth moment of Dirichlet $L$-functions in Theorem~\ref{472} and sums of Kloosterman sums over primes in Theorem~\ref{Klpq}; these applications are discussed in Subsection \ref{appl}.
\subsection{Variations on a theme} Our first result is a smoothed version of the bound \eqref{SZbound1}.
To state it, we use the following class of smoothing functions. For a
modulus $q\geqslant 1$ and a parameter $Q\geqslant 1$, we will consider
functions satisfying the following conditions:
\begin{equation}\label{Wbound}
\begin{split}
W\colon [0,+\infty[\to\mathbf{C}\text{ is smooth, }
\mathrm{Supp}(W)\subset [1/2,2],\\
W^{(j)}(x)\ll_{j, \varepsilon} \bigl(q^{\varepsilon}Q\bigr)^j\,
\text{ for any $x\geqslant 0$, $j\geqslant 0$ and $\varepsilon>0$.}
\end{split}
\end{equation}
\begin{proposition}\label{propSZ} Let $q$ be a prime number and let
$Q\geqslant 1$ be a real number. Let $W_1,W_2$ be functions
satisfying~\eqref{Wbound}. For any $M,N\geqslant 1$ and any integer $a$
coprime with $q$, we have
\begin{equation}\label{SZboundsmooth}
\mathop{\sum\sum}\limits_{m,n}W_1\Bigl(\frac{m}M\Bigr)W_2\Bigl(\frac{n}N\Bigr)\Kl(amn;q)\ll_\varepsilon
(qQ)^{\varepsilon}\, Q^2\,\Bigl(q^{1/2}+\frac{MN}{q^{1/2}}\Bigr).
\end{equation}
Furthermore, if $W_3$ also satisfies~\eqref{Wbound}, then for any
$Y\geqslant 1$, we have
\begin{equation}\label{SZboundsmooth1}
\mathop{\sum\sum}\limits_{m,n}W_1\Bigl(\frac{m}M\Bigr)
W_2\Bigl(\frac{n}N\Bigr)W_3\Bigl( \frac{mn}{Y}\Bigr)\Kl(amn;q)\ll_\varepsilon
(qQ)^{\varepsilon}\, Q^2\,\Bigl(q^{1/2}+\frac{MN}{q^{1/2}}\Bigr).
\end{equation}
In both cases, the implied constant depends only on $\varepsilon$.
\end{proposition}
The inequalities \eqref{SZboundsmooth} and \eqref{SZboundsmooth1}
could be easily deduced from the result of Shparlinski and Zhang by
summation by parts with respect to the variables $m$ and $n$. In \S
\ref{par2}, we will give an alternative proof based on~\cite[Prop.~2.2]{FKMd3}.
The $Q$-dependence in Proposition~\ref{propSZ} is presented in a compact form well suited for our applications but it is not fully optimized otherwise (in particular, for Theorem~\ref{472} we will be using $Q=q^{\varepsilon}$); our proof actually yields a better $Q$-dependence in some other ranges.
We can view the bounds \eqref{SZboundsmooth} and
\eqref{SZboundsmooth1} essentially as sums over a single variable weighted by the
divisor function $d$. The advantage of our proof of Proposition \ref{propSZ} is that it provides naturally an automorphic generalization, where the divisor function is replaced with Fourier
coefficients of modular forms.
\begin{proposition}\label{propIScuspidal} Let $(\lambda_f(n))_{n\geqslant 1}$ be
the Hecke eigenvalues of a holomorphic cuspidal Hecke eigenform $f$
of level $1$, normalized so that $|\lambda_f(n)|\leqslant d(n)$. Let $q$ be a
prime number, and let $W$ be a function satisfying~\eqref{Wbound}
with $Q=1$. Let $a$ be an integer coprime to $q$. For any $N\geqslant 1$
and any $\varepsilon>0$, we have
\begin{equation}\label{SZcusp}
\sum_{n\geqslant 1}\lambda_f(n)\Kl(an;q)
W\Bigl(\frac{n}N\Bigr)\ll_{\varepsilon, f} (qN)^{\varepsilon}\Bigl(q^{1/2}+\frac{N}{q^{1/2}}\Bigr)
\end{equation}
where the implied constant depends only on $f$ and $\varepsilon$.
\end{proposition}
\begin{remark}
This is by no means the most general statement that may be proved
along these lines.
\end{remark}
As pointed out in \cite{SZ}, the estimates \eqref{SZbound1} and
\eqref{SZboundsmooth} are significant improvements of the bound
\begin{equation}\label{FKMbound1}
\mathop{\sum\sum}\limits_{m,n}W_1\Bigl(\frac{m}M\Bigr)W_2\Bigl(\frac{n}N\Bigr)\Kl(amn;q)\ll_{\varepsilon, Q}
q^{\varepsilon}MN\Bigl(1+\frac{q}{MN}\Bigr)^{1/2}q^{-1/8},
\end{equation}
and likewise the estimate \eqref{SZcusp} improves significantly over
\begin{equation}\label{FKMbound2} \sum_{n\geqslant 1}\lambda_f(n)\Kl(an;q)W\Bigl(\frac{n}N\Bigr)\ll_{\varepsilon,Q}
q^{\varepsilon}N\Bigl(1+\frac{q}{N}\Bigr)^{1/2}q^{-1/8},
\end{equation}
both of which were obtained by Fouvry, Kowalski and Michel as special
cases of \cite[Thm.~1.16]{FKM2} and \cite[Thm.~1.2]{FKM1}.
\subsection{Applications}\label{appl} The bounds \eqref{FKMbound1} and
\eqref{FKMbound2}
have been applied recently in a number of problems, and the bounds
\eqref{SZbound1} and \eqref{SZboundsmooth} lead to further
improvements. The main source for these improvements is the new input of Proposition \ref{propSZ}, but a bit of extra work is necessary.
As a first application, we can improve our work on the error term for
the fourth moment of Dirichlet series $L(s, \chi)$ of characters
$\chi$ to a prime modulus $q$ (\cite[Theorem 1.1]{445}).
\begin{theorem}\label{472}
There exists a polynomial $P_4\in \mathbf{R}[X]$ of degree $4$, such that
\[
\frac{1}{q-1}\sum_{\chi\mods q}|L(\chi,1/2)|^4=P_4(\log
q)+O(q^{-1/20+\varepsilon})
\]
for all primes $q$, where the implied constant depends only on
$\varepsilon>0$. If the Ramanujan--Petersson conjecture holds for Fourier
coefficients of Hecke--Maa{\ss} forms of level $1$, then the exponent $1/20$
can be replaced by $1/16$.
\end{theorem}
\begin{remark} In \cite[Theorem 1.1]{445}, the exponents were
respectively $1/32$ (unconditionally) and $1/24$ (assuming the
Ramanujan--Petersson conjecture). The first breakthrough in this
respect is due to M.\ Young \cite{MY} who obtained an asymptotic
formula with exponents $5/512$ (resp.\ $1/80$).
\end{remark}
\begin{remark}
The proof of Theorem \ref{472} follows the same lines as \cite[\S
6.3]{445}, except that instead of the bound \cite [(5.5)]{445}
(i.e.\ \eqref{FKMbound1} above) we use Proposition \ref{propSZ}. It
is of some interest to record here in outline how this improved
exponent arises. The problem of the fourth moment leads to
evaluating non-trivially the shifted convolution type sum
\begin{equation}\label{eqquadrismooth}
\mathop{\sum\sum}\limits_\stacksum{m\asymp M,n\asymp N}{m\equiv n\mods q}d(m)d(n)\approx
\mathop{\sum\sum}\limits_\stacksum{m_1,m_2,n_1,n_2}{m_1m_2\equiv n_1n_2\mods q}1
\end{equation}
with $d$ the usual divisor function and with
$MN=M_1M_2N_1N_2\approx q^2$. The spectral theory of automorphic forms
provides a good error term when $M$ and $N$ are relatively
close in the logarithmic scale. Otherwise, assuming that $N=N_1N_2\geqslant M=M_1M_2$, we apply the
Poisson summation formula to both variables $n_1$ and $n_2$
(equivalently, the Voronoi summation formula applied to the variable
$n=n_1n_2$), getting two variables of dual size $n_1^*\sim q/N_1$ and
$n_2^*\sim q/N_2$ and a smooth quadrilinear sum of Kloosterman sums
\[ \mathop{\sum\sum}\limits_{m_1,m_2,n^*_1,n^*_2}\Kl(m_1m_2n_1^*n_2^*;q), \]
which is evaluated by various means, in particular using the smooth
bilinear sum bound \eqref{SZboundsmooth}. In our specific case, the
bound \eqref{SZboundsmooth} amounts to applying the Poisson formula to
two of the four variables $m_1,m_2,n^*_1,n^*_2$. This leads back to a
sum of the type \eqref{eqquadrismooth}, which is then bounded
trivially. This argument is not circular, and allows for an
improvement, because we (implicitly) apply the process to variables
different from the ones we started from (for instance to $m_1$ and
$n_1^*$ instead of $n_1^*$ and $n_2^*$).
\end{remark}
Our second application is an improvement of the first bound in
\cite[Cor.~1.13] {FKM2} for Klooster\-man sums over primes in short
intervals:
\begin{theorem}\label{Klpq} Let $q$ be a prime number.
Let $Q\geqslant 1$ be a parameter and let $W$ be a function
satisfying~\eqref{Wbound}. Then for every $X$ such that
$2\leqslant X\leqslant q$ and every $\varepsilon >0$, we have
\begin{equation}\label{evening2}
\sum_{p\text{ prime }}
W \Bigl( \frac{p}{X}\Bigr) \Kl (p;q) \ll_{\varepsilon}
q^{1/4+\varepsilon} Q^{1/2} X^{2/3}.
\end{equation}
\par
In addition, for every prime $q$, every $X$ such that $2\leqslant X\leqslant q$
and every $\varepsilon >0$, we have
\begin{equation}\label{evening1}
\sum_\stacksum{p\leqslant X}{p\text{ prime}}\Kl(p;q)\ll_{\varepsilon} q^{1/6+\varepsilon}\, X^{7/9}.
\end{equation}
In both cases, the implicit constant depends only on $\varepsilon$.
\end{theorem}
\begin{remark} The range where these bounds are non-trivial is the
same as that in~\cite[Cor.~1.13]{FKM2}, namely the length of
summation $X$ should be greater than $q^{3/4 +\varepsilon}$ if $Q$
is fixed. The improvement therefore lies in the greater
cancellation in this allowed range. For instance, when $X=q$, we
gain a factor $q^{1/18 -\varepsilon}$ over the trivial bound for
the sum appearing in \eqref{evening1} instead of
$q^{1/48 -\varepsilon}$ in \cite [Corollary 1.13]{FKM2}.
\end{remark}
\subsection*{Acknowledgement.} We would like to thank the referee for very useful suggestions that improved the presentation of the paper.
\section{Correlation sums of Kloosterman sums and divisor-like
functions}\label{par2}
In this section, we revisit Theorem \ref{thmSZ} and establish Proposition \ref{propSZ}.
The idea behind the proof of Theorem \ref{thmSZ} is that after applying the completion method twice over the $m$ and $n$ variables, the Kloosterman sum $\Kl(amn;q)$ is transformed into the Dirac type function
$q^{1/2}\delta_{mn\equiv a\mods q}$, and taking the congruence condition into account one saves (in the most favourable situation) a factor $q^{1/2}/q=q^{-1/2}$ over the trivial bound.
In our smoothed setting, the completion method is replaced by two
applications of the Poisson summation formula or more precisely by a
single application of the \emph{tempered Voronoi summation formula} of
Deshouillers and Iwaniec, in the form established in
\cite[Prop.~2.2]{FKMd3}.
Let $q$ be a prime number, and let $K\colon\mathbf{Z}\to \mathbf{C}$ be a
$q$-periodic function. The \emph{normalized Fourier transform} of $K$
is the $q$-periodic function on $\mathbf{Z}$ defined by
\begin{equation*}
\fourier{K}(h) = \frac{1}{\sqrt{q}}\sum_{n\bmod q} K(n) e_q(
{hn})
\end{equation*}
and the \emph{Voronoi transform} of $K$ is the $q$-periodic function
on $\mathbf{Z}$ defined by
\[
\bessel{K}(n) = \frac{1}{\sqrt{q}}\sum_{\substack{h\bmod q\\(h,q) =1}}
\fourier{K}(h) e_q ({\overline h n} ).
\]
\begin{proposition}[Tempered Voronoi formula modulo
primes]\label{Voronoigeneral0}
Let $q$ be a prime number, let $K\, :\ \mathbf{Z} \longrightarrow \mathbf{C}$ be a
$q$-periodic function, and let $G$ be a smooth function on $\mathbf{R}^2$
with compact support and Fourier transform denoted by $\widehat G$. We have
\begin{equation}\label{voronoi}
\mathop{\sum \Bigl.^{*}}\limitsum_{m,n\in\mathbf{Z}} K(mn) G(m,n)=\frac{\fourier{K}(0)}{\sqrt{q}}
\ \mathop{\sum \Bigl.^{*}}\limitsum_{m,n\in\mathbf{Z}} G(m,n)+
\frac{1}{q}\ \mathop{\sum \Bigl.^{*}}\limitsum_{m,n\in\mathbf{Z}} \bessel{K}(mn)\fourier{G}\Bigl(\frac{m}q,\frac
nq\Bigr).
\end{equation}
\end{proposition}
The key point is that when $K$ is a (multiplicatively shifted)
Kloosterman sum, then $\widecheck{K}$ is a normalized delta-function:
\begin{lemma}\label{immediate} For $(a,q)=1$ and $K(n)=\Kl(an;q)$ one has
\[
\widehat K(h)=\begin{cases}0&\text{if $q\mid h$},\\
e_q(-a\ov h) &\text{if $q\nmid h$,}
\end{cases}
\]
and
\[
\bessel{K}(n)=
\begin{cases}\displaystyle{\frac{q-1}{q^{1/2}}}&\text{if $n\equiv a \bmod q$},\\
\displaystyle{-\frac{1}{q^{1/2}}}&\text{otherwise}.
\end{cases}
\]
\end{lemma}
This lemma is proved by an immediate computation. We now begin with
the proof of \eqref{SZboundsmooth}. Let $q$ be a prime and let $W$ be
a function satisfying~(\ref{Wbound}). By integration by parts, we then
have
\[
\widehat W (t) \ll_{j,\varepsilon} \min \bigl( 1, q^{j\varepsilon}\vert
t/Q\vert^{-j}\bigr)
\]
for $t\in \mathbf{R}$ and for any integer $j\geqslant 0$ and $\varepsilon >0$,
where the implied constant depends only on $j$ and $\varepsilon$.
Defining $G(m,n)=W_1(m/M) W_2(n/N)$, we deduce that for any $A$ and
any $\varepsilon>0$, we have
\begin{equation}\label{G}
\widehat G\Bigl(\frac{m}q,\frac{n}q\Bigr)=M\widehat W_1\Bigl(\frac{mM}q\Bigr)N\widehat W_2\Bigl(\frac{nN}q\Bigr)\ll_{\varepsilon,A} q^\varepsilon MN\Bigl(1+\frac{\vert m\vert M}{qQ}\Bigr)^{-A}\Bigl(1+\frac{\vert n\vert N}{qQ}\Bigr)^{-A}.
\end{equation}
We next apply the Voronoi formula, Proposition~\ref{Voronoigeneral0}, with $K(n)=\Kl(an;q)$ to the left-hand side of
\eqref{SZboundsmooth}. The first term on the right-hand side of
\eqref{voronoi} vanishes since $\fourier{K}(0) = 0$. By Lemma \ref{immediate}
and \eqref{G}, the contribution of $mn \not\equiv a$ (mod $q$) in the
second term is of order at most
\begin{align*}
\frac{MN}{q^{3/2-\varepsilon}} \sum_{m, n\in\mathbf{Z}} \Bigl(1+\frac{\vert m\vert M}{qQ}\Bigr)^{-2}\Bigl(1+\frac{\vert n\vert N}{qQ}\Bigr)^{-2}& \ll \frac{MN}{q^{3/2-\varepsilon}}\Bigl(1 + \frac{qQ}{M}\Bigr) \Bigl(1 + \frac{qQ}{N}\Bigr) \\
&\ll q^{\varepsilon}\Bigl(\frac{MN}{q^{3/2}} + \frac{(M+N)Q}{q^{1/2}} + q^{1/2}Q^2\Bigr).
\end{align*}
Similarly, the remaining terms $mn \equiv a$ (mod $q$) are, up to a constant, bounded by
\[ q^\varepsilon\frac{MN}{q^{1/2}} \sum_{n \equiv a\, (\text{mod }q)} d(n) \Bigl(1 + \frac{n MN}{q^2Q^2}\Bigr)^{-2} \ll (q^2Q)^{\varepsilon} \left(\frac{MN}{q^{1/2}} + Q^2 q^{1/2}\right). \]
This completes the proof of \eqref{SZboundsmooth}.
Next, we prove \eqref{SZboundsmooth1}. We may suppose that
\begin{equation*}
MN/8 <Y <8 MN,
\end{equation*}
since otherwise the sum of interest is empty. Then we see that for
$M/2 < x<2M$ and $N/2<y<2N$, we have the inequalities
\[
\frac{\partial^{i+j} W_3(xy/Y)}{\partial x^i \, \partial y^j} \ll_{\varepsilon, i, j} (q^\varepsilon Q)^{i+j} M^{-i}N^{-j}
\]
for all non-negative integers $i, j$. Hence the function
$ G(x,y) = W_1(x/M) W_2(y/N)
W_3(xy/Y)$
satisfies the inequalities
\[
\frac{\partial^{i+j} G(x,y)}{\partial x^i \partial y^j}\ll_{\varepsilon,i,j} (q^\varepsilon Q)^{i+j} x^{-i} y^{-j},
\]
for $x$, $y >0$, $\varepsilon >0$ and integers $i, j \geqslant
0$. By repeated integration by parts of the definition of the Fourier
transform
\[
\widehat G (u,v)= \int_{-\infty}^{\infty} \int_{-\infty}^\infty G(x,y) e( -ux-vy) dx\, dy,
\]
we obtain the bound
\[
\widehat G\Bigl( \frac{m}{q}, \frac{n}{q}\Bigr) \ll_{\varepsilon, A} q^\varepsilon MN \Bigl( 1 +\frac{\vert m\vert M}{qQ}\Bigr)^{-A}
\Bigl( 1 +\frac{\vert n\vert N}{qQ}\Bigr)^{-A}
\]
for any $A$ and any $\varepsilon >
0$, analogously to \eqref{G}. The end of the proof of
\eqref{SZboundsmooth1} is now similar to \eqref{SZboundsmooth}.
For future reference we record the following bound for type II sums of
Kloosterman sums
\cite[Thm.~1.17]{FKM2}.
\begin{proposition}\label{proptypeII} Let $q$ be a prime number.
Let $1\leqslant M,N\leqslant q$ and $(\alpha_m)$, $(\beta_n)$ be sequences of
complex numbers supported in $[M,2M]$ and $[N,2N]$ respectively. Let
either $Q=1$ and $W$ be the constant function $1$, or $Q\geqslant 1$ and $W$
be a function satisfying~\eqref{Wbound}. Then, for every
$\varepsilon >0$, we have
\[
\mathop{\sum \Bigl.^{*}}\limitsum_{m,n}\alpha_m \beta_n \Kl (mn;q)W
\Bigl(\frac{mn}{Y}\Bigr)\ll_\varepsilon \Vert \mathbf \alpha\Vert_2 \,
\Vert \mathbf \beta \Vert_2\, (MN)^{1/2}\Bigl( \frac1M+ Q\frac{q^{1/2+
\varepsilon}}{N} \Bigr)^{1/2}.
\]
\end{proposition}
This is a special case of \cite[Thm.~1.17]{FKM2} when $W$ is the
constant $1$. For smooth $W$, the same proof applies, except that we apply partial summation in \cite[(3.2)] {FKM2} if $m_1 \not= m_2$ to remove the weight $W(m_1n/Y) W(m_2n/Y)$; this produces a factor $Q$ that after taking square roots produces the above bound.
\section{Correlation sums of Kloosterman sums and Hecke eigenvalues}
In this section we prove Proposition \ref{propIScuspidal}. We replace
the tempered Voronoi summation formula by the Voronoi summation
formula for cusp forms, which we state in a form suited to our purpose.
\begin{proposition}[Voronoi summation formula for cusp forms with arithmetic weights modulo
primes]\label{prvoronoi}
Let $q $ be a prime. Let $W$ be a smooth function compactly supported in
$]0,\infty[$ and let $f$ be a holomorphic cuspidal Hecke eigenform
of level $1$ and weight $k$. Let $\varepsilon(f)=\pm 1$ denote the sign of
the functional equation of the Hecke $L$-function $L(f,s)$ and let
\[
\widetilde W(y)=\int_{0}^\infty W(u)\mathcal{J}_k(4\pi\sqrt{ uy})du,
\]
where
\begin{equation*}
\mathcal{J}_k(u) =
2\pi i^kJ_{k-1}(u).
\end{equation*}
Then, for any $q$-periodic arithmetic function $K\colon \mathbf{Z}\to\mathbf{C}$, we have
\begin{multline*}
\sum_{n\geqslant 1}\lambda_f(n)K(n)W\Bigl(\frac nN\Bigr)= \frac{\widehat K(0)}{q^{1/2}}\sum_{n\geqslant 1} \lambda_f(n)W\Bigl(\frac nN\Bigr)\\
+\varepsilon(f)\frac{N}{q} \sum_{n\geqslant 1}\lambda_f(n)\widecheck K( n)\widetilde
W\Bigl(\frac{nN}{q^2}\Bigr).
\end{multline*}
In particular, for $a$ coprime to $q$, we have
\begin{multline*}
\sum_{n\geqslant 1} \lambda_f(n)\Kl(an;q)W\Bigl(\frac nN\Bigr)=
\varepsilon(f)\frac{N}{q^{1/2}} \sum_{n\equiv a \mods q
}\lambda_f(n)\widetilde W\Bigl(\frac{nN}{q^2}\Bigr)
- \varepsilon(f)\frac{N}{q^{3/2}} \sum_{n\geqslant 1 }\lambda_f(n)\widetilde
W\Bigl(\frac{nN}{q^2}\Bigr).
\end{multline*}
\end{proposition}
\begin{proof}
We expand $K(n)$ into additive characters
\[
K(n)=\frac{1}{q^{1/2}}\sum_{a\mods q}\widehat K(a)e_q(-an)
\]
and apply the classical summation formula
\[
\sum_{n\geqslant 1} \lambda_f(n)W\Bigl(\frac{n}N\Bigr)e\Bigl(-\frac{an}{q}\Bigr) =
\varepsilon(f)\frac{N}{q} \sum_{n\geqslant
1}\lambda_f(n)e\Bigl(\frac{\overline{a}n}{q}\Bigr) \widetilde
W\Bigl(\frac{Nn}{q^2}\Bigr),
\]
valid for all $N>0$ and all $a$ coprime to $q$ (\cite[Theorem A.4]{KMVDMJ}).
\end{proof}
We can now easily prove Proposition \ref{propIScuspidal}: integration
by parts shows that for any $A\geqslant 0$ and $\varepsilon>0$ we have
\[\widetilde W\Bigl(\frac{nN}{q^2}\Bigr)\ll_{k,A,\varepsilon}
q^\varepsilon\Bigl(1+\frac{nN}{q^2}\Bigr)^{-A}\] (see \cite[Lemma 2.4]{445}),
so that (using Deligne's bound $|\lambda_f(n)|\leqslant d(n)\ll_\varepsilon n^\varepsilon$), we get
\[
\sum_{n} \lambda_f(n)\Kl(an;q)W\Bigl(\frac nN\Bigr)\ll_{\varepsilon,k} (qN)^\varepsilon
\Bigl(q^{1/2}+\frac{N}{q^{1/2}}\Bigr).
\]
\section{Application to the fourth moment of Dirichlet \texorpdfstring{$L$-functions}{L-functions}}
In this section we prove Theorem \ref{472}. The general strategy of
the proof has been explained in detail in our paper \cite{445}. We
assume some familiarity with this paper, and refer in particular to
\cite[\S 1.2, \S 6.1, \S 6.3]{445} for notations.
We begin with the unconditional bound. Let
\begin{multline*}
B_{E, E}^{\pm}(M,N) =\frac{1}{(MN)^{1/2}}\sum_{\substack{m\equiv\pm n\mods q \\ m \not= n}}{d(m)d(n)}W_1\Bigl(\frac{m}M\Bigr)W_2\Bigl(\frac{n}N\Bigr)\\
- \frac{1}{q(MN)^{1/2}} \sum_{m, n} d(m) d(n)
W_1\Bigl(\frac{m}M\Bigr)W_2\Bigl(\frac{n}N\Bigr).
\end{multline*}
Our objective is to prove that
for $\eta=1/20$ one has
\begin{equation}\label{eqgoal}
B_{E,E}^{\pm}(M,N)-\mathrm{MT}^{od,\pm}_{E,E}(M,N)=:\mathrm{ET}_{E,E}^{\pm}(M,N)\ll_\varepsilon q^{-\eta+o(1)},
\end{equation}
where $\mathrm{MT}^{od,\pm}_{E,E}(M,N)$ is a suitable main term (described in \cite{MY}) and $M,N$ range over a set of $O(\log^2q)$ real numbers satisfying
\[1\leqslant M\leqslant N,\ MN\leqslant q^{2+o(1)}\]
(the first bound is by symmetry, the second is the length of the approximate functional equation). We set
\[N^*=q^2/N,\ M=q^\mu,\ N=q^\nu,\ \nu^*=2-\nu,\]
so that
\[ 0\leqslant \mu\leqslant\nu,\quad -\varepsilon \leqslant \nu^*-\mu. \]
In view of the bound \cite[(3.18)]{445}, which reads
$$\ET^{\pm}_{E,E}(M,N) \ll q^{\varepsilon}\Bigl( \frac{N}{qM}\Bigr)^{1/4} \left(1+ \Bigl( \frac{N}{qM}\Bigr)^{1/4} \right)$$
and which is proved using spectral theory, we may also assume that
\begin{equation}\label{4eta}
\mu+\nu^*\leqslant 1+4\eta
\end{equation}
for otherwise \eqref{eqgoal} is certainly true.
Proceeding in the same way as in \cite[\S 6.3]{445}, we apply Voronoi summation to reduce matters to
the following bounds for $O(\log^4 q)$ sums of the shape
\begin{multline*}
S^\pm(M_1,M_2,M_3,M_4)=\frac{1}{(qMN^*)^{1/2}}\mathop{\sum \Bigl.^{*}}\limitsum_{m_1,m_2,m_3,m_4}
W_1\left(\frac{m_1}{M_1}\right)W_2\left(\frac{m_2}{M_2}\right) \\
\times W_3\left(\frac{m_3}{M_3}\right)W_4\left(\frac{m_4}{M_4}\right)
\Kl(\pm m_1m_2m_3m_4;q)\ll q^{-\eta+o(1)},
\end{multline*}
where the $W_i$ satisfy~(\ref{Wbound}) with $Q=q^{\varepsilon}$, and the
$M_i$ written in the shape $M_i=q^{\mu_i}$, $i=1,2,3,4$, satisfy
\[ \mu_1\leqslant \mu_2\leqslant \mu_3\leqslant\mu_4,\quad 0\leqslant
\mu_1+\mu_2+\mu_3+\mu_4 = \mu+\nu', \quad \nu' \leqslant \nu^*. \] By the
trivial bound for Kloosterman sums (and recalling \eqref{4eta}), we may assume that
\begin{equation}\label{munu*range}
1-2\eta\leqslant \mu+\nu'\leqslant \mu+\nu^*\leqslant 1+4\eta,
\end{equation}
for otherwise \eqref{eqgoal} is true.
We use the same strategy as in \cite[\S 6.3]{445}, except that we
replace \cite[(5.5)]{445} by Proposition \ref{propSZ}. Thus, if the largest
variables $m_3,m_4$ are large enough, we apply \eqref{SZboundsmooth} to
them (fixing $m_1,m_2$); otherwise, we find it more beneficial to
group variables differently producing a bilinear sum of Kloosterman
sums to which we apply Proposition \ref{proptypeII}.
Explicitly, using \eqref{SZboundsmooth} we obtain that
\begin{align*}
S^\pm(M_1,M_2,M_3,M_4)
&\ll q^{o(1)} \frac{M_1M_2}{(qMN^*)^{1/2}}\Bigl(q^{1/2}+\frac{M_3M_4}{q^{1/2}}\Bigr)\\
& \ll
q^{o(1)}\Bigl(\sqrt{\frac{M_1M_2}{M_3M_4}}+\frac{(MN')^{1/2}}q\Bigr)
\ll q^{o(1)}\Bigl(\sqrt{\frac{M_1M_2}{M_3M_4}}+q^{-\eta}\Bigr)
\end{align*}
since $q^{\frac12(1+4\eta)-1}\leqslant q^{-\eta}$. We may therefore assume
that
\begin{equation}\label{SZcondition}
0\leqslant \mu_3+\mu_4-(\mu_1+\mu_2)\leqslant 2\eta.
\end{equation}
We now apply Proposition \ref{proptypeII} with ${\tt M}=M_4$ and ${\tt N}=M_1M_2M_3$ so that ${\tt MN}=q^{\mu+\nu'}\leqslant MN^*$ and derive
\[ S^\pm(M_1,M_2,M_3,M_4)\ll q^{o(1)}\big(q^{\frac{\mu_1+\mu_2+\mu_3-1}2}+q^{-\frac{1}4+\frac{\mu_4}2}\big). \]
We claim that under the current assumptions both exponents on the right hand side are $\leqslant - \eta$, which completes the proof. Indeed, since $\mu_4\geqslant \mu_i$ for $i=1$,
$2$, $3$, we obtain by \eqref{munu*range} that
\[ \Bigl(1+\frac13\Bigr)(\mu_1+\mu_2+\mu_3)\leqslant \mu_1+\mu_2+\mu_3+\mu_4\leqslant 1+4\eta\implies
\mu_1+\mu_2+\mu_3\leqslant \frac34+3\eta, \]
hence
\[ {\frac{\mu_1+\mu_2+\mu_3-1}2}\leqslant {-\frac{1}8+\frac{3}2\eta}\leqslant {-\eta}. \]
Moreover, by \eqref{SZcondition} and \eqref{munu*range} (since
$\mu_1\leqslant \mu_2\leqslant \mu_3\leqslant \mu_4$) we have
\[ \mu_4\leqslant 2\eta+\mu_1+\mu_2-\mu_3\leqslant 2\eta+\mu_1\leqslant 2\eta+\frac{1}3(1+4\eta-\mu_4)=\frac{1}3+\frac{10}3\eta-\frac13\mu_4,\]
which implies that $\mu_4\leqslant \frac14+\frac52\eta,$
and so
\[ -\frac{1}4+\frac{\mu_4}2\leqslant-\frac18+\frac54\eta\leqslant-\eta. \]
If the Ramanujan--Petersson conjecture is available, we can use
\cite[(1.7)]{445} with $\theta = 0$ in place of \cite[(3.2)]{445} and
replace \eqref{4eta} with $\mu+\nu^*\leqslant 1+2\eta.$ Then the same
strategy leads to the numerical value $\eta = 1/16$.
\section{Sums of Kloosterman sums along the primes: proof of Theorem \ref{Klpq}}
\subsection{Proof of inequality \eqref{evening2}}
We now recall the main ideas of the proof of \cite[Thm.~1.5]{FKM2},
since our proof will follow the same path until the moment we use
Proposition \ref{propSZ}. We will incorporate some shortcuts and
combinatorial improvements to \cite{FKM2}, mainly due to the
assumption $X \leqslant q$. By \cite[p.~1711--1716]{FKM2}, we are
reduced to proving the same bound as \eqref{evening2} for the sum
\[
\mathcal S_{W,X} (\Lambda, \Kl):= \sum_n \Lambda (n) \Kl (n;q) W
\Bigl( \frac{n}{X} \Bigr),
\]
where $\Lambda$ is the von Mangoldt function. We now apply
Heath-Brown's identity \cite{HB} with integer parameter $J\geqslant 2$. This
decomposes $\mathcal S_{W,X} (\Lambda, \Kl)$ into a linear
combination, with coefficients bounded by $O_J (\log X)$, of
$O(\log^{2J} X)$ sums of the shape
\begin{multline}\label{HBdecomp}
\Sigma (\uple{M}, \uple{N})=\underset{m_1, \dots, m_J}{\sum\cdots
\sum}
\alpha_1(m_1) \alpha_2(m_2) \cdots \alpha_J (m_J)\\
\times \underset{n_1, \dots, n_J}{\sum \cdots\sum} V_1
\Bigl(\frac{n_1}{N_1}\Bigr) \cdots V_J \Bigl(\frac{n_J}{N_J}\Bigr) W \Bigl(
\frac{m_1\cdots m_J n_1\cdots n_J}{X}\Bigr) \Kl (m_1\cdots
m_Jn_1\cdots n_J;q)
\end{multline}
where
\begin{itemize}
\item $\uple{M}= (M_1, \dots, M_J)$, $\uple{N} =(N_1, \dots, N_J)$
are $J$-tuples of parameters in $[1/2, 2X]^{2J}$ which satisfy
\begin{equation}\label{restr}
N_1 \geqslant N_2 \geqslant \cdots \geqslant N_J, \ \ M_i \leqslant X^{1/J}, \ \ M_1\cdots M_J N_1 \cdots N_J\asymp_J X;
\end{equation}
\item the arithmetic functions $m\mapsto \alpha_i (m)$ are bounded and
supported in $[M_i/2, 2M_i]$;
\item the smooth functions $x\mapsto V_i (x)$ satisfy~(\ref{Wbound})
with parameter $Q$.
\end{itemize}
It now remains to study the sum $\Sigma (\uple{M}, \uple{N})$ defined in \eqref{HBdecomp} for every $(\uple{M}, \uple{N})$ as above. We estimate $\Sigma(\uple{M},\uple{N})$ in two ways.
Our first method is to bound $\Sigma (\uple{M}, \uple{N})$ by applying \eqref{SZboundsmooth1} to the largest smooth variables $n_1$ and $n_2$ in
$\Sigma(\uple{M},\uple{N})$ and a trivial summation over the other variables. We obtain
\begin{equation*}
\Sigma(\uple{M},\uple{N}) \ll q^\varepsilon Q^2 X \Bigl( \frac{q^{1/2}}{N_1 N_2}+ \frac{1}{q^{1/2}}\Bigr),
\end{equation*}
which, by \eqref{restr} and the assumption $X\leqslant q$, simplifies into
\begin{equation}\label{ineq3}
\Sigma(\uple{M},\uple{N}) \ll q^\varepsilon Q^2 X\ \bigl( q^{1/2}/ (N_1N_2)\bigr).
\end{equation}
Our second method is to apply Proposition \ref{proptypeII} to $\Sigma (\uple{M}, \uple{N})$; in this way we obtain
\begin{equation}\label{ineq1}
\Sigma (\uple{M}, \uple{N}) \ll q^\varepsilon Q^{1/2}\,X\, \Bigl( \frac{1}{M^{1/2}} +\frac{q^{1/4}}{(X/M)^{1/2}}\Bigr)
\end{equation}
for any factorization
\[
M_1\cdots M_JN_1\cdots N_J =M\times N.
\]
We have now to play with \eqref{ineq3} and \eqref{ineq1} in an optimal way to bound $\Sigma (\uple{M}, \uple{N})$. We follow the same presentation as in \cite[\S 4.2]{FKM2}. We introduce the real numbers $\kappa$, $x$, $\mu_i$, $\nu_j$, $1\leqslant i,j\leqslant J$, defined by
\[ Q=q^\kappa,\ X=q^x,\ M_i=q^{\mu_i},\ N_j=q^{\nu_j} \]
and we set
\[( \uple{m}, \uple{n}) =(\mu_1, \dots, \mu_J, \nu_1, \dots, \nu_J) \in [0, x]^{2J}.
\]
The conditions \eqref{restr} are reinterpreted as
\begin{equation}\label{restr1}
\sum_i \mu_i +\sum_j \nu_j =x\leqslant 1, \quad \mu_i \leqslant x/J, \quad \nu_1 \geqslant \nu_2\geqslant \cdots \geqslant \nu_J.
\end{equation}
According to \eqref{ineq3} and \eqref{ineq1}, we introduce the function (compare with \cite[definition (4.5)]{FKM2})
$\eta (\uple{m}, \uple{n})$ defined by
\begin{equation}\label{defeta}
\eta (\uple{m}, \uple{n}):= \max\Bigl\{
(\nu_1+\nu_2) -\frac{1}{2}-2 \kappa \ ;\, \max_\sigma \min
\Bigl( \frac{\sigma}{2}, \frac{x-\sigma}{2} -\frac{1}{4} \Bigr) -\frac{\kappa}{2}
\Bigr\},
\end{equation}
where $\sigma$ ranges over all possible sub-sums of the $\mu_i$ and $\nu_j$ for $1 \leqslant i, j \leqslant J$, that is, over the sums
\[
\sigma =\sum_{i \in \mathcal I} \mu_i +\sum_{j \in \mathcal J} \nu_j,
\]
for $\mathcal I $ and $\mathcal J$ ranging over all possible subsets of $\{1, \dots, J\}$.
With these conventions, as a consequence of \eqref{ineq3} and \eqref{ineq1} we have the inequality
\[
\Sigma(\uple{M},\uple{N}) \ll (qQ)^\varepsilon q^{-\eta (\uple{m}, \uple{n})}\,X,
\]
and finally, summing over all possible $(\uple M, \uple N)$, we have the inequality
\begin{equation}\label{793}
\mathcal S_{W,X} (\Lambda, \Kl) \ll (qQ)^\varepsilon \, q^{-\eta}\, X,
\end{equation}
where
\[
\eta = \min_{(\uple{m}, \uple{n})} \eta (\uple{m}, \uple{n}),
\]
where $( \uple{m}, \uple{n})$ satisfy \eqref{restr1}.
The estimate~\eqref{evening2} is trivial for $x<3/4$, so we may assume that $3/4 \leqslant x \leqslant 1$. For $\varepsilon >0$ sufficiently small, let $\mathcal I_x$ be the interval
\[
\mathcal I_x = [x/6-\varepsilon, x/3+\varepsilon],
\]
and choose $J=10$ to apply Heath-Brown's identity.
We now consider two different cases in the combinatorics of $(\uple{m}, \uple{n})$.
\begin{itemize}
\item If $(\uple{m}, \uple{n})$ contains a subsum $\sigma \in \mathcal I_x$, then, by \eqref{defeta}, we have the inequality
\[
\eta (\uple{m}, \uple{n}) \geqslant \min \Bigl(\frac{x/6}{2}, \frac{x-x/3}{2}-\frac{1}{4}\Bigr)-\frac{\kappa}{2}-\frac{\varepsilon}{2},
\]
which simplifies into
\begin{equation}\label{firstbound}
\eta (\uple{m}, \uple{n}) \geqslant \frac{x}{3} -\frac{1}{4} -\frac{\kappa}{2} -\frac{\varepsilon}{2}.
\end{equation}
\item If $(\uple{m}, \uple{n})$ contains no subsum $\sigma \in \mathcal I_x$, then the sum of all the $\mu_i$ and $\nu_j$ which are less than $x/6-\varepsilon$ is also less than $x/6-\varepsilon$ (this is a consequence of the inequality $2(x/6-\varepsilon) < x/3 +\varepsilon$). In light of~\eqref{restr1}, this includes all $\mu_i$, and so some $\nu_j$ must be greater than $x/3+\varepsilon$. On the other hand, since $3 (x/3 +\varepsilon) > x$, we deduce that at most two $\nu_i$ (more precisely, $\nu_1$ or $\nu_1$ and $\nu_2$) are greater than $x/3 +\varepsilon$. Combining these remarks, we deduce the inequality
\[
\nu_1+\nu_2 \geqslant x-(x/6 -\varepsilon) = 5x/6 +\varepsilon,
\]
which implies, by \eqref{defeta}, the inequality
\begin{equation}\label{secondbound}
\eta (\uple{m}, \uple{n}) \geqslant \frac{5x}{6}- \frac{1}{2} -2 \kappa -\varepsilon.
\end{equation}
\end{itemize}
By \eqref{793}, \eqref{firstbound} and \eqref{secondbound}, we deduce the inequality
\begin{equation}\label{828}
\mathcal S_{W,X} (\Lambda, \Kl) \ll (qQ)^\varepsilon \bigl( q^{1/4} Q^{1/2} X^{2/3} + q^{1/2} Q^2 X^{1/6}
\bigr).
\end{equation}
In the above upper bound, the first term is larger than the second one if and only if $Q<q^{-1/6} X^{1/3}$, and in this case, we have $Q^\varepsilon < q^\varepsilon$. However, when $Q\geqslant q^{-1/6} X^{1/3}$, it is easy to see that the bound \eqref{evening2} is trivial since we have
\[
q^{1/4} Q^{1/2} X^{2/3}\geqslant q^{1/4} (q^{-1/6} X^{1/3})^{1/2} X^{2/3}= q^{1/6} X^{5/6} \geqslant X,
\]
since we suppose $X\leqslant q.$ In conclusion, we may drop the second term on the right-hand side of \eqref{828}. This remark completes the proof of \eqref{evening2}.
\subsection{Proof of inequality \eqref{evening1}} The proof mimics the proof appearing in \cite[\S 4.3]{FKM2}. By a simple subdivision, it is sufficient to prove the inequality
\begin{equation}\label{836}
\sum_\stacksum{X<p\leqslant\frac32X}{p\text{ prime}}\Kl(p;q)\ll q^{1/6+\varepsilon}\, X^{7/9}.
\end{equation}
Let $\Delta <1/2$ be some parameter, let $W$ be a smooth function defined on $[0, +\infty[$ such that
\[
\supp (W) \subset [1-\Delta, \textstyle \frac{3}{2}+\Delta], \ 0\leqslant W \leqslant 1, \ W(x) =1 \text{ for } 1 \leqslant x \leqslant \frac{3}{2},
\]
and such that the derivatives satisfy
\[
x^j W^{(j)} (x)\ll_j Q^j,
\]
with $Q=\Delta^{-1}$. By applying \eqref{evening2}, we have
\begin{align*}
\sum_\stacksum{X<p\leqslant \frac{3}{2}X}{p\text{ prime}}\Kl(p;q)& \ll \Delta X + 1 + \Bigl\vert\, \sum_p W \Bigl( \frac{p}{X}\Bigr) \Kl (p;q)\, \Bigr\vert\\
& \ll \Delta X + q^{1/4 +\varepsilon} Q^{1/2} X^{2/3} \ll q^{1/6 +\varepsilon} X^{7/9},
\end{align*}
by the choice $\Delta =q^{1/6} X^{-2/9} < 1/2$ (the claim is trivial if $q^{1/6} \geqslant \frac{1}{2}X^{2/9}$). This completes the proof of \eqref{836}.
\begin{bibdiv}
\begin{biblist}
\bib{445}{article}{
author={V. Blomer},
author={\' E. Fouvry},
author={E. Kowalski},
author={Ph. Michel},
author={D. Mili\'cevi\' c},
title={On moments of twisted $L$-functions},
journal={Amer. J. Math.},
date={to appear, \url{arXiv:1411.4467}},
}
\bib{FKM2}{article}{
author={{\'E}. Fouvry },
author={E. Kowalski},
author={Ph. Michel},
title={Algebraic trace functions over the primes},
journal={Duke Math. J.},
volume={163},
date={2014},
number={9},
pages={1683--1736},
}
\bib{FKM1}{article}{
author={{\'E}. Fouvry},
author={E. Kowalski},
author={Ph. Michel},
title={Algebraic twists of modular forms and Hecke orbits},
journal={Geom. Funct. Anal.},
volume={25},
date={2015},
number={2},
pages={580--657},
}
\bib{FKMd3}{article}{
author={{\'E}. Fouvry},
author={E. Kowalski },
author={Ph. Michel },
title={On the exponent of distribution of the ternary divisor function},
journal={Mathematika},
volume={61},
date={2015},
number={1},
pages={121--144},
}
\bib{HB}{article}{
author = {D. R. Heath-Brown},
title = {Prime numbers in short intervals and a generalized Vaughan identity},
journal={Canad. J. Math.},
volume={34},
date={1982},
pages={1365--1377},}
\bib{KMVDMJ}{article}{
author={E. Kowalski },
author={Ph. Michel},
author={J. VanderKam},
title={Rankin-Selberg $L$-functions in the level aspect},
journal={Duke Math. J.},
volume={114},
date={2002},
number={1},
pages={123--191},
}
\bib{SZ}{article}{
author={I. Shparlinski},
author={T. P. Zhang},
title={Cancellations amongst Kloosterman sums},
journal={Acta Arith.},
note={(to appear, \url{arXiv:1601.05123})},
}
\bib{MY}{article}{
author={M. P. Young},
title={The fourth moment of Dirichlet $L$-functions},
journal={Ann. of Math. (2)},
pages={1--50},
date={2011},
volume={173},
number={1},
}
\end{biblist}
\end{bibdiv}
\end{document} |
\begin{document}
\title{Fairness in the Assignment Problem with Uncertain Priorities\footnote{This work is supported by NSF grant CCF-2113798.}}
\maketitle
\begin{abstract}
In the assignment problem, a set of items must be allocated to unit-demand agents who express ordinal preferences (rankings) over the items. In the assignment problem with priorities, agents with higher priority are entitled to their preferred goods with respect to lower priority agents. A priority can be naturally represented as a ranking and an uncertain priority as a distribution over rankings. For example, this models the problem of assigning student applicants to university seats or job applicants to job openings when the admitting body is uncertain about the true priority over applicants. This uncertainty can express the possibility of bias in the generation of the priority ranking. We believe we are the first to explicitly formulate and study the assignment problem with uncertain priorities. We introduce two natural notions of fairness in this problem: stochastic envy-freeness (SEF) and likelihood envy-freeness (LEF). We show that SEF and LEF are incompatible and that LEF is incompatible with ordinal efficiency. We describe two algorithms, Cycle Elimination (CE) and Unit-Time Eating (UTE) that satisfy ordinal efficiency (a form of ex-ante Pareto optimality) and SEF; the well known random serial dictatorship algorithm satisfies LEF and the weaker efficiency guarantee of ex-post Pareto optimality. We also show that CE satisfies a relaxation of LEF that we term 1-LEF which applies only to certain comparisons of priority, while UTE satisfies a version of proportional allocations with ranks. We conclude by demonstrating how a mediator can model a problem of school admission in the face of bias as an assignment problem with uncertain priority.
\end{abstract}
\section{Introduction}
Consider a motivating example of the assignment problem where a number of university admission slots (the items) must be assigned to student applicants (the agents). The university slots could be at a single university or several. Applicants might have preferences over different universities, or might have preferences over different slots at the same university (for example, some slots might be associated with merit-based financial aid, or include admission to particular academic programs). Applicants are \textit{unit-demand}, meaning they only need to be assigned a single slot (and derive no benefit from being assigned multiple).
Most university systems employ some form of priority-based admissions; this can be expressed through a ranking over applicants. For example, a priority might rank applicants by standardized exam scores, or perhaps by some more complex holistic assessment. Given any deterministic priority (a ranking), one might naturally solve the assignment problem using the \textit{serial dictatorship} rule, so that students choose their most preferred remaining university slot one at a time in order of their standardized exam score. Indeed, systems roughly like this are employed in several countries around the world such as the Indian Institutes of Technology~\cite{JET-IIT}.
Despite the appeal of such a simple and ostensibly fair system, there is reason to suspect that any scoring or ranking system is based on imperfect noisy signals of the true underlying priority (whatever that might be). For example, an applicant A scoring 1 point higher on a standardized exam or holistic assessment than another applicant B is not, in general, 100\% more likely to be a better student than B. Even more worryingly, studies show that standardized exam performance is closely related to demographic factors such as race and income~\cite{DEM2013}, leading to uncertainty based on social bias and inequality in addition to random noise like whether one had a good breakfast the day of an exam. More holistic assessments are further vulnerable to the well documented phenomenon of implicit bias against historically marginalized groups~\cite{ImplicitBias}. Ignoring these uncertainties may result in arbitrary decisions (deterministically preferring one applicant over another when the comparison is unclear and noisy) and systemic discrimination against historically marginalized groups.
Previous work has attempted to solve the second problem of bias without explicitly modeling an uncertain priority by adapting the so-called ``Rooney Rule''~\cite{kleinberg2018, celis2020interventions}. There are variations, but roughly speaking these methods reserve a number of ``minority'' spots and prioritize this many ``minority'' applicants in some serial dictatorship assignment. This approach can lead to \textit{fairness gerrymandering}~\cite{Kearns18} by which structured subgroups remain disadvantaged. In particular, Rooney Rule style approaches are predicated on a single binary distinction of the applicant population into ``majority'' (or privileged) and ``minority'' (or disadvantaged) applicants. But in reality, applicant identity is multidimensional (race, gender, income, disability, first language, etc.) and bias can compound along intersections. In fact, it is perfectly plausible that the vast \textit{majority} of applicants are disadvantaged (that is, suffer from bias leading to underestimation of their priority) along one or more dimensions of identity, though not all to the same extent. In addition to group identity, there may sometimes be uncertainties related to the priority of individual applicants, unique circumstances that merit accounting.
For these reasons, we consider the more general problem that takes as input an uncertain priority, expressed as a probability distribution over rankings of applicants. The generality of the input to our algorithms ensures that a decision maker can fully model the complexity of uncertainty and bias inherent in the creation of a priority. This modeling problem is outside the scope of this paper, though we do provide an example for our experiments in Section~\ref{sec:experiments}. Rather, our emphasis is on the question of characterizing fairness and efficiency given a random priority, and providing algorithms to compute random assignments that satisfy these desiderata.
\subsection{Contributions}
We study an extension of the random assignment problem~\cite{MB2001, MB2002, Abdulkadiroglu2003OrdinalEA} in which a decision maker must allocate a number of items to unit-demand agents in a way that is consistent with an \textit{uncertain priority} represented as a distribution over rankings of the agents. To the best of our knowledge, we are the first to characterize this more general problem.
In general we want to compute a random assignment that is simultaneously \textit{efficient} with respect to agent preferences over the items and \textit{fair} with respect to the agent priorities. \textit{Ordinal efficiency} (OE)~\cite{MB2001} generalizes the concept of Pareto efficiency to the case of a random assignment. Our main contribution is to characterize two alternative notions of fairness for the random assignment problem with uncertain priorities in Section~\ref{sec:desiderata}. The first notion, which we call \textit{stochastic envy-freeness} (SEF), guarantees that any agent whose priority first-order stochastically dominates another agent's priority should prefer their own (random) assignment to that of the other agent. The second notion, which we call \textit{likelihood envy-freeness} (LEF), guarantees that the likelihood (over the random assignment) that an agent prefers the assignment of another should be at most the likelihood (over the uncertain priority) that the latter agent has higher priority than the former.
We introduce additional notions that help to more finely distinguish between algorithms that satisfy one of the above notions. The first is a relaxation of LEF called 1-LEF that holds only when an agent has higher priority than another with probability 1. The next is {\em ranked proportionality} (PROP), where the allocation of any agent should stochastically dominate the allocation where she gets her $i$-th preferred item with probability $p_i$ if she herself is ranked at position $i$ with that probability.
Formal definitions are provided in Section~\ref{sec:desiderata}. We provide illustrative examples of these concepts as well as justification for why multiple definitions of fairness might be appropriate in Section~\ref{sec:need}.
In Section~\ref{impossible} we show that it is impossible to guarantee OE and LEF simultaneously. We also show that it is impossible to guarantee SEF and LEF simultaneously. Given this, we focus on achieving OE and SEF. In Section~\ref{sec:algorithms} we describe two algorithms: \textit{Unit-time Eating} (UTE) and \textit{Cycle Elimination} (CE). We show that both of these algorithms satisfy OE and SEF. To more finely distinguish between these algorithms, we show that CE also satisfies the relaxed 1-LEF property, while UTE satisfies PROP. We also show that any algorithm achieving OE cannot achieve PROP and 1-LEF simultaneously, so that we cannot achieve a super-set of the properties achieved by these algorithms.
It is straightforward to observe that the well known \textit{Random Serial Dictatorship} (RSD) that samples a priority from $\Sigma$ and then uses the serial dictatorship satisfies LEF, PROP, and is ex-post Pareto efficient, though it does not satisfy OE~\cite{MB2001}. We obtain a nearly complete characterization of achievable subsets of our efficiency and fairness properties, as shown in Table~\ref{tab0}.
\begin{table}[htbp]
\begin{center}
\begin{tabular}{||c || c |c | c | c |c ||}
\hline
Algorithm & OE & SEF & LEF & 1-LEF & PROP \\
\hline\hline
RSD & & & \textbf{\checkmark} & \textbf{\checkmark} & \textbf{\checkmark} \\
\hline
UTE (new) & \textbf{\checkmark} & \textbf{\checkmark} & & & \textbf{\checkmark} \\
\hline
CE (new) & \textbf{\checkmark} & \textbf{\checkmark} & & \textbf{\checkmark} & \\
\hline
\hline
\end{tabular}
\caption{\label{tab0} Summary of fairness properties achieved.}
\end{center}
\end{table}
In Section~\ref{sec:experiments} we return to a consideration of our motivating application of biased school admissions. We provide a practical example modeling an uncertain priority in the presence of bias and compare our CE and UTE algorithms with previous approaches to address bias using ``Rooney Rule'' style approaches~\cite{kleinberg2018, celis2020interventions}.
\subsection{Related Work}
\paragraph{Random Assignment.} There is a large body of work studying the problem of random assignment with no priority (or, in our framework, when the priority is uniform). The work of~\cite{randomserialdictatorship} proposed a {\em random serial dictatorship} mechanism, which draws an ordering of agents uniformly at random and lets them choose items in that order, and showed that this mechanism is ex-post efficient. The work of~\cite{Zhou1990OnAC} observed that though random serial dictatorship is fair, it is not efficient when the agents are endowed with Von Neumann-Morgenstern preferences over lotteries. The work of~\cite{MB2001} introduced a notion of efficiency that is stronger than ex-post efficiency, namely {\em ordinal efficiency}, and showed that random serial dictatorship is not ordinally efficient. They proposed the {\em probabilistic serial} rule that is ordinally efficient. Moreover, probabilistic serial is (stochastically) envy-free while random serial dictatorship is not. The work of~\cite{Abdulkadiroglu2003OrdinalEA} studied the relationship between ex-post efficiency and ordinal efficiency, showing that a lottery induces an ordinally efficient random assignment if and only if each subset of the full support of the lottery is undominated (in a specific sense).
Subsequent works investigated natural extensions of the canonical setup. The work of~\cite{MB2002} considered the problem of random assignment in the case where agents can opt out, and characterized probabilistic serial by ordinal efficiency, envy-freeness, strategyproofness, and equal treatment of equals in this setting. The work of~\cite{rankefficiency} studied the notion of {\em rank efficiency}, which maximizes the number of agents matched to their first choices.
\paragraph{Fair Ranking.} The assignment problem with priority is closely related to the subset selection problem that has been studied extensively as a problem in fair ranking~\cite{kleinberg2018, CME19, NCGW20, celis2020interventions, EGGL20, MC21, GB21} where the goal is to optimize some latent measure of utility for the algorithm designer subject to group fairness constraints on the resulting ranking. Recent work considers explicitly modeling the uncertainty from bias when estimating a ranking based on observed utilities~\cite{SKJ21}, similar to our approach in modeling an uncertain priority. Our work differs from the fair ranking literature in that we study a more general assignment problem in which agents may not all have the same preferences over items. Of course, one can always translate a given ranking into an assignment by employing the serial dictatorship rule, but this need not be ordinally efficient~\cite{MB2001}. Instead, we formulate our desiderata more explicitly in the wider context of the assignment problem itself.
\paragraph{Two-sided matching.} School choice problems are often studied in the context of two-sided matching, where applicants have preferences over schools and schools have preferences over applicants. For example, the deferred acceptance algorithm (and its extensions) calculates stable matchings and has been extensively studied and deployed in the real world~\cite{gale1962college, Roth82, Roth84, Atila03, Atila05}. Our problem is different in two ways. First, the ``items'' in our problem (eg., school seats) share a single common priority over applicants, so the notion of stability simply means no applicant of lower priority is assigned an item preferred by an agent of higher priority. However, our setting is more complex in the second sense: The shared priority is uncertain, and the assignment will be random, requiring an extension of existing fairness properties and algorithms.
\section{Preliminaries}
We are given $n$ unit demand agents $\mathcal{A} = \{1, 2, \hdots, n\}$ and a set of $m$ items $\mathcal{I}$. We assume without loss of generality that $m \geq n$ (if not, one can create additional ``dummy'' items that are least preferred by all agents). We write $a \succ_i b$ to denote that agent $i$ prefers item $a$ to item $b$. Each agent has ordinal preferences represented as a total order over $\mathcal{I}$, that is, for every agent $i$ we have a permutation $\pi_i: \mathcal{I} \rightarrow \{1, \hdots, m\}$ such that $\pi_i(a) < \pi_i(b)$ if and only if $a \succ_i b$.\footnote{In general, results extend trivially to the case where agents may have \textit{objective} indifferences between items, meaning that if any agent is indifferent between two items then all agents are indifferent between those items.
However, our results do \textit{not} necessarily extend straightforwardly if agents have subjective indifferences, see~\cite{MB2001}.}
A \textit{simple priority} over agents is a permutation $\sigma: \mathcal{A} \rightarrow \{1, \hdots, n\}$ where $\sigma(i) < \sigma(j)$ means that $i$ has higher priority than $j$. A \textit{random priority} is a probability distribution over simple priorities which we denote as $\Sigma = \{(\sigma_k, \rho_k)\}$ where each $\sigma_k$ is a simple priority, $\rho_k \geq 0$, and $\sum_k \rho_k = 1$.
A \textit{simple assignment} is a matching $f: \mathcal{A} \rightarrow \mathcal{I}$. A \textit{lottery} is a probability distribution over simple assignments which we denote as $\mathcal{L} = \{(f_k, p_k)\}$ where each $f_k$ is a simple assignment, $p_k \geq 0$, and $\sum_k p_k = 1$.
Following~\cite{MB2001}, we call a probability distribution over $[m]$ itself a \textit{random allocation} to an agent. It is important to note that agents have ordinal preferences over deterministic items which only induces a partial order over random allocations. That is, given $\pi_i$, it may be unclear whether $i$ would prefer one random allocation to another. We denote by $P = \{p_{ij}\}$ a \textit{random assignment}, the $n$ by $m$ matrix where $P_{i}$, the $i$-th row, is agent $i$'s random allocation, and where $\sum_i p_{ij} = 1$ for all columns $j$. In general, a random assignment $P$ can be induced by one or more lotteries, the existence of which is guaranteed by the Birkhoff-von Neumann Theorem, but a particular lottery induces a unique random assignment $P$.
In the \textit{assignment problem with uncertain priorities} we are given a random priority $\Sigma$ and agent preferences $\{\pi_i\}$ and we must compute a random assignment.
\section{Desiderata}
\label{sec:desiderata}
In this section we introduce the normative properties that an algorithm for the random assignment with uncertain priorities problem should satisfy. Broadly speaking, these desiderata require that the algorithm be efficient with respect to agent preferences and fair with respect to agent priorities.
\subsection{Efficiency}
A simple assignment $f$ is \textit{Pareto efficient} (or Pareto optimal) if it is not dominated by any other simple assignment, which simply means that there is no alternative such that no agent is worse off and at least one agent is better off.
\begin{definition}[Pareto Efficiency]
A simple assignment $f$ is Pareto efficient if for all simple assignments $g$ one of the following holds: (i) $\exists i \in \mathcal{A}$ such that $f(i) \succ_i g(i)$, or (ii) $g(i) \nsucc_i f(i)$ for all $i \in [n]$.
\end{definition}
A lottery $\mathcal{L}$ is ex-post Pareto efficient if every simple assignment in the support of $\mathcal{L}$ (i.e., every simple assignment $f_k$ with $p_k > 0$) is Pareto efficient.
A stronger efficiency property for a random assignment is \textit{ordinal efficiency} {\sc (OE)}~\cite{MB2001}. To define ordinal efficiency we must first define the notion of stochastic dominance.
\begin{definition}[Stochastic Domination]
A probability distribution $X$ \textit{stochastically dominates} another distribution $Y$ under permutation $\pi$ (denoted $X \succ^{sd}_{\pi} Y$) if for all $t \in \{1, \hdots, n\}$ it holds that $\sum_{r=1}^{t} X_{\pi^{-1}(r)} \geq \sum_{r=1}^{t} Y_{\pi^{-1}(r)}$, where $\pi^{-1}$ is the inverse permutation. A random assignment $P$ is stochastically dominated by a random assignment $Q \neq P$ if the random allocation induced by $Q$ stochastically dominates the random allocation induced by $P$ under preferences $\pi_i$ for every agent $i \in [n]$.
\end{definition}
Note that this implies the following: If random assignment $Q$ stochastically dominates random assignment $P$, then every agent prefers $Q$ to $P$ under any Von Neumann-Morgenstern utility function consistent with their ordinal preferences. Now we can define ordinal efficiency, following~\cite{MB2001}.
\begin{definition}[Ordinal Efficiency, OE]
We say that a random assignment $P$ is \textit{ordinally efficient} if it is not stochastically dominated by any other random assignment.
\end{definition}
At a high level, a random assignment is ordinally efficient if there is no other random assignment that is better for all agents and all utility functions consistent with their ordinal preferences. The property is not trivial: Some natural algorithms such as random serial dictatorship are Pareto efficient but not ordinally efficient.
\subsection{Fairness}
\label{fairness}
We define fairness in terms of envy. We say that one agent \textit{envies} another if the former prefers the item assigned to the latter. Envy of a lower priority agent constitutes a justified complaint against an assignment; ideally we would like to compute an \textit{envy-free} assignment with respect to the priority.
\begin{definition}[Envy-Freeness]
We say that a simple assignment $f$ is \textit{envy-free} with respect to a simple priority $\sigma$ if for all $i, j \in [n]$, $\sigma(i) < \sigma(j) \implies f(i) \succ_i f(j).$
\end{definition}
However, it is immediately evident that it is impossible to compute a single simple assignment that is envy-free in this sense for every simple priority in the support of a random priority (for example, if there are two agents with uncertain priority who both prefer the same item). Instead, we need to compute a random assignment so that each agent is fairly treated ex-ante (for example, so that each agent has a fair probability of receiving the preferred good).
There are two natural ways to generalize the concept of envy to a random assignment with a random priority. One is to imagine that one agent envies another if the random allocation of the latter stochastically dominates that of the former under the former's ordinal preferences. Envy of this type forms a justified complaint if the envying agent also stochastically dominates the envied agent in terms of the random priority. More formally,
\begin{definition}[Stochastic Envy-Freeness, SEF]
\label{def_sef}
Consider a random assignment $P$ generated under a random priority $\Sigma$. Let $S_i$ be the probability distribution over $[n]$ induced by $\Sigma$ for agent $i$, that is, for $r \in [n]$, $S_{ir} = \sum_{k: \sigma_k(i)=r} \rho_k$. Let $\sigma_{*}$ be the identity permutation, i.e., $\sigma_{*}(i) = i$. $P$ is \textit{stochastically envy-free} (SEF) with respect to $\Sigma$ if for all $i, j \in [n]$, $S_i \succ^{sd}_{\sigma_{*}} S_j \implies P_i \succ^{sd}_{\pi_i} P_j.$
\end{definition}
Loosely speaking, the implication of stochastic envy-freeness can be read as ``if agent $i$ probably has higher priority than $j$ then $i$ should prefer their random allocation to $j$'s under all utility functions consistent with $i$'s ordinal preferences.''
A second way to generalize envy is by considering the likelihood of envy (in the simple sense) with respect to a lottery inducing a given random assignment. Envy of this type is justified if the likelihood of agent $i$ envying another agent $j$ is greater than the likelihood over the random priority that $i$ has lower priority than agent $j$. We call a random assignment \textit{likelihood envy-free} if there is a lottery which induces it and has no envy of this kind.
\begin{definition}[Likelihood Envy-Freeness, LEF]
A random assignment $P$ satisfies \textit{likelihood envy-freeness} (LEF) under $\Sigma$ if $P$ can be induced by a lottery $\mathcal{L}$ such that for all $i, j \in [n]$, $\Pr_{\sigma \sim \Sigma}[\sigma(i) < \sigma(j)] \leq \Pr_{f \sim \mathcal{L}}[f(i) \succ_i f(j)].$
\end{definition}
In other words, LEF means that an agent $i$ who is $\ell$-likely to have higher priority than another agent $j$ should be at least $\ell$-likely to prefer their assigned item to $j$'s.
We say an algorithm satisfies OE (resp.\ SEF, LEF) if it always produces a random assignment that satisfies OE (resp.\ SEF, LEF). As we show in Section~\ref{impossible}, it is not possible to guarantee SEF and LEF simultaneously.
\subsection{Relationship between LEF and SEF}
\label{sec:need}
The relationship between SEF and LEF is subtle; neither implies the other and it is not immediately evident which is the ``better'' or more ``natural'' fairness property. We present two examples to illustrate that an assignment satisfying only one of SEF and LEF might still be unfair, so that both properties are useful competing notions of fairness, and neither is strictly stronger than the other.
We first present an example which shows that an assignment that satisfies SEF can be unfair. Consider $n = 2$ agents and $m = 2$ items which we label $a, b$ for clarity. Both agents prefer $a$ to $b$, and the random priority is simply $\Sigma = \{(\sigma, 1)\}$ with $\sigma(1) < \sigma(2)$, {\em i.e.} agent 1 has higher priority than agent 2 with probability $1$. In this setup, allocating $\frac{1}{2}$ unit of $a$ and $b$ to both agents yields an assignment that satisfies SEF. However, this assignment is clearly unfair, because even though agent 1 has higher priority than agent 2, they are getting the same assignment. Notice that this assignment does not satisfy LEF. In this instance, LEF could be used to characterize how much one agent is prioritized over the other.
The next example shows that an assignment that only satisfies LEF can also be unfair. Consider $n = 2$ agents and $m = 100$ items which we label $i_1, \ldots, i_{100}$ for clarity. The preferences of both agents are $i_1 \succ \cdots \succ i_{100}$. The random priority is given by $\Sigma = \{(\sigma_1, \frac{1}{2}), (\sigma_2, \frac{1}{2})\}$ with $\sigma_1(1) < \sigma_1(2)$ and $\sigma_2(2) < \sigma_2(1)$. In other words, both agents have the same priority. In this setup, allocating $\frac{1}{2}$ unit of $i_1$ and $\frac{1}{2}$ unit of $i_{100}$ to agent 1 and $\frac{1}{2}$ unit of $i_{99}$ and $\frac{1}{2}$ unit of $i_{100}$ to agent 2 yields an assignment that satisfies LEF. Notice that this assignment can be induced by a lottery $\mathcal{L} = \{(f_1, \frac{1}{2}), (f_2, \frac{1}{2})\}$ where $f_1(1) = i_1, f_1(2) = i_{100}, f_2(1) = i_{100}, f_2(2) = i_{99}$. However, this assignment is clearly unfair, because even though the two agents have the same priority, agent 1 gets a strictly better assignment than agent 2. This shows that LEF alone has limitations as well, and the appropriate concept here is SEF.
The above examples show that SEF and LEF provide reasonable competing notions of fairness. When combined with the efficiency notion of OE, we will show in Section~\ref{impossible} that LEF and OE are incompatible. If OE is replaced by the weaker notion of Pareto-efficiency, then it is easy to check that random serial dictatorship (RSD), which simply samples a priority of agents from the distribution and allocates each agent their favorite remaining item in this priority order, satisfies LEF\footnote{To see why RSD satisfies LEF, suppose the random priority is given by $\Sigma = \{(\sigma_k, \rho_k)\}$, then the random assignment produced by RSD can be induced by the lottery $\mathcal{L} = \{(f_k, \rho_k)\}$, where $f_k$ is the deterministic assignment produced by letting agents successively choose an item based on the order given by $\sigma_k$.} and Pareto efficiency. Thus, in our work, we will focus on the more non-trivial part of finding algorithms that satisfy SEF and OE.
\subsection{Additional Fairness Criteria}
As we show in Section~\ref{sec:algorithms}, there can be multiple algorithms that satisfy the same subset of the fairness criteria. We therefore consider two additional notions to more finely distinguish between them.
The first criterion is the following relaxation of LEF: If agent $i$ with probability $1$ has higher priority than another agent $j$ then agent $i$ should certainly (again, with probability 1) not envy $j$.
\begin{definition}[1-LEF]
A random assignment $P$ under random priority $\Sigma$ satisfies {\sc 1-LEF} if there exists some lottery $\mathcal{L}$ which induces $P$ such that for all agents $i \neq j \in [n]$, if $\Pr_{\sigma \sim \Sigma}[\sigma(i) < \sigma(j)] = 1$, then $\Pr_{f \sim \mathcal{L}}[f(i) \succ_i f(j)] = 1$.
\end{definition}
The next criterion is called {\em Ranked Proportionality} (PROP), which captures stochastic dominance over an allocation that matches the probability an agent gets her $i$-th ranked item to the probability of her being ranked at position $i$. Note that if all rankings of agents were equally likely, this captures stochastic dominance to an allocation that assigns every item to every agent uniformly at random.
\begin{definition}[{\sc PROP}] Given a random priority $\Sigma = \{(\sigma_k, \rho_k)\}$, we define the baseline allocation $\overline{P}_i$ for agent $i$ by $\overline{P}_{i\pi_i^{-1}(r)} = S_{ir} = \sum_{k: \sigma_k(i) = r}\rho_k$ for all $r \in [n]$. In other words, if agent $i$ is ranked $r$-th in the random priority with probability $p$, then we add $p$ fraction of the $r$-th preferred item of agent $i$ to her baseline allocation. For an allocation to satisfy {\em ranked proportionality} ({\sc PROP}), it should stochastically dominate this baseline for each agent.
\end{definition}
\section{Impossibility Results}
\label{impossible}
In this part, we present several impossibility results. We note that these are existential hardness results, not computational. We begin by observing that LEF is incompatible with OE.
\begin{theorem}
\label{DOMOE}
LEF is incompatible with OE.
\end{theorem}
\begin{proof}
We present an instance in which no random assignment can satisfy both LEF and OE. There are $n=4$ agents and $m=4$ items which we label $a, b, c, d$ for clarity. Agent preferences are given by
$$\pi_1, \pi_3: a \succ b \succ c \succ d, \quad \pi_2, \pi_4: b \succ a \succ c \succ d$$
Moreover, we consider the priority $\Sigma = \{(\sigma_1, \frac{1}{2}), (\sigma_2, \frac{1}{2})\}$ where
$$\sigma_1(4) < \sigma_1(2) < \sigma_1(3) < \sigma_1(1),$$
$$\sigma_2(3) < \sigma_2(1) < \sigma_2(4) < \sigma_2(2).$$
In other words, with probability $\frac{1}{2}$ under $\sigma_1$, agent 4 has the highest priority, then agent 2, then agent 3, finally agent 1. Similarly for $\sigma_2$ with probability $\frac{1}{2}$. Assume for contradiction that there exists a random assignment $P = [p_{ij}]$, together with a lottery $\mathcal{L}$ which induces $P$, satisfying LEF and OE. By definition of LEF, we note that
$$\Pr_{f \sim \mathcal{L}}[f(3) \succ_3 f(1)] \geq \Pr_{\sigma \sim \Sigma}[\sigma(3) < \sigma(1)] = 1,$$
so it must be that $\Pr_{f \sim \mathcal{L}}[f(3) \succ_3 f(1)] = 1$. Thus, we must have $p_{1a} = 0$, because otherwise there would exist a simple assignment in the lottery in which agent $1$ is assigned with $a$ and agent $3$ is assigned with some less preferred item under $\pi_3$. By the same reasoning, we note that $p_{2b} = 0$.
Also by definition of LEF, observe that
$$\Pr_{f \sim \mathcal{L}}[f(2) \succ_2 f(3)] \geq \Pr_{\sigma \sim \Sigma}[\sigma(2) < \sigma(3)] = \frac{1}{2}.$$
This implies $p_{3a} < 1$, as otherwise we have $f(3) = a$ for all $f \sim \mathcal{L}$; combined with the fact that $p_{2b} = 0$, we would have $f(3) \succ_2 f(2)$ for all $f \sim \mathcal{L}$, which contradicts $\Pr_{f \sim \mathcal{L}}[f(2) \succ_2 f(3)] \geq \frac{1}{2}$.
Since $p_{1a} = 0$, $p_{3a} < 1$, and $\sum_{i}p_{ia} = 1$, it follows that $p_{2a} + p_{4a} > 0$. Similarly, we have $p_{1b} + p_{3b} > 0$. Without loss of generality, we assume that $p_{2a} > 0$ and $p_{1b} > 0$ (if $p_{4a} > 0$ or $p_{3b} > 0$, the proof proceeds similarly). Let $p_{min} = \min(p_{2a}, p_{1b})$; define random assignment $Q = [q_{ij}]$ by
$$q_{ij} =
\begin{cases}
p_{ij} \text{ if } i \notin \{1, 2\} \text{ and } j \notin \{a, b\}\\
p_{ij} + p_{min} \text{ if } (i, j) = (1, a) \text{ or } (2, b)\\
p_{ij} - p_{min} \text{ if } (i, j) = (1, b) \text{ or } (2, a)
\end{cases}
$$
We can see that $Q$ stochastically dominates $P$. In particular, all that is different in $Q$ is that agent 1 receives from agent 2 some of agent 2's allocated probability mass on item $a$ in exchange for an equivalent amount of agent 1's probability mass on item $b$. Since $a \succ_1 b$ and $b \succ_2 a$ and nothing else changes, agents 1 and 2 prefer $Q$, and nothing has changed for agents 3 and 4. This contradicts the fact that $P$ satisfies OE. Thus, we can conclude that no random assignment in this instance satisfies LEF and OE.
\end{proof}
Theorem~\ref{DOMOE} can be interpreted as a fundamental tradeoff between efficiency and fairness conceived as LEF. Next, we show that LEF and SEF are two fundamentally different notions of fairness that are incompatible with one another. As we will see later in Section~\ref{sec:algorithms}, each of LEF and SEF independently can be guaranteed. Thus, neither notion of fairness is subsumed by the other.
\begin{theorem}
\label{theorem2}
LEF is incompatible with SEF.
\end{theorem}
\begin{proof}
We present an instance in which no random assignment can satisfy both LEF and SEF. There are $n=5$ agents and $m=5$ items which we label $a, b, c, d, e$ for clarity. Preferences are given by
\begin{align*}
&\pi_1, \pi_3: a \succ b \succ c \succ d \succ e, \quad \pi_2, \pi_4: b \succ a \succ c \succ d \succ e,\\
&\pi_5: a \succ c \succ b \succ d \succ e.
\end{align*}
We consider the priority $\Sigma = \{(\sigma_1, \frac{1}{2}), (\sigma_2, \frac{1}{2})\}$ defined by
$$\sigma_1(3) < \sigma_1(5) < \sigma_1(1) < \sigma_1(4) < \sigma_1(2),$$
$$\sigma_2(4) < \sigma_2(5) < \sigma_2(2) < \sigma_2(3) < \sigma_2(1).$$
In other words, with probability $\frac{1}{2}$ under $\sigma_1$, agent 3 has the highest priority, then agents 5, 1, 4, and finally 2. Similarly for $\sigma_2$.
Assume for contradiction that there exists a random assignment $P = [p_{ij}]$, together with a lottery $\mathcal{L}$ which induces $P$, that satisfies LEF and SEF. Since agent 3 always has higher priority than agent 1 and agent 3 prefers $a$ over all other items, LEF implies that $p_{1a} = 0$. Similarly, since agent 4 always has higher priority than agent 2 and agent 4 prefers $b$ over all other items, LEF implies that $p_{2b} = 0$.
Recall that $S_i$ is the probability distribution over $[n]$ induced by $\Sigma$ for agent $i$ and $\sigma^*$ is the identity permutation. Since $S_1 \succ_{\sigma^*}^{sd} S_2$ by construction and $P$ satisfies SEF by assumption, we have $P_1 \succ_{\pi_1}^{sd} P_2$. Combined with the fact that $p_{1a} = 0$, we must have $p_{2a} = 0$. Similarly, $p_{1b} = 0$.
We next show $p_{1c} = p_{2c} = \frac{1}{2}$. First, observe that LEF guarantees
$$\Pr_{f \sim \mathcal{L}}[f(4) \succ_4 f(2)] \geq \Pr_{\sigma \sim \Sigma}[\sigma(4) < \sigma(2)] = 1.$$
Thus, since $e$ is the least preferred item by agent 4, we must have $p_{4e} = 0$. Also by LEF, we have
$$\Pr_{f \sim \mathcal{L}}[f(1) \succ_1 f(4)] \geq \Pr_{\sigma \sim\Sigma}[\sigma(1) < \sigma(4)] = \frac{1}{2},$$
i.e. $\Pr_{f \sim \mathcal{L}}[f(1) \succ_1 f(4)] \geq \frac{1}{2}$. On the other hand, since $p_{4e} = 0$, the worst item that agent 4 can get under $\pi_4$ is $d$, so
$$\Pr_{f \sim \mathcal{L}}[f(1) \succ_1 f(4)] \leq p_{1a} + p_{1b} + p_{1c} = p_{1c},$$ since we earlier found that $p_{1a} = p_{1b} = 0$.
Recalling that $\Pr_{f \sim \mathcal{L}}[f(1) \succ_1 f(4)] \geq \frac{1}{2}$, we get $p_{1c} \geq \frac{1}{2}$. Similarly, we have $p_{2c} \geq \frac{1}{2}$. Since $\sum_{i}p_{ic} = 1$, it must be the case that $p_{1c} = p_{2c} = \frac{1}{2}$. We deduce that for any $f \sim \mathcal{L}$, either $f(1) = c$ or $f(2) = c$, because on one hand, for any fixed $f$, we should have $f(1) \neq f(2)$, while on the other hand, $p_{1c} + p_{2c} = 1$.
Observe that $p_{5a} \leq \frac{1}{2}$. This follows directly from LEF, because
$$\Pr_{f \sim \mathcal{L}}[f(3) \succ_3 f(5)] \geq \Pr_{\sigma \sim \Sigma}[\sigma(3) < \sigma(5)] = \frac{1}{2};$$
if $p_{5a} > \frac{1}{2}$, we would have $\Pr_{f \sim \mathcal{L}}[f(3) \succ_3 f(5)] \leq 1 - p_{5a} < \frac{1}{2}$, leading to a contradiction. Moreover, we have $p_{5c} = 0$, since we already have $p_{1c} + p_{2c} = 1$.
On one hand, we should have $\Pr_{f \sim \mathcal{L}}[f(5) \succ_5 f(1) \text{ and } f(5) \succ_5 f(2)] = 1$, since $\sigma_i(5) < \sigma_i(1)$ and $\sigma_i(5) < \sigma_i(2)$ for $i \in \{1, 2\}$; but on the other hand, we have
$\Pr_{f \sim \mathcal{L}}[f(5) \succ_5 f(1) \text{ and } f(5) \succ_5 f(2)] \leq p_{5a} \leq \frac{1}{2},$
because for any $f \sim \mathcal{L}$, either $f(1) = c$ or $f(2) = c$, so $f(5) \succ_5 f(1)$ and $f(5) \succ_5 f(2)$ hold if and only if $f(5) = a$. This is a contradiction. Thus, LEF and SEF are incompatible.
\end{proof}
We finally show that OE, 1-LEF, and PROP are simultaneously incompatible. This will inform the design of algorithms in Section~\ref{sec:algorithms}.
\begin{lemma}
\label{lem:imposs3}
There is an instance where no allocation simultaneously satisfies OE, 1-LEF, and PROP.
\end{lemma}
\begin{proof}
We use the instance in the proof of Theorem~\ref{DOMOE}. Since agent $3$ is ranked higher than agent $1$ with probability $1$ and since their preferences over items is identical, 1-LEF implies agent $1$ is allocated item $a$ with probability $0$. Now PROP implies agent $1$ receives item $b$ with probability at least $1/2$. By a similar reasoning, agent $2$ must get item $a$ with probability at least $1/2$. But any such allocation cannot be OE, completing the proof.
\end{proof}
\section{Algorithms}
\label{sec:algorithms}
As we have seen, LEF is a very strong notion of fairness which is incompatible with both OE and SEF. In the following, we present two algorithms -- {\em cycle elimination} (CE) and {\em unit time eating} (UTE) -- that satisfy both OE and SEF. In addition, we show that CE satisfies 1-LEF and UTE satisfies PROP. Given Lemma~\ref{lem:imposs3}, we cannot design an algorithm that achieves OE and both these properties.
Therefore, both CE and UTE are reasonable fair allocation algorithms in that they satisfy efficiency (OE) and envy-freeness (SEF). The choice of which to implement depends on whether we care more about a form of proportionality in the resulting allocation (UTE satisfies PROP) or whether we care about additional envy-freeness in a deterministic sense (CE satisfies 1-LEF).
\subsection{Cycle Elimination algorithm}
We first introduce a \emph{Cycle Elimination algorithm} (CE), which works by constructing a directed graph based on the random priority and allocating items based on this graph.
To begin with, we introduce the \emph{Probabilistic Serial rule}~\cite{MB2001}, a continuous algorithm which works as follows. Initially, each agent $i$ goes to their favorite item $j$ and starts ``eating'' it (that is, increasing $p_{ij}$) at unit speed. It is possible that several agents eat the same item at the same time. Whenever an item is fully eaten, each of the agents eating it goes to their favorite remaining item not fully allocated (that is, $\sum_{i} p_{ij} < 1$) and starts eating it in the same way. This process continues until all items are consumed, or all the agents are full (that is, $\sum_{j} p_{ij} = 1$). We use {\sc PS($\mathcal{A}$, $\mathcal{I}$)} to denote the assignment produced by running Probabilistic Serial rule on the set of agents $\mathcal{A}$ and items $\mathcal{I}$.
We construct a graph from $\Sigma$, which we call a Stochastic-Dominance graph (SD-graph), as follows: Start with a graph with $n$ vertices, where the $i$-th vertex corresponds to the $i$-th agent. For any pair of distinct agents $i$ and $j$, if $S_i \succ_{\sigma^*}^{sd} S_j$, then we draw a directed edge from $i$ to $j$. The algorithm is now formally stated in Algorithm~\ref{cycle}.
\begin{algorithm}[htbp]
\SetAlgoLined
{\bf Input:} Set of agents $\mathcal{A}$, set of items $\mathcal{I}$, SD-graph $G$\;
Let $\hat{G}$ be the condensation\footnotemark of $G$\;
Let $\widetilde{\mathcal{A}}$ be the set of agents that belong to a strongly connected component whose in-degree in $\hat{G}$ is zero\;
\eIf{$\mathcal{A} = \widetilde{\mathcal{A}}$}{
Output {\sc PS($\mathcal{A}$, $\mathcal{I}$)}\;
}
{
$\mathcal{A}' \leftarrow \mathcal{A} \setminus \widetilde{\mathcal{A}}$; $\mathcal{I}'\leftarrow \mathcal{I}\: \setminus$ {\sc PS($\widetilde{\mathcal{A}}$, $\mathcal{I}$)}; $G' \leftarrow G \setminus \widetilde{\mathcal{A}}$\;
Output {\sc PS($\widetilde{\mathcal{A}}$, $\mathcal{I}$)} + Eliminate($\mathcal{A}'$, $\mathcal{I}'$, $G'$)\;
}
\caption{Cycle Elimination, Eliminate($\mathcal{A}$, $\mathcal{I}$, $G$)}
\nllabel{cycle}
\end{algorithm}
\footnotetext{Condensation of a graph is a directed acyclic graph formed by contracting each strongly connected component to a single vertex.}
\paragraph{Analysis.} Our main result is the following theorem.
\begin{theorem}
\label{thm:main1}
The Cycle Elimination algorithm satisfies OE, SEF, and 1-LEF. It runs in $O(n^3 + nm + n|\Sigma|)$ time.
\end{theorem}
\begin{proof}
Theorem 1 in~\cite{MB2001} states that any simultaneous eating algorithm where each agent always eats from her favorite remaining item satisfies OE. Hence, CE satisfies OE.
To show SEF, fix two agents $i$ and $j$, and assume $S_i \succ^{sd}_{\sigma_{*}} S_j$. Let $P$ be the random assignment produced by CE. We show that $P_i \succ^{sd}_{\pi_i} P_j$. Since $S_i \succ^{sd}_{\sigma_{*}} S_j$, there exists an edge from $i$ to $j$ in the SD-graph. Thus, $i$ and $j$ either belong to the same strongly connected component, or the strongly connected component of $i$ has higher topological order than that of $j$'s. Either way, we have $P_i \succ^{sd}_{\pi_i} P_j$, from which we can conclude that CE satisfies SEF.
To show 1-LEF, fix two agents $i$ and $j$, and assume $\Pr_{\sigma \sim \Sigma}[\sigma(i) < \sigma(j)] = 1$. Let $P$ be the random assignment produced by CE. We show that, for any lottery $\mathcal{L}$ inducing $P$, we have $\Pr_{f \sim \mathcal{L}}[f(i) \succ_i f(j)] = 1$. We use proof by contradiction. Assume that there exists a lottery $\mathcal{L}_0$ which induces $P$ such that $\Pr_{f \sim \mathcal{L}_0}[f(i) \succ_i f(j)] < 1$. This implies that there exist two items $a$ and $b$ such that $a \succ_i b$, $p_{ib} > 0$ and $p_{ja} > 0$. On the other hand, since $\Pr_{\sigma \sim \Sigma}[\sigma(i) < \sigma(j)] = 1$, we have $S_i \succ^{sd}_{\sigma_{*}} S_j$; thus, the strongly connected component that agent $i$ belongs to must have higher topological order than that of $j$. By CE, agent $j$ could start eating only when agent $i$ is completely full. Thus, $p_{ja} > 0$ implies that there is still item $a$ remaining when agent $i$ finishes eating; this leads to a contradiction, because $a\succ_i b$ implies that $i$ could eat $a$ instead of $b$. Therefore, we must have $\Pr_{f \sim \mathcal{L}}[f(i) \succ_i f(j)] = 1$ for all lotteries $\mathcal{L}$ which induce $P$, from which we can conclude that CE satisfies 1-LEF.
To show the running time, preprocessing $\Sigma$ in order to compute the stochastic dominance relation between agents takes $O(n|\Sigma| + n^3)$ time. Constructing the SD-graph by the stochastic dominance relation between agents takes $O(n^2)$ time, as there are $\binom{n}{2}$ pairs of agents. Given the SD-graph, running CE takes $O(nm)$ time. This is because we only need to consider at most $m$ time points: the time at which each item is eaten up. We divide this process into $m$ time intervals. During each time interval, each agent keeps eating the same item, so it simply takes $O(n)$ time to keep track of the state of each agent, and the running time over $m$ intervals is $O(nm)$. Hence, the total running time is $O(n^3 + nm + n|\Sigma|)$.
\end{proof}
\subsection{Unit-time Eating Algorithm}
We next introduce the \emph{Unit-time Eating Algorithm} (UTE). Recall that $\Sigma = \{(\sigma_k, \rho_k)\}$. Essentially, the algorithm works by dividing the time into $n$ units, each of duration one; in time unit $t$, the $t$-th ranked agent in $\sigma_k$ eats their favorite item among those left over at rate $\rho_k$ for all $k$. The procedure is formally stated in Algorithm~\ref{unit}.
\begin{algorithm}[htbp]
\SetAlgoLined
\For{$t = 1, \ldots, n$}{
The $t$-th ranked agent in each $\sigma_k$ eats their favorite item among those left over at rate $\rho_k$ for all $(\sigma_k, \rho_k) \in \Sigma$\;
}
\caption{Unit-time Eating Algorithm}
\nllabel{unit}
\end{algorithm}
\paragraph{Analysis.} We show the following theorem.
\begin{theorem}
\label{thm:main2}
The Unit-time Eating Algorithm satisfies OE, SEF, and PROP. Further, it runs in $O(n^2|\Sigma| + nm)$ time.
\end{theorem}
\begin{proof}
By Theorem 1 in~\cite{MB2001}, we have UTE satisfies OE.
To show SEF, fix two agents $i$ and $j$; assume that $S_i \succ^{sd}_{\sigma_{*}} S_j$. Let $P$ be the random assignment produced by UTE; we show that $P_i \succ^{sd}_{\pi_i} P_j$. Let $t_k$ be the time when item $\pi_i^{-1}(k)$ has been eaten up. Fix some $k \in [m]$; because $S_i \succ^{sd}_{\sigma_{*}} S_j$, we have
$$ \sum_{t = 1}^{\lfloor t_k\rfloor} S_{it} \geq \sum_{t = 1}^{\lfloor t_k\rfloor} S_{jt} \qquad \mbox{and} \qquad \sum_{t = 1}^{\lceil t_k\rceil} S_{it} \geq \sum_{t = 1}^{\lceil t_k\rceil} S_{jt} $$
Combining these gives
\begin{equation}
\label{eq3}
\big(t_k - \lfloor t_k\rfloor\big)S_{i\lceil t_k\rceil} + \sum_{t = 1}^{\lfloor t_k\rfloor} S_{it} \geq \big(t_k - \lfloor t_k\rfloor\big)S_{j\lceil t_k\rceil} + \sum_{t = 1}^{\lfloor t_k\rfloor} S_{jt}.
\end{equation}
Observe that
$$\sum_{r = 1}^k P_{i\pi_i^{-1}(r)} = \big(t_k - \lfloor t_k\rfloor\big)S_{i\lceil t_k\rceil} + \sum_{t = 1}^{\lfloor t_k\rfloor} S_{it},$$
$$\sum_{r = 1}^k P_{j\pi_i^{-1}(r)} \leq \big(t_k - \lfloor t_k\rfloor\big)S_{j\lceil t_k\rceil} + \sum_{t = 1}^{\lfloor t_k\rfloor} S_{jt},$$
which gives $\sum_{r = 1}^k P_{i\pi_i^{-1}(r)} \geq \sum_{r = 1}^k P_{j\pi_i^{-1}(r)}$. Because this holds for all $k \in [m]$, we conclude that $P_i \succ^{sd}_{\pi_i} P_j$. Hence, UTE satisfies SEF.
To show PROP, fix some agent $i$. Suppose the allocation produced by UTE for this agent is $P_i$, and the baseline allocation for this agent is $\overline{P}_i$. We will show that $P_i \succ_{\pi_i}^{sd} \overline{P}_i$. Let $t_k$ be the time when item $\pi_i^{-1}(k)$ has been eaten. Fix some $k \in [m]$. Clearly, we have $t_k \geq k$, because in order to eat up $\pi_i^{-1}(k)$, we have to eat up $\pi_i^{-1}(r)$ for all $r < k$. We observe that
$$\sum_{r = 1}^k P_{i\pi_i^{-1}(r)} = \big(t_k - \lfloor t_k\rfloor\big)S_{i\lceil t_k\rceil} + \sum_{t = 1}^{\lfloor t_k\rfloor} S_{it}.$$
Combined with $t_k \geq k$, we have
$$\sum_{r = 1}^k P_{i\pi_i^{-1}(r)} \geq \sum_{t = 1}^{k} S_{it} = \sum_{r = 1}^k\overline{P}_{i\pi_i^{-1}(r)}.$$
Because this holds for all $k \in [m]$, we conclude that $P_i \succ_{\pi_i}^{sd} \overline{P}_i$, and hence UTE satisfies PROP.
To show running time, preprocessing $\Sigma$ to obtain the eating speed of each agent in each unit time interval takes $O(n^2|\Sigma|)$ time. Then, running UTE takes $O(nm)$ time, as we similarly only need to consider at most $m$ time points and keeping track of the state of each agent in each time interval takes $O(n)$ time. Therefore, the total running time is $O(n^2|\Sigma| + nm)$.
\end{proof}
\section{Generating Random Priorities and Empirical Results}
\label{sec:experiments}
In this section, we will demonstrate how one could obtain random priorities in practical settings using an example of school admission under implicit bias. In several environments based on such generative model, we will compare our proposed algorithms, namely Cycle Elimination (CE) and Unit-time Eating (UTE), with other common bias mitigating allocation algorithms such as ``the Rooney Rule''~\cite{celis2020interventions}. We empirically demonstrate that all existing algorithms induce stochastic envy. To show this, using the same notation as in Definition~\ref{def_sef}, we say a pair of agents $i$ and $j$ form a {\em stochastic envy pair} if $Z_i\succ_{\sigma_*}^{sd} Z_j$ but $P_i \nsucc_{\pi_i}^{sd} P_j$, and we will count the number of stochastic envy pairs produced by each algorithm.
% NOTE(review): these two \newcommand lines were garbled (macro names replaced by their
% expansions, which is invalid LaTeX); restored as valid definitions. The body text uses
% the literal words "advantaged\ "/"disadvantaged\ ", so these macros are kept for compatibility.
\newcommand{\advantaged}{advantaged\ }
\newcommand{\disadvantaged}{disadvantaged\ }
\subsection{Random Priority in School Admission}
\label{sec:experiments-priority-gen}
Consider a group of $N$ students, including $n$ disadvantaged\ students with indices $\cbr{1,\dots, n}$ and $N-n$ advantaged\ students with indices $\cbr{n+1, \ldots, N}$. Suppose that they are competing for admission priorities of $\ell$ schools with capacities $c_1,\dots, c_\ell\in \NN$ such that $\sum_{i=1}^\ell c_i = N$, in which process disadvantaged students are subjected to implicit bias on their capability. We will quantify the effect of implicit bias in the experiments.
This is equivalent to allocating $N$ items to $N$ agents, where items correspond to seats, and the agents' preferences are their school choices.
Denote the $j$-th seat of school $i$ as $s_{i_j}$, then the set of seats is $S\triangleq \bigcup_{i=1}^\ell\{s_{i_1}, s_{i_2},\dots, s_{i_{c_i}}\}$. For any ordinal preference $\tilde\pi:[\ell]\to[\ell]$ over the schools, it induces an ordinal preference $\pi:S\to[N]$ over the seats such that for any $s_{i_j}\in S$, $\pi(s_{i_j}) = j + \sum_{k:\tilde\pi(k)<\tilde\pi(i)}c_k.$ In other words, if a student prefers school $a$ to school $b$, then all seats of school $a$ are preferred over the seats of school $b$. For the seats in the same school, smaller indices are preferred. In the following, we describe how random priorities over students are generated.
For each student, we assign a ``capability score'' $x_i$ that is drawn from the same distribution $\cD$, and students with higher capability score should have higher priority. Moreover, assume every student from the disadvantaged\ group is subjected to a multiplicative implicit bias $b_i$, which is independently sampled from some distribution $\cB$. A disadvantaged\ student with capability score $x_i$ is perceived to have a biased score $\hat x_i \triangleq b_ix_i$. We will also consider {\em additive bias} $\hat x_i \triangleq x_i+b_i$ in our experiments. The admission committee makes decisions based on the perceived scores (which are biased for disadvantaged students and equal to the true scores for advantaged students).
\begin{figure*}
% NOTE(review): the original \caption{ brace was never closed and \label{tab:add} was
% duplicated, which breaks compilation. Placeholder caption below --- restore the
% intended caption text from the original manuscript.
\caption{Number of stochastic envy pairs for each algorithm.}
\label{tab:add}
\label{tab:mult}
\end{figure*}
For each experiment, we fix a set of unbiased capability scores $\cbr{x_i}_{i = 1}^N$ for the students, where $x_i\overset{\mathrm{iid}}{\sim} \cD$. Then, we take $n$ bias parameters $\{b_i\}_{i = 1}^n$ independently from $\cB$. The perceived scores of the students are $\cbr{\hat x_1,\dots, \hat x_{n}, x_{n+1}, \dots, x_N}$, where $\hat{x_i} = b_ix_i$. Now imagine we are the admission committee. We know $\cB$, $\cD$, and the perceived scores of the students. The goal is to approximately recover the underlying true scores of the students. To do this, we compute a posterior distribution for the bias factor of each disadvantaged student given $\cB$, the biased score of this student, and $\cD$. Concretely, the density of the posterior distribution for the bias factor of the $i^{\text{th}}$ disadvantaged student, which we denote by $\bm{b_i}$, is $f_{\bm{b_i}}(b) = \frac{f_{\cB}(b)f_{\cD}(\hat{x}_i/b)}{\int_{0}^\infty f_{\cB}(u)f_{\cD}(\hat{x}_i/u)du}$. Given $\{\bm{b_i}\}_{i = 1}^n$, we independently draw $q$ sets of bias parameters for disadvantaged students, where we denote the $j^{\text{th}}$ set of bias parameters as $\{b_i^{(j)}\}_{i = 1}^n$, {\em i.e.} $b_i^{(j)}\overset{\mathrm{iid}}{\sim} \bm{b_i}$. Let the ordinal relationship induced by $\{b_i^{(j)}\}_{i = 1}^n$ be $\sigma^{(j)}$. We consider the random priority $\{(\sigma^{(j)}, \frac{1}{q})\}_{j = 1}^q$. We denote the random priority induced by $q$ sets of bias parameters as $\Sigma^{(q)}$.
\subsection{Algorithms for Comparison}
To compare with CE and UTE, we consider four alternative solutions to the allocation problem under implicit bias. Fix a set of biased scores $\cbr{\hat x_1,\dots, \hat x_{n}, x_{n+1}, \dots, x_N}$, let $\hat\sigma$ denote its induced ordinal relationship. For a deterministic priority $\sigma$ over the students and ordinal preferences $\Pi\triangleq\{\pi_i\}_{i\in [N]}$ of students over the seats, let $GS(\sigma, \Pi)$ denote the deterministic assignment produced by the Gale-Shapley algorithm~\cite{gale1962college} which produces a stable matching between students and seats.
The algorithms that we compare with are as follows:
\begin{enumerate}
\item \emph{Naive Stable Matching} (N) takes deterministic priority $\hat\sigma$ and returns the assignment $P_{\text{N}}(\hat\sigma,\Pi)\triangleq GS(\hat\sigma,\Pi)$.
\item \emph{Random Naive Stable Matching} (RN) takes the random priority $\Sigma^{(q)} = \cbr{(\sigma_i, p_i)}_{i = 1}^q$ and outputs a lottery based on (N), namely $ \cbr{(P_{\text{N}}(\sigma_i,\Pi), p_i)}_{i = 1}^q$.
\item \emph{Rooney Stable Matching} (R) takes in the deterministic priority $\hat\sigma$ as input. Using the Rooney constraint in Theorem 3.3 of~\cite{celis2020interventions}, it creates a new priority $\hat\sigma_{\text{R}}$. We present this formally in Algorithm~\ref{alg:rooney-like}.
Using $\hat\sigma_{\text{R}}$, \emph{Rooney Stable Matching} returns the assignment $P_{\text{R}}(\hat{\sigma}_{\text{R}},\Pi)\triangleq GS(\hat\sigma_{\text{R}}, \Pi)$.
\item \emph{Random Rooney Stable Matching} (RR) takes the random priority $\Sigma^{(q)} = \cbr{(\sigma_i, p_i)}_{i = 1}^q$ and outputs a lottery based on (R), namely $ \cbr{(P_{\text{R}}(\sigma_i,\Pi), p_i)}_{i = 1}^q$.
\end{enumerate}
\begin{algorithm}[htbp]
\SetAlgoLined
Let $A,B$ be the ordered sub-sequences of disadvantaged\ and advantaged\ candidates in $\hat\sigma$ respectively, {\em i.e.} $p<q\iff \hat\sigma(A[p])<\hat\sigma(A[q])$\;
$i,j\gets 0$\;
\While{$i+j<N$}{
\eIf{$\lfloor \frac{i}{i + j}\rfloor < \frac{n}{N} \normalfont{\textbf{ or }} \hat\sigma(i) < \hat\sigma(n+j)$}
{$\hat\sigma_{\text{R}}(A[i]) = i+j$ and $i\gets i+1$\;}
{$\hat\sigma_{\text{R}}(B[j]) = i+j$ and $j\gets j+1$\;}
}
\Return $\hat\sigma_{\text{R}}$
\caption{Proportional Rooney-rule-like Constraint~\cite{celis2020interventions}}
\nllabel{Ronnie}
\label{alg:rooney-like}
\end{algorithm}
\subsection{Prevalence of Stochastic Envy}
We now demonstrate that with random priority induced by the generative model described in Section \ref{sec:experiments-priority-gen}, stochastic envy exists for the bias mitigating algorithms N, RN, R, RR.
We consider an admission problem with $\ell$ schools each with $\lfloor \frac{N}{\ell + 1} \rfloor$ seats. Every student $i\in [N]$ has a uniformly random preference order over the $\ell$ schools. There is also a ``dummy school'' with $N-\ell\lfloor \frac{N}{\ell + 1} \rfloor$ seats representing no admission. Every student prefers seats in the dummy school the least. For seats in the same school, all students have the same preference order. This represents the situation in which schools may distribute educational resources to students based on their rank when admitted.
We take $N = 35$ and $n=10$, and experiment with $\ell = 1, 2, 3$. For each choice of $\ell$, we experiment with $\beta = 0.2, 0.5, 0.8$. For multiplicative bias, we take $\mathcal{D} = \text{Exponential}(1)$ and $\cB = \text{Exponential}(\beta)$; for additive bias, we take $\cD=\text{Uniform}(0,2)$ and $\cB=\text{Uniform}(0,\beta)$.
Figure~\ref{tab:mult} presents the number of stochastic envy pairs for each algorithm averaged over 100 experiments. For each experiment, the random priority is computed with 1000 sets of bias parameters.
Except for RN in the one-school setting, stochastic envy exists in all other scenarios for N, RN, R, RR. While empirically Rooney-rule-like constraints do significantly reduce the number of stochastic envy pairs compared to applying no mitigation mechanism at all, we still need CE or UTE to obtain guaranteed SEF.
\section{Conclusion}
We conclude with some open questions. First, even though SEF and LEF are incompatible, we do not know whether they can be compatible under certain natural generative assumptions on the random priorities and agent preferences. Second, it is known~\cite{MB2001} that OE (and hence CE and UTE) is incompatible with strategyproofness under natural assumptions. However, it is interesting to explore whether SEF alone is also incompatible with strategy-proofness. Finally, can our framework be extended to the scenario where the agent preferences are random as well, {\em i.e.} each agent reports a distribution over preferences instead of a deterministic preference?
\end{document} |
\begin{document}
\title{Global-in-$x$ Stability of Steady Prandtl Expansions \ for 2D Navier-Stokes Flows}
\begin{abstract}
In this work, we establish the convergence of 2D, stationary Navier-Stokes flows, $(u^\ensuremath{\varepsilon}, v^\ensuremath{\varepsilon})$ to the classical Prandtl boundary layer, $(\bar{u}_p, \bar{v}_p)$, posed on the domain $(0, \infty) \times (0, \infty)$:
\begin{align*}
\| u^\ensuremath{\varepsilon} - \bar{u}_p \|_{L^\infty_y} \lesssim \sqrt{\ensuremath{\varepsilon}} \langle x \rangle^{- \frac 1 4 + \delta}, \qquad \| v^\ensuremath{\varepsilon} - \sqrt{\ensuremath{\varepsilon}} \bar{v}_p \|_{L^\infty_y} \lesssim \sqrt{\ensuremath{\varepsilon}} \langle x \rangle^{- \frac 1 2}.
\end{align*}
This validates Prandtl's boundary layer theory \textit{globally} in the $x$-variable for a large class of boundary layers, including the entire one parameter family of the classical Blasius profiles, with sharp decay rates. The result demonstrates asymptotic stability in two senses simultaneously: (1) asymptotic as $\ensuremath{\varepsilon} \rightarrow 0$ and (2) asymptotic as $x \rightarrow \infty$. In particular, our result provides the first rigorous confirmation for the Navier-Stokes equations that the boundary layer cannot ``separate'' in these stable regimes, which is very important for physical and engineering applications.
\end{abstract}
\section{Introduction}
\hspace{5 mm} A major problem in mathematical fluid mechanics is to describe the inviscid limit of Navier-Stokes flows in the presence of a boundary. This is due to the mismatch of boundary conditions for the Navier-Stokes velocity field (the ``no-slip" or Dirichlet boundary condition), and that of a generic Euler velocity field (the ``no penetration" condition). In order to aptly characterize the inviscid limit (in suitably strong norms), in \cite{Prandtl} Prandtl proposed, in the precise setting of 2D, stationary flows considered here, the existence of a thin ``boundary layer", $(\bar{u}_p, \bar{v}_p)$, which transitions the Dirichlet boundary condition to an outer Euler flow.
The introduction of the Prandtl ansatz has had a monumental impact in physical and engineering applications, specifically in the 2D, steady setting, which is used to model flows over an airplane wing, design of golf balls, etc... (see \cite{Schlicting}, for instance). However, its mathematical validity has largely been in question since its inception. Validating the Prandtl ansatz is an issue of \textit{asymptotic stability of the profiles $(\bar{u}_p, \bar{v}_p)$}: $u^\ensuremath{\varepsilon} \rightarrow \bar{u}_p$ and $v^\ensuremath{\varepsilon} \rightarrow \bar{v}_p$ as the viscosity, $\ensuremath{\varepsilon}$, tends to zero (again, in an appropriate sense). Establishing this type of stability (or instability) has inspired several works, which we shall detail in Section \ref{existing}. The main purpose of this work is to provide an affirmation of Prandtl's ansatz, in the precise setting of his seminal 1904 work (2D, stationary flows), most notably \textit{globally} in the variable, $x$, (with asymptotics as $x \rightarrow \infty$) which plays the role of a ``time" variable in this setting.
The role that $x$ plays as a ``time" variable will be discussed from a mathematical standpoint in Section \ref{subsection:asy:x}. Physically, the importance of this ``time" variable dates back to Prandtl's original work, in which he says:
\begin{quote}
``The most important practical result of these investigations is that, in certain cases, the flow separates from the surface at a point [$x_\ast$] entirely determined by external conditions... As shown by closer consideration, the necessary condition for the separation of the flow is that there should be a pressure increase along the surface in the direction of the flow." (L. Prandtl, 1904, \cite{Prandtl})
\end{quote}
In this work, we provide the first rigorous confirmation, for the Navier-Stokes equations, that in the conjectured stable regime (in the absence of a pressure increase), the flow does not separate as $x \rightarrow \infty$. Rather, we prove that the flow relaxes back to the classical self-similar Blasius profiles, introduced by H. Blasius in \cite{Blasius}, which we also introduce and discuss in Section \ref{subsection:asy:x}.
\subsection{The Setting}
\hspace{5 mm} First, we shall introduce the particular setting of our work in more precise terms. We consider the Navier-Stokes (NS) equations posed on the domain $\mathcal{Q} := (0, \infty) \times (0, \infty)$:
\begin{align} \label{NS:1}
&u^\ensuremath{\varepsilon} u^\ensuremath{\varepsilon}_x + v^\ensuremath{\varepsilon} u^\ensuremath{\varepsilon}_Y + P^\ensuremath{\varepsilon}_x = \ensuremath{\varepsilon} \Delta u^\ensuremath{\varepsilon}, \\ \label{NS:2}
&u^\ensuremath{\varepsilon} v^\ensuremath{\varepsilon}_x + v^\ensuremath{\varepsilon} v^\ensuremath{\varepsilon}_Y + P^\ensuremath{\varepsilon}_Y = \ensuremath{\varepsilon} \Delta v^\ensuremath{\varepsilon}, \\ \label{NS:3}
&u^\ensuremath{\varepsilon}_x + v^\ensuremath{\varepsilon}_Y = 0.
\end{align}
We are taking the following boundary conditions in the vertical direction
\begin{align} \label{BC:vert:intro}
&[u^\ensuremath{\varepsilon}, v^\ensuremath{\varepsilon}]|_{Y = 0} = [0,0], \qquad [u^\ensuremath{\varepsilon}(x,Y), v^\ensuremath{\varepsilon}(x,Y)] \xrightarrow{Y \rightarrow \infty} [u_E(x,\infty), v_E(x,\infty)].
\end{align}
which coincide with the classical no-slip boundary condition at $\{Y =0\}$ and the Euler matching condition as $Y \uparrow \infty$. We now fix the vector field
\begin{align}
[u_E, v_E] := [1, 0], \qquad P_E = 0
\end{align}
as a solution to the steady, Euler equations ($\ensuremath{\varepsilon} = 0$ in \eqref{NS:1} - \eqref{NS:3}), upon which the matching condition above reads $[u^\ensuremath{\varepsilon}, v^\ensuremath{\varepsilon}] \xrightarrow{Y \rightarrow \infty} [1, 0]$.
Generically, there is a mismatch at $Y = 0$ between the boundary condition \eqref{BC:vert:intro} and that of an inviscid Eulerian fluid, which typically satisfies the no-penetration condition, $v^E|_{Y = 0} = 0$. Given this, it is not possible to demand convergence of the type $[u^\ensuremath{\varepsilon}, v^\ensuremath{\varepsilon}] \rightarrow [1, 0]$ as $\ensuremath{\varepsilon} \rightarrow 0$ in suitably strong norms, for instance in the $L^\infty$ sense. To rectify this mismatch, Ludwig Prandtl proposed in his seminal 1904 paper, \cite{Prandtl}, that one needs to \textit{modify} the limit of $[u^\ensuremath{\varepsilon}, v^\ensuremath{\varepsilon}]$ by adding a corrector term to $[1, 0]$, which is effectively supported in a thin layer of size $\sqrt{\ensuremath{\varepsilon}}$ near $\{Y = 0\}$. Mathematically, this amounts to proposing an asymptotic expansion of the type
\begin{align} \label{ansatz:1:1}
u^\ensuremath{\varepsilon}(x, Y) = 1 + u^0_p(x, \frac{Y}{\sqrt{\ensuremath{\varepsilon}}}) + O(\sqrt{\ensuremath{\varepsilon}}) = \bar{u}^0_p(x, \frac{Y}{\sqrt{\ensuremath{\varepsilon}}}) + O(\sqrt{\ensuremath{\varepsilon}}),
\end{align}
where the rescaling $\frac{Y}{\sqrt{\ensuremath{\varepsilon}}}$ ensures that the corrector, $u^0_p$, is supported effectively in a strip of size $\sqrt{\ensuremath{\varepsilon}}$. The quantity $\bar{u}^0_p$ is classically known as the Prandtl boundary layer, whereas the $O(\sqrt{\ensuremath{\varepsilon}})$ term will be referred to in our paper as ``the remainder". Motivated by this ansatz, we introduce the Prandtl rescaling
\begin{align}
y := \frac{Y}{\sqrt{\ensuremath{\varepsilon}}}.
\end{align}
We now rescale the solutions via
\begin{align}
U^\ensuremath{\varepsilon}(x, y) := u^\ensuremath{\varepsilon}(x, Y), \qquad V^\ensuremath{\varepsilon}(x, y) := \frac{v^\ensuremath{\varepsilon}(x, Y)}{\sqrt{\ensuremath{\varepsilon}}}
\end{align}
which satisfy the following system
\begin{align} \label{eq:NS:1}
&U^\ensuremath{\varepsilon} U^\ensuremath{\varepsilon}_x + V^\ensuremath{\varepsilon} U^\ensuremath{\varepsilon}_y + P^\ensuremath{\varepsilon}_x = \Delta_\ensuremath{\varepsilon} U^\ensuremath{\varepsilon}, \\ \label{eq:NS:2}
&U^\ensuremath{\varepsilon} V^\ensuremath{\varepsilon}_x + V^\ensuremath{\varepsilon} V^\ensuremath{\varepsilon}_y + \frac{P^\ensuremath{\varepsilon}_y}{\ensuremath{\varepsilon}} = \Delta_\ensuremath{\varepsilon} V^\ensuremath{\varepsilon}, \\ \label{eq:NS:3}
&U^\ensuremath{\varepsilon}_x + V^\ensuremath{\varepsilon}_y = 0.
\end{align}
\noindent Above, we have denoted the scaled Laplacian operator, $\Delta_\ensuremath{\varepsilon} := \ensuremath{\partial}_y^2 + \ensuremath{\varepsilon} \ensuremath{\partial}_x^2$.
The effectiveness of the ansatz \eqref{ansatz:1:1}, and the crux of Prandtl's revolutionary idea, is that the leading order term $\bar{u}^0_p$ (and its divergence-free counterpart, $\bar{v}^0_p$) satisfy a much simpler equation than the full Navier-Stokes system, known as the Prandtl system,
\begin{align} \label{BL:0:intro:intro}
&\bar{u}^0_p \ensuremath{\partial}_x \bar{u}^0_p + \bar{v}^0_p \ensuremath{\partial}_y \bar{u}^0_p - \ensuremath{\partial}_y^{2} \bar{u}^0_p + P^{0}_{px} = 0, \qquad P^0_{py} = 0, \qquad \ensuremath{\partial}_x \bar{u}^0_p + \ensuremath{\partial}_y \bar{v}^0_p = 0,
\end{align}
which are supplemented with the boundary conditions
\begin{align} \label{BL:1:intro:intro}
&\bar{u}^0_p|_{x = 0} = \bar{U}^0_p(y), \qquad \bar{u}^0_p|_{y = 0} = 0, \qquad \bar{u}^0_p|_{y = \infty} = 1, \qquad \bar{v}^0_p = - \int_0^y \ensuremath{\partial}_x \bar{u}^0_p.
\end{align}
This system is simpler than \eqref{eq:NS:1} - \eqref{eq:NS:3} in several senses. First, due to the condition $P^0_{py} = 0$, we obtain that the pressure is constant in $y$ (and then in $x$ due to the Bernoulli's equation), and hence \eqref{BL:0:intro:intro} is really a scalar equation.
In addition to this, by temporarily omitting the transport term $\bar{v}^0_p \ensuremath{\partial}_y \bar{u}^0_p$, one can make the formal identification that $\bar{u}^0_p \ensuremath{\partial}_x \approx \ensuremath{\partial}_{yy}$, which indicates that \eqref{BL:1:intro:intro} is really a \textit{degenerate, parabolic} equation, which is in stark contrast to the elliptic system \eqref{eq:NS:1} - \eqref{eq:NS:3}. From this perspective, $x$ acts as a time-like variable, whereas $y$ acts as a space-like variable. We thus treat \eqref{BL:0:intro:intro} - \eqref{BL:1:intro:intro} as one would a typical Cauchy problem. Indeed, one can ask questions of local (in $x$) wellposedness, global (in $x$) wellposedness, finite-$x$ singularity formation, decay and asymptotics, etc,... This perspective will be emphasized more in Section \ref{subsection:asy:x}.
\subsection{Asymptotics as $\ensuremath{\varepsilon} \rightarrow 0$ (Expansion \& Datum)} \label{section:viscosity}
We expand the rescaled solution as
\begin{align}
\begin{aligned} \label{exp:base}
&U^\ensuremath{\varepsilon} := 1 + u^0_p + \sum_{i = 1}^{N_{1}} \ensuremath{\varepsilon}^{\frac{i}{2}} (u^i_E + u^i_P) + \ensuremath{\varepsilon}^{\frac{N_2}{2}} u =: \bar{u} + \ensuremath{\varepsilon}^{\frac{N_2}{2}} u \\
&V^\ensuremath{\varepsilon} := v^0_p + v^1_E + \sum_{i = 1}^{N_{1}-1} \ensuremath{\varepsilon}^{\frac i 2} (v^i_P + v^{i+1}_E) + \ensuremath{\varepsilon}^{\frac{N_{1}}{2}} v^{N_{1}}_p + \ensuremath{\varepsilon}^{\frac{N_2}{2}} v =: \bar{v} + \ensuremath{\varepsilon}^{\frac{N_2}{2}} v, \\
&P^\ensuremath{\varepsilon} := \sum_{i = 0}^{N_1+1} \ensuremath{\varepsilon}^{\frac{i}{2}} P^i_p + \sum_{i = 1}^{N_{1}} \ensuremath{\varepsilon}^{\frac{i}{2}} P^i_E + \ensuremath{\varepsilon}^{\frac{N_2}{2}} P = \bar{P} + \ensuremath{\varepsilon}^{\frac{N_2}{2}}P.
\end{aligned}
\end{align}
\noindent Above,
\begin{align}
[u^0_E, v^0_E] := [1, 0], \qquad [u^i_p, v^i_p] = [u^i_p(x, y), v^i_p(x, y)], \qquad [u^i_E, v^i_E] = [u^i_E(x, Y), v^i_E(x, Y)],
\end{align}
and the expansion parameters $N_{1}, N_2$ will be specified in Theorem \ref{thm.main} for the sake of precision. We note that these are not optimal choices of these parameters, but we chose them large for simplicity. Certainly, it will be possible to bring these numbers significantly smaller.
First, we note that, given the expansion \eqref{exp:base}, we enforce the vertical boundary conditions for $i = 0, \dots, N_1$, $j = 1, \dots, N_1$,
\begin{align}
&u^i_p(x, 0) = - u^i_E(x, 0), & &v^j_E(x, 0) = - v^{j-1}_p(x, 0), & &[u, v]|_{y = 0} = 0, \\
&v^i_p(x, \infty) = u^i_p(x, \infty) = 0, & & u^j_E(x, \infty) = v^j_E(x, \infty) = 0, & &[u, v]|_{y = \infty} = 0.
\end{align}
which ensure the no-slip boundary condition for $[U^\ensuremath{\varepsilon}, V^\ensuremath{\varepsilon}]$ at $y = 0, \infty$. As we do not include an Euler field to cancel out the last $v^{N_1}_p$, we need to also enforce the condition $v^{N_1}_p|_{y = 0} = 0$, which is a particularity for the $N_1$-th boundary layer.
The side $\{x =0\}$ has a distinguished role in this setup as being where ``in-flow" datum is prescribed. This datum is prescribed ``at the level of the expansion", in the sense described below (see the discussion surrounding equations \eqref{datum:given}). Moreover, as $x \rightarrow \infty$, one expects the persistence of just one quantity from \eqref{exp:base}, which is the leading order boundary layer, $[\bar{u}^0_p, \bar{v}^0_p]$, and the remaining terms from \eqref{exp:base} are expected to decay in $x$. This decay will be established rigorously in our main result.
As is standard in this type of problem, the expansion \eqref{exp:base} is part of the ``prescribed data". Indeed, the point is that we assume that the Navier-Stokes velocity field $[U^\ensuremath{\varepsilon}, V^\ensuremath{\varepsilon}]$ attains the expansion \eqref{exp:base} initially at $\{x = 0\}$, and then aim to prove that this expansion propagates for $x > 0$. Given this, there are two categories of ``prescribed data" for the problem at hand:
\begin{itemize}
\item[(1)] the inviscid Euler profiles,
\item[(2)] initial datum at $\{x = 0\}$ (for relevant quantities from \eqref{exp:base}).
\end{itemize}
We shall now describe the datum that we take for our setting. First, we take the inviscid Euler profile to be
\begin{align} \label{choice:Euler}
[u^0_E, v^0_E, P^0_E] := [1, 0, 0]
\end{align}
as given. We have selected \eqref{choice:Euler} as the simplest shear flow with which to work. Our analysis can be extended with relatively small (but cumbersome) modifications to general Euler shear flows of the form
\begin{align}
[u^0_E, v^0_E, P^0_E] := [u^0_E(Y), 0, 0],
\end{align}
under mild assumptions on the shear profile $u^0_E(Y)$.
Apart from prescribing the outer Euler profile, we also get to pick ``Initial datum", that is at $\{x = 0\}$ of various terms in the expansion \eqref{exp:base}. Specifically, the prescribed initial datum comes in the form of the functions:
\begin{align} \label{datum:given}
u^i_P|_{x = 0} =: U^i_P(y), \qquad v^j_E|_{x = 0} =: V^j_E(Y),
\end{align}
for $i = 0,...,N_1$, and $j = 1,..,N_1$. Note that we do not get to prescribe, at $\{x = 0\}$, all of the terms appearing in \eqref{exp:base}. On the one hand, to construct $[u^i_p, v^i_p]$, we use that $u^i_p$ obeys a degenerate parabolic equation, with $x$ occupying the time-like variable and $y$ the space-like variable and that
$v^i_p$ can be recovered from $u^i_p$ via the divergence-free condition. Therefore, only $u^i_p|_{x = 0}$ is necessary to determine these quantities.
On the other hand, to construct the Euler profiles $[u^i_E, v^i_E]$ for $i = 1,\dots,N_1$, we use an elliptic problem for $v^i_E$ (in the special case of \eqref{choice:Euler}, it is in fact $\Delta v^i_E = 0$). As such, we prescribe the datum for $v^i_E$, as is displayed in \eqref{datum:given}, and then recover $u^i_E$ via the divergence-free condition. Therefore, only $v^i_E|_{x = 0}$ is necessary to determine these quantities.
We wish to emphasize that although \eqref{exp:base} is a classical ansatz (in fact, Prandtl's original proposal \cite{Prandtl} was in precisely this setting of 2D stationary flows over a plate), the rigorous justification of \eqref{exp:base} in the steady setting has only been obtained very recently, see \cite{GI1}, \cite{GI3}, \cite{Varet-Maekawa}, \cite{GN} for instance. Moreover, all of these works require crucially that $x << 1$ (the analog of ``short-time"), in order to extract a subtle and delicate stability mechanism as $\ensuremath{\varepsilon} \rightarrow 0$ of $U^\ensuremath{\varepsilon} \rightarrow \bar{u}$. This stability mechanism was based, in \cite{GI1}, \cite{GI3}, \cite{GN}, on virial-type estimates for certain carefully selected weighted quantities \textit{that are compatible with the system aspect of the Navier-Stokes equations.} In the case of \cite{Varet-Maekawa}, it was based on a novel Rayleigh-Airy-Airy iteration, which also required $x << 1$. Prior to the present work, it is far from clear how to extract a stability mechanism (again, $U^\ensuremath{\varepsilon} \rightarrow \bar{u}$ as $\ensuremath{\varepsilon} \rightarrow 0$) that applies for even $x \sim O(1)$, let alone as $x \rightarrow \infty$ with precise asymptotics.
An alternative point of view is on the Fourier side in the tangential variable $x$. The results \cite{GI1}, \cite{GI3}, \cite{Varet-Maekawa}, \cite{GN} extract a stability mechanism, $U^\ensuremath{\varepsilon} \rightarrow \bar{u}$, which relies crucially on including only high frequencies in $x$ (due to the requirement of $x << 1$) and excluding even frequencies of order $1$. Prior to our work, it was not known whether these known stability mechanisms can work in the presence of frequencies of order $1$ ($x \sim 1$), let alone allowing for zero frequency issues ($x \rightarrow \infty$).
\subsection{Asymptotics as $x \rightarrow \infty$} \label{subsection:asy:x}
We will now discuss more precisely the role of the $x$-variable, specifically emphasizing the role that $x$ plays as a ``time-like'' variable, controlling the ``evolution'' of the fluid. The importance of studying the large $x$ behavior of both the Prandtl equations and the Navier-Stokes equations is not just mathematical (in analogy with proving global wellposedness/decay versus finite-$x$ blowup), but is also of importance physically due to the possibility of boundary layer separation, which is a large $x$ phenomenon (which was noted by Prandtl himself in his original 1904 paper).
We shall discuss first the large-$x$ asymptotics at the level of the Prandtl equations, \eqref{BL:0:intro:intro} - \eqref{BL:1:intro:intro}, which govern $[\bar{u}^0_p, \bar{v}^0_p]$. It turns out that there are two large-$x$ regimes for $[\bar{u}^0_p, \bar{v}^0_p]$, depending on the sign of the Euler pressure gradient:
\begin{itemize}
\item[(1)] Favorable pressure gradient, $\ensuremath{\partial}_x P^E \le 0$: $[\bar{u}^0_p, \bar{v}^0_p]$ exists globally in $x$, and becomes asymptotically self-similar,
\item[(2)] Unfavorable pressure gradient, $\ensuremath{\partial}_x P^E > 0$: $[\bar{u}^0_p, \bar{v}^0_p]$ may form a finite-$x$ singularity, known as ``separation".
\end{itemize}
In this work, our choice of $[1, 0]$ for the outer Euler flow guarantees that we are in setting (1), that of a favorable pressure gradient.
The dichotomy above was introduced by Oleinik, \cite{Oleinik}, \cite{Oleinik1}, who established the first mathematically rigorous results on the Cauchy problem \eqref{BL:0:intro:intro} - \eqref{BL:1:intro:intro}. Indeed, Oleinik established that solutions to \eqref{BL:0:intro:intro} - \eqref{BL:1:intro:intro} are locally (in $x$) well-posed in both regimes (1) and (2), and globally well-posed in regime (1) (under suitable hypotheses on the datum, which we do not delve into at this stage).
Now, we investigate what it means for $[\bar{u}^0_p, \bar{v}^0_p]$ to become asymptotically self-similar. In order to describe this behavior more quantitatively, we need to introduce the Blasius solutions. Four years after Prandtl's seminal 1904 paper, H. Blasius introduced the (by now) famous ``Blasius boundary layer" in \cite{Blasius}, which takes the following form
\begin{align} \label{Blasius:1}
&[\bar{u}_\ast^{x_0}, \bar{v}_\ast^{x_0}] = [f'(z), \frac{1}{\sqrt{x + x_0}} (z f'(z) - f(z)) ], \\ \label{Blasius:2}
&z := \frac{y}{\sqrt{x + x_0}} \\ \label{Blasius:3}
&ff'' + f''' = 0, \qquad f'(0) = 0, \qquad f'(\infty) = 1, \qquad \frac{f(z)}{z} \xrightarrow{z \rightarrow \infty} 1,
\end{align}
where above, $f' = \ensuremath{\partial}_z f(z)$ and $x_0$ is a free parameter. Physically, $x_0$ has the meaning that at $x = - x_0$, the fluid interacts with the ``leading edge" of, say, a plate (hence the singularity at $x = - x_0$). Our analysis will treat any fixed $x_0 > 0$ (one can think, without loss of generality, that $x_0 = 1$). In fact, we will make the following notational convention which enables us to omit rewriting $x_0$ repeatedly:
\begin{align}
[\bar{u}_\ast, \bar{v}_\ast] := [\bar{u}_\ast^{1}, \bar{v}_\ast^{1}].
\end{align}
We emphasize that the choice of $1$ above could be replaced with any positive number, without loss of generality.
The Blasius solutions, $[\bar{u}_\ast^{x_0}, \bar{v}_\ast^{x_0}]$ are distinguished solutions to the Prandtl equations in several senses. First, physically, they have demonstrated remarkable agreement with experiment (see \cite{Schlicting} for instance). Mathematically, their importance is two-fold. First, they are self-similar, and second, they act as large-$x$ attractors for the Prandtl dynamic. Indeed, a classical result of Serrin, \cite{Serrin}, states:
\begin{align} \label{simple:Prandtl}
\lim_{x \rightarrow \infty} \| \bar{u}^0_p - \bar{u}_\ast \|_{L^\infty_y} = 0
\end{align}
for a general class of solutions, $[\bar{u}^0_p, \bar{v}^0_p]$ of \eqref{BL:0:intro:intro}.
This was revisited by the first author in the work \cite{IyerBlasius}, who established a refined description of the above asymptotics, in the sense
\begin{align} \label{asy:blas:1}
\| \bar{u}^0_p - \bar{u}_\ast \|_{L^\infty_y} \lesssim o(1) \langle x \rangle^{- \frac 1 2 + \sigma_\ast}, \text{ for any } 0 < \sigma_\ast << 1,
\end{align}
which is the essentially optimal decay rate from the point of view of regarding $\bar{u}^0_p$ as a parabolic equation with one spatial dimension. The work of \cite{IyerBlasius} used energy methods and virial-type identities, whereas the work of \cite{Serrin} was based on maximum principle methods.
The case of (2) above (the setting of unfavorable pressure gradient) has been treated in the work of \cite{MD} as well as in the paper of \cite{Zhangsep} for the Prandtl equation with $\ensuremath{\partial}_x P^E > 0$ (which appears as a forcing term on the right-hand side of \eqref{BL:0:intro:intro} - \eqref{BL:1:intro:intro} in their setting). These results establish the physically important phenomenon of separation, which occurs when $\ensuremath{\partial}_y \bar{u}^0_p(x, 0) = 0$ for some $x > 0$, even though the datum starts out with the monotonicity $\ensuremath{\partial}_y \bar{u}^0_p(0, 0) > 0$.
Our main theorem below establishes two pieces of asymptotic information as $x \rightarrow \infty$:
\begin{align} \label{main:estimate}
\| u^\ensuremath{\varepsilon} - \bar{u}^0_p \|_{L^\infty_y} \lesssim \sqrt{\ensuremath{\varepsilon}} \langle x \rangle^{- \frac 1 4 + \sigma_\ast}, \qquad \| \bar{u}^0_p - \bar{u}_\ast \|_{L^\infty_y} \le o(1) \langle x \rangle^{- \frac 1 4 + \sigma_\ast}.
\end{align}
This means that the full Navier-Stokes velocity field undergoes this type of stabilization as $x \rightarrow \infty$. As a by-product, it recovers (using different techniques than \cite{Serrin} and \cite{IyerBlasius}) stability information of the form \eqref{asy:blas:1}. \textit{We emphasize that this is the first result which characterizes the asymptotic in $x$ behavior for the full Navier-Stokes velocity field.}
We would also like to emphasize that establishing a large $x$ stabilization and decay mechanism for the Navier-Stokes equations, uniformly in the viscosity, requires a completely new functional framework for the Navier-Stokes equations which is absent from the Prandtl setting. Seeing as the stability for $x << 1$ (``short-time'') of the boundary layer was only recently established in \cite{GI1}, \cite{GI3}, \cite{Varet-Maekawa}, it is far from clear that the stability mechanism as $\ensuremath{\varepsilon} \rightarrow 0$ (which from the various works \cite{GI1}, \cite{GI3}, \cite{Varet-Maekawa}, \cite{GN} is already quite involved and delicate for small $x << 1$) is at all consistent with the refined asymptotic in $x$ behavior predicted by \eqref{asy:blas:1}.
Therefore, we interpret our result, stated below in Theorem \ref{thm.main}, as a confluence of two stability mechanisms: stability as $U^\ensuremath{\varepsilon} \rightarrow \bar{u}$, and the large $x$ stability observed in the substantially simpler Prandtl equations, \eqref{simple:Prandtl}. Prior to our work, it is far from clear how these two mechanisms interact (or if they even \textit{can} interact favorably), specifically because prior known results which have demonstrated stability $U^\ensuremath{\varepsilon} \rightarrow \bar{u}$ require crucially that $x << 1$.
\begin{remark} One may notice when comparing the estimate \eqref{asy:blas:1} with the second estimate in \eqref{main:estimate} that the estimate from \eqref{main:estimate} is weaker by a factor of $\langle x \rangle^{\frac 1 4}$ in the decay rate. This is simply due to the fact that in the present paper, our aim is not to obtain the sharpest possible asymptotics of $\bar{u}^0_p \rightarrow \bar{u}_\ast$, but rather to close the first estimate from \eqref{main:estimate}. It is certainly possible to optimize our arguments for $\bar{u}^0_p$ to recover the sharper decay rate, but we have opted for simplicity in this matter, as the second estimate from \eqref{main:estimate} suffices to carry out our analysis of $u^\ensuremath{\varepsilon} \rightarrow \bar{u}^0_p$.
\end{remark}
\subsection{Main Theorem}
The main result of our work is the following.
\begin{theorem} \label{thm.main} Fix $N_{1} = 400$ and $N_2 = 200$ in \eqref{exp:base}. Fix the leading order Euler flow to be
\begin{align}
[u^0_E, v^0_E, P^0_E] := [1, 0, 0].
\end{align}
Assume the following pieces of initial data at $\{x = 0\}$ are given for $i = 0,\dots,N_{1}$, and $j = 1,\dots,N_1$,
\begin{align} \label{datum:in}
u^i_p|_{x = 0} =: U^i_p(y), && v^j_E|_{x = 0} =: V_E^j(Y)
\end{align}
where we make the following assumptions on the initial datum \eqref{datum:in}:
\begin{itemize}
\item[(1)] For $i = 0$, the boundary layer datum $\bar{U}^0_p(y)$ is in a neighborhood of Blasius, defined in \eqref{Blasius:1}. More precisely, we will assume
\begin{align} \label{near:blasius}
\| (\bar{U}^0_p(y) - \bar{u}_\ast(0, y) ) \langle y \rangle^{m_0} \|_{C^{\ell_0}} \le \delta_\ast,
\end{align}
where $0 < \delta_\ast << 1$ is small relative to universal constants, where $m_0, \ell_0$, are large, explicitly computable numbers. Assume also the difference $\bar{U}^0_p(y) - \bar{u}_\ast(0, y)$ satisfies generic parabolic compatibility conditions at $y = 0$.
\item[(2)] For $i = 1,\dots,N_1$, the boundary layer datum $U^i_p(\cdot)$ is sufficiently smooth and decays rapidly:
\begin{align} \label{hyp:1}
\| U^i_p \langle y \rangle^{m_i} \|_{C^{\ell_i}} \lesssim 1,
\end{align}
where $m_i, \ell_i$ are large, explicitly computable constants (for instance, we can take $m_0 = 10,000$, $\ell_0 = 10,000$ and $m_{i+1} = m_i - 5$, $\ell_{i + 1} = \ell_i - 5$), and satisfies generic parabolic compatibility conditions at $y = 0$.
\item[(3)] The Euler datum $V^i_E(Y)$ satisfies generic elliptic compatibility conditions.
\item[(4)] Assume Dirichlet datum for the remainders, that is
\begin{align} \label{Dirichlet}
[u, v]|_{x = 0} = [u, v]|_{x = \infty} = 0.
\end{align}
\end{itemize}
Then there exists an $\ensuremath{\varepsilon}_0 << 1$ small relative to universal constants such that for every $0 < \ensuremath{\varepsilon} \le \ensuremath{\varepsilon}_0$, there exists a unique solution $(u^\ensuremath{\varepsilon}, v^\ensuremath{\varepsilon})$ to system \eqref{NS:1} - \eqref{NS:3}, which satisfies the expansion \eqref{exp:base} in the quadrant, $\mathcal{Q}$. Each of the intermediate quantities in the expansion \eqref{exp:base} satisfies the following estimates for $i = 1,...,N_1$
\begin{align}
&\| \ensuremath{\partial}_x^k \ensuremath{\partial}_y^j u^i_p \langle z \rangle^M \|_{L^\infty_y} \le C_{M, k, j} \langle x \rangle^{- \frac 1 4 - k - \frac j 2 + \sigma_{\ast}} && \| \ensuremath{\partial}_x^k \ensuremath{\partial}_y^j v^i_p \langle z \rangle^M \|_{L^\infty_y} \le C_{M, k, j} \langle x \rangle^{- \frac 3 4 - k - \frac j 2 + \sigma_\ast} \\
&\| \ensuremath{\partial}_x^k \ensuremath{\partial}_Y^j u^i_E \|_{L^\infty_Y} \le C_{k,j} \langle x \rangle^{- \frac 1 2 - k - j} && \| \ensuremath{\partial}_x^k \ensuremath{\partial}_Y^j v^i_E \|_{L^\infty_Y} \le C_{k,j} \langle x \rangle^{- \frac 1 2 - k - j},
\end{align}
where $\sigma_\ast := \frac{1}{10,000}$. Finally, the remainder $(u, v)$ exists globally in the quadrant, $\mathcal{Q}$, and satisfies the following estimates
\begin{align}
\| u, v \|_{\mathcal{X}} \lesssim 1,
\end{align}
where the space $\mathcal{X}$ will be defined precisely in \eqref{X:norm}.
\end{theorem}
As an immediate corollary, we obtain the following asymptotics, which are valid uniformly for $\ensuremath{\varepsilon} \le \ensuremath{\varepsilon}_0$ and for all $x > 0$.
\begin{corollary} The solution $(u^\ensuremath{\varepsilon}, v^\ensuremath{\varepsilon})$ to \eqref{NS:1} - \eqref{NS:3} satisfies the following asymptotics
\begin{align}
\| u^\ensuremath{\varepsilon} - (1 + u^0_p) \|_{L^\infty_y} \lesssim \sqrt{\ensuremath{\varepsilon}} \langle x \rangle^{- \frac 1 4 + \sigma_\ast} \qquad \| v^\ensuremath{\varepsilon} - \sqrt{\ensuremath{\varepsilon}} (v^0_p + v^1_E) \|_{L^\infty_y} \lesssim \ensuremath{\varepsilon} \langle x \rangle^{- \frac 1 2}.
\end{align}
\end{corollary}
\begin{remark}[Generalizations] There are several ways in which our result, as stated in Theorem \ref{thm.main}, can easily be generalized, but we have foregone this generality for the sake of clarity and concreteness. We list these here.
\begin{itemize}
\item[(1)] As stated, the leading order boundary layer, $\bar{u}^0_p$ will be near the self-similar Blasius profile. This is ensured by the assumption \eqref{near:blasius} (and proven rigorously in our construction). In reality, our proof uses only mild properties of the Blasius profile, and we can therefore generalize the type of boundary layers we consider. The most general class of boundary layers that we can treat would be those (globally defined) $\bar{u}^0_p$ that can be expressed as $\bar{u}^0_p = \tilde{u}^0_p + \hat{u}^0_{p}$, where $\tilde{u}^0_{pyy} \le 0$, $\tilde{u}^0_{py} \ge 0$, $\tilde{u}^0_p$ satisfies estimates \eqref{water:1} - \eqref{v:blasius:2}, and $\hat{u}^0_p$ satisfies estimates \eqref{water:65}.
\item[(2)] We have taken, again to make computations simpler, the leading order Euler vector field $[u_E, v_E] = [1, 0]$. It would not require new ideas to generalize our work to general shear flows $[u_E, v_E] = [b(Y), 0]$, where $b(Y)$ satisfies mild hypotheses (for instance, $b \in C^\infty, \frac 1 2 \le b \le \frac 3 2$, $\ensuremath{\partial}_Y^k b(Y)$ decays rapidly as $Y \rightarrow \infty$ for $k \ge 1$), though it would make the expressions more complicated. The case of Euler flows that are not shear flows, however, poses more challenges and would require some new ideas.
\item[(3)] The datum assumed at $\{x = 0\}$ for the remainders, \eqref{Dirichlet}, can easily be generalized to any smooth vector-field, $[u, v]|_{x = 0} = [b_1(y), \sqrt{\ensuremath{\varepsilon}} b_2(y)]$, where $b_1, b_2$ are smooth, rapidly decaying functions.
\end{itemize}
\end{remark}
It is convenient to think of Theorem \ref{thm.main} in two parts: the first is a result on the \textit{construction of the approximate solution}, $[\bar{u}, \bar{v}]$ from \eqref{exp:base}, given all of the necessary initial/ boundary datum described in Theorem \ref{thm.main}. The second is a result on \textit{asymptotic stability} of $[\bar{u}, \bar{v}]$, which amounts to controlling the differences from \eqref{exp:base}, $u := \ensuremath{\varepsilon}^{- \frac{N_2}{2}} (U^\ensuremath{\varepsilon} - \bar{u}), v := \ensuremath{\varepsilon}^{- \frac{N_2}{2}} (V^\ensuremath{\varepsilon} - \bar{v})$. We state these two steps as two distinct theorems, which, when combined, yield the result of Theorem \ref{thm.main}.
\begin{theorem}[Construction of Approximate Solution] \label{thm:approx} Assume the boundary and initial conditions are prescribed as specified in Theorem \ref{thm.main}. Define $\sigma_\ast = \frac{1}{10,000}$. Then for $i = 1,...,N_1$, for $M \le m_i$ and $2k+j \le \ell_i$, the quantities $[u^i_p, v^i_p]$ and $[u^i_E, v^i_E]$ exist globally, $x > 0$, and the following estimates are valid
\begin{align} \label{blas:conv:1}
&\| \ensuremath{\partial}_x^k \ensuremath{\partial}_y^j ( \bar{u}^0_p - \bar{u}_\ast) \langle z \rangle^M \|_{L^\infty_y} \lesssim \delta_\ast \langle x \rangle^{- \frac 1 4 - k - \frac j 2 + \sigma_\ast}, \\ \label{blas:conv:2}
&\| \ensuremath{\partial}_x^k \ensuremath{\partial}_y^j ( \bar{v}^0_p - \bar{v}_\ast) \langle z \rangle^M \|_{L^\infty_y} \lesssim \delta_\ast \langle x \rangle^{- \frac 3 4 - k - \frac j 2 + \sigma_\ast} \\ \label{water:65}
&\| \langle z \rangle^M \ensuremath{\partial}_x^k \ensuremath{\partial}_y^j u_p^{i} \|_{L^\infty_y} \lesssim \langle x \rangle^{- \frac 1 4 - k - \frac j 2 + \sigma_\ast} \\ \label{water:54}
&\| \langle z \rangle^M \ensuremath{\partial}_x^k \ensuremath{\partial}_y^j v_p^{i} \|_{L^\infty_y} \lesssim \langle x \rangle^{- \frac 3 4 - k - \frac j 2 + \sigma_\ast}, \\ \label{water:78}
&\|(Y \ensuremath{\partial}_Y)^l \ensuremath{\partial}_x^k \ensuremath{\partial}_Y^j v^{i}_E \|_{L^\infty_Y} \lesssim \langle x \rangle^{- \frac 1 2 - k - j}, \\ \label{water:88}
&\| (Y \ensuremath{\partial}_Y)^l \ensuremath{\partial}_x^k \ensuremath{\partial}_Y^j u^{i}_E \|_{L^\infty_Y} \lesssim \langle x \rangle^{- \frac 1 2 - k - j}.
\end{align}
Moreover, the more precise estimates stated in Assumptions \eqref{assume:1} -- \eqref{assume:3} are valid. Finally, define the contributed forcing by:
\begin{align}
\begin{aligned} \label{forcing:remainder}
F_R := &(U^\ensuremath{\varepsilon} U^\ensuremath{\varepsilon}_x + V^\ensuremath{\varepsilon} U^\ensuremath{\varepsilon}_y + P^\ensuremath{\varepsilon}_x - \Delta_\ensuremath{\varepsilon} U^\ensuremath{\varepsilon} ) - (\bar{u} \bar{u}_x + \bar{v} \bar{u}_y + \bar{P}_x - \Delta_\ensuremath{\varepsilon} \bar{u}) \\
G_R := &(U^\ensuremath{\varepsilon} V^\ensuremath{\varepsilon}_x + V^\ensuremath{\varepsilon} V^\ensuremath{\varepsilon}_y + \frac{P^\ensuremath{\varepsilon}_y}{\ensuremath{\varepsilon}} - \Delta_\ensuremath{\varepsilon} V^\ensuremath{\varepsilon} ) - (\bar{u} \bar{v}_x + \bar{v} \bar{v}_y + \frac{\bar{P}_y}{\ensuremath{\varepsilon}} - \Delta_\ensuremath{\varepsilon} \bar{v}).
\end{aligned}
\end{align}
Then the following estimates hold on the contributed forcing:
\begin{align} \label{est:forcings:part1}
\| \ensuremath{\partial}_x^j \ensuremath{\partial}_y^k F_R \langle x \rangle^{\frac{11}{20}+ j + \frac k 2} \| + \sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\partial}_x^j \ensuremath{\partial}_y^k G_R \langle x \rangle^{\frac{11}{20}+ j + \frac k 2} \| \le \ensuremath{\varepsilon}^{5}.
\end{align}
\end{theorem}
\begin{theorem}[Stability of Approximate Solution]\label{thm:2} Assume the boundary and initial conditions are prescribed as specified in Theorem \ref{thm.main}. There exists a unique, global solution, $[u, v]$, to the problem \eqref{vel:form} - \eqref{vel:eqn:2}, where the modified unknowns $(U, V)$ defined through \eqref{vm:1} satisfy the estimate
\begin{align}
\| U, V \|_{\mathcal{X}} \lesssim \ensuremath{\varepsilon}^{5},
\end{align}
where the $\mathcal{X}$ norm is defined precisely in \eqref{X:norm}.
\end{theorem}
Of these two steps, by far the most challenging is the second step, the stability analysis of Theorem \ref{thm:2}. \textit{This paper is devoted exclusively to this stability analysis,} whereas the first step, construction of approximate solutions, is obtained in a companion paper, \cite{IM21}. As a result, for the remainder of this paper, we will take Theorem \ref{thm:approx} as given and use it as a ``black-box". This serves to more effectively highlight the specific new techniques we develop for this stability result.
\subsection{Existing Literature} \label{existing}
The boundary layer theory originated with Prandtl's seminal 1904 paper, \cite{Prandtl}. First, we would like to emphasize that this paper presented the boundary layer theory in precisely the present setting: for 2D, steady flows over a plate (at $Y = 0$). In addition, Prandtl's original paper discussed the physical importance of understanding \eqref{exp:base} for large $x$, due to the possibility of boundary layer separation.
We will distinguish between two types of questions that are motivated by the ansatz, \eqref{ansatz:1:1}. First, there are questions regarding the description of the leading order boundary layer, $[\bar{u}^0_p, \bar{v}^0_p]$, and second, there are questions regarding the study of the $O(\sqrt{\ensuremath{\varepsilon}})$ remainder, which, equivalently, amounts to questions regarding the validity of the asymptotic expansion \eqref{ansatz:1:1}.
A large part of the results surrounding the system \eqref{BL:0:intro:intro} - \eqref{BL:1:intro:intro} were already discussed in Section \ref{subsection:asy:x}, although the results discussed there were more concerned with the large $x$ asymptotic behavior. We point the reader towards \cite{MD} for a study of separation in the steady setting, using modulation and blowup techniques. For local-in-$x$ behavior, let us supplement the references from Section \ref{subsection:asy:x} with the results of \cite{GI2}, which established higher regularity for \eqref{BL:0:intro:intro} - \eqref{BL:1:intro:intro} through energy methods, and the recent work of \cite{Zhifei:smooth} which obtains global $C^\infty$ regularity using maximum principle methods.
We now discuss the validity of the ansatz \eqref{ansatz:1:1}. While rigorous results on the Prandtl equation itself date all the way back to Oleinik, proving \textit{stability} of the boundary layer in the inviscid limit has proven to be substantially more difficult and has only recently been achieved in the steady setting. Moreover, all of the known stability results have been for $x << 1$ (the analog of ``short time''). The classical setting we consider here, with the no-slip condition, was first treated, locally in the $x$ variable, by the works \cite{GI1} - \cite{GI2}, \cite{Varet-Maekawa}, and the related work of \cite{GI3}. These works of \cite{GI1} - \cite{GI2} are distinct from that of \cite{Varet-Maekawa} in the sense that the main concern of \cite{GI1} - \cite{GI2} is $x$-dependent boundary layer profiles, in particular the classical Blasius solution. On the other hand, the work of \cite{Varet-Maekawa} is mainly concerned with shear solutions $(U(y), 0)$ to the forced Prandtl equations (shear flows are not solutions to the homogeneous Prandtl equations), which allows for Fourier analysis in the $x$ variable. Both of these works are \textit{local-in-$x$} results, which can demonstrate the validity of expansion \eqref{exp:base} for $0 \le x \le L$, where $L << 1$ is small (but of course, fixed relative to the viscosity $\ensuremath{\varepsilon}$). We also mention the recent work of \cite{Gao-Zhang}, which generalized the work of \cite{GI1} - \cite{GI2} to the case of Euler flows which are not shear for $0 < x < L << 1$.
We also point the reader towards the works \cite{GN}, \cite{Iyer}, \cite{Iyer2a} - \cite{Iyer2c}, and \cite{Iyer3}. All of these works are under the assumption of a moving boundary at $\{Y = 0\}$, which while they face the difficulty of having a transition from $Y = 0$ to $Y = \infty$, crucially eliminate the degeneracy of $\bar{u}^0_p$ at $\{Y =0\}$, which is a major difficulty posed by the boundary layer theory. The work of \cite{Iyer2a} - \cite{Iyer2c} is of relevance to this paper, as the question of global in $x$ stability was considered (again, under the assumption of a moving $\{Y = 0\}$ boundary, which significantly simplifies matters).
For unsteady flows, there is also a large literature studying expansions of the type \eqref{exp:base}. We refrain from discussing this at too much length because the unsteady setting is quite different from the steady setting. Rather, we point the reader to (an incomplete) list of references. First, in the analyticity setting, for small time, the seminal works of \cite{Caflisch1}, \cite{Caflisch2} establish the stability of expansions \eqref{exp:base}. This was extended to the Gevrey setting in \cite{DMM}, \cite{DMM20}. The work of \cite{Mae} establishes stability under the assumption of the initial vorticity being supported away from the boundary. The reader should also see the related works \cite{LXY}, \cite{Taylor}, \cite{TWang}, \cite{Wang}.
When the regularity setting goes from analytic/ Gevrey to Sobolev, there have also been several works in the opposite direction, which demonstrate, again in the unsteady setting, that expansion of the type \eqref{exp:base} should not be expected. A few works in this direction are \cite{Grenier}, \cite{GGN1}, \cite{GGN2}, \cite{GGN3}, \cite{GN2}, as well as the remarkable series of recent works of \cite{GrNg1}, \cite{GrNg2}, \cite{GrNg3} which settle the question and establish invalidity in Sobolev spaces of expansions of the type (\ref{exp:base}). The related question of $L^2$ (in space) convergence of Navier-Stokes flows to Euler has been investigated by many authors, for instance in \cite{CEIV}, \cite{CKV}, \cite{CV}, \cite{Kato}, \cite{Masmoudi98}, and \cite{Sueur}.
There is again the related matter of wellposedness of the unsteady Prandtl equation. This investigation was initiated by \cite{Oleinik}, who obtained global in time solutions on $[0, L] \times \mathbb{R}_+$ for $L << 1$ and local in time solutions for any $L < \infty$, under the crucial monotonicity assumption $\ensuremath{\partial}_y u|_{t = 0} > 0$. The $L << 1$ was removed by \cite{Xin} who obtained global in time weak solutions for any $L < \infty$. These works relied upon the Crocco transform, which is only available under the monotonicity condition. Also under the monotonicity condition, but without using the Crocco transform, \cite{AL} and \cite{MW} obtained local in time existence. \cite{AL} introduced a Nash-Moser type iterative scheme, whereas \cite{MW} introduced a good unknown which enjoys an extra cancellation and obeys good energy estimates. The related work of \cite{KMVW} removes monotonicity and replaces it with multiple monotonicity regions.
Without monotonicity of the datum, the wellposedness results are largely in the analytic or Gevrey setting. Indeed, \cite{GVDie}, \cite{GVM}, \cite{Vicol}, \cite{Kuka}, \cite{Lom}, \cite{LMY}, \cite{Caflisch1} - \cite{Caflisch2}, \cite{IyerVicol} are some results in this direction. Without assuming monotonicity, in Sobolev spaces, the unsteady Prandtl equations are, in general, illposed: \cite{GVD}, \cite{GVN}. Finite time blowup results have also been obtained in \cite{EE}, \cite{KVW}, \cite{Hunter}. Moreover, the issue of boundary layer separation in the unsteady setting has been tackled by the series of works \cite{Collot1}, \cite{Collot2}, \cite{Collot3} using modulation and blowup techniques.
The above discussion is not comprehensive, and we have elected to provide a more in-depth discussion of the steady theory due to its relevance to the present paper. We refer to the review articles \cite{E}, \cite{Temam}, and references therein for a more complete review of other aspects of the boundary layer theory.
We would, however, like to point out that going from results on Prandtl (a \textit{significantly simplified model equation}) to a corresponding stability theorem about Navier-Stokes has proven to be highly nontrivial (and in fact, generically false in the unsteady setting, \cite{GrNg1} -- \cite{GrNg3}). In the time-dependent case, the well-known Tollmien-Schlichting instability (see for instance \cite{GGN2}) shows that there is a \textit{destabilizing effect of the viscosity}, which creates growing modes at the Navier-Stokes level (thus requiring Gevrey/ analytic spaces to prove Navier-Stokes to Prandtl type stability results). Part of the novelty of our work is to provide a framework which is robust enough to simultaneously (1) prove the stability of the boundary layer in Sobolev spaces as $\ensuremath{\varepsilon} \rightarrow 0$ which, even for $x << 1$ in the steady setting, has been done in very few cases (\cite{GI1}, \cite{GI3}, \cite{Varet-Maekawa}, \cite{GN}) and (2) capture the detailed asymptotic in $x$ (analogous in this setting to ``large time") behavior that is known at the level of the Prandtl system.
It is also striking to compare our result to what is expected in the unsteady setting. In the unsteady setting, certain well-known ``geometric" criteria exist, for instance monotonicity, to ensure (local in time) wellposedness in Sobolev spaces for the Prandtl equations. However, these criteria (even if one adds concavity) are not enough to establish a stability result of the type $U^\ensuremath{\varepsilon} \rightarrow \bar{u}$ due to the destabilizations due to the viscosity. For this reason, the sharpest known stability results are in Gevrey spaces (see for instance \cite{DMM}, \cite{DMM20}). Remarkably, in the steady setting, we see that the \textit{same} geometric criteria which guarantee regularity of Prandtl equation (monotonicity and concavity) suffice to prove global stability \textit{in Sobolev spaces} of $U^\ensuremath{\varepsilon} \rightarrow \bar{u}$.
As we will explain below, we will be seeing a ``destabilizing effect'' of the Navier-Stokes equations, although it is distinct from the instabilities of the unsteady setting. In our setting, the Navier-Stokes system requires the appearance of certain singular quantities at $\{y = 0\}$ that do not appear for any analysis of the Prandtl equations. In turn, these quantities correspond to a loss of derivative (specifically of the derivative $x \ensuremath{\partial}_x$) in our energy scheme, for instance seen through \eqref{turn:2} and \eqref{nonlinear:est:intro}. This destabilizing effect is due to the pressure in the Navier-Stokes system, and therefore would clearly not have been seen at the Prandtl level (for instance in \cite{IyerBlasius}). Since the apparent manifestation of this is a loss of $x \ensuremath{\partial}_x$ (in some sense the natural scaling field in the tangential direction), this effect will also not have been observed in any of the $x << 1$ stability results (\cite{GI1}, \cite{Varet-Maekawa}, \cite{GN}). Given the (in a sense, \textit{expected}) destabilizing effect of viscosity, a striking aspect of our work is to, despite this, find a framework that establishes stability both as $\ensuremath{\varepsilon} \rightarrow 0$ and as $x \rightarrow \infty$ in Sobolev spaces.
\subsection{Main Ingredients} \label{section:ideas}
We will now describe the main ideas that enter our analysis. In order to do so, we need to describe the equation that is satisfied by the remainders, $(u, v)$, in the expansion \eqref{exp:base}, which is the following,
\begin{align} \label{vel:eqn:1:intro}
&\mathcal{L}[u, v] + \begin{pmatrix} \ensuremath{\partial}_x \\ \frac{\ensuremath{\partial}_y}{\ensuremath{\varepsilon}} \end{pmatrix} P = \mathcal{N}(u,v) + \text{Forcing }, \qquad u_x + v_y = 0, \text{ on } \mathcal{Q},
\end{align}
where ``Forcing" denotes a forcing term that exists due to the fact that $(\bar{u}, \bar{v})$ is not an exact solution to the Navier-Stokes equations, (we leave it undefined for now, as it does not play a central role in the present discussion), and $\mathcal{N}(u, v)$ contains quadratic terms. The operator $\mathcal{L}[u, v]$ is a vector-valued linearized operator around $[\bar{u}, \bar{v}]$, and is defined precisely via
\begin{align}
\begin{aligned} \label{vel:form:intro}
\mathcal{L}[u, v] := \begin{cases} \mathcal{L}_1 := \bar{u} u_x + \bar{u}_{y} v + \bar{u}_{x} u + \bar{v} u_y - \Delta_\ensuremath{\varepsilon} u \\ \mathcal{L}_2 := \bar{u} v_x + u \bar{v}_{x} + \bar{v} v_y + \bar{v}_{y} v - \Delta_\ensuremath{\varepsilon} v.\end{cases}
\end{aligned}
\end{align}
The main goal in the study of \eqref{vel:eqn:1:intro} is to obtain an estimate of the form $\| u, v \|_{\mathcal{X}} \lesssim 1$, for an appropriately defined space $\mathcal{X}$ (which importantly needs to control the $L^\infty$ norm). We note that this is a variable-coefficients operator, for which Fourier analysis is not conducive for obtaining estimates.
\subsubsection{The New Point of View}
We will discuss our point of view as compared to prior works on the Navier-Stokes to Prandtl stability (\cite{GI1}, \cite{GI3}, \cite{Varet-Maekawa}, \cite{GN}). These prior works are all in the small $x$ regime (equivalently, the high-frequencies in $x$ regime). In some sense, these papers all introduce a transform or change of unknown which factors the vectorial Rayleigh operator, $\begin{pmatrix} \bar{u} \ensuremath{\partial}_x u + \bar{u}_y v \\ \bar{u} \ensuremath{\partial}_x v + \bar{v}_y v \end{pmatrix} = \begin{pmatrix} - \bar{u}^2 \ensuremath{\partial}_y \{ \frac{v}{\bar{u}} \} \\ \bar{u}^2 \ensuremath{\partial}_x \{ \frac{v}{\bar{u}} \} \end{pmatrix}$ as an (almost) divergence-form operator on a modified unknown, $\frac{v}{\bar{u}}$. The primary purpose of the change of unknown is to avoid losing $x$ derivatives (since high frequencies in $x$ were being considered). This gives one starting point to study the vector valued operator \eqref{vel:form:intro} (for instance, to design virial type multipliers), though this point of view breaks down once $x \sim 1$ (or equivalently, when $O(1)$ frequencies in $x$ are introduced). The breakdown occurs due to low-frequency commutators which arise in this process from the diffusive terms.
The new point of view we introduce in this work is the following: there is a mechanism by which the linearized transport operator (Rayleigh) and the diffusion actually ``talk to each-other" in \eqref{vel:form:intro} and, in fact, produce a damping effect as $x \rightarrow \infty$. This mechanism is known in the significantly simpler setting of the Prandtl equations through a change of variables, ``the von-Mise transform", and will be discussed in Point (1) below. Most importantly, we develop a point of view on the Navier-Stokes equations which shows that this mechanism is consistent and cooperates with the mechanism of \textit{stability in $\ensuremath{\varepsilon} \rightarrow 0$} of the boundary layer, which itself, even for $x << 1$, has only recently been understood (see \cite{GI1}, \cite{GI3}, \cite{Varet-Maekawa}, \cite{GN}) through a variety of techniques.
Prior works on the Navier-Stokes to Prandtl stability, \cite{GI1}, \cite{GI3}, \cite{Varet-Maekawa}, \cite{GN} do not capitalize on this feature because the von-Mise mechanism appears much too delicate to interact favorably with the pressure of Navier-Stokes (in fact, this difference between Prandtl and Navier-Stokes is a central simplification of the boundary layer theory)! Indeed, changing coordinates in the Navier-Stokes equations and trying to emulate a von-Mise type transform seems completely fruitless, as the new coordinate system interacts particularly poorly with the pressure terms.
We establish in this work that indeed, the way these two operators ``talk to each other" is robust enough to carry through to the Navier-Stokes setting, which distinguishes our work from the prior results on the stability of boundary layers. However, to carry through this mechanism to the Navier-Stokes setting, we need to take significant steps which are not present in (1) any prior analysis on boundary layer stability (for instance, \cite{GI1}, \cite{GI3}, \cite{Varet-Maekawa}, \cite{GN}) and (2) any Prandtl analysis (in particular, from \cite{IyerBlasius}).
We first list these new steps, and then itemize them for a more thorough discussion. Our first task is to extract, in $(x, y)$ coordinate system, the main mechanism behind the Transport-Diffusion interaction of the von-Mise transform. This is discussed in point (1) below. We identify that the transform \textit{does not work} well with the pressure terms of Navier-Stokes due to a mismatch in the homogeneity of $\bar{u}$, which we correct in our ``renormalized unknowns" \eqref{good:variables:2}, which contain a renormalized version of the von-Mise velocity unknowns, \eqref{real:phi}. Even at this first step, we need to deviate from what is done at the Prandtl level, for instance in \cite{IyerBlasius}. We establish first that these renormalized velocities \textit{still keep enough of the cancellation enjoyed by the traditional von-Mise velocity, \eqref{real:phi}.}
This change in homogeneity, which is particular to the Navier-Stokes setting, subsequently requires the development of a completely new functional framework to close our analysis that has been absent from all prior works. The central details of this framework are discussed in points (4), (5), (6) below. At a very high-level, one can see that certain necessary estimates from \eqref{turn:1} -- \eqref{turn:3} lose derivatives in $x \ensuremath{\partial}_x$, which is, again, absent from any prior Prandtl-type analyses and any local-in-$x$ Navier-Stokes analyses. The loss of derivative is created by excess powers of $\frac{1}{\bar{u}}$ in certain crucial quantities, which again, is due specifically to the Navier-Stokes setting, and can be thought of as an unavoidable ``destabilizing effect" of the viscosity (which is well-known in the unsteady setting and actually precludes a result of the type we are proving from holding in the time-dependent setting). Given this well-known \textit{destabilizing effect of viscosity} from other settings, it is completely non-obvious that this type of loss can be recovered in any framework consistent with the Navier-Stokes equations.
\subsubsection{Specific Discussion of New Ideas}
\begin{itemize}
\item[(1)] \underline{Damping Mechanism of ``Twisted Differences"}: We will extract the ``main part" of $\mathcal{L}_1$, \eqref{vel:form:intro}, as the operator
\begin{align} \label{Lmain:def}
\mathcal{L}_{main} := \bar{u} u_x + \bar{u}_y v - u_{yy}.
\end{align}
One can think of the (scalar-valued) $\mathcal{L}_{main}$ as the ``Prandtl" component of \eqref{vel:form:intro}, though it is simpler than \eqref{vel:form:intro} for a variety of reasons (for instance, it is scalar-valued). To ease the discussion, we also make the assumption that $\bar{u}, \bar{v}$ solve the Prandtl equation, even though in reality this is only true to leading order in $\sqrt{\ensuremath{\varepsilon}}$. That this is a central object in the study of $\mathcal{L}$ itself is well known, and has been discussed extensively in the works of the past several years, such as \cite{GI1}, \cite{Varet-Maekawa}, \cite{IyerBlasius}.
The perspective taken in \cite{GI1} is to view the operator $\mathcal{L}_{main}$ as being comprised of two separate operators, the Rayleigh piece, $\bar{u} u_x + \bar{u}_y v$, and the diffusion, $- u_{yy}$. Crucially, \cite{GI1} was able to establish that one could obtain coercivity of $\mathcal{L}_{main}$ for $0 \le x \le L$ for $L$ small, using a new ``quotient estimate" by applying the multiplier $\ensuremath{\partial}_x \frac{v}{\bar{u}}$ to the $x$-differentiated version of $\mathcal{L}_{main}$. We also draw a parallel to the approach of \cite{Varet-Maekawa} where their ``Rayleigh-Airy iteration" is comprised of viewing the Rayleigh piece of $\mathcal{L}_{main}$, and the Airy (or diffusive) piece as two separate operators.
Using this perspective, these prior results are able to generate inequalities of the form $\|U, V \|_{X_0} \lesssim L \|U, V \|_{X_{\frac 1 2}}$ and $\|U, V \|_{X_{\frac 1 2}} \lesssim \|U, V \|_{X_0}$ (for appropriately defined spaces $X_0, X_{\frac 1 2}$, not necessarily the ones we have selected here). For this reason, the work \cite{GI1} requires $0 < L << 1$ to close their scheme.
There is, in fact, a mechanism by which both components of $\mathcal{L}_{main}$ actually ``talk to each other" (and, in fact, this is a damping mechanism as $x \rightarrow \infty$). This link between the two components of $\mathcal{L}_{main}$ is provided by way of a \textit{change of unknown} at the velocity level.
\hspace{3 mm} We may re-interpret $\mathcal{L}_{main}$ as the same operator which controls the decay displayed in the Prandtl system, namely \eqref{asy:blas:1}. That is, we temporarily regard the $\bar{u}$ as a background Prandtl profile, say Blasius, and $u$ is to represent the difference between Blasius and some other Prandtl profile, $\bar{u}^0_p$. Schematically, replace
\begin{align}
&\bar{u} \text{ in } \eqref{Lmain:def} \rightarrow \bar{u}_\ast, \qquad u \text{ in } \eqref{Lmain:def} \rightarrow \bar{u}^0_p - \bar{u}_\ast.
\end{align}
To obtain \eqref{asy:blas:1}, one introduces the change of variables and change of unknowns:
\begin{align} \label{def:phi:1:1}
\phi(x, \psi) := |\bar{u}^0_p(x, \psi)|^2 - |\bar{u}_\ast(x, \psi)|^2,
\end{align}
where $\psi$ is the associated stream function. First, we will clarify the above abuse of notation. Indeed, to understand \eqref{def:phi:1:1}, we need to define the inverse map via the relation
\begin{align}
(\psi, \bar{u}^0_p) \mapsto y = y(\psi; \bar{u}^0_p) \iff \psi = \int_0^{y(\psi; \bar{u}^0_p)} \bar{u}^0_p.
\end{align}
Then the abuse of notation from \eqref{def:phi:1:1} really means the following
\begin{align} \label{compare:y}
\phi(x, \psi) := |\bar{u}^0_p(x, y(\psi; \bar{u}^0_p))|^2 - |\bar{u}_\ast(x, y(\psi; \bar{u}_\ast))|^2,
\end{align}
which we interpret as a \textit{``twisted subtraction"} because we wish to compare $\bar{u}^0_p$ and $\bar{u}_\ast$ at two different $y$ values, depending on the solutions themselves.
In the new, nonlinear, coordinate system one obtains the equation,
\begin{align} \label{coordinates:NL}
\ensuremath{\partial}_x \phi - \bar{u}^0_p \ensuremath{\partial}_{\psi \psi} \phi + A \phi = 0, \qquad A := - 2 \frac{\ensuremath{\partial}_{yy} \bar{u}_\ast}{\bar{u}_\ast \bar{u}^0_p},
\end{align}
whenever $\bar{u}^0_p$ and $\bar{u}_\ast$ satisfy the Prandtl equation. Assuming now $\ensuremath{\partial}_{yy} \bar{u}_\ast \le 0$, which is true for the Blasius solution $\bar{u}_\ast$, and that $\ensuremath{\partial}_{yy} \bar{u}^0_p \le 0$ (which \textit{a-posteriori} becomes true, up to harmless remainders, upon controlling $\phi$ in sufficiently strong norms), the above equation admits a good energy estimate:
\begin{align} \label{energy:damping:1}
\frac{\ensuremath{\partial}_x}{2} \int \phi^2 \,\mathrm{d} \psi + \int \bar{u}^0_p |\ensuremath{\partial}_\psi \phi|^2 \,\mathrm{d} \psi - \frac{1}{2} \int \ensuremath{\partial}_{\psi \psi} \bar{u}^0_p |\phi|^2 \,\mathrm{d} \psi + \int A \phi^2 \,\mathrm{d} \psi = 0.
\end{align}
Note that both coefficients $- \frac 1 2 \ensuremath{\partial}_{\psi \psi} \bar{u}^0_p$ and $A$ are nonnegative, and hence contribute damping terms. This is precisely the ``von-Mise" damping mechanism, and at the crux is that all the pieces of $\mathcal{L}_{main}$ talk to each other.
To extend this to the more complicated setting of NS, we need to extract the two crucial points from this mechanism:
\begin{itemize}
\item[(1a)] The precise manner in which one forms the difference between two solutions $\bar{u}^0_p, \bar{u}_\ast$ is via a ``twisted subtraction";
\item[(1b)] This ``twisted subtraction" leads to a damping mechanism as $x \rightarrow \infty$, as in \eqref{energy:damping:1}. We note that one manifestation of \eqref{energy:damping:1} is that one should expect the basic energy norm (called $\| \cdot \|_{X_0}$ here) to be ``stand-alone", as is shown in \eqref{turn:1}.
\end{itemize}
We now aim to make (1a) and (1b) more robust, in a setting that will work well with linearized and nonlinear energy estimates. In particular, we prefer to work in the original coordinate system, $(x, y)$, as this is more suitable for the Navier-Stokes system.
We begin with $(1a)$. Let now $y_1 := y(\psi; \bar{u}^0_p)$ and $y_2 := y(\psi; \bar{u}_\ast)$. A simple computation can show that, to leading order in the difference, $u$, between $\bar{u}^0_p$ and $\bar{u}_\ast$,
\begin{align}
y_1 - y_2 = - \frac{1}{\bar{u}_\ast} \int_0^y u + O(u^2).
\end{align}
Using this linearization, we can re-visit the quantity $\phi$ from \eqref{def:phi:1:1} and express it as
\begin{align} \label{real:phi}
\phi = \bar{u}_\ast (\bar{u}^0_p(x, y_1) - \bar{u}_\ast(x, y_2)) + O(u^2) = \bar{u}_\ast (u - \frac{\ensuremath{\partial}_y \bar{u}_\ast}{\bar{u}_\ast} \psi) + O(u^2),
\end{align}
where $\psi := \int_0^y u$.
The leading order of the formula \eqref{real:phi} then motivates our introduction of the ``Good Variables", at the velocity level. More specifically, we define
\begin{align} \label{good:variables:2}
U := \frac{1}{\bar{u}} (u - \frac{\bar{u}_y}{\bar{u}} \psi), \qquad V := \frac{1}{\bar{u}} (v + \frac{\bar{u}_x}{\bar{u}} \psi ).
\end{align}
We note that this selection of $(U, V)$ compared to the right-hand side of \eqref{real:phi} differs in homogeneity of $\bar{u}^2$ (temporarily ignoring the matter of replacing $\bar{u}_\ast$ (Blasius) by the more general background profile used in practice, $\bar{u}$). This is because the homogeneity selected in \eqref{good:variables:2} also works well with the full Navier-Stokes system.
We note that the choice of homogeneity of $\bar{u}$ in \eqref{good:variables:2} is a serious difference between Prandtl and Navier-Stokes \textit{at the very outset of the analysis}. In analyses of the scalar Prandtl equation (for instance \cite{IyerBlasius}), the natural quantity is actually \eqref{real:phi}. The Navier-Stokes equations, however, are severely constraining due to the presence of the pressure (again, the lack of a pressure is one of the \textit{main simplifications} of the boundary layer theory, and it is bound to have a significant impact on the stability analysis $U^\ensuremath{\varepsilon} \rightarrow \bar{u}$). This comparatively \textit{singular} $U$ for Navier-Stokes, \eqref{good:variables:2}, does create losses of derivatives in our energy scheme, \eqref{turn:1} - \eqref{turn:3}, since singular weights $\frac{1}{\bar{u}}$ cost excess derivatives to control. Due to this, it is \textit{a-priori} unclear whether the von-Mise mechanism described above is actually consistent with the Navier-Stokes equations and with the stability as $\ensuremath{\varepsilon} \rightarrow 0$. Our analytic work is devoted to designing and controlling several new norms in order to overcome this destabilizing effect (see points (4), (5), (6) below).
We now address $(1b)$. For the unknowns $(U, V)$, the operator $\mathcal{L}_{main}$ reads (upon invoking the Prandtl equation for the coefficients $\bar{u}, \bar{v})$
\begin{align} \label{reform}
\mathcal{L}_{main}[u, v] := \mathcal{T}[U] - u_{yy},
\end{align}
where the transport operator $\mathcal{T}[U] \sim \bar{u}^2 U_x + \bar{u} \bar{v} U_y + 2 \bar{u}_{yy} U$. The ``von-Mise" mechanism, which was observed in \cite{IyerBlasius} in an energetic context, is that upon taking inner product with $U$ and invoking the condition $\bar{u}_{yy} < 0$ (which is true for the Blasius profile), one obtains coercivity of $\mathcal{L}_{main}$. Indeed, computing now (entirely in $(x, y)$ coordinates), we obtain (after integration by parts)
\begin{align} \ensuremath{\nonumber}
\int \mathcal{L}_{main}[u,v] U \,\mathrm{d} y = & \int \mathcal{T}[U] U \,\mathrm{d} y + \int \ensuremath{\partial}_y (\bar{u} U + \frac{\bar{u}_y}{\bar{u}} \psi) U_y \,\mathrm{d} y \\ \ensuremath{\nonumber}
\approx & \frac{\ensuremath{\partial}_x}{2} \int \bar{u}^2 U^2 \,\mathrm{d} y + \frac 3 2\int \bar{u}_{yy} U^2 \,\mathrm{d} y + \int \bar{u} U_y^2 \,\mathrm{d} y - \int 2 \bar{u}_{yy} U^2 \,\mathrm{d} y \\ \label{est:lin:intro:crucial}
\approx & \frac{\ensuremath{\partial}_x}{2} \int \bar{u}^2 U^2 \,\mathrm{d} y + \int \bar{u} U_y^2 \,\mathrm{d} y - \int \frac 1 2 \bar{u}_{yy} U^2 \,\mathrm{d} y,
\end{align}
where we have omitted several harmless terms (hence the $\approx$). The point is that, as in \eqref{energy:damping:1}, the dangerous Rayleigh contribution $\int \frac 3 2 \bar{u}_{yy}U^2$ is cancelled out by the diffusive commutator term $- 2 \bar{u}_{yy} U^2$, leaving an excess factor of $- \frac 1 2 \int \bar{u}_{yy} U^2$. This term acts as a damping term as $x \rightarrow \infty$ due to the property that $\bar{u}_{yy} \le 0$, analogously to \eqref{energy:damping:1}. Note that, if this were not the case, we would see growth as $x \rightarrow \infty$. The calculation \eqref{est:lin:intro:crucial} is precisely the version of \eqref{energy:damping:1} in $(x, y)$ coordinates.
Therefore, our starting point is working with the ``good variables" \eqref{good:variables:2}, but in a robust enough manner to extend to the full Navier-Stokes system, and to capture large-$x$ dynamics.
\item[(2)] \underline{Sharp $L^\infty$ decay and the design of the space $\|U, V \|_{\mathcal{X}}$:} A consideration of the nonlinear part of $\mathcal{N}(u, v)$ in \eqref{vel:eqn:1:intro} demonstrates that, at the very least, one needs to control a final norm that is strong enough to encode pointwise decay of the form
\begin{align} \label{decay:v:intro}
|\sqrt{\ensuremath{\varepsilon}} v | \langle x \rangle^{\frac 1 2} \lesssim \|U, V \|_{\mathcal{X}}.
\end{align}
This is due to having to control trilinear terms of the form $\int B(x, y) v u_y u_x \langle x \rangle$, where $B(x, y)$ is a bounded function. This baseline requirement motivates our choice of space $\mathcal{X}$, defined in \eqref{X:norm}, which contains enough copies of the $x \ensuremath{\partial}_x$ scaling vector-field of the solution in order to obtain the crucial decay estimate \eqref{decay:v:intro}. This is a sharp requirement, and our norm $\mathcal{X}$ is just barely strong enough to control this decay.
\item[(3)] \underline{Cauchy-Kovalevskaya weighted energy}: Upon reformulation into \eqref{reform}, and to deal with the large $x$ problem, we need to apply a Cauchy-Kovalevskaya type weighted multiplier of the form $(U \langle x \rangle^{- \delta}, \ensuremath{\varepsilon} V \langle x \rangle^{- \delta})$ for a $\delta$ chosen small enough. The purpose is to produce the positive terms $\| \bar{u} U \langle x \rangle^{-\frac{1}{2} - \frac{\delta}{2} } \|^2 + \| \sqrt{\ensuremath{\varepsilon}} \bar{u} V \langle x \rangle^{- \frac 1 2 - \frac{\delta}{2}} \|$ that appear in the $X_0$ energy (see the precise definition of $X_0$ below in \eqref{def:X0}, in which we made a specific choice of $\delta = \frac{1}{100}$ for the sake of concreteness). In turn, these are crucially used to control error terms in the $X_0$ energy estimate.
The idea of using Cauchy-Kovalevskaya weights has been employed before in the setting of steady Prandtl even for bounded $x$ (see, for instance, in chronological order: \cite{Iyer3}, \cite{IyerBlasius}, \cite{Gao-Zhang} as some examples). However, our setting of asymptotic in $x$ is particularly tricky and has not been encountered in any of the aforementioned works due to the confluence of two reasons. First, for infinite $x$, one \textit{cannot use} linear CK weights. Second, due to the requirement that the vector-field multiplier must be divergence-free, we need to select a multiplier of the form $(U \langle x \rangle^{- \delta}, \ensuremath{\varepsilon} V \langle x \rangle^{- \delta} + \ensuremath{\varepsilon} \delta \frac{\psi}{\bar{u}} \langle x \rangle^{-\delta-1} )$.
Due to the fact that $\ensuremath{\partial}_{xx} \langle x \rangle^{-\delta} > 0$ (which is exactly false for linear CK weights), the combination $\ensuremath{\varepsilon} V \langle x \rangle^{- \delta} + \ensuremath{\varepsilon} \delta \frac{\psi}{\bar{u}} \langle x \rangle^{-\delta-1}$ produces terms of competing sign, which are not obviously positive. We are able to handle such contributions by selecting CK weights that in turn enable us to employ Hardy-type inequalities \textit{with sharp constants} that allow for coercivity (see Lemma \ref{precise:1}).
\item[(4)] \underline{Linearized energy estimates and the scaling vector field $S = x \ensuremath{\partial}_x$:} In order to control the designer norm $\| U, V \|_{\mathcal{X}}$, we perform a sequence of estimates which results in the following loop (the reader is encouraged to consult \eqref{def:X0} - \eqref{def:Xn} for the definitions of these spaces), for $n= 0, 1, ..., 10$:
\begin{align} \label{turn:1}
&\| U, V \|_{X_0}^2 \lesssim \mathcal{F}_0 + \mathcal{N}_0, \\ \label{turn:2}
&\| U, V \|_{X_{n + \frac 1 2} \cap Y_{n + \frac 1 2}}^2 \lesssim C_\delta \| U, V \|_{X_{\le n}}^2 + \delta \|U, V \|_{X_{n+1}}^2 + \mathcal{F}_{n+ \frac 1 2} + \mathcal{N}_{n + \frac 1 2}, \\ \label{turn:3}
&\|U, V \|_{X_{n+1}}^2 \lesssim \|U, V \|_{X_{\le n + \frac 1 2}}^2 + \mathcal{F}_{n + 1} + \mathcal{N}_{n+1},
\end{align}
for a small number $0 < \delta << 1$, and where the implicit constant above is independent of the chosen $\delta$. Above, the $\mathcal{F}$ terms represent forcing terms, which depend on the approximate solution, and the $\mathcal{N}$ terms represent quadratic terms. The coupling of these estimates is required by the vector aspect of the full linearized Navier-Stokes operator $\mathcal{L}$. To keep matters simple, the reader can identify these spaces with ``regularity of $x \ensuremath{\partial}_x$". That is, $X_0$ is a baseline norm, $X_{\frac 1 2}, Y_{\frac 1 2}$ contain (in a sense made precise by the definitions of these norms) estimates on $(x\ensuremath{\partial}_x)^{\frac 1 2}$ of the quantities in $X_0$, $X_1$ is basically $\| (x \ensuremath{\partial}_x) U, (x \ensuremath{\partial}_x) V\|_{X_0}$.
It turns out that estimation of the nonlinear terms schematically works in the following manner:
\begin{align} \label{nonlinear:est:intro}
|\mathcal{N}_n| \lesssim \ensuremath{\varepsilon} \|U, V \|_{X_n}^2 \|U, V \|_{X_{n+\frac 1 2}} , \qquad |\mathcal{N}_{n + \frac 1 2}| \lesssim \ensuremath{\varepsilon} \|U, V \|_{X_{\le n + \frac 1 2}}^3.
\end{align}
The difficulty in closing our scheme becomes clear upon comparing the linear estimates in \eqref{turn:1} - \eqref{turn:3} and the estimation of the nonlinearity from \eqref{nonlinear:est:intro}: \textit{the linear estimates lose half $x \ensuremath{\partial}_x$ derivative for half-integer order spaces, whereas the nonlinear estimates lose a half $x \ensuremath{\partial}_x$ derivative for integer order spaces.} This is a new obstacle that has only appeared in the present work. We shall now discuss the origin of both the ``linear loss of derivative", appearing in \eqref{turn:2}, and the ``nonlinear loss of derivative", appearing in \eqref{nonlinear:est:intro}, together with our technique to eliminate these losses.
\item[(5)] \underline{Loss of $x \ensuremath{\partial}_x$ derivative due to degeneracy of $\bar{u}$ and weights of $x$:} The ``loss of half-$x\ensuremath{\partial}_x$ derivative" at the linearized level, that is \eqref{turn:2}, is due to degeneracy of $\bar{u}$ at $y= 0$. The reader is invited to consult, for instance, the estimation of term \eqref{est:Xhalf:loss:deriv} in our energy estimates, which displays such a loss. To summarize, we have to estimate the following integral when performing the estimate of $X_{\frac 12}$, \eqref{turn:2}
\begin{align}
I_{sing} := |\int x \bar{u}_y U_x U_y \,\mathrm{d} y \,\mathrm{d} x|.
\end{align}
A consultation with the $X_{\frac 1 2}$ and the previously controlled $X_0$ norm shows that this quantity is out of reach due to the confluence of two issues: the degeneracy of the weight $\bar{u}$ (and the lack of degeneracy of the coefficient $\bar{u}_y$) and the weight of $x$ appearing in $I_{sing}$. We emphasize that this type of ``loss-of-$x \ensuremath{\partial}_x$" is a confluent issue: it only appears if one is concerned with global-in-$x$ matters in the presence of the degenerate weights $\bar{u}$, and hence appears for the first time in this work.
For bounded $x$ values, one can simply appeal to the extra positive terms appearing from the viscosity in the $X_{n+ \frac 1 2}, Y_{n + \frac 1 2}$ norms. However, as we are here concerned with large $x$, a general principle we uncover is that we often \textit{``lose $x \ensuremath{\partial}_x$ derivative, but do not lose $\sqrt{\ensuremath{\varepsilon}} \ensuremath{\partial}_x$ derivative"}. Therefore, it is almost never advantageous, in large $x$ matters, to invoke the tangential diffusion $- \ensuremath{\varepsilon} \ensuremath{\partial}_{xx}$.
Due to this peculiarity, we design the scheme (and our norm $\mathcal{X}$, see \eqref{X:norm}) to terminate at the $X_{11}$ stage, as opposed to what appears to be the more natural stopping point of $X_{11.5}, Y_{11.5}$.
\item[(6)] \underline{Nonlinear Change of Variables (and nonlinearly modified norms)}: There is a major price to pay for the truncation of our energy scheme at the $X_{11}$ level, which comes from the nonlinear loss of $x \ensuremath{\partial}_x$ derivative displayed in \eqref{nonlinear:est:intro}. Let us explain further the reason for this loss, temporarily setting $n = 11$ in the first inequality of \eqref{nonlinear:est:intro}. Indeed, considering the trilinear quantity
\begin{align}
T_{sing} := \int u_y \ensuremath{\partial}_x^{11} v \ensuremath{\partial}_x^{11} U \langle x \rangle^{22} \,\mathrm{d} y \,\mathrm{d} x,
\end{align}
and comparing to the controls provided by the $\| \cdot \|_{\mathcal{X}}$ norm, such a quantity is out of reach (due to growth as $x \rightarrow \infty$). Estimating this type of quantity would not be out of reach if we had the right to include $X_{11.5}, Y_{11.5}$ into the $\mathcal{X}$-norm, but due to the issue raised above, we must truncate our energy scheme at the $X_{11}$ level.
To contend with this difficulty, we introduce a further \textit{nonlinear change of variables} that has the effect of cancelling out these most singular terms at the top order. This amounts to replacing the linearized good variables \eqref{good:variables:2} with another, nonlinear version, which is defined in \eqref{Chan:var:2}. We note that this difficulty does not appear in any previous work, due to the ability, for bounded values of $x$, to appeal to the positive contributions of the tangential viscosity.
In turn, the energy estimate for the new nonlinear good unknown requires estimation of several trilinear terms in the nonlinearly modified norms $\Theta_{11}$, defined in \eqref{def:Theta:11}. We subsequently establish the equivalence of measuring the nonlinear good unknown in the modified norm $\Theta_{11}$ to measuring the original good unknown in our original space $\mathcal{X}$ (see for instance, the analysis in Section \ref{subsection:NLMN}).
We emphasize that, in order to establish the equivalence of measuring the new nonlinear good unknowns in the nonlinearly modified norm to the full $\mathcal{X}$ norm, we need to rely upon the full strength of the $\mathcal{X}$-norm.
\item[(7)] \underline{Weighted in $x$ and $\bar{u}$ mixed $L^p_xL^q_y$ embeddings:} Due to the inherent nonlinear nature of this top order analysis for $\Theta_{11}$, we rely upon several mixed-norm estimates of $L^p_x L^q_y$ with precise weights in $x$ and $\bar{u}$ in order to close our analysis of $\Theta_{11}$. This requirement is amplified upon noting that $u_{yy}$ (1) lacks the regularity in $y$ that the background $\bar{u}_{yy}$ has, due to the lack of higher-order $y$ derivative quantities in our $\|\cdot \|_{\mathcal{X}}$ (which appear to be difficult to achieve due to $\{y = 0\}$ boundary effects) and (2) lacks decay as $y \rightarrow \infty$ (we do not control weights in $y$ in our $\mathcal{X}$ space). Thus we must always place $u_{yy}$ in $L^2_y$ in the vertical direction, and develop the appropriate weighted in $\bar{u}$ and $x$ $L^p_x L^q_y$ embeddings. These, in turn, are developed in Sections \ref{Lpq:embed:section}, \ref{pw:section:1}, \ref{subsection:NLMN}, and again, rely upon the full strength of our $\mathcal{X}$ norm in order to close. Again, we emphasize that these analyses are completely new.
\end{itemize}
\subsection{Notational Conventions}
We first define (in contrast with the typical bracket notation) $\langle x \rangle := 1+ x$. We also define the quantity
\begin{align} \label{z:choice}
z := \frac{y}{\sqrt{x + 1}} = \frac{y}{\sqrt{\langle x \rangle}},
\end{align}
due to our choice that $x_0 = 1$ (which we are again making without loss of generality). The cut-off function $\chi(\cdot): \mathbb{R}_+ \rightarrow \mathbb{R}$ will be reserved for a particular decreasing function, $0 \le \chi \le 1$, satisfying
\begin{align} \label{def:chi}
\chi(z) = \begin{cases} 1 \text{ for } 0 \le z \le 1 \\ 0 \text{ for } 2 \le z < \infty \end{cases}
\end{align}
Regarding norms, we define for functions $u(x, y)$,
\begin{align}
\| u \| := \|u \|_{L^2_{xy}} = \Big( \int u^2 \,\mathrm{d} x \,\mathrm{d} y \Big)^{\frac 1 2}, \qquad \|u \|_\infty := \sup_{(x,y) \in \mathcal{Q}} |u(x, y)|.
\end{align}
We will often need to consider ``slices", whose norms we denote in the following manner
\begin{align}
\| u \|_{L^p_y} := \Big( \int u(x, y)^p \,\mathrm{d} y \Big)^{\frac 1 p}.
\end{align}
We use the notation $a \lesssim b$ to mean $a \le Cb$ for a constant $C$, which is independent of the parameters $\ensuremath{\varepsilon}, \delta_\ast$. We define the following scaled differential operators
\begin{align}
\nabla_\ensuremath{\varepsilon} := \begin{pmatrix} \ensuremath{\partial}_x \\ \frac{\ensuremath{\partial}_y}{\sqrt{\ensuremath{\varepsilon}}} \end{pmatrix}, \qquad \Delta_\ensuremath{\varepsilon} := \ensuremath{\partial}_{yy} + \ensuremath{\varepsilon} \ensuremath{\partial}_{xx}.
\end{align}
For derivatives, we will use both $\ensuremath{\partial}_x f$ and $f_x$ to mean the same thing. For integrals, we will use $\int f := \int_0^\infty \int_0^\infty f(x, y) \,\mathrm{d} y \,\mathrm{d} x$. These conventions are taken unless otherwise specified (by appending a $\,\mathrm{d} y$ or a $\,\mathrm{d} x$), which we sometimes need to do.
We will often use the parameter $\delta$ to be a generic small parameter, that can change in various instances. The constant $C_\delta$ will refer to a number that may grow to $\infty$ as $\delta \downarrow 0$.
\subsection{Plan of the paper}
\hspace{5 mm} The plan of the paper is as follows. Throughout the paper, we take Theorem \ref{thm:approx} as given and use it as a ``black box". Indeed, Theorem \ref{thm:approx} is proven in the companion paper, \cite{IM21}. Section \ref{remainder:section:1} is devoted to introducing the Navier-Stokes system satisfied by the remainders, $(u, v)$, and proving basic properties of the associated linearized operator. Section \ref{remainder:section:2} is devoted to developing the functional framework, notably the various components of the space $\mathcal{X}$, in which we analyze the remainders, $(u, v)$. Section \ref{remainder:section:3} is devoted to providing the energy estimates to control $\|U, V \|_{X_n}$ and $\|U, V \|_{X_{n + \frac 1 2} \cap Y_{n + \frac 1 2}}$ for $n = 0,...,10$. Section \ref{section:top:order} contains our top ($11$'th order) analysis, which notably includes our nonlinear change of variables and nonlinearly modified norms. Section \ref{section:NL} contains the nonlinear analysis to close the complete $\mathcal{X}$ norm estimate for $(U, V)$.
\section{The Remainder System} \label{remainder:section:1}
\subsection{Presentation of Equations} \label{subsection:background}
\subsubsection{Background Profiles, $[\bar{u}, \bar{v}]$}
We recall the definition of $[\bar{u}, \bar{v}]$ from \eqref{exp:base}. In addition, for a few of the estimates in our analysis, we will require slightly more detailed information on these background profiles, in the form of decomposing into an Euler and Prandtl component. Indeed, define
\begin{align} \label{split:split:1}
\bar{u}_P &:= \bar{u}^0_P + \sum_{i = 1}^{N_1} \ensuremath{\varepsilon}^{\frac i 2} u^i_P , & \bar{u}_E &:= \sum_{i = 1}^{N_{1}} \ensuremath{\varepsilon}^{\frac{i}{2}} u^i_E.
\end{align}
We will now summarize the quantitative estimates on $[\bar{u}, \bar{v}]$ that we will be using in the analysis of \eqref{vel:eqn:1} - \eqref{vel:eqn:2}.
First, let us recall now the Blasius profiles, defined in \eqref{Blasius:1} - \eqref{Blasius:3}, which are a family (due to the parameter $x_0$) of exact solutions to \eqref{BL:0:intro:intro} - \eqref{BL:1:intro:intro}. Recall also that, without loss of generality, we set $x_0 = 1$. We now record the following quantitative estimates on the Blasius solution:
\begin{lemma} For any $k, j, M \ge 0$,
\begin{align} \label{water:1}
&\| \langle z \rangle^M \ensuremath{\partial}_x^k \ensuremath{\partial}_y^j (\bar{u}_\ast - 1) \|_{L^\infty_y} \le C_{M, k, j} \langle x \rangle^{- k - \frac j 2}, \\ \label{v:blasius}
&\| \langle z \rangle^M \ensuremath{\partial}_x^k \ensuremath{\partial}_y^j (\bar{v}_\ast - \bar{v}_\ast(x, \infty)) \|_{L^\infty_y} \le C_{M, k, j} \langle x \rangle^{ - \frac 1 2- k - \frac j 2}, \\ \label{v:blasius:2}
&\| \ensuremath{\partial}_x^k \ensuremath{\partial}_y^j \bar{v}_\ast \|_{L^\infty_y} \le C_{k,j} \langle x \rangle^{- \frac 1 2 - k - \frac j 2}.
\end{align}
\end{lemma}
We also have the following properties of the Blasius profile, which are well known and which will be used in our analysis.
\begin{lemma} For $[\bar{u}_\ast, \bar{v}_\ast]$ defined in \eqref{Blasius:1}, the following estimates are valid
\begin{align} \label{Blas:prop:1}
&|\ensuremath{\partial}_y \bar{u}_\ast(x, 0)| \gtrsim \langle x \rangle^{- \frac 1 2}, \\ \label{Blas:prop:2}
&\ensuremath{\partial}_{yy}\bar{u}_{\ast} \le 0.
\end{align}
\end{lemma}
We will now state the estimates we will be using about our approximate solution. Note that we state these estimates as \textit{assumptions} for the purpose of this present paper. However, they are established rigorously in our companion paper according to Theorem \ref{thm:approx}.
\begin{assumption} \label{assume:1} For $0 \le j,m,k, M, l \le 20$, the following estimates are valid
\begin{align} \label{prof:u:est}
& \| \ensuremath{\partial}_x^j (y\ensuremath{\partial}_y)^m \ensuremath{\partial}_y^k \bar{u} x^{j + \frac k 2} \|_\infty + \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^j \bar{u} x^j \|_{\infty} \le C_{k,j} , \\ \label{prof:v:est}
& \| \ensuremath{\partial}_x^j (y\ensuremath{\partial}_y)^m \ensuremath{\partial}_y^k \bar{v} x^{j + \frac k 2 + \frac 1 2} \|_\infty + \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^j \bar{v} x^{j + \frac 1 2} \|_\infty \le C_{k,j}, \\ \label{est:Eul:piece}
&\ensuremath{\varepsilon}^{- \frac 1 2}\| \ensuremath{\partial}_x^j (Y \ensuremath{\partial}_Y)^l \ensuremath{\partial}_Y^k \bar{u}_E \langle x \rangle^{j+k + \frac 1 2} \|_\infty + \| \ensuremath{\partial}_x^j (Y \ensuremath{\partial}_Y)^l \ensuremath{\partial}_Y^k \bar{v}_E \langle x \rangle^{j+k + \frac 1 2} \|_\infty \le C_{k,j} , \\ \label{est:Pr:piece}
&\| \ensuremath{\partial}_x^j (y\ensuremath{\partial}_y)^k \ensuremath{\partial}_y^l \bar{u}_P \langle z \rangle^M \langle x \rangle^{j + \frac l 2}\|_\infty \le C_{k,j,M} \\ \label{est:PR:bar:v}
&\| \ensuremath{\partial}_x^j (y\ensuremath{\partial}_y)^k \ensuremath{\partial}_y^l \bar{v}_P \langle z \rangle^M \langle x \rangle^{j + \frac l 2+ \frac 1 2}\|_\infty \le C_{k,j,M}.
\end{align}
\end{assumption}
We will need estimates which amount to showing that $\bar{u}$ remains a small perturbation of $\bar{u}^0_p$.
\begin{assumption} \label{lemma:bofz} Define a monotonic function $b(z) := \begin{cases} z \text{ for } 0 \le z \le \frac{3}{4} \\ 1 \text{ for } 1 \le z \end{cases}$, where $b \in C^\infty$. Then
\begin{align} \label{est:ring:1}
&\| \ensuremath{\partial}_y^j \ensuremath{\partial}_x^k (\bar{u} - \bar{u}^0_p) \langle x \rangle^{\frac j 2 + k + \frac{1}{50}} \|_{\infty} \le C_{k,j} \sqrt{\ensuremath{\varepsilon}}, \\ \label{samezies:1}
&1 \lesssim \frac{\bar{u}^0_p}{b(z)} \lesssim 1 \text{ and } 1 \lesssim \Big| \frac{\bar{u}}{\bar{u}^0_p} \Big| \lesssim 1, \\ \label{prime:pos}
&|\bar{u}_y|_{y = 0}(x)| \gtrsim \langle x \rangle^{- \frac 1 2}.
\end{align}
\end{assumption}
We will need to remember the equations satisfied by the approximate solutions, $[\bar{u}, \bar{v}]$, which we state in the following assumption.
\begin{assumption} \label{assume:3} Define the auxiliary quantities,
\begin{align} \label{def:zeta}
&\zeta := \bar{u} \bar{u}_x + \bar{v} \bar{u}_y - \bar{u}^0_{pyy}, \\ \label{def:alpha}
&\alpha := \bar{u} \bar{v}_x + \bar{v} \bar{v}_y,
\end{align}
For any $j, k, m \ge 0$, the following estimates hold:
\begin{align} \label{S:0}
|(x\ensuremath{\partial}_x)^k (y\ensuremath{\partial}_y)^m \zeta| \lesssim &\sqrt{\ensuremath{\varepsilon}} \langle x \rangle^{- (1 + \frac{1}{50})} \\ \label{est:zeta:2}
|(x \ensuremath{\partial}_x)^k (x^{\frac 1 2} \ensuremath{\partial}_y)^j \zeta| \lesssim & \sqrt{\ensuremath{\varepsilon}} \langle x \rangle^{- (1 + \frac{1}{50})} \\ \label{S:1}
|(x \ensuremath{\partial}_x)^k (y \ensuremath{\partial}_y)^m \alpha| \lesssim & \bar{u} \langle x \rangle^{-\frac 3 2}.
\end{align}
\end{assumption}
\subsubsection{System on $[u, v]$}
We are now going to study the nonlinear problem for the remainders, $[u, v]$. We define the linearized operator in velocity form via
\begin{align}
\begin{aligned} \label{vel:form}
\mathcal{L}[u, v] := \begin{cases} \mathcal{L}_1 := \bar{u} u_x + \bar{u}_{y} v + \bar{u}_{x} u + \bar{v} u_y - \Delta_\ensuremath{\varepsilon} u \\ \mathcal{L}_2 := \bar{u} v_x + u \bar{v}_{x} + \bar{v} v_y + \bar{v}_{y} v - \Delta_\ensuremath{\varepsilon} v\end{cases}
\end{aligned}
\end{align}
Our objective is to study the problem
\begin{align} \label{vel:eqn:1}
&\mathcal{L}[u, v] + \begin{pmatrix} P_x \\ \frac{P_y}{\ensuremath{\varepsilon}} \end{pmatrix} = \begin{pmatrix} F_R \\ G_R \end{pmatrix} + \begin{pmatrix} \mathcal{N}_1(u, v) \\ \mathcal{N}_2(u, v) \end{pmatrix}, \qquad u_x + v_y = 0, \text{ on } \mathcal{Q} \\ \label{vel:eqn:2}
&[u, v]|_{y = 0} = [u, v]|_{y \uparrow \infty} = 0, \qquad [u,v]|_{x = 0} = [u, v]|_{x = \infty} = 0.
\end{align}
Above, the forcing terms $F_R$ and $G_R$ are defined in \eqref{forcing:remainder}, and obey estimates \eqref{est:forcings:part1}. The nonlinear terms are given by
\begin{align} \label{def:N1:N2}
\mathcal{N}_1(u, v) := \ensuremath{\varepsilon}^{\frac{N_2}{2}} (uu_x + vu_y), \qquad \mathcal{N}_2(u, v) := \ensuremath{\varepsilon}^{\frac{N_2}{2}} (uv_x + vv_y).
\end{align}
\noindent In vorticity form, the operator is
\begin{align} \label{vort:form}
\begin{aligned}
\mathcal{L}_{vort}[u, v] := & - u_{yyy} + 2 \ensuremath{\varepsilon} v_{xxy} + \ensuremath{\varepsilon}^2 v_{xxx} - \bar{u} \Delta_\ensuremath{\varepsilon} v + v \Delta_\ensuremath{\varepsilon} \bar{u} - u \Delta_\ensuremath{\varepsilon} \bar{v} + \bar{v} \Delta_\ensuremath{\varepsilon} u.
\end{aligned}
\end{align}
\subsection{The good unknowns}
We first introduce the unknowns
\begin{align} \label{vm:1}
\qquad q = \frac{\psi}{\bar{u}}, \qquad U = \ensuremath{\partial}_y q, \qquad V := -\ensuremath{\partial}_x q,
\end{align}
from which it follows that
\begin{align} \label{formula:1}
u = \bar{u} U + \bar{u}_y q , \qquad v = \bar{u} V - \bar{u}_x q.
\end{align}
An algebraic computation using \eqref{formula:1} yields the following
\begin{align} \label{inserting}
\bar{u} u_x + \bar{u}_y v + \bar{v} u_y + \bar{u}_x u = \bar{u}^2 U_x + \bar{u} \bar{v} U_y + (2 \bar{u} \bar{u}_x + 2 \bar{v} \bar{u}_y) U + (\bar{u} \bar{u}_x + \bar{v} \bar{u}_y)_y q.
\end{align}
Recalling \eqref{def:zeta}, we obtain the identity
\begin{align} \ensuremath{\nonumber}
\bar{u} u_x + \bar{u}_y v + \bar{v} u_y + \bar{u}_x u = \mathcal{T}_1[U] + \bar{u}^0_{pyyy} q + 2\zeta U + \zeta_y q,
\end{align}
where we define the operator $\mathcal{T}_1[U]$ via
\begin{align} \label{def:T1}
&\mathcal{T}_1[U] := \bar{u}^2 U_x + \bar{u} \bar{v} U_y + 2 \bar{u}^0_{pyy} U.
\end{align}
We now perform a similar computation for the transport terms in $\mathcal{L}_2$. Again, a computation using \eqref{formula:1} yields the following
\begin{align} \ensuremath{\nonumber}
\bar{u} v_x + u \bar{v}_x + \bar{v} v_y + \bar{v}_y v = & \bar{u} \ensuremath{\partial}_x ( \bar{u} V - \bar{u}_x q ) + \bar{v}_x ( \bar{u} U + \bar{u}_yq ) + \bar{v} \ensuremath{\partial}_y (\bar{u} V - \bar{u}_x q) + \bar{v}_y (\bar{u} V - \bar{u}_x q) \\ \ensuremath{\nonumber}
= & \bar{u}^2 V_x + \bar{u} \bar{v} V_y + (2 \bar{u} \bar{u}_x + \bar{u}_y \bar{v} + \bar{u} \bar{v}_y) V + (\bar{u} \bar{v}_x - \bar{u}_x \bar{v}) U \\ \ensuremath{\nonumber}
&+ (- \bar{u} \bar{u}_{xx} + \bar{u}_y \bar{v}_x - \bar{u}_{xy} \bar{v} - \bar{u}_x \bar{v}_y)q \\ \label{vM:exp:eq:2}
= & \bar{u}^2 V_x + \bar{u} \bar{v} V_y + (\bar{u} \bar{u}_x + \bar{v} \bar{u}_y) V + \alpha U + \ensuremath{\partial}_y \alpha q,
\end{align}
where we have defined the coefficients $\alpha$ in \eqref{def:alpha}. We now again use \eqref{def:zeta} to simplify the coefficient of $V$ in \eqref{vM:exp:eq:2}, which yields
\begin{align} \ensuremath{\nonumber}
\bar{u} v_x + u \bar{v}_x + \bar{v} v_y + \bar{v}_y v = & \mathcal{T}_2[V] + \alpha U + \alpha_y q + \zeta V,
\end{align}
where we have defined the operator $\mathcal{T}_2[V]$ via
\begin{align} \label{def:T2}
&\mathcal{T}_2[V] := \bar{u}^2 V_x + \bar{u} \bar{v} V_y + \bar{u}^0_{pyy}V.
\end{align}
We thus write our simplified system as
\begin{align} \label{sys:sim:1}
&\mathcal{T}_1[U] + 2 \zeta U + \zeta_y q - \Delta_\ensuremath{\varepsilon} u + \bar{u}^0_{pyyy} q + P_x = F_R + \mathcal{N}_1, \\ \label{sys:sim:2}
&\mathcal{T}_2[V] + \alpha U + \alpha_y q + \zeta V + \frac{P_y}{\ensuremath{\varepsilon}} - \Delta_\ensuremath{\varepsilon} v = G_R + \mathcal{N}_2, \\ \label{sys:sim:3}
&U_x + V_y = 0,
\end{align}
and we note crucially that due to division by a factor of $\bar{u}$, we do not get a Dirichlet boundary condition at $\{y = 0\}$ for $U$, although we retain that $V|_{y = 0} = 0$. Summarizing the boundary conditions on $[U, V]$, we have
\begin{align} \label{BC:UVYW}
U|_{x = 0} = V|_{x = 0} = 0, \qquad U|_{y = \infty} = V|_{y = \infty} = 0, \qquad U|_{x = \infty} = V|_{x = \infty} = 0, \qquad V|_{y = 0} = 0.
\end{align}
It will be convenient also to introduce the vorticity formulation, which we will use to furnish control over the $Y_{n + \frac 1 2}$ norms, which reads
\begin{align} \ensuremath{\nonumber}
&\ensuremath{\partial}_y \mathcal{T}_1[U] - \ensuremath{\varepsilon} \ensuremath{\partial}_x \mathcal{T}_2[V] - u_{yyy} - 2 \ensuremath{\varepsilon} u_{xxy} + \ensuremath{\varepsilon}^2 v_{xxx} + \ensuremath{\partial}_y^4 (\bar{u}^0_{pyyy}q) \\ \label{eq:vort:pre}
=& 2 (\zeta U)_y + (\zeta_y q)_y - \ensuremath{\varepsilon} (\alpha U)_x - \ensuremath{\varepsilon} (\alpha_y q)_x - \ensuremath{\varepsilon} (\zeta V)_x + \ensuremath{\partial}_y F_R - \ensuremath{\varepsilon} \ensuremath{\partial}_x G_R + \ensuremath{\partial}_y \mathcal{N}_1 - \ensuremath{\varepsilon} \ensuremath{\partial}_x \mathcal{N}_2.
\end{align}
We also now apply $\ensuremath{\partial}_x^{(n)}$ to \eqref{sys:sim:1} - \eqref{sys:sim:3}, which produces the system for $U^{(n)} := \ensuremath{\partial}_x^n U, V^{(n)} := \ensuremath{\partial}_x^n V$,
\begin{align} \label{sys:sim:n1}
&\mathcal{T}_1[U^{(n)}] + 2 \zeta U^{(n)} + \zeta_y q^{(n)} - \Delta_\ensuremath{\varepsilon} u^{(n)} + \bar{u}^0_{pyyy} q^{(n)} + P^{(n)}_x = \ensuremath{\partial}_x^n F_R + \ensuremath{\partial}_x^n \mathcal{N}_1 - \mathcal{C}_1^n , \\ \label{sys:sim:n2}
&\mathcal{T}_2[V^{(n)}] + \alpha U^{(n)} + \alpha_y q^{(n)} + \zeta V^{(n)} + \frac{P^{(n)}_y}{\ensuremath{\varepsilon}} - \Delta_\ensuremath{\varepsilon} v^{(n)} = \ensuremath{\partial}_x^n G_R + \ensuremath{\partial}_x^n \mathcal{N}_2 - \mathcal{C}_2^n, \\ \label{sys:sim:n3}
&U^{(n)}_x + V^{(n)}_y = 0,
\end{align}
where the quantities $\mathcal{C}_1^n, \mathcal{C}_2^n$ contain lower order commutators, and are specifically defined by
\begin{align} \label{def:C1n}
\mathcal{C}_1^n := & \sum_{k = 0}^{n-1} \binom{n}{k} ( \ensuremath{\partial}_x^{n-k} \zeta U^{(k)} - \ensuremath{\partial}_x^{n-k} \zeta_y q^{(k)} ), \\ \label{def:C2n}
\mathcal{C}_2^n := & \sum_{k = 0}^{n-1} \binom{n}{k} ( \ensuremath{\partial}_x^{n-k} \alpha U^{(k)} + \ensuremath{\partial}_x^{n-k} \alpha_y q^{(k)} + \ensuremath{\partial}_x^{n-k} \zeta V^{(k)} ).
\end{align}
\section{The Space $\mathcal{X}$} \label{remainder:section:2}
In this section, we provide the basic functional framework for the analysis of the remainder equation, \eqref{vel:eqn:1} - \eqref{vel:eqn:2}. In particular, we define our space $\mathcal{X}$ and develop the associated embedding theorems that we will need.
\subsection{Definition of Norms}
To define the basic energy norm, we will define the following weight function
\begin{align}
g(x)^2 := 1 + \langle x \rangle^{-\frac{1}{100}}.
\end{align}
The purpose of $g$ is to act like $1$ as $x$ gets large, but as $g' < 0$, this will provide extra control for $(U, V)$ near $x = 0$, due to the presence of the final two terms in \eqref{def:X0} below.
Define the basic energy norm via
\begin{align} \ensuremath{\nonumber}
\|U, V \|_{X_0}^2 := &\| \sqrt{\bar{u}} U_y g \|^2 + \| \sqrt{\ensuremath{\varepsilon}} \sqrt{\bar{u}} U_x g \|^2 + \| \ensuremath{\varepsilon} \sqrt{\bar{u}} V_x g\|^2 \\ \ensuremath{\nonumber}
+& \| \sqrt{-\bar{u}_{yy}} U g \|^2 + \ensuremath{\varepsilon} \| \sqrt{-\bar{u}_{yy}} V g \|^2+ \| \sqrt{\bar{u}_y} U g \|_{y = 0}^2 \\ \label{def:X0}
+& \| \bar{u} U \langle x \rangle^{-\frac{1}{2} - \frac{1}{200} } \|^2 + \| \sqrt{\ensuremath{\varepsilon}} \bar{u} V \langle x \rangle^{-1 - \frac{1}{200}} \|^2.
\end{align}
To define higher-order norms, we need to define increasing cut-off functions, $\phi_n(x)$, for $n = 1,...,12$, where $0 \le \phi_n \le 1$, and which satisfy
\begin{align} \label{def:phi:j}
\phi_n(x) = \begin{cases} 0 \text{ on } 0 \le x \le 200 + 10n \\ 1 \text{ on } x \ge 205 + 10n. \end{cases}
\end{align}
The ``half-level" norms will be defined as (for $n = 0,...,10$),
\begin{align} \label{def:half:norm}
&\| U, V \|_{X_{n + \frac 1 2}} := \| \bar{u} U^{(n)}_x x^{n + \frac 1 2} \phi_{n+1} \| + \sqrt{\ensuremath{\varepsilon}} \| \bar{u} V^{(n)}_x x^{n + \frac 1 2} \phi_{n+1} \|, \\ \ensuremath{\nonumber}
&\| U, V \|_{Y_{n+ \frac 1 2}} := \| \sqrt{\bar{u}} U^{(n)}_{yy} x^{n + \frac 1 2} \phi_{n+1} \| + \| \sqrt{\bar{u}} \sqrt{\ensuremath{\varepsilon}} U^{(n)}_{xy} x^{n + \frac 1 2} \phi_{n+1} \| \\ \label{def:half:norm:Y}
& \qquad \qquad \qquad + \| \sqrt{\bar{u}} \ensuremath{\varepsilon} U^{(n)}_{xx} x^{n + \frac 1 2} \phi_{n+1} \| + \| \sqrt{\bar{u}_y} U^{(n)}_y x^{n+\frac 1 2} \phi_{n+1} \|_{y = 0}, \\
&\| U, V \|_{X_{n + \frac 1 2} \cap Y_{n + \frac 1 2}} := \| U, V \|_{X_{n + \frac 1 2}} + \| U, V \|_{Y_{n + \frac 1 2}}.
\end{align}
\begin{remark} The motivation for the above space (and the corresponding notation) is that we think of the quantities measured by the $X_{\frac 1 2} \cap Y_{\frac 1 2}$ norms as ``half $x \ensuremath{\partial}_x$ derivative" more than those in $X_0$. Indeed, if we (on a \textit{very} formal level) consider the heat equation scaling of $\ensuremath{\partial}_x \approx \ensuremath{\partial}_{yy}$, then this is the case. The requirement of including two norms $X_{\frac 1 2}$ and $Y_{\frac 1 2}$ is that, if we had a scalar equation, we could immediately deduce properties of $U_{yy}$ from those on $U_x$. However, since we have a complicated system, this is not possible. Information about $U_{yy}$ needs to be obtained through separate control of the norm $Y_{\frac 1 2}$.
\end{remark}
We would now like to define higher order versions of the $X_0$ norm, which we do via
\begin{align} \ensuremath{\nonumber}
\| U, V \|_{X_n} := &\| \sqrt{\bar{u}} U^{(n)}_{y} x^n \phi_n \|^2 + \| \sqrt{\ensuremath{\varepsilon}} \sqrt{\bar{u}} U^{(n)}_{x} x^n \phi_n \|^2 + \| \ensuremath{\varepsilon} \sqrt{\bar{u}} V^{(n)}_{x} x^n \phi_n\|^2 \\ \label{def:Xn}
+& \| \sqrt{-\bar{u}_{yy}} U^{(n)} x^n \phi_n \|^2 + \ensuremath{\varepsilon} \| \sqrt{-\bar{u}_{yy}} V^{(n)} x^n \phi_n \|^2+ \| \sqrt{\bar{u}_y} U^{(n)} x^n \phi_n \|_{y = 0}^2.
\end{align}
We will need ``local" versions of the higher-order norms introduced above. According to \eqref{def:phi:j}, since $\phi_1 = 1$ only on $x \ge 215$, we will need higher regularity controls for $0 \le x \le 215$. Define now a sequence of parameters, $\rho_j$, according to
\begin{align}
\rho_2 = 0, \qquad \rho_j = \rho_{j-1} + 5
\end{align}
Set now the cut-off functions $\psi_2(x) = 1$, and
\begin{align}
\psi_j(x) := \begin{cases} 0 \text{ for } x < \rho_j \\ 1 \text{ for } x \ge \rho_{j} + 1 \end{cases} \text{ for } 3 \le j \le 11
\end{align}
Our complete norm will be
\begin{align} \label{X:norm}
\| U, V \|_{\mathcal{X}} := & \sum_{n = 0}^{10} \Big( \| U, V \|_{X_n} + \| U, V \|_{X_{n + \frac 1 2}} + \| U, V \|_{Y_{n + \frac 1 2}} \Big) + \| U, V \|_{X_{11}} + \| U, V \|_E,
\end{align}
where the quantity $\|U, V\|_E$ will be defined below, in \eqref{def:E:norm}. We will also set the parameter $M_1 = 24$.
It will be convenient to introduce the following notation to simplify expressions, where $k = 1, ..., 11$,
\begin{align}
\| U, V \|_{\mathcal{X}_{\le k}} := & \sum_{j = 0}^{k} \| U, V \|_{X_j} + \sum_{j = 0}^{k-1} \| U, V \|_{X_{j + \frac 1 2} \cap Y_{j + \frac 1 2}}, \\
\| U, V \|_{\mathcal{X}_{\le k - \frac 1 2}} := & \sum_{j = 0}^{k-1} \| U, V \|_{X_j} + \sum_{j = 0}^{k-1} \| U, V \|_{X_{j + \frac 1 2} \cap Y_{j + \frac 1 2}},
\end{align}
and the ``elliptic" part of the norm, \eqref{X:norm} via
\begin{align} \label{def:E:norm}
\| U, V \|_{E} := \sum_{k = 1}^{11} \ensuremath{\varepsilon}^k \| (\ensuremath{\partial}_x^k u_y, \sqrt{\ensuremath{\varepsilon}} \ensuremath{\partial}_x^k u_x, \ensuremath{\varepsilon} \ensuremath{\partial}_x^k v_x ) \psi_{k+1} \| + \sum_{k = 1}^{11} \ensuremath{\varepsilon} \| (\ensuremath{\partial}_x^k u_y, \sqrt{\ensuremath{\varepsilon}} \ensuremath{\partial}_x^k u_x, \ensuremath{\varepsilon} \ensuremath{\partial}_x^k v_x ) \gamma_{k-1,k} \|.
\end{align}
Above, the functions $\gamma_{k-1,k}(x)$ are additional cut-off functions defined by
\begin{align}
\gamma_{k-1,k}(x) := \begin{cases} 0 \text{ on } x \le 197 + 10k \\ 1 \text{ on } x \ge 198 + 10k \end{cases}.
\end{align}
The point of $\gamma_{k-1,k}$ is to satisfy the following two properties: $\gamma_{k-1,k}$ is supported on the set where $\phi_{k-1} =1$ and $\phi_k$ is supported on the set where $\gamma_{k-1,k} = 1$. The inclusion of the $\| U, V \|_E$ norm above is to provide information near $\{x = 0\}$ (comparing the region where $\psi_{k} = 1$ versus where $\phi_k = 1$). The estimation of $\|U, V \|_E$ is through elliptic regularity, and therefore cannot give any useful asymptotic in $x$ information on the solution.
\subsection{Hardy-type Inequalities}
We first recall from \eqref{z:choice} that $z = \frac{y}{\sqrt{x + 1}}$. We now prove the following lemma.
\begin{lemma}\label{lemma:hardy:9} For $0 < \gamma << 1$, and for any function $f \in H^1_y$, for all $x \ge 0$, the following inequality is valid:
\begin{align} \label{Hardy:1}
\| f \|_{L^2_y}^2 \lesssim \gamma \| \sqrt{ \bar{u}^0_p} f_y \langle x \rangle^{\frac 1 2} \|_{L^2_y}^2 + \frac{1}{\gamma^2} \| \bar{u}^0_p f \|_{L^2_y}^2.
\end{align}
\end{lemma}
\begin{proof} We square the left-hand side of \eqref{Hardy:1} and localize the integral based on $z$ via
\begin{align}
\int f^2 \,\mathrm{d} y = \int f^2 \chi(\frac{z}{\gamma}) \,\mathrm{d} y + \int f^2 (1 - \chi(\frac{z}{\gamma})) \,\mathrm{d} y.
\end{align}
For the localized component, we integrate by parts in $y$ via
\begin{align}
\int f^2 \chi(\frac{z}{\gamma}) \,\mathrm{d} y = \int \ensuremath{\partial}_y (y) f^2 \chi(\frac{z}{\gamma}) \,\mathrm{d} y = - \int 2 y f f_y \chi(\frac{z}{\gamma}) \,\mathrm{d} y - \frac{1}{\gamma} \int \frac{y}{\sqrt{x}} f^2 \chi'(\frac{z}{\gamma}) \,\mathrm{d} y.
\end{align}
We estimate each of these terms via
\begin{align}
\Big| \int y f f_y \chi(\frac{z}{\gamma})\,\mathrm{d} y \Big| \lesssim \| f \|_{L^2_y} \| \sqrt{x} \sqrt{\bar{u}^0_p} \sqrt{\gamma} f_y \|_{L^2_y} \le \delta \| f \|_{L^2_y}^2 + C_\delta \gamma x \| \sqrt{\bar{u}^0_p} f_y \|_{L^2_y}^2,
\end{align}
where above, we have used \eqref{samezies:1}. For the far-field term, we estimate again by invoking \eqref{samezies:1} via
\begin{align}
|\int f^2 (1 - \chi(\frac{z}{\gamma})) \,\mathrm{d} y| = |\int \frac{1}{|\bar{u}^0_p|^2} |\bar{u}^0_p|^2 f^2 (1 - \chi(\frac{z}{\gamma})) \,\mathrm{d} y| \lesssim \frac{1}{\gamma^2} \| \bar{u}^0_p f \|_{L^2_y}^2.
\end{align}
We have thus obtained
\begin{align}
\| f \|_{L^2_y}^2 \le \delta \| f \|_{L^2_y}^2 + C_\delta \gamma x \| \sqrt{\bar{u}^0_p} f_y \|_{L^2_y}^2 + \frac{C}{\gamma^2} \| \bar{u}^0_p f \|_{L^2_y}^2,
\end{align}
and the desired result follows from taking $\delta$ small relative to universal constants and absorbing to the left-hand side.
\end{proof}
We will often use estimate \eqref{Hardy:1} in the following manner
\begin{corollary} For any $0 < \gamma << 1$,
\begin{align} \label{bob:1}
\| U_y \| + \| \sqrt{\ensuremath{\varepsilon}} U_x \| + \| \ensuremath{\varepsilon} V_x \| \le \gamma \| U, V \|_{Y_{\frac 1 2}} + C_\gamma \| U, V \|_{X_0}.
\end{align}
\end{corollary}
\begin{proof} This follows immediately upon taking $f = U_y$, $\sqrt{\ensuremath{\varepsilon}} U_x$, or $\ensuremath{\varepsilon} V_x$ in \eqref{Hardy:1}.
\end{proof}
\begin{lemma} Assume $f(0, y) = f(\infty, y) = 0$. Let $\tilde{\chi}$ be an increasing cut-off function, $\tilde{\chi}' \ge 0$. Then for any $\sigma > 0$,
\begin{align}
\Big\| \frac{1}{\langle x \rangle^{\frac 1 2+\sigma}} f \tilde{\chi}(z) \Big\| \lesssim_\sigma \| f_x \langle x \rangle^{\frac 1 2 -\sigma} \tilde{\chi}(z) \|.
\end{align}
\end{lemma}
\begin{proof} We square the left-hand side via
\begin{align} \ensuremath{\nonumber}
\Big\| \frac{1}{\langle x \rangle^{\frac 1 2+\sigma}} f \tilde{\chi}(z) \Big\|^2 = & \int \langle x \rangle^{- 1 - 2\sigma} f^2 \tilde{\chi}(z) = - \int \frac{\ensuremath{\partial}_x }{ 2 \sigma} \langle x \rangle^{-2\sigma} f^2 \tilde{\chi}(z) \\ \label{local:hardy:1}
= & \frac{1}{\sigma} \int \langle x \rangle^{-2\sigma} f f_x \tilde{\chi}(z) + \frac{1}{2\sigma} \int \langle x \rangle^{-2\sigma} f^2 \ensuremath{\partial}_x \tilde{\chi}(z).
\end{align}
Next, we observe that the right-most term in \eqref{local:hardy:1} is signed negative and can thus be moved to the left-side, as
\begin{align}
\ensuremath{\partial}_x \tilde{\chi}(z) = \ensuremath{\partial}_x \tilde{\chi}(\frac{y}{\sqrt{x}}) = - \frac{z}{2x} \tilde{\chi}'(z) \le 0.
\end{align}
We thus may conclude by estimating the first term from \eqref{local:hardy:1} by Cauchy--Schwarz.
\end{proof}
This will often be used in conjunction with the Hardy-type inequality \eqref{Hardy:1}, via:
\begin{corollary} For any $k \ge 0$, and for any $0 < \sigma << 1$, any $0 < \gamma << 1$,
\begin{align} \label{Hardy:three:a}
\| \frac{1}{\langle x \rangle^{\frac 1 2+\sigma}} U \| & \le \gamma \| \sqrt{\bar{u}} U_y \| + C_{\gamma, \sigma} \| \bar{u} U_x \langle x \rangle^{\frac 1 2} \|, \\ \label{Hardy:four:a}
\| \frac{1}{\langle x \rangle^{\frac 1 2+\sigma}} \sqrt{\ensuremath{\varepsilon}}V \| &\le \gamma \| \sqrt{\bar{u}} \sqrt{\ensuremath{\varepsilon}} V_y \| + C_{\gamma, \sigma} \| \sqrt{\ensuremath{\varepsilon}} \bar{u} V_x \langle x \rangle^{\frac 1 2} \|, \\ \label{Hardy:three}
\| U^{(k)}_x \langle x \rangle^{k + \frac 1 2} \| &\le \gamma \| \sqrt{\bar{u}} U^{(k+1)}_y \langle x \rangle^{k+1} \| + C_{\gamma, \sigma} \| \bar{u} U^{(k)}_x \langle x \rangle^{k +\frac 1 2} \|, \\ \label{Hardy:four}
\| \sqrt{\ensuremath{\varepsilon}}V^{(k)}_x \langle x \rangle^{k + \frac 1 2} \| & \le \gamma \| \sqrt{\bar{u}} \sqrt{\ensuremath{\varepsilon}} V^{(k+1)}_y \langle x \rangle^{k+1} \| + C_{\gamma, \sigma} \| \sqrt{\ensuremath{\varepsilon}} \bar{u} V^{(k)}_x \langle x \rangle^{k+\frac 1 2} \|
\end{align}
\end{corollary}
We now need to record a Hardy-type inequality in which the precise constant is important. More precisely, the fact that the first coefficient on the right-hand side below is very close to $1$ will be important.
\begin{lemma} For any function $f(x): \mathbb{R}_+ \rightarrow \mathbb{R}$ satisfying $f(0) = 0$ and $f \rightarrow 0$ as $x \rightarrow \infty$, the following inequality holds:
\begin{align} \label{precise:1}
\int \langle x \rangle^{-3.01} \bar{u}^2 f^2 \,\mathrm{d} x \le \frac{1}{1.01} \int \langle x \rangle^{-1.01} \bar{u}^2 f_x^2 \,\mathrm{d} x + \frac{2}{1.01} \int \langle x \rangle^{-2.01} \bar{u} \bar{u}_x f^2 \,\mathrm{d} x.
\end{align}
\end{lemma}
\begin{proof} We compute the quantity on the left-hand side of above via
\begin{align} \ensuremath{\nonumber}
\int \langle x \rangle^{-3.01} \bar{u}^2 f^2 \,\mathrm{d} x = &- \int \frac{\ensuremath{\partial}_x}{2.01} \langle x \rangle^{-2.01} \bar{u}^2 f^2 \,\mathrm{d} x \\ \ensuremath{\nonumber}
=& \frac{2}{2.01} \int \langle x \rangle^{-2.01} \bar{u}^2 f f_x \,\mathrm{d} x + \frac{2}{2.01} \int \langle x \rangle^{-2.01} \bar{u} \bar{u}_x f^2 \,\mathrm{d} x \\
\le & \frac{1}{2.01} \int \langle x \rangle^{-3.01} \bar{u}^2 f^2 \,\mathrm{d} x + \frac{1}{2.01} \int \langle x \rangle^{-1.01} \bar{u}^2 f_x^2 \,\mathrm{d} x + \frac{2}{2.01} \int \langle x \rangle^{-2.01} \bar{u} \bar{u}_x f^2 \,\mathrm{d} x,
\end{align}
which, upon bringing the first term on the right-hand side to the left, gives the inequality \eqref{precise:1} with the precise constants.
\end{proof}
\subsection{$L^p_x L^q_y$ Embeddings} \label{Lpq:embed:section}
We will now state some $L^p_x L^q_y$ type embedding theorems on $(U, V)$ using the specification of $\| U, V \|_{\mathcal{X}}$.
\begin{lemma} For $1 \le j \le 10$,
\begin{align} \label{Lpq:emb:V:1}
\ensuremath{\varepsilon}^{\frac 1 4}\| V^{(j)} \langle x \rangle^j \phi_{j+1} \|_{L^2_x L^\infty_y} \lesssim & \| U, V \|_{\mathcal{X}_{\le j+1}}, \\ \label{Lpq:emb:V:2}
\ensuremath{\varepsilon}^{\frac 1 4}\| \bar{u} V^{(j)} \langle x \rangle^j \phi_{j+1} \|_{L^2_x L^\infty_y} \lesssim & \| U, V \|_{\mathcal{X}_{\le j + \frac 1 2}}
\end{align}
\end{lemma}
\begin{proof} We begin with \eqref{Lpq:emb:V:1}. For this, we first freeze $x$ and integrate from $y = \infty$ to obtain
\begin{align} \ensuremath{\nonumber}
\ensuremath{\varepsilon}^{\frac 1 2}|V^{(j)}|^2 \langle x \rangle^{2j} \phi_{j+1}^2 = & 2 \ensuremath{\varepsilon}^{\frac 1 2} |\int_y^\infty V^{(j)} V^{(j)}_y\langle x \rangle^{2j} \phi_{j+1}^2 \,\mathrm{d} y' | \lesssim \| \ensuremath{\varepsilon}^{\frac 1 2} V^{(j)} \langle x \rangle^{j - \frac 1 2} \phi_{j+1} \|_{L^2_y} \| U^{(j)}_x \langle x \rangle^{j+ \frac 1 2} \phi_{j+1} \|_{L^2_y} \\
= & \| \ensuremath{\varepsilon}^{\frac 1 2} V^{(j-1)}_x \langle x \rangle^{(j-1)+ \frac 1 2} \phi_{j+1} \|_{L^2_y} \| U^{(j)}_x \langle x \rangle^{j + \frac 1 2} \phi_{j+1} \|_{L^2_y}.
\end{align}
We now take $L^2_x$ and appeal to \eqref{Hardy:three} - \eqref{Hardy:four}.
Similarly, we compute
\begin{align} \ensuremath{\nonumber}
\ensuremath{\varepsilon}^{\frac 1 2}\bar{u}_\ast^2 |V^{(j)}|^2 \langle x \rangle^{2j} \phi_{j+1}^2 \le & 2 \ensuremath{\varepsilon}^{\frac 1 2} \bar{u}_\ast^2 |\int_y^\infty V^{(j)} V^{(j)}_y\langle x \rangle^{2j} \phi_{j+1}^2 \,\mathrm{d} y' | \lesssim \ensuremath{\varepsilon}^{\frac 1 2} \int_y^\infty \bar{u}_\ast^2| V^{(j-1)}_x ||U^{(j)}_x|\langle x \rangle^{2j} \phi_{j+1}^2 \,\mathrm{d} y' \\
\lesssim & \ensuremath{\varepsilon}^{\frac 1 2} \| \bar{u}_\ast V^{(j-1)}_x \langle x \rangle^{j - \frac 1 2} \phi_{j+1}\| \| \bar{u}_\ast U^{(j)}_x \langle x \rangle^{j + \frac 1 2} \phi_{j+1} \|
\end{align}
where above we have used that $\bar{u}_\ast(y') \ge \bar{u}_\ast(y) \ge 0$ when $y' \ge y$ to bring the $\bar{u}_\ast^2$ factor inside the integral. We now use \eqref{samezies:1} to conclude.
\end{proof}
We will now need to translate the information on $(U, V)$ from the norms stated above to information regarding $(u, v)$.
\begin{lemma} For $2 \le j \le 10$, $0 \le k \le 10$, $1 \le m \le 11$, and for any $0 < \delta << 1$,
\begin{align} \label{L2:uv:eq}
\| u^{(k)}_x x^{k+ \frac 1 2} \phi_{k+1} \| + \| \sqrt{\ensuremath{\varepsilon}} v^{(k)}_x x^{k + \frac 1 2} \phi_{k+1} \| \le & C_\delta \| U, V \|_{\mathcal{X}_{\le k + \frac 1 2}} + \delta \| U, V\|_{\mathcal{X}_{\le k+1}} \\ \label{L2:uv:eq:2}
\| u^{(m)}_y x^{m} \phi_m \| +\sqrt{\ensuremath{\varepsilon}} \| u^{(m)}_x x^m \phi_{m} \| + \ensuremath{\varepsilon} \| v^{(m)}_x x^{m} \phi_{m} \| \lesssim & \| U, V \|_{\mathcal{X}_{\le m}} \\ \label{mixed:L2:orig:1}
\| \frac{1}{\bar{u}} \ensuremath{\partial}_x^j v \langle x\rangle^{j } \phi_{j+1} \|_{L^2_x L^\infty_y} \lesssim & \| U, V \|_{\mathcal{X}_{\le j + 1}}, \\ \label{mL2again}
\| \ensuremath{\partial}_x^j v \langle x\rangle^{j } \phi_{j+1} \|_{L^2_x L^\infty_y} \lesssim & \| U, V \|_{\mathcal{X}_{\le j + \frac 1 2 }}.
\end{align}
\end{lemma}
\begin{proof} To prove these estimates, we simply use \eqref{formula:1} to express $(u, v)$ in terms of $(U, V)$. We do this now, starting with \eqref{L2:uv:eq}. Differentiating \eqref{formula:1} $k+1$ times in $x$, we obtain
\begin{align} \ensuremath{\nonumber}
\| u^{(k)}_x x^{k+ \frac 1 2} \phi_{k+1} \| \le &\sum_{l = 0}^{k+1} \binom{k+1}{l} ( \| \ensuremath{\partial}_x^l \bar{u} \ensuremath{\partial}_x^{k - l} U_x x^{k+\frac 1 2} \phi_{k+1} \| + \| \ensuremath{\partial}_x^{l} \bar{u}_y \ensuremath{\partial}_x^{k-l}V x^{k + \frac 1 2} \phi_{k+1} \| ) \\ \ensuremath{\nonumber}
\lesssim & \sum_{l = 0}^{k+1} \| \frac{\ensuremath{\partial}_x^l \bar{u}}{ \bar{u}} x^l \|_\infty \| \bar{u} \ensuremath{\partial}_x^{k-l} U_x \langle x \rangle^{k - l + \frac 1 2} \phi_{k+1} \| + \| \ensuremath{\partial}_x^l \bar{u}_y y x^l \|_\infty \| \ensuremath{\partial}_x^{k-l} V_y x^{k-l + \frac 1 2} \phi_{k+1} \| \\ \ensuremath{\nonumber}
\lesssim & \sum_{l = 0}^{k+1} \| \bar{u} \ensuremath{\partial}_x^{k-l} U_x \langle x \rangle^{k - l + \frac 1 2} \phi_{k+1} \| + \| \bar{u} \ensuremath{\partial}_x^{k-l} V_y x^{k-l + \frac 1 2} \phi_{k+1} \| +\| \sqrt{\bar{u}} \ensuremath{\partial}_x^{k+1-l} U_y x^{k+1 -l} \phi_{k+1} \| \\
\lesssim & \sum_{l = 0}^{k+1} (\| U, V \|_{\mathcal{X}_{\le k-l + \frac 1 2}} + \| U, V \|_{\mathcal{X}_{\le k+1-l}} ),
\end{align}
which establishes \eqref{L2:uv:eq}. The analogous proof works for the second quantity in \eqref{L2:uv:eq}.
For the first quantity in \eqref{L2:uv:eq:2}, we obtain the identity
\begin{align}
u_y^{(m)} = \sum_{l =0}^m \binom{m}{l} (\ensuremath{\partial}_x^l \bar{u} \ensuremath{\partial}_x^{m-l} U_y + 2 \ensuremath{\partial}_x^l \bar{u}_y \ensuremath{\partial}_x^{m-l} U + \ensuremath{\partial}_x^l \bar{u}_{yy} \ensuremath{\partial}_x^{m-l} q).
\end{align}
We now estimate
\begin{align} \ensuremath{\nonumber}
\| u_y^{(m)} x^m \ensuremath{\partial}hi_m \| \lesssim &\sum_{l = 0}^m \Big\| \frac{\ensuremath{\partial}_x^l \bar{u}}{\bar{u}} \langle x \rangle^l \Big\|_\infty \| \sqrt{\bar{u}} \ensuremath{\partial}_x^{m-l} U_y \langle x \rangle^{m-l} \ensuremath{\partial}hi_{m-l} \| \\ \ensuremath{\nonumber}
&+ \sum_{l = 0}^{m-1} \| \ensuremath{\partial}_x^l \bar{u}_y \langle x \rangle^{l + \frac 1 2}\|_\infty \| \ensuremath{\partial}_x^{m-1-l} U_x \langle x \rangle^{m-1-l +\frac 1 2} \ensuremath{\partial}hi_{m-1-l} \| \\ \ensuremath{\nonumber}
& + \| \ensuremath{\partial}_x^m \bar{u}_y y x^m \|_\infty \Big\| \frac{U - U(x, 0)}{y} \Big\| + \| \ensuremath{\partial}_x^m \bar{u}_y \langle x \rangle^{m+\frac 1 4} \|_{L^\infty_x L^2_y} \| U(x, 0) \langle x \rangle^{-\frac 1 4} \|_{L^2_x} \\ \ensuremath{\nonumber}
&+ \sum_{l = 0}^{m-1} \| \ensuremath{\partial}_x^l \bar{u}_{yy} \langle x \rangle^{l + \frac 1 2} y \|_\infty \| \ensuremath{\partial}_x^{m-1-l} \frac{q_x}{y} \langle x \rangle^{m-1-l +\frac 1 2} \ensuremath{\partial}hi_{m-1-l} \| \\
& + \| \ensuremath{\partial}_x^m \bar{u}_{yy} y^2 x^m \|_\infty \| \frac{q - y U(x, 0)}{\langle y \rangle^2} \|,
\end{align}
where we use above that $m \ge 1$ in \eqref{L2:uv:eq:2}.
We now arrive at the mixed norm estimates in \eqref{mixed:L2:orig:1}. For this, we first record the identity
\begin{align}
\ensuremath{\partial}_x^j v = \sum_{l = 0}^{j} \binom{j}{l} ( \ensuremath{\partial}_x^l \bar{u} \ensuremath{\partial}_x^{j-l} V - \ensuremath{\partial}_x^l \bar{u}_x \ensuremath{\partial}_x^{j-l} q ).
\end{align}
From here, we compute
\begin{align} \ensuremath{\nonumber}
\| \frac{1}{\bar{u}} \ensuremath{\partial}_x^j v \langle x \rangle^j \ensuremath{\partial}hi_j \|_{L^2_x L^\infty_y} \lesssim &\sum_{l = 0}^{j-1} \Big\| \frac{\ensuremath{\partial}_x^l \bar{u}}{ \bar{u}} \langle x \rangle^l \Big\|_\infty \| \ensuremath{\partial}_x^{j-l} V \langle x \rangle^{j-l} \ensuremath{\partial}hi_{j} \|_{L^2_x L^\infty_y} \\ \ensuremath{\nonumber}
&+ \sum_{l = 0}^{j-2} \Big\| \frac{\ensuremath{\partial}_x^{l+1} \bar{u}}{ \bar{u}} \langle x \rangle^{l+1} \Big\|_\infty \| \ensuremath{\partial}_x^{j-l} q \langle x \rangle^{j-l-1} \ensuremath{\partial}hi_{j} \|_{L^2_x L^\infty_y} \\ \ensuremath{\nonumber}
& + \| \ensuremath{\partial}_x^j \bar{u} \langle x \rangle^{j - \frac 1 2} y \|_\infty \Big\| \frac{V}{y} \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_j\Big\|_{L^2_x L^\infty_y} + \| \ensuremath{\partial}_x^{j+1} \bar{u} y \langle x \rangle^{j + \frac 1 2} \|_\infty \Big\| \frac{q}{y} \langle x \rangle^{- \frac 1 2} \ensuremath{\partial}hi_j \Big\|_{L^2_x L^\infty_y} \\ \label{RAC:1}
\lesssim & \sum_{l = 0}^{j-1} \| U, V \|_{\mathcal{X}_{\le l+1}} + \Big\| \frac{V}{y} \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_j \Big\|_{L^2_x L^\infty_y} +\Big\| \frac{q}{y} \langle x \rangle^{- \frac 1 2} \ensuremath{\partial}hi_j \Big\|_{L^2_x L^\infty_y},
\end{align}
where we have invoked \eqref{Lpq:emb:V:1}. To conclude, we need to estimate the final two terms appearing above. First, using $V|_{y = 0} = 0$, we have
\begin{align}
\Big\| \frac{V}{y} \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_j \Big\|_{L^2_x L^\infty_y} \lesssim \| U_x \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_j \|_{L^2_x L^\infty_y} \lesssim \| U, V \|_{\mathcal{X}_{\le 1.5}},
\end{align}
where above we have used the estimate
\begin{align} \ensuremath{\nonumber}
\langle x \rangle U_x^2 = & \Big|\int_y^\infty 2 \langle x \rangle U^{(1)} U^{(1)}_{y} \,\mathrm{d} y'\Big| \lesssim \| U_x \langle x \rangle^{\frac 1 2} \|_{L^2_y} \| U_{xy} \langle x \rangle \|_{L^2_y} \\ \ensuremath{\nonumber}
\lesssim & (\| \bar{u} U_x \langle x \rangle^{\frac 1 2} \|_{L^2_y} + \| \sqrt{\bar{u}} U_{xy} \langle x \rangle \|_{L^2_y} )(\| \sqrt{\bar{u}} U^{(1)}_y \langle x \rangle \|_{L^2_y} + \| \sqrt{\bar{u}} U^{(1)}_{yy} \langle x \rangle^{\frac 3 2} \|_{L^2_y} ),
\end{align}
which, upon taking supremum in $y$ and subsequently integrating in $x$, yields
\begin{align}
\| U_x \langle x \rangle^{\frac 1 2}\ensuremath{\partial}hi_j \|_{L^2_x L^\infty_y}^2 \lesssim \| U, V \|_{\mathcal{X}_{\le 1.5}}^2.
\end{align}
An analogous estimate applies to the third term from \eqref{RAC:1}. The estimate \eqref{mL2again} works in a nearly identical manner, invoking \eqref{Lpq:emb:V:2} instead of \eqref{Lpq:emb:V:1}.
\end{proof}
\subsection{Pointwise Decay Estimates} \label{pw:section:1}
A crucial feature of the space $\mathcal{X}$ is that it is strong enough to control sharp pointwise decay rates of various quantities, which are in turn used to control the nonlinearity. To be precise, we need to treat large values of $x$ (the more difficult case) in a different manner than small values of $x$. Large values of $x$ will be treated through the weighted norms in \eqref{X:norm}, whereas small values of $x$ will be treated with the $\dot{H}^k$ component of \eqref{X:norm}, at an expense of $\ensuremath{\varepsilon}^{-M_1}$. We recall from \eqref{def:phi:j} that $\ensuremath{\partial}hi_{12}(x)$ is only non-zero when $\ensuremath{\partial}hi_{j} = 1$ for $1 \le j \le 11$.
\begin{lemma} For $0 \le k \le 8$, and for $j = 0, 1$,
\begin{align} \label{UV:decay}
&\| \ensuremath{\partial}_y^j U^{(k)} \langle x \rangle^{k + \frac 1 4 + \frac j 2} \ensuremath{\partial}hi_{12}\|_{L^\infty_y} \lesssim \| U, V \|_{\mathcal{X}}, &&\| \sqrt{\ensuremath{\varepsilon}} V^{(k)} \langle x \rangle^{k + \frac 1 2} \ensuremath{\partial}hi_{12} \|_{L^\infty_y} \lesssim \| U, V \|_{\mathcal{X}}.
\end{align}
\end{lemma}
\begin{proof} We first establish the $U$ decay via
\begin{align} \ensuremath{\nonumber}
U_x^2 \langle x \rangle^{\frac 5 2} \ensuremath{\partial}hi_{12}^2 = &\Big| - \int_y^\infty 2 U_x U_{xy} \langle x \rangle^{\frac 5 2} \ensuremath{\partial}hi_{12}^2 \,\mathrm{d} y' \Big|\\ \ensuremath{\nonumber}
\le & \Big| \int_y^\infty \int_x^\infty 2 U_{xx} U_{xy} \langle x \rangle^{\frac 5 2} \ensuremath{\partial}hi_{12}^2 \,\mathrm{d} y' \,\mathrm{d} x' \Big| + \Big| 2 \int_y^\infty \int_x^\infty U_x U_{xxy} \langle x \rangle^{\frac 5 2} \ensuremath{\partial}hi_{12}^2 \,\mathrm{d} y' \,\mathrm{d} x'\Big| \\ \ensuremath{\nonumber}
&+ 5 \Big| \int_y^\infty \int_x^\infty U_x U_{xy} \langle x \rangle^{\frac 3 2}\ensuremath{\partial}hi_{12}^2 \,\mathrm{d} y' \,\mathrm{d} x' \Big| + \Big| \int_y^\infty \int_x^\infty 4 U_x U_{xy} \ensuremath{\partial}hi_{12} \ensuremath{\partial}hi_{12}' \,\mathrm{d} y' \,\mathrm{d} x' \Big| \\ \ensuremath{\nonumber}
\lesssim & \| U_{xx} \langle x \rangle^{\frac 3 2} \ensuremath{\partial}hi_{12} \| \| U_{xy} \langle x \rangle \ensuremath{\partial}hi_{12} \| + \| U_x \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_{12} \| \| U_{yxx} \langle x \rangle^2 \ensuremath{\partial}hi_{12} \| \\
&+ \| U_x \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_{12}\| \| U_{xy} \langle x \rangle \ensuremath{\partial}hi_{12} \| + \| U_x \ensuremath{\partial}hi_{11} \| \| U_{xy} \ensuremath{\partial}hi_{11} \| .
\end{align}
We now perform the same calculation for the $V$ decay in \eqref{UV:decay}. We begin with $V_x$, via
\begin{align} \ensuremath{\nonumber}
\ensuremath{\varepsilon} V_x^2 \langle x \rangle^{3} \ensuremath{\partial}hi_{12}^2 = &\Big| \int_y^\infty 2 \ensuremath{\varepsilon} V_x V_{xy} \langle x \rangle^3 \ensuremath{\partial}hi_{12}^2 \,\mathrm{d} y' \Big| \\ \ensuremath{\nonumber}
\le & \int_y^\infty \int_x^\infty 2 \ensuremath{\varepsilon} | V_{xx} | | V_{xy} | \langle x \rangle^{3} \ensuremath{\partial}hi_{12}^2 \,\mathrm{d} y' \,\mathrm{d} x' + \int_y^\infty \int_x^\infty 2 \ensuremath{\varepsilon} | V_{x} | | V_{xxy} | \langle x \rangle^{3} \ensuremath{\partial}hi_{12}^2 \,\mathrm{d} y' \,\mathrm{d} x' \\ \ensuremath{\nonumber}
& + \int_y^\infty \int_x^\infty 6 \ensuremath{\varepsilon} | V_{x} | | V_{xy} | \langle x \rangle^{2} \ensuremath{\partial}hi_{12}^2 \,\mathrm{d} y' \,\mathrm{d} x' + \int_y^\infty \int_x^\infty 4\ensuremath{\varepsilon} |V_x| |V_{xy}| \langle x \rangle^{3} \ensuremath{\partial}hi_{12} |\ensuremath{\partial}hi_{12}'| \,\mathrm{d} y' \,\mathrm{d} x' \\ \ensuremath{\nonumber}
\lesssim & \sqrt{\ensuremath{\varepsilon}} \| \sqrt{\ensuremath{\varepsilon}} V^{(1)}_x \langle x \rangle^{\frac 3 2} \ensuremath{\partial}hi_{12}\| \| U^{(1)}_x \langle x \rangle^{\frac 3 2} \ensuremath{\partial}hi_{12} \| + \sqrt{\ensuremath{\varepsilon}} \| \sqrt{\ensuremath{\varepsilon}} V_x \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_{12} \| \| U^{(2)}_x \langle x \rangle^{2.5} \ensuremath{\partial}hi_{12} \| \\
& + \sqrt{\ensuremath{\varepsilon}} \| \sqrt{\ensuremath{\varepsilon}} V_x \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_{12} \| \| U^{(1)}_x \langle x \rangle^{\frac 3 2} \ensuremath{\partial}hi_{12} \| + \sqrt{\ensuremath{\varepsilon}} \| \sqrt{\ensuremath{\varepsilon}} V_x \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_{11} \| \| U^{(1)}_x \langle x \rangle^{\frac 3 2} \ensuremath{\partial}hi_{11} \|.
\end{align}
From here the result follows upon invoking \eqref{Hardy:three} - \eqref{Hardy:four}. The same computation can be done for higher derivatives, and for $U, V$ themselves we use the estimate
\begin{align}
| U \ensuremath{\partial}hi_{12} | = \Big| \ensuremath{\partial}hi_{12} \int_x^\infty U_x \,\mathrm{d} x' \Big| \lesssim \|U, V \|_{\mathcal{X}} \int_x^\infty \langle x' \rangle^{- \frac 5 4} \,\mathrm{d} x' \lesssim \langle x \rangle^{- \frac 1 4} \| U, V \|_{\mathcal{X}},
\end{align}
and similarly for $V$, we integrate
\begin{align}
| V \ensuremath{\partial}hi_{12} | = \Big| \ensuremath{\partial}hi_{12} \int_x^\infty V_x \,\mathrm{d} x' \Big| \lesssim \ensuremath{\varepsilon}^{- \frac 1 2} \|U, V \|_{\mathcal{X}} \int_x^\infty \langle x' \rangle^{- \frac 3 2} \,\mathrm{d} x' \lesssim \langle x \rangle^{- \frac 1 2} \ensuremath{\varepsilon}^{- \frac 1 2} \| U, V \|_{\mathcal{X}}.
\end{align}
This concludes the proof.
\end{proof}
It is also necessary that we establish decay estimates on the original unknowns $(u, v)$. For this purpose, we define another auxiliary cut-off function in the following manner
\begin{align} \label{psi:twelve:def}
\ensuremath{\partial}si_{12}(x) := \begin{cases} 0 \text{ for } 0 \le x \le 60 \\ 1 \text{ for } x \ge 61 \end{cases}.
\end{align}
The main point in specifying $\ensuremath{\partial}si_{12}$ in this manner is so that its support is contained where $\ensuremath{\partial}si_j = 1$ for $j = 2,\dots,11$ and simultaneously $\ensuremath{\partial}si_{12} = 1$ on the set where $\ensuremath{\partial}hi_1$ is supported. Then, we have the following lemma.
\begin{lemma} For $1 \le k \le 8$, and $j = 1, 2$,
\begin{align} \label{pw:dec:u}
\| u^{(k)} x^{k + \frac 1 4} \ensuremath{\partial}si_{12} \|_\infty + \| u^{(k)}_y x^{\frac 3 4} \ensuremath{\partial}si_{12} \|_\infty + \| \frac{1}{\bar{u}} u^{(k)} x^{k + \frac 1 4} \ensuremath{\partial}si_{12}\|_\infty \lesssim & \ensuremath{\varepsilon}^{-M_1} \| U, V \|_{\mathcal{X}} \\ \label{pw:v:1}
\| v^{(k)} x^{k + \frac 1 2} \ensuremath{\partial}si_{12} \|_\infty + \| \frac{v^{(k)}}{\bar{u}} x^{k + \frac 1 2} \ensuremath{\partial}si_{12} \|_\infty \lesssim & \ensuremath{\varepsilon}^{-M_1} \| U, V \|_{\mathcal{X}} \\ \label{mixed:emb}
\| \ensuremath{\partial}_y^j u^{(k)} x^{k + \frac j 2} \ensuremath{\partial}si_{12}\|_{L^\infty_x L^2_y} \lesssim & \ensuremath{\varepsilon}^{-M_1} \| U, V \|_{\mathcal{X}}.
\end{align}
In addition,
\begin{align} \label{Linfty:wo}
\| u \langle x \rangle^{\frac 1 4} \|_\infty + \| \sqrt{\ensuremath{\varepsilon}} v \langle x \rangle^{\frac 1 2} \|_\infty \lesssim \ensuremath{\varepsilon}^{-M_1} \| U, V \|_{\mathcal{X}}
\end{align}
\end{lemma}
\begin{proof} We note that standard Sobolev embeddings give $\| u^{(k)} \ensuremath{\partial}si_{12} \|_\infty \lesssim \| u^{(k)} \ensuremath{\partial}si_{12} \|_{H^2} \lesssim \ensuremath{\varepsilon}^{-M_1} \| U, V \|_{\mathcal{X}}$, and similarly for the remaining quantities in \eqref{pw:dec:u} - \eqref{mixed:emb}. Next, we appeal to \eqref{formula:1} to obtain
\begin{align} \ensuremath{\nonumber}
\| u^{(k)} x^{k + \frac 1 4} \ensuremath{\partial}hi_{12} \|_{\infty} \le & \sum_{l = 0}^k \binom{k}{l} (\| \ensuremath{\partial}_x^l \bar{u} \ensuremath{\partial}_x^{k-l} U x^{k + \frac 1 4} \ensuremath{\partial}hi_{12} \|_\infty + \| \ensuremath{\partial}_x^l \bar{u}_y \ensuremath{\partial}_x^{k-l} q x^{k + \frac 1 4} \ensuremath{\partial}hi_{12} \|_\infty) \\ \ensuremath{\nonumber}
\lesssim & \sum_{l = 0}^k \| \ensuremath{\partial}_x^l \bar{u} x^l \|_\infty \| \ensuremath{\partial}_x^{k-l} U x^{k-l + \frac 1 4} \ensuremath{\partial}hi_{12} \|_\infty + \| \ensuremath{\partial}_x^l \bar{u}_y y x^{l} \|_\infty \| \ensuremath{\partial}_x^{k-l} \frac{q}{y} x^{k-l + \frac 1 4} \ensuremath{\partial}hi_{12} \|_\infty \\ \ensuremath{\nonumber}
\lesssim &\sum_{l = 0}^k \| \ensuremath{\partial}_x^{k-l} U x^{k-l + \frac 1 4} \ensuremath{\partial}hi_{12} \|_\infty \lesssim \| U, V \|_{\mathcal{X}},
\end{align}
where we have invoked the Hardy inequality (in $L^\infty_y$), admissible as $q|_{y = 0} = 0$, as well as estimate \eqref{UV:decay}. To conclude, by using that $x$ is bounded ($x < 400$) on the set $\{\ensuremath{\partial}si_{12} = 1 \} \cap \{ \ensuremath{\partial}hi_{12} < 1 \}$, we have
\begin{align} \ensuremath{\nonumber}
\| u^{(k)} x^{k + \frac 1 4} \ensuremath{\partial}si_{12} \|_\infty \le & \| u^{(k)} x^{k + \frac 1 4} \ensuremath{\partial}si_{12} (1 - \ensuremath{\partial}hi_{12}) \|_\infty + \| u^{(k)} x^{k + \frac 1 4} \ensuremath{\partial}si_{12} \ensuremath{\partial}hi_{12} \|_\infty \\
\lesssim & \ensuremath{\varepsilon}^{-M_1} \| U, V \|_{\mathcal{X}} + \| u^{(k)} x^{k + \frac 1 4} \ensuremath{\partial}hi_{12} \|_\infty \lesssim \ensuremath{\varepsilon}^{-M_1} \| U, V \|_{\mathcal{X}}.
\end{align}
The remaining estimates in \eqref{pw:dec:u} - \eqref{Linfty:wo} work in largely the same manner.
\end{proof}
\section{Global \textit{a-priori} Bounds} \label{remainder:section:3}
In this section, we perform our main energy estimates, which control the $\|U, V \|_{X_0}, \|U, V \|_{X_{\frac 1 2}}, \|U, V \|_{Y_{\frac 1 2}}$, and their higher order counterparts, up to $\|U, V \|_{X_{10}}, \|U, V \|_{X_{10.5}}, \|U, V \|_{Y_{10.5}}$. When we perform these estimates, we recall the notational convention for this section, which is that, unless otherwise specified, $\int g := \int_0^\infty \int_0^\infty g(x, y) \,\mathrm{d} y \,\mathrm{d} x$. For this section, we define the operator
\begin{align}
\text{div}_\ensuremath{\varepsilon}(\bold{M}) := \ensuremath{\partial}_x M_1 + \frac{\ensuremath{\partial}_y}{\ensuremath{\varepsilon}} M_2, \text{ where } \bold{M} = (M_1, M_2).
\end{align}
\subsection{$X_0$ Estimates}
\iffalse
\begin{lemma} \label{Lem:1}
\begin{align} \ensuremath{\nonumber}
\| U, V \|_{X_{0, -\delta}}^2 \lesssim |\int f U \langle x \rangle^{-2\delta}| + \ensuremath{\varepsilon} | \int g V \langle x \rangle^{-2\delta} |.
\end{align}
\end{lemma}
\begin{proof} We apply the multiplier $[U \langle x \rangle^{-2\delta}, \ensuremath{\varepsilon} V \langle x \rangle^{-2\delta}]$ to the system \eqref{sys:sim:1} - \eqref{sys:sim:3}. \sameer{Need to modify to make it div. free...}
\ensuremath{\nonumber}oindent \textit{Step 1: $\mathcal{T}_1[U]$ Terms} We now arrive at the transport terms from $\mathcal{T}_1$, which produce
\begin{align} \label{T1:cont}
\int \mathcal{T}_1[U] U \langle x \rangle^{-2\delta} = \frac 3 2 \int \bar{u}_{yy} U^2 \langle x \rangle^{-2\delta} + \delta \int \bar{u}^2 U^2 \langle x \rangle^{-1-2\delta}.
\end{align}
\ensuremath{\nonumber}oindent \textit{Step 2: Diffusive Terms} Recalling the diffusive terms, we have
\begin{align} \ensuremath{\nonumber}
&\int \Big( - \ensuremath{\partial}_y^2 u + \bar{u}_{yyy} q \Big)U \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
= & \int u_y U_y \langle x \rangle^{-2\delta} + \int_{y = 0} u_y U \langle x \rangle^{-2\delta} \,\mathrm{d} x - \frac 1 2 \int \bar{u}_{yyyy} q^2 \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
= & \int \ensuremath{\partial}_y (\bar{u} U + \bar{u}_y q) U_y \langle x \rangle^{-2\delta} + \int_{y = 0} u_y U \langle x \rangle^{-2\delta} \,\mathrm{d} x - \frac 1 2 \int \bar{u}_{yyyy} q^2 \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
= & \int \bar{u} U_y^2 \langle x \rangle^{-2\delta} + 2 \bar{u}_y U U_y \langle x \rangle^{-2\delta}+ \bar{u}_{yy} q U_y\langle x \rangle^{-2\delta} + \int_{y = 0} u_y U \langle x \rangle^{-2\delta} \,\mathrm{d} x \\ \ensuremath{\nonumber}
& - \frac 1 2 \int \bar{u}_{yyyy} q^2 \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
= & \int \bar{u} U_y^2 \langle x \rangle^{-2\delta}- 2\bar{u}_{yy} U^2 \langle x \rangle^{-2\delta}+ \frac 1 2 \bar{u}_{yyyy} q^2\langle x \rangle^{-2\delta} - \frac 1 2 \bar{u}_{yyyy}q^2\langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
&- \int_{y = 0} \bar{u}_y U^2 \langle x \rangle^{-2\delta}+ \int_{y = 0} u_y U \langle x \rangle^{-2\delta}\,\mathrm{d} x \\ \label{earl:2}
= & \int \bar{u} U_y^2 \langle x \rangle^{-2\delta} - 2\bar{u}_{yy} U^2 \langle x \rangle^{-2\delta} + \int_{y = 0} \bar{u}_y U^2\langle x \rangle^{-2\delta} \,\mathrm{d} x.
\end{align}
We now note that the second term above cancels the contribution from \eqref{T1:cont} and generates a positive damping term. Hence,
\begin{align} \ensuremath{\nonumber}
\eqref{T1:cont} + \eqref{earl:2} = & \int \bar{u} U_y^2 \langle x \rangle^{-2\delta}- \frac 1 2 \int \bar{u}_{yy} U^2 \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
&+ \int_{y = 0} \bar{u}_y U^2\langle x \rangle^{-2\delta} \,\mathrm{d} x + \delta \int \bar{u}^2 U^2 \langle x \rangle^{-1-2\delta}.
\end{align}
The next diffusive term is
\begin{align} \ensuremath{\nonumber}
- \int \ensuremath{\varepsilon} u_{xx} U \langle x \rangle^{-2\delta} =& \int \ensuremath{\varepsilon} u_x U_x \langle x \rangle^{-2\delta} - 2 \delta \int \ensuremath{\varepsilon} u_x U \langle x \rangle^{-2\delta - 1} \\ \label{rhn:1}
= & \int \ensuremath{\varepsilon} \ensuremath{\partial}_x (\bar{u} U + \bar{u}_y q) U_x \langle x \rangle^{-2\delta} - 2 \delta \int \ensuremath{\varepsilon} \ensuremath{\partial}_x (\bar{u} U + \bar{u}_y q) U \langle x \rangle^{-2\delta - 1}.
\end{align}
We first expand
\begin{align} \ensuremath{\nonumber}
\int \ensuremath{\varepsilon} \ensuremath{\partial}_x (\bar{u} U + \bar{u}_y q) U_x \langle x \rangle^{-2\delta} = &\int \ensuremath{\varepsilon} \bar{u} U_x^2 \langle x \rangle^{-2\delta}+ \int \ensuremath{\varepsilon} \bar{u}_x U U_x \langle x \rangle^{-2\delta} + \int \ensuremath{\varepsilon} \bar{u}_{xy} q U_x \langle x \rangle^{-2\delta}\\ \ensuremath{\nonumber}
&- \int \ensuremath{\varepsilon} \bar{u}_y U_xV \langle x \rangle^{-2\delta}\\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \bar{u} U_x^2 \langle x \rangle^{-2\delta} - \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{xx} U^2 \langle x \rangle^{-2\delta} + \delta \int \ensuremath{\varepsilon} \bar{u}_x U^2 \langle x \rangle^{-1-2\delta}\\ \ensuremath{\nonumber}
& - \int \ensuremath{\varepsilon} \bar{u}_{xy} q V_y \langle x \rangle^{-2\delta} + \int \ensuremath{\varepsilon} \bar{u}_y VV_y \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \bar{u} U_x^2 \langle x \rangle^{-2\delta} - \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{xx} U^2 \langle x \rangle^{-2\delta} + \delta \int \ensuremath{\varepsilon} \bar{u}_x U^2 \langle x \rangle^{-1-2\delta} \\ \ensuremath{\nonumber}
& + \int \ensuremath{\varepsilon} \bar{u}_{xyy} q V \langle x \rangle^{-2\delta} + \int \ensuremath{\varepsilon} \bar{u}_{xy} UV \langle x \rangle^{-2\delta} - \frac 1 2\int \ensuremath{\varepsilon} \bar{u}_{yy} V^2 \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \bar{u} U_x^2 \langle x \rangle^{-2\delta} - \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{xx} U^2 \langle x \rangle^{-2\delta} + \delta \int \ensuremath{\varepsilon} \bar{u}_x U^2 \langle x \rangle^{-1-2\delta} \\ \ensuremath{\nonumber}
&+ \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{xxyy} q^2 \langle x \rangle^{-2\delta} - \delta \int \ensuremath{\varepsilon} \bar{u}_{xyy} q^2 \langle x \rangle^{-1 - 2\delta} + \int \ensuremath{\varepsilon} \bar{u}_{xy} UV \langle x \rangle^{-2\delta} \\ \label{juice:0}
& - \frac 1 2\int \ensuremath{\varepsilon} \bar{u}_{yy} V^2 \langle x \rangle^{-2\delta}.
\end{align}
We next expand the second term from \eqref{rhn:1}, which gives
\begin{align} \ensuremath{\nonumber}
- 2 \delta \int \ensuremath{\varepsilon} \ensuremath{\partial}_x (\bar{u} U + \bar{u}_y q) U \langle x \rangle^{-2\delta - 1} = & 2\delta \int \ensuremath{\varepsilon} \bar{u}_y UV \langle x \rangle^{-2\delta - 1} + \delta \int \ensuremath{\varepsilon} \bar{u}_{xyy} q^2 \langle x \rangle^{-2\delta -1} \\ \label{juice:0:1}
& - \delta (2 \delta + 1) \ensuremath{\varepsilon} \int \bar{u} U^2 \langle x \rangle^{-2\delta - 2} - \delta \ensuremath{\varepsilon} \int \bar{u}_x U^2 \langle x \rangle^{-2\delta - 1}.
\end{align}
We now arrive at the third diffusive term,
\begin{align} \ensuremath{\nonumber}
- \int \ensuremath{\varepsilon} v_{yy} V \langle x \rangle^{-2\delta} = &\int \ensuremath{\varepsilon} v_y V_y \langle x \rangle^{-2\delta} = \int \ensuremath{\varepsilon} \ensuremath{\partial}_y (\bar{u} V - \bar{u}_x q) V_y \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \bar{u} V_y^2 \langle x \rangle^{-2\delta} + \int \ensuremath{\varepsilon} \bar{u}_y V V_y \langle x \rangle^{-2\delta} - \int \ensuremath{\varepsilon} \bar{u}_{xy} q V_y \langle x \rangle^{-2\delta} - \int \ensuremath{\varepsilon} \bar{u}_x U V_y \langle x \rangle^{-2\delta}\\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \bar{u} V_y^2 \langle x \rangle^{-2\delta} + \int \ensuremath{\varepsilon} \bar{u}_y V V_y \langle x \rangle^{-2\delta} - \int \ensuremath{\varepsilon} \bar{u}_{xy} q V_y \langle x \rangle^{-2\delta} + \int \ensuremath{\varepsilon} \bar{u}_x U U_x \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \bar{u} V_y^2 \langle x \rangle^{-2\delta} - \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{yy} V^2 \langle x \rangle^{-2\delta} + \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{xxyy} q^2 \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
& - \delta \int \ensuremath{\varepsilon} \bar{u}_{xyy} q^2 \langle x \rangle^{-2\delta - 1} + \int \ensuremath{\varepsilon} \bar{u}_{xy} UV \langle x \rangle^{-2\delta} - \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{xx} U^2 \langle x \rangle^{-2\delta} \\
& + \delta \int \ensuremath{\varepsilon} \bar{u}_x U^2 \langle x \rangle^{-2\delta - 1}.
\end{align}
We now arrive at the final diffusive term, which is
\begin{align} \ensuremath{\nonumber}
-\ensuremath{\varepsilon}^2 \int v_{xx} V \langle x \rangle^{-2\delta} = & \int \ensuremath{\varepsilon}^2 v_x V_x \langle x \rangle^{-2\delta} - 2 \delta \int \ensuremath{\varepsilon}^2 v_x V \langle x \rangle^{-2\delta - 1} \\ \label{two:money}
= & \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x (\bar{u} V - \bar{u}_x q) V_x \langle x \rangle^{-2\delta} - 2\delta \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x (\bar{u} V - \bar{u}_x q) V \langle x \rangle^{-2\delta - 1}.
\end{align}
For the first term above, we have
\begin{align} \ensuremath{\nonumber}
&\int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x (\bar{u} V - \bar{u}_x q) V_x \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon}^2 \bar{u} V_x^2 \langle x \rangle^{-2\delta} - 2 \int \ensuremath{\varepsilon}^2 \bar{u}_{xx} V^2 \langle x \rangle^{-2\delta} + \frac{\ensuremath{\varepsilon}^2}{2} \int \bar{u}_{xxxx} q^2 \langle x \rangle^{-2\delta} \\
& + 2 \delta \ensuremath{\varepsilon}^2 \int \bar{u}_x V^2 \langle x \rangle^{-2\delta - 1} - 2\delta \ensuremath{\varepsilon}^2 \int \bar{u}_{xxx} q^2 \langle x \rangle^{-2\delta - 1} + \delta (2 \delta + 1) \int \ensuremath{\varepsilon}^2 \bar{u}_{xx} q^2 \langle x \rangle^{-2\delta - 2}.
\end{align}
For the second term from \eqref{two:money}, we have
\begin{align*}
&- 2 \delta \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x (\bar{u} V - \bar{u}_x q) V \langle x \rangle^{-2\delta -1} \\
= & - 3 \delta \int \ensuremath{\varepsilon}^2 \bar{u}_x V^2 \langle x \rangle^{-2\delta - 1} - \delta (2\delta + 1) \int \ensuremath{\varepsilon}^2 \bar{u} V^2 \langle x \rangle^{-2\delta - 2} \\
& + \delta \int \ensuremath{\varepsilon}^2 \bar{u}_{xxx} q^2 \langle x \rangle^{-2\delta - 1} - \delta (2\delta+1) \int \ensuremath{\varepsilon}^2 \bar{u}_{xx} q^2 \langle x \rangle^{-2\delta - 2}.
\end{align*}
\ensuremath{\nonumber}oindent \textit{Step 3: $\mathcal{T}_2[V]$ Terms} We have
\begin{align} \ensuremath{\nonumber}
\ensuremath{\varepsilon} \int \mathcal{T}_2[V] V \langle x \rangle^{-2\delta} = & \int \ensuremath{\varepsilon} \bar{u}^2 V_x V \langle x \rangle^{-2\delta} + \int \ensuremath{\varepsilon} \bar{u} \bar{v} V_y V \langle x \rangle^{-2\delta}+ \int \ensuremath{\varepsilon} \bar{u}_{yy} V^2\langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
= & - \int \ensuremath{\varepsilon} \bar{u} \bar{u}_x V^2 \langle x \rangle^{-2\delta} + \frac \ensuremath{\varepsilon} 2 \int \bar{u} \bar{u}_x V^2\langle x \rangle^{-2\delta} - \int \ensuremath{\varepsilon} \bar{v} \bar{u}_y V^2 \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
& + \int \ensuremath{\varepsilon} \bar{u}_{yy} V^2 \langle x \rangle^{-2\delta} + \ensuremath{\varepsilon} \delta \int \bar{u}^2 V^2 \langle x \rangle^{-2\delta - 1} \\ \ensuremath{\nonumber}
= & - \frac \ensuremath{\varepsilon} 2 \int (\bar{u} \bar{u}_x + \bar{v} \bar{u}_y ) V^2 \langle x \rangle^{-2\delta}+ \int \ensuremath{\varepsilon} \bar{u}_{yy} V^2 \langle x \rangle^{-2\delta}+ \ensuremath{\varepsilon} \delta \int \bar{u}^2 V^2 \langle x \rangle^{-2\delta - 1} \\ \label{juice:2}
= & \frac \ensuremath{\varepsilon} 2 \int \bar{u}_{yy} V^2 \langle x \rangle^{-2\delta}+ \ensuremath{\varepsilon} \delta \int \bar{u}^2 V^2 \langle x \rangle^{-2\delta - 1}.
\end{align}
We now add
\begin{align} \ensuremath{\nonumber}
&\int 2 \ensuremath{\varepsilon} \bar{u} U_x^2 \langle x \rangle^{-2\delta} + \int \bar{u} \ensuremath{\varepsilon}^2 V_x^2 \langle x \rangle^{-2\delta} + \ensuremath{\varepsilon} \delta \int \bar{u}^2 V^2 \langle x \rangle^{-2\delta - 1} \\
&- \frac \ensuremath{\varepsilon} 2 \int \bar{u}_{yy} V^2 \langle x \rangle^{-2\delta} + \mathcal{J}_1,
\end{align}
where
\begin{align} \ensuremath{\nonumber}
\mathcal{J}_1 := & \int \Big( - \ensuremath{\varepsilon} \bar{u}_{xx} \langle x \rangle^{-2\delta} + \delta \ensuremath{\varepsilon} \bar{u}_x \langle x \rangle^{-1-2\delta} - \ensuremath{\varepsilon} \delta (2\delta + 1) \bar{u} \langle x \rangle^{-2\delta - 2} \Big) U^2 \\ \ensuremath{\nonumber}
& + \int \Big( - 2 \ensuremath{\varepsilon}^2 \bar{u}_{xx} \langle x \rangle^{-2\delta} - \delta \ensuremath{\varepsilon}^2 \bar{u}_x \langle x \rangle^{-2\delta - 1} - \delta (2 \delta + 1) \ensuremath{\varepsilon}^2 \bar{u} \langle x \rangle^{-2\delta - 2} \Big) V^2 \\ \ensuremath{\nonumber}
& + \int \Big( 2 \ensuremath{\varepsilon} \bar{u}_{xy} \langle x \rangle^{-2\delta} + 2 \delta \ensuremath{\varepsilon} \bar{u}_y \langle x \rangle^{-1 -2 \delta} \Big) UV \\
& + \int \Big( \ensuremath{\varepsilon} \bar{u}_{xxyy} \langle x \rangle^{-2\delta} + \frac{\ensuremath{\varepsilon}^2}{2} \bar{u}_{xxxx} \langle x \rangle^{-2\delta} - \delta \ensuremath{\varepsilon} \bar{u}_{xyy} \langle x \rangle^{-1-2\delta} - \delta \ensuremath{\varepsilon}^2 \bar{u}_{xxx} \langle x \rangle^{-2\delta - 1} \Big) q^2.
\end{align}
\ensuremath{\nonumber}oindent \textit{Step 4: Junk Terms} For the $q$ term from \eqref{sys:sim:2}, we have
\begin{align}
\ensuremath{\varepsilon} \int \alpha q V \langle x \rangle^{-2\delta} = -\ensuremath{\varepsilon} \int \alpha q \ensuremath{\partial}_x q \langle x \rangle^{-2\delta} = \ensuremath{\varepsilon} \int \Big( \frac{\alpha_x}{2} \langle x \rangle^{-2\delta} - \delta \alpha \langle x \rangle^{-2\delta - 1} \Big) q^2
\end{align}
where $\alpha$ is defined in \eqref{def:alpha}.
We now arrive at the $U$ terms from \eqref{sys:sim:2}, which we write
\begin{align}
\int \ensuremath{\varepsilon} (\bar{u} \bar{v}_x - \bar{v} \bar{u}_x) UV \langle x \rangle^{-2\delta}
\end{align}
We thus define the second junk term via
\begin{align}
\mathcal{J}_2 := \ensuremath{\varepsilon} \int (\frac{\alpha_x}{2} \langle x \rangle^{-2\delta} - \delta \alpha \langle x \rangle^{-2\delta - 1}) q^2 + \int \ensuremath{\varepsilon} \Big( \bar{u} \bar{v}_x - \bar{v} \bar{u}_x \Big) UV \langle x \rangle^{-2\delta}.
\end{align}
Consolidating the junk terms, we obtain
\begin{align} \ensuremath{\nonumber}
\mathcal{J} := & \mathcal{J}_1 + \mathcal{J}_2 \\ \ensuremath{\nonumber}
= & \int \Big( - \ensuremath{\varepsilon} \bar{u}_{xx} \langle x \rangle^{-2\delta} + \delta \ensuremath{\varepsilon} \bar{u}_x \langle x \rangle^{-1-2\delta} - \ensuremath{\varepsilon} \delta (2\delta + 1) \bar{u} \langle x \rangle^{-2\delta - 2} \Big) U^2 \\ \ensuremath{\nonumber}
& + \int \Big( - 2 \ensuremath{\varepsilon}^2 \bar{u}_{xx} \langle x \rangle^{-2\delta} - \delta \ensuremath{\varepsilon}^2 \bar{u}_x \langle x \rangle^{-2\delta - 1} - \delta (2 \delta + 1) \ensuremath{\varepsilon}^2 \bar{u} \langle x \rangle^{-2\delta - 2} \Big) V^2 \\ \ensuremath{\nonumber}
& + \int \Big( 2 \ensuremath{\varepsilon} \bar{u}_{xy} \langle x \rangle^{-2\delta} + 2 \delta \ensuremath{\varepsilon} \bar{u}_y \langle x \rangle^{-1 -2 \delta} + \ensuremath{\varepsilon} \bar{u} \bar{v}_x \langle x \rangle^{-2\delta} - \ensuremath{\varepsilon} \bar{v} \bar{u}_x \langle x \rangle^{-2\delta} \Big) UV \\ \ensuremath{\nonumber}
& + \int \Big( \ensuremath{\varepsilon} \bar{u}_{xxyy} \langle x \rangle^{-2\delta} + \frac{\ensuremath{\varepsilon}^2}{2} \bar{u}_{xxxx} \langle x \rangle^{-2\delta} + \frac{\ensuremath{\varepsilon}}{2} \alpha_x \langle x \rangle^{-2\delta} - \delta \ensuremath{\varepsilon} \alpha \langle x \rangle^{-2\delta - 1} \\ \label{whilk:1}
& \qquad \qquad - \delta \ensuremath{\varepsilon} \bar{u}_{xyy} \langle x \rangle^{-1-2\delta} - \delta \ensuremath{\varepsilon}^2 \bar{u}_{xxx} \langle x \rangle^{-2\delta - 1} \Big) q^2.
\end{align}
\sameer{Do we find any further cancellation in the $q$ equation above? I do not....}
\ensuremath{\nonumber}oindent \textit{Step 5: $S_1, S_2$ Terms} We now use the form of $S_1$ in \eqref{S1:form} to compute
\begin{align} \ensuremath{\nonumber}
&\int S_1 U \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
= &\int \bar{u} \tilde{u}_s U_x U \langle x \rangle^{-2\delta} + \int \bar{u} \tilde{v}_s U_y U \langle x \rangle^{-2\delta}+ \int \Big( 2 \bar{u}_y \tilde{v}_s + \bar{u} \tilde{u}_{sx} + \bar{u}_x \tilde{u}_s \Big) U^2 \langle x \rangle^{-2\delta}\\ \ensuremath{\nonumber}
& + \int \Big( \bar{u} \tilde{u}_{sy} - \tilde{u}_s \bar{u}_y \Big) VU \langle x \rangle^{-2\delta}+ \int \Big(\tilde{u}_s \bar{u}_{xy} + \tilde{u}_{sx} \bar{u}_y - \bar{u}_x \tilde{u}_{sy} + \bar{u}_{yy} \tilde{v}_s \Big) q U\langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
= & \int \Big( - \frac 1 2 \ensuremath{\partial}_x (\bar{u} \tilde{u}_s) - \frac 1 2 \ensuremath{\partial}_y ( \bar{u} \tilde{v}_s) + 2 \bar{u}_y \tilde{v}_s + \bar{u} \tilde{u}_{sx} + \bar{u}_x \tilde{u}_s \Big) U^2 \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
& + \int \Big( \bar{u} \tilde{u}_{sy} - \tilde{u}_s \bar{u}_y \Big) VU\langle x \rangle^{-2\delta} - \frac 1 2 \int \ensuremath{\partial}_y \Big(\tilde{u}_s \bar{u}_{xy} + \tilde{u}_{sx} \bar{u}_y - \bar{u}_x \tilde{u}_{sy} + \bar{u}_{yy} \tilde{v}_s \Big) q^2\langle x \rangle^{-2\delta} \\
& + \delta \int \bar{u} \tilde{u}_s U^2 \langle x \rangle^{-2\delta -1}.
\end{align}
Next, we use the form of $S_2$ in \eqref{S2:form} to compute
\begin{align} \ensuremath{\nonumber}
&\int \ensuremath{\varepsilon} S_2 V \langle x \rangle^{-2\delta}\\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \bar{u} \tilde{u}_s V_x V \langle x \rangle^{-2\delta}+ \int \ensuremath{\varepsilon} \bar{u} \tilde{v}_s V_y V \langle x \rangle^{-2\delta}+ \int \ensuremath{\varepsilon} \Big( \bar{u} \tilde{v}_{sy} + \bar{u}_y \tilde{v}_s + 2 \bar{u}_x \tilde{u}_s \Big) V^2 \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
& + \int \ensuremath{\varepsilon} \Big( \bar{u} \tilde{v}_{sx} - \bar{u}_x \tilde{v}_s \Big) U V \langle x \rangle^{-2\delta}+ \int \ensuremath{\varepsilon} \Big( - \bar{u}_x \tilde{v}_{sy} - \tilde{v}_s \bar{u}_{xy} + \bar{u}_y \tilde{v}_{sx} - \tilde{u}_s \bar{u}_{xx} \Big) q V \langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
= & \int \Big( - \frac 1 2 \bar{u}_x \tilde{u}_s - \frac 1 2 \bar{u} \tilde{u}_{sx} - \frac 1 2 \bar{u}_y \tilde{v}_s - \frac 1 2 \bar{u} \tilde{v}_{sy} + \bar{u} \tilde{v}_{sy} + \bar{u}_y \tilde{v}_s + 2 \bar{u}_x \tilde{u}_s \Big) \ensuremath{\varepsilon} V^2\langle x \rangle^{-2\delta} \\ \ensuremath{\nonumber}
& + \int \ensuremath{\varepsilon} \Big( \bar{u} \tilde{v}_{sx} - \bar{u}_x \tilde{v}_s \Big) UV\langle x \rangle^{-2\delta} + \frac 1 2 \int \ensuremath{\varepsilon} \Big( - \bar{u}_x \tilde{v}_{sy} - \tilde{v}_s \bar{u}_{xy} + \bar{u}_y \tilde{v}_{sx} - \tilde{u}_s \bar{u}_{xx} \Big)_x q^2 \langle x \rangle^{-2\delta} \\
& + \delta \int \bar{u} \tilde{u}_s \ensuremath{\varepsilon} V^2 \langle x \rangle^{-2\delta - 1}.
\end{align}
\end{proof}
\fi
\begin{lemma} \label{Lem:2} Let $(U, V)$ be a solution to \eqref{sys:sim:1} - \eqref{sys:sim:3}. Then the following estimate is valid,
\begin{align} \label{basic:X0:est:st}
\| U, V \|_{X_{0}}^2 \lesssim \mathcal{T}_{X_0} + \mathcal{F}_{X_0},
\end{align}
where
\begin{align} \label{def:TX0}
&\mathcal{T}_{X_0} := \int \mathcal{N}_1 U g(x)^2 + \int \ensuremath{\varepsilon} \mathcal{N}_2 (V g(x)^2 + \frac{1}{100} q \langle x \rangle^{-1 - \frac{1}{100}}), \\ \label{def:FX0}
&\mathcal{F}_{X_0} := \int F_R U g(x)^2 + \int \ensuremath{\varepsilon} G_R (V g(x)^2 + \frac{1}{100} q \langle x \rangle^{-1 - \frac{1}{100}}).
\end{align}
\end{lemma}
\begin{proof} We apply the multiplier
\begin{align} \label{mult:X0}
\bold{M}_{X_0} := [U g^2, \ensuremath{\varepsilon} V g^2 - \ensuremath{\varepsilon} q \ensuremath{\partial}_x (g^2)] = [U g^2, \ensuremath{\varepsilon} V g^2 + \ensuremath{\varepsilon} \frac{1}{100} q \langle x \rangle^{-1 - \frac{1}{100}}]
\end{align}
to the system \eqref{sys:sim:1} - \eqref{sys:sim:3}. We note that $\text{div}_\ensuremath{\varepsilon}(\bold{M}_{X_0}) = 0$, and moreover that the normal component vanishes at $y = 0, y = \infty$. Therefore, the pressure term vanishes via
\begin{align} \ensuremath{\nonumber}
&\int P_x U g^2 + \int \frac{P_y}{\ensuremath{\varepsilon}} (\ensuremath{\varepsilon} V g^2 + \ensuremath{\varepsilon} \frac{1}{100} q \langle x \rangle^{-1 - \frac{1}{100}} ) = - \int P \text{div}_\ensuremath{\varepsilon} (\bold{M}_{X_0})= 0.
\end{align}
\noindent \textit{Step 1: $\mathcal{T}_1[U]$ Terms:} We now arrive at the transport terms from $\mathcal{T}_1$, defined in \eqref{def:T1}, which produce
\begin{align} \ensuremath{\nonumber}
\int \mathcal{T}_1[U] U g(x)^2 \,\mathrm{d} y \,\mathrm{d} x = &\int (\bar{u}^2 U_x + \bar{u} \bar{v} U_y + 2 \bar{u}^0_{pyy} U) U g^2 \,\mathrm{d} y \,\mathrm{d} x \\ \ensuremath{\nonumber}
= & \frac{1}{200}\int \bar{u}^2 U^2 \langle x \rangle^{-1-\frac{1}{100}} - \int \bar{u} \bar{u}_x U^2 g^2 - \frac 12 \int \ensuremath{\partial}_y (\bar{u} \bar{v}) U^2 g^2 + \int 2 \bar{u}^0_{pyy} U^2 g^2 \\ \ensuremath{\nonumber}
= & \frac{1}{200} \int \bar{u}^2 U^2 \langle x \rangle^{-1 - \frac{1}{100}} - \frac 1 2 \int (\bar{u} \bar{u}_x + \bar{v} \bar{u}_y) U^2 g^2 + \int 2 \bar{u}^0_{pyy} U^2 g^2 \\ \label{T1:cont}
= & \frac{1}{200} \int \bar{u}^2 U^2 \langle x \rangle^{-1 - \frac{1}{100}} + \frac 3 2 \int \bar{u}^0_{pyy} U^2 g^2 - \frac 1 2 \int \zeta U^2 g^2 =: \sum_{i = 1}^3 T^0_i,
\end{align}
where we have invoked \eqref{def:zeta}. The term $T^0_1$ is a positive contribution towards the $X_0$ norm. The term $T^0_2$ will be cancelled out below, see \eqref{cancel:now:0}, and so we do not need to estimate it now. The third term from \eqref{T1:cont}, $T^0_3$, will be estimated via
\begin{align} \label{pourquoi}
\Big| \int \zeta U^2 g^2 \Big| \lesssim & \sqrt{\ensuremath{\varepsilon}} \int U^2 \langle x \rangle^{-(1+\frac{1}{50})} \lesssim \sqrt{\ensuremath{\varepsilon}} \Big( \| \bar{u} U \langle x \rangle^{-\frac 1 2 - \frac{1}{200}} \|^2 + \| \sqrt{\bar{u}} U_y \|^2 \Big) \lesssim \sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_0}^2,
\end{align}
where we have invoked the pointwise estimates \eqref{S:0}, as well as the Hardy-type inequality \eqref{Hardy:1}.
We note that an analogous estimate applies to the $q$ term from \eqref{sys:sim:1}, which we record now
\begin{align} \label{hardy:q:est}
\Big| \int \zeta_y q U g^2 \Big| \lesssim \sqrt{\ensuremath{\varepsilon}} \| \frac{q}{y} \langle x \rangle^{- \frac 1 2 - \frac{1}{100}} \| \| U \langle x \rangle^{- \frac 1 2 - \frac{1}{100}} \| \lesssim \sqrt{\ensuremath{\varepsilon}} \| U \langle x \rangle^{- \frac 1 2 - \frac{1}{100}} \|^2,
\end{align}
where we have invoked the pointwise estimates \eqref{S:0} on the quantity $|y \ensuremath{\partial}_y \zeta|$, and used the standard Hardy inequality in $y$, admissible as $q|_{y = 0} = 0$. Estimate \eqref{hardy:q:est} concludes in the same manner as \eqref{pourquoi}.
\noindent \textit{Step 2: Diffusive Terms} We would now like to treat the diffusive terms from \eqref{sys:sim:1} - \eqref{sys:sim:2}. We group the term $\bar{u}^0_{pyyy}q$ from \eqref{sys:sim:1} with this treatment for the purpose of achieving a cancellation. More precisely, we will begin by treating the following quantity:
\begin{align} \ensuremath{\nonumber}
\int \Big( - \ensuremath{\partial}_y^2 u + \bar{u}^0_{pyyy} q \Big)U g^2 = & \int u_y U_y g^2 + \int_{y = 0} u_y U g^2 \,\mathrm{d} x - \frac 1 2 \int \bar{u}^0_{pyyyy} q^2 g^2 \\ \ensuremath{\nonumber}
= & \int \ensuremath{\partial}_y (\bar{u} U + \bar{u}_y q) U_y g^2 + \int_{y = 0} u_y U g^2 \,\mathrm{d} x - \frac 1 2 \int \bar{u}^0_{pyyyy} q^2 g^2 \\ \ensuremath{\nonumber}
= & \int \bar{u} U_y^2 g^2 + 2 \bar{u}_y U U_y g^2 + \bar{u}_{yy} q U_y g^2 + \int_{y = 0} u_y U g^2 \,\mathrm{d} x \\ \ensuremath{\nonumber}
& - \frac 1 2 \int \bar{u}^0_{pyyyy} q^2 g^2\\ \ensuremath{\nonumber}
= & \int \bar{u} U_y^2 g^2 - 2\bar{u}_{yy} U^2 g^2+ \frac 1 2 \bar{u}_{yyyy} q^2 g^2 - \frac 1 2 \bar{u}^0_{pyyyy}q^2 g^2\\ \ensuremath{\nonumber}
& - \int_{y = 0} \bar{u}_y U^2 g^2 + \int_{y = 0} u_y Ug^2 \,\mathrm{d} x \\ \ensuremath{\nonumber}
= & \int \bar{u} U_y^2 g^2- 2\int \bar{u}_{yy} U^2 g^2 + \frac 1 2 \int \ensuremath{\partial}_y^4 (\bar{u} - \bar{u}^0_p) q^2 g^2+ \int_{y = 0} \bar{u}_y U^2 g^2 \,\mathrm{d} x \\ \label{earl:2}
= & D^0_1 + D^0_2 + D^0_3 + D^0_4.
\end{align}
We have used that
\begin{align}
u_y|_{y = 0} = ( \bar{u} U_y + 2 \bar{u}_y U + \bar{u}_{yy}q)|_{y = 0} = 2 \bar{u}_y U|_{y = 0}.
\end{align}
Both $D^0_1, D^0_4$ are positive contributions, thanks to \eqref{prime:pos}. We now note that the main contribution from the $D^0_2$ term cancels the contribution $T^0_2$, and generates a positive damping term of
\begin{align} \ensuremath{\nonumber}
T^0_2 + D^0_2 = & \frac 3 2 \int \bar{u}^0_{pyy} U^2 g^2 - 2 \int \bar{u}_{yy} U^2 g^2 = - \frac 1 2 \int \bar{u}^0_{pyy} U^2 g^2 - 2 \int (\bar{u}_{yy} - \bar{u}^0_{pyy}) U^2 g^2 \\ \label{cancel:now:0}
= &- \frac 1 2 \int \ensuremath{\partial}_{yy} \bar{u}_\ast U^2 g^2 - \frac 1 2 \int \ensuremath{\partial}_y^2 (\bar{u}^0_{p} - \bar{u}_\ast) U^2 g^2 - 2 \int (\bar{u}_{yy} - \bar{u}^0_{pyy}) U^2 g^2 \\ \ensuremath{\nonumber}
= :& D^0_{2,1} + D^0_{2,2} + D^0_{2,3}.
\end{align}
The first term on the right-hand side above, due to $\bar{u}_\ast$, is a positive contribution due to \eqref{Blas:prop:2}. For the term $D^0_{2,2}$, we estimate via
\begin{align*}
| \frac 1 2 \int \ensuremath{\partial}_y^2 (\bar{u}^0_{p} - \bar{u}_\ast) U^2 g^2| \lesssim \delta_\ast \int \langle x \rangle^{- \frac 5 4+ \sigma_\ast} U^2 \lesssim \delta_\ast \|U, V \|_{X_0}^2,
\end{align*}
where we have appealed to estimate \eqref{blas:conv:1}.
The remaining contribution, $D^0_{2,3}$, we estimate by invoking the pointwise estimate $|\bar{u}_{yy} - \bar{u}^0_{pyy}| \lesssim \sqrt{\ensuremath{\varepsilon}} \langle x \rangle^{-1-\frac{1}{50}}$ due to \eqref{est:ring:1}. The third term from \eqref{earl:2}, $D^0_3$, is estimated by
\begin{align}
|\int \ensuremath{\partial}_y^4 (\bar{u} - \bar{u}^0_p) q^2 g^2| \lesssim \sqrt{\ensuremath{\varepsilon}} \| \frac{q}{y} \langle x \rangle^{- \frac 1 2 - \frac{1}{100}} \|^2 \lesssim \sqrt{\ensuremath{\varepsilon}} \| U \langle x \rangle^{- \frac 1 2 - \frac{1}{100}} \|^2 \lesssim \sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_0}^2,
\end{align}
where we have invoked estimate \eqref{est:ring:1} and subsequently the standard Hardy inequality in $y$, as $q|_{y = 0} = 0$.
The next diffusive term is
\begin{align} \label{home:home:1}
- \int \ensuremath{\varepsilon} u_{xx} U g(x)^2 = \int \ensuremath{\varepsilon} u_x U_x g(x)^2 + 2 \int \ensuremath{\varepsilon} u_x U g(x) g'(x).
\end{align}
upon using that $U|_{x = 0} = 0$. The $g'$ term above is easily controlled by $\sqrt{\ensuremath{\varepsilon}} \| U \langle x \rangle^{- \frac 1 2 - \frac{1}{200}} \|^2 + \sqrt{\ensuremath{\varepsilon}} \| \sqrt{\ensuremath{\varepsilon}} \sqrt{\bar{u}} U_x g \|^2 + \sqrt{\ensuremath{\varepsilon}} \| \sqrt{\ensuremath{\varepsilon}}V \langle x \rangle^{- \frac 1 2 - \frac{1}{200}} \|^2$ upon consulting the definition of $g$ to compute $g'$, and definition \eqref{formula:1} to expand $u_x$ in terms of $(U, V, q)$.
We now address the first term on the right-hand side of \eqref{home:home:1}, which yields,
\begin{align} \ensuremath{\nonumber}
\int \ensuremath{\varepsilon} u_x U_x g^2 =& \int \ensuremath{\varepsilon} \ensuremath{\partial}_x (\bar{u} U + \bar{u}_y q) U_x g^2 \\ \ensuremath{\nonumber}
= &\int \ensuremath{\varepsilon} \bar{u} U_x^2 g^2 + \int \ensuremath{\varepsilon} \bar{u}_x U U_x g^2 + \int \ensuremath{\varepsilon} \bar{u}_{xy} q U_x g^2 - \int \ensuremath{\varepsilon} \bar{u}_y U_xV g^2 \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \bar{u} U_x^2 g^2- \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{xx} U^2g^2 - \ensuremath{\varepsilon} \int \bar{u}_x gg' U^2 - \int \ensuremath{\varepsilon} \bar{u}_{xy} q V_y g^2 + \int \ensuremath{\varepsilon} \bar{u}_y VV_y g^2 \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \bar{u} U_x^2g^2 - \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{xx} U^2 g^2 - \ensuremath{\varepsilon} \int \bar{u}_x gg' U^2 + \int \ensuremath{\varepsilon} \bar{u}_{xyy} q V g^2 + \int \ensuremath{\varepsilon} \bar{u}_{xy} UV g^2 \\ \ensuremath{\nonumber}
& - \frac 1 2\int \ensuremath{\varepsilon} \bar{u}_{yy} V^2 g^2\\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \bar{u} U_x^2 g^2- \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{xx} U^2g^2 + \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{xxyy} q^2 g^2 + \int \ensuremath{\varepsilon} \bar{u}_{xy} UV g^2 \\ \label{juice:0}
&- \frac 1 2\int \ensuremath{\varepsilon} \bar{u}_{yy} V^2 g^2 + \int \ensuremath{\varepsilon} \bar{u}_{xyy} q^2 gg' - \ensuremath{\varepsilon} \int \bar{u}_x g g' U^2 = \sum_{i = 1}^7 D^1_i.
\end{align}
We observe that $D^1_1$ is a positive contribution. We estimate $D^1_7$ via
\begin{align}
|\ensuremath{\varepsilon} \int \bar{u}_x gg' U^2| \lesssim \ensuremath{\varepsilon} \| U \langle x \rangle^{-1} \|^2 \lesssim \ensuremath{\varepsilon} \| U, V \|_{X_0}^2,
\end{align}
and similarly for $D^1_6$, where we have invoked the pointwise decay estimate on $\bar{u}_x$ in \eqref{prof:u:est}. The remaining terms, $D^1_2,...,D^1_5$ will be placed into the term $\mathcal{J}_1$ defined below in \eqref{error:J1}.
We now arrive at the $v_{yy}$ diffusive term from \eqref{sys:sim:2}, which reads after integrating by parts in $y$,
\begin{align}
- \int v_{yy} (\ensuremath{\varepsilon} V g^2 + \ensuremath{\varepsilon} \frac{1}{100} q \langle x \rangle^{-1 - \frac{1}{100}}) = \int \ensuremath{\varepsilon} v_y V_y g^2 - \frac{1}{100} \int \ensuremath{\varepsilon} v U_y \langle x \rangle^{- 1 - \frac{1}{100}}.
\end{align}
The second contribution on the right-hand side above is easily estimated by appealing to \eqref{formula:1} and estimate \eqref{prof:v:est}, which generates
\begin{align}
|\int \ensuremath{\varepsilon} (\bar{u} V - \bar{u}_x q) U_y \langle x \rangle^{- 1- \frac{1}{100}}| \lesssim \sqrt{\ensuremath{\varepsilon}} \Big( \| V \langle x \rangle^{- \frac 1 2 - \frac{1}{200}} \| + \sqrt{\ensuremath{\varepsilon}} \| \bar{u}_{x} y \|_\infty \| U \langle x \rangle^{- \frac 1 2 - \frac{1}{200}} \| \Big)\| \sqrt{\bar{u}} U_y \|.
\end{align}
For the first contribution on the right-hand side, we have
\begin{align} \ensuremath{\nonumber}
\int \ensuremath{\varepsilon} v_y V_y g^2= & \int \ensuremath{\varepsilon} \ensuremath{\partial}_y (\bar{u} V - \bar{u}_x q) V_y g^2 \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \bar{u} V_y^2 g^2+ \int \ensuremath{\varepsilon} \bar{u}_y V V_y g^2- \int \ensuremath{\varepsilon} \bar{u}_{xy} q V_y g^2 - \int \ensuremath{\varepsilon} \bar{u}_x U V_y g^2 \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \bar{u} V_y^2 g^2 - \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{yy} V^2 g^2 + \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{xxyy} q^2 g^2+ \int \ensuremath{\varepsilon} \bar{u}_{xy} UV g^2 \\ \label{tennis:1}
&- \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{xx} U^2 g^2 + \int \ensuremath{\varepsilon} \bar{u}_{xyy} q^2 gg' - \int \ensuremath{\varepsilon} \bar{u}_x U^2 gg' = \sum_{i = 1}^7 D^2_i.
\end{align}
We observe that $D^2_1$ is a positive contribution, whereas $D^2_6, D^2_7$ are estimated identically to $D^1_6, D^1_7$. The remaining terms, $D^2_2,...,D^2_5$ will be placed into the term $\mathcal{J}_1$, defined below in \eqref{error:J1}.
We now arrive at the final diffusive term, for which we first integrate by parts using the boundary condition $V|_{x = 0} = q|_{x = 0} = 0$,
\begin{align} \label{bup:1}
&\int - \ensuremath{\varepsilon}^2 v_{xx} V g^2 - 2 \int \ensuremath{\varepsilon}^2 v_{xx} q gg' = \int \ensuremath{\varepsilon}^2 v_x V_x g^2 + \int 2 \ensuremath{\varepsilon}^2 v_x q (gg')'.
\end{align}
The second term above, which contains a $g'$ factor, is easily controlled by a factor of $\sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_0}^2$ by again appealing to the definition of $g$ and \eqref{formula:1}. For the first term on the right-hand side of \eqref{bup:1}, we have
\begin{align} \ensuremath{\nonumber}
\int \ensuremath{\varepsilon}^2 v_x V_x g^2= &\int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x (\bar{u} V - \bar{u}_x q) V_x g^2\\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon}^2 (\bar{u} V_x + 2 \bar{u}_x V - \bar{u}_{xx}q ) V_x g^2 \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon}^2 \bar{u} V_x^2 g^2 - 2 \int \ensuremath{\varepsilon}^2 \bar{u}_{xx} V^2 g^2+ \frac 1 2\int \ensuremath{\varepsilon}^2 \bar{u}_{xxxx} q^2 g^2 \\ \label{juice:1}
& + \int \ensuremath{\varepsilon}^2 \bar{u}_{xx} q^2 (gg')' + \int 2 \ensuremath{\varepsilon}^2 \bar{u}_{xxx} q^2 gg' - \int 2 \ensuremath{\varepsilon}^2 \bar{u}_{xx} V^2 g^2 = \sum_{i = 1}^6 D^3_i.
\end{align}
The terms with $g'$ above are easily controlled by a factor of $\sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_0}^2$ by again appealing to the definition of $g$ and estimate \eqref{prof:u:est}.
We now expand the damping terms, which are the terms $D^1_5$ and $D^2_2$,
\begin{align} \ensuremath{\nonumber}
D^1_5 + D^2_2 = &- \int \ensuremath{\varepsilon} \bar{u}_{yy} V^2 g^2 = - \int \ensuremath{\varepsilon} \bar{u}^0_{pyy} V^2 g^2 - \int \ensuremath{\varepsilon} (\bar{u}_{yy} - \bar{u}^0_{pyy}) V^2 g^2.
\end{align}
We estimate the latter term above via an appeal to \eqref{est:ring:1}, which gives
\begin{align}
\Big| \int \ensuremath{\varepsilon} (\bar{u}_{yy} - \bar{u}^0_{pyy}) V^2 g^2 \Big| \lesssim \sqrt{\ensuremath{\varepsilon}} \| \sqrt{\ensuremath{\varepsilon}} V \langle x \rangle^{- \frac 1 2 - \frac{1}{100}} \|^2 \lesssim \sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_0}^2.
\end{align}
We now consolidate the remaining terms from \eqref{juice:0}, \eqref{tennis:1}, \eqref{juice:1}. Specifically, we obtain
\begin{align*}
D^1_2 + D^1_3 + D^1_4 + D^2_3 + D^2_4 + D^2_5 + D^3_2 + D^3_3 + D^3_6 = \mathcal{J}_1,
\end{align*}
where we have defined
\begin{align} \label{error:J1}
\mathcal{J}_1 := - \int \ensuremath{\varepsilon} \bar{u}_{xx} U^2 g^2 - 2 \int \ensuremath{\varepsilon}^2 \bar{u}_{xx} V^2 g^2+ \int 2 \ensuremath{\varepsilon} \bar{u}_{xy} UV g^2+ \int \Big( \ensuremath{\varepsilon} \bar{u}_{xxyy} + \frac 1 2 \ensuremath{\varepsilon}^2 \bar{u}_{xxxx} \Big) q^2 g^2.
\end{align}
To estimate these contributions, we simply use the fact that $\| \bar{u}_{xx} \langle x \rangle^2 \|_\infty \lesssim 1$, $\| \bar{u}_{xy} \langle x \rangle^{\frac 3 2} \|_\infty \lesssim 1$, $\| y^2 (\bar{u}_{xxyy} + \bar{u}^P_{xxxx}) \langle x \rangle^2 \|_\infty \lesssim 1$, and $\| \bar{u}_{E xxxx} \|_{L^\infty_y} \lesssim \sqrt{\ensuremath{\varepsilon}} \langle x \rangle^{-\frac 9 2}$ according to the estimates \eqref{prof:u:est} - \eqref{prof:v:est}.
\noindent \textit{Step 3: $\mathcal{T}_2[V]$ Terms} We now treat the terms arising from $\mathcal{T}_2[V]$, which has been defined in \eqref{def:T2}. Specifically, the result of applying the multiplier \eqref{mult:X0} is
\begin{align} \ensuremath{\nonumber}
&\int \mathcal{T}_2[V] (\ensuremath{\varepsilon} V g^2 + \ensuremath{\varepsilon} (.01) q \langle x \rangle^{-1.01} ) \\ \ensuremath{\nonumber}
=& \int \ensuremath{\varepsilon} \mathcal{T}_2[V] V g^2 + \ensuremath{\varepsilon} (.01) \int \bar{u}^2 V_x q \langle x \rangle^{-1.01} + \ensuremath{\varepsilon} (.01) \int (\bar{u} \bar{v} V_y + \bar{u}^0_{pyy} V) q \langle x \rangle^{-1.01} \\ \label{out:1}
= :& \tilde{T}^1_1 + \tilde{T}^1_2 + \tilde{T}^1_3.
\end{align}
For the first term on the right-hand side of \eqref{out:1}, $\tilde{T}^1_1$, we have
\begin{align} \ensuremath{\nonumber}
\tilde{T}^1_1 = \int \ensuremath{\varepsilon} \mathcal{T}_2[V] V g^2 = & \int \ensuremath{\varepsilon} \bar{u}^2 V_x V g^2 + \int \ensuremath{\varepsilon} \bar{u} \bar{v} V_y V g^2 + \int \ensuremath{\varepsilon} \bar{u}^0_{pyy} V^2 g^2 \\ \ensuremath{\nonumber}
= & - \int \ensuremath{\varepsilon} \bar{u} \bar{u}_x V^2 g^2 - \frac 1 2 \int \ensuremath{\varepsilon} (\bar{u} \bar{v})_y V^2 g^2 + \int \ensuremath{\varepsilon} \bar{u}^0_{pyy} V^2 g^2 - \int \ensuremath{\varepsilon} \bar{u}^2 V^2 gg' \\ \ensuremath{\nonumber}
= & - \frac 1 2 \int \ensuremath{\varepsilon} (\bar{u} \bar{u}_x + \bar{v} \bar{u}_y) V^2 g^2 + \int \ensuremath{\varepsilon} \bar{u}^0_{pyy} V^2 g^2 - \int \ensuremath{\varepsilon} \bar{u}^2 V^2 gg' \\ \label{juice:2}
= & \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}^0_{pyy} V^2 g^2 - \frac 1 2 \int \ensuremath{\varepsilon} \zeta V^2 g^2 + \frac{1}{200} \int \ensuremath{\varepsilon} \bar{u}^2 V^2 \langle x \rangle^{-1 - \frac{1}{100}} = \sum_{i = 1}^3 T^1_i,
\end{align}
where we have invoked \eqref{def:zeta}. The term $T^1_2$ is estimated in an analogous manner to \eqref{pourquoi}, whereas $T^1_3$ is a positive contribution to the $X_0$ norm.
We now need to address the contribution of $\tilde{T}^1_2$. Integrating by parts, we get
\begin{align} \ensuremath{\nonumber}
\tilde{T}^1_2 = &.01 \int \ensuremath{\varepsilon} \bar{u}^2 V^2 \langle x \rangle^{-1.01} -2 (.01) \int \ensuremath{\varepsilon} \bar{u} \bar{u}_x V q \langle x \rangle^{-1.01} + (.01)(1.01) \int \ensuremath{\varepsilon} \bar{u}^2 V q \langle x \rangle^{-2.01} \\ \ensuremath{\nonumber}
= & .01 \int \ensuremath{\varepsilon} \bar{u}^2 V^2 \langle x \rangle^{-1.01} -2 (.01) \int \ensuremath{\varepsilon} \bar{u} \bar{u}_x V q \langle x \rangle^{-1.01} \\ \label{juice:3}
& - \frac 1 2 (.01)(1.01) (2.01) \int \ensuremath{\varepsilon} \bar{u}^2 q^2 \langle x \rangle^{-3.01} - \frac 1 2 (.01)(1.01) (2.01) \int \ensuremath{\varepsilon} \bar{u} \bar{u}_x q^2 \langle x \rangle^{-2.01} \\ \ensuremath{\nonumber}
= & \tilde{T}^1_{2,1} + \tilde{T}^1_{2,2} + \tilde{T}^1_{2,3} + \tilde{T}^1_{2,4}.
\end{align}
Of these, the third term, $\tilde{T}^1_{2,3}$ is very dangerous due to a lack of decay in $z$ for the coefficient. To treat it, we combine $\tilde{T}^1_{2,1}, \tilde{T}^1_{2,3}$ and $T^1_3$ to obtain the expression
\begin{align} \ensuremath{\nonumber}
\tilde{T}^1_{2,1} + \tilde{T}^1_{2,3} + T^1_3 = &\frac{3}{2}(.01) \int \ensuremath{\varepsilon} \bar{u}^2 V^2 \langle x \rangle^{-1.01} - \frac{(.01)(1.01)(2.01)}{2} \int \ensuremath{\varepsilon} \bar{u}^2 q^2 \langle x \rangle^{-3.01} \\ \ensuremath{\nonumber}
\ge &\Big(\frac{3}{2}(.01) - \frac{(.01)(1.01)(2.01)}{2} \frac{1}{1.01} \Big) \int \ensuremath{\varepsilon} \bar{u}^2 V^2 \langle x \rangle^{-1.01} - \frac{(.01)(1.01)(2.01)}{2} \frac{2}{1.01} \int \langle x \rangle^{-2.01} \bar{u} \bar{u}_x q^2 \\ \label{precision:1}
\ge & \frac{.01}{2} \int \ensuremath{\varepsilon} \bar{u}^2 V^2 \langle x \rangle^{-1.01} - (.01)(2.01) \int \ensuremath{\varepsilon} \langle x \rangle^{-2.01} \bar{u} \bar{u}_x q^2,
\end{align}
where we have used the precise constants appearing in \eqref{precise:1}.
We now estimate the error term from \eqref{precision:1} by now splitting $\bar{u} = \bar{u}_P + \bar{u}_E$, according to \eqref{split:split:1}. First, we have
\begin{align}
|\int \ensuremath{\varepsilon} \langle x \rangle^{-2.01} \bar{u} \bar{u}_{Px} q^2| \lesssim \| \bar{u}_{Px} y^2 \|_\infty \ensuremath{\varepsilon} \| \frac{q}{y} \langle x \rangle^{-1.01} \|^2 \lesssim \ensuremath{\varepsilon} \| U \langle x \rangle^{-1.01} \|^2 \lesssim \ensuremath{\varepsilon} \| U, V \|_{X_0}^2,
\end{align}
where we have used estimate \eqref{est:Pr:piece}. For the $\bar{u}_E$ component, we may use the small amplitude and importantly the enhanced decay in $x$ from \eqref{est:Eul:piece} to obtain
\begin{align}
|\int \ensuremath{\varepsilon} \langle x \rangle^{-2.01} \bar{u} \bar{u}_{Ex} q^2| \lesssim \ensuremath{\varepsilon}^{\frac 3 2} | \int \langle x \rangle^{- 3.51} q^2| \lesssim \ensuremath{\varepsilon}^{\frac 1 2} \| \sqrt{\ensuremath{\varepsilon}}V \langle x \rangle^{- \frac 3 4} \|^2 \lesssim \sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_0}^2.
\end{align}
The error terms $\tilde{T}^1_{2,2}$ and $\tilde{T}^1_{2,4}$ are estimated in a nearly identical manner.
We now address the third term from \eqref{out:1}, $\tilde{T}^1_3$, which upon integration by parts in $y$ gives
\begin{align} \ensuremath{\nonumber}
&\ensuremath{\varepsilon} (.01) |\int (\bar{u} \bar{v} V_y + \bar{u}^0_{pyy} V) q \langle x \rangle^{-1.01}| \lesssim \ensuremath{\varepsilon} | \int (\bar{u}^0_{pyy} - (\bar{u} \bar{v})_y) V q \langle x \rangle^{-1.01}| + \ensuremath{\varepsilon} | \int \bar{u} \bar{v} V U \langle x \rangle^{-1.01}| \\
\lesssim & \sqrt{\ensuremath{\varepsilon}} \Big( \| (\bar{u}^0_{pyy} - (\bar{u} \bar{v})_y) y \langle x \rangle^{\frac 1 2} \|_\infty + \| \bar{v} \langle x \rangle^{\frac 1 2} \|_\infty \Big) \| U \langle x \rangle^{- \frac 3 4} \| \| \sqrt{\ensuremath{\varepsilon}} V \langle x \rangle^{- \frac 3 4} \| \lesssim \sqrt{\ensuremath{\varepsilon}} \|U, V \|_{X_0}^2,
\end{align}
where we have appealed to the estimates \eqref{prof:u:est} as well as \eqref{Hardy:three:a} - \eqref{Hardy:four:a}.
\noindent \textit{Step 4: Remaining terms in \eqref{sys:sim:2}} We first treat the $\alpha U$ terms from \eqref{sys:sim:2}, which we estimate via
\begin{align}
\Big| \int \ensuremath{\varepsilon} \alpha UV g^2\Big| \lesssim \ensuremath{\varepsilon} \int \langle x \rangle^{- \frac 3 2}|UV| \lesssim \sqrt{\ensuremath{\varepsilon}} \| U \langle x \rangle^{- \frac 3 4} \| \| \sqrt{\ensuremath{\varepsilon}} V \langle x \rangle^{- \frac 3 4} \|.
\end{align}
Above we have relied on the coefficient estimate in \eqref{S:1}. The $\ensuremath{\partial}_y \alpha q$ term from \eqref{sys:sim:2} as well as the corresponding contributions from the $q$ term in the multiplier \eqref{mult:X0} work in an identical manner. This concludes the proof of Lemma \ref{Lem:2}.
\end{proof}
\subsection{$\frac 1 2$ Level Estimates}
We now provide estimates on the two half-level norms, $\|U, V \|_{X_{\frac 1 2}}$ and $\|U, V \|_{Y_{\frac 1 2}}$.
\begin{lemma} \label{Lem:3} Let $(U, V)$ be a solution to \eqref{sys:sim:1} - \eqref{BC:UVYW}. Then for $0 < \delta \ll 1$,
\begin{align} \label{Xh:right}
\| U, V \|_{X_{\frac 1 2}}^2 \lesssim C_\delta \| U, V \|_{X_0}^2 + \delta \| U, V \|_{X_1}^2 + \delta \| U, V \|_{Y_{\frac 1 2}}^2 + \mathcal{T}_{X_{\frac 1 2}} + \mathcal{F}_{X_{\frac 1 2}},
\end{align}
where
\begin{align} \label{TX12:spec}
\mathcal{T}_{X_{\frac 1 2}} := & \int \mathcal{N}_1 U_x x \phi_1^2 + \int \mathcal{N}_2 ( \ensuremath{\varepsilon} V_x x \phi_1(x)^2 + \ensuremath{\varepsilon} V \phi_1(x)^2 + 2\ensuremath{\varepsilon} V x \phi_1 \phi_1'), \\ \label{FX12:spec}
\mathcal{F}_{X_{\frac 1 2}} := & \int F_R U_x x \phi_1^2 + \int G_R ( \ensuremath{\varepsilon} V_x x \phi_1(x)^2 + \ensuremath{\varepsilon} V \phi_1(x)^2 + 2\ensuremath{\varepsilon} V x \phi_1 \phi_1').
\end{align}
\end{lemma}
\begin{proof} We apply the weighted in $x$ vector-field
\begin{align} \label{mult:2}
\bold{M}_{X_{\frac 1 2}} := [U_x x \phi_1(x)^2 , \ensuremath{\varepsilon} V_x x \phi_1(x)^2 + \ensuremath{\varepsilon} V \phi_1(x)^2 + 2\ensuremath{\varepsilon} V x \phi_1 \phi_1']
\end{align}
as a multiplier to \eqref{sys:sim:1} - \eqref{sys:sim:3}. We first of all notice that $\text{div}_\ensuremath{\varepsilon}(\bold{M}_{X_{\frac 1 2}}) = 0$, and thus,
\begin{align} \ensuremath{\nonumber}
&\int P_x U_x x \phi_1^2 + \int \frac{P_y}{\ensuremath{\varepsilon}} (\ensuremath{\varepsilon} V_x x \phi_1^2 + \ensuremath{\varepsilon} V \phi_1^2 + 2 \ensuremath{\varepsilon} V x \phi_1 \phi_1') = - \int P \text{div}_\ensuremath{\varepsilon}(\bold{M}_{X_{\frac 1 2}}) = 0,
\end{align}
where we use that $V|_{y = 0} = V_x|_{y = 0} = 0$ and the cutoff $\phi_1$ to eliminate any contributions from $\{x = 0\}$.
\noindent \textit{Step 1: $\mathcal{T}_1[U]$ terms:} We address the terms from $\mathcal{T}_1$ which produces
\begin{align} \ensuremath{\nonumber}
\int \mathcal{T}_1[U] U_x x \phi_1^2 = & \int \bar{u}^2 U_x^2 x \phi_1^2 + \int \bar{u} \bar{v} U_y U_x x \phi_1^2+ \int 2 \bar{u}_{yy} U U_x x \phi_1^2 \\ \label{speak:1}
= & \int \bar{u}^2 U_x^2 x \phi_1^2+ \int \bar{u} \bar{v} U_y U_x x \phi_1^2- \int \bar{u}_{yy} U^2 \phi_1^2- \int \bar{u}_{xyy} x U^2 \phi_1^2 \\ \ensuremath{\nonumber}
& - 2\int \bar{u}_{yy} U^2 x \phi_1 \phi_1' = :\sum_{i = 1}^5 T^{(2)}_i.
\end{align}
First, we observe $T^{(2)}_1$ is a positive contribution. We estimate $T^{(2)}_2$ via
\begin{align}
&|\int \bar{v} \bar{u} U_y U_x x \phi_1^2 | \lesssim \| \frac{\bar{v}}{\bar{u}} x^{\frac 1 2} \|_\infty \| \sqrt{\bar{u}} U_y \| \| \bar{u} U_x x^{\frac 1 2} \phi_1 \| \le \delta \| \bar{u} U_x x^{\frac 1 2} \phi_1 \|^2 + C_\delta \| \sqrt{\bar{u}} U_y \|^2,
\end{align}
where above we have invoked estimate \eqref{prof:v:est} for $\bar{v}$.
For $T^{(2)}_3$, we need to split $U = U(x, 0) + (U - U(x, 0))$, and subsequently estimate via
\begin{align} \ensuremath{\nonumber}
|\int \bar{u}_{yy} U^2 \phi_1^2 | \lesssim & \int |\bar{u}_{yy}| (U - U(x, 0))^2 \phi_1^2 + \int |\bar{u}_{yy}| U(x, 0)^2 \phi_1^2 \\ \ensuremath{\nonumber}
\lesssim & \| y^2 \bar{u}_{yy} \|_\infty \Big\| \frac{U - U(x, 0)}{y} \phi_1 \Big\|^2 +\Big( \sup_x \int |\bar{u}_{yy} | x^{\frac 1 2} \,\mathrm{d} y \Big) \| U x^{-\frac 1 4} \|_{L^2(y = 0)}^2 \\ \label{fray:1}
\lesssim & \| U_y \phi_1\|^2 + \| U, V \|_{X_0}^2 \le C_\delta \| U, V \|_{X_0}^2 + \delta \| U, V \|_{Y_{\frac 1 2}}^2,
\end{align}
where above, we used the Hardy inequality in $y$, which is admissible as $(U - U(x, 0))|_{y = 0} = 0$, as well as the inequality \eqref{bob:1}. $T^{(2)}_4$ works in an analogous manner, and so we omit it. For $T^{(2)}_5$, we note that the support of $\phi_1'$ is bounded in $x$, and so this term can trivially be controlled by $\| U, V \|_{X_0}^2$.
\noindent \textit{Step 2: $\mathcal{T}_2[V]$ terms:} We now address the contributions from $\mathcal{T}_2[V]$. For this, we first note that, examining the multiplier \eqref{mult:2}, the contribution from $\ensuremath{\varepsilon} V$ has already been treated in Lemma \ref{Lem:2}, and we therefore just need to estimate the contribution from the principal term, $\ensuremath{\varepsilon} V_x x$. More precisely, we have already established the following estimate,
\begin{align}
|\int \mathcal{T}_2[V] \ensuremath{\varepsilon} (V \phi_1^2 + 2 V x \phi_1 \phi_1' )| \lesssim \|U, V \|_{X_0}^2.
\end{align}
We now estimate the contribution of the principal term, $\ensuremath{\varepsilon} V_x x$. For this, recall the definition \eqref{def:T2},
\begin{align} \ensuremath{\nonumber}
\int \ensuremath{\varepsilon} \mathcal{T}_2[V] V_x x \phi_1^2 = & \int \ensuremath{\varepsilon} \Big( \bar{u}^2 V_x + \bar{u} \bar{v} V_y + \bar{u}_{yy} V \Big) V_x x \phi_1^2 \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \bar{u}^2 V_x^2 x \phi_1^2 + \int \ensuremath{\varepsilon} \bar{u} \bar{v} V_y V_x x \phi_1^2 - \frac 1 2 \int \ensuremath{\varepsilon} \ensuremath{\partial}_x (x \bar{u}_{yy}) V^2 \phi_1^2 - \int \ensuremath{\varepsilon} x \bar{u}_{yy} V^2 \phi_1 \phi_1' \\ \ensuremath{\nonumber}
= & T^{(3)}_1 + ... + T^{(3)}_4.
\end{align}
We observe that $T^{(3)}_1$ is a positive contribution. The integrand in the term $T^{(3)}_4$ has bounded support in $x$ and so can immediately be controlled by $\|U, V \|_{X_0}^2$. We may estimate $T^{(3)}_2$ and $T^{(3)}_3$ via
\begin{align}
&|\int \ensuremath{\varepsilon} \bar{u} \bar{v} V_y V_x x \phi_1^2| \lesssim \sqrt{\ensuremath{\varepsilon}} \| \frac{ \bar{v}}{\bar{u}} x^{\frac 1 2} \|_\infty \| \bar{u} U_x x^{\frac 1 2} \phi_1 \| \| \sqrt{\ensuremath{\varepsilon}} \bar{u} V_x x^{\frac 1 2}\phi_1 \| \lesssim \sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_{\frac 1 2}}^2, \\
&|\int \ensuremath{\varepsilon} \ensuremath{\partial}_x (x \bar{u}_{yy}) V^2 \phi_1^2| \lesssim \| y^2 \ensuremath{\partial}_x (x \bar{u}_{yy}) \|_\infty \ensuremath{\varepsilon} \Big\| \frac{V}{y} \phi_1\Big\|^2 \lesssim \ensuremath{\varepsilon} \| V_y \phi_1\|^2 \le C_\delta \| U, V \|_{X_0}^2 + \delta \| U, V \|_{Y_{\frac 1 2}}^2,
\end{align}
where we have invoked the Hardy inequality \eqref{Hardy:1}.
\noindent \textit{Step 3: Diffusive Terms:} We now address the main diffusive term, which is the contribution of $- u_{yy}$ in \eqref{sys:sim:1}. We, again, group the term $\bar{u}^0_{pyyy}q$ from \eqref{sys:sim:1} with this term. More precisely, we have
\begin{align} \ensuremath{\nonumber}
\int \Big(- u_{yy} +& \bar{u}^0_{pyyy} q \Big) U_x x\ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & \int u_y U_{xy} x \ensuremath{\partial}hi_1^2+ \int_{y = 0} u_y U_x x\ensuremath{\partial}hi_1^2 \,\mathrm{d} x + \int \bar{u}^0_{pyyy} q U_x x\ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & \int \ensuremath{\partial}_y (\bar{u} U + \bar{u}_y q) U_{xy} x \ensuremath{\partial}hi_1^2+ \int_{y = 0} \bar{u}_y U_x x \ensuremath{\partial}hi_1^2 \,\mathrm{d} x + \int \bar{u}^0_{pyyy} q U_x x \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
= & \int \Big(2 \bar{u}_y U + \bar{u} U_y + \bar{u}_{yy} q \Big) U_{xy} x \ensuremath{\partial}hi_1^2+ \int_{y = 0} \ensuremath{\partial}_y (\bar{u} U + \bar{u}_y q) U_x x \ensuremath{\partial}hi_1^2 \,\mathrm{d} x \\ \ensuremath{\nonumber}
& + \int \bar{u}^0_{pyyy} q U_x x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & - \int U_y \ensuremath{\partial}_x (2 x \bar{u}_y U) \ensuremath{\partial}hi_1^2 - \frac 1 2 \int U_y^2 \ensuremath{\partial}_x (x \bar{u}) \ensuremath{\partial}hi_1^2 - \int \bar{u}_{yyy} q U_x x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& + \int \bar{u}^0_{pyyy} q U_x x \ensuremath{\partial}hi_1^2 - \int \bar{u}_{yy} UU_x x\ensuremath{\partial}hi_1^2+ 2 \int_{y = 0} \bar{u}_y U U_x x\ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& - \int \bar{u} x U_y^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' - \int 4 \bar{u}_y UU_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \ensuremath{\nonumber}
= & - \int 2 x \bar{u}_y U_x U_y \ensuremath{\partial}hi_1^2- \int 2 x \bar{u}_{xy} U U_y \ensuremath{\partial}hi_1^2- \int 2 \bar{u}_y UU_y \ensuremath{\partial}hi_1^2- \frac 1 2 \int \bar{u} U_y^2\ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& - \frac 1 2 \int x \bar{u}_x U_y^2\ensuremath{\partial}hi_1^2 + \frac 1 2 \int \ensuremath{\partial}_x (x \bar{u}_{yy}) U^2\ensuremath{\partial}hi_1^2 - \int_{y = 0} \ensuremath{\partial}_x (x \bar{u}_y) U^2\ensuremath{\partial}hi_1^2 - \int (\bar{u}_{yyy} - \bar{u}^0_{pyyy}) qU_x x\ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& + \int x \bar{u}_{yy} U^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' -2 \int_{y = 0} U^2 x \bar{u}_y \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' - \int \bar{u} x U_y^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' - \int 4 \bar{u}_y UU_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \ensuremath{\nonumber}
= & - \int 2 x \bar{u}_y U_x U_y \ensuremath{\partial}hi_1^2+ \int x \bar{u}_{xyy} U^2\ensuremath{\partial}hi_1^2 + \int \bar{u}_{yy} U^2\ensuremath{\partial}hi_1^2 - \frac 1 2 \int \bar{u} U_y^2 \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
& - \frac 1 2 \int x \bar{u}_x U_y^2 \ensuremath{\partial}hi_1^2+ \frac 1 2 \int \ensuremath{\partial}_x (x \bar{u}_{yy}) U^2\ensuremath{\partial}hi_1^2 - \int_{y = 0} \ensuremath{\partial}_x (x \bar{u}_{y}) U^2\ensuremath{\partial}hi_1^2 - \int (\bar{u}_{yyy} - \bar{u}^0_{pyyy}) qU_x x\ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& + \int_{y = 0} \ensuremath{\partial}_x ( x \bar{u}_y ) U^2 \ensuremath{\partial}hi_1^2 + \int x \bar{u}_{yy} U^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' -2 \int_{y = 0} U^2 x \bar{u}_y \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' - \int \bar{u} x U_y^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \ensuremath{\nonumber}
&- \int 4 \bar{u}_y UU_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \ensuremath{\nonumber}
= & - \int 2 x \bar{u}_y U_x U_y\ensuremath{\partial}hi_1^2 + \frac 3 2 \int \ensuremath{\partial}_x (x \bar{u}_{yy}) U^2 \ensuremath{\partial}hi_1^2- \frac 1 2 \int (\bar{u} + x \bar{u}_x ) U_y^2\ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& - \int_{y = 0} \ensuremath{\partial}_x (x \bar{u}_y) U^2 \ensuremath{\partial}hi_1^2 \,\mathrm{d} x -\int (\bar{u}_{yyy} - \bar{u}^0_{pyyy}) qU_x x \ensuremath{\partial}hi_1^2 + \int x \bar{u}_{yy} U^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \label{million}
& -2 \int_{y = 0} U^2 x \bar{u}_y \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' - \int \bar{u} x U_y^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' - \int 4 \bar{u}_y UU_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' = \sum_{i = 1}^{9} D^{(4)}_i.
\end{align}
For the first term above, $D^{(4)}_1$, we localize in $z$ using the cutoff function $\chi(\cdot)$ (see \eqref{def:chi}), via
\begin{align}
D_1^{(4)} = - \int 2 x \bar{u}_y U_x U_y\ensuremath{\partial}hi_1^2 (1 - \chi(z)) - \int 2 x \bar{u}_y U_x U_y\ensuremath{\partial}hi_1^2 \chi(z).
\end{align}
The far-field component is controlled easily via
\begin{align}
|\int x \bar{u}_y U_x U_y (1- \chi(z)) \ensuremath{\partial}hi_1^2| \lesssim \| \bar{u}_y x^{\frac 1 2} \|_\infty \| \bar{u} U_x x^{\frac 1 2} \ensuremath{\partial}hi_1 \| \| \sqrt{\bar{u}} U_y \| \le \delta \| U, V \|_{X_{\frac 1 2}}^2 + C_\delta \| U, V \|_{X_0}^2,
\end{align}
where we have used $\bar{u} \gtrsim 1$ when $z \gtrsim 1$, according to \eqref{samezies:1}.
The localized piece requires the use of higher order norms, and we estimate it via
\begin{align} \ensuremath{\nonumber}
|\int x \bar{u}_y U_x U_y \chi(z) \ensuremath{\partial}hi_1^2| \lesssim & \| \sqrt{x} \bar{u}_y \|_\infty \| U_x x^{\frac 1 2} \chi(z) \ensuremath{\partial}hi_1 \| \| U_y \ensuremath{\partial}hi_1 \| \\ \ensuremath{\nonumber}
\lesssim & ( \| \sqrt{\bar{u}} U_{xy} x\ensuremath{\partial}hi_1 \| + \| \bar{u} U_x \sqrt{x} \ensuremath{\partial}hi_1\| ) ( \delta \| U, V \|_{Y_{\frac 1 2}} + C_\delta \| U, V \|_{X_0} ) \\ \label{est:Xhalf:loss:deriv}
\lesssim & \delta \| U, V \|_{X_1}^2 + \delta \| U, V \|_{X_{\frac 1 2}}^2 + \delta \| U, V \|_{Y_{\frac 1 2}}^2 + C_\delta \| U, V \|_{X_0}^2,
\end{align}
where above, we have appealed to \eqref{bob:1}. For $D^{(4)}_2$, we estimate in the same manner as \eqref{fray:1}, whereas $D^{(4)}_3$ can easily be controlled upon using $\| \bar{u} + x \ensuremath{\partial}_x \bar{u} \|_\infty \lesssim 1$, according to \eqref{prof:u:est}. The term $D^{(4)}_4$ is immediately controlled by $\|U, V \|_{X_0}^2$. The term $D^{(4)}_5$ we estimate via
\begin{align} \ensuremath{\nonumber}
|\int (\bar{u}_{yyy} - \bar{u}^0_{pyyy}) q U_x x \ensuremath{\partial}hi_1^2| \lesssim & \| (\bar{u}_{yyy} - \bar{u}^0_{pyyy}) y x^{1.01} \|_\infty \| U \langle x \rangle^{-1.01} \| \| U_x \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_1 \| \\
\lesssim & \sqrt{\ensuremath{\varepsilon}} (\|U, V \|_{X_0}^2 + \| U, V \|_{X_{\frac 1 2}}^2 + \| U, V \|_{X_1}^2),
\end{align}
where we have invoked \eqref{est:ring:1}. Finally, for the remaining four terms from \eqref{million}, $D^{(4)}_k$, $k = 6,7,8,9$, due to the presence of $\ensuremath{\partial}hi_1'$, the $x$ weights are all bounded, and these terms can thus be easily controlled by $\|U, V \|_{X_0}^2$.
We now move to the contribution of the tangential diffusive term, $-\ensuremath{\varepsilon} u_{xx}$, which produces
\begin{align} \ensuremath{\nonumber}
- \int \ensuremath{\varepsilon} u_{xx} U_x x \ensuremath{\partial}hi_1^2 = &\int \ensuremath{\varepsilon} u_x U_{xx} x \ensuremath{\partial}hi_1^2 + \int \ensuremath{\varepsilon} u_x U_x \ensuremath{\partial}hi_1^2 + 2 \int \ensuremath{\varepsilon} u_x U_x x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} \ensuremath{\partial}_x (\bar{u} U + \bar{u}_y q) U_{xx} x \ensuremath{\partial}hi_1^2+ \int \ensuremath{\varepsilon} \ensuremath{\partial}_x (\bar{u} U + \bar{u}_y q) U_x \ensuremath{\partial}hi_1^2 + 2 \int \ensuremath{\varepsilon} u_x U_x x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} (\bar{u} U_x + \bar{u}_x U + \bar{u}_{xy}q - \bar{u}_y V) U_{xx} x \ensuremath{\partial}hi_1^2+ \int \ensuremath{\varepsilon} (\bar{u} U_x + \bar{u}_x U + \bar{u}_{xy}q - \bar{u}_y V) U_x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& + 2 \int \ensuremath{\varepsilon} ( \bar{u} U_x + \bar{u}_x U + \bar{u}_{xy}q - \bar{u}_y V ) U_x x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \ensuremath{\nonumber}
= & \frac 1 2 \int \ensuremath{\varepsilon} \bar{u} U_x^2 \ensuremath{\partial}hi_1^2 - \frac 3 2 \int \ensuremath{\varepsilon} x \bar{u}_x U_x^2 \ensuremath{\partial}hi_1^2 + \int \ensuremath{\varepsilon} (\bar{u}_{xx} + \frac{1}{2} \bar{u}_{xxx}x) U^2 \ensuremath{\partial}hi_1^2 + \int \ensuremath{\varepsilon} \bar{u}_{xyy} q V_x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& + \int \ensuremath{\varepsilon} \bar{u}_{xy} U V_x x \ensuremath{\partial}hi_1^2 - \int \ensuremath{\varepsilon} \bar{u}_x U V_y \ensuremath{\partial}hi_1^2 - \int \ensuremath{\varepsilon} \bar{u}_{xy} q V_y \ensuremath{\partial}hi_1^2 + \frac{\ensuremath{\varepsilon}}{2} \int \bar{u}_{xyy} x V^2 \ensuremath{\partial}hi_1^2 \\ \label{umbrella:1}
& + \int \ensuremath{\varepsilon} \bar{u}_y V_y V_x x \ensuremath{\partial}hi_1^2 + E_{loc}^{(1)} =: \sum_{i = 1}^{9} D^{(5)}_i + E^{(1)}_{loc},
\end{align}
where
\begin{align}
E_{loc}^{(1)} := - 2\int \ensuremath{\varepsilon} \bar{u} x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' U_x^2 + \int \ensuremath{\varepsilon} (\bar{u}_x + \bar{u}_{xx} x) U^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' - 2 \int \ensuremath{\varepsilon} x \bar{u}_x UU_x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' + \int \ensuremath{\varepsilon} \bar{u}_{yy} V^2 x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1'.
\end{align}
First, it is evident that $|E_{loc}^{(1)}| \lesssim \|U, V \|_{X_0}^2$ as $|x| \lesssim 1$ on the support of $\ensuremath{\partial}hi_1'$. We now estimate each of the remaining terms in \eqref{umbrella:1}. $D^{(5)}_1$ and $D^{(5)}_2$ are controlled by the right-hand side of \eqref{Xh:right} upon invoking \eqref{bob:1} and upon using $\| \bar{u} + x \bar{u}_x \|_\infty \lesssim 1$. We estimate $D^{(5)}_3$ by noting that $|\bar{u}_{xx}| + |x \bar{u}_{xxx}| \lesssim \langle x \rangle^{-2}$, after which it is easily controlled by $\| U, V \|_{X_0}^2$.
For the fourth term, we estimate via first localizing in $z$ using the cut-off function $\chi$, defined in \eqref{def:chi}, via
\begin{align*}
D^{(5)}_4 = \int \ensuremath{\varepsilon} \bar{u}_{xyy} q V_x x \ensuremath{\partial}hi_1^2 \chi(z) + \int \ensuremath{\varepsilon} \bar{u}_{xyy} q V_x x \ensuremath{\partial}hi_1^2 (1- \chi(z)).
\end{align*}
First for the far-field component, we have
\begin{align} \ensuremath{\nonumber}
| \int \ensuremath{\varepsilon} \bar{u}_{xyy} q V_x x \ensuremath{\partial}hi_1^2 (1- \chi(z))| \lesssim & \sqrt{\ensuremath{\varepsilon}} \| \bar{u}_{xyy} y x^{\frac 3 2} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} \bar{u} V_x x^{\frac 1 2} \ensuremath{\partial}hi_1\| \| \frac{q}{y} \langle x \rangle^{-1} \| \\
\lesssim & \sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_{\frac 1 2}} \| U \langle x \rangle^{-1} \|,
\end{align}
which is an admissible contribution according to Hardy type inequality \eqref{Hardy:three}. For the localized component, we have
\begin{align} \ensuremath{\nonumber}
| \int \ensuremath{\varepsilon} \bar{u}_{xyy} q V_x x \ensuremath{\partial}hi_1^2 \chi(z)| = & | \int \ensuremath{\varepsilon} \bar{u}_{xyy} \frac{q}{y} \frac{y}{\sqrt{x}} V_x x^{\frac 32} \ensuremath{\partial}hi_1^2 \chi(z)| \lesssim \int \ensuremath{\varepsilon} |\bar{u}_{xyy}| \Big| \frac{q}{y} \Big| \bar{u} |V_x| x^{\frac 32} \ensuremath{\partial}hi_1^2 \\
\lesssim & \sqrt{\ensuremath{\varepsilon}} \| U \langle x \rangle^{-1} \| \| \sqrt{\ensuremath{\varepsilon}} \bar{u} V_x \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_1 \| \lesssim \sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_0}^2 + \sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_{\frac 12}}^2,
\end{align}
where we have used the pointwise decay estimate $|\bar{u}_{xyy} \langle x \rangle^2| \lesssim 1$, according to \eqref{prof:u:est}. The fifth term, $D^{(5)}_5$, follows by a nearly identical calculation.
For the sixth term from \eqref{umbrella:1}, $D^{(5)}_6$, it is convenient to integrate by parts in $x$, which produces
\begin{align} \ensuremath{\nonumber}
|\int \ensuremath{\varepsilon} \bar{u}_{xy} U V_x x \ensuremath{\partial}hi_1^2| = &| \int \ensuremath{\varepsilon} \bar{u}_{xxy} U V x \ensuremath{\partial}hi_1^2 + \int \frac{\ensuremath{\varepsilon}}{2} \bar{u}_{xyy} V^2 x \ensuremath{\partial}hi_1^2 + \int \ensuremath{\varepsilon} \bar{u}_{xy} UV \ensuremath{\partial}hi_1^2 - 2 \int \ensuremath{\varepsilon} \bar{u}_{xy} UV x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' | \\ \ensuremath{\nonumber}
\lesssim & \sqrt{\ensuremath{\varepsilon}} (\| \bar{u}_{xxy} x^{2+2\sigma} \|_\infty + \| \bar{u}_{xy} x^{1+2\sigma} \|_\infty ) \| U \langle x \rangle^{- \frac 1 2 - \sigma} \| \| \sqrt{\ensuremath{\varepsilon}} V \langle x \rangle^{-\frac 1 2 - \sigma} \| \\ \ensuremath{\nonumber}
& + \| \bar{u}_{xyy} y^2 x \|_\infty \Big\| \sqrt{\ensuremath{\varepsilon}} \frac{V}{y} \ensuremath{\partial}hi_1 \Big\|^2 \\ \ensuremath{\nonumber}
\lesssim & \sqrt{\ensuremath{\varepsilon}} ( \| U, V \|_{X_0}^2 + \| U, V \|_{X_{\frac 1 2}}^2 ) + C_\delta \| U, V \|_{X_0}^2 + \delta \| U, V\|_{Y_{\frac 1 2}}^2.
\end{align}
We estimate $D^{(5)}_7$ via
\begin{align}
|\int \ensuremath{\varepsilon} \bar{u}_{xy} q V_y \ensuremath{\partial}hi_1^2| \lesssim & \sqrt{\ensuremath{\varepsilon}} \| y x \bar{u}_{xy} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} U_x \ensuremath{\partial}hi_1 \| \| \frac{q}{y} \langle x \rangle^{-1} \| \lesssim \sqrt{\ensuremath{\varepsilon}} \| \sqrt{\ensuremath{\varepsilon}}U_x \ensuremath{\partial}hi_1\| \| U \langle x \rangle^{-1} \|,
\end{align}
which is an admissible contribution according to \eqref{Hardy:three}.
We estimate $D^{(5)}_8$ via
\begin{align}
|\int \ensuremath{\varepsilon} x \bar{u}_{xyy} V^2 \ensuremath{\partial}hi_1^2| \le \| x \bar{u}_{xyy} y^2 \|_\infty \Big\| \sqrt{\ensuremath{\varepsilon}} \frac{V}{y} \ensuremath{\partial}hi_1 \Big\|^2 \lesssim \| \sqrt{\ensuremath{\varepsilon}} V_y \ensuremath{\partial}hi_1\|^2,
\end{align}
upon which we invoke \eqref{bob:1}.
Finally, we estimate $D^{(5)}_9$ via
\begin{align} \ensuremath{\nonumber}
|\int \ensuremath{\varepsilon} \bar{u}_y V_y V_x x \ensuremath{\partial}hi_1^2| \lesssim & \| \bar{u}_y x^{\frac 1 2} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} U_x \ensuremath{\partial}hi_1 \| \| \sqrt{\ensuremath{\varepsilon}} V_x x^{\frac 1 2}\ensuremath{\partial}hi_1 \| \\ \ensuremath{\nonumber}
\le & (C_{\delta_1} \| U, V \|_{X_0} + \delta_1 \| U, V \|_{Y_{\frac 1 2}} )( C_{\delta_2} \| U, V \|_{X_{\frac 1 2}} + \delta_2 \| U, V \|_{X_1} ) \\
\le & \delta \| U, V \|_{Y_{\frac 1 2}}^2 + \delta \| U, V \|_{X_1}^2 + C_\delta \| U, V \|_{X_0}^2 + \delta \| U, V \|_{X_{\frac 1 2}}^2.
\end{align}
We move to the third diffusive term, by which we mean
\begin{align}
- \ensuremath{\varepsilon} \int v_{yy} (V_x x \ensuremath{\partial}hi_1^2 + V \ensuremath{\partial}hi_1^2 + 2 V x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1') = - \int \ensuremath{\varepsilon} v_{yy} V_x x \ensuremath{\partial}hi_1^2 + \int \ensuremath{\varepsilon} v_y V_y \ensuremath{\partial}hi_1^2 + \int 2 \ensuremath{\varepsilon} v_y V_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1'.
\end{align}
We easily estimate the final two terms above via
\begin{align}
|\int \ensuremath{\varepsilon} v_y V_y \ensuremath{\partial}hi_1^2 + \int 2 \ensuremath{\varepsilon} v_y V_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' | \lesssim \|U, V \|_{X_0}^2.
\end{align}
We thus deal with the principal contribution, which gives
\begin{align} \ensuremath{\nonumber}
- \ensuremath{\varepsilon} \int v_{yy} V_x x \ensuremath{\partial}hi_1^2 = & \int \ensuremath{\varepsilon} v_y V_{xy} x \ensuremath{\partial}hi_1^2 = \int \ensuremath{\varepsilon} \ensuremath{\partial}_y (\bar{u} V - \bar{u}_x q) V_{xy} x \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon} (\bar{u} V_y + \bar{u}_y V - \bar{u}_{xy}q - \bar{u}_x U) V_{xy} x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & - \int \frac{\ensuremath{\varepsilon}}{2} \ensuremath{\partial}_x (x \bar{u}) V_y^2 \ensuremath{\partial}hi_1^2 - \int \ensuremath{\varepsilon} \bar{u}_y V_y V_x x\ensuremath{\partial}hi_1^2 + \frac 1 2 \int \ensuremath{\varepsilon} \ensuremath{\partial}_x (x \bar{u}_{yy}) V^2 \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
& + \int \ensuremath{\varepsilon} \bar{u}_{xyy} q V_x x \ensuremath{\partial}hi_1^2 + \int \ensuremath{\varepsilon} \bar{u}_{xy} U V_x x \ensuremath{\partial}hi_1^2+ \int \ensuremath{\varepsilon} \bar{u}_x U_y V_x x \ensuremath{\partial}hi_1^2 \\ \label{coffee:2}
& - \int \ensuremath{\varepsilon} x \bar{u} V_y^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' + \frac 1 2 \int \ensuremath{\varepsilon} x \bar{u}_{yy} V^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1'.
\end{align}
These terms are largely identical to those in \eqref{umbrella:1}. The only slightly different term is the sixth term of \eqref{coffee:2}, which is estimated as
\begin{align}
|\ensuremath{\varepsilon} \int \bar{u}_x U_y V_x x| \lesssim \| \frac{\bar{u}_x}{\bar{u}} x \|_\infty \| \sqrt{\bar{u}} U_y \| \| \ensuremath{\varepsilon} \sqrt{\bar{u}} V_x \| \lesssim \| U, V \|_{X_0}^2.
\end{align}
We now move to the fourth and final diffusive term, by which we mean
\begin{align}
- \ensuremath{\varepsilon}^2 \int v_{xx} V_x x \ensuremath{\partial}hi_1^2 - \int \ensuremath{\varepsilon}^2 v_{xx} V \ensuremath{\partial}hi_1^2 -2 \int \ensuremath{\varepsilon}^2 v_{xx} V x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1'.
\end{align}
An integration by parts in $x$ demonstrates that the final two terms above are estimated above by $\|U, V \|_{X_0}^2$. The first term above gives
\begin{align} \ensuremath{\nonumber}
- \ensuremath{\varepsilon}^2 \int v_{xx} V_x x \ensuremath{\partial}hi_1^2 = & \int \ensuremath{\varepsilon}^2 v_x V_{xx} x \ensuremath{\partial}hi_1^2 + \int \ensuremath{\varepsilon}^2 v_x V_x \ensuremath{\partial}hi_1^2 + 2\int \ensuremath{\varepsilon}^2 v_x V_x x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x (\bar{u} V - \bar{u}_x q) V_{xx} x \ensuremath{\partial}hi_1^2+ \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x (\bar{u} V - \bar{u}_x q) V_x \ensuremath{\partial}hi_1^2 + 2 \int \ensuremath{\varepsilon}^2 v_x V_x x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \label{econ}
= & \tilde{D}^{(6)}_1 + \tilde{D}^{(6)}_2 + \tilde{D}^{(6)}_3.
\end{align}
The term $\tilde{D}^{(6)}_3$ is easily controlled by a factor of $\|U, V \|_{X_0}^2$. A few integrations by parts produces for the first term, $\tilde{D}^{(6)}_1$, above
\begin{align} \ensuremath{\nonumber}
\tilde{D}^{(6)}_1 = &\int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x (\bar{u} V - \bar{u}_x q) V_{xx} x \ensuremath{\partial}hi_1^2= \int \ensuremath{\varepsilon}^2 (\bar{u} V_x + 2 \bar{u}_x V - \bar{u}_{xx}q) V_{xx} x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & - \frac 1 2 \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x(\bar{u} x) V_x^2 \ensuremath{\partial}hi_1^2 - \int \ensuremath{\varepsilon}^2 \bar{u} x V_x^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' - 2 \ensuremath{\varepsilon}^2 \int (\bar{u}_x x V)_x V_x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& + \int \ensuremath{\varepsilon}^2 V_x \ensuremath{\partial}_x (\bar{u}_{xx} qx) \ensuremath{\partial}hi_1^2 - 4 \int \ensuremath{\varepsilon}^2 \bar{u}_x x V V_x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' + 2 \int \ensuremath{\varepsilon}^2 \bar{u}_{xx} q V_x x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1'\\ \ensuremath{\nonumber}
= & - \frac{\ensuremath{\varepsilon}^2}{2} \int (\bar{u} x)_x V_x^2 \ensuremath{\partial}hi_1^2 - \int 2 \ensuremath{\varepsilon}^2 x \bar{u}_x V_x^2 \ensuremath{\partial}hi_1^2 - 2 \ensuremath{\varepsilon}^2 \int (x \bar{u}_x)_x VV_x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& + \ensuremath{\varepsilon}^2 \int q V_x (x \bar{u}_{xx})_x \ensuremath{\partial}hi_1^2 - \ensuremath{\varepsilon}^2 \int x \bar{u}_{xx} VV_x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & - \frac{\ensuremath{\varepsilon}^2}{2} \int (x \bar{u})_x V_x^2 \ensuremath{\partial}hi_1^2- 2 \ensuremath{\varepsilon}^2 \int x \bar{u}_x V_x^2 \ensuremath{\partial}hi_1^2 + \ensuremath{\varepsilon}^2 \int (x \bar{u}_x)_{xx} V^2 \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& + \int \ensuremath{\varepsilon}^2 V_x q \ensuremath{\partial}_x (x \bar{u}_{xx}) \ensuremath{\partial}hi_1^2+ \ensuremath{\varepsilon}^2 \int \ensuremath{\partial}_x (x \bar{u}_{xx}) V^2 \ensuremath{\partial}hi_1^2 - 4 \int \ensuremath{\varepsilon}^2 \bar{u}_x x V V_x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \label{coffee:3}
& + 2 \int \ensuremath{\varepsilon}^2 \bar{u}_{xx} q V_x x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' =: \sum_{i = 1}^{7} D^{(6)}_i.
\end{align}
We now proceed to estimate all the terms above. The first term of \eqref{coffee:3}, $D^{(6)}_1$ ,we estimate via
\begin{align}
|\frac{\ensuremath{\varepsilon}^2}{2} \int (x \bar{u})_x V_x^2| \lesssim \| \ensuremath{\partial}_x (x \bar{u}) \|_\infty \ensuremath{\varepsilon}^2 \| V_x \|^2 \lesssim \| U, V \|_{X_0}^2.
\end{align}
$D^{(6)}_2$ and $D^{(6)}_3$ are estimated in an analogous manner. For $D^{(6)}_4$ and $D^{(6)}_6$, we invoke the Hardy type inequality \eqref{Hardy:four} coupled with the estimate $\| \ensuremath{\partial}_x^2(x \bar{u}_x) x^2\|_\infty \lesssim 1$. We estimate $D^{(6)}_5$ via
\begin{align} \ensuremath{\nonumber}
|\int \ensuremath{\varepsilon}^2 V_x q \ensuremath{\partial}_x (x \bar{u}_{xx})| \lesssim & \ensuremath{\varepsilon} \Big\| \frac{\ensuremath{\partial}_x (x \bar{u}_{xx})}{\bar{u}} x y \Big\|_\infty \Big\| \frac{q}{y} x^{-1} \Big\| \| \ensuremath{\varepsilon} \sqrt{\bar{u}} V_x \| \lesssim \ensuremath{\varepsilon} \| U \langle x \rangle^{-1} \| \| \ensuremath{\varepsilon} \sqrt{\bar{u}} V_x \| \\
\lesssim & \ensuremath{\varepsilon} ( \| U, V \|_{X_0} + \| U, V \|_{X_{\frac 1 2}} ) \| U, V \|_{X_0},
\end{align}
where we have invoked the Hardy-type inequality \eqref{Hardy:three}.
For the second term from \eqref{econ}, $\tilde{D}^{(6)}_2$, we expand and integrate by parts to generate
\begin{align} \ensuremath{\nonumber}
|\int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x (\bar{u} V - \bar{u}_x q) V_x | =&| \int \ensuremath{\varepsilon}^2 \bar{u} V_x^2 - \int 2 \ensuremath{\varepsilon}^2 \bar{u}_{xx} V^2 + \frac 1 2 \int \ensuremath{\varepsilon}^2 \bar{u}_{xxxx} q^2 | \\ \ensuremath{\nonumber}
\lesssim & \| \sqrt{\bar{u}} \ensuremath{\varepsilon} V_x \|^2 + \ensuremath{\varepsilon} \| \bar{u}_{xx} x^2 \|_\infty \| \sqrt{\ensuremath{\varepsilon}} V \langle x \rangle^{-1} \|^2 + \ensuremath{\varepsilon}^2 \| \bar{u}_{xxxx} y^2 x^2 \|_\infty \Big\| \frac{q}{y} \langle x \rangle^{-1} \Big\|^2 \\
\lesssim & \| U, V \|_{X_0}^2 + \ensuremath{\varepsilon} (\| U, V \|_{X_0}^2 + \| U, V \|_{X_{\frac 1 2}}^2),
\end{align}
where we have invoked \eqref{Hardy:three} - \eqref{Hardy:four}.
\noindent \textit{Step 4: Error Terms:} We now move to the remaining error terms, the first of which is the $\zeta U$ term from \eqref{sys:sim:1}. For this, we estimate via
\begin{align} \ensuremath{\nonumber}
\Big| \int \zeta U U_x x \ensuremath{\partial}hi_1^2 \Big| \lesssim & \sqrt{\ensuremath{\varepsilon}} \int \langle x \rangle^{- (1 + \frac{1}{50})} |U| |U_x| x \ensuremath{\partial}hi_1^2 \lesssim \sqrt{\ensuremath{\varepsilon}} \| U \langle x \rangle^{- \frac 1 2 - \frac{1}{50}} \| \| U_x x^{\frac 1 2} \ensuremath{\partial}hi_1 \| \\ \ensuremath{\nonumber}
\lesssim & \sqrt{\ensuremath{\varepsilon}} ( \| \bar{u} U \langle x \rangle^{- \frac 1 2 - \frac{1}{200}} \| + \| \sqrt{\bar{u}} U_y \| ) ( \| \bar{u} U_x x^{\frac 1 2} \ensuremath{\partial}hi_1 \| + \| \sqrt{\bar{u}} U_{xy} x \ensuremath{\partial}hi_1 \| ) \\
\lesssim & \sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_0} (\| U, V\|_{X_{\frac 1 2}} + \| U, V \|_{X_1} ),
\end{align}
where we have invoked estimate \eqref{S:0} for pointwise decay of $\zeta$. The $\zeta_y q$ term from \eqref{sys:sim:1} and $\zeta V$ term from \eqref{sys:sim:2} are estimated in an identical manner.
We now address the remaining error terms in equation \eqref{sys:sim:2}. The first of these is the term $\ensuremath{\varepsilon} \alpha U$, whose contribution we estimate via
\begin{align} \ensuremath{\nonumber}
\Big| \int \ensuremath{\varepsilon} \alpha U (V_x x + V ) \ensuremath{\partial}hi_1^2 \Big| \lesssim \sqrt{\ensuremath{\varepsilon}} \| U \langle x \rangle^{-\frac 3 4} \| ( \| \sqrt{\ensuremath{\varepsilon}} \bar{u} V_x x^{\frac 1 2} \ensuremath{\partial}hi_1 \| + \| \sqrt{\ensuremath{\varepsilon}} \bar{u} V \langle x \rangle^{- \frac 3 4} \| ) \lesssim \sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_0} \| U, V \|_{X_{\frac 1 2}},
\end{align}
where we have invoked the pointwise decay estimate on $\alpha$ from \eqref{S:1}. The estimate on the $\alpha_y q$ term follows in an identical manner. This concludes the proof of Lemma \ref{Lem:3}.
\end{proof}
\begin{lemma} \label{lemma:y:half} Let $(U, V)$ be a solution to \eqref{sys:sim:1} - \eqref{BC:UVYW}. Then for $0 < \delta << 1$,
\begin{align} \label{basic:Yhalf:est:st}
\| U, V \|_{Y_{\frac 1 2}}^2 \lesssim C_\delta \| U, V \|_{X_0}^2 + C_{\delta} \|U, V \|_{E}^2+ \delta \| U, V \|_{X_{\frac 1 2}}^2 + \ensuremath{\varepsilon} \| U, V \|_{X_1}^2 + \mathcal{T}_{Y_{\frac 1 2}} + \mathcal{F}_{Y_{\frac 1 2}},
\end{align}
where
\begin{align} \label{TY12spec}
\mathcal{T}_{Y_{\frac 1 2}} := & \int ( \ensuremath{\partial}_y \mathcal{N}_1 - \ensuremath{\varepsilon} \ensuremath{\partial}_x \mathcal{N}_2) U_y x \ensuremath{\partial}hi_1^2 \\ \label{FY12spec}
\mathcal{F}_{Y_{\frac 1 2}} := & \int (\ensuremath{\partial}_y F_R - \ensuremath{\varepsilon} \ensuremath{\partial}_x G_R) U_y x \ensuremath{\partial}hi_1^2.
\end{align}
\end{lemma}
\begin{proof} For this proof, it is convenient to work in the vorticity formulation, \eqref{eq:vort:pre}. We apply the multiplier $U_y x \ensuremath{\partial}hi_1(x)^2$ to \eqref{eq:vort:pre}.
\noindent \textit{Step 1: $\mathcal{T}_1$ Terms:} We first note that since $\mathcal{T}_1[U](x, 0) = 0$, we may integrate by parts in $y$ to view the product in the velocity form, and subsequently integrate by parts several times in $y$ and $x$ to produce
\begin{align} \ensuremath{\nonumber}
\int \ensuremath{\partial}_y \mathcal{T}_1[U] U_y x \ensuremath{\partial}hi_1^2= &- \int \mathcal{T}_1[U] U_{yy} x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & - \int \bar{u}^2 U_x U_{yy} x \ensuremath{\partial}hi_1^2 - \int \bar{u} \bar{v} U_y U_{yy} x \ensuremath{\partial}hi_1^2 - \int 2 \bar{u}^0_{pyy} U U_{yy} x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & \int 2 \bar{u} \bar{u}_y U_x U_y x \ensuremath{\partial}hi_1^2 - \frac 1 2 \int \bar{u}^2 U_y^2 \ensuremath{\partial}hi_1^2 - \int \bar{u} \bar{u}_x U_y^2 x \ensuremath{\partial}hi_1^2 + \frac 1 2 \int (\bar{u} \bar{v})_y U_y^2 x \ensuremath{\partial}hi_1^2 \\ \label{form:1}
& + \int 2 \bar{u}^0_{pyy} U_y^2 x \ensuremath{\partial}hi_1^2 - \int \ensuremath{\partial}_y^4 \bar{u}^0_p U^2 x \ensuremath{\partial}hi_1^2 - \int \bar{u}^2 U_y^2 x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' =: \sum_{i = 1}^7 A^{(1)}_i.
\end{align}
Note that above, we used the integration by parts identity
\begin{align} \ensuremath{\nonumber}
- \int 2 \bar{u}^0_{pyy} U U_{yy} x \ensuremath{\partial}hi_1^2= & \int 2 \bar{u}^0_{pyy} U_y^2 x \ensuremath{\partial}hi_1^2 + \int 2 \bar{u}^0_{pyyy} U U_y x \ensuremath{\partial}hi_1^2 \\ \label{form:2}
= & \int 2 \bar{u}^0_{pyy} U_y^2 x \ensuremath{\partial}hi_1^2 - \int \bar{u}^0_{pyyyy} U^2 x \ensuremath{\partial}hi_1^2,
\end{align}
which is available due to the condition that $\bar{u}^0_{pyy}|_{y = 0} = 0$ and $\bar{u}^0_{pyyy}|_{y = 0} = 0$.
We now estimate each of the terms in \eqref{form:1}, starting with $A^{(1)}_1$, which is controlled by
\begin{align} \ensuremath{\nonumber}
|\int 2\bar{u} \bar{u}_y U_x U_y x \ensuremath{\partial}hi_1^2| \lesssim & \| \bar{u}_y x^{\frac 1 2} \|_\infty \| \bar{u} U_x x^{\frac 1 2} \ensuremath{\partial}hi_1\| \| U_y \ensuremath{\partial}hi_1 \| \le C_{\delta_1} \| U_y \ensuremath{\partial}hi_1 \|^2 + \delta_1 \| U, V \|_{X_{\frac 1 2}}^2 \\ \ensuremath{\nonumber}
\le & C_{\delta_1} C_{\delta_2} \| \sqrt{\bar{u}} U_y \|^2 + C_{\delta_1} \delta_2 \| U, V \|_{Y_{\frac 1 2}}^2 + \delta_1 \| U, V \|_{X_{\frac 1 2}}^2 \\
\le & C_\delta \| U, V \|_{X_0}^2 + \delta \| U, V \|_{Y_{\frac 1 2}}^2 + \delta \| U, V \|_{X_{\frac 1 2}}^2,
\end{align}
where we have invoked \eqref{bob:1}.
For $A^{(1)}_2$, $A^{(1)}_3$, $A^{(1)}_4$, and $A^{(1)}_5$, we appeal to the coefficient estimate
\begin{align} \label{coeff:1}
\|\frac{1}{2} \bar{u}^2\|_\infty + \| \bar{u} \bar{u}_x x \|_\infty + \frac 1 2 \| x \ensuremath{\partial}_y (\bar{u} \bar{v}) \|_\infty + \| 2 \bar{u}_{yy} x \|_\infty \lesssim 1,
\end{align}
to control these terms by $C_\delta \| U, V \|_{X_0}^2 + \delta \| U, V \|_{Y_{\frac 1 2}}^2$.
We estimate $A^{(1)}_6$ via
\begin{align} \ensuremath{\nonumber}
|\int \ensuremath{\partial}_y^4 \bar{u}^0_p U^2 x \ensuremath{\partial}hi_1^2| \lesssim & |\int \ensuremath{\partial}_y^4 \bar{u}^0_p \mathring{U}^2 x \ensuremath{\partial}hi_1^2| + |\int \ensuremath{\partial}_y^4 \bar{u}^0_p U(x, 0)^2 x \ensuremath{\partial}hi_1^2| \\ \ensuremath{\nonumber}
\lesssim & \| \ensuremath{\partial}_y^4 \bar{u}^0_p x y^2 \|_\infty \| U_y \ensuremath{\partial}hi_1 \|^2 + \| \ensuremath{\partial}_y^4 \bar{u}^0_p x^{\frac 12} \|_{L^\infty_x L^1_y} \| U(x, 0) x^{\frac 1 4} \|_{y = 0}^2 \\
\le & C_\delta \| U, V \|_{X_0}^2 + \delta \| U, V \|_{Y_{\frac 1 2}}^2.
\end{align}
The final term in \eqref{form:1}, $A^{(1)}_7$, is localized in $x$, and is clearly bounded above by a factor of $\| U, V \|_{X_0}^2$.
\noindent \textit{Step 2: $\mathcal{T}_2$ Terms:} We now estimate the contributions from $\mathcal{T}_2$ via first integrating by parts in $x$ to produce
\begin{align}
- \ensuremath{\varepsilon} \int \ensuremath{\partial}_x \mathcal{T}_2[V] U_y x \ensuremath{\partial}hi_1^2 = \int \ensuremath{\varepsilon} \mathcal{T}_2[V] U_{xy} x \ensuremath{\partial}hi_1^2 + 2\int \ensuremath{\varepsilon} \mathcal{T}_2[V] U_y \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1'.
\end{align}
We now appeal to the definition of $\mathcal{T}_2[V]$ in \eqref{def:T2} to produce
\begin{align} \ensuremath{\nonumber}
\int \ensuremath{\varepsilon} \mathcal{T}_2[V] U_{xy} x \ensuremath{\partial}hi_1^2 = - & \int \ensuremath{\varepsilon} ( \bar{u}^2 V_x + \bar{u} \bar{v} V_y + \bar{u}_{yy}V )V_{yy} x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & \int 2 \ensuremath{\varepsilon} \bar{u} \bar{u}_y V_x V_y x \ensuremath{\partial}hi_1^2 + \int \ensuremath{\varepsilon} \bar{u}^2 V_{xy} V_y x \ensuremath{\partial}hi_1^2 + \frac 1 2 \int \ensuremath{\varepsilon} \ensuremath{\partial}_y (\bar{u} \bar{v}) V_y^2 x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& + \int \ensuremath{\varepsilon} \bar{u}_{yy} V_y^2 x \ensuremath{\partial}hi_1^2 - \frac 1 2 \int \ensuremath{\varepsilon} \bar{u}_{yyyy} V^2 x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & \int 2 \ensuremath{\varepsilon} \bar{u} \bar{u}_y V_x V_y x \ensuremath{\partial}hi_1^2 - \int \frac 1 2 \ensuremath{\varepsilon} \bar{u}^2 V_y^2 \ensuremath{\partial}hi_1^2 - \int \ensuremath{\varepsilon} \bar{u} \bar{u}_x V_y^2 x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& + \frac 1 2 \int \ensuremath{\varepsilon} (\bar{u} \bar{v})_y V_y^2 x \ensuremath{\partial}hi_1^2 + \int \ensuremath{\varepsilon} \bar{u}_{yy} V_y^2 x \ensuremath{\partial}hi_1^2 - \int \frac{\ensuremath{\varepsilon}}{2} \bar{u}_{yyyy} V^2 x \ensuremath{\partial}hi_1^2 - \int \ensuremath{\varepsilon} \bar{u}^2 V_y^2 x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \label{wolf:2}
= & A^{(2)}_1 + ... + A^{(2)}_7.
\end{align}
For the first term, $A^{(2)}_1$, we estimate via
\begin{align} \ensuremath{\nonumber}
| \int \ensuremath{\varepsilon} \bar{u} \bar{u}_y V_x V_y x \ensuremath{\partial}hi_1^2 | \lesssim &\| \bar{u}_y x^{\frac 1 2} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} V_y \ensuremath{\partial}hi_1 \| \| \sqrt{\ensuremath{\varepsilon}} \bar{u} V_x x^{\frac 1 2} \ensuremath{\partial}hi_1 \| \\
\le & C_\delta \| U, V \|_{X_0}^2 + \delta \| U, V \|_{Y_{\frac 1 2}}^2 + \delta \| U, V \|_{X_{\frac 1 2}}^2.
\end{align}
For $A^{(2)}_2, A^{(2)}_3, A^{(2)}_4, A^{(2)}_5$, we estimate using the same coefficient estimate as \eqref{coeff:1}. We estimate $A^{(2)}_6$ via
\begin{align} \ensuremath{\nonumber}
|\int \ensuremath{\varepsilon} \bar{u}_{yyyy} V^2 x| \lesssim \| \bar{u}_{yyyy} xy^2 \|_\infty \| \sqrt{\ensuremath{\varepsilon}} V_y \|^2 \le C_\delta \| U, V \|_{X_0}^2 + \delta \| U, V \|_{Y_{\frac 1 2}}^2.
\end{align}
The final term, $A^{(2)}_7$, can be controlled by $\|U, V \|_{X_0}^2$ upon invoking the bounded support of $\phi_1'$.
\noindent \textit{Step 3: Diffusive Terms:} We now compute (in the vorticity form) via a long series of integrations by parts the following identity
\begin{align} \ensuremath{\nonumber}
- \int \ensuremath{\partial}_y^3 u U_y x \ensuremath{\partial}hi_1^2= & \int u_{yy} U_{yy} x \ensuremath{\partial}hi_1^2+ \int_{y = 0} u_{yy} U_y x \ensuremath{\partial}hi_1^2 \,\mathrm{d} x \\ \ensuremath{\nonumber}
= & \int \ensuremath{\partial}_y^2 (\bar{u} U + \bar{u}_y q) U_{yy} x\ensuremath{\partial}hi_1^2 + \int_{y = 0} \ensuremath{\partial}_y^2 (\bar{u} U + \bar{u}_y q) U_{y} x \ensuremath{\partial}hi_1^2 \,\mathrm{d} x \\ \ensuremath{\nonumber}
= & \int \bar{u} U_{yy}^2 x \ensuremath{\partial}hi_1^2 - \frac 9 2 \int \bar{u}_{yy} U_y^2 x \ensuremath{\partial}hi_1^2+ \int 3 \bar{u}_{yyyy} U^2 x \ensuremath{\partial}hi_1^2- \int \frac 1 2 \ensuremath{\partial}_y^6 \bar{u} q^2 x \ensuremath{\partial}hi_1^2 \\ \label{likeyou}
& + \frac 3 2 \int_{y = 0} \bar{u}_y U_y^2 x \ensuremath{\partial}hi_1^2 \,\mathrm{d} x + \frac 3 2 \int_{y = 0} \bar{u}_{yy} UU_y x \ensuremath{\partial}hi_1^2 = \sum_{i = 1}^6 B^{(1)}_i.
\end{align}
We first notice that $B^{(1)}_1$ and $B^{(1)}_5$ are positive contributions. $B^{(1)}_2$ is easily estimated by $\| U, V \|_{X_0}^2$ upon using $\|\frac{\bar{u}_{yy}}{\bar{u}}x \|_\infty \lesssim 1$. We estimate $B^{(1)}_3$ by
\begin{align} \ensuremath{\nonumber}
|\int 3 \bar{u}_{yyyy} U^2 x \ensuremath{\partial}hi_1^2| \lesssim & |\int \bar{u}_{yyyy} (U - U(x, 0))^2 x \ensuremath{\partial}hi_1| + |\int \bar{u}_{yyyy} U(x, 0)^2 x| \\ \ensuremath{\nonumber}
\lesssim & \| \bar{u}_{yyyy} x y^2 \|_\infty \Big\| \frac{U - U(x, 0)}{y} \ensuremath{\partial}hi_1 \Big\|^2 + \| \bar{u}_{yyyy} x^{\frac 1 2} \|_{L^\infty_x L^1_y} \| U(x, 0) x^{\frac 1 4} \|_{y = 0}^2 \\ \label{rockL}
\lesssim & \| U_y \|^2 + \| U(x, 0) x^{\frac 1 4} \|_{y = 0}^2 \le C_\delta \|U, V \|_{X_0}^2 + \delta \|U, V \|_{Y_{\frac 1 2}}^2.
\end{align}
The term $B^{(1)}_4$ is estimated in an entirely analogous manner. The term $B^{(1)}_6$ is estimated by
\begin{align}
|\int_{y = 0} \bar{u}_{yy} UU_y x \ensuremath{\partial}hi_1^2| \lesssim \ensuremath{\varepsilon}^{\frac 1 2} \| U \langle x \rangle^{- \frac 1 4} \|_{y = 0} \| U_y \ensuremath{\partial}hi_1 \langle x \rangle^{\frac 1 4} \|_{y = 0} \lesssim \ensuremath{\varepsilon}^{\frac 1 2} \| U, V\|_{X_0} \|U, V \|_{Y_{\frac 1 2}},
\end{align}
upon invoking the bound $|\bar{u}_{yy}(x, 0)| \lesssim \ensuremath{\varepsilon}^{\frac 1 2} \langle x \rangle^{-1}$ due to \eqref{prof:u:est} coupled with the fact that $\bar{u}^0_{pyy}(x, 0) = 0$.
In addition to this term, we need to estimate the term $\bar{u}^0_{p yyy}q$ from \eqref{sys:sim:1}, which we do via
\begin{align} \ensuremath{\nonumber}
\int \ensuremath{\partial}_y (\bar{u}^0_{pyyy} q) U_y x =& \int \bar{u}^0_{p yyyy} q U_y x + \int \bar{u}^0_{p yyy} UU_y x = - \int \ensuremath{\partial}_y^5 \bar{u}^0_p q U x - \frac 1 2 \int \ensuremath{\partial}_y^4 \bar{u}^0_p U^2 x \\
= & \frac 1 2 \int \ensuremath{\partial}_y^6 \bar{u}^0_p q^2 x - \frac 1 2 \int \ensuremath{\partial}_y^4 \bar{u}^0_p U^2 x,
\end{align}
which we estimate in an identical manner to \eqref{rockL}.
The next diffusive term is
\begin{align} \label{grey}
- 2 \int \ensuremath{\varepsilon} u_{xxy} U_y x \ensuremath{\partial}hi_1^2= & \int 2 \ensuremath{\varepsilon} u_{xy} U_{xy} x \ensuremath{\partial}hi_1^2 + \int 2 \ensuremath{\varepsilon} u_{xy} U_y \ensuremath{\partial}hi_1^2 + \int 4 \ensuremath{\varepsilon} u_{xy} U_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \ensuremath{\nonumber}
= & \int 2 \ensuremath{\varepsilon} \ensuremath{\partial}_{xy} (\bar{u} U + \bar{u}_y q) U_{xy} x \ensuremath{\partial}hi_1^2 + \int 2 \ensuremath{\varepsilon} \ensuremath{\partial}_x (\bar{u} U + \bar{u}_y q) U_y \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
&+ \int 4 \ensuremath{\varepsilon} u_{xy} U_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' =: \sum_{i = 1}^{5} \tilde{B}^{(2)}_i.
\end{align}
Due to the localization in $x$ of $\phi_1'$, the term $\tilde{B}^{(2)}_5$ above is estimated by
\begin{align} \label{use:E:norm}
|\int \ensuremath{\varepsilon} u_{xy} U_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1'| \lesssim \| \ensuremath{\varepsilon} u_{xy} \ensuremath{\partial}hi_1' \| \| U_y \ensuremath{\partial}hi_1 \| \lesssim \|U, V \|_E (C_\delta \|U, V \|_{X_0} + \delta \|U, V \|_{Y_{\frac 1 2}}).
\end{align}
Due to the length of the forthcoming expressions, we handle each of the remaining four terms in \eqref{grey}, $\tilde{B}^{(2)}_k$, $k = 1,2,3,4$, individually. First, integration by parts yields for $\tilde{B}^{(2)}_1$,
\begin{align} \ensuremath{\nonumber}
\int 2 \ensuremath{\varepsilon} \ensuremath{\partial}_{xy}(\bar{u} U) U_{xy} x \ensuremath{\partial}hi_1^2 = & \int 2 \ensuremath{\varepsilon} \bar{u} U_{xy}^2 x\ensuremath{\partial}hi_1^2 - \int 4 \ensuremath{\varepsilon} \bar{u}_{xxy} UU_y x \ensuremath{\partial}hi_1^2 - \int 4 \ensuremath{\varepsilon} \bar{u}_{xy} U_x U_y x \ensuremath{\partial}hi_1^2- \int 4 \ensuremath{\varepsilon} \bar{u}_{xy} U U_y \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
&- \int \ensuremath{\varepsilon} \ensuremath{\partial}_x (x \bar{u}_x) U_y^2 \ensuremath{\partial}hi_1^2 - \int 4 \ensuremath{\varepsilon} \bar{u}_{yy} U_x^2 x \ensuremath{\partial}hi_1^2- \int_{y = 0} 2 \ensuremath{\varepsilon} \bar{u}_y U_x(x, 0)^2 x \ensuremath{\partial}hi_1^2 \,\mathrm{d} x \\ \label{park1}
& + \int \ensuremath{\varepsilon} \bar{u}_{yyyy} V^2 x \ensuremath{\partial}hi_1^2+ \int 2 \ensuremath{\varepsilon} \bar{u}_{xyy} q U_{xy}x \ensuremath{\partial}hi_1^2 + E_{loc}^{(2)} =: \sum_{i = 1}^{9} B^{(2)}_i + E_{loc}^{(2)},
\end{align}
where $E_{loc}^{(2)}$ are localized contributions that can be controlled by a large factor of $\| U, V \|_{X_0}^2 + \|U, V \|_E^2$.
The first term, $B^{(2)}_1$, is a positive contribution. The terms $B^{(2)}_2$ and $B^{(2)}_4$ are estimated via
\begin{align} \ensuremath{\nonumber}
| \int 4 \ensuremath{\varepsilon} \bar{u}_{xxy} UU_y x \ensuremath{\partial}hi_1^2| + |\int 4 \ensuremath{\varepsilon} \bar{u}_{xy} UU_y \ensuremath{\partial}hi_1^2| \lesssim & \ensuremath{\varepsilon} \Big( \| \bar{u}_{xxy} x^2 \|_\infty + \| \bar{u}_{xy} x \|_\infty \Big) \| U \langle x \rangle^{-1} \ensuremath{\partial}hi_1 \| \| U_y \ensuremath{\partial}hi_1 \| \\
\lesssim & \ensuremath{\varepsilon} (\| U, V \|_{X_0}^2 + \| U, V \|_{Y_{\frac 1 2}}^2 + \| U, V \|_{X_{\frac 1 2}}^2),
\end{align}
where we have appealed to \eqref{bob:1} and \eqref{Hardy:three}.
The terms $B^{(2)}_3$, $B^{(2)}_5$, $B^{(2)}_6$, and $B^{(2)}_8$ are estimated via
\begin{align} \ensuremath{\nonumber}
&|\int 4 \ensuremath{\varepsilon} \bar{u}_{xy} U_x U_y x \ensuremath{\partial}hi_1^2| \lesssim \sqrt{\ensuremath{\varepsilon}} \| \bar{u}_{xy} x \|_\infty \| \sqrt{\ensuremath{\varepsilon}} U_x \ensuremath{\partial}hi_1 \| \| U_y \ensuremath{\partial}hi_1 \| \lesssim \sqrt{\ensuremath{\varepsilon}} \Big( \| U, V \|_{X_0}^2 + \| U, V \|_{Y_{\frac 1 2}}^2 \Big), \\ \ensuremath{\nonumber}
&|\int \ensuremath{\varepsilon} \ensuremath{\partial}_x (x \bar{u}_x) U_y^2 \ensuremath{\partial}hi_1^2| \lesssim \| \ensuremath{\partial}_x (x \bar{u}_x) \|_\infty \ensuremath{\varepsilon} \| U_y \ensuremath{\partial}hi_1 \|^2 \lesssim \ensuremath{\varepsilon} \Big( \| U, V \|_{X_0}^2 + \| U, V \|_{Y_{\frac 1 2}}^2 \Big) \\ \ensuremath{\nonumber}
&|\int 4 \ensuremath{\varepsilon} \bar{u}_{yy} U_x^2 x \ensuremath{\partial}hi_1^2 | \lesssim \| \bar{u}_{yy} x \|_\infty \| \sqrt{\ensuremath{\varepsilon}} U_x \ensuremath{\partial}hi_1 \|^2 \le C_\delta \| U, V \|_{X_0}^2 + \delta \| U, V \|_{Y_{\frac 1 2}}^2, \\ \ensuremath{\nonumber}
&|\int \ensuremath{\varepsilon} \bar{u}_{yyyy} V^2 x \ensuremath{\partial}hi_1^2| \lesssim \ensuremath{\varepsilon} \| \bar{u}_{yyyy} x y^2 \|_\infty \Big\| \frac{V}{y} \ensuremath{\partial}hi_1 \Big\|^2 \le C_\delta \| U, V \|_{X_0}^2 + \delta \| U, V \|_{Y_{\frac 1 2}}^2,
\end{align}
and $B^{(2)}_9$ is estimated via
\begin{align} \ensuremath{\nonumber}
| \int\ensuremath{\varepsilon} \bar{u}_{xyy} q U_{xy} x \ensuremath{\partial}hi_1^2| \lesssim &\sqrt{\ensuremath{\varepsilon}} \| \bar{u}_{xyy} x^{\frac 3 2} y \|_\infty \Big\| \frac{q}{y} \langle x \rangle^{-1} \ensuremath{\partial}hi_1 \Big\| \| \sqrt{\ensuremath{\varepsilon}} \sqrt{\bar{u}} U_{xy} x^{\frac 1 2} \ensuremath{\partial}hi_1 \| \\ \ensuremath{\nonumber}
\lesssim & \sqrt{\ensuremath{\varepsilon}} \| U \langle x \rangle^{-1} \| \| \sqrt{\ensuremath{\varepsilon}} \sqrt{\bar{u}} U_{xy} x^{\frac 1 2} \ensuremath{\partial}hi_1 \| \\ \label{whyb}
\lesssim & \sqrt{\ensuremath{\varepsilon}} ( \| U, V \|_{X_0} + \| U, V \|_{X_{\frac 1 2}} ) \| U, V \|_{Y_{\frac 1 2}}.
\end{align}
The term $B^{(2)}_7$ requires us to use the $X_1$ norm, albeit with a pre-factor of $\ensuremath{\varepsilon}$ and with a weaker weight in $x$:
\begin{align}
|\int_{y = 0} \ensuremath{\varepsilon} \bar{u}_y U_x(x, 0)^2 x \ensuremath{\partial}hi_1^2 \,\mathrm{d} x| \lesssim \ensuremath{\varepsilon} \| \sqrt{\bar{u}_y} U_x(x, 0) x \ensuremath{\partial}hi_1 \|_{x = 0}^2 \lesssim \ensuremath{\varepsilon} \| U, V \|_{X_1}^2,
\end{align}
where we use that the choice of $\ensuremath{\partial}hi_1$ is the same as that of $\| \cdot \|_{X_1}$. This concludes treatment of $\tilde{B}^{(2)}_1$.
The second term from \eqref{grey}, $\tilde{B}^{(2)}_2$, gives
\begin{align} \ensuremath{\nonumber}
\int 2 \ensuremath{\varepsilon} \ensuremath{\partial}_{xy}( \bar{u}_y q) U_{xy} x \ensuremath{\partial}hi_1^2 = &\int 2\ensuremath{\varepsilon} \Big( \bar{u}_{xyy} q + \bar{u}_{xy} U - \bar{u}_{yy} V + \bar{u}_y U_x \Big) U_{xy} x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & \int 2 \ensuremath{\varepsilon} \bar{u}_{xyy} q U_{xy} x \ensuremath{\partial}hi_1^2 - \int 2 \ensuremath{\varepsilon} \bar{u}_{xy} U_x U_y x \ensuremath{\partial}hi_1^2 - \int 2 \ensuremath{\varepsilon} \bar{u}_{xy} UU_y \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& - \int 2 \ensuremath{\varepsilon} \bar{u}_{xxy} x UU_y \ensuremath{\partial}hi_1^2 - \int 2 \ensuremath{\varepsilon} \bar{u}_{yy} V_y^2 x \ensuremath{\partial}hi_1^2 + \int \ensuremath{\varepsilon} \bar{u}_{yyyy} V^2 x \ensuremath{\partial}hi_1^2\\ \label{jup}
& - \int_{y = 0} \ensuremath{\varepsilon} \bar{u}_y U_x(x, 0)^2 x \ensuremath{\partial}hi_1^2 \,\mathrm{d} x - 4 \int \ensuremath{\varepsilon} \bar{u}_{xy} U U_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' = : \sum_{i = 1}^8 J_i.
\end{align}
The final term above, $J_8$, is a contribution localized in $x$ and can easily be controlled by $\| U, V \|_{X_0}^2$. $J_1$ is treated in the same manner as \eqref{whyb}. $J_2, J_3$, and $J_4$ are estimated by
\begin{align} \ensuremath{\nonumber}
&|\int \ensuremath{\varepsilon} \bar{u}_{xy} U_x U_y x \ensuremath{\partial}hi_1^2| \lesssim \sqrt{\ensuremath{\varepsilon}} \| \bar{u}_{xy} x \|_\infty \| \sqrt{\ensuremath{\varepsilon}} U_x \ensuremath{\partial}hi_1 \| \| U_y \ensuremath{\partial}hi_1 \| \lesssim \sqrt{\ensuremath{\varepsilon}} (\| U, V \|_{X_0}^2 + \| U, V \|_{Y_{\frac 1 2}}^2), \\ \ensuremath{\nonumber}
&|\int \ensuremath{\varepsilon} (x \bar{u}_{xy})_x UU_y \ensuremath{\partial}hi_1| \lesssim \ensuremath{\varepsilon} \| \bar{u}_{xy} x \|_\infty \| U \langle x \rangle^{-1} \ensuremath{\partial}hi_1 \| \| U_y \ensuremath{\partial}hi_1 \| \lesssim \ensuremath{\varepsilon} (\| U, V \|_{X_0}^2 + \| U, V \|_{X_{\frac 1 2}}^2+ \| U, V \|_{Y_{\frac 1 2}}^2).
\end{align}
Terms $J_5$, $J_6$, $J_7$ are identical to $B^{(2)}_6$, $B^{(2)}_8$, and $B^{(2)}_7$. This concludes treatment of $\tilde{B}^{(2)}_2$.
The third and fourth terms from \eqref{grey}, $\tilde{B}^{(2)}_3$ and $\tilde{B}^{(2)}_4$ together give
\begin{align} \ensuremath{\nonumber}
\int 2\ensuremath{\varepsilon} \ensuremath{\partial}_x (\bar{u} U + \bar{u}_y q) U_y \ensuremath{\partial}hi_1^2 = &\int 2 \ensuremath{\varepsilon} \bar{u} U_x U_y \ensuremath{\partial}hi_1^2 + \int 2 \ensuremath{\varepsilon} \bar{u}_x U U_y \ensuremath{\partial}hi_1^2+ \int 2 \ensuremath{\varepsilon} \bar{u}_{xy} q U_y - \int 2 \ensuremath{\varepsilon} \bar{u}_y V U_y \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
= & \int 2 \ensuremath{\varepsilon} \bar{u} U_x U_y \ensuremath{\partial}hi_1^2- \int \ensuremath{\varepsilon} \bar{u}_{xy} U^2 \ensuremath{\partial}hi_1^2+ \int 2 \ensuremath{\varepsilon} \bar{u}_{xy} q U_y \ensuremath{\partial}hi_1^2 + \int 2 \ensuremath{\varepsilon} \bar{u}_y V_y U \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
& + \int 2 \ensuremath{\varepsilon} \bar{u}_{yy} UV\ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & \int 2 \ensuremath{\varepsilon} \bar{u} U_x U_y \ensuremath{\partial}hi_1^2 - \int \ensuremath{\varepsilon} \bar{u}_{xy} U^2 \ensuremath{\partial}hi_1^2+ \int 2 \ensuremath{\varepsilon} \bar{u}_{xy} q U_y \ensuremath{\partial}hi_1^2+ \int \ensuremath{\varepsilon} \bar{u}_{xy} U^2 \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
& + \int 2 \ensuremath{\varepsilon} \bar{u}_{yy} UV \ensuremath{\partial}hi_1^2 + \int 2 \ensuremath{\varepsilon} \bar{u}_y U^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \label{hein}
= & \int 2 \ensuremath{\varepsilon} \bar{u} U_x U_y \ensuremath{\partial}hi_1^2+ \int 2 \ensuremath{\varepsilon} \bar{u}_{xy} q U_y \ensuremath{\partial}hi_1^2+ \int 2 \ensuremath{\varepsilon} \bar{u}_{yy} UV \ensuremath{\partial}hi_1^2 + \int 2 \ensuremath{\varepsilon} \bar{u}_y U^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1'.
\end{align}
The first and final terms from \eqref{hein} can easily be estimated by $\sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_0}^2$, while the second term from \eqref{hein} is estimated via
\begin{align} \ensuremath{\nonumber}
|\int 2 \ensuremath{\varepsilon} \bar{u}_{xy} q U_y \ensuremath{\partial}hi_1^2| \lesssim \ensuremath{\varepsilon} \| \bar{u}_{xy} y x \|_\infty \| \frac{q}{y} \langle x \rangle^{-1} \ensuremath{\partial}hi_1 \| \| U_y \ensuremath{\partial}hi_1 \| \lesssim \ensuremath{\varepsilon} \| U \langle x \rangle^{-1} \| \| U_y \ensuremath{\partial}hi_1 \|
\end{align}
and the third term from \eqref{hein} can be estimated via
\begin{align}
|\int 2 \ensuremath{\varepsilon} \bar{u}_{yy} UV \ensuremath{\partial}hi_1^2| \lesssim & \ensuremath{\varepsilon} | \int \bar{u}_{yy} \mathring{U} V \ensuremath{\partial}hi_1^2| + \ensuremath{\varepsilon} |\int \bar{u}_{yy} U(x, 0) V \ensuremath{\partial}hi_1^2| \\ \ensuremath{\nonumber}
\lesssim & \sqrt{\ensuremath{\varepsilon}} \| \bar{u}_{yy} y^2 \|_\infty \| U_y \ensuremath{\partial}hi_1 \| \| \sqrt{\ensuremath{\varepsilon}} V_y \ensuremath{\partial}hi_1 \| + \sqrt{\ensuremath{\varepsilon}} \| \bar{u}_{yy} y x^{- \frac 1 4} \|_{L^\infty_x L^1_y} \| U(x, 0) x^{\frac 1 4} \|_{L^2(x = 0)} \| \sqrt{\ensuremath{\varepsilon}} V_y \ensuremath{\partial}hi_1 \|.
\end{align}
We now arrive at the final diffusive term, which we integrate by parts in $x$ via
\begin{align} \ensuremath{\nonumber}
\int \ensuremath{\varepsilon}^2 v_{xxx} U_y x \ensuremath{\partial}hi_1^2 = & - \ensuremath{\varepsilon}^2 \int v_{xx} U_{xy} x \ensuremath{\partial}hi_1^2- \int \ensuremath{\varepsilon}^2 v_{xx} U_y \ensuremath{\partial}hi_1^2 - 2 \ensuremath{\varepsilon}^2 \int v_{xx} U_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon}^2 v_{xx} V_{yy} x \ensuremath{\partial}hi_1^2 - \int \ensuremath{\varepsilon}^2 v_x V_{yy} \ensuremath{\partial}hi_1^2 + 2 \int \ensuremath{\varepsilon}^2 v_x U_y \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' - 2 \ensuremath{\varepsilon}^2 \int v_{xx} U_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \label{misfit}
= & \tilde{P}_1 + \tilde{P}_2 + \tilde{P}_3 + \tilde{P}_4.
\end{align}
Again, the terms with $\ensuremath{\partial}hi_1'$ above, $\tilde{P}_3, \tilde{P}_4$, are easily estimated above by a factor of $\| U, V \|_{X_0}^2 + \|U, V \|_E^2$ due to the localization in $x$, in an analogous manner to \eqref{use:E:norm}.
For the second term on the right-hand side of \eqref{misfit}, $\tilde{P}_2$, we produce the following identity
\begin{align} \ensuremath{\nonumber}
- \int \ensuremath{\varepsilon}^2 v_x V_{yy} \ensuremath{\partial}hi_1^2= & - \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x (\bar{u} V - \bar{u}_x q) V_{yy} \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & - \int \ensuremath{\varepsilon}^2 (\bar{u} V_x + 2 \bar{u}_x V - \bar{u}_{xx}q) V_{yy} \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon}^2 \bar{u}_y V_x V_y \ensuremath{\partial}hi_1^2 + \int \ensuremath{\varepsilon}^2 \bar{u} V_{xy} V_y \ensuremath{\partial}hi_1^2 + \int 2\ensuremath{\varepsilon}^2 \bar{u}_x V_y^2 \ensuremath{\partial}hi_1^2 + \int 2 \ensuremath{\varepsilon}^2 \bar{u}_{xy} V V_y \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& - \int \ensuremath{\varepsilon}^2 \bar{u}_{xx} U V_y \ensuremath{\partial}hi_1^2 + \int \ensuremath{\varepsilon}^2 \bar{u}_{xxy} UV \ensuremath{\partial}hi_1^2+ \int \frac{\ensuremath{\varepsilon}^2}{2} \bar{u}_{xxxyy} q^2 \ensuremath{\partial}hi_1^2+ \int \ensuremath{\varepsilon}^2 \bar{u}_{xxyy} q^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon}^2 \bar{u}_y V_x V_y \ensuremath{\partial}hi_1^2- \frac 3 2 \int \ensuremath{\varepsilon}^2 \bar{u}_x V_y^2\ensuremath{\partial}hi_1^2 - \int \ensuremath{\varepsilon}^2 \bar{u}_{xyy} V^2 \ensuremath{\partial}hi_1^2- \frac{\ensuremath{\varepsilon}^2}{2} \int \bar{u}_{xxx} U^2 \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
& + \int \ensuremath{\varepsilon}^2 \bar{u}_{xxy} UV \ensuremath{\partial}hi_1^2 + \frac{\ensuremath{\varepsilon}^2}{2} \int \bar{u}_{xxxyy} q^2 \ensuremath{\partial}hi_1^2- \int \ensuremath{\varepsilon}^2 \bar{u} V_y^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' + \int \ensuremath{\varepsilon}^2 \bar{u}_{xxyy} q^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\
& - \int \ensuremath{\varepsilon}^2 \bar{u}_{xx} U^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' = \sum_{i = 1}^{9} P^{(1)}_i.
\end{align}
Again, the terms with a $\ensuremath{\partial}hi_1'$, $P^{(1)}_7, P^{(1)}_8, P^{(1)}_9$, are easily controlled by a factor of $\| U, V \|_{X_0}^2$.
We now proceed to estimate each of the remaining terms above via
\begin{align*}
&|\int \ensuremath{\varepsilon}^2 \bar{u}_y V_x V_y \ensuremath{\partial}hi_1^2| \lesssim \sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\varepsilon} V_x \ensuremath{\partial}hi_1\| \| \sqrt{\ensuremath{\varepsilon}} V_y \ensuremath{\partial}hi_1 \| \lesssim \sqrt{\ensuremath{\varepsilon}} (\| U, V \|_{X_0}^2 + \| U, V \|_{Y_{\frac 1 2}}^2), \\
&|\int \ensuremath{\varepsilon}^2 \bar{u}_x V_y^2 \ensuremath{\partial}hi_1^2| \lesssim \ensuremath{\varepsilon} \| \sqrt{\ensuremath{\varepsilon}} V_y \ensuremath{\partial}hi_1 \|^2 \lesssim \ensuremath{\varepsilon} (\| U, V \|_{X_0}^2 + \| U, V \|_{Y_{\frac 1 2}}^2), \\
&|\int \ensuremath{\varepsilon}^2 \bar{u}_{xyy} V^2 \ensuremath{\partial}hi_1^2| \lesssim \ensuremath{\varepsilon} \| \bar{u}_{xyy} y^2 \|_\infty \Big\| \sqrt{\ensuremath{\varepsilon}} \frac{V}{y} \ensuremath{\partial}hi_1 \Big\|^2 \lesssim \ensuremath{\varepsilon} \| \sqrt{\ensuremath{\varepsilon}} V_y \ensuremath{\partial}hi_1\|^2 \lesssim \ensuremath{\varepsilon} (\| U, V \|_{X_0}^2 + \| U, V \|_{Y_{\frac 1 2}}^2), \\
&|\int \ensuremath{\varepsilon}^2 \bar{u}_{xxx} U^2 \ensuremath{\partial}hi_1^2| \lesssim \ensuremath{\varepsilon}^2 \| \bar{u}_{xxx} x^2 \|_\infty \| U \langle x \rangle^{-1} \|^2 \lesssim \ensuremath{\varepsilon}^2 (\| U, V \|_{X_0}^2 + \| U, V \|_{X_{\frac 1 2}}^2), \\
&|\int \ensuremath{\varepsilon}^2 \bar{u}_{xxy} UV \ensuremath{\partial}hi_1^2| \lesssim \ensuremath{\varepsilon}^{\frac 32} \| U \langle x \rangle^{-1} \| \| \sqrt{\ensuremath{\varepsilon}} V_y \ensuremath{\partial}hi_1\| \lesssim \ensuremath{\varepsilon}^{\frac 3 2} (\| U, V \|_{X_0}^2 + \| U, V \|_{X_{\frac 1 2}}^2 + \| U, V \|_{Y_{\frac 1 2}}^2), \\
&| \frac{\ensuremath{\varepsilon}^2}{2} \int \bar{u}_{xxxyy} q^2 \ensuremath{\partial}hi_1^2| \lesssim \ensuremath{\varepsilon}^2 \| \bar{u}_{xxxyy} x^2 y^2 \|_\infty \| U \langle x \rangle^{-1} \|^2 \lesssim \ensuremath{\varepsilon}^2 (\| U, V \|_{X_0}^2 + \| U, V \|_{X_{\frac 1 2}}^2).
\end{align*}
This concludes the treatment of $\tilde{P}_2$.
We now treat $\tilde{P}_1$. We further integrate by parts using that $v = \bar{u} V - \bar{u}_x q$, which produces the following identity
\begin{align} \label{mis2}
\tilde{P}_1 = \int \ensuremath{\varepsilon}^2 v_{xx} V_{yy} x \ensuremath{\partial}hi_1^2= & \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_{xx} (\bar{u} V - \bar{u}_x q) V_{yy} x \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon}^2 (\bar{u} V_{xx} + 3 \bar{u}_x V_x + 3 \bar{u}_{xx} V - \bar{u}_{xxx} q) V_{yy} x \ensuremath{\partial}hi_1^2 =: \tilde{P}_{1,1} + \tilde{P}_{1,2} + \tilde{P}_{1,3} + \tilde{P}_{1,4}.
\end{align}
For $\tilde{P}_{1,1}$, we integrate by parts several times in $x$ and $y$ to produce
\begin{align} \ensuremath{\nonumber}
\int \ensuremath{\varepsilon}^2 \bar{u} V_{xx} V_{yy} x \ensuremath{\partial}hi_1^2 = & - \int \ensuremath{\varepsilon}^2 \bar{u} V_{xxy} V_y x \ensuremath{\partial}hi_1^2 - \int \ensuremath{\varepsilon}^2 \bar{u}_y V_{xx} V_y x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon}^2 \bar{u} V_{xy}^2 x \ensuremath{\partial}hi_1^2+ \int \ensuremath{\varepsilon}^2 x \bar{u}_x V_{xy} V_y \ensuremath{\partial}hi_1^2+ \int \ensuremath{\varepsilon}^2 \bar{u} V_{xy} V_y \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
& + \int \ensuremath{\varepsilon}^2 \bar{u}_y V_x V_y \ensuremath{\partial}hi_1^2+ \int \ensuremath{\varepsilon}^2 \bar{u}_{xy} V_x V_y x \ensuremath{\partial}hi_1^2+ \int \ensuremath{\varepsilon}^2 \bar{u}_y V_x V_{xy}x \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
& + 2\int \ensuremath{\varepsilon}^2 \bar{u} V_{xy} V_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' + 2 \int \ensuremath{\varepsilon}^2 \bar{u}_y V_x V_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon}^2 \bar{u} V_{xy}^2 x \ensuremath{\partial}hi_1^2 - \ensuremath{\varepsilon}^2 \frac 1 2 \int \ensuremath{\partial}_x (x \bar{u}_x) V_y^2 \ensuremath{\partial}hi_1^2 - \frac 1 2 \int \ensuremath{\varepsilon}^2 \bar{u}_x V_y^2 \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& + \int \ensuremath{\varepsilon}^2 \bar{u}_y V_x V_y \ensuremath{\partial}hi_1^2+ \int \ensuremath{\varepsilon}^2 \bar{u}_{xy} V_x V_y x\ensuremath{\partial}hi_1^2 - \int \frac{\ensuremath{\varepsilon}^2}{2} \bar{u}_{yy} V_x^2 x \ensuremath{\partial}hi_1^2 \\ \ensuremath{\nonumber}
& - \int \ensuremath{\varepsilon}^2 \bar{u} V_y^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' - \int \ensuremath{\varepsilon}^2 x \bar{u}_x V_y^2 \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' + 2\int \ensuremath{\varepsilon}^2 \bar{u} V_{xy} V_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' \\ \label{clay}
& + 2 \int \ensuremath{\varepsilon}^2 \bar{u}_y V_x V_y x \ensuremath{\partial}hi_1 \ensuremath{\partial}hi_1' =: \sum_{i = 1}^{10} H^{(1)}_i.
\end{align}
All of the terms with $\ensuremath{\partial}hi_1'$ can again be controlled by a factor of $\| U, V \|_{X_0}^2 + \|U, V \|_E^2$. The first term, $H^{(1)}_1$, is a positive contribution. $H^{(1)}_2$ and $H^{(1)}_3$ are estimated by
\begin{align*}
|\int \frac 1 2 \ensuremath{\partial}_x (x \bar{u}_x) V_y^2 \ensuremath{\partial}hi_1^2| + |\int \frac 1 2 \ensuremath{\varepsilon}^2 \bar{u}_x V_y^2 \ensuremath{\partial}hi_1^2| \lesssim &(\| \ensuremath{\partial}_x (x \bar{u}_x) \|_\infty + \| \bar{u}_x \|_\infty ) \ensuremath{\varepsilon} \| \sqrt{\ensuremath{\varepsilon}} V_y \ensuremath{\partial}hi_1 \|^2 \\
\lesssim & \ensuremath{\varepsilon} (\| U, V \|_{X_0}^2 + \| U, V \|_{Y_{\frac 1 2}}^2),
\end{align*}
while the $H^{(1)}_4, H^{(1)}_5$ and $H^{(1)}_6$ are estimated via
\begin{align*}
&| \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x (x \bar{u}_y) V_x V_y\ensuremath{\partial}hi_1^2 | \lesssim \sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\partial}_x (x \bar{u}_y) \|_\infty \| \ensuremath{\varepsilon} V_x \| \| \sqrt{\ensuremath{\varepsilon}} V_y \| \lesssim \sqrt{\ensuremath{\varepsilon}} (\| U, V \|_{X_0}^2 + \| U, V \|_{Y_{\frac 1 2}}^2), \\
&|\int \ensuremath{\varepsilon}^2 \bar{u}_{yy} V_x^2 x\ensuremath{\partial}hi_1^2| \lesssim \| \bar{u}_{yy} x \|_\infty \| \ensuremath{\varepsilon} V_x \|^2 \le \delta \| U, V \|_{Y_{\frac 1 2}}^2 + \| U, V \|_{X_0}^2.
\end{align*}
This concludes the treatment of $\tilde{P}_{1,1}$.
The terms $\tilde{P}_{1,k}$, $k = 2, 3, 4$, are equivalent to
\begin{align} \ensuremath{\nonumber}
&\ensuremath{\varepsilon}^2 \int (3 \bar{u}_x V_x + 3 \bar{u}_{xx} V - \bar{u}_{xxx} q) V_{yy} x \ensuremath{\partial}hi_1^2\\ \ensuremath{\nonumber}
= & - \int 3 \ensuremath{\varepsilon}^2 \bar{u}_{xy} V_x V_y x \ensuremath{\partial}hi_1^2+ \frac 3 2 \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x (x\bar{u}_{x}) V_y^2 x \ensuremath{\partial}hi_1^2 - \int 3 \ensuremath{\varepsilon}^2 \bar{u}_{xx} V_y^2 x \ensuremath{\partial}hi_1^2 \\ \label{clay2}
& + \frac 3 2 \int \ensuremath{\varepsilon}^2 \bar{u}_{xxyy} V^2 x \ensuremath{\partial}hi_1^2 + \int \ensuremath{\varepsilon}^2 \bar{u}_{xxxy} q V_y x + \int \ensuremath{\varepsilon}^2 \bar{u}_{xxx} U V_y x \ensuremath{\partial}hi_1^2.
\end{align}
We estimate each of these contributions in a nearly identical fashion to the terms from \eqref{clay}, and so omit repeating these details.
\noindent \textit{Step 4: Error Terms:} We now estimate the error terms on the right-hand side of \eqref{eq:vort:pre}, starting with
\begin{align} \ensuremath{\nonumber}
\int \ensuremath{\partial}_y(\zeta U) U_y x \ensuremath{\partial}hi_1^2 = & \int \zeta U_y^2 x \ensuremath{\partial}hi_1^2 + \int \ensuremath{\partial}_y \zeta U U_y x \ensuremath{\partial}hi_1^2 \\ \label{riwiu}
= & \int \zeta U_y^2 x \ensuremath{\partial}hi_1^2 - \frac 1 2 \int \ensuremath{\partial}_y^2 \zeta U^2 x - \frac 1 2 \int_{y = 0} \ensuremath{\partial}_y \zeta U^2 x \ensuremath{\partial}hi_1^2.
\end{align}
The first term above is estimated via
\begin{align} \ensuremath{\nonumber}
|\int \zeta U_y^2 x \ensuremath{\partial}hi_1^2| \lesssim \sqrt{\ensuremath{\varepsilon}} \| U_y \ensuremath{\partial}hi_1 \|^2 \lesssim \sqrt{\ensuremath{\varepsilon}} (\| U, V \|_{X_0}^2 + \| U, V \|_{Y_{\frac 1 2}}^2),
\end{align}
where we have appealed to the estimate \eqref{S:0} as well as the Hardy type inequality \eqref{bob:1}.
For the second and third terms from \eqref{riwiu}, we estimate via
\begin{align}
|\int \ensuremath{\partial}_y^2 \zeta U^2 x| + |\int_{y = 0} \ensuremath{\partial}_y \zeta U^2 x \ensuremath{\partial}hi_1^2| \lesssim \sqrt{\ensuremath{\varepsilon}} \| U \langle x \rangle^{- \frac 1 2 - \frac{1}{100}} \|^2 + \sqrt{\ensuremath{\varepsilon}} \| U \langle x \rangle^{- \frac 1 2} \|_{y = 0}^2 \lesssim \sqrt{\ensuremath{\varepsilon}} \| U, V \|_{X_0}^2,
\end{align}
where we have appealed to \eqref{est:zeta:2}.
The $(\zeta_y q)_y$ and $(\zeta V)_x$ terms on the right-hand side of \eqref{eq:vort:pre} are estimated in a completely analogous manner. We now estimate the term
\begin{align} \ensuremath{\nonumber}
| \int \ensuremath{\varepsilon} (\alpha U)_x U_y x \ensuremath{\partial}hi_1^2 | \le & |\int \ensuremath{\varepsilon} \alpha U_x U_y x \ensuremath{\partial}hi_1^2| + |\int \ensuremath{\varepsilon} \alpha_x U U_y x \ensuremath{\partial}hi_1^2| \\ \ensuremath{\nonumber}
\lesssim & \sqrt{\ensuremath{\varepsilon}} \| \sqrt{\ensuremath{\varepsilon}} U_x \ensuremath{\partial}hi_1 \| \| U_y \ensuremath{\partial}hi_1 \| + \ensuremath{\varepsilon} \| U \langle x \rangle^{-1} \| \| U_y \ensuremath{\partial}hi_1 \| \\
\lesssim & \sqrt{\ensuremath{\varepsilon}} (\| U, V \|_{X_0}^2 + \| U, V \|_{Y_{\frac 1 2}}^2) + \ensuremath{\varepsilon} \| U, V \|_{X_0}^2,
\end{align}
where we have appealed to estimate \eqref{S:1} to estimate the coefficient $\alpha$. The remaining term with $(\alpha_y q)_x$ is estimated in a completely analogous manner. This concludes the proof of Lemma \ref{lemma:y:half}.
\end{proof}
\subsection{$X_n$ Estimates, $1 \le n \le 10$}
It is convenient to estimate the commutators, $\mathcal{C}_1^n, \mathcal{C}_2^n$, defined in \eqref{def:C1n} - \eqref{def:C2n}.
\begin{lemma} The quantities $\mathcal{C}_1^{n}, \mathcal{C}_2^{n}$ satisfy the following estimates
\begin{align} \label{twins:1}
\| \ensuremath{\partial}_y^j \mathcal{C}_1^{n} \langle x \rangle^{n + \frac 1 2 + \frac j 2} \ensuremath{\partial}hi_n \| + \| \sqrt{\ensuremath{\varepsilon}} \ensuremath{\partial}_y^j \mathcal{C}_2^{n} \langle x \rangle^{n + \frac 1 2 + \frac j 2} \ensuremath{\partial}hi_n \| \lesssim \sqrt{\ensuremath{\varepsilon}} \| U, V \|_{\mathcal{X}_{\le n-1 + \frac j 2}},
\end{align}
for $j = 0, 1$.
\end{lemma}
\begin{proof} We start with the estimation of $\mathcal{C}_1^n$, defined in \eqref{def:C1n}, which we do via
\begin{align} \ensuremath{\nonumber}
\| \mathcal{C}_1^n \langle x \rangle^{n + \frac 1 2} \ensuremath{\partial}hi_n \| \lesssim& \sum_{k = 0}^{n-1} \| \ensuremath{\partial}_x^{n-k} \zeta \langle x \rangle^{(n-k) + 1.01} \|_\infty \| U^{(k)} \langle x \rangle^{k - \frac 1 2 - .01} \| \\ \label{dan:bek}
&+ \| \ensuremath{\partial}_x^{n-k} \ensuremath{\partial}_y \zeta \langle x \rangle^{(n-k) + 1.01} y \|_\infty \| \frac{q^{(k)}}{y} \langle x \rangle^{k - \frac 1 2 - .01} \| \lesssim \sqrt{\ensuremath{\varepsilon}} \|U, V \|_{\mathcal{X}_{\le n-1}},
\end{align}
where we have appealed to estimate \eqref{S:0} for the coefficient of $\zeta$.
We now address the terms in $\mathcal{C}_2^{n}$ via
\begin{align} \ensuremath{\nonumber}
\| \sqrt{\ensuremath{\varepsilon}} \mathcal{C}_2^n \langle x \rangle^{n + \frac 1 2} \phi_n \| \lesssim& \sum_{k = 0}^{n-1} \sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\partial}_x^{n-k} \alpha \langle x \rangle^{(n-k) + \frac 3 2} \|_\infty \| U^{(k)} \langle x \rangle^{k-1} \phi_n \| \\ \ensuremath{\nonumber}
& + \sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\partial}_x^{n-k} \alpha_y \langle x \rangle^{(n-k) + \frac 3 2} y \|_\infty \| \frac{ q^{(k)} }{y} \langle x \rangle^{k-1} \phi_n \| \\
& + \| \ensuremath{\partial}_x^{n-k} \zeta \langle x \rangle^{(n-k) + 1.01} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} V^{(k)} \langle x \rangle^{k - \frac 1 2 - .01} \| \lesssim \sqrt{\ensuremath{\varepsilon}} \|U, V \|_{\mathcal{X}_{\le n-1}},
\end{align}
where we have appealed to estimate \eqref{S:1} to estimate the coefficient $\alpha$. The higher order $y$ derivative works in an identical manner.
\end{proof}
\begin{lemma} For any $n \ge 1$,
\begin{align} \label{estXnnorm}
\| U, V \|_{X_n}^2 \lesssim \| U, V \|_{\mathcal{X}_{\le n-\frac 1 2}}^2 + \mathcal{T}_{X_{n}} + \mathcal{F}_{X_{n}},
\end{align}
where
\begin{align} \ensuremath{\nonumber}
\mathcal{T}_{X_n} := &\int \ensuremath{\partial}_x^{n} \mathcal{N}_1(u, v) U^{(n)} \langle x \rangle^{2n} \phi_n^2 + \int \ensuremath{\varepsilon} \ensuremath{\partial}_x^n \mathcal{N}_2(u, v) \Big( \ensuremath{\varepsilon} V^{(n)} \langle x \rangle^{2n} \phi_n^2 + 2n \ensuremath{\varepsilon} V^{(n-1)} \langle x \rangle^{2n-1} \phi_n^2 \\ \label{def:TXn}
&+ 2 \ensuremath{\varepsilon} V^{(n-1)} \langle x \rangle^{2n} \phi_n \phi_n' \Big), \\ \ensuremath{\nonumber}
\mathcal{F}_{X_n} := & \int \ensuremath{\partial}_x^n F_R U^{(n)} \langle x \rangle^{2n} \phi_n^2 + \int \ensuremath{\varepsilon} \ensuremath{\partial}_x^n G_R \Big( \ensuremath{\varepsilon} V^{(n)} \langle x \rangle^{2n} \phi_n^2 + 2n \ensuremath{\varepsilon} V^{(n-1)} \langle x \rangle^{2n-1} \phi_n^2 \\ \label{def:FXn}
&+ 2 \ensuremath{\varepsilon} V^{(n-1)} \langle x \rangle^{2n} \phi_n \phi_n' \Big).
\end{align}
\end{lemma}
\begin{proof} We apply the multiplier
\begin{align} \label{mult:Xn}
[U^{(n)} \langle x \rangle^{2n} \phi_{n}^2, \ensuremath{\varepsilon} V^{(n)} \langle x \rangle^{2n} \phi_n^2 + 2n \ensuremath{\varepsilon} V^{(n-1)} \langle x \rangle^{2n-1} \phi_n^2+ 2 \ensuremath{\varepsilon} V^{(n-1)} \langle x \rangle^{2n} \phi_n \phi_n']
\end{align}
to the system \eqref{sys:sim:n1} - \eqref{sys:sim:n3}. The interaction of the multipliers \eqref{mult:Xn} with the left-hand side of \eqref{sys:sim:n1} - \eqref{sys:sim:n2} is essentially identical to that of Lemma \ref{Lem:2}. As such, we treat the new commutators arising from the $\mathcal{C}_1^n, \mathcal{C}_2^n$ terms, defined in \eqref{def:C1n} - \eqref{def:C2n}. First, we have
\begin{align} \ensuremath{\nonumber}
|\int \mathcal{C}_1^{n} U^{(n)} \langle x \rangle^{2n} \phi_n^2| \lesssim & \| \mathcal{C}_1^{n} \langle x \rangle^{n + \frac 1 2} \phi_n \| \| U^{(n)} \langle x \rangle^{n - \frac 1 2} \phi_n \| \\
\lesssim & \sqrt{\ensuremath{\varepsilon}} \|U, V \|_{\mathcal{X}_{\le n - 1}} ( \|U, V \|_{\mathcal{X}_{\le n - 1}} + \| U, V \|_{X_n} ).
\end{align}
Next,
\begin{align} \ensuremath{\nonumber}
|\int \ensuremath{\varepsilon} \mathcal{C}_2^{n} V^{(n)} \langle x \rangle^{2n} \phi_n^2| \lesssim & \| \sqrt{\ensuremath{\varepsilon}} \mathcal{C}_2^{n} \langle x \rangle^{n + \frac 1 2} \phi_n \| \| \sqrt{\ensuremath{\varepsilon}} V^{(n)} \langle x \rangle^{n - \frac 1 2} \phi_n \| \\
\lesssim & \sqrt{\ensuremath{\varepsilon}} \|U, V \|_{\mathcal{X}_{\le n - 1}} ( \|U, V \|_{\mathcal{X}_{\le n - 1}} + \| U, V \|_{X_n} ).
\end{align}
The identical estimate works as well for the middle term from the multiplier in \eqref{mult:Xn}, whereas the final term with $\phi_n'$ is localized in $x$ and lower order, and therefore trivially bounded by $\|U, V\|_{\mathcal{X}_{\le n - \frac 1 2}}^2$.
\end{proof}
\subsection{$X_{n + \frac 1 2} \cap Y_{n + \frac 1 2}$ Estimates, $1 \le n \le 10$}
We now provide estimates on the higher order $X_{n + \frac 1 2}$ and $Y_{n + \frac 1 2}$ norms. Notice that these estimates still ``lose a derivative", due to degeneracy at $y = 0$.
\begin{lemma} For any $0 < \delta \ll 1$,
\begin{align} \label{esthalfnX}
\| U, V \|_{X_{n + \frac 1 2 }}^2 \le &C_\delta \|U, V \|_{\mathcal{X}_{\le n}}^2 + \delta \| U, V \|_{X_{n+1}}^2 + \delta \| U, V \|_{Y_{n + \frac 1 2}}^2 + \mathcal{T}_{X_{n+ \frac 1 2}} + \mathcal{F}_{X_{n+ \frac 1 2}},
\end{align}
where we define
\begin{align} \ensuremath{\nonumber}
\mathcal{T}_{X_{n + \frac 1 2}} := & \int \ensuremath{\partial}_x^{n} \mathcal{N}_1(u, v)U^{(n)}_x \langle x \rangle^{1+2n} \phi_{n+1}^2 + \int \ensuremath{\varepsilon} \ensuremath{\partial}_x^n \mathcal{N}_2(u, v) \Big( V^{(n)}_x \langle x \rangle^{1+2n} \phi_{n+1}^2 \\ \label{def:TXnp12}
&+ (1 + 2n) V^{(n)} \langle x \rangle^{2n} \phi_{n+1}^2 + 2 V^{(n)} \langle x\rangle^{1+2n} \phi_{n+1} \phi_{n+1}' \Big), \\ \ensuremath{\nonumber}
\mathcal{F}_{X_{n + \frac 1 2}} := & \int \ensuremath{\partial}_x^{n}F_R U^{(n)}_x \langle x \rangle^{1+2n} \phi_{n+1}^2 + \int \ensuremath{\varepsilon} \ensuremath{\partial}_x^n G_R \Big( V^{(n)}_x \langle x \rangle^{1+2n} \phi_{n+1}^2 \\ \label{def:FXnp12}
&+ (1 + 2n) V^{(n)} \langle x \rangle^{2n} \phi_{n+1}^2 + 2 V^{(n)} \langle x\rangle^{1+2n} \phi_{n+1} \phi_{n+1}' \Big).
\end{align}
\end{lemma}
\begin{proof} We apply the multiplier
\begin{align}
[U^{(n)}_x \langle x \rangle^{1+2n} \phi_{n+1}^2, \ensuremath{\varepsilon} V^{(n)}_x \langle x \rangle^{1+2n} \phi_{n+1}^2 + \ensuremath{\varepsilon} (1 + 2n) V^{(n)} \langle x \rangle^{2n} \phi_{n+1}^2 + 2\ensuremath{\varepsilon} V^{(n)} \langle x\rangle^{1+2n} \phi_{n+1} \phi_{n+1}']
\end{align}
to the system \eqref{sys:sim:n1} - \eqref{sys:sim:n3}. Again, the interaction of these multipliers with the left-hand side of \eqref{sys:sim:n1} - \eqref{sys:sim:n3} is nearly identical to that of Lemma \ref{Lem:3}, and so we proceed to treat the commutators arising from $\mathcal{C}_1^{n}, \mathcal{C}_2^{n}$. We also may clearly estimate the contribution of the $\phi_{n+1}'$ term by a factor of $\|U, V \|_{\mathcal{X}_{\le n}}$. We have
\begin{align} \ensuremath{\nonumber}
|\int \mathcal{C}_1^{n} U^{(n)}_x \langle x \rangle^{1+2n} \phi_{n+1}^2| \lesssim & \| \mathcal{C}_1^{n} \langle x \rangle^{n + \frac 1 2} \phi_{n+1} \| \| U_x^{(n)} \langle x \rangle^{n + \frac 1 2} \phi_{n+1} \| \\
\lesssim & \sqrt{\ensuremath{\varepsilon}} \|U \|_{\mathcal{X}_{\le n-1}} ( \|U \|_{X_{n+\frac 1 2}} + \|U, V \|_{X_{n+1}} )
\end{align}
and similarly
\begin{align} \ensuremath{\nonumber}
&|\int \mathcal{C}_2^{n} (\ensuremath{\varepsilon} V^{(n)}_x \langle x \rangle^{1+2n} \phi_{n+1}^2 + \ensuremath{\varepsilon} (1 + 2n) V^{(n)} \langle x \rangle^{2n} \phi_{n+1}^2 )| \\ \ensuremath{\nonumber}
\lesssim & \| \sqrt{\ensuremath{\varepsilon}} \mathcal{C}_2^{n} \langle x \rangle^{n + \frac 12} \phi_{n+1} \| ( \| \sqrt{\ensuremath{\varepsilon}} V^{(n)}_x \langle x \rangle^{n + \frac 1 2} \phi_{n+1} \| + \| \sqrt{\ensuremath{\varepsilon}} V^{(n)} \langle x \rangle^{n - \frac 12} \phi_{n+1} \| ) \\
\lesssim& \sqrt{\ensuremath{\varepsilon}} \|U \|_{\mathcal{X}_{\le n-1}} ( \|U \|_{X_{n+\frac 1 2}} + \|U, V \|_{X_{n+1}} ),
\end{align}
where above we have invoked estimate \eqref{twins:1}.
\end{proof}
\begin{lemma}For any $0 < \delta \ll 1$,
\begin{align} \label{esthalfnY}
\| U, V \|_{Y_{n + \frac 1 2 }}^2 \le &C_\delta \|U, V \|_{\mathcal{X}_{\le n}}^2 + C_\delta \|U, V \|_E^2+ \delta \| U, V \|_{X_{n+1}}^2 + \delta \| U, V \|_{X_{n + \frac 1 2}}^2 + \mathcal{T}_{Y_{n+ \frac 1 2}} + \mathcal{F}_{Y_{n+ \frac 1 2}},
\end{align}
where
\begin{align} \label{def:TYn12}
\mathcal{T}_{Y_{n + \frac 1 2}} := & \int \Big( \ensuremath{\partial}_x^n \ensuremath{\partial}_y \mathcal{N}_1(u, v) - \ensuremath{\varepsilon} \ensuremath{\partial}_x^{n+1} \mathcal{N}_2(u, v) \Big) U^{(n)}_y \langle x \rangle^{1 + 2n} \phi_{n+1}^2, \\ \label{def:TXn12}
\mathcal{F}_{Y_{n + \frac 1 2}} := & \int \Big( \ensuremath{\partial}_x^n \ensuremath{\partial}_y F_R - \ensuremath{\varepsilon} \ensuremath{\partial}_x^{n+1} G_R \Big) U^{(n)}_y \langle x \rangle^{1 + 2n} \phi_{n+1}^2.
\end{align}
\end{lemma}
\begin{proof} We again only need to estimate the commutator terms, which are
\begin{align} \ensuremath{\nonumber}
|\int ( \ensuremath{\partial}_y \mathcal{C}_1^n - \ensuremath{\varepsilon} \ensuremath{\partial}_x \mathcal{C}_2^n) U^{(n)}_y\langle x \rangle^{1 + 2n} \phi_{n+1}^2| \lesssim &\| ( \ensuremath{\partial}_y \mathcal{C}_1^n - \ensuremath{\varepsilon} \ensuremath{\partial}_x \mathcal{C}_2^n) \langle x \rangle^{n + 1} \phi_{n+1} \| \| U^{(n)}_y \langle x \rangle^{n } \phi_{n+1} \| \\
\lesssim & \sqrt{\ensuremath{\varepsilon}} \|U, V \|_{\mathcal{X}_{\le n - \frac 1 2}} \|U, V \|_{\mathcal{X}_{\le n + \frac 1 2}},
\end{align}
with the help again of estimate \eqref{twins:1}.
\end{proof}
\section{Top Order Estimates} \label{section:top:order}
In this section, we obtain top order control over the solution, more specifically we provide an estimate for $\| U, V \|_{X_{11}}$, defined in \eqref{def:Xn}. To establish this, we need to first perform a \textit{nonlinear change of variables} and to define auxiliary norms which are nonlinear (these will eventually control the $\| U, V \|_{X_{11}}$).
\subsection{Nonlinear Change of Variables}
We group the linearized and nonlinear terms from \eqref{vel:eqn:1} via
\begin{align} \label{id:1L1}
\mathcal{L}_1[u, v] + \mathcal{N}_1(u, v) = \mu_s u_x + \mu_{sy} v + \bar{v} u_y + \bar{u}_x u ,
\end{align}
where we have denoted the nonlinear coefficients by
\begin{align} \label{def:mu:s:nu:s}
\mu_s := \bar{u} + \ensuremath{\varepsilon}^{\frac{N_2}{2}}u, \qquad \nu_s := \bar{v} + \ensuremath{\varepsilon}^{\frac{N_2}{2}}v.
\end{align}
We now apply $\ensuremath{\partial}_x^{11}$ to \eqref{id:1L1}, which produces the identity
\begin{align} \label{sum:1}
\ensuremath{\partial}_x^{11} ( \mathcal{L}_1[u, v] + \mathcal{N}_1(u, v) ) = \mu_s u^{(11)}_x + \mu_{sy} v^{(11)} + \sum_{i = 1}^3 \mathcal{R}^{(i)}_1[u, v],
\end{align}
where we have isolated those terms with twelve $x$ derivatives, and the remainder terms above have fewer than twelve $x$ derivatives on $u$, and are defined by
\begin{align} \label{R11}
\mathcal{R}_1^{(1)}[u, v] :=& \sum_{j = 1}^{10} \binom{11}{j} ( \ensuremath{\partial}_x^j \mu_s \ensuremath{\partial}_x^{11-j} u_x + \ensuremath{\partial}_x^{11-j} \bar{u}_x \ensuremath{\partial}_x^j u + \ensuremath{\partial}_x^j \nu_s \ensuremath{\partial}_x^{11-j} u_y + \ensuremath{\partial}_x^{11-j} \bar{u}_y \ensuremath{\partial}_x^j v ), \\ \label{R12}
\mathcal{R}_1^{(2)}[u, v] := & \mu_{sx} u^{(11)} + \nu_s u^{(11)}_y, \\ \label{R13}
\mathcal{R}_1^{(3)}[u, v] := & u \ensuremath{\partial}_x^{12} \bar{u} + u_y \ensuremath{\partial}_x^{11} \bar{v} + v \ensuremath{\partial}_y \bar{u}^{(11)} + u_x \ensuremath{\partial}_x^{11} \bar{u}.
\end{align}
We now introduce the change of variables, which is adapted to the first two terms on the right-hand side of \eqref{sum:1}. The basic objects are
\begin{align} \label{Chan:var:2}
Q := \frac{\psi^{(11)}}{\mu_s}, \qquad \tilde{U} := \ensuremath{\partial}_y Q, \qquad \tilde{V} := - \ensuremath{\partial}_x Q.
\end{align}
From here, we derive the identities
\begin{align} \label{move:2}
&u^{(11)} = \ensuremath{\partial}_x^{11} u = \ensuremath{\partial}_y \psi^{(11)} = \ensuremath{\partial}_y (\mu_s Q) = \mu_s \tilde{U} + \ensuremath{\partial}_y \mu_s Q, \\ \label{move:3}
&v^{(11)} = \ensuremath{\partial}_x^{11} v = - \ensuremath{\partial}_x \psi^{(11)} = - \ensuremath{\partial}_x (\mu_s Q) = \mu_s \tilde{V} - \ensuremath{\partial}_x \mu_s Q.
\end{align}
We thus rewrite the primary two terms from \eqref{sum:1} as
\begin{align} \ensuremath{\nonumber}
\mu_s u^{(11)}_x + \mu_{sy} v^{(11)} = & \mu_s \mu_{sx} \tilde{U} + \mu_s^2 \tilde{U}_x + \mu_s \mu_{sxy} Q - \mu_s \mu_{sy} \tilde{V} + \mu_s \mu_{sy} \tilde{V} - \mu_{sy} \mu_{sx} Q \\
= & \mu_s^2 \tilde{U}_x + \mu_s \mu_{sx} \tilde{U} + (\mu_s \mu_{sxy} - \mu_{sx} \mu_{sy}) Q.
\end{align}
We may subsequently rewrite \eqref{sum:1} via
\begin{align}
\ensuremath{\partial}_x^{11} ( \mathcal{L}_1[u, v] + \mathcal{N}_1(u, v) ) = \mu_s^2 \tilde{U}_x + \mu_s \mu_{sx} \tilde{U} + (\mu_s \mu_{sxy} - \mu_{sx} \mu_{sy}) Q + \sum_{i = 1}^3 \mathcal{R}_1^{(i)}[u, v].
\end{align}
We now address the second equation, for which we similarly record the identity
\begin{align} \label{move:1}
\ensuremath{\partial}_x^{11} (\mathcal{L}_{2}[u, v] + \mathcal{N}_2(u, v)) = & \mu_s v^{(11)}_x + \nu_s v^{(11)}_y + \mathcal{R}_2^{(1)}[u, v] + \mathcal{R}_2^{(2)}[u, v] + \mathcal{R}_2^{(3)}[u, v],
\end{align}
where we again define the lower order terms appearing above via
\begin{align} \label{def:R21:def}
\mathcal{R}^{(1)}_2[u, v] := & \sum_{j = 1}^{10} \binom{11}{j} ( \ensuremath{\partial}_x^j \mu_s \ensuremath{\partial}_x^{11-j} v_x + \ensuremath{\partial}_x^{11-j} \bar{v}_x \ensuremath{\partial}_x^j u + \ensuremath{\partial}_x^j \nu_s \ensuremath{\partial}_x^{11-j} v_y + \ensuremath{\partial}_x^{11-j} \bar{v}_y \ensuremath{\partial}_x^j v ), \\ \label{def:R22}
\mathcal{R}_2^{(2)}[u, v] := &\nu_{sx} u^{(11)} + \nu_{sy} v^{(11)}, \\ \label{def:R23}
\mathcal{R}_2^{(3)}[u, v] := & v \ensuremath{\partial}_x^{11} \bar{v}_y + u \ensuremath{\partial}_x^{12} \bar{v} + v_x \ensuremath{\partial}_x^{11} \bar{u} + v_y \ensuremath{\partial}_x^{11}\bar{v}.
\end{align}
We will now rewrite the first two terms from \eqref{move:1} by using \eqref{move:2} - \eqref{move:3} so as to produce
\begin{align} \ensuremath{\nonumber}
\mu_s v^{(11)}_x + \nu_s v^{(11)}_y = & \mu_s^2 \tilde{V}_x + \mu_s \nu_s \tilde{V}_y + (2 \mu_s \mu_{sx} + \nu_s \mu_{sy}) \tilde{V} - \mu_{sx} \nu_s \tilde{U} - (\mu_s \mu_{sxx} + \nu_s \mu_{sxy})Q.
\end{align}
Continuing then from \eqref{move:1}, we obtain
\begin{align} \ensuremath{\nonumber}
\ensuremath{\partial}_x^{11} (\mathcal{L}_{2}[u, v] + \mathcal{N}_2(u, v)) = & \mu_s^2 \tilde{V}_x + \mu_s \nu_s \tilde{V}_y + (2 \mu_s \mu_{sx} + \nu_s \mu_{sy}) \tilde{V} - \mu_{sx} \nu_s \tilde{U} \\
& -(\mu_s \mu_{sxx} + \nu_s \mu_{sxy})Q + \sum_{i = 1}^3 \mathcal{R}_2^{(i)}[u, v].
\end{align}
We now summarize the full nonlinear equation upon introducing these new quantities:
\begin{align} \label{eq:11:first}
\mu_s^2 \tilde{U}_x + & \mu_s \mu_{sx} \tilde{U} - \Delta_\ensuremath{\varepsilon} (\ensuremath{\partial}_x^{11} u) + (\mu_s \mu_{sxy} - \mu_{sx} \mu_{sy}) Q + \sum_{i = 1}^3 \mathcal{R}_1^{(i)}[u, v] + \ensuremath{\partial}_x^{11} P_x = \ensuremath{\partial}_x^{11} F_R,
\end{align}
and the second equation which reads
\begin{align} \ensuremath{\nonumber}
\mu_s^2 \tilde{V}_x& + \mu_s \nu_s \tilde{V}_y - \Delta_\ensuremath{\varepsilon} (\ensuremath{\partial}_x^{11} v) + (2 \mu_s \mu_{sx} + \nu_s \mu_{sy}) \tilde{V} - \mu_{sx} \nu_s \tilde{U} - (\mu_s \mu_{sxx} + \nu_s \mu_{sxy})Q \\ \label{eq:11:second}
&+ \sum_{i = 1}^3 \mathcal{R}_2^{(i)}[u, v] + \ensuremath{\partial}_x^{11}\frac{P_y}{\ensuremath{\varepsilon}} = \ensuremath{\partial}_x^{11} G_R.
\end{align}
\subsection{Nonlinearly Modified Norms} \label{subsection:NLMN}
While our objective is to control $\| U, V \|_{X_{11}}$, we will need to change the weights appearing in this norm from $\bar{u}$ to $\mu_s$. Define thus
\begin{align} \label{def:Theta:11}
\| \tilde{U}, \tilde{V} \|_{\Theta_{11}} := \| \sqrt{\mu_s} \tilde{U}_y x^{11} \phi_{11} \| + \sqrt{\ensuremath{\varepsilon}} \| \sqrt{\mu_s} \tilde{U}_x x^{11} \phi_{11} \| + \ensuremath{\varepsilon} \| \sqrt{\mu_s} \tilde{V}_x x^{11} \phi_{11} \| + \| \mu_{sy} \tilde{U} x^{11} \phi_{11} \|_{y = 0}.
\end{align}
We now prove
\begin{lemma} The following estimates are valid, for $j = 0, 1$,
\begin{align} \label{same:Q}
\| \sqrt{\ensuremath{\varepsilon}}Q x^{9.5} \phi_{10} \| \lesssim & \|U, V \|_{\mathcal{X}_{\le 10}}, \\ \label{same:Q:2}
\| \sqrt{\ensuremath{\varepsilon}} Q x^{10} \phi_{11} \|_{L^2_x L^\infty_y} \lesssim & \|U, V \|_{\mathcal{X}}, \\\label{same:same}
\| \mu_s \tilde{U} x^{10.5} \phi_{11} \| + \sqrt{\ensuremath{\varepsilon}} \| \mu_s \tilde{V} x^{10.5} \phi_{11} \| \lesssim &\ensuremath{\varepsilon}^{\frac{N_2}{2}- M_1 - 5}\|U, V \|_{\mathcal{X}} + \|U, V \|_{\mathcal{X}_{\le 10.5}},
\end{align}
and, for any $0 < \delta \ll 1$,
\begin{align} \label{same:same:high}
\| \tilde{U} x^{10.5} \phi_{11} \| + \sqrt{\ensuremath{\varepsilon}} \| \tilde{V} x^{10.5} \phi_{11} \| \le & \delta \| \tilde{U}, \tilde{V} \|_{\Theta_{11}} + C_\delta \|U, V \|_{\mathcal{X}_{\le 10.5}} + \ensuremath{\varepsilon}^{\frac{N_2}{2}- M_1 - 5} \|U, V \|_{\mathcal{X}}.
\end{align}
\end{lemma}
\begin{proof} We use the formulas \eqref{Chan:var:2} to write
\begin{align} \label{trans:Q}
\mu_s Q = \ensuremath{\partial}_x^{11} \psi = \ensuremath{\partial}_x^{11} (\bar{u} q) = \bar{u} \ensuremath{\partial}_x^{11} q + \sum_{k = 1}^{11} \binom{11}{k} \ensuremath{\partial}_x^k \bar{u} \ensuremath{\partial}_x^{11-k} q.
\end{align}
We divide through both sides by $\mu_s$, multiply by $\sqrt{\ensuremath{\varepsilon}} x^{9.5} \phi_{10}$, and compute the $L^2$ norm, which gives
\begin{align} \ensuremath{\nonumber}
\| \sqrt{\ensuremath{\varepsilon}} Q x^{9.5} \phi_{10} \| \lesssim & \| \sqrt{\ensuremath{\varepsilon}} V^{(9)}_x x^{9.5} \phi_{10} \| + \sum_{k = 1}^9 \| \frac{\ensuremath{\partial}_x^k \bar{u}}{\bar{u}} x^k \|_\infty \| \sqrt{\ensuremath{\varepsilon}} V^{(9-k)}_x x^{9-k + \frac 1 2} \phi_{11} \| \\ \ensuremath{\nonumber}
& + \| \ensuremath{\partial}_x^{10} \bar{u}_P x^9 y \|_\infty \| \sqrt{\ensuremath{\varepsilon}} \frac{V}{y} x^{\frac 1 2} \phi_{11} \| + \| \ensuremath{\partial}_x^{10} \bar{u}_E x^{10.5} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} V \langle x \rangle^{-1} \| \\ \ensuremath{\nonumber}
& + \| \ensuremath{\partial}_x^{11} \bar{u}_P x^{10.5}y \|_\infty \| \sqrt{\ensuremath{\varepsilon}} \frac{q}{y} \langle x \rangle^{-1} \| + \| \ensuremath{\partial}_x^{11} \bar{u}_E x^{11.5} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} q \langle x \rangle^{-2} \| \\
\lesssim & \|U, V \|_{\mathcal{X}_{\le 10}},
\end{align}
where we need to treat the lower order terms corresponding to $k = 10, 11$ in the sum \eqref{trans:Q} differently, in order to avoid using the critical Hardy inequality. We have invoked estimates \eqref{Hardy:four}, \eqref{est:Eul:piece}, \eqref{est:Pr:piece}.
We now address the second inequality, \eqref{same:Q:2}. We divide \eqref{trans:Q} by $\mu_s$, multiply by $\sqrt{\ensuremath{\varepsilon}} x^{10} \phi_{11}$, and compute the $L^2_x L^\infty_y$ norm, which gives
\begin{align} \ensuremath{\nonumber}
\| \sqrt{\ensuremath{\varepsilon}} Q x^{10} \phi_{11} \|_{L^2_x L^\infty_y} \lesssim & \| V^{(10)} x^{10} \phi_{11} \|_{L^2_x L^\infty_y} + \sum_{k = 1}^9 \| \frac{\ensuremath{\partial}_x^{k} \bar{u}}{\bar{u}} x^{k} \|_{\infty} \| V^{(10-k)} x^{10-k} \phi_k \|_{L^2_x L^\infty_y} \\ \ensuremath{\nonumber}
& + \| \ensuremath{\partial}_x^{10} \bar{u}_P x^{9.5} y \|_{\infty} \| \frac{V}{y} x^{\frac 1 2} \|_{L^2_x L^\infty_y} + \| \ensuremath{\partial}_x^{10} \bar{u}_E x^{10.5} \|_\infty \| V \langle x \rangle^{- \frac 1 2} \|_{L^2_x L^\infty_y} \\ \label{bieb:1}
& + \| \ensuremath{\partial}_x^{11} \bar{u}_P x^{10.5} y \|_{\infty} \| \frac{q}{y} x^{-\frac 1 2} \|_{L^2_x L^\infty_y} + \| \ensuremath{\partial}_x^{11} \bar{u}_E x^{11.5} \|_\infty \| q \langle x \rangle^{- \frac 3 2} \|_{L^2_x L^\infty_y} \\ \ensuremath{\nonumber}
\lesssim & \|U, V \|_{\mathcal{X}},
\end{align}
where we have again invoked estimates \eqref{est:Eul:piece}, \eqref{est:Pr:piece} for the $\bar{u}$ terms, the mixed norm estimate \eqref{Lpq:emb:V:1}, as well as the following Sobolev interpolation estimates
\begin{align}
&\| \frac{V}{y} x^{\frac 1 2} \phi_{11} \|_{L^2_x L^\infty_y} \lesssim \| V_y x^{\frac 1 2} \phi_{11} \|_{L^2_x L^\infty_y} \lesssim \| V_y x^{\frac 1 2} \phi_{11} \|^{\frac 1 2} \| U^{(1)}_y x^{\frac 1 2} \phi_{11} \|^{\frac 1 2} \lesssim \|U, V \|_{\mathcal{X}_{\le 1.5}}, \\
&\| \sqrt{\ensuremath{\varepsilon}} V \langle x \rangle^{- \frac 1 2} \|_{L^2_x L^\infty_y} \lesssim \| \sqrt{\ensuremath{\varepsilon}} V \langle x \rangle^{-1} \phi_{11} \|^{\frac 1 2} \| \sqrt{\ensuremath{\varepsilon}} U_x \phi_{11} \|^{\frac 1 2} \lesssim \| U, V \|_{\mathcal{X}_{\le 1}},
\end{align}
and the analogous estimates for $q$ instead of $V$ for the final two terms from \eqref{bieb:1}.
Dividing through by $\mu_s$ and differentiating in $y$ yields
\begin{align} \label{trans:U}
\tilde{U} = \frac{\bar{u}}{\mu_s} U^{(11)} - \ensuremath{\partial}_y ( \frac{\bar{u}}{\mu_s} ) V^{(10)} + \sum_{k = 1}^{11} \binom{11}{k} \ensuremath{\partial}_y ( \frac{\ensuremath{\partial}_x^k \bar{u}}{\mu_s} ) \ensuremath{\partial}_x^{11-k} q + \sum_{k = 1}^{11} \binom{11}{k} \frac{\ensuremath{\partial}_x^k \bar{u}}{\mu_s} U^{(11-k)},
\end{align}
and similarly, dividing through by $\mu_s$ and differentiating in $x$ yields
\begin{align} \label{trans:V}
\tilde{V} = \frac{\bar{u}}{\mu_s} V^{(11)} + \ensuremath{\partial}_x (\frac{\bar{u}}{\mu_s}) V^{(10)} + \sum_{k = 1}^{11} \binom{11}{k} \frac{\ensuremath{\partial}_x^k \bar{u}}{\mu_s} V^{(11-k)} - \sum_{k = 1}^{11} \binom{11}{k} \ensuremath{\partial}_x (\frac{\ensuremath{\partial}_x^k \bar{u}}{\mu_s}) \ensuremath{\partial}_x^{11-k} q.
\end{align}
We first establish the following auxiliary estimate, which will be needed in forthcoming calculations due to the second term from \eqref{trans:U}.
\begin{align} \ensuremath{\nonumber}
\ensuremath{\partial}_y (\frac{\bar{u}}{\mu_s}) = \ensuremath{\partial}_y (\frac{\mu_s - \ensuremath{\varepsilon}^{\frac{N_2}{2}} u }{\mu_s}) = - \ensuremath{\varepsilon}^{\frac{N_2}{2}} \ensuremath{\partial}_y (\frac{u}{\mu_s}) = - \ensuremath{\varepsilon}^{\frac{N_2}{2}} \ensuremath{\partial}_y ( \frac{u}{\bar{u}} \frac{\bar{u}}{\mu_s} ) = - \ensuremath{\varepsilon}^{\frac{N_2}{2}} \frac{\bar{u}}{\mu_s} \ensuremath{\partial}_y (\frac{u}{\bar{u}}) - \ensuremath{\varepsilon}^{\frac{N_2}{2}} \frac{u}{\bar{u}} \ensuremath{\partial}_y (\frac{\bar{u}}{\mu_s}),
\end{align}
which, rearranging for the quantity on the left-hand side, yields the identity
\begin{align}
\ensuremath{\partial}_y (\frac{\bar{u}}{\mu_s}) = - \frac{\ensuremath{\varepsilon}^{\frac{N_2}{2}}}{1 + \ensuremath{\varepsilon}^{\frac{N_2}{2}} \frac{u}{\bar{u}} } \frac{\bar{u}}{\mu_s} \ensuremath{\partial}_y (\frac{u}{\bar{u}}),
\end{align}
from which we estimate
\begin{align} \label{depen:1}
\| \ensuremath{\partial}_y (\frac{\bar{u}}{\mu_s}) x^{\frac 12} \psi_{12} \|_{L^\infty_x L^2_y} \lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \frac{\bar{u}}{\mu_s} \|_{L^\infty} \frac{1}{1 - \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \frac{u}{\bar{u}} \|_{L^\infty} } \| \ensuremath{\partial}_y (\frac{u}{\bar{u}}) x^{\frac 1 2} \psi_{12} \|_{L^\infty_x L^2_y} \lesssim \ensuremath{\varepsilon}^{\frac{N_2}{2}-M_1} \|U,V \|_{\mathcal{X}},
\end{align}
where we have invoked estimates \eqref{mixed:emb} and \eqref{Linfty:wo}.
From these formulas, we provide the estimate \eqref{same:same:high} via
\begin{align} \ensuremath{\nonumber}
\| \mu_s \tilde{U} x^{10.5} \phi_{11} \| & \lesssim \| \bar{u} U^{(10)}_x x^{10.5} \phi_{11} \| + \| \ensuremath{\partial}_y ( \frac{\bar{u}}{\mu_s}) x^{\frac 1 2} \psi_{12} \|_{L^\infty_x L^2_y} \| V^{(10)} x^{10} \phi_{11} \|_{L^2_x L^\infty_y} \\ \ensuremath{\nonumber}
&+ \sum_{k = 1}^{10} \| \frac{\bar{u}}{\mu_s} \|_{\infty} \| \ensuremath{\partial}_y (\frac{\ensuremath{\partial}_x^{k} \bar{u}}{\bar{u}}) y \langle x \rangle^k \|_{\infty} \| \frac{\ensuremath{\partial}_x^{11-k} q}{y} \langle x \rangle^{11-k - \frac 1 2} \| \\ \ensuremath{\nonumber}
& + \| \frac{\bar{u}}{\mu_s} \|_\infty \| \ensuremath{\partial}_y (\frac{\ensuremath{\partial}_x^{11} \bar{u}_P}{\bar{u}}) y^2 \langle x \rangle^{10.5} \|_\infty \| \frac{q - y U(x, 0)}{\langle y \rangle^2} \| + \| \frac{\bar{u}}{\mu_s} \|_\infty \| \ensuremath{\partial}_y (\frac{\ensuremath{\partial}_x^{11} \bar{u}_E}{\bar{u}}) y \langle x \rangle^{11.5} \|_\infty \| U \langle x \rangle^{-1} \| \\ \ensuremath{\nonumber}
&+ \sum_{k = 1}^{10} \| \frac{\bar{u}}{\mu_s} \|_\infty \| \frac{\ensuremath{\partial}_x^k \bar{u}}{\bar{u}} x^k \|_\infty \| U^{(11-k)} \langle x \rangle^{(11-k- \frac 1 2)} \| + \| \frac{\bar{u}}{\mu_s} \|_\infty \| \frac{\ensuremath{\partial}_x^{11} \bar{u}_P}{\bar{u}} y \langle x \rangle^{10.5} \|_\infty \| \frac{U - U(x, 0)}{\langle y \rangle} \| \\ \ensuremath{\nonumber}
& + \| \frac{\bar{u}}{\mu_s} \|_\infty \| \frac{\ensuremath{\partial}_x^{11} \bar{u}_P}{\bar{u}} \langle x \rangle^{11-\frac 1 4} \|_{L^\infty_x L^2_y} \| U(x, 0) \langle x \rangle^{- \frac 1 4} \|_{L^2_x} + \| \frac{\ensuremath{\partial}_x^{11} u_E}{\bar{u}} \langle x \rangle^{11.5} \|_\infty \| U \langle x \rangle^{-1} \| \\
& \lesssim \| U, V \|_{\mathcal{X}_{\le 10.5}} + \ensuremath{\varepsilon}^{\frac{N_2}{2} - M_1} \| U, V \|_{\mathcal{X}},
\end{align}
where we have invoked estimates \eqref{prof:u:est}, \eqref{est:Eul:piece}, \eqref{est:Pr:piece}, \eqref{depen:1}, and \eqref{Lpq:emb:V:1}.
An essentially identical proof applies also to the $\tilde{V}$ quantity from \eqref{same:same}, so we omit repeating these details. This establishes estimate \eqref{same:same}, with \eqref{same:same:high} following similarly, upon using the Hardy-type inequality \eqref{Hardy:three}.
\end{proof}
As long as we have sufficiently strong control on lower-order quantities, it will turn out that the $\Theta_{11}$ norm will control the $X_{11}$ norm. This is the content of the following lemma.
\begin{lemma} Assume $\| U, V \|_{\mathcal{X}} \le 1$. Then,
\begin{align}
\| U, V \|_{\mathcal{X}} \lesssim \| \tilde{U}, \tilde{V} \|_{\Theta_{11}}+ \| U, V \|_{\mathcal{X}_{\le 10.5}} \lesssim \| U, V \|_{\mathcal{X}}.
\end{align}
\end{lemma}
\begin{proof} Dividing through equation \eqref{trans:Q} by $\bar{u}$ and computing $\ensuremath{\partial}_y^2$ gives
\begin{align} \ensuremath{\nonumber}
U^{(11)}_y = & \frac{\mu_s}{\bar{u}} \tilde{U}_y + 2\ensuremath{\partial}_y (\frac{\mu_s}{\bar{u}}) \tilde{U} + \ensuremath{\partial}_y^2 (\frac{\mu_s}{\bar{u}}) Q - \sum_{k = 1}^{11} \binom{11}{k} \ensuremath{\partial}_y^2 (\frac{\ensuremath{\partial}_x^k \bar{u}}{\bar{u}}) \ensuremath{\partial}_x^{11-k} q \\
& - 2 \sum_{k = 1}^{11} \binom{11}{k} \ensuremath{\partial}_y^2 (\frac{\ensuremath{\partial}_x^k \bar{u}}{\bar{u}} ) \ensuremath{\partial}_x^{11-k} U - \sum_{k = 1}^{11} \binom{11}{k} \frac{\ensuremath{\partial}_x^k \bar{u}}{\bar{u}} \ensuremath{\partial}_x^{11-k} U_y.
\end{align}
From here, we obtain the estimate
\begin{align} \ensuremath{\nonumber}
\| \sqrt{\bar{u}} U^{(11)}_y x^{11} \phi_{11} \| \lesssim & \| \sqrt{ \frac{\mu_s}{\bar{u}} } \|_\infty \| \sqrt{\mu_s} \tilde{U}_y x^{11} \phi_{11} \| + \| \sqrt{\bar{u}} \ensuremath{\partial}_y (\frac{\mu_s}{\bar{u}}) x^{\frac 1 2} \|_\infty \| \tilde{U} x^{10.5} \phi_{11} \| \\ \ensuremath{\nonumber}
& + \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| u_{yy} x \psi_{12} \|_{L^\infty_x L^2_y} \| Q x^{10} \phi_{11} \|_{L^2_x L^\infty_y} \\ \ensuremath{\nonumber}
& + \sum_{k = 1}^{11} \| \ensuremath{\partial}_y^2(\frac{\ensuremath{\partial}_x^k \bar{u}}{\bar{u}}) y x^{k + \frac 1 2} \|_\infty \| \frac{\ensuremath{\partial}_x^{11-k} q}{y} x^{11-k - \frac 1 2} \phi_{11} \| \\ \ensuremath{\nonumber}
& + \sum_{k = 1}^{11} \| \frac{\ensuremath{\partial}_x^k \bar{u}}{\bar{u}} x^k \|_\infty \| U^{(11-k)}_y x^{11-k} \phi_{11} \| \\
\lesssim & \| \tilde{U}, \tilde{V} \|_{\Theta_{11}} + \| U, V \|_{\mathcal{X}_{\le 10.5}} + \ensuremath{\varepsilon}^{\frac{N_2}{2} - M_1} \| U, V \|_{\mathcal{X}} \| U, V \|_{\mathcal{X}_{\le 10.5}},
\end{align}
where we have invoked \eqref{Linfty:wo}, \eqref{mixed:emb}, \eqref{same:Q:2}, and \eqref{bob:1}.
An essentially identical calculation applies to the remaining terms from the $\| U, V \|_{X_{11}}$ norm, and also an essentially identical computation enables us to go backwards. We note, however, that to compare the quantities $ \| \mu_{sy} \tilde{U} x^{11} \phi_{11} \|_{y = 0}$ and $\| \bar{u}_y U^{(11)} x^{11} \phi_{11} \|_{y = 0}$, we also need to demonstrate boundedness of the coefficients $|\frac{\bar{u}_y}{\mu_{sy}}| \phi_{11}$ and $|\frac{\mu_{sy}}{\bar{u}_y}| \phi_{11}$. For this purpose, we estimate
\begin{align} \ensuremath{\nonumber}
|\mu_{sy}(x, 0) - \bar{u}^0_{py}(x, 0)| \phi_{11} \le &\sum_{i = 1}^{N_1} \ensuremath{\varepsilon}^{\frac i 2} (\sqrt{\ensuremath{\varepsilon}} \| u^i_{EY} \|_{L^\infty_y} + \| u^i_{py} \|_{L^\infty_y} ) + \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| u_y \psi_{12} \|_{L^\infty_y} \\
\lesssim & \sum_{i = 1}^{N_1} \ensuremath{\varepsilon}^{\frac i 2} (\sqrt{\ensuremath{\varepsilon}} \langle x \rangle^{- \frac 3 2} + \langle x \rangle^{- \frac 3 4 + \sigma_\ast}) + \ensuremath{\varepsilon}^{\frac{N_2}{2}- M_1} \langle x \rangle^{- \frac 3 4} \| U, V \|_{\mathcal{X}} \lesssim \ensuremath{\varepsilon}^{\frac 1 2} \langle x \rangle^{- \frac 3 4 + \sigma_\ast},
\end{align}
where we have invoked estimates \eqref{water:88}, \eqref{water:65}, \eqref{pw:dec:u}, the identity that $\phi_{11} = \psi_{12} \phi_{11}$, the fact that $\frac{N_2}{2} - M_1 \gg 0$, and finally the assumption that $\|U, V \|_{\mathcal{X}} \le 1$.
\end{proof}
We will need the following interpolation estimates to close the nonlinear estimates below.
\begin{lemma} \label{lemma:Mixed:here} Let $\tilde{W} \in \{ \tilde{U}, \tilde{V}\}$. The following estimates are valid:
\begin{align} \label{weds:weds:1}
\| \bar{u}^{\frac 1 2} \tilde{W} \langle x \rangle^{10.75} \phi_{11} \|_{L^2_x L^\infty_y}^2 \lesssim & \| \bar{u} \tilde{W} \langle x \rangle^{10.5} \phi_{11} \|^2 + \| \sqrt{\bar{u}} \tilde{W}_y \langle x \rangle^{11} \phi_{11} \|^2, \\ \label{weds:weds:2}
\| \tilde{W} \langle x \rangle^{10.5} \phi_{11} \chi(z) \|_{L^2_x L^4_y}^2 \lesssim & \| \bar{u} \tilde{W} \langle x \rangle^{10.5} \phi_{11} \|^2 + \| \sqrt{\bar{u}} \tilde{W}_y \langle x \rangle^{11} \phi_{11} \|^2.
\end{align}
\end{lemma}
\begin{proof} We begin with the first estimate, \eqref{weds:weds:1}. For this, we consider
\begin{align} \ensuremath{\nonumber}
\bar{u} \tilde{W}^2 \langle x \rangle^{21.5} \le & \langle x \rangle^{21.5} |\int_y^\infty 2 \bar{u} \tilde{W} \tilde{W}_y(y') \,\mathrm{d} y' | + \langle x \rangle^{21.5} |\int_y^\infty 2 \tilde{W}^2 \bar{u}_y \,\mathrm{d} y' | \\
\lesssim& \| \tilde{W} \langle x \rangle^{10.5} \|_{L^2_y} \| \sqrt{\bar{u}} \tilde{W}_y \langle x \rangle^{11} \|_{L^2_y} + \| \bar{u}_y \langle x \rangle^{\frac 1 2} \|_\infty \| \tilde{W} \langle x \rangle^{10.5} \|_{L^2_y}^2.
\end{align}
Multiplying both sides by $\phi_{11}^2$ and placing both sides in $L^2_x$ gives estimate \eqref{weds:weds:1}.
We turn now to \eqref{weds:weds:2}. To compute the $L^4_y$ norm, we raise to the fourth power and integrate by parts via
\begin{align} \ensuremath{\nonumber}
\| \tilde{W} \langle x \rangle^{10.5} \chi(z) \|_{ L^4_y}^4 = &\int \tilde{W}^4 \chi(z) \langle x \rangle^{42} \,\mathrm{d} y = \int \ensuremath{\partial}_y (y) \tilde{W}^4 \chi(z) \langle x \rangle^{42} \,\mathrm{d} y \\ \label{lauvlauv}
=& - \int 4 y \tilde{W}^3 \tilde{W}_y \chi(z) \langle x \rangle^{42} - \int y \tilde{W}^4 \frac{1}{\sqrt{x}} \chi'(z) \langle x \rangle^{42} \,\mathrm{d} y.
\end{align}
We will first handle the first integral above. Using that $z \le 1$ on the support of $\chi(z)$, we can estimate via
\begin{align} \ensuremath{\nonumber}
|\int 4y \tilde{W}^3 \tilde{W}_y \chi(z) \langle x \rangle^{42}| \lesssim & \int |\tilde{W}|^3 \bar{u} |\tilde{W}_y| \langle x \rangle^{42.5} \,\mathrm{d} y \\
\lesssim & \| \sqrt{\bar{u}} \tilde{W} \langle x \rangle^{10.5} \|_{L^\infty_y} \| \tilde{W} \langle x \rangle^{10.5} \|_{L^4_y}^2 \| \sqrt{\bar{u}} \tilde{W}_y \langle x \rangle^{11} \|_{L^2_y}.
\end{align}
For the far-field integral from \eqref{lauvlauv}, we estimate it by first noting that $|y/\sqrt{x}| \lesssim 1$ on the support of $\chi'$. Thus,
\begin{align}
|\int \tilde{W}^4 \langle x \rangle^{42} z \chi'(z) \,\mathrm{d} y| \lesssim \| \bar{u} \tilde{W} \langle x \rangle^{10.5} \|_{L^\infty_y}^2 \| \bar{u} \tilde{W} \langle x \rangle^{10.5} \|_{L^2_y}^2 \lesssim \| \bar{u} \tilde{W} \langle x \rangle^{10.5} \|_{L^2_y}^3 \| \sqrt{\bar{u}} \tilde{W}_y \langle x \rangle^{10.5} \|_{L^2_y}.
\end{align}
Multiplying by $\phi_{11}^2$, integrating over $x$, and appealing to \eqref{weds:weds:1} gives the desired estimate.
\end{proof}
\subsection{Complete $\|\tilde{U}, \tilde{V} \|_{\Theta_{11}}$ Estimate} \label{subsection:NL:PR}
Before performing our main top order energy estimate in Lemma \ref{lemma:top}, we first record an estimate on the lower-order error terms.
\begin{lemma} Assume $\| U, V \|_{\mathcal{X}} \le 1$. Let $\mathcal{R}_1^{(1)}$ and $\mathcal{R}_2^{(1)}$ be defined as in \eqref{R11} and \eqref{def:R21:def}. Then the following estimate is valid, for any $0 < \delta << 1$,
\begin{align} \label{R11:estimate}
\| \mathcal{R}_{1}^{(1)} \langle x \rangle^{11.5} \phi_{11} \| + \| \sqrt{\ensuremath{\varepsilon}} \mathcal{R}_2^{(1)} \langle x \rangle^{11.5} \phi_{11} \| \le \delta \| U, V \|_{\mathcal{X}} + C_\delta \| U, V \|_{\mathcal{X}_{\le 10.5}}.
\end{align}
\end{lemma}
\begin{proof} We begin first with $\mathcal{R}_1^{(1)}$, defined in \eqref{R11}. For the first term in \eqref{R11}, we assume $1 \le j \le 6$, in which case we bound
\begin{align} \ensuremath{\nonumber}
\| \ensuremath{\partial}_x^j \mu_s \ensuremath{\partial}_x^{11-j} u_x \langle x \rangle^{11.5} \phi_{11} \| \lesssim & \| \ensuremath{\partial}_x^j \mu_s \langle x \rangle^j \psi_{12} \|_\infty \| \ensuremath{\partial}_x^{11-j} u_x \langle x \rangle^{11-j+\frac 1 2} \phi_{11} \| \\
\lesssim & (1 + \ensuremath{\varepsilon}^{\frac{N_2}{2} - M_1} \| U, V \|_{\mathcal{X}})(C_\delta \| U, V \|_{\mathcal{X}_{\le 10.5}} + \delta \| U, V \|_{\mathcal{X}}),
\end{align}
where we have invoked estimates \eqref{prof:u:est}, \eqref{pw:dec:u}, and \eqref{L2:uv:eq}. We have also invoked the identity $\psi_{12} \phi_{11} = \phi_{11}$ to insert the cut-off function $\psi_{12}$ freely above. The remaining case $j > 6$ can be treated symmetrically, as can the second term from \eqref{R11}. For the third term from \eqref{R11}, we first treat the case when $1 \le j \le 6$, which we estimate via
\begin{align} \ensuremath{\nonumber}
\| \ensuremath{\partial}_x^j \nu_s \ensuremath{\partial}_x^{11-j} u_y \langle x \rangle^{11.5} \phi_{11} \| \lesssim &\| \ensuremath{\partial}_x^j \nu_s \langle x \rangle^{j + \frac 12} \psi_{12} \|_\infty \| \ensuremath{\partial}_x^{11-j} u_y \langle x\rangle^{11-j} \phi_{11} \| \\
\lesssim & (1 + \ensuremath{\varepsilon}^{\frac{N_2}{2}- M_1} \| U, V \|_{\mathcal{X}} ) \| U, V \|_{\mathcal{X}_{\le 10}},
\end{align}
where above we have invoked \eqref{prof:v:est}, \eqref{pw:v:1}, and \eqref{L2:uv:eq:2}, and again the ability to insert freely the cut-off $\psi_{12}$ in the presence of $\phi_{11}$. In the case $6 < j \le 10$, we estimate the nonlinear component via
\begin{align} \ensuremath{\nonumber}
\ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \ensuremath{\partial}_x^j v \ensuremath{\partial}_x^{11-j} u_y \langle x \rangle^{11.5} \phi_{11} \| \lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \ensuremath{\partial}_x^{11-j} u_y \langle x \rangle^{11-j + \frac 1 2} \psi_{12} \|_{L^\infty_x L^2_y} \| \ensuremath{\partial}_x^j v \langle x \rangle^j \phi_{11}\|_{L^2_x L^\infty_y} \\
\lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2} - M_1} \|U, V \|_{\mathcal{X}} \| U, V \|_{\mathcal{X}_{\le 10.5}},
\end{align}
where we have used the mixed-norm estimates in \eqref{mL2again} and \eqref{mixed:emb}.
We now move to $\mathcal{R}_2^{(1)}$. The first term here is estimated, in the case when $j \le 6$, via
\begin{align} \ensuremath{\nonumber}
\sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\partial}_x^j \mu_s \ensuremath{\partial}_x^{11-j} v_x \langle x \rangle^{11.5} \phi_{11} \| \lesssim &\| \ensuremath{\partial}_x^j \mu_s \langle x \rangle^j \psi_{12} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} \ensuremath{\partial}_x^{11-j} v_x \langle x \rangle^{11-j+ \frac 1 2} \phi_{11}\| \\
\lesssim & (1 + \ensuremath{\varepsilon}^{\frac{N_2}{2}-M_1} \| U, V \|_{\mathcal{X}}) (C_\delta \|U, V \|_{\mathcal{X}_{\le 10.5}} + \delta \| U, V \|_{\mathcal{X}}),
\end{align}
where we have invoked \eqref{prof:u:est}, \eqref{L2:uv:eq}, \eqref{pw:dec:u}.
In the case when $6 < j \le 10$, we estimate the nonlinear term via
\begin{align} \ensuremath{\nonumber}
\sqrt{\ensuremath{\varepsilon}} \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \ensuremath{\partial}_x^j u \ensuremath{\partial}_x^{11-j} v_x \langle x \rangle^{11.5} \phi_{11} \| \lesssim &\ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \sqrt{\ensuremath{\varepsilon}} \ensuremath{\partial}_x^{12-j} v \langle x \rangle^{12-j + \frac 1 2} \psi_{12} \|_\infty \| \ensuremath{\partial}_x^{j-1} u_x \langle x \rangle^{j-1 + \frac 1 2} \phi_{11} \| \\
\lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}-M_1} \| U, V \|_{\mathcal{X}} \| U, V \|_{\mathcal{X}_{\le 10}},
\end{align}
where we have invoked \eqref{pw:v:1} and \eqref{L2:uv:eq}.
The same estimates work for the second term in $\mathcal{R}_2^{(1)}$, and so we move to the third term. In the case when $j \le 6$, we can estimate the third term via
\begin{align} \ensuremath{\nonumber}
\sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\partial}_x^j \nu_s \ensuremath{\partial}_x^{11-j} v_y \langle x \rangle^{11.5} \phi_{11} \| \lesssim & \sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\partial}_x^j \nu_s \langle x \rangle^{j + \frac 1 2} \psi_{12}\|_\infty \| \ensuremath{\partial}_x^{12-j} u \langle x \rangle^{11-j + \frac 1 2} \phi_{11} \| \\
\lesssim & (1 + \ensuremath{\varepsilon}^{\frac{N_2}{2}-M_1} \| U, V \|_{\mathcal{X}}) (C_\delta \| U, V \|_{\mathcal{X}_{\le 10.5}} + \delta \| U, V \|_{\mathcal{X}}),
\end{align}
where we have invoked \eqref{pw:v:1}, \eqref{L2:uv:eq}.
The same estimate will apply even when $j \ge 7$, for the $\bar{v}$ contribution from $\nu_s$ for this term. It remains to treat the nonlinear contribution when $7 \le j \le 10$, for which we estimate via
\begin{align}\ensuremath{\nonumber}
\ensuremath{\varepsilon}^{\frac{N_2}{2}}\| \sqrt{\ensuremath{\varepsilon}} \ensuremath{\partial}_x^j v \ensuremath{\partial}_x^{12-j} u \langle x \rangle^{11.5} \phi_{11} \| \lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \ensuremath{\partial}_x^{12-j} u \langle x \rangle^{12-j } \psi_{12} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} \ensuremath{\partial}_x^{j-1}v_x \langle x\rangle^{j-1 + \frac 1 2} \phi_{11}\| \\
\lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}-M_1} \| U, V \|_{\mathcal{X}} \| U, V \|_{\mathcal{X}_{\le 10}},
\end{align}
where we have invoked \eqref{pw:dec:u} and \eqref{L2:uv:eq}. The identical estimate applies also to the fourth term from \eqref{def:R21:def}. This concludes the proof.
\end{proof}
\begin{lemma} \label{lemma:top} Let $[\tilde{U}, \tilde{V}]$ satisfy \eqref{eq:11:first} - \eqref{eq:11:second}, and suppose that $\| U, V \|_{\mathcal{X}} \le 1$.
\begin{align} \label{energy:theta11}
\| \tilde{U}, \tilde{V} \|_{\Theta_{11}}^2 \lesssim \sum_{k = 0}^{10} \| U, V \|_{X_k}^2 + \| U, V \|_{X_{k + \frac 1 2} \cap Y_{k + \frac 1 2}}^2+ \mathcal{F}_{X_{11}} + \ensuremath{\varepsilon}^{\frac{N_2}{2}-M_1-5} \|U, V \|_{\mathcal{X}}^2,
\end{align}
where we define $\mathcal{F}_{X_{11}}$ to contain the forcing terms from this estimate,
\begin{align} \label{def:FX11}
\mathcal{F}_{X_{11}} := \int \ensuremath{\partial}_x^{11} F_R \tilde{U} x^{22} \phi_{11}^2 + \int \ensuremath{\partial}_x^{11} G_R \Big( \ensuremath{\varepsilon} \tilde{V} x^{22} \phi_{11}^2- 22 \ensuremath{\varepsilon} Q x^{21} \phi_{11}^2 - 2 \ensuremath{\varepsilon} Q x^{22} \phi_{11} \phi'_{11} \Big).
\end{align}
\end{lemma}
\begin{proof} We apply the multiplier
\begin{align}
\int \eqref{eq:11:first} \times \tilde{U} x^{22} \phi_{11}^2 + \int \eqref{eq:11:second} \times (\ensuremath{\varepsilon} \tilde{V} x^{22} \phi_{11}^2- 22 \ensuremath{\varepsilon} Q x^{21} \phi_{11}^2 - 2 \ensuremath{\varepsilon} Q x^{22} \phi_{11} \phi'_{11}).
\end{align}
We note that the multiplier above is divergence free and moreover that $\tilde{V}|_{y = 0} = Q|_{y = 0} = 0$, and hence the pressure contribution will vanish.
We compute the first two terms from \eqref{eq:11:first}, which yields
\begin{align} \ensuremath{\nonumber}
|\int (\mu_s^2 \tilde{U}_x + \mu_s \mu_{sx} \tilde{U}) \tilde{U} x^{22} \phi_{11}^2| =& |- 11 \int \mu_s^2 \tilde{U}^2 x^{21}\phi_{11}^2 - \int \mu_s^2 \tilde{U}^2 x^{22} \phi_{11} \phi_{11}'| \\
\lesssim & \| \mu_s \tilde{U} x^{10.5} \|^2 \lesssim \| U, V \|_{\mathcal{X}_{\le 10.5}}^2 + \ensuremath{\varepsilon}^{\frac{N_2}{2}-M_1 - 5} \|U, V \|_{\mathcal{X}}^2,
\end{align}
upon invoking \eqref{same:same}.
We now compute the $Q$ terms from \eqref{eq:11:first}. The first we split based on the definition of $\mu_s$
\begin{align} \ensuremath{\nonumber}
&\Big| \int \mu_s \mu_{sxy} Q \tilde{U} x^{22} \phi_{11}^2 \Big| = \Big| \int \mu_s (\bar{u}_{xy} + \ensuremath{\varepsilon}^{\frac{N_2}{2}} u_{xy}) Q \tilde{U} x^{22} \phi_{11}^2 \Big| \\ \ensuremath{\nonumber}
\lesssim& \| \bar{u}_{xy} xy \|_\infty \| \frac{Q}{y} x^{10.5} \phi_{11} \| \| \mu_s \tilde{U} x^{10.5} \phi_{11} \| + \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| u_{xy} x^{\frac 3 2} \psi_{12} \|_{L^\infty_x L^2_y} \| Q x^{10} \phi_{10} \|_{L^2_x L^\infty_y} \| \mu_s \tilde{U} x^{10.5} \phi_{11} \| \\ \ensuremath{\nonumber}
\lesssim& \| \bar{u}_{xy} xy \|_\infty \| \tilde{U} x^{10.5} \phi_{11} \| \| \mu_s \tilde{U} x^{10.5} \phi_{11} \| + \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| u_{xy} x^{\frac 3 2} \psi_{12} \|_{L^\infty_x L^2_y} \| Q x^{10} \phi_{10} \|_{L^2_x L^\infty_y} \| \mu_s \tilde{U} x^{10.5} \phi_{11} \| \\
\lesssim & (C_\delta\| U, V \|_{\mathcal{X}_{\le 10.5}} + \delta \| \tilde{U}, \tilde{V} \|_{\Theta_{11}} ) \| U, V \|_{\mathcal{X}_{\le 10.5}} + \ensuremath{\varepsilon}^{\frac{N_2}{2} - M_1} \| U, V \|_{\mathcal{X}}^3,
\end{align}
where we have invoked estimate \eqref{prof:u:est} for $\bar{u}$, \eqref{same:same:high}, \eqref{same:same}, \eqref{same:Q:2}, as well as the embedding \eqref{mixed:emb}. Note that we have used that $\phi_{11}^2 = \psi_{12} \phi_{11}^2$, according to the definition \eqref{psi:twelve:def}. The remaining $Q$ term works in an identical manner.
We now address the terms in $\mathcal{R}_1^{(1)}$, which are defined in \eqref{R11}. For this, we invoke \eqref{R11:estimate} as well as \eqref{same:same:high} to estimate
\begin{align} \ensuremath{\nonumber}
\Big| \int \mathcal{R}_1^{(1)} \tilde{U} \langle x \rangle^{22} \phi_{11}^2 \Big| \lesssim & \| \mathcal{R}_1^{(1)} \langle x \rangle^{11.5} \phi_{11} \| \| \tilde{U} \langle x \rangle^{10.5} \phi_{11} \| \lesssim \| U, V \|_{\mathcal{X}} (\delta \| \tilde{U}, \tilde{V} \|_{\Theta_{11}} + C_\delta \| U, V \|_{\mathcal{X}_{\le 10.5}}),
\end{align}
where we have invoked estimates \eqref{same:same:high} and \eqref{R11:estimate}.
We now address the terms in $\mathcal{R}_1^{(2)}$, which are defined in \eqref{R12}. We estimate these terms easily via
\begin{align} \ensuremath{\nonumber}
|\int \mathcal{R}_1^{(2)} \tilde{U} x^{22} \phi_{11}^2| \le & |\int \mu_{sx} u^{(11)} \tilde{U} x^{22} \phi_{11}^2| + |\int \nu_{s} u^{(11)}_y \tilde{U} x^{22} \phi_{11}^2| \\ \ensuremath{\nonumber}
\lesssim & \| \mu_{sx} x \psi_{12} \|_\infty \| u^{(11)} x^{10.5} \phi_{11} \| \| \tilde{U} x^{10.5} \phi_{11} \| + \| \frac{\nu_s}{\bar{u}} x^{\frac 1 2} \psi_{12} \|_\infty \| u^{(11)}_y x^{11} \phi_{11} \| \| \bar{u} \tilde{U} x^{10.5} \phi_{11} \| \\ \ensuremath{\nonumber}
\lesssim & (1 + \ensuremath{\varepsilon}^{\frac{N_2}{2} - M_1} \| U, V \|_{\mathcal{X}_{\le 10}} ) \| U, V \|_{\mathcal{X}_{\le 10}} (C_\delta \| U, V \|_{\mathcal{X}_{\le 10}} + \delta \| \tilde{U}, \tilde{V} \|_{\Theta_{11}} ) \\
& + (1 + \ensuremath{\varepsilon}^{\frac{N_2}{2} - M_1} \| U, V \|_{\mathcal{X}_{\le 10.5}} ) \| U, V \|_{X_{11}} \| U, V \|_{\mathcal{X}_{\le 10}},
\end{align}
where we have invoked the estimate \eqref{prof:u:est}, \eqref{prof:v:est}, \eqref{L2:uv:eq}, \eqref{same:same:high}, \eqref{pw:v:1}, and \eqref{same:same}, and again the identity $\phi_{11}^2 = \psi_{12} \phi_{11}^2$.
We now move to $\mathcal{R}_1^{(3)}$, defined in \eqref{R13}, which we estimate the first two terms via
\begin{align} \ensuremath{\nonumber}
\Big| \int ( u \ensuremath{\partial}_x^{12} \bar{u} + u_y \ensuremath{\partial}_x^{11} \bar{v} ) \tilde{U} x^{22} \phi_{11}^2 \Big| \lesssim & \| \ensuremath{\partial}_x^{12} \bar{u} x^{11.5} y \|_\infty \| \frac{u}{y} \| \| \tilde{U} x^{10.5} \| + \| \ensuremath{\partial}_x^{11} \bar{v} x^{11.5} \|_\infty \| u_y \| \| \tilde{U} x^{10.5} \| \\
\lesssim & \| u_y \| \| \tilde{U} x^{10.5}\| \le C_\delta \| U, V \|_{\mathcal{X}_{\le 10.5}} + \delta \| \tilde{U}, \tilde{V} \|_{\Theta_{11}}^2,
\end{align}
where we have invoked \eqref{prof:u:est}, \eqref{L2:uv:eq:2} and \eqref{same:same:high}. The final two terms of $\mathcal{R}_1^{(3)}$ are estimated via
\begin{align} \ensuremath{\nonumber}
\Big| \int ( v \ensuremath{\partial}_y \ensuremath{\partial}_x^{11} \bar{u} + u_x \ensuremath{\partial}_x^{11} \bar{u} ) \tilde{U} x^{22} \phi_{11}^2 \Big| \lesssim &( \| \ensuremath{\partial}_y \ensuremath{\partial}_x^{11} \bar{u} y x^{11} \|_\infty \| \frac{v}{y} x^{\frac 1 2} \| + \| \ensuremath{\partial}_x^{11} \bar{u} x^{11} \|_\infty \| u_x x^{\frac 1 2} \| ) \| \tilde{U} x^{10.5} \phi_{11} \| \\
\lesssim & \| v_y x^{\frac 1 2} \phi_{11} \| \| \tilde{U} x^{10.5} \phi_{11} \| \le C_\delta \| U, V \|_{\mathcal{X}_{\le 10.5}} + \delta \| \tilde{U}, \tilde{V}\|_{\Theta_{11}}^2,
\end{align}
where we have invoked \eqref{prof:u:est}, \eqref{L2:uv:eq:2}, and \eqref{same:same:high}.
We now move to the diffusive terms, starting with the $- u^{(11)}_{yy}$ term, for which one integration by parts yields
\begin{align} \label{diffuse:1}
- \int u^{(11)}_{yy} \tilde{U} x^{22} \phi_{11}^2 = & \int u^{(11)}_y \tilde{U}_y x^{22} \phi_{11}^2+ \int_{y = 0} u^{(11)}_y \tilde{U} x^{22} \phi_{11}^2 \,\mathrm{d} x.
\end{align}
We now use \eqref{move:2} to expand the first term on the right-hand side of \eqref{diffuse:1}, via
\begin{align} \ensuremath{\nonumber}
\int u^{(11)}_y \tilde{U}_y x^{22} \phi_{11}^2 = & \int ( \mu_s \tilde{U}_y + 2 \mu_{sy} \tilde{U} + \mu_{syy} Q ) \tilde{U}_y x^{22} \phi_{11}^2 \\
= & \int \mu_s \tilde{U}_y^2 x^{22} \phi_{11}^2 - \int \mu_{syy} \tilde{U}^2 x^{22}\phi_{11}^2 - \int_{y =0} \mu_{sy} \tilde{U}^2 x^{22} \phi_{11}^2+ \int \mu_{syy} Q \tilde{U}_y x^{22} \phi_{11}^2.
\end{align}
We also expand the second term on the right-hand side of \eqref{diffuse:1}, again by using \eqref{move:2}, which gives
\begin{align}
\int_{y = 0} u^{(11)}_y \tilde{U} x^{22} \phi_{11}^2 \,\mathrm{d} x = \int_{y = 0} ( \mu_s \tilde{U}_y + 2 \mu_{sy} \tilde{U} + \mu_{syy} Q ) \tilde{U} x^{22} \phi_{11}^2 \,\mathrm{d} x = 2 \int_{y = 0} \mu_{sy} \tilde{U}^2 x^{22}\phi_{11}^2 \,\mathrm{d} x.
\end{align}
Hence, we obtain
\begin{align} \ensuremath{\nonumber}
- \int u_{yy}^{(11)} \tilde{U} x^{22} \phi_{11}^2= &\int \mu_s \tilde{U}_y^2 x^{22} \phi_{11}^2+ \int_{y = 0} \mu_{sy} \tilde{U}^2 x^{22} \phi_{11}^2 \,\mathrm{d} x - \int \mu_{syy} \tilde{U}^2 x^{22} \phi_{11}^2\\ \label{ay1}
&+ \int \mu_{syy} Q \tilde{U}_y x^{22} \phi_{11}^2.
\end{align}
The first two terms from \eqref{ay1} are positive contributions towards the $\Theta_{11}$ norm, whereas the third and fourth terms need to be estimated. We first estimate the third term from \eqref{ay1} via
\begin{align} \ensuremath{\nonumber}
\Big| \int \mu_{syy} \tilde{U}^2 x^{22} \phi_{11}^2\Big| \lesssim &\| \bar{u}_{yy} x \|_\infty \| \tilde{U} x^{10.5}\phi_{11}\|^2 + \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| u_{yy} x \psi_{12} \|_{L^\infty_x L^2_y} \| \tilde{U} x^{10.5} \phi_{11}\|_{L^2_x L^4_y}^2 \\ \ensuremath{\nonumber}
\lesssim & \delta \| \tilde{U}, \tilde{V} \|_{\Theta_{11}}^2 + C_\delta \| U, V \|_{\mathcal{X}_{\le 10.5}}^2 + \ensuremath{\varepsilon}^{\frac{N_2}{2} - M_1 } \| U, V \|_{\mathcal{X}} ( \| \bar{u} \tilde{U} \langle x \rangle^{10.5} \phi_{11} \|^2 \\ \label{keep:damp:1}
&+ \| \sqrt{\bar{u}} \tilde{U}_y \langle x \rangle^{11} \phi_{11} \|^2 ),
\end{align}
where we have invoked \eqref{same:same:high}, as well as \eqref{mixed:emb} and \eqref{weds:weds:2}.
We now address the fourth term from \eqref{ay1}, for which we split the coefficient $\mu_s$. In the case of $\bar{u}_{yy}$, we may integrate by parts in $y$ to obtain
\begin{align} \ensuremath{\nonumber}
\int \bar{u}_{yy} Q \tilde{U}_y x^{22} \phi_{11}^2 = & - \int \bar{u}_{yy} \tilde{U}^2 x^{22} \phi_{11}^2 + \frac 1 2 \int \bar{u}_{yyyy} Q^2 x^{22} \phi_{11}^2,
\end{align}
both of which are estimated in an identical manner to \eqref{keep:damp:1}. In the case of $u_{yy}$, we split into the regions where $z \le 1$ and $z \ge 1$. First, the localized contribution is estimated via
\begin{align} \ensuremath{\nonumber}
\ensuremath{\varepsilon}^{\frac{N_2}{2}} | \int u_{yy} Q \tilde{U}_y x^{22} \phi_{11}^2 \chi(z) | \lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| u_{yy} x \psi_{12} \|_{L^\infty_x L^2_y} \| \frac{Q}{\sqrt{y}} x^{10.25} \chi(z) \phi_{11}\|_{L^2_x L^\infty_y} \| \sqrt{\bar{u}} \tilde{U}_y x^{11} \| \\
\lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}-M_1} \|U, V \|_{\mathcal{X}}^3,
\end{align}
where we have invoked \eqref{mixed:emb}, as well as the inequality
\begin{align} \ensuremath{\nonumber}
|Q| \chi(z) = \chi(z) |\int_0^y \tilde{U} \,\mathrm{d} y'| = \chi(z)| \int_0^y \tilde{U} (y')^{\frac 1 2} (y')^{-\frac 1 2} \,\mathrm{d} y'| \lesssim \chi(z) y^{\frac 1 2} x^{\frac 1 4} \| \sqrt{\bar{u}} \tilde{U} \|_{L^\infty_y},
\end{align}
from which we obtain $\|\frac{Q}{\sqrt{y}} \chi(z) \langle x \rangle^{10.5} \ensuremath{\partial}hi_{11}\|_{L^2_x L^\infty_y} \lesssim \|U, V \|_{\mathcal{X}}$, after using the interpolation inequality
\begin{align}
\| \bar{u}^{\frac{1}{2}} \tilde{U} x^{10.75} \ensuremath{\partial}hi_{11} \|_{L^2_x L^\infty_y} \lesssim \|\tilde{U} x^{10.5} \ensuremath{\partial}hi_{11} \| ^{\frac 1 2} \| \bar{u} \tilde{U}_y x^{11} \ensuremath{\partial}hi_{11} \|_{L^2_y}^{\frac 1 2} \lesssim \|U, V \|_{\mathcal{X}}.
\end{align}
For the far-field contribution, we estimate via
\begin{align}
\ensuremath{\varepsilon}^{\frac{N_2}{2}} | \int u_{yy} Q \tilde{U}_y x^{22} \ensuremath{\partial}hi_{11}^2 (1- \chi(z)) | \lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| u_{yy} x \ensuremath{\partial}si_{12} \|_{L^\infty_x L^2_y} \| Q x^{10} \ensuremath{\partial}hi_{11} \|_{L^2_x L^\infty_y} \| \mu_s \tilde{U}_y x^{11} \ensuremath{\partial}hi_{11} \| \\
\lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}- M_1- \frac 1 2} \|U, V \|_{\mathcal{X}}^3,
\end{align}
where we have invoked the mixed-norm estimates \eqref{mixed:emb}, \eqref{same:Q:2}.
We now address the $- \ensuremath{\varepsilon} u^{(11)}_{xx}$ terms from \eqref{eq:11:first}. This produces
\begin{align} \ensuremath{\nonumber}
&- \int \ensuremath{\varepsilon} u^{(11)}_{xx} \tilde{U} x^{22} \ensuremath{\partial}hi_{11}^2 - \int \ensuremath{\varepsilon} v^{(11)}_{yy} \ensuremath{\varepsilon} \tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2 + 22 \int \ensuremath{\varepsilon} v^{(11)}_{yy} Q x^{21} + 2 \int \ensuremath{\varepsilon} v^{(11)}_{yy} Q x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}' \\ \label{Mon:1}
= &2 \int \ensuremath{\varepsilon} u^{(11)}_x \tilde{U}_x x^{22} \ensuremath{\partial}hi_{11}^2 + 44 \int \ensuremath{\varepsilon} u^{(11)}_x \tilde{U} x^{21} \ensuremath{\partial}hi_{11}^2 + 4 \int \ensuremath{\varepsilon} u^{(11)}_x \tilde{U} x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}'.
\end{align}
We first estimate easily the second and third terms from \eqref{Mon:1}. First,
\begin{align}
|\int \ensuremath{\varepsilon} u^{(11)}_x \tilde{U} x^{21} \ensuremath{\partial}hi_{11}^2| \lesssim & \sqrt{\ensuremath{\varepsilon}} \| \sqrt{\ensuremath{\varepsilon}} u^{(11)}_x x^{11} \ensuremath{\partial}hi_{11} \| \| \tilde{U} x^{10} \ensuremath{\partial}hi_{11} \| \lesssim \sqrt{\ensuremath{\varepsilon}} \|U, V \|_{\mathcal{X}_{11}}^2, \\ \label{fir}
|\int \ensuremath{\varepsilon} u^{(11)}_x \tilde{U} x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}'| \lesssim & \| \sqrt{\ensuremath{\varepsilon}} u_x^{(11)} x^{11} \ensuremath{\partial}hi_{11} \| \| \tilde{U} x^{10} \ensuremath{\partial}hi_{10} \| \le \delta \|U, V \|_{\mathcal{X}_{11}} + C_\delta \|U, V \|_{\mathcal{X}_{\le 10}},
\end{align}
where we have invoked \eqref{L2:uv:eq:2} and \eqref{same:same:high}, and for estimate \eqref{fir}, we use that $\ensuremath{\partial}hi_{10} = 1$ on the support of $\ensuremath{\partial}hi_{11}$.
We now treat the primary term, which is the first term from \eqref{Mon:1}, using the formula \eqref{move:2}, which gives
\begin{align} \ensuremath{\nonumber}
2 \int \ensuremath{\varepsilon} u^{(11)}_x \tilde{U}_x x^{22} \ensuremath{\partial}hi_{11}^2 = & 2 \int \ensuremath{\varepsilon} \ensuremath{\partial}_x (\mu_s \tilde{U} + \ensuremath{\partial}_y \mu_s Q) \tilde{U}_x x^{22} \ensuremath{\partial}hi_{11}^2 \\ \ensuremath{\nonumber}
= & \int 2 \ensuremath{\varepsilon} (\mu_s \tilde{U}_x + \ensuremath{\partial}_x \mu_s \tilde{U} + \ensuremath{\partial}_{xy} \mu_s Q - \ensuremath{\partial}_y \mu_s \tilde{V}) \tilde{U}_x x^{22} \ensuremath{\partial}hi_{11}^2 \\ \ensuremath{\nonumber}
= & \int 2 \ensuremath{\varepsilon} \mu_s \tilde{U}_x^2 x^{22} \ensuremath{\partial}hi_{11}^2 - \int \ensuremath{\varepsilon} \ensuremath{\partial}_{xx} \mu_s \tilde{U}^2 x^{22} \ensuremath{\partial}hi_{11}^2 - \int 22 \ensuremath{\varepsilon} \ensuremath{\partial}_x \mu_s \tilde{U}^2 x^{21} \ensuremath{\partial}hi_{11}^2 \\ \ensuremath{\nonumber}
&- \int 2 \ensuremath{\varepsilon} \ensuremath{\partial}_x \mu_s \tilde{U}^2 x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}' + \int 2 \ensuremath{\varepsilon} \ensuremath{\partial}_y^2 \ensuremath{\partial}_x \mu_s Q \tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2 + \int 2 \ensuremath{\varepsilon} \ensuremath{\partial}_{xy} \mu_s \tilde{U} \tilde{V} x^{22} \ensuremath{\partial}hi_{11} \\ \label{lauv:1}
& - \int \ensuremath{\varepsilon} \ensuremath{\partial}_y^2 \mu_s \tilde{V}^2 x^{22} \ensuremath{\partial}hi_{11}^2.
\end{align}
The first term in \eqref{lauv:1} is a positive contribution. For the second and third terms, we estimate via
\begin{align}
|\int \ensuremath{\varepsilon} \ensuremath{\partial}_{xx} \mu_s \tilde{U}^2 x^{22} \ensuremath{\partial}hi_{11}^2| + |\int 22 \ensuremath{\varepsilon} \ensuremath{\partial}_x \mu_s \tilde{U}^2 x^{21} \ensuremath{\partial}hi_{11}^2| \lesssim \ensuremath{\varepsilon} ( \| \frac{ \ensuremath{\partial}_{xx} \mu_s}{\mu_s} x \ensuremath{\partial}si_{12}\|_\infty + \| \ensuremath{\partial}_x \mu_s \ensuremath{\partial}si_{12}\|_\infty) \| \sqrt{\mu_s} \tilde{U} x^{10.5} \|^2,
\end{align}
whereas for the fifth term, it is advantageous for us to split up the coefficient via
\begin{align} \label{best:1}
\int 2 \ensuremath{\varepsilon} \ensuremath{\partial}_y^2 \ensuremath{\partial}_x \mu_s Q \tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2 = \int 2 \ensuremath{\varepsilon} \ensuremath{\partial}_y^2 \ensuremath{\partial}_x \bar{u} Q \tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2 + \ensuremath{\varepsilon}^{\frac{N_2}{2} + 1} \int u_{xyy} Q \tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2,
\end{align}
after which we estimate the first term from \eqref{best:1} via
\begin{align}
|\int 2 \ensuremath{\varepsilon} \bar{u}_{xyy} Q \tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2| \lesssim \sqrt{\ensuremath{\varepsilon}} \| \bar{u}_{xyy} y x^{\frac 3 2} \|_\infty \| \frac{Q}{y} x^{10.5} \| \| \sqrt{\ensuremath{\varepsilon}} \tilde{V} x^{10.5} \|,
\end{align}
and for the second term from \eqref{best:1}, we obtain
\begin{align}
\ensuremath{\varepsilon}^{\frac{N_2}{2}} |\int u_{xyy} Q \tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2| \lesssim \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| u_{xyy} x^2 \ensuremath{\partial}si_{12} \|_{L^\infty_x L^2_y} \| Q x^{10} \ensuremath{\partial}hi_{11} \|_{L^2_x L^\infty_y} \| \tilde{V} x^{10.5} \ensuremath{\partial}hi_{11} \|.
\end{align}
We now estimate the sixth term from \eqref{lauv:1} via
\begin{align} \ensuremath{\nonumber}
| \int 2 \ensuremath{\varepsilon} \ensuremath{\partial}_{xy} \mu_s \tilde{U} \tilde{V} x^{22} \ensuremath{\partial}hi_{11}| \lesssim \| \ensuremath{\partial}_{xy} \mu_s \langle x \rangle^{\frac 3 2} \ensuremath{\partial}si_{12}\|_\infty \| \tilde{U} \langle x \rangle^{10.5} \ensuremath{\partial}hi_{11} \| \| \sqrt{\ensuremath{\varepsilon}} \tilde{V} \langle x \rangle^{10.5} \ensuremath{\partial}hi_{11} \|.
\end{align}
The seventh term from \eqref{lauv:1} is fairly tricky. First, the contribution arising from the $\bar{u}_{yy}$ component of $\ensuremath{\partial}_y^2 \mu_s$ is straightforward, and we estimate it via
\begin{align}
|\int \ensuremath{\varepsilon} \ensuremath{\partial}_y^2 \bar{u} \tilde{V}^2 x^{22} \ensuremath{\partial}hi_{11}^2 | \lesssim \| \bar{u}_{yy} \langle x \rangle \|_\infty \| \sqrt{\ensuremath{\varepsilon}} \tilde{V} \langle x \rangle^{10.5} \ensuremath{\partial}hi_{11} \|^2,
\end{align}
which is an admissible contribution according to \eqref{same:same:high}.
To handle the $\ensuremath{\varepsilon}^{\frac{N_2}{2}}u_{yy}$ contribution from $\ensuremath{\partial}_y^2 \mu_s$, we first localize in $z$. The far-field contribution is handled via
\begin{align} \ensuremath{\nonumber}
|\ensuremath{\varepsilon}^{\frac{N_2}{2}} \int \ensuremath{\varepsilon} u_{yy} \tilde{V}^2 x^{22} \ensuremath{\partial}hi_{11}^2 (1- \chi(z))| \lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}+1} \| u_{yy} \langle x \rangle \ensuremath{\partial}si_{12} \|_{L^\infty_x L^2_y} \| \tilde{V} \langle x \rangle^{10.5} \ensuremath{\partial}hi_{11} (1- \chi(z)) \|_{L^2_x L^\infty_y} \\ \ensuremath{\nonumber}
& \times \| \tilde{V} \langle x \rangle^{10.5} \ensuremath{\partial}hi_{11} (1- \chi(z)) \|\\ \ensuremath{\nonumber}
\lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}+1 - M_1} \| U, V \|_{\mathcal{X}} \| \bar{u} \tilde{V} \langle x \rangle^{10.5} \ensuremath{\partial}hi_{11}\|_{L^2_x L^\infty_y} \| \bar{u} \tilde{V} \langle x \rangle^{10.5} \ensuremath{\partial}hi_{11}\| \\ \ensuremath{\nonumber}
\lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}+1 - M_1} \| U, V \|_{\mathcal{X}} \| \bar{u} \tilde{V}_y \langle x \rangle^{10.5} \ensuremath{\partial}hi_{11}\|^{\frac 1 2} \| \bar{u} \tilde{V} \langle x \rangle^{10.5} \ensuremath{\partial}hi_{11}\|^{\frac 3 2},
\end{align}
where we have used the presence of $(1 - \chi(z))$ to insert factors of $\bar{u}$ above, as well as estimate \eqref{mixed:emb}.
To handle this same contribution for $z \le 1$, we use H\"older's inequality in the following manner
\begin{align*}
|\ensuremath{\varepsilon}^{\frac{N_2}{2}} \int \ensuremath{\varepsilon} u_{yy} \tilde{V}^2 x^{22} \ensuremath{\partial}hi_{11}^2 \chi(z)| \lesssim &\ensuremath{\varepsilon}^{\frac{N_2}{2}+1} \| u_{yy} \langle x \rangle \ensuremath{\partial}si_{12} \|_{L^\infty_x L^2_y} \| \tilde{V} \langle x \rangle^{10.5} \chi(z) \ensuremath{\partial}hi_{11}\|_{L^2_x L^4_y}^2
\end{align*}
from which the result follows from an application of \eqref{mixed:emb} and \eqref{weds:weds:2}.
We now move to the final diffusive term, which contributes the following
\begin{align} \ensuremath{\nonumber}
&- \int \ensuremath{\varepsilon}^2 v^{(11)}_{xx} (\tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2 - 22 Q x^{21} \ensuremath{\partial}hi_{11}^2 - 2 Q x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}' ) \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon}^2 v^{(11)}_x (\tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2)_x - 22 \int \ensuremath{\varepsilon}^2 v^{(11)}_x (Q x^{21} \ensuremath{\partial}hi_{11}^2)_x - 2 \int \ensuremath{\varepsilon}^2 v^{(11)}_x ( Q x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}')_x \\ \ensuremath{\nonumber}
= & \int \ensuremath{\varepsilon}^2 v^{(11)}_x \tilde{V}_x x^{22} \ensuremath{\partial}hi_{11}^2 + 44 \int \ensuremath{\varepsilon}^2 v^{(11)}_x \tilde{V} x^{21} \ensuremath{\partial}hi_{11}^2 - 462 \int \ensuremath{\varepsilon}^2 v^{(11)}_x Q x^{20} \ensuremath{\partial}hi_{11}^2 \\ \label{bkl}
& + 2 \int \ensuremath{\varepsilon}^2 v^{(11)}_x \tilde{V} x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}' - 44 \int \ensuremath{\varepsilon}^2 v^{(11)}_x Q x^{21} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}'- 2 \int \ensuremath{\varepsilon}^2 v^{(11)}_x ( Q x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}')_x.
\end{align}
We will now analyze the first term from \eqref{bkl}, which gives upon appealing to \eqref{move:2},
\begin{align} \ensuremath{\nonumber}
\int \ensuremath{\varepsilon}^2 v^{(11)}_x \tilde{V}_x x^{22} \ensuremath{\partial}hi_{11}^2 = & \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x (\mu_s \tilde{V} - \ensuremath{\partial}_x \mu_s Q) \tilde{V}_x x^{22} \ensuremath{\partial}hi_{11}^2 \\ \label{load:1}
= & \int \ensuremath{\varepsilon}^2 \mu_s \tilde{V}_x^2 x^{22} \ensuremath{\partial}hi_{11}^2 + 2 \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x \mu_s \tilde{V} \tilde{V}_x x^{22} \ensuremath{\partial}hi_{11}^2 - \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_{xx} \mu_s Q \tilde{V}_x x^{22} \ensuremath{\partial}hi_{11}^2.
\end{align}
The first term above in \eqref{load:1} is a positive contribution, whereas the second two can easily be estimated via
\begin{align} \ensuremath{\nonumber}
&|2 \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_x \mu_s \tilde{V} \tilde{V}_x x^{22} \ensuremath{\partial}hi_{11}^2 - \int \ensuremath{\varepsilon}^2 \ensuremath{\partial}_{xx} \mu_s Q \tilde{V}_x x^{22} \ensuremath{\partial}hi_{11}^2 | \\ \ensuremath{\nonumber}
\lesssim & \sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\partial}_x \mu_s x \ensuremath{\partial}si_{12} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} \tilde{V} x^{10.5} \ensuremath{\partial}hi_{11} \| \| \ensuremath{\varepsilon} \sqrt{\mu_s} \tilde{V}_x x^{11} \ensuremath{\partial}hi_{11} \| \\
&+ \sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\partial}_{xx} \mu_s x^2 \ensuremath{\partial}si_{12} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} Q x^{9.5} \ensuremath{\partial}hi_{11}\| \| \ensuremath{\varepsilon} \sqrt{\mu_s} \tilde{V}_x x^{11} \ensuremath{\partial}hi_{11} \|.
\end{align}
We now estimate the remaining terms in \eqref{bkl}. First, the contributions from $\ensuremath{\partial}hi_{11}'$ are supported for finite $x$, and can thus be estimated by lower order norms. The second term from \eqref{bkl} is bounded by
\begin{align} \ensuremath{\nonumber}
|\int \ensuremath{\varepsilon}^2 v_x^{(11)} \tilde{V} x^{21} \ensuremath{\partial}hi_{11}^2 | \lesssim \sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\varepsilon} v_x^{(11)} \ensuremath{\partial}hi_{11} x^{11} \| \| \sqrt{\ensuremath{\varepsilon}} \tilde{V} \ensuremath{\partial}hi_{11} x^{10.5} \|,
\end{align}
and the third term from \eqref{bkl} by
\begin{align} \ensuremath{\nonumber}
| \int \ensuremath{\varepsilon}^2 v^{(11)}_x Q x^{20} \ensuremath{\partial}hi_{11}^2 | \lesssim \sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\varepsilon} v^{(11)}_x \ensuremath{\partial}hi_{11} \langle x \rangle^{11} \| \| \sqrt{\ensuremath{\varepsilon}}Q \ensuremath{\partial}hi_{10} x^{9} \|,
\end{align}
both of which are acceptable contributions according to estimates \eqref{L2:uv:eq:2}, \eqref{same:Q}, and \eqref{same:same:high}.
We now arrive at the remaining terms from \eqref{eq:11:second}, which will all be treated as error terms. We first record the identity
\begin{align} \ensuremath{\nonumber}
&\int \mu_s^2 \tilde{V}_x (\ensuremath{\varepsilon} \tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2 - 2 \ensuremath{\varepsilon} Q x^{21} \ensuremath{\partial}hi_{11}^2 - 2 \ensuremath{\varepsilon} Q x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}' ) \\ \ensuremath{\nonumber}
= & - \ensuremath{\varepsilon} \int \mu_s \ensuremath{\partial}_x \mu_s \tilde{V}^2 x^{22} \ensuremath{\partial}hi_{11}^2 - 33 \int \ensuremath{\varepsilon} \mu_s^2 \tilde{V}^2 x^{21} \ensuremath{\partial}hi_{11}^2 - \int \ensuremath{\varepsilon} \mu_s^2 \tilde{V}^2 x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}' \\ \ensuremath{\nonumber}
& - 44 \int \ensuremath{\varepsilon} \tilde{V} Q \mu_s \ensuremath{\partial}_x \mu_s x^{21} \ensuremath{\partial}hi_{11}^2 + 462 \int \ensuremath{\varepsilon} \mu_s^2 \tilde{V} Q x^{20} \ensuremath{\partial}hi_{11}^2 + 44 \int \ensuremath{\varepsilon} \mu_s^2 \tilde{V} Q x^{21} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}' \\ \label{bss}
& + 2 \int \ensuremath{\varepsilon} \tilde{V} \ensuremath{\partial}_x (\mu_s^2 Q x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}').
\end{align}
To estimate these, we simply note that due to the pointwise estimates $|\ensuremath{\partial}_x \mu_s \langle x \rangle \ensuremath{\partial}si_{12} | \lesssim \bar{u}$, we have
\begin{align}
\ensuremath{\varepsilon} |\int \mu_s \ensuremath{\partial}_x \mu_s \tilde{V}^2 x^{22} \ensuremath{\partial}hi_{11}^2| \lesssim \| \sqrt{\ensuremath{\varepsilon}} \bar{u} \tilde{V} x^{10.5} \ensuremath{\partial}hi_{11} \|^2,
\end{align}
and similarly for the second term from \eqref{bss}. An analogous estimate applies to the fourth and fifth terms from \eqref{bss}.
We next move to
\begin{align} \ensuremath{\nonumber}
&\int \mu_s \ensuremath{\nonumber}u_s \tilde{V}_y (\ensuremath{\varepsilon} \tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2 - 2 \ensuremath{\varepsilon} Q x^{21} \ensuremath{\partial}hi_{11}^2 - 2 \ensuremath{\varepsilon} Q x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}' ) \\ \ensuremath{\nonumber}
= & - \frac 1 2 \int \ensuremath{\varepsilon} (\mu_s \ensuremath{\nonumber}u_s)_y \tilde{V}^2 x^{22} \ensuremath{\partial}hi_{11}^2 + 2 \int \ensuremath{\varepsilon} \mu_s \ensuremath{\nonumber}u_s \tilde{V} \tilde{U} x^{21} \ensuremath{\partial}hi_{11}^2 + \int \ensuremath{\varepsilon} (\mu_s \ensuremath{\nonumber}u_s)_y \tilde{V} Q x^{21} \ensuremath{\partial}hi_{11}^2 \\
& + 2 \int \ensuremath{\varepsilon} \mu_s \ensuremath{\nonumber}u_s \tilde{V} \tilde{U} x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}' + 2 \int \ensuremath{\varepsilon} (\mu_s \ensuremath{\nonumber}u_s)_y \tilde{V} Q x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}'.
\end{align}
To estimate these terms, we proceed via
\begin{align} \ensuremath{\nonumber}
&|\int \frac 1 2 \ensuremath{\varepsilon} (\mu_s \ensuremath{\nonumber}u_s)_y \tilde{V}^2 x^{22} \ensuremath{\partial}hi_{11}^2| + 2| \int \ensuremath{\varepsilon} \mu_s \ensuremath{\nonumber}u_s \tilde{V} \tilde{U} x^{21} \ensuremath{\partial}hi_{11}^2| + | \int \ensuremath{\varepsilon} (\mu_s \ensuremath{\nonumber}u_s)_y \tilde{V} Q x^{21} \ensuremath{\partial}hi_{11}^2| \\ \ensuremath{\nonumber}
\lesssim & \| \ensuremath{\partial}_y (\mu_s \ensuremath{\nonumber}u_s) x \ensuremath{\partial}si_{12}\|_\infty \| \sqrt{\ensuremath{\varepsilon}} \tilde{V} x^{10.5} \ensuremath{\partial}hi_{11} \|^2 + \sqrt{\ensuremath{\varepsilon}}\| \ensuremath{\nonumber}u_s x^{\frac 1 2} \ensuremath{\partial}si_{12} \|_\infty \| \tilde{U} x^{10.5} \ensuremath{\partial}hi_{11}\| \| \tilde{V} x^{10.5} \ensuremath{\partial}hi_{11} \| \\ \label{analogue:1}
& + \| (\mu_s \ensuremath{\nonumber}u_s)_y x \ensuremath{\partial}si_{12} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} \tilde{V} x^{10.5} \ensuremath{\partial}hi_{11} \| \| \sqrt{\ensuremath{\varepsilon}} Q x^{9.5} \ensuremath{\partial}hi_{11} \|,
\end{align}
all of which are acceptable contributions according to the pointwise decay estimates \eqref{pw:dec:u} - \eqref{Linfty:wo}, and according to \eqref{same:Q} - \eqref{same:same:high}.
We now arrive at the three error terms from \eqref{eq:11:second} which are of the form $(2 \mu_s \mu_{sx} + \ensuremath{\nonumber}u_s \mu_{sy}) \tilde{V} - \mu_{sx} \ensuremath{\nonumber}u_s \tilde{U} - (\mu_s \mu_{sxx} + \ensuremath{\nonumber}u_s \mu_{sxy})Q$. To estimate these contributions it suffices to note that the coefficient in front of $\tilde{V}$ satisfies the estimate $|2 \mu_s \mu_{sx} + \ensuremath{\nonumber}u_s \mu_{sy}| \lesssim \langle x \rangle^{-1}$, and similarly the coefficient in front of $\tilde{U}$ satisfies $|\mu_{sx}\ensuremath{\nonumber}u_s| \lesssim \langle x \rangle^{-1}$. Third, the coefficient in front of $Q$ satisfies $|\mu_s \mu_{sxx} + \ensuremath{\nonumber}u_s \mu_{sxy}| \lesssim \langle x \rangle^{-2}$. Thus, we may apply an analogous estimate to \eqref{analogue:1}.
We now estimate the error terms in $\mathcal{R}_2^{(i)}[u, v]$, for $i = 1,2, 3$, beginning first with those of $\mathcal{R}_2^{(1)}$. For this, we invoke \eqref{R11:estimate} as well as \eqref{same:same:high} to estimate
\begin{align} \ensuremath{\nonumber}
\Big| \int \ensuremath{\varepsilon} \mathcal{R}_2^{(1)} \tilde{V} \langle x \rangle^{22} \ensuremath{\partial}hi_{11}^2 \Big| \lesssim & \|\sqrt{\ensuremath{\varepsilon}} \mathcal{R}_2^{(1)} \langle x \rangle^{11.5} \ensuremath{\partial}hi_{11} \| \| \sqrt{\ensuremath{\varepsilon}} \tilde{V} \langle x \rangle^{10.5} \ensuremath{\partial}hi_{11} \| \\
\lesssim & (\delta \|U, V \|_{\mathcal{X}} + C_\delta \|U, V \|_{\mathcal{X}_{\le 10.5}})(\delta \|U, V \|_{\mathcal{X}} + C_\delta \|U, V \|_{\mathcal{X}_{\le 10.5}}).
\end{align}
We now estimate the contributions from $\mathcal{R}_2^{(2)}[u, v]$, defined in \eqref{def:R22}, for which we first have
\begin{align} \ensuremath{\nonumber}
&|\int \ensuremath{\nonumber}u_{sx} u^{(11)} (\ensuremath{\varepsilon} \tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2 - 2 \ensuremath{\varepsilon} Q x^{21} \ensuremath{\partial}hi_{11}^2 - 2 \ensuremath{\varepsilon} Q x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}' )| \\ \ensuremath{\nonumber}
\lesssim & \sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\nonumber}u_{sx} x \ensuremath{\partial}si_{12} \|_\infty \| u^{(11)} x^{10.5} \ensuremath{\partial}hi_{11}\| ( \| \sqrt{\ensuremath{\varepsilon}} \tilde{V} x^{10.5} \ensuremath{\partial}hi_{11} \| + \| \sqrt{\ensuremath{\varepsilon}} Q x^{9.5} \ensuremath{\partial}hi_{10} \|) + \sqrt{\ensuremath{\varepsilon}} \|U, V \|_{\mathcal{X}_{\le 10.5}} \\
\lesssim & \sqrt{\ensuremath{\varepsilon}} (1 + \ensuremath{\varepsilon}^{\frac{N_2}{2}-M_1} \|U, V \|_{\mathcal{X}}) (C_\delta \| U, V \|_{\mathcal{X}_{\le 10.5}} + \delta \| U, V\|_{\mathcal{X}}) (C_\delta \| U, V \|_{\mathcal{X}_{\le 10.5}} + \delta \| U, V\|_{\mathcal{X}}),
\end{align}
where we have invoked \eqref{pw:v:1}, \eqref{L2:uv:eq}, \eqref{same:same:high}, and similarly for the second term from $\mathcal{R}_2^{(2)}$, we have
\begin{align} \ensuremath{\nonumber}
&|\int \ensuremath{\nonumber}u_{sy} v^{(11)} (\ensuremath{\varepsilon} \tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2 - 2 \ensuremath{\varepsilon} Q x^{21} \ensuremath{\partial}hi_{11}^2 - 2 \ensuremath{\varepsilon} Q x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}' )| \\ \ensuremath{\nonumber}
\lesssim & \| \ensuremath{\nonumber}u_{sy} x \|_\infty \| \sqrt{\ensuremath{\varepsilon}} v^{(11)} x^{10.5} \ensuremath{\partial}hi_{11}\| ( \| \sqrt{\ensuremath{\varepsilon}} \tilde{V} x^{10.5} \ensuremath{\partial}hi_{11} \| + \| \sqrt{\ensuremath{\varepsilon}} Q x^{9.5} \ensuremath{\partial}hi_{10} \|) + \sqrt{\ensuremath{\varepsilon}} \|U, V \|_{\mathcal{X}_{\le 10.5}} \\
\lesssim & (1 + \ensuremath{\varepsilon}^{\frac{N_2}{2}-M_1} \| U, V \|_{\mathcal{X}} )( C_\delta \|U, V \|_{\mathcal{X}_{\le 10.5}} + \delta \|U, V \|_{\mathcal{X}_{11}} ) ( C_\delta \|U, V \|_{\mathcal{X}_{\le 10.5}} + \delta \|U, V \|_{\mathcal{X}_{11}} ),
\end{align}
where we have used \eqref{pw:dec:u}, \eqref{L2:uv:eq}, and \eqref{same:same:high}.
We now move to the error terms from $\mathcal{R}_2^{(3)}$. First, we estimate using the definition \eqref{def:R23},
\begin{align} \ensuremath{\nonumber}
\| \sqrt{\ensuremath{\varepsilon}} \mathcal{R}_2^{(3)} x^{11.5} \ensuremath{\partial}hi_{11}\| \lesssim &\sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\partial}_x^{11} \bar{v}_y y x^{11.5} \|_\infty \| v_y \ensuremath{\partial}hi_{11} \| + \| \ensuremath{\partial}_x^{12} \bar{v} x^{12.5} \|_\infty \| u \langle x \rangle^{-1}\ensuremath{\partial}hi_{11} \| \\ \ensuremath{\nonumber}
& + \| \ensuremath{\partial}_x^{11} \bar{u} x^{11} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} v_x x^{\frac 1 2}\ensuremath{\partial}hi_{11} \| + \| \ensuremath{\partial}_x^{11} \bar{v} x^{11.5} \|_\infty \| v_y \ensuremath{\partial}hi_{11}\| \\
\lesssim & \| U, V \|_{\mathcal{X}_{\le 4}}.
\end{align}
From this, we estimate simply
\begin{align} \ensuremath{\nonumber}
&|\int \mathcal{R}_2^{(3)} (\ensuremath{\varepsilon} \tilde{V} x^{22} \ensuremath{\partial}hi_{11}^2 - 2 \ensuremath{\varepsilon} Q x^{21} \ensuremath{\partial}hi_{11}^2 - 2 \ensuremath{\varepsilon} Q x^{22} \ensuremath{\partial}hi_{11} \ensuremath{\partial}hi_{11}' )| \\ \ensuremath{\nonumber}
\lesssim & \| \sqrt{\ensuremath{\varepsilon}} \mathcal{R}_2^{(3)} x^{11.5} \ensuremath{\partial}hi_{11} \| ( \| \sqrt{\ensuremath{\varepsilon}} \tilde{V} x^{10.5} \ensuremath{\partial}hi_{11} \| + \| \sqrt{\ensuremath{\varepsilon}} Q x^{9.5} \ensuremath{\partial}hi_{10} \|+ \sqrt{\ensuremath{\varepsilon}} \|U, V \|_{\mathcal{X}_{\le 10.5}}) \\
\lesssim & \| U, V \|_{\mathcal{X}_{\le 4}} ( \delta \| \tilde{U}, \tilde{V} \|_{\Theta_{11}} + C_\delta \|U, V \|_{\mathcal{X}_{\le 10.5}} ),
\end{align}
where we have invoked estimate \eqref{same:same:high}. This concludes the proof.
\end{proof}
\section{Nonlinear Analysis} \label{section:NL}
We first obtain estimates on the ``elliptic" component of the $\mathcal{X}$-norm, defined in \eqref{def:E:norm}. For this component of the norm, the mechanism is entirely driven by elliptic regularity.
\begin{lemma} \label{lemma:elliptic} Let $(u, v)$ solve \eqref{vel:eqn:1} - \eqref{vel:eqn:2}. Then the following estimate is valid
\begin{align} \label{elliptic:1}
\|U, V \|_E^2 \lesssim &\| U, V \|_{X_0}^2 + \mathcal{F}_{Ell},
\end{align}
where we define
\begin{align}
\mathcal{F}_{Ell} := & \sum_{k = 1}^{11} \Big( \| \ensuremath{\partial}_x^{k-1} F_R \|^2 + \| \sqrt{\ensuremath{\varepsilon}} \ensuremath{\partial}_x^{k-1} G_R \|^2 \Big).
\end{align}
\end{lemma}
\begin{proof} This is a consequence of standard elliptic regularity. Indeed, rewriting \eqref{vel:eqn:1} - \eqref{vel:eqn:2} as a perturbation of the scaled Stokes operator, we obtain
\begin{align}
&- \Delta_\ensuremath{\varepsilon} u + P_x = F_R + \mathcal{N}_1 - (\bar{u} u_x + \bar{u}_y v + \bar{u}_x u + \bar{v} u_y), \\
&- \Delta_\ensuremath{\varepsilon} v + \frac{P_y}{\ensuremath{\varepsilon}} = G_R + \mathcal{N}_2 - (\bar{u} v_x + \bar{v}_y v + \bar{v}_x u + \bar{v} v_y), \\
&u_x + v_y = 0,
\end{align}
with boundary conditions \eqref{vel:eqn:2}. From here, we apply standard $H^2$ estimates for the Stokes operator on the quadrant, \cite{Blum}, and subsequently bootstrap elliptic regularity for the Stokes operator away from $\{x = 0\}$ in the standard manner (see, for instance, \cite{Iyer2a} - \cite{Iyer2c}), which immediately results in \eqref{elliptic:1}.
\end{proof}
We now analyze the nonlinear terms. Define the total trilinear contribution via
\begin{align} \label{def:full:T}
\mathcal{T} := \sum_{k = 0}^{10} \Big( \mathcal{T}_{X_k} + \mathcal{T}_{X_{k + \frac 1 2}} + \mathcal{T}_{Y_{k + \frac 1 2}} \Big),
\end{align}
where the quantities appearing on the right-hand side of \eqref{def:full:T} are defined in \eqref{def:TX0}, \eqref{TX12:spec}, \eqref{TY12spec}, \eqref{def:TXn}, and \eqref{def:TXnp12}. Our main proposition regarding the trilinear terms will be
\begin{proposition} \label{prop:trilinear} The trilinear quantity $\mathcal{T}$ obeys the following estimate
\begin{align} \label{est:T}
| \mathcal{T} | \lesssim \ensuremath{\varepsilon}^{\frac{N_2}{2} -M_1 - 5} \| U, V \|_{\mathcal{X}}^3.
\end{align}
\end{proposition}
\begin{proof}[Proof of Proposition] The proposition follows from combining estimate \eqref{TX0:est} and \eqref{Trilin:rest}.
\end{proof}
\begin{lemma} The quantity $\mathcal{T}_{X_0}$, defined in \eqref{def:TX0}, obeys the following estimate
\begin{align} \label{TX0:est}
|\mathcal{T}_{X_0}| \lesssim \ensuremath{\varepsilon}^{\frac{N_2}{2} - M_1 - 5} \| U, V \|_{\mathcal{X}}^3.
\end{align}
\end{lemma}
\begin{proof} We recall the definition of $\mathcal{T}_{X_0}$ from \eqref{def:TX0}. We first address the terms from $\mathcal{N}_1$, which give
\begin{align} \ensuremath{\nonumber}
\int \mathcal{N}_1 U g^2 = & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \int u u_x U g^2+ \ensuremath{\varepsilon}^{\frac{N_2}{2}} \int v u_y U g^2\\ \ensuremath{\nonumber}
= & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \int u \ensuremath{\partial}_x (\bar{u} U g^2+ \bar{u}_y q) Ug^2 + \ensuremath{\varepsilon}^{\frac{N_2}{2}} \int v \ensuremath{\partial}_y (\bar{u} U + \bar{u}_y q) Ug^2 \\ \ensuremath{\nonumber}
= & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \int \bar{u} u U U_x g^2+ \ensuremath{\varepsilon}^{\frac{N_2}{2}} \int \bar{u}_x u U^2 g^2+ \ensuremath{\varepsilon}^{\frac{N_2}{2}} \int \bar{u}_{xy} u q Ug^2- \ensuremath{\varepsilon}^{\frac{N_2}{2}} \int \bar{u}_y u VUg^2 \\ \label{N1}
& + \ensuremath{\varepsilon}^{\frac{N_2}{2}} \int \bar{u} v U_y U g^2+ 2 \ensuremath{\varepsilon}^{\frac{N_2}{2}} \int \bar{u}_y v U^2 g^2 + \ensuremath{\varepsilon}^{\frac{N_2}{2}} \int \bar{u}_{yy} v q U g^2.
\end{align}
We now proceed to estimate
\begin{align} \label{walked:1}
\ensuremath{\varepsilon}^{\frac{N_2}{2}} |\int \bar{u} u U U_x g^2 | \lesssim \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| u x^{\frac 14} \|_\infty \| U \langle x \rangle^{- \frac 3 4} \| \| U_x \langle x \rangle^{\frac 1 2} \| \lesssim \ensuremath{\varepsilon}^{\frac{N_2}{2} - M_1} \|U, V \|_{\mathcal{X}}^2 \|\bar{u}U_x \langle x \rangle^{\frac 1 2} \|,
\end{align}
where we have invoked estimate \eqref{Linfty:wo}. To conclude, we estimate the final term appearing in \eqref{walked:1} by splitting $\| \bar{u}U_x \langle x \rangle^{\frac 1 2} \| \lesssim \| \bar{u}U_x (1 - \ensuremath{\partial}hi_{12}) \| + \| \bar{u}U_x \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_{12}\|$, where we have used that the support of $(1 - \ensuremath{\partial}hi_{12})$ is bounded in $x$, and so we can get rid of the weight in $x$ for this term. For the $x$ large piece, we use that $\ensuremath{\partial}hi_1 = 1$ in the support of $\ensuremath{\partial}hi_{12}$, and so $\| U_x \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_{12} \| \le \| U_x \langle x \rangle^{\frac 1 2} \ensuremath{\partial}hi_{1} \| \lesssim \|U, V \|_{\mathcal{X}}$. For the ``near $x = 0$" case, we simply estimate by using $\| \sqrt{\bar{u}} U_x \| \le \ensuremath{\varepsilon}^{- \frac 1 2} \|U, V \|_{X_0}$.
The second and third terms from \eqref{N1} follow in the same manner, via
\begin{align}
\ensuremath{\varepsilon}^{\frac{N_2}{2}} | \int \bar{u}_x u U^2 g^2| + \ensuremath{\varepsilon}^{\frac{N_2}{2}} | \int \bar{u}_{xy} u q U g^2 | \lesssim \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \bar{u}_x x \|_\infty \| u x^{\frac 1 4} \|_\infty \| U \langle x \rangle^{- \frac 5 8} \|.
\end{align}
For the sixth term from \eqref{N1}, we first decompose $\bar{u}$ into its Euler and Prandtl components via
\begin{align} \label{dec:hard:1}
\ensuremath{\varepsilon}^{\frac{N_2}{2}} \int \bar{u}_y v U^2 g^2= \ensuremath{\varepsilon}^{\frac{N_2}{2}} \int \ensuremath{\partial}_y \bar{u}_P v U^2 g^2+ \ensuremath{\varepsilon}^{\frac{N_2+1 }{2}} \int \ensuremath{\partial}_Y \bar{u}_E v U^2 g^2.
\end{align}
For the Euler component, we can use the enhanced $x$-decay available from \eqref{est:Eul:piece} to estimate
\begin{align}
\ensuremath{\varepsilon}^{\frac{N_2+1 }{2}} |\int \ensuremath{\partial}_Y \bar{u}_E v U^2 g^2| \lesssim \ensuremath{\varepsilon}^{\frac{N_2+1 }{2}} \int \langle x \rangle^{- \frac 3 2} U^2 \lesssim \ensuremath{\varepsilon}^{\frac{N_2 + 1}{2}} \| U \langle x \rangle^{- \frac 3 4} \|^2.
\end{align}
For the Prandtl component of \eqref{dec:hard:1}, we do not get strong enough $x$-decay, but rather must rely on self-similarity coupled with the sharp decay of $v$. More specifically, we need to first decompose $U = U(x, 0) + (U - U(x, 0))$, after which we obtain
\begin{align} \ensuremath{\nonumber}
&\ensuremath{\varepsilon}^{\frac{N_2}{2}} | \int \bar{u}_{Py} v U^2 g^2| \le \ensuremath{\varepsilon}^{\frac{N_2}{2}} | \int \bar{u}_{Py} v U(x, 0)^2| + \ensuremath{\varepsilon}^{\frac{N_2}{2}} | \int \bar{u}_{Py} v (U - U(x, 0))^2| \\
\lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \sup_x \| \bar{u}_{Py} \|_{L^1_y} \| v \langle x \rangle^{\frac 1 2} \|_\infty \| U(x, 0) \langle x \rangle^{- \frac 1 2} \|_{y = 0}^2 + \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \bar{u}_{Py} y^2 x^{- \frac 1 2} \|_\infty \| v \langle x \rangle^{\frac 1 2} \|_\infty \| \frac{U - U(x, 0)}{y} \|^2.
\end{align}
We now address the terms from $\mathcal{N}_2$, which gives
\begin{align} \ensuremath{\nonumber}
\int \ensuremath{\varepsilon} \mathcal{N}_2 ( V g^2 + \frac{1}{100} q \langle x \rangle^{- 1 - \frac{1}{100}} )= & \ensuremath{\varepsilon}^{\frac{N_2}{2} + 1} \int u v_x V g^2 + \ensuremath{\varepsilon}^{\frac{N_2}{2} + 1} \int v v_y V g^2 \\
&+ \frac{1}{100} \ensuremath{\varepsilon}^{\frac{N_2}{2}+ 1} \int u v_x q \langle x \rangle^{- 1 - \frac{1}{100}} + \frac{1}{100} \ensuremath{\varepsilon}^{\frac{N_2}{2}+ 1} \int v v_y q \langle x \rangle^{- 1 - \frac{1}{100}}.
\end{align}
We estimate these terms directly via,
\begin{align}
\ensuremath{\varepsilon}^{\frac{N_2}{2}+1} |\int u v_x V g^2| \lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| u \langle x \rangle^{\frac 1 4}\|_\infty \| \sqrt{\ensuremath{\varepsilon}} v_x \langle x \rangle^{\frac 1 2} \| \| \sqrt{\ensuremath{\varepsilon}} V \langle x \rangle^{- \frac 3 4} \|, \\
\ensuremath{\varepsilon}^{\frac{N_2}{2}+1} |\int v v_y V g^2| \lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| v \langle x \rangle^{\frac 1 2} \|_\infty \| v_y \langle x \rangle^{\frac 1 2} \| \| \sqrt{\ensuremath{\varepsilon}} V \langle x \rangle^{-1} \| \\
\ensuremath{\varepsilon}^{\frac{N_2}{2}+1} |\int u v_x q \langle x \rangle^{-1 - \frac{1}{100}} | \lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| u \langle x \rangle^{\frac 1 4} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} v_x \langle x \rangle^{\frac 1 2} \| \| \sqrt{\ensuremath{\varepsilon}} q \langle x \rangle^{- \frac 7 4} \|, \\
\ensuremath{\varepsilon}^{\frac{N_2}{2}+ 1} | \int v v_y q \langle x \rangle^{- 1 - \frac{1}{100}}| \lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| v \langle x \rangle^{\frac 1 2} \|_\infty \| v_y \langle x \rangle^{\frac 1 2} \| \| \sqrt{\ensuremath{\varepsilon}} q \langle x \rangle^{ -2 } \|.
\end{align}
\end{proof}
We note that in the estimation of the trilinear terms, $\mathcal{T}_{X_{\frac 1 2}}$ and $\mathcal{T}_{Y_{\frac 1 2}}$, we do not need to integrate by parts to find extra structure. In fact, it is a bit more convenient to state a general lemma first, which simplifies the forthcoming estimates.
\begin{lemma} For $0 \le k \le 10$,
\begin{align} \label{N2:quant}
\| \frac{1}{\bar{u}} \ensuremath{\partial}_x^k \mathcal{N}_1 \langle x \rangle^{k + \frac 1 2} \phi_1 \| + \sqrt{\ensuremath{\varepsilon}} \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^k \mathcal{N}_2 \langle x \rangle^{k + \frac 1 2} \phi_1 \| \lesssim \ensuremath{\varepsilon}^{\frac{N_2}{2} - 2M_1} \| U, V \|_{\mathcal{X}}^2.
\end{align}
\end{lemma}
\begin{proof} First, regarding the cutoff function $\phi_1$ present in \eqref{N2:quant}, we will rewrite it as $\phi_1 = \phi_1 \psi_{12} = \phi_1 (\psi_{12} - \phi_{12} ) + \phi_1 \phi_{12}$, according to the definitions \eqref{def:phi:j} and \eqref{psi:twelve:def}. As a result, we separate the estimation of \eqref{N2:quant} into
\begin{align} \ensuremath{\nonumber}
&\| \frac{1}{\bar{u}} \ensuremath{\partial}_x^k \mathcal{N}_1 \langle x \rangle^{k + \frac 1 2} \phi_1 \| + \sqrt{\ensuremath{\varepsilon}} \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^k \mathcal{N}_2 \langle x \rangle^{k + \frac 1 2} \phi_1 \| \\ \ensuremath{\nonumber}
\le & \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^k \mathcal{N}_1 \langle x \rangle^{k + \frac 1 2} (\psi_{12} - \phi_{12}) \| + \sqrt{\ensuremath{\varepsilon}} \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^k \mathcal{N}_2 \langle x \rangle^{k + \frac 1 2} (\psi_{12} - \phi_{12}) \| \\
& + \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^k \mathcal{N}_1 \langle x \rangle^{k + \frac 1 2} \phi_{12} \| + \sqrt{\ensuremath{\varepsilon}} \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^k \mathcal{N}_2 \langle x \rangle^{k + \frac 1 2} \phi_{12} \|.
\end{align}
The quantities with $\psi_{12} - \phi_{12}$ are supported in a finite region of $x$, and are thus estimated by $\ensuremath{\varepsilon}^{\frac{N_2}{2} - 2M_1} \|U, V \|_{\mathcal{X}}^2$. We must thus consider the more difficult case of large $x$, in the support of $\phi_{12}$.
We first treat the two terms arising from $\mathcal{N}_1$. Applying the product rule yields
\begin{align} \label{x:N1:1}
\ensuremath{\partial}_x^k \mathcal{N}_1 = \sum_{j = 0}^k \binom{k}{j} (\ensuremath{\partial}_x^j u \ensuremath{\partial}_x^{k-j+1} u + \ensuremath{\partial}_x^j v \ensuremath{\partial}_x^{k-j} \ensuremath{\partial}_y u )
\end{align}
Let us first treat the first quantity in the sum. As $0 \le k \le 10$, either $j$ or $k - j + 1$ must be less than $6$. By symmetry of this term, we assume that $j \le 6$ and then $k \le 10$. In this case, note that $k-j+1 \le 11$, and so we estimate via
\begin{align}
\ensuremath{\varepsilon}^{\frac{N_2}{2}}\| \frac{1}{ \bar{u} } \ensuremath{\partial}_x^j u \ensuremath{\partial}_x^{k-j+1} u \langle x \rangle^{k+ \frac 1 2} \phi_{12} \| \lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}}\| \frac{1}{\bar{u}} \ensuremath{\partial}_x^j u \langle x \rangle^{j + \frac 1 4} \psi_{12} \|_\infty \| \ensuremath{\partial}_x^{k-j+1} u \langle x \rangle^{k-j+\frac 12} \phi_{12} \| \lesssim \ensuremath{\varepsilon}^{\frac{N_2}{2}- M_1} \|U, V \|_{\mathcal{X}}^2,
\end{align}
where we have invoked \eqref{pw:dec:u} and \eqref{L2:uv:eq}.
We now move to the second term from \eqref{x:N1:1}, which is not symmetric and thus we consider two different cases. First, we assume that $j \le 6$ and $k \le 10$. In this case, we estimate
\begin{align} \ensuremath{\nonumber}
\ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^j v \ensuremath{\partial}_x^{k-j} \ensuremath{\partial}_y u \langle x \rangle^{k + \frac 1 2} \phi_{12}\| \lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}}\| \frac{1}{\bar{u}} \ensuremath{\partial}_x^j v \langle x \rangle^{j + \frac 1 2} \psi_{12}\|_\infty \| \ensuremath{\partial}_x^{k-j} \ensuremath{\partial}_y u \langle x \rangle^{k-j} \phi_{12}\| \\
\lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}-M_1} \|U, V \|_{\mathcal{X}}^2,
\end{align}
where we have invoked \eqref{pw:v:1} and \eqref{L2:uv:eq:2}.
We next consider the case that $0 \le k-j \le 6$ and $6 \le j \le 10$. In this case, we estimate via
\begin{align} \ensuremath{\nonumber}
\ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \frac{1}{\bar{u}}\ensuremath{\partial}_x^j v \ensuremath{\partial}_x^{k-j} \ensuremath{\partial}_y u \langle x \rangle^{k + \frac 1 2} \phi_{12} \| \lesssim &\ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \ensuremath{\partial}_x^{k-j} \ensuremath{\partial}_y u \langle x \rangle^{k-j + \frac 1 2} \psi_{12}\|_{L^\infty_x L^2_y} \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^j v \langle x\rangle^{j } \phi_{12} \|_{L^2_x L^\infty_y} \\
\lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}- M_1} \| U, V \|_{\mathcal{X}}^2
\end{align}
where we have used the mixed-norm estimates in \eqref{pw:v:1} and \eqref{mixed:L2:orig:1} (and crucially that $j \le 10$ to be in the range of admissible exponents for \eqref{mixed:L2:orig:1}).
We now consider the second quantity in \eqref{N2:quant}, for which we again apply the product rule to obtain
\begin{align} \label{second:sum}
\ensuremath{\partial}_x^k \mathcal{N}_2 = \sum_{j = 0}^{k} \binom{k}{j} ( \ensuremath{\partial}_x^j u \ensuremath{\partial}_x^{k-j+1} v + \ensuremath{\partial}_x^j v \ensuremath{\partial}_x^{k-j} \ensuremath{\partial}_y v )
\end{align}
We again treat two cases. First, assume that $j \le 6$, so $1 \le k-j+1 \le 11$. In this case, estimate by
\begin{align} \ensuremath{\nonumber}
\ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^j u \sqrt{\ensuremath{\varepsilon}} \ensuremath{\partial}_x^{k-j+1} v \langle x \rangle^{k + \frac 1 2} \phi_{12} \| \lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}} \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^j u \langle x \rangle^{j + \frac 1 4} \psi_{12} \|_\infty \| \sqrt{\ensuremath{\varepsilon}} \ensuremath{\partial}_x^{k-j+1} v \langle x \rangle^{k-j+ \frac 1 2} \phi_{12}\| \\
\lesssim & \ensuremath{\varepsilon}^{\frac{N_2}{2}- M_1} \| U, V \|_{\mathcal{X}}^2,
\end{align}
where we have invoked \eqref{L2:uv:eq} and \eqref{pw:dec:u}.
Second, we assume that $1 \le k-j+1 \le 4$ and $j \ge 7$, in which case we estimate by
\begin{align}
\sqrt{\ensuremath{\varepsilon}} \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^j u \ensuremath{\partial}_x^{k-j+1} v \langle x \rangle^{k + \frac 1 2} \phi_{12} \| \lesssim \sqrt{\ensuremath{\varepsilon}} \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^{k-j+1} v \langle x\rangle^{k-j+1 + \frac 1 2} \psi_{12} \|_\infty \| \ensuremath{\partial}_x^j u \langle x \rangle^{j - \frac 1 2} \phi_{12}\|.
\end{align}
For the second contribution from \eqref{second:sum}, we again split into two cases. For the first case, we assume that $j \le 6$, in which case
\begin{align}
\sqrt{\ensuremath{\varepsilon}} \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^j v \ensuremath{\partial}_x^{k-j} \ensuremath{\partial}_y v \langle x \rangle^{k + \frac 1 2} \phi_{12}\| \lesssim \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^j v \langle x \rangle^{j + \frac 1 2} \psi_{12} \|_\infty \| \ensuremath{\partial}_x^{k-j} u_x \langle x \rangle^{k-j+\frac 1 2} \phi_{12}\|.
\end{align}
In the second case, we assume that $7 \le j \le 10$, in which case $k-j+1 \le 4$, and so we put
\begin{align}
\sqrt{\ensuremath{\varepsilon}} \| \frac{1}{\bar{u}} \ensuremath{\partial}_x^{j} v \ensuremath{\partial}_x^{k-j} \ensuremath{\partial}_y v \langle x \rangle^{k + \frac 1 2} \phi_{12} \| \lesssim \sqrt{\ensuremath{\varepsilon}} \| \ensuremath{\partial}_x^j v \langle x \rangle^{j - \frac 1 2}\phi_{12} \| \|\frac{1}{\bar{u}} \ensuremath{\partial}_x^{k-j+1} u \langle x \rangle^{k-j+1 + \frac 1 4} \psi_{12} \|_\infty.
\end{align}
This concludes the proof of the lemma.
\end{proof}
We now establish the following corollary
\begin{corollary} The following estimate is valid:
\begin{align} \label{Trilin:rest}
\Big| \sum_{k = 1}^{10} \mathcal{T}_{X_k} + \sum_{k = 0}^{10} \mathcal{T}_{X_{k + \frac 1 2}} + \mathcal{T}_{Y_{k + \frac 1 2}} \Big| \lesssim \ensuremath{\varepsilon}^{\frac{N_2}{2} - 2M_1-5} \| U, V \|_{\mathcal{X}}^3.
\end{align}
\end{corollary}
\begin{proof} This is an immediate corollary of \eqref{N2:quant} and the definitions of $\mathcal{T}_{X_k}, \mathcal{T}_{X_{k + \frac 1 2}}$, and $\mathcal{T}_{Y_{k + \frac 1 2}}$.
\end{proof}
From the above analysis, the proof of the main theorem, Theorem \ref{thm:2}, is essentially immediate. Indeed, we have
\begin{proof}[Proof of Theorem \ref{thm:2}] We now add together estimates \eqref{basic:X0:est:st}, \eqref{Xh:right}, \eqref{basic:Yhalf:est:st}, \eqref{estXnnorm}, \eqref{esthalfnX}, \eqref{esthalfnY}, \eqref{energy:theta11}, \eqref{elliptic:1}, from which we obtain
\begin{align}
\| U, V \|_{\mathcal{X}}^2 \le \sum_{k =0}^{11} \mathcal{F}_{X_k} + \sum_{k = 0}^{10} \mathcal{F}_{X_{k + \frac 1 2}} + \mathcal{F}_{Y_{k + \frac 1 2}} + \mathcal{F}_{Ell} + \mathcal{T}.
\end{align}
Appealing to estimate \eqref{est:T} and the established estimates on the forcing quantities, \eqref{est:forcings:part1} gives the main \textit{a-priori} estimate, which reads
\begin{align}
\| U, V \|_{\mathcal{X}}^2 \lesssim \ensuremath{\varepsilon}^5 + \ensuremath{\varepsilon}^{\frac{N_2}{2} - 2M_1-5} \| U, V \|_{\mathcal{X}}^3.
\end{align}
From here, the existence and uniqueness follows from a standard contraction mapping argument.
\end{proof}
\noindent \textbf{Acknowledgements:} S.I. is grateful for the hospitality and inspiring work atmosphere at NYU Abu Dhabi, where this work was initiated. The work of S.I. is partially supported by NSF grant DMS-1802940. The work of N.M. is supported by NSF grant DMS-1716466 and by Tamkeen under the NYU Abu Dhabi Research Institute grant
of the center SITE.
\def3.5em{3.5em}
\end{document} |
\begin{document}
\title{The vectorial kernel method for walks with longer steps}
\author{Valerie Roitner\thanks{TU Wien, Institute for discrete mathematics and geometry. ORCiD of the author: \href{https://orcid.org/0000-0002-2621-431X}{0000-0002-2621-431X}}}
\maketitle
\begin{abstract}
Asinowski, Bacher, Banderier and Gittenberger \cite{ABBG-vkm} recently developed the vectorial kernel method -- a powerful extension of the classical kernel method that can be used for paths that obey constraints that can be described by finite automata, e.g. avoid a fixed pattern, avoid several patterns at once, stay in a horizontal strip and many others more. However, they only considered walks with steps of length one. In this paper we will generalize their results to walks with longer steps. We will also give some applications of this extension and prove a conjecture about the asymptotic behavior of the expected number of ascents in Schröder paths.\\
\\
2010 Mathematics subject classification: 05A15, 05A16, 05C81\\
\\
Key words and phrases: lattice path, Schröder path, generating functions, kernel method, asymptotic behavior
\end{abstract}
\section{Introduction}
Lattice path structures appear often in mathematical models in natural sciences or computer science, for example in analysis of algorithms (see e.g. \cite{Knuth3, ADHKP}) or physics when modeling wetting and melting processes \cite{Fisher} or Brownian motion \cite{Mar-brownian}. Another field is bioinformatics (\cite{MTM-dna, RG-dna, JQR}), where the lattice paths are usually constrained to avoid certain patterns.
The generating function of these objects can often be described by a functional equation that can be solved by the kernel method.
In its easiest form, i.e., for solving equations of the type
$$K(z,u)F(z,u)=A(z,u)+B(z,u)G(z),$$
where $K,A$ and $B$ are known functions and $F$ and $G$ are unknown functions, where $K(z,u)=0$ has only one small root (i.e., a root $u_i(z)$ with $u_i(z)\to 0$ as $z\to 0$), the kernel method has been folklore in combinatorics and related fields like probability theory. One identifiable source is Knuth's book \cite{Knuth} from 1968, where he used this idea as a new method for solving the ballot problem. Ever since there have been several extensions and applications of this method, see for example \cite{BW, BMJ, BMM}, one of the most recent being the \emph{vectorial kernel method} by Asinowski, Bacher, Banderier and Gittenberger \cite{ABBG-vkm}. It allows one to solve enumeration problems for lattice paths obeying constraints that can be described by a finite automaton. Furthermore, it also allows enumeration of the occurrence of any phenomenon that can be described by a finite automaton, e.g. the number of occurrences of a given pattern.
In their paper they only considered directed walks with steps where the first coordinate is 1. However, this method can be generalized to directed walks with longer steps, which will be done in this paper. The proofs used here follow the same methods as in the case with steps of length one, but some adaptations have to be made.
In the final two chapters we will have a look at some applications of this method. Firstly, we will re-derive the number of Schröder paths (excursions with steps $U=(1,1), D=(1,-1)$ and $F=(2,0)$) avoiding the pattern $UF$, which has been studied in \cite{Yan}. Furthermore, we will derive the trivariate generating function for the number of ascents (i.e. number of sequences of nonempty consecutive up-steps) in Schröder paths and prove that the expected number of ascents in Schröder paths of length $2n$ indeed behaves asymptotically like $(\sqrt{2}-1)n$ as Callan conjectured on the OEIS \cite{OEIS}, entry \oeis{A090981}.
\section{Definitions and notations}
A \emph{lattice path} in $\mathbb{Z}^2$ is a finite sequence or finite word $w=[\nu_1,\dots,\nu_m]$ such that all vectors $\nu_i$ lie in the \emph{step set} $\mathcal{S}$, which is a finite subset of $\mathbb{Z}^2.$ A lattice path can be visualized as a polygonal line in the plane, which is created by starting at the origin and successively appending the vectors $\nu_i=(u_i,v_i)$ at the end. The vectors $\nu_i$ are called \emph{steps}. In this paper we will only consider \emph{directed} lattice paths where all steps $(u_i,v_i)$ have a positive first entry.
\begin{figure}
\caption{A lattice path}
\label{ex-lp}
\end{figure}
The first entry $u_i$ of a step is called its \emph{length} and the length of a walk $w$, denoted by $|w|$, is the sum of the lengths of all its steps, i.e. $|w|=u_1+\dots+u_m$. This does not always coincide with the number of steps; it does so only if $u_i=1$ for all steps in $\mathcal{S}$. The \emph{final altitude} of a walk $w$, denoted by $\text{alt}(w)$, is the sum of the altitudes of all steps, i.e. $\text{alt}(w)=v_1+\dots+v_m$. Thus, a walk starting in $(0,0)$ terminates in $(|w|,\text{alt}(w))$.
The \emph{step polynomial} $P(t,u)$ of the step set $\mathcal{S}$ is given by
\begin{equation}
\label{steppoly}
P(t,u)=\sum_{s\in\mathcal{S}} t^{|s|}u^{\text{alt}(s)}.
\end{equation}
The variable $t$ encodes length, the variable $u$ encodes altitude. When all steps have length one, we can omit the dependency on $t$ and write
$$P(u)=\sum_{s\in\mathcal{S}}u^{\text{alt}(s)}.$$
Denote $-c$ the smallest (negative) power of $u$ in $P(t,u)$ and $d$ the largest (positive) power of $u$. If $\mathcal{S}$ only contains negative or only positive altitudes of steps, the following results still hold, but the corresponding models are easy to solve and lead to rational generating functions.
Often, there are constraints imposed on the lattice paths one wants to consider, e.g., the path is not allowed to leave a certain region or has to end at a certain altitude, usually at altitude zero. This leads to the following
\begin{defi} For lattice paths obeying constraints we define:
\begin{itemize}
\item
A \emph{walk} is an unconstrained lattice path.
\item
A \emph{bridge} is a lattice path whose endpoint lies on the $x$-axis.
\item
A \emph{meander} is a lattice path that lies in the quarter-plane ${\mathbb Z}_{\geq0}\times{\mathbb Z}_{\geq0}$. Since we only consider lattice paths with steps with positive $x$-coordinates, this is equivalent to lattice paths that never attain negative altitude.
\item
An \emph{excursion} is a lattice path that is both a bridge and a meander, i.e., a lattice path that ends on the $x$-axis, but never crosses the $x$-axis.
\end{itemize}
\end{defi}
Banderier and Flajolet \cite{BF} computed generating functions for all these classes of lattice paths. Their results can be summarized by the table in Figure~\ref{table-BF}.
\begin{figure}
\caption{The generating functions for walks, bridges, meanders and excursions (in the case of steps of length one). Here, $P(u)$ is the step polynomial, $c$ is the number of small roots, which are given by $u_1,\dots, u_c$. Here, $W_0(t)$ stands for $[u^0]W(t,u)=W(t,0)$ (and analogously for $M_0$).}
\label{table-BF}
\end{figure}
\\
This study was generalized in \cite{ABBG-vkm} to paths with steps of length one that avoid one single pattern. In this paper we will show similar results for walks with longer steps.\\
\\
A \emph{pattern} is a fixed path
$$p=[a_1,\dots,a_{\ell}]$$
where $a_i\in \mathcal{S}$. The length of a pattern is the sum of the lengths of its steps. An \emph{occurrence} of a pattern $p$ in a lattice path $w$ is a contiguous sub-string of $w$, which coincides with $p$. We say a lattice path $w$ \emph{avoids} the pattern $p$ if there is no occurrence of $p$ in $w$. For example, the path $w=[(1,1),(3,0),(3,0),(1,1), (1,-2),(3,0),(1,1)]$ has two occurrences of the pattern $[(3,0),(1,1)]$ but avoids the pattern $[(1,-2),(1,-2)]$.
A \emph{prefix} of length $k$ of a string is a contiguous non-empty sub-string that matches the first $k$ letters (or steps, to phrase it with words more familiar for a lattice path setting). Similarly, a \emph{suffix} of length $k$ of a string is a contiguous non-empty sub-string that matches the last $k$ letters. For example, $[(1,1),(3,0),(3,0)]$ is a prefix (of length 3) of the path from the previous example and $[(1,-2),(3,0),(1,1)]$ is a suffix. A \emph{presuffix} of a pattern is a non-empty string that is both prefix and suffix. In our above example, $[(1,1)]$ is the only presuffix of this given path.
Some authors use a different definition of a pattern, namely when the pattern is contained in the path as non-contiguous substring, see for example \cite{BBLGPW}. The path $w$ as defined in the previous example contains $[(1,1),(3,0),(1,1),(3,0)]$ in the non-contiguous-sense, but not in the contiguous sense. Lattice paths avoiding patterns in the non-contiguous sense also can be dealt with the vectorial kernel method. In this paper we will only consider consecutive patterns.
\noindent
In order to describe pattern avoidance we will need the concept of finite automata.
\begin{defi}
A \emph{finite automaton} is a quadruple $(\Sigma, \mathcal{M}, s_0, \delta)$ where
\begin{itemize}
\item
$\Sigma$ is the input alphabet (in our case, $\Sigma$ will usually be the step set)
\item
$\mathcal{M}$ is a finite, nonempty set of states
\item
$s_0\in \mathcal{M}$ is the initial state
\item
$\delta: \mathcal{M}\times\Sigma\to \mathcal{M}$ is the state transition function. In many cases, it is useful to allow $\delta$ to be a partial function as well, i.e., not every input $\delta(S_i,x)$ has to be defined. Especially for pattern avoidance the usage of partial functions is very helpful.
\end{itemize}
Sometimes there is also a set $F\subseteq\mathcal{M}$ of final states given in the definition of a finite automaton. Here, however, we will not have any final states (i.e. $F=\emptyset$).
\end{defi}
A finite automaton can be described as a weighted directed graph (the states being the vertices, the edges and their weights given by the transition function) or by an adjacency matrix $A$, where the entry $A_{ij}$ consists of the sum of all letters $x$ that, when being in state $S_i$ and reading the letter $x$, transition to state $S_j$. Phrased differently,
$${A_{ij}=\sum_{x:\delta(S_i,x)=S_j} x}.$$
\noindent
\textbf{Example:} Let $\mathcal{S}=\{U,F,D\}$ where $U=(1,1), F=(2,0)$ and $D=(1,-1)$ be the step set and $p=[U,F,U,D]$ the forbidden pattern. We will build an automaton with $s=4$ states, where $s$ is the number of steps in the pattern. Each state corresponds to a proper prefix of $p$ collected so far by walking along the lattice path. Let us label these states $X_0,\dots, X_{s-1}$ (in our case $X_0,\dots, X_3$). The first state $X_0$ is labeled by the empty word. The next states are labeled by proper prefixes of $p$, more precisely $X_i$ is labeled by $X_i=[a_1,\dots,a_i]$ where $a_j$ are the letters of the forbidden pattern. For $i,j\in\{0,\dots,s-1\}$ we have $\delta(X_i,\lambda)=X_j$ (or, in the graph setting, an arrow labeled $\lambda$) if $j$ is the maximal number such that $X_j$ is a suffix of $X_i\lambda$.
When the automaton reads a path $w$, it ends in the state labeled with the longest prefix of $p$ that coincides with a suffix of $w$. The automaton is completely determined by the step set and the pattern.
\begin{figure}
\caption{The automaton for $\mathcal{S}=\{U,F,D\}$ and the forbidden pattern $p=[U,F,U,D]$.}
\label{ex-UFUD}
\end{figure}
When looking at the adjacency matrix of this automaton we also have to keep track of the length of the steps. We obtain
$$A=\begin{pmatrix}
t^2+tu^{-1} & tu & 0 & 0\\
tu^{-1} & tu & t^2 & 0\\
t^2+tu^{-1} & 0 & 0 & tu\\
0& tu & t^2 & 0
\end{pmatrix}.$$
In each row except the last one, all entries sum up to $P(t,u)$, because at each state except the last one, all possible steps are allowed. The entries in the last row of the matrix sum up to $P(t,u)-w_s$, where $w_s$ is the weight of the last step in the forbidden pattern $p$. This is because in the last state $X_{s-1}$ all steps except the one that would make $p$ complete are allowed.
\noindent
Automata can not only be used to describe the avoidance of one pattern, but also for other constraints, e.g. the avoidance of several patterns at once (see \cite{ABR}) or height constraints. Or to describe the avoidance of patterns in the non-contiguous sense.
\begin{defi}
The \emph{kernel} of an automaton is defined to be the determinant of $I-A(t,u)$, where $A$ is the adjacency matrix of the automaton, i.e.,
$$K(t,u):=\det(I-A(t,u)).$$
\end{defi}
For certain kinds of automata, for example the automata that arise when considering walks that avoid a pattern, there are easier expressions for the kernel that avoid the computation of the adjacency matrix and its determinant. For more details on this, see \cite{ABBG-vkm}.
\section{The vectorial kernel method for walks with longer steps}
The vectorial kernel method indeed works for walks with longer steps if the right adaptations are made. Instead of the adjacency matrix $A=A(u)$ we now have to consider the adjacency matrix $A(t,u)$ that takes into account the different lengths of the steps by weighting them with the corresponding powers of $t$, i.e. a step of length $i$ is weighted with $t^{i}$. With this adapted adjacency matrix we obtain the following theorems:
\begin{thm}
\label{walks-longsteps}
The bivariate generating function for walks obeying constraints that can be described by a finite automaton (e.g. pattern avoidance, height restrictions, etc.) is given by
\begin{equation}
\label{gf-walks-longsteps}
W(t,u)=\frac{(1,0,\dots,0)\mathrm{adj}(I-A(t,u))\vec{\mathbf{1}}}{\det(I-A(t,u))}
\end{equation}
where $t$ encodes length and $u$ encodes final altitude.
\end{thm}
\begin{thm}
\label{meanders-longsteps}
The bivariate generating function for meanders obeying constraints that can be described by a finite automaton is given by
\begin{equation}
\label{gf-meanders-longsteps}
M(t,u)=\frac{G(t,u)}{u^{e}K(t,u)}\prod_{i=1}^{e}(u-u_i(t))
\end{equation}
where $t$ encodes length and $u$ encodes final altitude, $u_i$ $(i=1,\dots, e)$ are the small roots of $K(t,u)$ and $G(t,u)$ is a polynomial in $u$ which will be characterized in (\ref{G}).
\end{thm}
\textit{Proof of Theorem \ref{walks-longsteps}:} The proof follows the same idea as in the case with steps of length one, which was considered in \cite{ABBG-vkm}. Writing $W_i:=W_i(t,u)$ for the generating function of walks ending in state $X_i$ and using a step-by-step-construction we obtain the following functional equation
$$(W_1,\dots,W_{\ell})=(1,0,\dots, 0)+(W_1,\dots, W_{\ell})\cdot A(t,u),$$
or equivalently
$$(W_1,\dots,W_{\ell})(I-A(t,u))=(1,0,\dots, 0).$$
Multiplying this from the right with $(I-A(t,u))^{-1}=\frac{\text{adj}(I-A(t,u))}{\det(I-A(t,u))}$ we obtain
$$(W_1,\dots,W_{\ell})=\frac{(1,0,\dots,0)\text{adj}(I-A(t,u))}{\det(I-A(t,u))}.$$
The generating function $W(t,u)$ is the sum of the generating functions $W_i(t,u)$ thus we have that
$$W(t,u)=(W_1,\dots,W_{\ell})\vec{\mathbf{1}}=\frac{(1,0,\dots,0)\text{adj}(I-A(t,u))\vec{\mathbf{1}}}{\det(I-A(t,u))}$$
which finishes the proof.
$\Box$
\begin{cor}
The generating function for bridges is given by
$$B(t)=[u^0]W(t,u)=\frac{1}{2\pi i}\int_{|u|=\varepsilon}\frac{W(t,u)}{u}\,du=\sum_{i=1}^{e}\mathrm{Res}_{u=u_i}\frac{W(t,u)}{u}.$$
\end{cor}
\textit{Proof of Theorem \ref{meanders-longsteps}:} This proof works similarly as the one for walks, only that now we also have to take care of the fact that the walk is not allowed to attain negative altitude. Writing $M_i=M_i(t,u)$ for the generating function of meanders ending in state $X_i$ of the automaton and using a step-by step construction we obtain the following vectorial functional equation
$$(M_1,\dots, M_{\ell})=(1,0,\dots, 0)+ (M_1,\dots, M_{\ell})\cdot A(t,u)-\{u^{<0}\}((M_1,\dots, M_{\ell})\cdot A(t,u)).$$
This is equivalent to
$$(M_1,\dots, M_{\ell})(I-A(t,u))=(1,0,\dots, 0)-\{u^{<0}\}((M_1,\dots, M_{\ell})\cdot A(t,u)).$$
Writing $F:=(F_1,\dots,F_{\ell})$ for the right hand side of the above equation we obtain
\begin{equation}
\label{blue}
(M_1,\dots, M_{\ell})(I-A(t,u))=(F_1,\dots, F_{\ell}).
\end{equation}
Multiplying (\ref{blue}) from the right by $(I-A(t,u))^{-1}=\frac{\text{adj}(I-A(t,u))}{\det(I-A(t,u))}$ we obtain
$$(M_1,\dots, M_{\ell})=(F_1,\dots,F_{\ell})\cdot \frac{\text{adj}(I-A(t,u))}{\det(I-A(t,u))}.$$
The generating function $M(t,u)$ is the sum of all the generating functions $M_i$. Using this, defining
$$\vec{v}:=\text{adj}(I-A(t,u))\vec{\mathbf{1}}$$
and using
$$\det(I-A(t,u))=K(t,u)$$
we obtain
\begin{equation}
\label{green}
M(t,u)=\frac{(F_1,\dots, F_{\ell})\vec{v}}{K(t,u)}.
\end{equation}
Let $u_i=u_i(t)$ be a small root of the kernel $K(t,u)$. We plug $u=u_i$ into (\ref{blue}). The matrix $(I-A(t,u))|_{u=u_i}$ is then singular. Furthermore, we observe that $\vec{v}_{u=u_i}$ is an eigenvector of $(I-A(t,u))|_{u=u_i}$ for the eigenvalue $\lambda=0$.
Thus, multiplying (\ref{blue}) from right with $\vec{v}_{u=u_i}$ the left hand side of the equation vanishes. Said differently, the equation
$$(F_1(t,u),\dots, F_{\ell}(t,u))\vec{v}(t,u)=0$$
is satisfied by all small roots $u_i(t)$ of $K(t,u)$.
Let
\begin{equation}
\label{Phi}
\Phi(t,u):=u^{e}(F_1(t,u),\dots, F_{\ell}(t,u))\vec{v}(t,u).
\end{equation}
Note that $\Phi$ is a Laurent polynomial in $u$, because $F_i$ and $\vec{v}$ are Laurent polynomials in $u$ by construction. Because of (\ref{green}) we have that
$$\Phi(t,u)=u^{e}M(t,u)K(t,u)$$
and since $M$ is a power series in $u$ and $K$ has exactly $e$ small roots the Laurent polynomial $\Phi$ contains no negative powers in $u$ and is a polynomial in $u$. Each small root $u_i$ is a root of the polynomial equation $\Phi(t,u)=0$, thus we have that
\begin{equation}
\label{G}
\Phi(t,u)=G(t,u)\prod_{i=1}^{e} (u-u_i(t))
\end{equation}
where $G(t,u)$ is a polynomial in $u$ and formal power series in $t$. It can be computed by comparing coefficients. Plugging $G$ in (\ref{green}) we obtain
$$M(t,u)=\frac{G(t,u)}{u^{e}K(t,u)}\prod_{i=1}^{e}(u-u_i(t))$$
which finishes the proof.
$\Box$
\begin{cor}
The generating function $E(t)$ for excursions with restrictions described by a finite automaton $A(t,u)$ satisfies
$$E(t)=M(t,0)=\left.\frac{G(t,u)}{u^{e}K(t,u)}\prod_{i=1}^{e}(u-u_i(t))\right|_{u=0}.$$
\end{cor}
\section{Examples}
In this section we will consider some examples illustrating applications of the previous theorems. The first example is more of the simple and introductory kind and deals with Schröder paths avoiding the pattern $UF$, the second one counts Schröder paths having $k$ ascents and proves a conjecture about the asymptotic behavior of the expected number of ascents.
\subsection{Number of Schröder paths of semilength $n$ avoiding $UF$}
Schröder paths are lattice paths consisting of the steps $U=(1,1), D=(1,-1)$ and $F=(2,0)$ which start at $(0,0)$, end at $(2n,0)$ and never go below the $x$-axis. In this section we are dealing with Schröder paths of length $2n$ avoiding $p=UF$. These objects are enumerated by OEIS \oeis{A007317} and have been studied by Yan in \cite{Yan}, where a bijection with Schröder paths without peaks at even level as well as two pattern avoiding partitions were constructed.
The generating function for Schröder paths avoiding $UF$ can be obtained by a first passage decomposition -- if $S^*$ denotes all Schröder paths avoiding $UF$, then
$$S^*=\varepsilon \cup F\times S^* \cup UD \times S^* \cup U \times (S^*\setminus(\varepsilon \cup F\times S^*))\times D \times S^*,$$
i.e. a Schröder path avoiding $UF$ is either empty, or starts with either F followed by another Schröder path avoiding $UF$, UD and another Schröder path avoiding $UF$ or starts with an up step, followed by a nonempty Schröder path avoiding $UF$ which does not start with F, a down step to altitude zero (the first passage) and another Schröder path avoiding $UF$. For generating functions, this translates to
$$F(x)=1+2xF(x)+x(F(x)-1-xF(x))F(x),$$
where $x$ encodes semilength. From here, the generating function can easily be obtained by solving a quadratic equation. However, in many cases a first passage decomposition does not work while the enumeration problem can still be solved by the vectorial kernel method.
The automaton describing Schröder paths avoiding $UF$ is given by
\begin{center}
\includegraphics[height=3cm]{automaton-schroeder-noUF.pdf}
\end{center}
Its adjacency matrix is
$$A(t,u)=
\begin{pmatrix}
t^2+tu^{-1} & tu\\
tu^{-1} & tu\\
\end{pmatrix}.
$$
Thus the kernel is given by
$$K(t,u)=\det(I-A)=\frac{t^3u^2-t^2u-tu^2-t+u}{u}.$$
Its roots are
$$u_{1/2}=\frac{1-t^2\pm\sqrt{1-6t^2+5t^4}}{2t(1-t^2)},$$
the root with minus being the small root.
Denote $M_0$ the generating function of the walks ending in state $X_0$, i.e., with a $D$ or $F$-step, and $M_1$ the generating function of the walks ending in state $X_1$, i.e., in an $U$-step. Via a step-by-step-construction we obtain the following system of equations for the generating functions:
$$(M_0,M_1)=(1,0)+(M_0,M_1)A-\{u^{<0}\}(M_0,M_1)A.$$
This can be rephrased as
$$(M_0,M_1)(I-A)=(1,0)-\{u^{<0}\}(M_0,M_1)A.$$
We have that
$$\{u^{<0}\}(M_0,M_1)A=(tu^{-1}m_0,0),$$
where $m_0=[u^0](M_0+M_1)$. Thus the forbidden vector $F$ is
$$F=(1,0)-\{u^{<0}\}(M_0,M_1)A=(1-tu^{-1}m_0, 0).$$
Using
$$\text{adj}(I-A)=
\begin{pmatrix}
1-tu & tu\\
tu^{-1} & 1-tu^{-1}-t^2
\end{pmatrix}$$
we obtain
$$\vec{v}=\text{adj}(I-A)\cdot\begin{pmatrix} 1\\ 1\end{pmatrix}=\begin{pmatrix} 1\\1-t^2\end{pmatrix}.$$
Thus we have that
$$\Phi(t,u)=u^eF\cdot\vec{v}=u-tm_0.$$
Using
$$\Phi(t,u)=G(t,u)(u-u_1)$$
and comparing coefficients we obtain
$$G(t,u)=1.$$
Using
$$M(t,u)=\frac{G(t,u)}{u^e K(t,u)}(u-u_1(t))=\frac{1}{t^3u^2-t^2u-tu^2-t+u}\left(u-\frac{1-t^2-\sqrt{1-6t^2+5t^4}}{2t(1-t^2)}\right)$$
we obtain for the generating function $M(t)$ of meanders
$$M(t)=M(t,1)=\frac {2\,{t}^{3}-{t}^{2}-2\,t-\sqrt {5\,{t}^{4}-6\,{t}^{2}+1}+1}{2t \left( {t}^{2}-1 \right) \left( {t}^{3}-{t}^{2}-2\,t+1 \right) }$$
and the generating function $E(t)$ of excursions
$$E(t)=M(t,0)=\frac{1-t^2-\sqrt{1-6t^2+5t^4}}{2t^2(1-t^2)}.$$
Making a transition to semilength (i.e., the substitution $x:=t^2$) we obtain exactly the same result for the generating function as in \cite{Yan}.
\subsection{Schröder paths of semilength $n$ having $k$ ascents}
\begin{defi}
An \emph{ascent} in a Schröder path is a maximal string of up-steps.
\end{defi}
\begin{figure}
\caption{A Schröder path with $k=4$ ascents (marked in red).}
\label{pic-asc}
\end{figure}
\begin{thm}
\label{schroeder-asc}
Let $X_n$ be the random variable counting ascents in a Schröder path of length $2n$ which is chosen uniformly at random among all Schröder paths of length $2n$. Then $\mathbb{E} (X_n) \sim (\sqrt{2} -1 ) n$ for $n\to\infty$.
\end{thm}
\noindent\textbf{Remark:} This theorem was formulated as a conjecture by D. Callan in the OEIS, entry \oeis{A090981}.\\
\\
Before we give the proof, let us first recall some central definitions and theorems of analytic combinatorics. Proofs and more details can be found in \cite{FS-anacomb}.
\begin{defi}
Let $R$ be a real number greater than one, and $\phi$ be an angle such that $0<\phi<\frac{\pi}{2}$. An open $\Delta$-\emph{domain} (at $1$), denoted $\Delta (\phi, R)$, is then defined as
$$\Delta(\phi, R):=\{z: |z|<R, z\not=1, |\arg(z-1)|<\phi\}.$$
For any complex number $\zeta\not=0$ a $\Delta$-\emph{domain at} $\zeta$ is the image of a $\Delta$-domain at $1$ under the mapping $z\mapsto \zeta z$. A function is called $\Delta$-\emph{analytic} if it is analytic in some $\Delta$-domain.
\end{defi}
\begin{thm}
\label{thm-fs}
Let $f(z)=(1-z)^{-\alpha}$ for $\alpha\in\mathbb{C}\setminus\mathbb{Z}_{\leq 0}$. Then
$$[z^n]f(z)=\frac{n^{\alpha-1}}{\Gamma(\alpha)}\left(1+O\left(\frac{1}{n}\right)\right),$$
where $\Gamma$ denotes the Gamma-function.
\end{thm}
\begin{thm}[Transfer theorem]
\label{transfer-thm}
Suppose that $f$ satisfies in an intersection of a neighborhood of 1 with a $\Delta$-domain the condition
$$f(z)=O\left((1-z)^{-\alpha}\left(\log\frac{1}{1-z}\right)^{\beta}\right).$$
Then
$$[z^n]f(z)=O(n^{\alpha -1}(\log n)^\beta).$$
The same statement also holds for $o$-notation.
\end{thm}
\begin{cor}
\label{cor-fs}
Let $f(z)$ be $\Delta$-analytic and $f(z)\sim (1-z)^{-\alpha}$ for $z\to 1, z\in \Delta$ and $\alpha\not\in\mathbb{Z}_{\leq 0}$. Then
$$[z^n]f(z)\sim\frac{n^{\alpha -1}}{\Gamma(\alpha)}.$$
\end{cor}
\noindent
\textit{Proof of Theorem \ref{schroeder-asc}.}
The (contiguous) patterns $UD$ and $UF$ mark the end of an ascent. Thus, when counting ascents we want to count how many times these two patterns occur. Problems like this can also be dealt with using the vectorial kernel method: Instead of not allowing a transition from one state to another which would complete the pattern, we mark such transitions with a new variable and then read off the corresponding coefficients in the generating function in order to obtain the number of walks where this pattern occurs $k$ times, since it is encoded by the $k$-th power of this new variable.
Our problem can be encoded by the following automaton:\\
\begin{center}
\includegraphics[height=3cm]{automaton-schroeder-ascents.pdf}
\end{center}
The red arrow marks the ascents we want to count and will be marked by a new variable $v$ in the adjacency matrix. Its adjacency matrix is given by
$$
A=\begin{pmatrix}
tu^{-1}+t^2 & tu\\
(tu^{-1}+t^2){\color{red} v} & tu\\
\end{pmatrix}$$
where $u$ encodes altitude, $t$ encodes length of the path, and $v$ counts the number of ascents. Thus we have that
$$I-A=\begin{pmatrix}
1-tu^{-1}-t^2 & -tu\\
-tu^{-1}v-t^2v & 1-tu\\
\end{pmatrix}.$$
The kernel is then given by
\begin{equation}
\label{kern-asc}
K(t,u)=\det(I-A)=u^{-1}((t^3-t^3v-t)u^2+(1-t^2v)u-t).
\end{equation}
Its zeroes are
$$u_{1,2}=\frac{1-t^2v\pm\sqrt{t^4(v-2)^2-2t^2(v+2)+1}}{2t(1+t^2(v-1))},$$
the one with minus being the small root. Hence, the number of small roots is $e=1$.
Writing $M_0=M_0(t,u,v)$ for the walks ending in state $X_0$ (i.e. in an $F$- or $D$-step) and $M_1=M_1(t,u)$ for the walks ending in state $X_1$ (i.e. in an $U$-step), we obtain the following vectorial functional equation
\begin{equation}
\label{feqn-asc}
(M_0,M_1)(I-A)=(1,0)-\{u^{<0}\}((M_0,M_1)A).
\end{equation}
We are interested in $M(t,0,v)=M_0(t,0,v)$, i.e. walks ending at altitude zero (since walks ending in state $X_1$ end in an up-step, they have final altitude at least 1, they will not contribute). In order to compute the forbidden vector $F=(1,0)-\{u^{<0}\}((M_0,M_1)A)$ we compute
$$(M_0,M_1)A=(tu^{-1}M_0+t^2M_0+tu^{-1}vM_1+t^2vM_1, tu(M_0+M_1)).$$
Writing $m_0:=[u^0]M_0(t,u)$ and using $[u^0]M_1(t,u)=0$ we obtain
$$\{u^{<0}\}((M_0,M_1)A)=(tu^{-1}m_0,0)$$
and
$$F=(1-tu^{-1}m_0,0).$$
The adjugate of $I-A$ is given by
$$\text{adj}(I-A)=\begin{pmatrix}
1-tu & tu\\
t^2v+tu^{-1}v & -t^2-tu^{-1}+1\\
\end{pmatrix}$$
and thus the autocorrelation vector $\vec{v}$ is
$$\vec{v}=\text{adj}(I-A)\cdot\begin{pmatrix} 1\\ 1\end{pmatrix}= \begin{pmatrix} 1\\ t^2v+tu^{-1}v-t^2-tu^{-1}+1\end{pmatrix}.$$
We obtain that
$$\Phi(t,u)=u^{e}F\cdot\vec{v}=u-tm_0.$$
Using
$$\Phi(t,u)=G(t,u)(u-u_1)$$
where $u_1$ is the small root of the kernel we obtain that $\deg_u G=0$ and by comparing coefficients we obtain that
$$G=1 \quad\text{and}\quad Gu_1=tm_0.$$
Thus we have
$$M(t,0,v)=E(t,v)=m_0=\frac{Gu_1}{t}=\frac{1-t^2v-\sqrt{t^4(v-2)^2-2t^2(v+2)+1}}{2t^2(1+t^2(v-1))}.$$
Transitioning to semilength $x:=t^2$ (and omitting the dependency on $u$) we obtain
$$E(x,v)=\frac{1-xv-\sqrt{1-2x(v+2)+x^2(v-2)^2}}{2x(1+x(v-1))}.$$
We are interested in the asymptotic behavior of
$$\mathbb{E}X_n=\frac{[x^n]\partial_v E(x,v)|_{v=1}}{[x^n]E(x,1)}.$$
We have
\begin{equation}
\label{mx1}
E(x,1)=\frac{1-x-\sqrt{1-6x+x^2}}{2x},
\end{equation}
which is the generating function of Schröder paths, and
\begin{equation}
\label{dvmx1}
\partial_v E(x,v)|_{v=1}=\frac{x^2-5x+2+(x+2)\sqrt{1-6x+x^2}}{2\sqrt{1-6x+x^2}}=\frac{x+2}{2}+\frac{x^2-5x+2}{2\sqrt{1-6x+x^2}}.
\end{equation}
By the rules for computing limits we have
$$\lim_{n\to\infty}\mathbb{E}X_n=\lim_{n\to\infty}\frac{[x^n]\partial_v E(x,v)|_{v=1}}{[x^n]E(x,1)}=\frac{\lim_{n\to\infty}[x^n]\partial_v E(x,v)|_{v=1}}{\lim_{n\to\infty}[x^n]E(x,1)}$$
thus it remains to compute the coefficient asymptotics for (\ref{mx1}) and (\ref{dvmx1}).
First we are going to determine
$$[x^n]E(x,1)=[x^{n+1}]\frac{-\sqrt{1-6x+x^2}}{2}$$
for $n$ large. The discriminant $1-6x+x^2$ has the roots $x_{1,2}=3\pm\sqrt{8}$, where $\rho=3-\sqrt{8}$ is the dominant singularity and $3+\sqrt{8}$ lies outside every $\Delta$-domain around $\rho$. First, we want to move the dominant singularity to one in order to use the above theorems. This can be done via the substitution $z=\frac{x}{3-\sqrt{8}}$. We have that
\begin{align*}
\sqrt{1-6x+x^2} &=\sqrt{3-\sqrt{8}-x}\cdot \sqrt{3+\sqrt{8}-x}=\sqrt{3-\sqrt{8}}\sqrt{1-z}\cdot\sqrt{3+\sqrt{8}-(3-\sqrt{8})z}\\
&\sim(3-\sqrt{8})^{1/2}(2\sqrt{8})^{1/2}\sqrt{1-z}
\end{align*}
locally for $z\to 1$. Thus, by Corollary \ref{cor-fs} with $\alpha=-\frac{1}{2}$ we have that
\begin{align}
[x^n]E(x,1) &\sim [x^{n+1}]\frac{1}{2}(2\sqrt{8}(3-\sqrt{8}))^{1/2}\left(-\sqrt{1-\frac{x}{3-\sqrt{8}}}\right)\nonumber\\
&= -\frac{1}{2} (2\sqrt{8}(3-\sqrt{8}))^{1/2} (3-\sqrt{8})^{-n-1}[z^{n+1}]\sqrt{1-z}\nonumber\\
&= -\frac{1}{2} (2\sqrt{8})^{1/2} (3-\sqrt{8})^{-n-1/2}\frac{(n+1)^{-3/2}}{\Gamma(-\frac{1}{2})}\nonumber\\
&\sim \frac{1}{2} (3-\sqrt{8})^{-n-1/2} \frac{(2\sqrt{8})^{1/2}}{2\sqrt{\pi}}n^{-3/2}\label{xnm}
\end{align}
for $n\to\infty$.
In order to compute $[x^n]\partial_vE(x,v)|_{v=1}$ we first compute $[x^n](1-6x+x^2)^{-1/2}$ because this expression will appear in the computation of $[x^n]\partial_vE(x,v)|_{v=1}$. By the substitution $z=\frac{x}{3-\sqrt{8}}$ and Corollary \ref{cor-fs} with $\alpha=\frac{1}{2}$ we obtain
\begin{align}
[x^n](1-6x+x^2)^{-1/2} &= [x^n]((3-\sqrt{8})-x)^{-1/2}((3+\sqrt{8})-x)^{-1/2}\nonumber\\
&= [z^n](3-\sqrt{8})^{-n-1/2}(1-z)^{-1/2}((3+\sqrt{8})-(3-\sqrt{8})z)^{-1/2}\nonumber\\
&\sim (3-\sqrt{8})^{-n-1/2}(2\sqrt{8})^{-1/2}\frac{n^{-1/2}}{\sqrt{\pi}} \label{aux}
\end{align}
for $n\to \infty$.
For $n$ large we have that
\begin{align*}
[x^n]\partial_vE(x,v)|_{v=1} &=\frac{1}{2}[x^n](x^2-5x+2)(1-6x+x^2)^{-1/2}\\
&= \frac{1}{2}[x^{n-2}](1-6x+x^2)^{-1/2}-\frac{5}{2}[x^{n-1}](1-6x+x^2)^{-1/2}+[x^n](1-6x+x^2)^{-1/2}.
\end{align*}
Using (\ref{aux}) and the fact that $(n-k)^{-1/2}\sim n^{-1/2}$ for $k$ constant and $n\to \infty$ we obtain after some simplifications that
\begin{equation}
\label{xndvmx1}
[x^n]\partial_vE(x,v)|_{v=1} \sim \frac{2-\sqrt{2}}{\sqrt{\pi}\,(2\sqrt{8})^{1/2}}\,n^{-1/2}(3-\sqrt{8})^{-n-1/2}
\end{equation}
Using the expressions for (\ref{xnm}) and (\ref{xndvmx1}) we obtain that for $n\to\infty$ the expected value of ascents behaves like
$$\mathbb{E}X_n\sim\frac{(3-\sqrt{8})^{-n-1/2}(2-\sqrt{2})}{\sqrt{\pi}n^{1/2}(2\sqrt{8})^{1/2}}\cdot\frac{2\cdot2\sqrt{\pi}n^{3/2}}{(3-\sqrt{8})^{-n-1/2}(2\sqrt{8})^{1/2}}$$
which, after some simplifications becomes
\begin{equation}
\label{mu}
\mathbb{E}X_n\sim(\sqrt{2}-1)n.
\end{equation}
This proves Callan's conjecture.
$\Box$
\begin{thm}
Let $X_n$ be the random variable counting ascents in a Schröder path of length $n$ which is chosen uniformly at random among all Schröder paths of length $n$. Then
\begin{equation}
\label{sigma}
\mathbb{V}X_n\sim\frac{188-133\sqrt{2}}{8\sqrt{2}-12}n\approx 0.1317\, n
\end{equation}
for $n\to \infty$.
\end{thm}
\begin{proof}
The variance can be computed using similar means as the expected value. We have that
\begin{equation}
\label{var}
\mathbb{V}(X_n) =\frac{[x^n]\partial_v^2E(x,v)|_{v=1}}{[x^n]E(x,1)}+\frac{[x^n]\partial_vE(x,v)|_{v=1}}{[x^n]E(x,1)}-\left(\frac{[x^n]\partial_vE(x,v)|_{v=1}}{[x^n]E(x,1)}\right)^2.
\end{equation}
The second derivative of $E$ with respect to $v$ is given by
$$\partial_v^2E(x,v)|_{v=1}=(-x^5+11x^4-33x^3+21x^2+2x)(x^2-6x+1)^{-3/2}-\frac{x^4-8x^3+13x^2-2x}{x^2-6x+1}.$$
Using the substitution $z=\frac{x}{3-\sqrt{8}}$ and the tables for the asymptotics of standard functions from \cite{FS-anacomb}, p. 388 we see that
\begin{align*}
[z^n](1-z)^{1/2} &\sim -\frac{1}{\sqrt{\pi n^3}}\left(\frac{1}{2}+\frac{3}{16n}+\frac{25}{256n^2}+\mathcal{O}\left(\frac{1}{n^3}\right)\right),\\
[z^n](1-z)^{-1/2} &\sim \frac{1}{\sqrt{\pi n}}\left(1-\frac{1}{8n}+\frac{1}{128n^2}+\mathcal{O}\left(\frac{1}{n^3}\right)\right),\\
[z^n](1-z)^{-1} &\sim 1,\\
[z^n](1-z)^{-3/2} &\sim \sqrt{\frac{n}{\pi}}\left(2+\frac{3}{4n}-\frac{7}{64n^2}+\mathcal{O}\left(\frac{1}{n^3}\right)\right)
\end{align*}
(we need the additional terms because there will be a cancellation of the leading terms of order $n^2$, just the previously computed terms will not do the trick).
Plugging these as well as the correct asymptotic growth rates in the formula for the variance (\ref{var}) we obtain the claim of the theorem after some cancellations and computing limits.
\end{proof}
With the help of the Drmota-Lalley-Woods theorem we can obtain even more information about the limiting distribution of the number of ascents.
\begin{thm}[Drmota-Lalley-Woods theorem, limiting distribution version from \cite{BD}] Suppose that $\mathbf{y}=\mathbf{P}(z,\mathbf{y},u)$ is a strongly connected and analytically well defined entire or polynomial system of equations that depends on $u$ and has a solution $\mathbf{f}$ that exists in a neighborhood of $u=1$. Furthermore, let $h(z,u)$ be given by
$$h(z,u)=\sum_{n\geq 0} h_n(u)z^n = H(z,\mathbf{f}(z,u), u),$$
where $H(z,y,u)$ is entire or a polynomial function with non-negative coefficients that depends on $\mathbf{y}$ and suppose that $h_n(u)\not=0$ for all $n\geq n_0$ (for some $n_0\geq 0$).
Let $X_n$ be a random variable whose distribution is defined by
$$\mathbb{E}\left[u^{X_n}\right]=\frac{h_n(u)}{h_n(1)}.$$
Then $X_n$ has a Gaussian limiting distribution. More precisely, we have $\mathbb{E}[X_n]=\mu n+O(1)$ and $\mathbb{V}[X_n]=\sigma^2 n+O(1)$ for constants $\mu>0$ and $\sigma^2\geq 0$ and
$$\frac{1}{\sqrt{n}}(X_n-\mathbb{E}[X_n])\to N(0,\sigma^2).$$
\end{thm}
\noindent
\begin{proof}
See \cite{BD} or \cite{Drmota}.
\end{proof}
\begin{cor}
The number of ascents in Schröder paths has a Gaussian limiting distribution with parameters $\mu=\sqrt{2}-1$ and $\sigma^2=\frac{188-133\sqrt{2}}{8\sqrt{2}-12}$.
\end{cor}
\noindent
\textit{Proof.} Let
$$P(z,y,u)=z(1+z(u-1))y^2+zuy+1.$$
Solving the system $y=P(z,y,u)$ gives us
$$f(z,u)=\frac{1-zu-\sqrt{1-2z(u+2)+z^2(u-2)^2}}{2z(1+z(u-1))}$$
which is a formal power series in a neighborhood of $u=1$ (the other solution with plus is not and can be disregarded). The function $f$ coincides with $E(x,v)$ (after a substitution $z=x$ and $u=v$). The system is strongly connected since it consists of only one equation in one unknown. Let $H(z,y,u)=y$ such that $H(z,f,u)=f(z,u)$. From the combinatorial interpretation we see that $h_n(u)\not=0$ for $n\geq n_0$ (remember, $h_n(u)$ counts ascents in Schröder paths of length $n$, thus being a power series of the form $1+c_1u+O(u^2)$ for any $n>0$, the 1 comes from the Schröder path consisting only of flat steps, thus having no ascent). The random variable $X_n$ counting ascents has distribution defined by
$$\mathbb{E}\left[u^{X_n}\right]=\frac{h_n(u)}{h_n(1)}.$$
Thus, we can apply the Drmota-Lalley-Woods theorem and obtain that $X_n$ has Gaussian limiting distribution. We already computed the constants $\mu=\sqrt{2}-1$ and $\sigma^2=\frac{188-133\sqrt{2}}{8\sqrt{2}-12}$ earlier in Equations \ref{mu} and \ref{sigma}. \hspace{\fill}$\Box$
\section{Conclusion}
The vectorial kernel method is a powerful tool, unifying various results on the enumeration of lattice paths which avoid a given pattern. In this paper the vectorial kernel method was generalized to lattice paths with longer steps and used to prove a conjecture on the asymptotic behavior of the expected number of ascents in Schröder paths. The results from this paper also allow to tackle other parameters (e.g. humps, peaks or plateaus) of paths with longer steps obeying some constraints that can be described by a finite automaton which might become a subject of further studies.
\end{document} |
\begin{document}
\title{Finite key performance of satellite quantum key distribution under practical constraints}
\author{Jasminder S. Sidhu}
\email{[email protected]}
\thanks{Corresponding author email}
\affiliation{SUPA Department of Physics, University of Strathclyde, Glasgow, G4 0NG, United Kingdom}
\author{Thomas Brougham}
\affiliation{SUPA Department of Physics, University of Strathclyde, Glasgow, G4 0NG, United Kingdom}
\author{Duncan McArthur}
\affiliation{SUPA Department of Physics, University of Strathclyde, Glasgow, G4 0NG, United Kingdom}
\author{Roberto G. Pousa}
\affiliation{SUPA Department of Physics, University of Strathclyde, Glasgow, G4 0NG, United Kingdom}
\author{Daniel K. L. Oi}
\affiliation{SUPA Department of Physics, University of Strathclyde, Glasgow, G4 0NG, United Kingdom}
\date{\today}
\begin{abstract}
Global-scale quantum communication networks will require efficient long-distance distribution of quantum signals. Optical fibre communication channels have range constraints due to exponential losses in the absence of quantum memories and repeaters. Satellites enable intercontinental quantum communication by exploiting more benign inverse square free-space attenuation and long sight lines. However, the design and engineering of satellite quantum key distribution (QKD) systems is difficult and characteristic differences to terrestrial QKD networks and operations pose additional challenges. The typical approach to modelling satellite QKD (SatQKD) has been to estimate performances with a fully optimised protocol parameter space and with few payload and platform resource limitations. Here, we analyse how practical constraints affect the performance of SatQKD for the Bennett-Brassard 1984 (BB84) weak coherent pulse decoy state protocol with finite key size effects. We consider engineering limitations and trade-offs in mission design including limited in-orbit tunability, quantum random number generation rates and storage, and source intensity uncertainty. We quantify practical SatQKD performance limits to determine the long-term key generation capacity and provide important performance benchmarks to support the design of upcoming missions.
\end{abstract}
\maketitle
\section{Introduction}
\label{sec:intro}
\noindent
Quantum technologies have the potential to enable or greatly enhance applications including secure communications~\cite{QKDreview2020, sidhu2021advances, Liorni2021, Wallnofer2022}, improved computation~\cite{Donkor2004distributed, Meter2016the}, sensing, and imaging~\cite{Giovannetti2011_NP, Sidhu2017_PRA, Sidhu2018_arxiv, Moreau2019_NRP, Sidhu2020_AVS, Polino2020_AVS, Sidhu2021_PRX}. In addition, a distributed ecosystem of quantum technologies would provide further performance improvements and additional capabilities. The distribution of quantum resources across such a networked architecture comprises the fundamental building blocks of the quantum internet~\cite{Wallnofer2022}.
Satellites will be integral to a scalable architecture to expand the range of quantum networks to global scales, motivating the surge in recent activities in space quantum communications~\cite{Liao2017_N, Yin2017_S,Kerstel2018_EPJ, Mazzarella2020_C, Villar2020,Yin2020_N,Gundogan2021_NPJQI, belenchia2021quantum,gundogan2021topical}. Satellite-based quantum key distribution (SatQKD) is a precursor to long-range applications of general quantum communication~\cite{sidhu2021advances, belenchia2021quantum}. Although a general-purpose quantum network requires substantial advancements in quantum memories, multi-partite entangled state generation, routing techniques, and error correction~\cite{wehner2018quantum}, the development of SatQKD provides crucial knowledge and experience for global-scale quantum networks by developing the infrastructure and maturity of space-based long-distance quantum links.
Pioneering quantum communication demonstrations by the ${\sim}650$~kg Micius satellite showed that SatQKD and entanglement distribution is possible over record scales~\cite{jianwei2018progress, Yin2017_S, Lu2022_RMP}. Building upon these results, small satellite (${<}100$~kg) missions are attractive due to lower development costs and faster development times compared with conventional large satellites. However, the limited size, weight, and power (SWaP) available on small satellites and reduced capabilities put them at a marked disadvantage versus larger satellites such as Micius. Despite this, feasibility studies for small-satellite-based QKD and in-orbit demonstration CubeSat-based pathfinder missions are promising~\cite{Villar2020, Islam2022finite}. For low-Earth orbit (LEO) satellites, a particular challenge is the limited time window to operate a quantum channel with an optical ground station (OGS)~\cite{sidhu2021key, Sidhu2023satellite}. This limitation disproportionately constrains the volume of secure keys that can be generated due to a pronounced impact of statistical uncertainties in estimated parameters. Together with the constrained SWaP available, small-satellite missions operate under the framework of finite-resource quantum information. Understanding the impact of these constraints on SatQKD has received little attention and has both immediate and practical relevance to future satellite-based missions. Here, we fill this gap by establishing practical performance bounds on SatQKD operation under a representative set of physical resources.
The first constraint we consider is the limited practicality of reconfiguring all QKD protocol parameters in-flight and on a pass-by-pass basis. SatQKD modelling often does not consider this, optimising the secret key length (SKL) over the entire parameter space of the protocol for each pass scenario~\cite{Sidhu2022_npjQI, Sidhu2021arxiv}. It is more realistic to consider a number of parameters as fixed, that include the operating basis bias at the OGS and the transmitted intensities. Parameter fixing has been explored in the context of terrestrial free-space QKD~\cite{airqkd2022}. In SatQKD the highly variable channel losses in SatQKD with fixed parameters require more sophisticated modeling and analysis. The limited transmission times of SatQKD further make these effects more pronounced, highlighting the importance of considering limited system adaptability. We consider a second constraint from small satellite SWaP envelopes that may limit the quantum random number generation (QRNG) subsystem driving a prepare and measure source. This directly impacts the achievable SKL by limiting signal transmission.
We start with an overview of our SatQKD system modelling and the protocol optimisation in section~\ref{sec:background}. Given the recent progress of SatQKD sources, we explore the effect of the repetition rate on key length in section~\ref{sec:source_rate}. Here, we highlight the impact of finite-key effects and establish minimum source rates based on tolerance to operational losses. Given the difficulty of implementing a SatQKD system where all parameters can be reconfigured for different overpasses, section~\ref{sec:param_fixing} explores the impact of fixed parameters on the key length. In particular, we fix the signal intensities and the receiver basis bias. In section~\ref{sec:mem_buffer}, we explore SKL generation for restricted QRNG resources and illustrate the significant impact of limited random bit generation rates on the SKL. We also determine the minimum memory storage required for non-zero finite key extraction for one overpass. Section~\ref{sec:int_fluc} explores the impact of intensity uncertainties due to limited onboard monitoring accuracy. Conclusions and discussions are provided in section~\ref{sec:conc}, where we provide key conclusions to help overcome these limitations for future SatQKD systems.
\section{Background and system model}
\label{sec:background}
\noindent
In this section, we detail our method to model channel losses, how to determine the SKL, and the optimisations considered in this work. The secret key length (SKL) achieved with the efficient BB84 protocol from a single overpass is calculated taking into account finite block size effects.
\subsection{System model}
\label{sec:system_model}
\begin{figure}
\caption{\textbf{General satellite overpass geometry.}
\label{fig:geom}
\end{figure}
\noindent
We consider a satellite in a circular Sun-synchronous orbit (SSO) of altitude $h=500$~km implementing downlink QKD to an OGS during the night to minimise background light. The elevation and range of the satellite-OGS channel are calculated as a function of time for different satellite overpass geometries and ground track offsets, $d_\text{min}$, and maximum satellite overpass elevations, $\theta_\text{max}$ (Fig.~\ref{fig:geom}). Different satellite overpasses have different values for $d_\text{min}$. This means $d_\text{min}$ can be used to characterise each overpass. In fact, for a fixed orbital altitude, the ground track offset $d_\text{min}$ and the maximum elevation angle, $\theta_\text{max}$, are equivalent. The ideal overpass corresponds to the satellite passing the OGS directly overhead, or zenith ($d_\text{min} = 0$ m, $\theta_\text{max}=90^\circ$), since it provides the longest transmission time and has the lowest average channel loss. Generally, a satellite will not pass zenith but will reach a maximum elevation $\theta_\text{max}({<}90^\circ)$. We consider a minimum elevation transmission limit of $\theta_\text{min}=10^\circ$ that reflects practicalities such as local horizon visibility and system pointing limitations.
The instantaneous link efficiency depends on the elevation $\theta(t)$, the range $R(t)$ between the satellite and OGS, and source wavelength $\lambda$, and is used to generate count statistics. For a fixed orbital altitude, the satellite-OGS range is implicitly defined through the satellite's elevation. The link efficiency is then defined as (in dB),
\begin{align}
\eta_{\lambda}\left(\theta\right) = \eta_\text{diff}\left(\lambda,\theta\right) + \eta_\text{atm}\left(\lambda,\theta\right)
+ \eta_\text{int},
\label{eq:ins_loss_func}
\end{align}
where $\eta_\text{diff}$, $\eta_\text{atm}$, and $\eta_\text{int}$ are losses from diffraction, atmospheric scattering and absorption, and a fixed `intrinsic' system efficiency respectively. To characterise the overall system electro-optical efficiency independent of satellite overpass trajectory, we define the system loss metric, $\eta_\text{loss}^\text{sys}$, as the total instantaneous link efficiency at zenith. Diffraction losses are estimated using the Fraunhofer approximation to the Rayleigh-Sommerfeld diffraction integral to determine the power at the receiver, $P_R$,
which is normalised by the power at the transmitter, $P_T$ such that $\smash{\eta_\text{diff} = -10 \log_{10}(P_R/P_T)}$. Atmospheric absorption and scattering losses are calculated using $\smash{\eta_\text{atm} = -10 \log_{10}T_{\lambda}}$, where the transmissivity, $T_{\lambda}$, is determined using MODTRAN for a given wavelength and elevation~\cite{Modtran_inproceedings}. The `intrinsic' system loss, $\eta_\text{int}$, accounts for: fixed losses inherently built into the system due to detector efficiency, internal losses of the receiver; pointing losses; and imperfect non-diffraction-limited beam propagation, and is conservatively set to 20~dB to model a SatQKD system with overall $\eta_\text{loss}^\text{sys} = 40$~dB. Different SatQKD systems with various fixed losses can be modelled by scaling the $\eta_\text{loss}^\text{sys}$ value. See Methods~\ref{subsection:loss_modelling} for more detail on loss modelling.
\begin{figure}
\caption{\textbf{Link model for satellite-to-ground QKD}
\label{fig:loss}
\end{figure}
The link loss characterises the probability that a single photon transmitted by the satellite is detected by the OGS. A lower dB value of $\eta_\text{link}$ represents smaller loss due to better system electro-optical efficiency. This improvement could stem from the use of larger transmit and receive aperture diameters, better pointing accuracy, lower receiver internal losses, and higher detector efficiencies. Internal transmitter losses are not included since they can be countered by adjusting the weak coherent pulse (WCP) source to maintain the desired exit aperture intensities~\cite{Bourgoin:2013fk}. We also do not explicitly consider time-varying transmittance, modelling the average change in channel loss due only to the change in elevation with time. For discrete variable QKD (DV-QKD) protocols, e.g. BB84, channel transmissivity fluctuations do not directly impact the secret key rate, in contrast to continuous variable QKD where this appears as excess noise leading to key reduction~\cite{Usenko2012_NJP,hosseinidehaj2020composable}.
We model a small satellite QKD system, for example~\cite{colquhoun2022responsive}, implementing a decoy-state BB84 protocol in a downlink configuration for QKD service provision using a WCP source. We consider a source wavelength of $\lambda=785$~nm, a transmitter (receiver) aperture diameter of 8~cm (70~cm), and a Gaussian beam waist of 8~cm. Our general analysis is wavelength agnostic, but we specifically analyse $\lambda=785$~nm as this is representative of several missions currently in development~\cite{Podmore2021qkd,Islam2022finite, colquhoun2022responsive}, partly due to favorable atmospheric transmission and the availability of relevant sources and detectors~\cite{Bourgoin:2013fk}. Fig.~\ref{fig:loss} illustrates the modelled transmission loss and link efficiency for different overpass geometries.
In addition to this link loss, we include several error sources. First, after-pulsing in a photon detector can have adverse effects on the estimate of click statistics. While the after-pulsing probability is detector and operating condition dependent, we take a value of 0.1\%, which is consistent with the literature~\cite{Hwang2003_PRL,Chen2021,Zhang2017_PRA}. Second, the intrinsic quantum bit error rate, $\text{QBER}_\text{I}$, is defined as the lumped error from source quality, receiver measurement fidelity, basis misalignment, and polarisation fluctuations~\cite{Toyoshima2009_OE}. Finally, we define the extraneous count probability, $p_\text{ec}$, as the sum of dark and background light count rates and is assumed constant and independent of elevation. Together, these losses and errors provide a complete characterisation of a SatQKD system and are summarised in Table~\ref{tab:system_parameters}.
Before concluding this section, we note that our current analysis could be extended to model an uplink channel by using a suitable link-loss model (loss vs elevation). A ground-to-satellite link will increase channel losses due to the shower curtain effect. While turbulence is highly dependent on elevation, it generally leads to an additional 20~dB of loss compared to a downlink channel~\cite{Bourgoin:2013fk}.
\newcommand*{\tabindent}{ \hspace{-1mm}}
\newlength{\thickarrayrulewidth}
\setlength{\thickarrayrulewidth}{2.1\arrayrulewidth}
\renewcommand{\arraystretch}{1.25}
\setlength{\tabcolsep}{8pt}
\begin{table}[t!]
\centering
\begin{tabular}{m{4cm}|m{1.4cm}|m{1.4cm}}
\thickhline
\textbf{Parameter description} & \textbf{Notation} & \textbf{Value} \\
\hline
Transmitter aperture diameter & $T_X$ & 8 cm \\
Receiver aperture diameter & $R_X$ & 70 cm \\
Gaussian Beam waist & $w_0$ & 4 cm \\
Source wavelength & $\lambda$ & 785 nm \\
Source rate & $f_s$ & 500 MHz \\
Satellite orbit altitude & $h$ & $500$ km \\
Minimum elevation limit & $\theta_\text{min}$ & $10^\circ$ \\
Intrinsic quantum bit error rate & $\text{QBER}_\text{I}$ & 0.5\% \\
Extraneous count probability & $p_\text{ec}$ & $5\times 10^{-7}$ \\
After-pulsing probability & $p_\text{ap}$ & 0.1\% \\
System loss metric & $\eta^\text{sys}_\text{loss}$ & $40$ dB \\
\textcolor{black!60!white}{
$\hookrightarrow$Diffraction loss at zenith} & \textcolor{black!60!white}{$\eta_\text{diff}(\lambda, 90)$} & \textcolor{black!60!white}{19.4~dB} \\
\textcolor{black!60!white}{$\hookrightarrow$Atmospheric loss at zenith} & \textcolor{black!55!white}{$\eta_\text{atm}(\lambda, 90)$} & \textcolor{black!60!white}{0.6~dB} \\
\textcolor{black!60!white}{$\hookrightarrow$Optical inefficiency} & \multirow{2}{*}{\textcolor{black!60!white}{$\eta_\text{int}$}} & \textcolor{black!60!white}{$12.0$~dB} \\
\textcolor{black!60!white}{$\hookrightarrow$Imperfect beam propagation} & & \textcolor{black!60!white}{$8.0$~dB} \\
\color{black}
Correctness parameter & $\epsilon_c$ & $10^{-15}$ \\
Security parameter & $\epsilon_s$ & $10^{-10}$ \\
\thickhline
\end{tabular}
\caption{\textbf{Reference system parameters}. Transmitter, receiver, and source properties determine range and elevation-dependent loss. The system loss metric, $\eta^\text{sys}_\text{loss}$, defined as the link efficiency at zenith, is 40~dB. The `intrinsic' system loss is broken down into two components (Methods~\ref{subsection:loss_modelling}). $\eta^\text{sys}_\text{loss}$ can be scaled to model other SatQKD systems that differ by a fixed link loss ratio, e.g. different $T_X$ or $R_X$ apertures, or detector efficiencies. The intrinsic quantum bit error rate, $\text{QBER}_\text{I}$, incorporates errors from source quality, receiver measurement fidelity, basis misalignment, and polarisation fluctuations, while the extraneous count probability, $p_\text{ec}$, incorporates detector dark count and background rate. The correctness and security parameters are used to determine the finite-block composable SKL.}
\label{tab:system_parameters}
\end{table}
\subsection{The protocol and secret key length}
\label{subsec:protocol}
\noindent
The QKD protocol we investigate is efficient Bennett-Brassard (BB84) with two decoy states, i.e. three different pulse intensities~\cite{Lim2014_PRA,lo2005efficient,Hwang2003_PRL,Wang2005_PRL,Lo2005_PRL,Yin2020_N}. In this protocol, the transmitter (Alice) and the receiver (Bob) encode bits within one of two polarisation bases, denoted $\mathsf{X}$ and $\mathsf{Z}$. We adopt the convention that the $\mathsf{X}$ basis is used for key bits, while the $\mathsf{Z}$-basis is used to detect an eavesdropper through the phase error rate. Alice prepares bits in the $\mathsf{X}$-basis with probability $P^A_\mathsf{X}$, while Bob measures within the $\mathsf{X}$-basis with probability $P^B_\mathsf{X}$. It is standard to take $P^A_\mathsf{X}=P^B_\mathsf{X}=P_\mathsf{X}$, however, in general it is possible that $P^A_\mathsf{X} \ne P^B_\mathsf{X}$, particularly if one probability is fixed due to practical considerations~\cite{airqkd2022}. We consider phase-randomised coherent pulses where the intensity (mean photon number) $\mu_k\in\{\mu_1,\mu_2,\mu_3\}$ is randomly chosen with probability $p_{\mu_k}$. There are alternative carriers to phase-randomised coherent pulses. True single-photon sources could be considered~\cite{Morrison2022_arxiv, Juboori2023_arxiv, MurtazaOE_2023, Abasifard2023_arxiv}, amongst others~\cite{QKDreview2020}, though these are at a much lower stage of maturity, for terrestrial or space applications, compared with WCP sources.
After the quantum signals are transmitted from Alice to Bob, they perform a standard reconciliation procedure to correlate detection events with transmitted pulses, basis matching, intensity announcement, and parameter estimation. Only the bits in the $\mathsf{X}$-basis are used for the key, while the $\mathsf{Z}$-basis bits are made public. The raw key is formed by performing error correction on the $\mathsf{X}$-basis bits, which necessitates the public exchange of $\lambda_\text{EC}$ bits in the information reconciliation phase. In practice, the value of $\lambda_\text{EC}$ is known from the error correction communication, but for the purposes of modelling we use an estimate that varies with the block size, quantum bit error rate, and the required correctness parameter~\cite{Tomamichel2017_QIP}. This estimate generates suitable values for the error correction efficiency for SatQKD data representative of current engineering efforts and capabilities (see Methods~\ref{subsec:error_corr_term} for a detailed discussion and demonstration). The results for the $\mathsf{Z}$-basis are used to estimate parameters such as the number of bits from vacuum events, $s_{\mathsf{X},0}$, the number of bits from single photon events $s_{\mathsf{X},1}$, and the phase error $\phi_{\mathsf{X}}$. The exact formulas for these terms are provided in Ref.~\cite{Sidhu2022_npjQI}, which is based on Refs.~\cite{Lim2014_PRA,Yin2020_N}. After privacy amplification, the final SKL, $\ell$, is given by~\cite{Lim2014_PRA}
\begin{align}
\ell = \Big\lfloor s_{\mathsf{X},0} + s_{\mathsf{X},1} (1 - h(\phi_\mathsf{X}))
- \lambda_{\text{EC}} - 6 \log_2 \frac{21}{\epsilon_\text{s}} - \log_2 \frac{2}{\epsilon_\text{c}}\Big\rfloor,
\label{eqn:skl_lim_result}
\end{align}
where $h(x)=-x\log_2(x)-(1-x)\log_2(1-x)$ is the binary entropy function, and $\epsilon_{\text{s}}$ and $\epsilon_{\text{c}}$ are the composable security and correctness parameters respectively~\cite{Lim2014_PRA,Renner2006_thesis}.
We can maximise the SKL, Eq.~\eqref{eqn:skl_lim_result} by optimising over the protocol parameters $p_k$, $\mu_k$, and $P_\mathsf{X}$ for a given satellite-OGS overpass, system link efficiency, and system configuration (as in Table~\ref{tab:system_parameters}). The value of $\mu_3$ is set to vacuum since this helps with the estimate of the vacuum counts, $s_{\mathsf{X},0}$~\cite{Lim2014_PRA}. The transmission time window from which the finite block is constructed is an additional important optimisation parameter to maximise the achievable finite key~\cite{Sidhu2022_npjQI}. This is because, under finite-size security analysis, higher QBER increases the minimum raw key length necessary for non-zero key length extraction due to less efficient reconciliation and post-processing overheads. However, taking the largest block size permitted by a satellite overpass is sometimes not the best strategy. This is since data from lower elevations have both smaller count rates and higher signal QBER, which increases the average channel QBER and may offset any improvements to the SKL from larger block sizes. We define the processing block transmission time window to run from $-\Delta t$ to $+\Delta t$, such that the total transmission time is $2\Delta t$ with $t=0$ corresponding to the time of highest elevation $\theta_\text{max}$. The SKL in Eq.~\eqref{eqn:skl_lim_result} is additionally optimised over discretised values for $\Delta t$, and the value for $\Delta t$ chosen that yields the largest SKL. This full optimisation is performed in version 1.1 of the Satellite Quantum Modelling and Analysis (SatQuMA) software~\cite{Sidhu2021arxiv}. For more details on the software and the numerical optimisation see Refs.~\cite{Sidhu2022_npjQI, Sidhu2021arxiv}.
This fully optimised scenario yields an upper bound to SatQKD performance. In practice, these bounds may be difficult to achieve due to constraints and trade-offs in the mission design and operation. In the following section, we provide an overview of modifications to the optimisation problem with constraints that closely reflect operational considerations for the derivation of realistic performance bounds.
\subsection{Practical optimisation of the secret key length}
\label{subsec:optimisation}
\noindent
The original protocol parameter optimisation problem is modified to handle different numerical investigations. Though classical communication constraints are important for SatQKD operations, we do not consider these limitations (see Ref.~\cite{Sidhu2022_npjQI} for a brief discussion). First, section~\ref{sec:source_rate} introduces the source-rate normalised SKL to illustrate the impact of finite-key effects on the SKL and to provide an informed decision on the source rate to consider for the remainder of the work. Second, section~\ref{sec:param_fixing} fixes the values of the signal intensity $\mu_1$, decoy intensity $\mu_2$, and the receiver basis bias $P^B_\mathsf{X}$, since it may not be practical to change these parameters on a pass-by-pass basis in an operational system. The transmitter and receiver basis biases are allowed to differ, i.e. $P^A_\mathsf{X} \ne P^B_\mathsf{X}$, to model a fixed OGS basis bias and adjustable transmitter bias. The SKL is then maximised over the remaining protocol parameter space defined by the set $\{P^A_\mathsf{X}, p_{\mu_1}, p_{\mu_2}, \Delta t\}$. The fixed values for $P^B_\mathsf{X}$, $\mu_1$, and $\mu_2$ are set to those that maximise the expected annual SKL through a procedure detailed in Methods~\ref{subsection:param_fixing}. Third, section~\ref{sec:mem_buffer} explores the impact of QRNG subsystem limitations that may constrain the number of signals that can be transmitted during an overpass. This is modelled using a finite-sized onboard random number memory store, corresponding to an associated transmission cutoff time, from which we determine the reduction in long-term average key generation rate. We also determine the minimum memory buffer required to generate non-zero SKL. Finally, in section~\ref{sec:int_fluc}, we consider the effect of pulse intensity uncertainties on the secure key that can be extracted taking into account reduced intensity knowledge. 
For this, the signal and decoy state intensities are sampled between a range that depends on the uncertainty percentage of the intended intensity values.
\section{Results}
\subsection{Source rate}
\label{sec:source_rate}
\noindent
Micius performed finite key generation with a 100 MHz source repetition rate, later upgraded in-flight to 200~MHz~\cite{Chen2021}. Miniaturisation of such high-speed sources enables their use on small satellites. For example, increasing the source repetition rate leads to a larger block size that reduces statistical uncertainties in parameter estimation, hence a higher finite key rate. This expands the pass opportunities that result in non-zero secret keys, enhancing the robustness and effective key transmission footprint of a SatQKD system~\cite{Sidhu2022_npjQI}. In addition, the use of high-speed sources can help higher altitude SatQKD operation by partially compensating for increased channel losses~\cite{Sidhu2022_npjQI}. In this section, we investigate the effect of operating source rate, $f_s$, on the robustness of SatQKD systems to channel loss in the finite key regime.
To evaluate finite key efficiency, Fig.~\ref{fig:source_vary} illustrates the source rate normalised SKL as a function of source rate for a zenith overpass (solid lines) and a satellite overpass with $\theta_\text{max}=30^\circ$ (dashed lines) for three different system configurations of $\{\text{QBER}_\text{I}, p_\text{ec}\}$. For a given time window $\Delta t$, the block size increases with increasing $f_s$, which improves the normalised finite SKL. This improvement indicates a critical value $f_s^\text{crit}$ below which finite key effects overwhelm raw key transmission and the distillable finite SKL is zero. For $f_s < f_s^\text{crit}$, this \emph{key suppression region} is illustrated in shaded blue for System A with $\text{QBER}_\text{I}=0.1\%$, $p_\text{ec}=1\times10^{-8}$, and $\theta_\text{max}=90^\circ$. Above $f_s^\text{crit}$, we note the SKL scales super-linearly with the source rate due to multiple improvements in parameter estimation, error correction efficiency, and reduced overhead of the composable security parameters with increasing block length.
\begin{figure}
\caption{\textbf{Finite key efficiency vs source rate}}
\label{fig:source_vary}
\end{figure}
The vertical gray line in Fig.~\ref{fig:source_vary} corresponds to $500$ MHz, well outside the key suppression region, that we take as a representative value for a near-term small satellite source. This provides robustness against a range of typical extraneous counts and intrinsic QBERs expected in SatQKD and provides feasible finite key generation for a single satellite overpass, but is compatible with modest receiver detectors. Higher source rates, though providing larger key lengths, require lower detector timing jitter. Silicon single-photon avalanche photodiodes (Si-SPADs) typically have timing jitter in the order of $\sim$ 0.5~ns~\cite{Ceccarelli2021_AQT} compatible with coincidence windows of $\sim$ 1~ns and interpulse separations of 2~ns. Extending clock rates to the GHz range requires lower timing jitters such as provided by superconducting nanowire single-photon detectors (SNSPDs)~\cite{Holzman2019_AQT} at the expense of greater SWaP and cost (SWaP-C) owing to the need for cryogenic operation and single mode coupling that raises further system design issues. Therefore, the following analysis will assume a source rate of $500$~MHz unless stated otherwise given it balances the tradeoff between detector performance requirements, hence SWaP-C, and count rate.
\subsection{Impact of parameter fixing}
\label{sec:param_fixing}
\begin{figure*}
\caption{\textbf{Impact of fixed receiver basis bias and source intensities}}
\label{fig:param_fixing_plots}
\end{figure*}
\noindent
SatQKD modelling often involves optimising the operational parameter space associated with the protocol and system configuration to maximise the number of finite keys generated. However, achieving these optimised key lengths assumes all parameters can be easily changed to operate at their optimised values. It may be desirable on cost, complexity, and robustness grounds to deploy SatQKD systems with limited reconfigurability, motivating analyses where some parameters are fixed. First, the OGS basis choice is often implemented passively using a fixed beamsplitter. Thus, changing receiver basis bias by physically swapping out the beamsplitter for different optimised values on a per-pass basis may be impractical in live deployment. A variable beamsplitter could be considered but with cost, complexity, and performance considerations. Note that the transmitter basis bias can be easily adjusted in the random bit generation and processing of the data used to control the source, hence we consider this parameter to be easily varied. Second, all the operational pulse intensities $\mu_j$ may be fixed pre-flight to avoid more complex source driving systems with increased SWaP-C and reliability concerns. Since the optimal decoy-state intensities strongly depend on the channel loss, background counts, and the satellite's orbital trajectory, fixed values may significantly impact the SKL.
In this section, we determine the impact of these engineering constraints on the finite SKL. We constrain the receiver basis bias and decoy-state intensities to certain fixed values, such that $P^B_\mathsf{X}=\{0.3, 0.5, 0.7, 0.9\}$ (commonly available beamsplitter splitting ratios) in addition to the ideal value of $P_\mathsf{X}^B = 0.84$ that corresponds to a custom beamsplitter and $\{\mu_1, \mu_2, \mu_3\}=\{0.71, 0.14, 0\}$. The derivation of these ideal values can be seen in Methods~\ref{subsection:param_fixing} for fixed parameter optimisation that maximises the long-term average SKL. For these fixed values, Fig.~\ref{fig:param_fixing_plots}(a) illustrates the finite SKL as a function of different satellite overpasses. Despite this restriction, we note it is possible to generate near-optimal SKLs across a wide range of elevation angles. Further, increasing the OGS bias can generate higher finite SKL. However, we observe that for a choice of $P^B_\mathsf{X}=0.9$, it is not possible to extract a secret key at lower $\theta_\text{max}$. This suggests that choosing too large an OGS bias can reduce the key generation capacity, owing to fewer overpass opportunities that generate a non-zero key. To understand this effect, we recall that a larger receiver basis bias corresponds to a smaller portion of received bits dedicated to parameter estimation. Therefore, choosing a large OGS basis bias at larger average channel QBERs leads to less efficient parameter estimation, which generates zero secret keys. SatQKD systems should therefore carefully choose the fixed OGS bias to address the tradeoff between a maximised single pass SKL and the long-term key generation capacity. Notice that the secret key length for $P^B_\mathsf{X}=0.7$ is approximately the same as for $P^B_\mathsf{X}=0.9$, but with non-zero keys at lower elevations.
Fig.~\ref{fig:param_fixing_plots}(b) illustrates the optimal $P^A_\mathsf{X}$ values that maximise the SKL as a function of elevation angle for each fixed value of the receiver basis bias. We first note the basis bias for the transmitter and receiver are generally different, which differs from the usual case considered in the literature. The value of $P^A_\mathsf{X}$ can vary to compensate for the fixed value of $P^B_\mathsf{X}$. One can show that if both $P^B_\mathsf{X}$ and $P^A_\mathsf{X}$ can vary freely, then the optimal raw key length is found for $P^B_\mathsf{X}=P^A_\mathsf{X}$~\cite{airqkd2022}. From Fig.~\ref{fig:param_fixing_plots}(b) we find that for $P^B_\mathsf{X}=0.3$ and 0.5, we observe that $P^A_\mathsf{X} > P^B_\mathsf{X}$. This suggests that a small fixed receiver basis bias leads to too large a portion of signals dedicated to parameter estimation, which is compensated for by choosing a large transmitter basis bias. Equally, for $P^B_\mathsf{X}=0.9$ we observe that $P^A_\mathsf{X} < P^B_\mathsf{X}$. This clearly demonstrates that when we \emph{fix} $P^B_\mathsf{X}$, then choosing an equal basis bias is not optimal. However, when we are free to optimise \emph{both} $P^B_\mathsf{X}$ and $P^A_\mathsf{X}$, then choosing $P^A_\mathsf{X}=P^B_\mathsf{X}$ is optimal~\cite{airqkd2022}.
Despite the impracticality of implementing a fully optimised parameter space, we find a number of ways SatQKD missions can enhance finite key generation. This involves careful selection of $P^B_\mathsf{X}$ that maximises both the single-pass SKL and the long-term key generation capacity and careful selection of the decoy-state intensities that can counter the effects of large channel losses.
\subsection{QRNG subsystem limitations}
\label{sec:mem_buffer}
\noindent
Prepare and measure protocols require random bits for the preparation of signal states. QRNGs with the required rate to feed a high-speed source in real time may incur significant onboard processing resources and SWaP. Alternatively, the random bits can be generated at a much slower rate with less resource-hungry QRNGs prior to the overpass, assuming that the transmission time duty cycle is small compared to the total orbital time. For this latter situation, we consider limits on the amount of onboard storage for random bits to drive the source, often limited on small satellites. This constrains the amount of reconciled data established between a satellite and OGS, thus directly impacting the achievable SKL per pass. Unlike in previous sections where we assumed the source can run indefinitely, in this section, we extend our analysis to model the impact of varying memory storage limits of cryptographically secure random bits on the final SKL.
For a two decoy-state weak coherent state protocol, each pulse consumes four random bits; one for the basis choice, one for the key value, and two for the intensity choice. For the efficient BB84 decoy-state protocol, the basis choice bit and the intensity bits are biased. In general, it takes at most two unbiased bits on average to generate one biased bit~\cite{gryszka2021biased}, hence each pulse requires up to seven unbiased bits from the quantum random number generator (QRNG), though only four bits need to be stored after biasing. At 500~MHz source rate, this requires 2~Gb/s of stored random bits to drive the source. Therefore, a zenith pass with a maximum overpass duration of 444~s (accounting for a minimum elevation limit of 10$^\circ$) requires a minimum availability of 111~GB of random bits. Current state of the art in space-validated QRNGs can achieve rates of 1--20~Mb/s~\cite{QRNG2016,IDquantQRNG}, which falls short of supporting complete transmission, and thus necessitates a buffer.
First, we examine the effects of a limited random bit memory buffer on the finite key. An 8~GB buffer can support up to 32~s transmission time for a 500~MHz source, which is much shorter than the maximum overpass duration of 444~s. Fig.~\ref{fig:membuff} (left-hand axis) shows the per-pass SKL for different memory buffers as a function of overpass geometry ($d_\text{min}$, $\theta_\text{max}$). A larger memory buffer permits longer transmission times, which enhances the finite SKL and extends the operational footprint of the SatQKD system. Second, we determine the minimum memory buffer required to yield non-zero finite keys for different overpasses. For a given overpass, the smallest block size that yields a non-zero finite key defines the smallest operational time window, $t_\text{min}$, that should be supported by the onboard storage. This provides a measure of the memory buffer requirement for a SatQKD mission, given by $f_s t_\text{min}/2$ Bytes. The right-hand axis of Fig.~\ref{fig:membuff} illustrates the minimum memory buffer required for different satellite overpass trajectories. The demand for larger onboard storage requirements increases with increasing ground track distances. This is because satellite overpasses with larger ground track distances require larger minimum transmitted signals to overcome the larger average channel losses and generate a non-zero finite key.
\begin{figure}
\caption{\textbf{Overpass and memory buffer effects.}}
\label{fig:membuff}
\end{figure}
Third, to quantify the overall impact of limited memory buffers on the SKL, we estimate the annual amount of secret keys that can be generated using methods from Ref.~\cite{Sidhu2022_npjQI}. For a
Sun-synchronous orbit and neglecting weather effects, the expected annual key for single overpass blocks with an OGS site situated at a particular latitude is approximated by~\cite{Sidhu2022_npjQI}
\begin{align}
\overline{\text{SKL}}_\text{year}=N_\text{orbits}^\text{year}\frac{\text{SKL}_\text{int}}{L_\text{lat}},
\end{align}
where $\text{SKL}_\text{int}$ is twice the integrated area under the SKL vs $d_\text{min}$ curve in Fig.~\ref{fig:membuff} (units of bit metres), $N_\text{orbits}^\text{year}$ is the number of orbits per year, and $L_\text{lat}$ is the longitudinal circumference along the line of latitude at the OGS location. Fig.~\ref{fig:varymem} illustrates how $\overline{\text{SKL}}_\text{year}$ varies as a function of the memory buffer for an OGS at a latitude of $55.9^\circ$ N (latitude of Glasgow). For our reference configuration (System D) with $\eta_\text{loss}^\text{sys}=40$~dB, $\overline{\text{SKL}}_\text{year}$ is 0.81~Gb (3.94~Gb) for a memory buffer of 8~GB (32~GB) respectively. For comparison, without QRNG limitations, $\overline{\text{SKL}}_\text{year}$ is 6.44~Gb. Fig.~\ref{fig:varymem} also shows the gains to $\overline{\text{SKL}}_\text{year}$ from better performing sources and detectors. Comparing Systems B and C shows a crossover in their $\overline{\text{SKL}}_\text{year}$ at around 32~GB, highlighting an important tradeoff between the operational performance of sources and receiver for fixed memory buffers. Namely, SatQKD systems operating with constrained memory buffers should focus on improving sources (minimising $\text{QBER}_\text{I}$, System C). This is because small memory buffers can only support a short signal transmission time around the maximum elevation of a satellite's trajectory, where losses are minimised. Improving the performance of the source leads to a direct improvement of $\overline{\text{SKL}}_\text{year}$. Conversely, SatQKD systems not constrained with memory buffers have a larger operational footprint that maximises the number of overpasses that generate non-zero finite keys. Improving the key generation of these systems can be supported through improved receivers with reduced $p_\text{ec}$ (System B).
We note that a higher source rate, $f_\text{s}$, can improve the satellite overpass opportunities that generate a non-zero finite key and reduce the required memory storage. For the number of transmitted signals enabled by a limited memory buffer, a higher rate allows signal transmission over a shorter time window around $\theta_\text{max}$, where the satellite-OGS range is at its smallest, corresponding to a lower average loss. This improves both the received block length and the overall error rate. Also, the minimum amount of buffer required to generate the secret key is reduced due to more efficient transmission during the lower loss segment of an overpass. To illustrate this, consider a zenith pass with time-window of 444~seconds and a source with repetition rate of 100~MHz, which requires 22.2~GB of random bits. If the repetition rate is increased to 500~MHz, then the same data can be transmitted in 88.8~seconds, five times less. One can thus focus the transmission at higher elevation angles, which have less loss and lower errors. The raw data for the 500~MHz source leads to a greater amount of secret key. It follows that a 500~MHz source could generate the same key length as a 100~MHz source, using fewer pulses and therefore fewer random bits.
\begin{figure}
\caption{\textbf{Annual expected SKL vs Memory Buffer.}}
\label{fig:varymem}
\end{figure}
\subsection{Source intensity uncertainties}
\label{sec:int_fluc}
\noindent
Standard analyses of WCP decoy-state BB84 protocols usually assume perfect device operation leading to idealised key rates with optimised intensities. We can consider various deviations from ideality, such as a source with fixed and known intensities operational during the entire integration time of a satellite overpass. Active stabilisation of pulse intensities by continuous monitoring and feedback is possible~\cite{Lucamarini2013_OE} but may be limited by inherent power monitor measurement uncertainties. Instead, instantaneous offsets and long-term drifts in the intensity values lead to parameter uncertainties that are an important departure from the fixed operating intensity assumption, which directly impacts the security of distilled finite keys for two reasons. First, source intensity uncertainties can be exploited in general attacks~\cite{Yoshino2018_npjQI} which may be exacerbated in SatQKD with small block sizes. Second, the estimated vacuum and single-photon yields will differ significantly from true expectation values, potentially leading to an underestimation of the required privacy amplification to ensure security.
Several recent works have looked at this general problem by accounting for the uncertainties in source intensities directly within the parameter estimation~\cite{Wang2007_PRA, Wang2008_PRA, Hu2010_PRA, Wang2016_PRA,Liao2017_N}. This changes the estimates of the quantities that appear in Eq.~(\ref{eqn:skl_lim_result}) and could also change the secret key formula itself. A different scenario has also been considered~\cite{airqkd2022} where the existing formalism described in Refs.~\cite{Sidhu2022_npjQI,Lim2014_PRA} is used, but where one assumes that the true intensities are uncertain, though not necessarily fluctuating during a transmission block. This uncertainty results either from measurement uncertainties in the power monitors or from drifts in the calibration settings. We note that in~\cite{airqkd2022} the channels did not vary in time during a transmission block, in contrast to the SatQKD case that we consider here.
\begin{figure}
\caption{\textbf{Impact of source intensity uncertainty}}
\label{fig:unstable_sources}
\end{figure}
In this work, as in~\cite{airqkd2022}, we model the impact on the SKL of uncertainties in the source intensities, where we have an upper bound on the possible deviations of $\mu_j$ from the assumed/measured values. Our approach models the case where the fixed intensity values have a constant and unknown offset from their intended values. The intensities can vary from the intended values by a maximum fraction $f$ of the intended values during an overpass. The probability of the intensity values exceeding the range defined by $f$ must be less than the advertised probability of the protocol being insecure, which is determined by $\epsilon_\text{s}$. These uncertainties are considered separately for the signal and decoy states $\mu_1$ and $\mu_2$ respectively, but not for the vacuum state, since any deviations in the vacuum state due to extraneous counts have already been considered. Crucially, we consider independent uncertainties for $\mu_1$ and $\mu_2$ for all four encoded bit values. This is a more pessimistic approach than in related works, such as \cite{Wang2016_PRA}, where it is assumed that the uncertainties for $\mu_j$ are the same for each bit value and basis. Each intensity value is then sampled independently in the range $\mu_j \pm f\mu_j$ to determine each signal state. Since the true intensity values are unknown to Bob, we take the worst-case combination of deviations that reduces the SKL as a conservative estimate while ensuring security. The range $\mu_j \pm f\mu_j$ is sampled using different numbers of points, though it was found that only 3 points were sufficient to find the worst-case SKL. Fig.~\ref{fig:unstable_sources} illustrates the SKL as a function of $\theta_\text{max}$ for at most a 5\% and 10\% uncertainty in the source intensities. To quantify this reduction, a 5\% and 10\% uncertainty in the source intensities reduces the annual SKL by a significant factor of 2 and 43 respectively. 
From this reduction, it is clear that source intensity uncertainties have a profound impact on the attainable SKL that significantly reduces the SatQKD operational footprint. For large uncertainties, it is therefore likely that the SKL will be zero for many of the satellite overpass opportunities. This highlights the importance of including the effects of uncertainties in the description of the power monitors. Active stabilisation of intensities in conjunction with high-accuracy power monitoring is important to allow operation close to the desired performance.
\section{Discussion}
\label{sec:conc}
\noindent
Existing analyses of satellite-based QKD (SatQKD) assume an ideal, fully optimised parameter space to determine the maximum finite key rate. In practice, it is difficult to engineer the control of each parameter for different satellite overpasses. Therefore, these analyses effectively serve as an upper bound to the expected performance of SatQKD. We show that SatQKD operates with limited operating margins. It is therefore of immediate practical relevance to investigate the performance of SatQKD with a reduced parameter space optimisation to reflect restrictions on system operations and deployment, and to understand its robustness to additional losses and system imperfections. Further, the limited volumetric space, weight, and power (SWaP) available on small satellites provide limited physical resources that further depart from the ideal scenario of a fully optimised parameter space. We fill this gap by establishing practical SatQKD performance limits that reflect the nature of current engineering efforts and evaluate the impacts of limited resources on the long-term finite secret key length (SKL) generation capacity.
First, we model the impact of a fixed receiver basis bias $P_\mathsf{X}^B$ and pulse intensities $\mu_j$ on the SKL given the impracticality of their dynamic control during transmission. The SKL can be enhanced through carefully selecting the operating values of the fixed parameters. We develop a natural approach to determining the ideal fixed parameter values, based on maximising the expected annual SKL, which can be readily generalised to any parameter set. For the nominal system specifications denoted in Table~\ref{tab:system_parameters}, this leads to the fixed parameter set $\{P_\mathsf{X}^B, \mu_1, \mu_2\}$ $=$ $\{0.84, 0.71, 0.14\}$, corresponding to the receiver beamsplitter basis bias, and signal and decoy state intensities. Despite these fixed values, we find it is possible to generate near-optimal SKLs across a wide range of overpass maximum elevation angles. While larger $P_\mathsf{X}^B$ can generate larger SKL at high elevations, it does so at the expense of zero secret key at lower elevations due to worse parameter estimation. SatQKD missions should therefore carefully choose the fixed OGS bias to address the tradeoff between a maximised single-pass SKL and the long-term key generation capacity. Our optimal fixed value of $P_\mathsf{X}^B = 0.84$ balances this tradeoff to achieve close to optimal performance with fixed intensities. The optimum set of $\{P_\mathsf{X}^B, \mu_1, \mu_2\}$ will require re-evaluation for different SatQKD systems, especially in a large-scale network with several OGSs and a heterogeneous space segment. Further trade-offs will have to be considered to establish a set of standard system parameters based on operational and application-specific factors.
Next, we illustrate the significant impact of limited QRNG resources that drive the source on the expected annual SKL. For the nominal system, increasing the memory buffer from 8~GB to 32~GB substantially increases the expected total annual SKL from 0.81~Gb to 3.94~Gb, corresponding to $3.16 \times 10^{6}$ and $1.54 \times 10^{7}$ AES-256 encryption keys respectively, though there are diminishing returns for larger buffers. This insight has significant implications for design trade-offs. We provide the minimum memory buffer required to yield non-zero finite keys for different overpass geometries, providing an important benchmark to support the design of upcoming SatQKD missions. For missions with higher altitudes and source rates, the QRNG subsystem for prepare-and-measure protocols will be increasingly crucial for sustained operations. High-speed QRNGs with sufficient rate for real-time driving of the source, together with ring-buffers and real-time reconciliation would obviate the need for extremely large random number stores, but will have further system design implications for SWaP-C and required communications capabilities.
Finally, we investigate the impact of uncertainties in the signal and decoy state intensities on the SKL. Maintaining fixed intensity values requires perfect sources during the entire integration time of a satellite overpass. In practice, imperfect knowledge of the transmitted state intensities directly impacts the amount of finite key that can be distilled whilst maintaining security. We find that these uncertainties have a profound impact on the SKL and highlight the importance of the accuracy of power monitors. Actively stabilising the intensities close to their intended values is also important to approach the optimal performances as modelled.
This study opens up a number of interesting open problems that would extend the scope and applicability of this work. First, a more comprehensive quantum channel model that includes elevation and azimuthal-dependent background light distributions, cloud cover, seasonal weather effects, and other location-dependent effects would provide a more representative performance analysis for detailed OGS siting studies. Second, different orbits and altitudes could also be modelled, the optimum altitude to maximise the integrated key generation footprint, hence its expected annual SKL, could be derived in particular. Third, implementing error correction and privacy amplification can be demanding for SatQKD. While these steps do not have to occur during the quantum transmission phase (the limited overpass time and quantum optical channel is the main bottleneck we consider in this work), modelling any inefficiencies would warrant an analysis in its own right. In particular, exploring the impact of limited resources to efficiently implement and measure error syndromes could impact the security and correctness of finite keys. Finally, an interesting extension toward the aim of establishing a global quantum network would be in exploring additional cost and performance trade-offs to reveal deeper insights into performance bottlenecks in SatQKD.
\appendix
\section*{Methods}
\label{sec:methods}
\subsection{Loss modelling}
\label{subsection:loss_modelling}
\noindent
In this section, we introduce the notation and the underlying loss model. In particular, we provide details on our model for the elevation and wavelength-dependent losses for any satellite overpass geometry. Recall that to determine the finite key, we need to determine the expected detector count statistics as a function of time and the operational source wavelength $\lambda$. Therefore, we first determine the instantaneous link efficiency as a function of elevation $\theta(t)$, range $R(t)$, and source wavelength $\lambda$, which captures all systematic and channel losses. Our method to determine the link efficiency differs from our approach in Ref.~\cite{Sidhu2022_npjQI} where we used empirical results published by Micius. In this work, we use a more physically motivated approach that will allow greater flexibility in the analysis and applications that can be considered, such as the effects of OGS positioning. Despite this change, the results of the two methods closely match for elevations above $10^\circ$ which provides confidence in the new approach.
We write the link efficiency as
\begin{align}
\eta_{\lambda}\left(\theta\right) = \eta_\text{diff}\left(\lambda,\theta\right) + \eta_\text{atm}\left(\lambda,\theta\right) + \eta_\text{int},
\label{eqn:tot_loss}
\end{align}
in units of decibels (dB) and where we have three distinct loss contributors. The first term $\eta_\text{diff}$ defines losses from diffraction effects, $\eta_\text{atm}$ from atmosphere effects that include scattering and absorption, and $\eta_\text{int}$ defines a fixed elevation-independent intrinsic system efficiency corresponding to internal losses, and beam misalignment. Eq.~\eqref{eqn:tot_loss} provides a general approach to modelling losses for any SatQKD system. Once a satellite overpass trajectory is defined, we use Eq.~\eqref{eqn:tot_loss} to determine the loss for every second of the overpass to estimate the total count statistics. A single block is then constructed from the entire overpass data, and finite statistics incorporated to maintain composable security. Details for each loss contributor are provided below.
\begin{figure}
\caption{\textbf{Link efficiency as a function of elevation}
\label{fig:loss_vs_elevation}
\end{figure}
\subsubsection{Diffraction losses}
\label{subsubsec:diff_loss}
\noindent
A dominant contribution to loss is diffraction, which broadens the beam after the signal propagates through the satellite's transmitter aperture, $T_\mathsf{X}$. The amount of beam broadening depends on a number of factors, including the channel range $R(t)$, $T_\mathsf{X}$, and the source wavelength $\lambda$. Here, we take a standard approach to estimate diffraction losses by calculating the far-field Fraunhofer diffraction of an initial truncated Gaussian field distribution with a beam waist of $w_0$ at the transmission aperture. We calculate the probability that a single photon exiting the transmit aperture is collected by the receiver aperture from the ratio of the integrated power density across the transmitter aperture, $P_T$, and the receiver aperture, $P_R$,
\begin{align}
\eta_\text{diff}\left(\lambda,\theta\right) = -10 \log_{10}\left(\frac{P_R}{P_T}\right).
\end{align}
Since we are using a weak coherent pulse (WCP), there is no optimal beam waist provided there is no constraint on beam power~\cite{Bourgoin:2013fk}. For a downlink configuration with a WCP source, it is optimal to have the beam waist be as large as possible to achieve close to ideal far-field diffraction. However, practical constraints on the source power will impose a limit to flatness of the Gaussian across the transmission aperture. Therefore, we set the beam waist to be in the order of the transmitter aperture diameter, $w_0 = T_\mathsf{X}/2$. The impact of a central beam obscuration due to secondary mirrors typical of Cassegrain-type reflecting telescopes could be considered~\cite{Bourgoin:2013fk} but has no significant impact on the analysis.
\subsubsection{Atmospheric attenuation}
\label{subsubsec:atm_loss}
\noindent
The second contributor to the instantaneous link efficiency arises from atmospheric attenuation from absorption and scattering from molecules and particulate matter. The magnitude of these atmospheric losses depends on the wavelength and the satellite's elevation, which determines the length of the quantum channel through the atmosphere. We use MODTRAN to model atmospheric propagation and determine the transmissivity, $T_{\lambda}(\theta)$, for a given wavelength as a function of elevation. MODTRAN is a software that solves the radiative transfer equation to provide a standard atmospheric band model~\cite{Modtran_inproceedings}.
The atmospheric loss contribution is then calculated from the transmissivity,
\begin{align}
\eta_\text{atm}\left(\lambda,\theta\right) = -10 \log_{10}\left(T_{\lambda} (\theta)\right),
\end{align}
where the wavelength and elevation dependence is made clear.
\subsubsection{`Intrinsic' system loss}
\label{subsubsec:intrinsic_loss}
\noindent
The final loss contributor is denoted the `intrinsic' system loss $\eta_\text{int}$ that combines several sources. We simplify the analysis by taking this to be fixed, i.e. elevation/time independent. Within our loss budget, the intrinsic system loss combines two distinct loss contributors. First, we conservatively assign a fixed loss of 12~dB to the overall electro-optical inefficiency of the OGS system, which is comprised of 3~dB each from,
\begin{enumerate}
\item photon detection efficiency Si-SPAD,
\item quantum receiver optics,
\item collection telescope,
\item interface and adaptive/tip-tilt optics between telescope and quantum receiver.
\end{enumerate}
We also lump together losses due to an imperfect, non-diffraction limited, beam (beam quality parameter $M^2 > 1$), turbulence-induced beam wander and spreading, and transmitter pointing errors. For simplicity, we assign a fixed and conservative value of 8~dB to such non-ideal beam propagation induced losses. Therefore, in this work, we set
\begin{align}
\eta_\text{int} = 20.0~\mathrm{dB},
\end{align}
which brings the total minimum loss at zenith to $\eta_\text{loss}^\text{sys}=40$~dB. Elevation dependence of the turbulence-induced losses has been considered in other works but is neglected for the moment in this work. More detailed modelling of turbulence and pointing losses can be found in~\cite{trinh2022statistical} and references therein. Under-estimation of these losses is compensated in part by conservative estimates made elsewhere in $\eta_\text{int}$.
Note that these are conservative estimates that may be more indicative of practical SatQKD systems. If we are able to engineer better performances and achieve highly optimised operation, then we can further reduce the receiver and transmitter apertures for increased portability, while maintaining the values of $\eta_\text{loss}^\text{sys}$ analysed here. These losses are consistent with the recent mobile OGS designed for the Micius mission~\cite{Ren2022arxiv}.
\subsection{Error correction for one-way information reconciliation}
\label{subsec:error_corr_term}
\noindent
An important step for any QKD protocol is error correction, which identifies and corrects errors due to vacuum events and transmission errors. For this step, Alice and Bob publicly announce $\lambda_\text{EC}$ bits that are assumed known to Eve through a round of classical communication. The number of bits $\lambda_\text{EC}$ depends on the error rate, which in a practical implementation we estimate during the parameter estimation stage. For our simulation, we use an estimate of $\lambda_\text{EC}$ that varies with the quantum bit error rate (QBER), $Q$, and the data block size, $n_\mathsf{X}$. A common approach to modelling the number of error correction bits required during information reconciliation is through $f_\text{EC} n_\mathsf{X} h(Q)$, where $f_\text{EC}$ is the reconciliation factor efficiency and we recall that $h(x)$ is the binary entropy function. The value for $f_\text{EC}$ is crucially larger than unity, and often chosen within the range 1.05 to 1.2, to account for inefficiencies in the error correction protocol. While this approach is well-suited to determining the optimal secret key length, it is assumed that the reconciliation factor efficiency is independent of $Q$, $n_\mathsf{X}$, and the required correctness $\epsilon_\text{c}$. Since SatQKD operates within the finite-key regime, these parameters can vary significantly, however. An improved estimate of the reconciliation factor efficiency would enable a higher SKL under finite statistics.
The amount of information leaked to the eavesdropper during information reconciliation is usually impossible to determine exactly. Therefore it is often upper bounded by $\log\abs{\mathcal{M}}$, where $\mathcal{M}$ denotes the error syndrome. For one-way reconciliation, the size of this error syndrome (in bits) has the following tight lower bound~\cite{Tomamichel2017_QIP}
\begin{align}
\begin{split}
\lambda_\text{EC} = & \; n_{\mathsf{X}} h(Q) + n_{\mathsf{X}} (1 - Q)\log\left[\frac{(1 - Q)}{Q}\right]\\
&-
\left(F^{-1}(\epsilon_\text{c};n_{\mathsf{X}},1-Q) - 1\right) \log\left[\frac{(1 - Q)}{Q}\right]\\
&- \frac{1}{2} \log(n_{\mathsf{X}}) - \log(1/\epsilon_\text{c}),
\end{split}
\label{eq:lambdaec}
\end{align}
where $F^{-1}$ is the inverse of the cumulative distribution function of the binomial distribution. We use this estimate for the number of error correction bits to determine the optimised SKL. We note that for large block sizes
\begin{align}
\lim_{n_{\mathsf{X}} \rightarrow \infty} \frac{\lambda_\text{EC}}{n_{\mathsf{X}} } = h(Q),
\label{eqn:err_corr_limit}
\end{align}
such that $\lambda_\text{EC}^\infty = n_{\mathsf{X}} h(Q)$, which is the minimum possible bits allowed by information theory. This suggests that the information reconciliation (IR) factor efficiency tends towards unity $f_\text{EC}=1$, which is optimistic even for optimised low-density parity-check (LDPC) codes that can achieve high reconciliation efficiencies and require few rounds of communications~\cite{Elkouss2009}. For application in SatQKD, the IR efficiency does not approach this asymptotic limit over QBERs and data block sizes typical of realistic operation. To demonstrate this, we investigate how the IR efficiency estimate varies for the different memory buffers considered in Section~\ref{sec:mem_buffer}. Specifically, the finite-size estimate for the IR efficiency provided by Eq.~\eqref{eq:lambdaec} can be determined from the ratio $f_\text{EC}^\text{est} = \lambda_\text{EC} / n_{\mathsf{X}} h(Q)$. Fig.~\ref{fig:IR_eff} illustrates this ratio as a function of satellite overpasses with maximum elevation angle $\theta_\text{max}$ for different memory buffers $m_b$. Note that the data block sizes increase with an increasing memory buffer, leading to better $f_\text{EC}^\text{est}$ that approaches unity. We observe that the estimated efficiency dips below the lower quoted value of 1.05 in the literature~\cite{Tomamichel2017_QIP}, which is indicated by the gray region. Recall from section~\ref{sec:mem_buffer}, that a memory buffer of 64~GB achieves near-optimal performance corresponding to the highly optimised scenario. Therefore, the correction estimate in Eq.~\eqref{eqn:err_corr_limit} does not approach the asymptotic limit of unit efficiency for SatQKD data representative of current engineering efforts and capabilities and is well suited to explore the engineering constraints that are the focus in this work.
\begin{figure}
\caption{\textbf{One-way information reconciliation efficiency.}
\label{fig:IR_eff}
\end{figure}
Before concluding, we make two observations. First, a simple remedy to the error correction estimate that would hold for any data block size would be to switch to an updated model whenever the reconciliation efficiency estimated by Eq.~\eqref{eqn:err_corr_limit} falls below 1.05. That is, we can estimate the number of error correction bits required from
\begin{align}
\lambda_\text{EC}^{\text{new}} = f_\text{EC} n_{\mathsf{X}} h(Q) \, ,
\label{eqn:err_corr_improved}
\end{align}
where $f_\text{EC}$ takes values that reflect achievable efficiencies, whenever $\smash{\lambda_\text{EC} < 1.05 n_{\mathsf{X}} h(Q)}$. Second, here we do not consider bi-directional information reconciliation for SatQKD such as CASCADE~\cite{brassard1994secret}. Although it may lead to improved reconciliation efficiencies, the complexity of classical communication protocols and operations, and demands for on-board data processing are significantly greater. Hence, it may be more practical to implement one-way IR in SatQKD to simplify operations and reduce system cost and complexity using schemes such as LDPC codes~\cite{johnson2015analysis}.
\subsection{General approach optimisation of fixed parameter values}
\label{subsection:param_fixing}
\noindent
The fully optimised finite SKL is difficult to achieve since it requires active control of the entire parameter space, which may be difficult to engineer. In section~\ref{sec:param_fixing} we explored the impact of fixing the receiver basis bias $P^B_\mathsf{X}$, and the two intensity values $\mu_1$ and $\mu_2$ that are particularly challenging to change. This naturally raises the question \emph{what fixed values should a SatQKD system implement}? Here, we outline a general method to determine fixed values for the set $\mathcal{F} = \{P^B_\mathsf{X}, \mu_1, \mu_2\}$.
Our method follows from maximising $\overline{\text{SKL}}_\text{year}$, which is proportional to the integrated area under the SKL vs ground track distance curves, $\text{SKL}_\text{int}$~\cite{Sidhu2022_npjQI}. We first establish the fully optimised SKL as a function of $d_\text{min}$, corresponding to optimising the full parameter space. For each point $j$ along the optimised curve, we extract the set, $\mathcal{F}_{d_\text{min}(j)}^\text{opt}$, of the optimal values for $P^B_\mathsf{X}$, $\mu_1$, and $\mu_2$ for $d_\text{min}(j)$ (in units of $10^6$ m). Now fixing $\mathcal{F}_{d_\text{min}(j)}^\text{opt}$, we optimise the SKL over the remaining parameter space to determine the SKL as a function $d_\text{min}(j)$, hence $\text{SKL}_\text{int}$. This procedure is repeated for each optimised point $j$. We then choose the fixed set $\smash{\mathcal{F}_{d_\text{min}(k)}^\text{opt}}$ that maximises $\text{SKL}_\text{int}$ as the best compromise of fixed parameters. This procedure is summarised in Fig.~\ref{fig:param_fix_algo}.
\begin{figure}
\caption{\textbf{Pseudocode to determine the ideal fixed parameter set}
\label{fig:param_fix_algo}
\end{figure}
Fig.~\ref{fig:param_fix_choice} illustrates this procedure for choosing the ideal fixed set $\smash{\mathcal{F}_{d_\text{min}(k)}^\text{opt}}$ that optimises $\overline{\text{SKL}}_\text{year}$. In Fig.~\ref{fig:param_fix_choice}(a), the optimal SKL is illustrated in black. Three illustrative fixed sets $\mathcal{F}_{d_\text{min}(j)}$ are sampled to correspond to the maximum, median, and minimum non-zero SKL values and are shown in dashed blue, dashed red, and dashed green respectively. We first note that fixing the values for $\mathcal{F}$ has little impact on the SKL over the entire range of satellite overpass trajectories. This reassuringly demonstrates that SatQKD systems operating with a fixed subset of parameters $\mathcal{F}$ do not lead to a large departure from the optimal performance with only a small observed impact on the SKL generation performance. Second, it is possible to improve the SKL by carefully choosing the fixed values for $\mathcal{F}$. The ground track distances furthest away from the sampled point $j$ along the optimal curve deviate most from the optimal performance. This suggests that the fixed parameter set should be chosen closer to the centre of the curve, since this would maximise the robustness of the SatQKD systems to the widest variety of satellite overpasses leading to the largest annual expected SKL. This specific dependence on the fixed parameter set and the annual SKL is illustrated in Fig.~\ref{fig:param_fix_choice}(b). The peak annual SKL corresponds to the ideal fixed set $\smash{\mathcal{F}_{0.43}^\text{opt} = \{0.841, 0.709, 0.139\}}$. This establishes the fixed values used in section~\ref{sec:param_fixing}. Our method is general and can be extended to determining the ideal values for any alternative subset of fixed parameter sets.
Finally, we reassuringly find that despite the constrained parameter space, the estimated annual SKL with these fixed parameters is close to the fully optimised case, shown with the dashed horizontal line in (b).
We note that there is the possibility that a greater $\overline{\text{SKL}}_\text{year}$ could be achieved with a parameter set outside of the per-pass optima but as the presented procedure closely approaches the upper bound, a search for such values may not be worthwhile.
\begin{figure*}
\caption{\textbf{SKL
vs $\smash{d_\text{min}
\label{fig:param_fix_choice}
\end{figure*}
\section*{Data availability}
\noindent
The raw output files from the simulations used to generate data in this work are available upon reasonable request. All material requests should be made to J.S.S.
\section*{Code availability}
\noindent
The SatQuMA v1.1 simulation Python suite is available at Ref.~\cite{Sidhu2021arxiv}. Modified code used to generate all results in this work is accessible on GitHub \href{https://github.com/cnqo-qcomms/SatQuMA/}{https://github.com/cnqo-qcomms/SatQuMA/}. It implements a minor modification of SatQuMA v1.1 to handle fixed parameters that have currently not been released as a stand-alone package.
\section*{Author contributions}
\noindent
J.S.S. conceptualised the main ideas together with D.K.L.O., steered the direction of research, and wrote the initial draft. J.S.S., T.B., and D.M. wrote the initial version of the code (SatQuMA v1.1) that is openly available, with modifications made by J.S.S. and T.B. to obtain numerical results presented in this work. R.G.P. conducted background literature reviews. D.K.L.O. obtained funding and initiated the research. All authors contributed to selecting relevant literature, proofreading, and editing the manuscript.
\section*{Competing financial interests}
\noindent
The authors declare no competing financial interests.
\end{document} |
\begin{document}
\subjclass[2010]{Primary: 03F03; Secondary: 03F25, 03F50}
\begin{abstract} In generic realizability for set theories, realizers treat unbounded quantifiers generically. To this form of realizability, we add another layer of extensionality
by requiring that realizers ought to act extensionally on realizers, giving rise to a realizability universe $\mathrm{V_{ex}}(A)$ in which the axiom of choice in all finite types, ${\sf AC}_{\ft}$, is realized, where $A$ stands for an arbitrary partial combinatory algebra. This construction furnishes ``inner models'' of many set theories that additionally validate ${\sf AC}_{\ft}$, in particular
it provides a self-validating semantics for ${\sf CZF}$ (Constructive Zermelo-Fraenkel set theory) and ${\sf IZF}$ (Intuitionistic Zermelo-Fraenkel set theory).
One can also add large set axioms and many other principles.
\end{abstract}
\maketitle
\tableofcontents
\section{Introduction}
In this paper we define an extensional version of generic\footnote{The descriptive attribute ``generic'' for this kind of realizability is due to McCarty \cite[p.\ 31]{M84}.}
realizability over any given partial combinatory algebra (pca) and prove that it provides a self-validating semantics for ${\sf CZF}$ (Constructive Zermelo-Fraenkel set theory) as well as ${\sf IZF}$ (Intuitionistic Zermelo-Fraenkel set theory), i.e., every theorem of ${\sf CZF}$ (${\sf IZF}$) is realized by just assuming the axioms of ${\sf CZF}$ (${\sf IZF}$) in the background theory. Moreover, it is shown that the axiom of choice in all finite types, ${\sf AC}_{\ft}$, also holds under this interpretation.\footnote{As a byproduct, we reobtain the already known result (e.g. \cite[4.31, 4.33]{R03m}) that augmenting ${\sf CZF}$ by ${\sf AC}_{\ft}$ does not increase the stock of provably recursive functions. Likewise, we reobtain the result (a consequence of \cite{friedman73}) that augmenting ${\sf IZF}$ by ${\sf AC}_{\ft}$ does not increase the stock of provably recursive functions.}
This uniform tool of realizability can be combined with forcing to show that ${\sf IZF}+{\sf AC}_{\ft}$ is conservative over ${\sf IZF}$ with respect to arithmetic formulae (and similar results with large set axioms). For special cases, namely, finite type dependent choice, ${\sf DC}_{\ft}$, and finite type countable choice, ${\sf CAC}_{\ft}$,\footnote{${\sf DC}_{\ft}$ is the scheme $\forall x^{\sigma}\,\exists y^{\sigma}\,\varphi(x,y)\to \forall x^{\sigma}\, \exists f^{0\sigma}\,[ f(0)=x\;\wedge\;
\forall n\,\varphi(f(n),f(n+1))]$, while ${\sf CAC}_{\ft}$ stands for the scheme $\forall n\, \exists y^{\tau}\, \varphi(n,y)\to \exists f^{0\tau}\, \forall n\,\varphi(n,f(n))$.} this has been shown in \cite[Theorem 5.1]{friedman_scedrov84} and \cite[XV.2]{B85}, but not for ${\sf AC}_{\ft}$.
The same technology works for ${\sf CZF}$. However, for several subtheories of ${\sf CZF}$ (with exponentiation in lieu of subset collection) such conservativity results have already been obtained by Gordeev \cite{gordeev} by very different methods, using total combinatory algebras and an abstract form of realizability combined with genuine
proof-theoretic machinery.
Generic\ realizability is
markedly different from Kleene's number and function realizability as well as modified realizability. It originates with Kreisel's and Troelstra's \cite{KTr70} definition of realizability
for second order Heyting arithmetic and the theory of species.
Here, the clauses for the realizability relation $\Vdash$ relating
to second order quantifiers are the following: $e\Vdash \forall
X\, \phi(X)\Leftrightarrow \forall X\,e\Vdash \phi(X)$, $e\Vdash
\exists X\, \phi(X)\Leftrightarrow \exists X\,e\Vdash \phi(X)$. This
type of realizability does not seem to give any constructive
interpretation to set quantifiers; realizing numbers ``pass
through" quantifiers. However, one could also say that thereby the
collection of sets of natural numbers is generically conceived.
Kreisel-Troelstra realizability was applied to systems of higher
order arithmetic and set theory by Friedman \cite{F73} and to further set theories by Beeson \cite{B79}.
An immediate descendant of the interpretations of Friedman and Beeson was used by McCarty \cite{M84,M86}, who, unlike the realizabilities of Beeson, devised realizability directly for extensional ${\sf IZF}$: {\em ``we found it a nuisance to interpret the extensional theory into the intensional before realizing." } (\cite[p.\ 82]{M84}). A further generalization, inspired by a remark of Feferman in \cite{F75}, that McCarty introduced was that he used realizers from applicative structures, i.e. arbitrary models of Feferman's theory $\mathrm{APP}$, rather than just natural numbers.
Generic\ realizability \cite{M84} is based on the construction of a realizability universe $\mathrm{V}(A)$ on top of an applicative structure or partial combinatory algebra $A$. Whereas in \cite{M84,M86}
the approach is geared towards ${\sf IZF}$, making use of transfinite iterations of
the powerset operation,
it was shown in \cite{R06} that ${\sf CZF}$ suffices for a formalization of $\mathrm{V}(A)$ and the generic realizability based upon it. This tool has been successfully applied to the proof-theoretic analysis of ${\sf CZF}$ ever since \cite{R05a, R05, R08, S14}.
With regard to ${\sf AC}_{\ft}$, it is perhaps worth mentioning that, by using generic\ realizability \cite{M84}, one can show that ${\sf AC}_{0,\tau}$ for $\tau\in\{0,1\}$ holds in the realizability universe $\mathrm{V}(A)$ for any pca $A$ (cf.\ also \cite{DR19}). For instance, one can take Kleene's first algebra. With some effort, one can also see that ${\sf AC}_{1,\tau}$ for $\tau\in\{0,1\}$ holds in $\mathrm{V}(A)$ by taking, e.g., Kleene's second algebra. It is conceivable that one can construct a specific pca $A$ so as to validate ${\sf AC}_{\ft}$ in $\mathrm{V}(A)$. In this paper we show that, by building extensionality into the realizability universe and by adapting the definition of realizability, it is possible to satisfy choice in all finite types at once, regardless of the partial combinatory algebra $A$ one starts with.
Extensional variants of realizability in the context of (finite type) arithmetic have been investigated by Troelstra (see \cite{T98}) and van Oosten \cite{ Oosten97,Oosten08}, as well as \cite{F19, BS18}, and for both arithmetic and set theory by Gordeev in \cite{gordeev}. For earlier references on extensional realizability, in particular \cite{Grayson}, where the notion for first order arithmetic first appeared, and \cite{Pitts}, see Troelstra \cite[p.\ 441]{T98}.
\section{Partial combinatory algebras}
Combinatory algebras are the brainchild of Sch\"onfinkel \cite{Schoen24} who presented his ideas in G\"ottingen in 1920. The quest for an optimization of his framework, singling out
a minimal set of axioms, engendered much work and writings from 1929 onwards, notably by Curry \cite{Curry29,Curry30}, under the heading of {\em combinatory logic}.
Curiously, a very natural generalization of Sch\"onfinkel's structures, where the application operation is not required to be always defined, was axiomatically characterized only in 1975 by Feferman in the shape of the most basic axioms of his theory $T_0$ of explicit mathematics \cite{F75}\footnote{In the literature, this subtheory of $T_0$ has been christened $\mathrm{EON}$ (for {\em elementary theory of operations and numbers}; see \cite[p.\ 102]{B85}) and $\mathrm{APP}$ (on account of comprising the {\em applicative axioms} of $T_0$; see \cite[Chapter 9, Section 5]{TvD88}). However, to be precise let us point out that $T_0$ as formulated in \cite{F79} differs from the original formulation in \cite{F75}:
\cite{F79} has a primitive classification constant $\mathbb N$ for the natural numbers as well as constants for successor and predecessor on $\mathbb N$, and more crucially, equality is not assumed to be decidable and the definition-by-cases operation is restricted to $\mathbb N$.}
and in \cite[p.\ 70]{F78a}. Feferman called these structures {\em applicative structures}.
\begin{notation} In order to introduce the notion of a pca, we shall start with that of a partial operational structure $(M,\cdot)$,
where $\cdot$ is just a partial binary operation on $M$. We use
$a\cdot b\simeq c$ to convey that $a\cdot b$ is defined and equal to $c$. $a\cdot b\downarrow $ stands for $\exists c\, (a\cdot b\simeq c)$.
In what follows, instead of $a\cdot b$ we will just write $ab$. We also employ the association to the left convention, meaning that e.g.
$abc\simeq d$ stands for the following: there exists $e$ such that $ab\simeq e$ and $ec\simeq d$.
\end{notation}
\begin{definition}
A {\em partial combinatory algebra} (pca) is a partial operational structure $(A,\cdot)$ such that $A$ has at least two elements and there are elements
$\mb k$ and $\mb s$ in $A$ such that $\mb k a$, $\mb sa$ and $\mb sab$ are always defined, and
\begin{itemize}
\item $\mb ka b\simeq a$;
\item $\mb sabc\simeq ac(bc)$.
\end{itemize}
The combinators $\mb k$ and $\mb s$ are due to Sch\"onfinkel \cite{Schoen24} while the axiomatic treatment, although formulated just in the total case, is due to Curry \cite{Curry30}. The word ``combinatory" appears because of a property known as {\em combinatory completeness} described next. For more information on pcas see \cite{F75,F79, B85,Oosten08}.
\end{definition}
\begin{definition}
Given a pca $A$, one can form application terms over $A$ by decreeing that:
\begin{enumerate}[(i)]
\item variables $x_1,x_2,\ldots$ and the constants $\mb k$ and $\mb s$ are application terms over $A$;
\item elements of $A$ are application terms over $A$;
\item given application terms $s$ and $t$ over $A$, $(ts)$ is also an application term over $A$.
\end{enumerate}
Application terms over $A$ will also be called $A$-terms. Terms generated solely by clauses (i) and (iii) will be called {\em application terms}.
An $A$-term $q$ without free variables has an obvious interpretation $q^A$ in $A$ by interpreting elements of $A$ by themselves and letting $(ts)^A$ be $t^A\cdot s^A$ with $\cdot$ being
the partial operation of $A$. Of course, $q$ may fail to denote an element of $A$. We write $A\models q\downarrow$ (or just $ q\downarrow$) if it does, i.e., if $q^A$ yields an element of $A$.
\end{definition}
The combinatory completeness of a pca $A$ is encapsulated in $\lambda$-abstraction (see \cite[p.\ 95]{F75}, \cite[p.\ 63]{F79}, and \cite[p.\ 101]{B85} for more details).
\begin{lemma}[$\lambda$-abstraction]
For every term $t$ with variables among the distinct variables $x,x_1,\ldots,x_n$, one can find in an effective way a new term $s$, denoted $\lambda x.t$, such that
\begin{itemize}
\item the variables of $s$ are the variables of $t$ except for $x$,
\item $s[a_1/x_1,\ldots,a_n/x_n]\downarrow$ for all $a_1,\ldots,a_n\in A$,
\item $(s[a_1/x_1,\ldots,a_n/x_n]) a\simeq t[a/x,a_1/x_1,\ldots,a_n/x_n]$ for all $a,a_1,\ldots,a_n\in A$.
\end{itemize}
The term $\lambda x.t$ is built solely with the aid of $\mb k, \mb s$ and symbols occurring in $t$.
\end{lemma}
An immediate consequence of the foregoing abstraction lemma is the recursion theorem for pca's (see \cite[p.\ 96]{F75}, \cite[p.\ 63]{F79}, \cite[p.\ 103]{B85}).
\begin{lemma}[Recursion theorem] There exists a closed application term $\mb f$ such that for every pca $A$ and $a,b\in A$ we have $A\models {\mb f}\downarrow$
and
\begin{itemize}
\item $A\models \mb f a\downarrow$;
\item $A\models \mb f ab\simeq a(\mb fa) b$.
\end{itemize}
\end{lemma}
\begin{proof}
The heuristic approach consists in finding a fixed point of the form $cc$. Let us search for
$\mb f$ satisfying $\mb fa\simeq cc$, and hence find a solution of the equation
\[ ccb\simeq a(cc)b. \]
By using $\lambda$-abstraction, we can easily arrange to have, for every $d$,
\[ cdb\simeq a(dd)b. \]
Indeed, let $\mb f:=\lambda a.cc$, where $c:=\lambda db.a(dd)b$. Then $\mb f$ is as desired.
\end{proof}
In every pca, one has pairing and unpairing\footnote{Let $\mb p=\lambda xyz.zxy$, $\mb{p_0}:=\lambda x.x\mb k$, and $\mb{p_1}:=\lambda x.x\bar{\mb k}$, where $\bar{\mb k}:=\lambda xy.y$. Projections $\mb{p_0}$ and $\mb{p_1}$ need not be total. For realizability purposes, however, it is not necessary to have total projections.} combinators $\mb p$, $\mb {p_0}$, and $\mb {p_1}$ such that:
\begin{itemize}
\item $\mb pab\downarrow$;
\item $\mb {p_i}(\mb pa_0a_1)\simeq a_i$.
\end{itemize}
Generic realizability is based on partial combinatory algebras with some additional structure (see however Remark \ref{remark}).
\begin{definition}
We say that $A$ is a pca over $\omega$ if there are extra combinators $\mb{succ}, \mb{pred}$ (successor and predecessor combinators), $\mb d$ (definition by cases combinator), and a map
$n\mapsto \bar n$ from $\omega$ to $A$ such that for all $n\in \omega$
\begin{align*}
\succe \bar n&\simeq \overline{n+1}, & \pred\overline{n+1}&\simeq \bar n,
\end{align*}
\[ \mb d\bar n\bar mab\simeq
\begin{cases} a & n=m;\\ b & n\neq m. \end{cases}\]
One then defines $\mb 0:=\bar 0$ and $\mb 1:=\bar 1$.
The notion of a pca over $\omega$ coincides with the notion of $\omega$-pca$^+$ in, e.g., \cite{R05a}.
\end{definition}
Note that one can do without $\mb k$ by letting $\mb k:=\mb d\mb 0\mb 0$. The existence of $\mb d$ implies that the map $n\mapsto \bar n$ is one-to-one. In fact, suppose $\bar n=\bar m$ but $n\neq m$. Then $\mb d\bar n\bar n\simeq \mb d\bar n\bar m$. It then follows that $a\simeq \mb d\bar n\bar nab\simeq \mb d\bar n\bar mab\simeq b$ for all $a,b$. On the other hand, by our definition, every pca contains at least two elements.
\begin{remark}\label{remark}
The notion of a pca over $\omega$ is a slightly impoverished one compared to that of a model of Beeson's theory $\mathbf{PCA}^+$
\cite[VI.2]{B85} or Feferman's applicative structures \cite{F79}. However, for our purposes all the differences between these structures are immaterial as every pca can be expanded to a model of $\mathbf{PCA}^+$, which at the same time is also an applicative structure (see \cite[VI.2.9]{B85}).
By using, say, Curry numerals, one obtains a combinator $\mb d$ for this representation of natural numbers. So, every pca can be turned
into a pca over $\omega$ by using Curry numerals. On the other hand, the notion of pca over $\omega$ allows for other possible representations of natural numbers. Note that the existence of a combinator $\mb d$ for a given representation of natural
numbers (together with a predecessor combinator), entails the existence of a primitive recursion operator $\mb r$ for such representation, that is, an element $\mb r$ such that:
\begin{align*}
\mb rab\bar 0&\simeq a;\\
\mb rab\overline{n+1}&\simeq b(\mb rab\bar n)\bar n.
\end{align*}
\end{remark}
\section{The theory ${\sf CZF}$}
The logic of ${\sf CZF}$ (Constructive Zermelo-Fraenkel set theory) is intuitionistic first order logic with equality. The only nonlogical symbol is $\in$ as in classical Zermelo-Fraenkel set theory $\sf ZF$.
\begin{center} Axioms \end{center}
1. \textbf{Extensionality}: $\forall x\, \forall y\, (\forall z\, (z\in x\leftrightarrow z\in y)\rightarrow x=y)$,
2. \textbf{Pairing}: $\forall x\, \forall y\, \exists z\, (x\in z\land y\in z)$,
3. \textbf{Union}: $\forall x\, \exists y\, \forall u\, \forall z\, (u\in z\land z\in x\rightarrow u\in y)$,
4. \textbf{Infinity}: $\exists x\, \forall y\, (y\in x\leftrightarrow y=0\lor \exists z\in x\, (y=z\cup\{z\}))$,
5. \textbf{Set induction}: $\forall x\, (\forall y\in x\, \varphi(y)\rightarrow \varphi(x))\rightarrow \forall x\, \varphi(x)$, for all formulae $\varphi$,
6. \textbf{Bounded separation}: $\forall x\, \exists y\, \forall z\, (z\in y\leftrightarrow z\in x\land \varphi(z))$, for $\varphi$ bounded, where a formula is bounded if all quantifiers appear in the form $\forall x\in y$ and $\exists x\in y$,
7. \textbf{Strong collection}: $\forall u\in x\, \exists v\, \varphi(u,v)\rightarrow \exists y\, (\forall u\in x\, \exists v\in y\, \varphi(u,v)\land \forall v\in y\, \exists u\in x\, \varphi(u,v))$, for all formulae $\varphi$,
8. \textbf{Subset collection}: $\forall x\, \forall y\, \exists z\, \forall p\, (\forall u\in x\, \exists v\in y\, \varphi(u,v,p)\rightarrow \exists q\in z\, (\forall u\in x\, \exists v\in q\, \varphi(u,v,p)\land \forall v\in q\, \exists u\in x\, \varphi(u,v,p)))$, for all formulae $\varphi$.
\begin{notation} Let $x=0$ be $\forall y\in x\, \neg (y=y)$ and $x=y\cup\{y\}$ be $\forall z\in x\, (z\in y\lor z=y)\land \forall z\in y\, (z\in x)\land y\in x$.
\end{notation}
\section{Finite types and axiom of choice}
Finite types $\sigma$ and their associated extensions $F_\sigma$ are defined by the following clauses:
\begin{itemize}
\item $o\in\ft$ and $F_o=\omega$;
\item if $\sigma,\tau\in\ft$, then $(\sigma)\tau\in\ft$ and \[F_{(\sigma)\tau}=F_\sigma\to F_\tau=\{ \text{total functions from $F_\sigma$ to $F_\tau$}\}.\]
\end{itemize} For brevity we write $\sigma\tau$ for $(\sigma)\tau$, if the type $\sigma$ is written as a single symbol. We say that $x\in F_\sigma$ has type $\sigma$.
The set $\ft$ of all finite types, the set $\{ F_\sigma\colon \sigma\in\ft\}$, and the set $\mathbb{F}=\bigcup_{\sigma\in\ft}F_\sigma$ all exist in ${\sf CZF}$.
\begin{definition}[Axiom of choice in all finite types]
The schema ${\sf AC}_{\ft}$ consists of formulae
\[ \tag{${\sf AC}_{\sigma,\tau}$} \forall x^\sigma\, \exists y^\tau\, \varphi(x,y)\rightarrow \exists f^{\sigma\tau}\, \forall x^\sigma\, \varphi(x,f(x)), \]
where $\sigma$ and $\tau$ are (standard) finite types.
\end{definition}
\begin{notation} We write $\forall x^\sigma\, \varphi(x)$ and $\exists x^\sigma\, \varphi(x)$ as a shorthand for $\forall x\, (x\in F_\sigma\rightarrow \varphi(x))$ and $\exists x\, (x\in F_\sigma\land \varphi(x))$ respectively.
\end{notation}
\section{Defining extensional realizability in ${\sf CZF}$}
In ${\sf CZF}$, given a pca $A$ over $\omega$, we inductively define a class $\mathrm{V_{ex}}(A)$ such that \[\forall x\, (x\in\mathrm{V_{ex}}(A)\leftrightarrow x\subseteq A\times A\times \mathrm{V_{ex}}(A)).\]
The intuition for $\pair{a,b,y}\in x$ is that $a$ and $b$ are \emph{equal realizers} of the fact that $y^A\in x^A$, where $x^A=\{y^A\colon \pair{a,b,y}\in x\text{ for some } a,b\in A\}$.
General information on how to handle inductive definitions in ${\sf CZF}$ can be found in \cite{A86,AR01,czf2}. The inductive definition of $\mathrm{V_{ex}}(A)$ within ${\sf CZF}$ is on par with that of $\mathrm{V}(A)$, the specifics of which appear in \cite[3.4]{R06}.
\begin{notation} We use $(a)_i$ or simply $a_i$ for $\mb {p_i}a$. Whenever we write an application term $t$, we assume that it is defined. In other words, a formula $\varphi(t)$ stands for $\exists a\, (t\simeq a\land \varphi(a))$.
\end{notation}
\begin{definition}[Extensional realizability] We define the relation $a=b\Vdash \varphi$, where $a,b\in A$ and $\varphi$ is a realizability formula with parameters in $\mathrm{V_{ex}}(A)$. The atomic cases fall under the scope of definitions by transfinite recursion.
\begin{align*}
a=b&\Vdash x\in y && \Leftrightarrow && \exists z\, (\langle (a)_0,(b)_0,z\rangle \in y\land (a)_1=(b)_1\Vdash x=z)\\
a=b& \Vdash x=y && \Leftrightarrow &&\forall \langle c,d,z\rangle \in x\, ((ac)_0=(bd)_0\Vdash z\in y) \text{ and } \\
&&&&& \forall \langle c,d,z\rangle \in y\, ((ac)_1=(bd)_1\Vdash z\in x)\\
a=b& \Vdash \varphi\land \psi && \Leftrightarrow && (a)_0=(b)_0\Vdash \varphi \land (a)_1=(b)_1\Vdash \psi \\
a=b& \Vdash \varphi\lor\psi && \Leftrightarrow && (a)_0\simeq(b)_0\simeq \mb 0\land (a)_1=(b)_1\Vdash \varphi \text{ or } \\
&&&&& (a)_0\simeq (b)_0\simeq \mb 1\land (a)_1=(b)_1\Vdash \psi \\
a=b&\Vdash \neg\varphi && \Leftrightarrow && \forall c, d\, \neg (c=d\Vdash \varphi) \\
a=b&\Vdash \varphi\rightarrow\psi && \Leftrightarrow && \forall c,d\, (c=d\Vdash \varphi\rightarrow ac=bd\Vdash \psi) \\
a=b& \Vdash \forall x\in y\, \varphi && \Leftrightarrow && \forall \langle c,d,x\rangle\in y\, (ac=bd\Vdash \varphi) \\
a=b&\Vdash \exists x\in y\, \varphi && \Leftrightarrow && \exists x\, (\langle (a)_0,(b)_0,x\rangle \in y\land (a)_1=(b)_1\Vdash \varphi)\\
a=b& \Vdash \forall x\, \varphi && \Leftrightarrow && \forall x\in \mathrm{V_{ex}}(A)\, (a=b\Vdash \varphi) \\
a=b& \Vdash \exists x\, \varphi && \Leftrightarrow && \exists x\in \mathrm{V_{ex}}(A)\, (a=b\Vdash \varphi)
\end{align*}
\end{definition}
\begin{notation} We write $a\Vdash \varphi$ for $a=a\Vdash \varphi$.
\end{notation}
The above definition builds on the variant \cite{R06} of generic\ realizability \cite{M84}, where bounded quantifiers are treated as quantifiers in their own right. Note that in the language of ${\sf CZF}$, bounded quantifiers can be seen as syntactic sugar by letting $\forall x\in y\, \varphi:=\forall x\, (x\in y\rightarrow \varphi)$ and $\exists x\in y\, \varphi:=\exists x\, (x\in y\land \varphi)$. Nothing gets lost in translation, thanks to the following.
\begin{lemma}
There are closed application terms $\mb u$ and $\mb v$ such that ${\sf CZF}$ proves
\[ \mb u\Vdash \forall x\in y\, \varphi \leftrightarrow \forall x\, (x\in y\rightarrow \varphi), \]
\[ \mb v\Vdash \exists x\in y\, \varphi \leftrightarrow \exists x\, (x\in y\land \varphi). \]
\end{lemma}
The advantage of having special clauses for bounded quantifiers is that it simplifies a great deal the construction of realizers.
\begin{remark}
In the context of (finite type) arithmetic, extensional notions of realizability typically give rise to a partial equivalence relation. Namely, for every formula $\varphi$, the relation
$\{(a,b)\in A^2\colon a=b\Vdash \varphi\}$ is symmetric and transitive. This is usually seen by induction on $\varphi$, the atomic case being trivial.
The situation, though, is somewhat different in set theory. Say that $a=b\Vdash x\in y$ and $b=c\Vdash x\in y$. All we know is that for some $u,v\in\mathrm{V_{ex}}(A)$ we have that $\pair{(a)_0,(b)_0,u}, \pair{(b)_0,(c)_0,v}\in y$, $(a)_1=(b)_1\Vdash x=u$, and $(b)_1=(c)_1\Vdash x=v$. Since $u$ and $v$ need not be the same set, even if elements of $\mathrm{V_{ex}}(A)$ behave as expected, that is, $\{(a,b)\colon \pair{a,b,y}\in x\}$ is symmetric and transitive for any given $x,y\in\mathrm{V_{ex}}(A)$,\footnote{One could inductively define $\mathrm{V_{ex}}(A)$ so as to make $\{(a,b)\in A^2\colon \pair{a,b,y}\in x\}$
symmetric and transitive. Just let $x\in \mathrm{V_{ex}}(A)$ if and only if
\begin{itemize}
\item $x$ consists of triples $\pair{a,b,y}$ with $y\in \mathrm{V_{ex}}(A)$;
\item whenever $\pair{a,b,y}\in x$, $\pair{b,a,y}\in x$;
\item whenever $\pair{a,b,y}\in x$ and $\pair{b,c,y}\in x$, also $\pair{a,c,y}\in x$.
\end{itemize}} we cannot conclude that $a=c\Vdash x\in y$. So, transitivity can fail.
As it turns out, for our purposes, this is not an issue at all. Note however that the canonical names for objects of finite type do indeed behave as desired and so does the relation $a=b\Vdash \varphi$ for formulas of finite type arithmetic. This is in fact key in validating the axiom of choice in all finite types (Section \ref{finite type}). Except for this deviation, the clauses for connectives and quantifiers follow the general blueprint of extensional realizability. We just feel justified in keeping the notation $a=b\Vdash \varphi$.
\end{remark}
\section{Soundness for intuitionistic first order logic with equality}
From now on, let $A$ be a pca over $\omega$ within ${\sf CZF}$. Realizability of the equality axioms relies on the following fact about pca's.
\begin{lemma}[Double recursion theorem]
There are combinators $\mb g$ and $\mb h$ such that, for all $a,b,c\in A$:
\begin{itemize}
\item $\mb gab\downarrow$ and $\mb hab\downarrow$;
\item $\mb gabc\simeq a(\mb hab)c$;
\item $\mb habc\simeq b(\mb gab)c$.
\end{itemize}
\end{lemma}
\begin{proof}
Let $t(a,b):=\lambda xc.a(\lambda c.bxc)c$. Set $\mb g:=\lambda ab.\mb ft(a,b)$, where $\mb f$ is the fixed point operator from the recursion theorem. Set $\mb h:=\lambda abc.b(\mb ft(a,b))c$. Verify that $\mb g$ and $\mb h$ are as desired.
\end{proof}
\begin{lemma}\label{equality}
There are closed application terms $\mb{i_r}$, $\mb{i_s}$, $\mb{i_t}$, $\mb{i_0}$ and $\mb{i_1}$ such that ${\sf CZF}$ proves, for all $x,y,z\in\mathrm{V_{ex}}(A)$,
\begin{enumerate}[\quad $(1)$]
\item $\mb{i_r}\Vdash x=x$;
\item $\mb{i_s}\Vdash x=y\rightarrow y=x$;
\item $\mb{i_t}\Vdash x=y\land y=z\rightarrow x=z$;
\item $\mb{i_0}\Vdash x=y\land y\in z\rightarrow x\in z$;
\item $\mb{i_1}\Vdash x=y\land z\in x\rightarrow z\in y$.
\end{enumerate}
\end{lemma}
\begin{notation} Write, say, $a_{ij}$ for $\mb{p_j}(\mb{p_i} a)$.
\end{notation}
\begin{proof}
(1) By the recursion theorem in $A$, we can find $\mb{i_r}$ such that
\[ \mb{i_r}a\simeq \mb p(\mb p a\mb{i_r})(\mb p a\mb{i_r}). \]
By set induction, we show that $\mb{i_r}\Vdash x=x$ for every $x\in\mathrm{V_{ex}}(A)$. Let $\pair{a,b,y}\in x$. We want $(\mb{i_r}a)_0=(\mb{i_r}b)_0\Vdash y\in x$. Now $(\mb{i_r}a)_{00}\simeq a$ and similarly for $b$. On the other hand, $(\mb{i_r}a)_{01}\simeq(\mb{i_r}b)_{01}\simeq \mb{i_r}$. By induction, $\mb{i_r}\Vdash y=y$, and so we are done. Similarly for $(\mb{i_r}a)_1=(\mb{i_r}b)_1\Vdash y\in x$.
(2) We just need to interchange. Let
\[ \mb{i_s}:=\lambda ac.\mb p (ac)_1(ac)_0. \]
Suppose $a=b\Vdash x=y$. We want $\mb{i_s}a=\mb{i_s}b\Vdash y=x$. Let $\pair{c,d,z}\in y$. By definition, $(ac)_1=(bd)_1\Vdash z\in x$. Now $(ac)_1\simeq (\mb{i_s}ac)_0$, and similarly $(bd)_1\simeq (\mb{i_s}bd)_0$. Then we are done. Similarly for the other direction.
(3,4) Combinators $\mb{i_t}$ and $\mb{i_0}$ are defined by a double recursion in $A$. By induction on triples $\pair{x,y,z}$, one then shows that $\mb{i_t}\Vdash x=y\land y=z\rightarrow x=z$ and $\mb{i_0}\Vdash x=y\land y\in z\rightarrow x\in z$. Eventually, $\mb {i_t}$ and $\mb {i_0}$ are solutions of equations of the form
\begin{align*}
\mb {i_t}a& \simeq \mb t \mb {i_0}a, \\
\mb {i_0}a&\simeq \mb r\mb {i_t}a,
\end{align*}
where $\mb t$ and $\mb r$ are given closed application terms. These are given by the fixed point operators from the double recursion theorem.
(5) Set
\[ \mb{i_1}:=\lambda a.\mb p (a_0a_{10})_{00}(\mb{i_t}(\mb p a_{11}(a_0a_{10})_{01})). \]
\end{proof}
\begin{theorem}\label{int sound}
For every formula $\varphi(x_1,\ldots,x_n)$ provable in intuitionistic first order logic with equality, there exists a closed application term $\mb e$ such that ${\sf CZF}$ proves $\mb e\Vdash \forall x_1\cdots \forall x_n\, \varphi(x_1,\ldots,x_n)$.
\end{theorem}
\begin{proof}
The proof is similar to \cite[5.3]{M84} and \cite[4.3]{R06}.
\end{proof}
\section{Soundness for ${\sf CZF}$}
We start with a lemma concerning bounded separation.
\begin{lemma}[${\sf CZF}$]\label{bounded}
Let $\varphi(u)$ be a bounded formula with parameters from $\mathrm{V_{ex}}(A)$ and $x\subseteq\mathrm{V_{ex}}(A)$.
Then
\[ \{\pair{a,b,u}\colon a,b\in A\land u\in x\land a=b\Vdash \varphi(u)\} \]
is a set.
\end{lemma}
\begin{proof}
As in \cite[Lemma 4.5, Lemma 4.6, Corollary 4.7]{R06}.
\end{proof}
\begin{theorem}\label{czf sound}
For every theorem $\varphi$ of ${\sf CZF}$, there is a closed application term $\mb e$ such that ${\sf CZF}$ proves $\mb e\Vdash \varphi$.
\end{theorem}
\begin{proof}
In view of Theorem \ref{int sound}, it is sufficient to show that every axiom of ${\sf CZF}$ has a realizer. The proof is similar to that of \cite[Theorem 5.1]{R06}. The rationale is simple: use the same realizers, duplicate the names. Remember that $\mb e\Vdash\varphi$ means $\mb e=\mb e\Vdash\varphi$.
\textbf{Extensionality}. Let $x,y\in\mathrm{V_{ex}}(A)$. Suppose $a=b\Vdash z\in x\leftrightarrow z\in y$ for all $z\in\mathrm{V_{ex}}(A)$. We look for $\mb e$ such that $\mb ea=\mb eb\Vdash x=y$. Set
\[ \mb e:=\lambda ac.\mb p(a_0(\mb p c\mb{i_r}))(a_1(\mb p c\mb{i_r})). \]
Suppose $\pair{c,d,z}\in x$. Then $\mb p c\mb{i_r}=\mb p d\mb{i_r}\Vdash z\in x$, since $\mb{i_r}\Vdash z=z$. Then $a_0(\mb p c\mb{i_r})=b_0(\mb p d\mb{i_r})\Vdash z\in y$. Therefore, $(\mb eac)_0=(\mb ebd)_0\Vdash z\in y$, as desired. The other direction is similar.\\
\textbf{Pairing}. Find $\mb e$ such that for all $x,y\in \mathrm{V_{ex}}(A)$,
\[ \mb e\Vdash x\in z\land y\in z, \]
for some $z\in \mathrm{V_{ex}}(A)$. Let $x,y\in\mathrm{V_{ex}}(A)$ be given. Define $z=\{\pair{\mb 0,\mb 0,x},\pair{\mb 0,\mb 0,y}\}$.
Let
\[ \mb e=\mb p (\mb p \mb 0\mb{i_r})(\mb p \mb 0\mb{i_r}). \]
\textbf{Union}. Find $\mb e$ such that for all $x\in \mathrm{V_{ex}}(A)$,
\[ \mb e\Vdash \forall u\in x\, \forall v\in u\, (v\in y), \]
for some $y\in \mathrm{V_{ex}}(A)$. Given $x\in\mathrm{V_{ex}}(A)$, let $y=\{\pair{c,d,v}\colon \exists \pair{a,b,u}\in x\, (\pair{c,d,v}\in u)\}$. Set $\mb e:=\lambda ac.\mb p c\mb{i_r}$.\\
\textbf{Infinity}. Let $\dot\omega=\{\pair{\bar n,\bar n,\dot n}\colon n\in\omega\}$, where $\dot n=\{\pair{\bar m,\bar m, \dot m}\colon m<n\}$. Let us find $\mb e$ such that for all $y\in\mathrm{V_{ex}}(A)$,
\[ \mb e\Vdash y\in \dot \omega\leftrightarrow y=0\lor \exists z\in \dot\omega\, (y=z\cup\{z\}). \]
Recall that $y=0$ stands for $\forall x\in y\, \neg (x=x)$ and $y=z\cup\{z\}$ stands for $\forall x\in y\, (x\in z\lor x=z)\land (\forall x\in z\, (x\in y)\land z\in y)$.
Let $\vartheta(y):=y=0\lor \exists z\in \dot\omega\, (y=z\cup\{z\})$. We want $\mb e$ such that for every $y\in\mathrm{V_{ex}}(A)$
\[ \tag{1} \mb e_0\Vdash y\in\dot\omega\rightarrow \vartheta(y), \]
\[ \tag{2} \mb e_1\Vdash \vartheta(y)\rightarrow y\in\dot\omega. \]
Let us first consider (1). Suppose $a=b\Vdash y\in\dot \omega$. We want $\mb e_0a=\mb e_0b\Vdash \vartheta(y)$.
By definition, there is $n\in\omega$ such that $a_0\simeq b_0\simeq \bar n$ and $a_1=b_1\Vdash y=\dot n$.
Case $n=0$. Then $\mb 0\Vdash y=0$, and so $\mb p\mb 0\mb 0\Vdash \vartheta(y)$.
Case $n>0$. We have $\pred a_0\simeq \pred b_0\simeq \bar m$ with $n=m+1$. We aim for a term $t(x)$ such that $t(a)=t(b)\Vdash \exists z\in\dot\omega\, (y=z\cup\{z\})$ by requiring
\[ t(a)_0\simeq t(b)_0\simeq \bar m, \]
\[ \tag{3} t(a)_1=t(b)_1\Vdash y=\dot m\cup\{\dot m\}. \]
If we succeed, then
\[ \mb p\mb 1 t(a)=\mb p\mb 1t(b)\Vdash \vartheta(y). \]
Now, (3) amounts to
\begin{align}
\tag{4} t(a)_{10}=t(b)_{10}&\Vdash \forall x\in y\, (x\in \dot m\lor x=\dot m)\\
\tag{5} t(a)_{110}=t(b)_{110}&\Vdash \forall x\in \dot m\, (x\in y)\\
\tag{6} t(a)_{111}=t(b)_{111}&\Vdash \dot m\in y
\end{align}
Part (4). Let $\pair{c,d,x}\in y$. Then $(a_1c)_0=(b_1d)_0\Vdash x\in \dot n$, that is,
\[ \pair{(a_1c)_{00},(b_1d)_{00},\dot k}\in \dot n, \]
\[ (a_1c)_{01}=(b_1d)_{01}\Vdash x=\dot k, \]
where $(a_1c)_{00}\simeq (b_1d)_{00}\simeq \bar k$. Here we have two more cases. If $k=m$, then
\[ \mb p\mb 1 (a_1c)_{01}=\mb p\mb 1(b_1d)_{01}\Vdash x\in \dot m\lor x=\dot m. \]
If $k<m$, then $\pair{\bar k,\bar k,\dot k}\in\dot m$ and $\mb p\bar k(a_1c)_{01}=\mb p\bar k(b_1d)_{01}\Vdash x\in \dot m$, so that
\[ \mb p\mb 0(\mb p\bar k(a_1c)_{01})=\mb p\mb 0(\mb p\bar k(b_1d)_{01}) \Vdash x\in\dot m\lor x=\dot m. \]
Then $t(a)$ such that
\[ t(a)_{10}\simeq \lambda c.\mb d(a_1c)_{00}(\pred a_0)(\mb p\mb 1(a_1c)_{01})(\mb p\mb 0(a_1c)_0)\]
is as desired.
Parts (5) and (6). Let $t(a)$ satisfy
\[ t(a)_{110}\simeq \lambda x.(a_1x)_1, \]
\[ t(a)_{111}\simeq (a_1(\pred a_0))_1. \]
We want $\mb e$ such that
\[ \mb e_0\simeq\lambda a.\mb d\mb 0a_0(\mb p\mb 0\mb 0)(\mb p\mb 1t(a)). \]
Then $\mb e_0$ does the job.
As for (2), suppose $a=b\Vdash \vartheta(y)$. We want $\mb e_1a=\mb e_1b\Vdash y\in \dot \omega$. By unravelling the definitions, we obtain two cases.
(i) $a_0\simeq b_0\simeq \mb 0$ and $a_1=b_1\Vdash y=0$. It follows that $y=\dot 0$ and so $\mb{i_r}\Vdash y=\dot 0$. Therefore $\mb p a_0\mb{i_r}=\mb p b_0\mb{i_r}\Vdash y\in\dot\omega$, as $\pair{\mb 0,\mb 0,\dot 0}\in\dot\omega$.
(ii) $a_0\simeq b_0\simeq \mb 1$ and $a_1=b_1\Vdash \exists z\in\dot \omega\, (y=z\cup\{z\})$.
Then there exists $m\in\omega$ such that $a_{10}\simeq b_{10}\simeq \bar m$ and
\[ \tag{7} a_{11}=b_{11}\Vdash y=\dot m\cup\{\dot m\}. \]
We aim for a term $s(x)$ such that $s(a)=s(b)\Vdash y=\dot{n}$, where $n=m+1$. If we succeed, then
\[ \mb p(\succe a_{10})s(a)=\mb p(\succe b_{10})s(b) \Vdash y\in\dot\omega. \]
Note in fact that $\succe a_{10}\simeq \succe b_{10}\simeq \bar n$.
For the left to right inclusion, suppose $\pair{c,d,x}\in y$. Our goal is $(s(a)c)_0=(s(b)d)_0\Vdash x\in\dot n$. It follows from (7) that
\[ a_{110}=b_{110}\Vdash \forall x\in y\, (x\in\dot m\lor x=\dot m), \]
and therefore
\[ \tag{8} a_{110}c=b_{110}d\Vdash x\in\dot m\lor x=\dot m. \]
From (8) we get two more cases. First case: $(a_{110}c)_0\simeq (b_{110}d)_0\simeq \mb 0$ and $(a_{110}c)_1=(b_{110}d)_1\Vdash x\in\dot m$. Then one can verify that
\[ (a_{110}c)_1=(b_{110}d)_1\Vdash x\in\dot n. \]
Second case: $(a_{110}c)_0\simeq (b_{110}d)_0\simeq \mb 1$ and $(a_{110}c)_1=(b_{110}d)_1\Vdash x=\dot m$. Then
\[ \mb p\bar m(a_{110}c)_1=\mb p\bar m(b_{110}d)_1\Vdash x\in \dot n. \]
Let $s(x)$ be such that
\[ (s(a)c)_0\simeq \mb d\mb 0(a_{110}c)_0(a_{110}c)_1(\mb p a_{10}(a_{110}c)_1). \]
For the right to left inclusion, suppose $k<n$. Our goal is $(s(a)\bar k)_1=(s(b)\bar k)_1\Vdash \dot k\in y$. It follows from (7) that
\begin{align*}
\tag{9} a_{1110}=b_{1110}&\Vdash \forall x\in\dot m\, (x\in y), \\
\tag{10} a_{1111}=b_{1111}&\Vdash \dot m\in y.
\end{align*}
If $k<m$, then $\pair{\bar k,\bar k, \dot k}\in \dot m$, and hence $a_{1110}\bar k=b_{1110}\bar k\Vdash \dot k\in y$ by (9). On the other hand, if $k=m$ then (10) gives us the realizers. Therefore let $s(x)$ be such that
\[ (s(a)\bar k)_1\simeq \mb d \bar ka_{10}a_{1111}(a_{1110}\bar k). \]
We thus want $\mb e$ such that
\[ \mb e_1\simeq \lambda a.\mb d\mb 0a_0(\mb pa_0\mb{i_r})(\mb p(\succe a_{10})s(a)).\]
Then $\mb e_1$ does the job. \\
\textbf{Set induction}. By the recursion theorem, let $\mb e$ be such that $\mb ea\simeq a(\lambda c.\mb ea)$.
Prove that
\[ \mb e\Vdash \forall x\, (\forall y\in x\, \varphi(y)\rightarrow \varphi(x))\rightarrow \forall x\, \varphi(x). \]
Let $a=b\Vdash \forall x\, (\forall y\in x\, \varphi(y)\rightarrow \varphi(x))$. By definition, $a=b\Vdash \forall y\in x\, \varphi(y)\rightarrow \varphi(x)$ for every $x\in\mathrm{V_{ex}}(A)$. By set induction, we show that $\mb ea=\mb eb\Vdash \varphi(x)$ for every $x\in\mathrm{V_{ex}}(A)$. Assume by induction that $\mb ea=\mb eb\Vdash \varphi(y)$ for every $\pair{c,d,y}\in x$. This means that $\lambda c.\mb ea=\lambda d.\mb eb\Vdash \forall y\in x\, \varphi(y)$. Then $a(\lambda c.\mb ea)=b(\lambda d.\mb eb)\Vdash \varphi(x)$. The conclusion $\mb ea=\mb eb\Vdash \varphi(x)$ follows. \\
\textbf{Bounded separation}. Find $\mb e$ such that for all $x\in\mathrm{V_{ex}}(A)$,
\[ \mb e\Vdash \forall u\in y\, (u\in x\land \varphi(u))\land \forall u\in x\, (\varphi(u)\rightarrow u\in y), \]
for some $y\in\mathrm{V_{ex}}(A)$. Given $x\in\mathrm{V_{ex}}(A)$, let
\[ y=\{ \pair{\mb p ac,\mb p bd,u}\colon \pair{a,b,u}\in x\land c=d\Vdash \varphi(u)\}. \]
It follows from Lemma \ref{bounded} that $y$ is a set. Moreover, $y$ belongs to $\mathrm{V_{ex}}(A)$. We want $\mb e$ such that
\begin{align*}
\mb e_0&\Vdash \forall u\in y\, (u\in x\land \varphi(u)),\\
\mb e_1&\Vdash \forall u\in x\, (\varphi(u)\rightarrow u\in y).
\end{align*}
By letting $\mb e=\mb p e_0e_1$, where
\begin{align*}
e_0&:=\lambda f.\mb p(\mb p f_0\mb{i_r})f_1, \\
e_1&:= \lambda ac.\mb p(\mb pac)\mb{i_r},
\end{align*}
one verifies that $\mb e$ is as desired. \\
\textbf{Strong Collection}. Set $\mb e:=\lambda a.\mb p(\lambda c.\mb p c(ac))(\lambda c.\mb p c(ac))$.
Let $a=b\Vdash \forall u\in x\, \exists v\, \varphi(u,v)$. By strong collection, we can find a set $y$ such that
\begin{itemize}
\item $\forall \pair{c,d,u}\in x\, \exists v\in\mathrm{V_{ex}}(A)\, (\pair{c,d,v}\in y\land ac=bd\Vdash \varphi(u,v))$, and
\item $\forall z\in y\, \exists \pair{c,d,u}\in x\, \exists v\in\mathrm{V_{ex}}(A)\, (z=\pair{c,d,v}\land ac=bd\Vdash \varphi(u,v))$.
\end{itemize}
In particular, $y\in\mathrm{V_{ex}}(A)$. Show that
\[ \mb ea=\mb eb\Vdash \forall u\in x\, \exists v\in y\, \varphi(u,v)\land \forall v\in y\, \exists u\in x\, \varphi(u,v). \]
\textbf{Subset collection}. We look for $\mb e$ such that for all $x,y\in\mathrm{V_{ex}}(A)$ there is a $z\in\mathrm{V_{ex}}(A)$ such that for all $p\in\mathrm{V_{ex}}(A)$
\[ \mb e\Vdash \forall u\in x\, \exists v\in y\, \varphi(u,v,p)\rightarrow \exists q\in z\, \psi(x,q,p), \]
where
\[ \psi(x,q,p):= \forall u\in x\, \exists v\in q\, \varphi(u,v,p)\land \forall v\in q\, \exists u\in x\, \varphi(u,v,p). \]
Form the set $y'=\{\pair{f,g,v}\colon f,g\in A\land \exists i,j\in A\, \pair{i,j,v}\in y\}$. By subset collection, we can find a set $z'$ such that for all $a,b,p$, if
\[ \tag{11} \forall \pair{c,d,u}\in x\, \exists \pair{\mb p ac,\mb p bd,v}\in y'\, (ac)_1=(bd)_1\Vdash \varphi(u,v,p), \]
then there is a $q\in z'$ such that
\[ \tag{12} \forall \pair{c,d,u}\in x\, \exists w\in q\, \vartheta\land \forall w\in q\, \exists \pair{c,d,u}\in x\, \vartheta, \]
where $\vartheta=\vartheta(c,d,u,w;a,b,p)$ is
\[\exists v\, (w=\pair{\mb p ac,\mb p bd,v}\land (ac)_1=(bd)_1\Vdash \varphi(u,v,p)). \]
Note that the $q\in z'$ asserted to exist is a subset of $y'$ and so $q\in\mathrm{V_{ex}}(A)$. On the other hand, there might be $q\in z'$ that are not in $\mathrm{V_{ex}}(A)$, and hence $z'$ need not be a subset of $\mathrm{V_{ex}}(A)$. Let $z''=\{q\cap y'\colon q\in z'\}$. Now, $z''\subseteq\mathrm{V_{ex}}(A)$. Finally, set
\[ z=\{\pair{\mb 0,\mb 0, q}\colon q\in z''\}. \]
Then $z\in\mathrm{V_{ex}}(A)$. It remains to find $\mb e$. Let $p\in\mathrm{V_{ex}}(A)$ and suppose
\[ \tag{13} a=b\Vdash \forall u\in x\, \exists v\in y\, \varphi(u,v,p). \] We would like to have
\[ \mb ea=\mb eb\Vdash \exists q\in z\, \psi(x,q,p). \]
By definition of $z$, we let $(\mb ea)_0\simeq \mb 0$ and we look for a $q\in z''$ such that $(\mb ea)_1=(\mb eb)_1\Vdash \psi(x,q,p)$, that is,
\begin{align*}
(\mb ea)_{10}=(\mb eb)_{10}&\Vdash \forall u\in x\, \exists v\in q\, \varphi(u,v,p), \\
(\mb ea)_{11}=(\mb eb)_{11}&\Vdash \forall v\in q\, \exists u\in x\, \varphi(u,v,p).
\end{align*}
By (13) one can see that the parameters $a,b,p$ satisfy (11). Let $q\in z'$ be as in (12). We have already noticed that $q\in z''$. Let $\mb e$ be such that
\begin{align*}
(\mb ea)_{10}&\simeq \lambda c.\mb p(\mb pac)(ac)_1, \\
(\mb ea)_{11}&\simeq \lambda f. \mb pf_1(f_0f_1)_1.
\end{align*}
One can verify that $\mb e$ is as desired.
\end{proof}
\section{Realizing the axiom of choice in all finite types}\label{finite type}
We will make use of certain canonical names for pairs in $\mathrm{V_{ex}}(A)$.
\begin{definition}[Internal pairing]
For $x,y\in\mathrm{V_{ex}}(A)$, let
\[ \vset{x}=\{\pair{\mb 0,\mb 0,x}\}, \]
\[ \vset{x,y}=\{\pair{\mb 0,\mb 0,x},\pair{\mb 1,\mb 1,y}\}, \]
\[ \vpair{x,y}=\{\pair{\mb 0,\mb 0,\vset{x}}, \pair{\mb 1,\mb 1,\vset{x,y}}\}. \]
Note that all these sets are in $\mathrm{V_{ex}}(A)$.
\end{definition}
Below we shall use $\mathrm{UP}(x,y,z)$ and $\mathrm{OP}(x,y,z)$ as abbreviations for the set-theoretic formulae expressing, respectively, that $z$ is the unordered pair of $x$ and $y$ (in standard notation, $z=\{x,y\}$) and $z$ is the ordered pair of $x$ and $y$ (in standard notation, $z=\pair{x,y}$). E.g., $\mathrm{UP}(x,y,z)$ stands for $x\in z\land y\in z\land \forall u\in z\, (u=x\lor u=y)$. Similarly, one can pick a suitable rendering of $\mathrm{OP}(x,y,z)$ according to the definition of ordered pair $\pair{x,y}:=\{\{x\},\{x,y\}\}$.
\begin{lemma}\label{pairs}
There are closed application terms $\mb{u_0}$, $\mb{u_1}$, $\mb v$, $\mb w$, $\mb z$ such that for all $x,y\in \mathrm{V_{ex}}(A)$
\begin{align*}
\mb{u_0}&\Vdash \mathrm{UP}(x,x,\vset{x}), \\
\mb{u_1}&\Vdash \mathrm{UP}(x,y,\vset{x,y}), \\
\mb v &\Vdash \mathrm{OP}(x,y, \vpair{x,y}), \\
\mb w& \Vdash \vpair{x,y}=\vpair{u,v}\rightarrow x=u\land y=v,\\
\mb z&\Vdash \mathrm{OP}(x,y,z) \rightarrow z=\vpair{x,y}.
\end{align*}
\end{lemma}
\begin{proof}
This is similar to \cite[3.2, 3.4]{M84}.
\end{proof}
We now build a copy of the hereditarily effective operations relative to a pca $A$.
\begin{definition}[$\mathsf{HEO}_A$] Let $A$ be a pca over $\omega$ with map $n\mapsto \bar n$ from $\omega$ to $A$. For any finite type $\sigma$, we define $a=_\sigma b$ with $a,b\in A$ by letting:
\begin{itemize}
\item $a=_0 b$ iff there is $n\in\omega$ such that $a=b=\bar n$;
\item $a=_{\sigma\tau} b$ iff for every $c=_{\sigma}d$ we have $ac=_\tau bd$.
\end{itemize}
Let $A_\sigma=\{a\in A\colon a=_\sigma a\}$.
\end{definition}
\begin{lemma}
For any type $\sigma$, and for all $a,b,c\in A$:
\begin{itemize}
\item if $a=_\sigma b$ and $b=_\sigma c$, then $a=_\sigma a$, $b=_\sigma a$, and $a=_\sigma c$.
\end{itemize}
It thus follows that
$A_\sigma=\bigcup_{b\in A}\{a\in A\colon a=_\sigma b\}=\bigcup_{a\in A}\{b\in A\colon a=_\sigma b\}$ and $=_\sigma$ is an equivalence relation on $A_\sigma$.
\end{lemma}
\begin{proof}
By induction on the type.
\end{proof}
\begin{definition}[Internalization of objects of finite type]
For $a\in A_\sigma$, we define $\tuep a\sigma\in\mathrm{V_{ex}}(A)$ as follows:
\begin{itemize}
\item if $a=\bar n$, let $\tuep ao =\{ \pair {\bar m,\bar m,\tuep{\bar{m}}o}\colon m<n\}$;
\item if $a\in A_{\sigma\tau}$, let $\tuep a{\sigma\tau}=\{ \pair{c,d,\vpair{\tuep c\sigma,\tuep e\tau}} \colon c=_\sigma d\text{ and } ac\simeq e\}$.
\end{itemize}
Finally, for any finite type $\sigma$, let
\[ \dot F_\sigma=\{ \pair{a,b,\tuep a{\sigma}}\colon a=_\sigma b\} \]
be our name for $F_\sigma$.
\end{definition}
Note that $\dot F_o=\dot \omega$, where $\dot\omega$ is the name for $\omega$ used to realize the infinity axiom in the proof of Theorem \ref{czf sound}. \\
\begin{notation} Write $\Vdash \varphi$ for $\exists a, b\in A\, (a=b\Vdash \varphi)$.
\end{notation}
\begin{lemma}[Absoluteness and uniqueness up to extensional equality]\label{abs}
For all $a,b\in A_\sigma$,
\begin{itemize}
\item $\Vdash \tuep a\sigma=\tuep b\sigma$ implies $a=_\sigma b$,
\item $a=_\sigma b$ implies $\tuep a\sigma=\tuep b\sigma$.
\end{itemize}
\end{lemma}
\begin{proof}
By induction on the type.
Type $o$. Let $a=\bar n$ and $b=\bar m$ with $n,m\in\omega$. Suppose $\Vdash \tuep a{o}=\tuep bo$. By a double arithmetical induction one shows $n=m$. The second part is obvious as $a=_ob$ implies $a=b$.
Type $\sigma\tau$. Let $a,b\in A_{\sigma\tau}$. Suppose $\Vdash \tuep a{\sigma\tau}=\tuep b{\sigma\tau}$. The aim is to show that $a=_{\sigma\tau} b$.
Let $c\in A_{\sigma}$ and $ac\simeq e$. Then $\Vdash \vpair{\tuep c\sigma,\tuep e\tau}\in \tuep a{\sigma\tau}$ and hence $\Vdash \vpair{\tuep c\sigma,\tuep e\tau}\in \tuep b{\sigma\tau}$.
From the latter we infer that there exist $c_0\in A_{\sigma}$ and $e_0\in A_{\tau}$ such that $bc_0\simeq e_0$ and $\Vdash \vpair{\tuep c\sigma,\tuep e\tau}=\vpair{\tuep {c_0}\sigma,\tuep {e_0}\tau}$. By the properties of internal pairing, we obtain $\Vdash \tuep c\sigma=\tuep {c_0}\sigma\;\wedge\; \tuep e\tau = \tuep {e_0}\tau$ giving
$c=_{\sigma}c_0$ and $e=_{\tau} e_0$ by the induction hypothesis. Whence $ac=_{\tau}bc_0=_{\tau}bc$ as $b \in A_{\sigma\tau}$. As a result one has
$ac=_{\tau}bd$ whenever $c=_{\sigma}d$, yielding $a=_{\sigma\tau}b$.
For the second part, suppose $a=_{\sigma\tau} b$. An element of $\tuep a{\sigma\tau}$ is of the form $\pair{c,d,\vpair{\tuep c{\sigma},\tuep e\tau}}$ where $c=_{\sigma}d$ and $ac\simeq e$. Let $e_0\simeq bc$. As $ac=_{\tau}bc$ the induction hypothesis yields $\tuep e\tau =\tuep {e_0}\tau$, and hence
$\pair{c,d,\vpair{\tuep c{\sigma},\tuep e\tau}}=\pair{c,d,\vpair{\tuep c{\sigma},\tuep {e_0}\tau}}\in \tuep b{\sigma\tau}$, showing $\tuep a{\sigma\tau}\subseteq\tuep b{\sigma\tau}$.
Owing to the symmetry of the argument, we can conclude that $\tuep a{\sigma\tau}=\tuep b{\sigma\tau}$.
\end{proof}
\begin{theorem}[Choice]\label{choice}
There exists a closed application term $\mb e$ such that ${\sf CZF}$ proves
\[ \mb e\Vdash \forall x\in \dot F_\sigma\, \exists y\in \dot F_\tau\, \varphi(x,y)\rightarrow \exists f\colon \dot F_\sigma\to \dot F_\tau\, \forall x\in \dot F_\sigma\, \varphi(x,f(x)), \]
for all finite types $\sigma$ and $\tau$ and for every formula $\varphi$.
\end{theorem}
\begin{proof}
Suppose $a=b\Vdash \forall x\in \dot F_\sigma\, \exists y\in \dot F_\tau\, \varphi(x,y)$. By definition, this means that for every $\pair{c,d,\tuep c\sigma}\in \dot F_\sigma$ we have
\[ \tag{1} \pair{(ac)_0,(bd)_0,\tuep e\tau}\in \dot F_\tau, \]
\[ \tag{2} (ac)_1=(bd)_1\Vdash \varphi(\tuep c\sigma,\tuep e\tau), \]
where $e\simeq (ac)_0$.
Let
\[ f=\{\pair{c,d,\vpair{\tuep c\sigma,\tuep e\tau}}\colon c=_\sigma d\land e\simeq (ac)_0\}. \]
Note that $c=_\sigma d$ implies $(ac)_0\downarrow$ by (1).
Below we shall use $z=\langle x,y\rangle$ as a somewhat sloppy abbreviation for $\mathrm{OP}(x,y,z)$. We look for an $\mb e$ such that
\[ \tag{3} (\mb ea)_{0}=(\mb eb)_0\Vdash \forall z\in f\, \exists x\in \dot F_\sigma\, \exists y\in \dot F_\tau\, (z=\pair{x,y}), \]
\[\tag{4} (\mb ea)_{10}=(\mb e b)_{10}\Vdash \forall x\in \dot F_\sigma\, \exists y\in \dot F_\tau\, \exists z\in f\, (z=\pair{x,y}\land \varphi(x,y)), \]
\[ \tag{5} (\mb ea)_{11}=(\mb eb)_{11}\Vdash \forall z_0\in f\, \forall z_1\in f\, \forall x,y_0,y_1\, (z_0=\pair{x,y_0}\land z_1=\pair{x,y_1}\rightarrow y_0=y_1). \]
First, note that $\lambda c.(ac)_0=_{\sigma\tau}\lambda d.(bd)_0$. This follows from (1). In fact, $c=_\sigma d$ implies $(ac)_0=_\tau (bd)_0$, for all $c,d\in A$. Moreover, since this is an equivalence relation, we have $\lambda c.(ac)_0\in A_{\sigma\tau}$.
For (3), let $\mb e$ be such that
\begin{align*}
((\mb e a)_0c)_0&\simeq c, & ((\mb e a)_0 c)_{10}&\simeq (ac)_0, \\
& & ((\mb e a)_0c)_{11}&\simeq \mb v,
\end{align*}
where $\mb v\Vdash \vpair{x,y}=\pair{x,y}$ for all $x,y\in\mathrm{V_{ex}}(A)$ as in Lemma \ref{pairs}. Let us show that any such $\mb e$ satisfies (3). Let $\pair{c,d,\vpair{\tuep c\sigma,\tuep e\tau}}\in f$, where $e\simeq (ac)_0$. We would like
\[ (\mb e a)_0c=(\mb e b)_0d\Vdash \exists x\in \dot F_\sigma\, \exists y\in \dot F_\tau\, (\vpair{\tuep c\sigma,\tuep e\tau}=\pair{x,y}). \]
Now, $\pair{c,d,\tuep c\sigma}\in \dot F_\sigma$, $c\simeq ((\mb e a)_0c)_0$, and $d\simeq ((\mb e b)_0d)_0$. Therefore, we just need to verify
\[ ((\mb ea)_0c)_1=((\mb eb)_0d)_1\Vdash \exists y\in \dot F_\tau\, \vpair{\tuep c\sigma,\tuep e\tau}=\pair{\tuep c\sigma,y}.\]
Similarly, $\pair{(ac)_0,(bd)_0,\tuep e\tau}\in \dot F_\tau$ since, as noted before, $(ac)_0=_\tau (bd)_0$. On the other hand, $(ac)_0\simeq ((\mb e a)_0 c)_{10}$ and $(bd)_0\simeq ((\mb eb)_0d)_{10}$. So we just need to show that
\[ ((\mb ea)_0c)_{11}=((\mb eb)_0d)_{11}\Vdash \vpair{\tuep c\sigma,\tuep e\tau}=\pair{\tuep c\sigma,\tuep e\tau}. \]
Now, $((\mb ea)_0c)_{11}\simeq ((\mb eb)_0d)_{11}\simeq \mb v$, and $\mb v\Vdash \vpair{\tuep c\sigma,\tuep e\tau}=\pair{\tuep c\sigma,\tuep e\tau}$. So we are done.
As for (4), let $\mb e$ be such that
\begin{align*}
((\mb ea)_{10}c)_0& \simeq (ac)_0, & ((\mb ea)_{10}c)_{10}&\simeq (ac)_0, & ((\mb ea)_{10}c)_{110}&\simeq \mb v, \\
&&&& ((\mb ea)_{10}c)_{111}&\simeq (ac)_1,
\end{align*}
where $\mb v$ is as in part (3). That $\mb e$ satisfies (4) is proved in similar fashion by using (1) and (2).
For (5), suppose $\pair{c_i,d_i,z_i}\in f$ with $z_i=\vpair{\tuep {c_i}\sigma,\tuep {e_i}\tau}$ and $e_i\simeq (ac_i)_0$, where $i=0,1$. We are looking for an $\mb e$ such that
\[ (\mb ea)_{11}c_0c_1=(\mb eb)_{11}d_0d_1\Vdash z_0=\pair{x,y_0}\land z_1=\pair{x,y_1}\rightarrow y_0=y_1, \]
for all $x,y_0,y_1\in\mathrm{V_{ex}}(A)$. Suppose
\[ \tag{6} g=h\Vdash z_0=\pair{x,y_0}\land z_1=\pair{x,y_1}. \]
We want $(\mb ea)_{11}c_0c_1g=(\mb eb)_{11}d_0d_1h\Vdash y_0=y_1$. Unravelling (6), we get
\[ g_i=h_i\Vdash \vpair{\tuep {c_i}\sigma,\tuep {e_i}\tau}=\pair{x,y_i}. \]
By Lemma \ref{pairs},
\[ \mb wg_i=\mb w h_i\Vdash \tuep {c_i}\sigma=x\land \tuep {e_i}\tau=y_i, \]
for some closed application term $\mb w$. By the realizabilty of equality, it follows that
\[ \tag{7} \Vdash \tuep {c_0}\sigma=\tuep {c_1}\sigma. \]
Also,
\[ \mb p(\mb w g_0)_1(\mb wg_1)_1=\mb p(\mb w h_0)_1(\mb wh_1)_1\Vdash \tuep {e_0}\tau=y_0\land \tuep {e_1}\tau=y_1. \]
By absoluteness, (7) implies $c_0=_\sigma c_1$. As $\lambda c.(ac)_0\in A_{\sigma\tau}$, we have $(ac_0)_0=_\tau (ac_{1})_0$, that is, $e_0=_\tau e_1$. By uniqueness, $\tuep {e_0}\tau=\tuep {e_1}\tau$. By realizability of equality, there is a closed application term $\mb i$ such that
\[ \mb i\Vdash z=y_0\land z=y_1\rightarrow y_0=y_1. \]
Therefore $\mb e$ can be chosen such that
\[ (\mb ea)_{11}c_0c_1g\simeq \mb i(\mb p(\mb w g_0)_1(\mb wg_1)_1)\]
is as required.
By $\lambda$-abstraction, one can find $\mb e$ satisfying (3), (4), and (5).
\end{proof}
\begin{theorem}[Arrow types]\label{arrow}
There exists a closed application term $\mb e$ such that ${\sf CZF}$ proves
\[ \mb e\Vdash \dot F_{\sigma\tau}= \dot F_\sigma\to \dot F_\tau, \]
for all finite types $\sigma$ and $\tau$.
\end{theorem}
\begin{proof}
We look for $\mb e$ such that
\[ \mb e_0\Vdash \Vdashrall f\in \dot F_{\sigma\tau}\, (f\colon\dot F_\sigma\to \dot F_\tau), \]
and for every $f\in \mathrm{V_{ex}}(A)$,
\[ \mb e_1\Vdash (f\colon \dot F_\sigma\to \dot F_\tau) \rightarrow f\in \dot F_{\sigma\tau}. \]
For $\mb e_0$, we need that for all $a=_{\sigma\tau} b$,
\[ \tag{1} (\mb e_0 a)_0=(\mb e_0 b)_0 \Vdash \forall z\in \tuep a{\sigma\tau}\, \exists x\in\dot F_\sigma\, \exists y\in\dot F_\tau\, (z=\pair{x,y}),\]
\[ \tag{2} (\mb e_0a)_{10}=(\mb e_0b)_{10}\Vdash \forall x\in\dot F_\sigma\, \exists y\in\dot F_\tau\, \exists z\in \tuep a{\sigma\tau}\, (z=\pair{x,y}),\]
\[ \tag{3} (\mb e_0 a)_{11}=(\mb e_0 b)_{11}\Vdash \forall z_0\in \tuep a{\sigma\tau}\, \forall z_1\in \tuep a{\sigma\tau}\, \forall x,y_0,y_1\, (z_0=\pair{x,y_0}\land z_1=\pair{x,y_1}\rightarrow y_0=y_1). \]
For (1), let $\mb e_0$ be such that
\[ (\mb e_0a)_0\simeq\lambda c.\mb p c(\mb p (ac)\mb v), \]
where $\mb v\Vdash \vpair{x,y}=\pair{x,y}$ for all $x,y\in \mathrm{V_{ex}}(A)$ as in Lemma \ref{pairs}.
Let us verify that $\mb e_0$ does the job. Let $a=_{\sigma\tau}b$.
We want to show
\[ \lambda c.\mb p c(\mb p (ac)\mb v)=\lambda d.\mb p d(\mb p (bd)\mb v)\Vdash \forall z\in \tuep a{\sigma\tau}\, \exists x\in\dot F_\sigma\, \exists y\in\dot F_\tau\, (z=\pair{x,y}). \]
Let $\pair{c,d,\vpair{ \tuep c{\sigma}, \tuep e{\tau}}}\in \tuep a{\sigma\tau}$, where $c=_\sigma d$ and $ac\simeq e$. We want
\[ \mb p c(\mb p (ac)\mb v)=\mb p d(\mb p (bd)\mb v)\Vdash \exists x\in \dot F_\sigma\, \exists y\in\dot F_\tau\, (\vpair{ \tuep c{\sigma}, \tuep e{\tau}}=\pair{x,y}). \]
By definition, $\pair{c,d, \tuep c{\sigma}}\in\dot F_\sigma$. Let us check that
\[ \mb p (ac)\mb v=\mb p (bd)\mb v\Vdash \exists y\in\dot F_\tau\, (\vpair{ \tuep c{\sigma}, \tuep e{\tau}}=\pair{ \tuep c{\sigma},y}). \]
We have $ac=_\tau bd$ and hence $\pair{ac,bd, \tuep e{\tau}}\in \dot F_\tau$.
Finally,
\[ \mb v\Vdash \vpair{ \tuep c{\sigma}, \tuep e{\tau}}=\pair{ \tuep c{\sigma}, \tuep e{\tau}}. \]
For (2), let $\mb e_0$ be such that
\[ (\mb e_0a)_{10}\simeq\lambda x.\mb p (ax)(\mb p x\mb v), \]
where $\mb v$ is as above.
For (3), let $\mb e_0$ be such that
\[ (\mb e_0 a)_{11}c_0c_1g\simeq \mb i (\mb p(\mb w g_0)_1(\mb wg_1)_1),\]
where $\mb w$ and $\mb i$ are as in the proof of Theorem \ref{choice}. \\
As for $\mb e_1$, suppose that $f\in\mathrm{V_{ex}}(A)$ and
\[ a=b\Vdash f\colon\dot F_\sigma\to\dot F_\tau. \]
Then
\[ \tag{4} a_0=b_0\Vdash \forall z\in f\, \exists x\in\dot F_\sigma\, \exists y\in\dot F_\tau\, (z=\pair{x,y}), \]
\[ \tag{5} a_{10}=b_{10}\Vdash \forall x\in\dot F_\sigma\, \exists y\in\dot F_\tau\, \exists z\in f\, (z=\pair{x,y}), \]
\[ \tag{6} a_{11}=b_{11}\Vdash \forall z_0\in f\, \forall z_1\in f\, \forall x,y_0,y_1\, (z_0=\pair{x,y_0}\land z_1=\pair{x,y_1}\rightarrow y_0=y_1). \]
We aim for
\[ \mb e_1a=\mb e_1b\Vdash f\in \dot F_{\sigma\tau}. \]
As in the proof of Theorem \ref{choice}, it follows from (5) that $\lambda c.(a_{10}c)_0=_{\sigma\tau}\lambda d.(b_{10}d)_0$. Therefore
\[ \pair{\lambda c.(a_{10}c)_0,\lambda d.(b_{10}d)_0, \tuep g{\sigma\tau}}\in \dot F_{\sigma\tau}, \]
where $g:=\lambda c.(a_{10}c)_0$. We thus want $\mb e_1$ such that
\[ (\mb e_1a)_0\simeq \lambda c.(a_{10}c)_0, \]
\[ (\mb e_1a)_1=(\mb e_1b)_1\Vdash f= \tuep g{\sigma\tau}. \]
By definition and Lemma \ref{abs},
\[ \tuep g{\sigma\tau}=\{\pair{c,d,\vpair{ \tuep c{\sigma}, \tuep e{\tau}}}\colon c=_\sigma d\land (a_{10}c)_0=_\tau e\}. \]
($\subseteq$) Let $\pair{\tilde{c},\tilde d,z}\in f$. We aim for $((\mb e_1a)_1\tilde c)_0=((\mb e_1b)_1\tilde d)_0\Vdash z\in \tuep g{\sigma\tau}$. By (4), $(a_0\tilde c)_0=_\sigma (b_0\tilde d)_0$ and
\[ (a_0\tilde c)_{11}=(b_0\tilde d)_{11}\Vdash z=\pair{ \tuep c{\sigma}, \tuep e{\tau}}, \]
where $c\simeq (a_0\tilde c)_0$ and $e\simeq (a_0\tilde c)_{10}$.
By Lemma \ref{pairs}, let $\mb z$ be a closed application term such that for all $x,y,z\in\mathrm{V_{ex}}(A)$,
\[ \mb z\Vdash z=\pair{x,y}\rightarrow z=\vpair{x,y}. \]
Then
\[ \mb z(a_0\tilde c)_{11}=\mb z(b_0\tilde d)_{11}\Vdash z=\vpair{ \tuep c{\sigma}, \tuep e{\tau}}. \]
By using (5), (6) and absoluteness, one obtains $(a_{10}c)_0=_\tau e$. Let $\mb e_1$ satisfy
\begin{align*}
((\mb e_1a)_1\tilde c)_{00}& \simeq (a_0\tilde c)_0,\\
((\mb e_1a)_1\tilde c)_{01}& \simeq \mb z (a_0\tilde c)_{11}.
\end{align*}
Then $\mb e_1$ is as desired.
($\supseteq$) Let $\pair{c,d,\vpair{ \tuep c{\sigma}, \tuep e{\tau}}}\in \tuep g{\sigma\tau}$, with $e\simeq (a_{10}c)_0$. We aim for $((\mb e_1a)_1 c)_1=((\mb e_1b)_1d)_1\Vdash \vpair{ \tuep c{\sigma}, \tuep e{\tau}}\in f$.
By unravelling (5), we obtain that for some $z\in\mathrm{V_{ex}}(A)$,
\[ \pair{(a_{10}c)_{10},(b_{10}d)_{10}, z}\in f, \]
\[ (a_{10}c)_{11}=(b_{10}d)_{11}\Vdash z=\pair{ \tuep c{\sigma}, \tuep e{\tau}}. \]
Let $\mb e_1$ be such that
\[ ((\mb e_1a)_1c)_1\simeq \mb p (a_{10}c)_{10} (\mb {i_s}(\mb z(a_{10}c)_{11})), \]
where $\mb z$ is as above.
By $\lambda$-abstraction, one can find $\mb e$ satisfying the above equations.
\end{proof}
\begin{theorem}\label{choice sound}
For all finite types $\sigma$ and $\tau$ there exists a closed application term $\mb c$ such that ${\sf CZF}$ proves
\[ \mb c\Vdash \forall x^\sigma\, \exists y^\tau\, \varphi(x,y)\rightarrow \exists f^{\sigma\tau}\, \forall x^\sigma\, \varphi(x,f(x)). \]
\end{theorem}
\begin{proof}
A proof is obtained by combining Theorem \ref{choice} and Theorem \ref{arrow}.
Let
\[ \vartheta_0(z):=\text{\lq $z$ is the set of natural numbers\rq}, \]
\[ \vartheta_{\sigma\tau}(z):=\exists x\, \exists y\, (\vartheta_\sigma(x)\land \vartheta_{\tau}(y)\land z=x\to y). \]
We are claiming that for all finite types $\sigma$ and $\tau$ there exists a closed application term $\mb c_{\sigma\tau}$ such that ${\sf CZF}$ proves
\[ \mb c_{\sigma\tau}\Vdash \forall z_\sigma\, \forall z_\tau\, (\vartheta_\sigma(z_\sigma)\land \vartheta_\tau(z_\tau)\rightarrow \psi(z_\sigma,z_\tau)), \]
where $\psi(z_\sigma,z_\tau)$ is
\[ \forall x\in z_\sigma\, \exists y\in z_\tau\, \varphi(x,y)\rightarrow \exists f\colon z_\sigma\to z_\tau\, \forall x\in z_\sigma\, \varphi(x,f(x)). \]
Let $\mb e_0$ be such that $\mb e_0\Vdash \vartheta_0(\dot \omega)$. By using $\mb e_0$ and Theorem \ref{arrow}, for every finite type $\sigma$, we can find $\mb e_\sigma$ such that $\mb e_\sigma\Vdash \vartheta_\sigma(\dot F_\sigma)$. As ${\sf CZF}\vdash \vartheta_\sigma(z_0)\land \vartheta_\sigma(z_1)\rightarrow z_0=z_1$, by soundness (Theorem \ref{czf sound}) there is a $\mb u_\sigma$ such that
\[ \mb u_\sigma\Vdash \vartheta_\sigma(z_0)\land \vartheta_\sigma(z_1)\rightarrow z_0=z_1 \]
for all $z_0,z_1\in\mathrm{V_{ex}}(A)$. By soundness as well, there are $\mb i_{\sigma\tau}$ and $\mb j_{\sigma\tau}$ such that
\begin{align*}
\mb i_{\sigma\tau}&\Vdash \psi(\dot F_\sigma,\dot F_\tau)\land z_\sigma=\dot F_\sigma \rightarrow \psi(z_\sigma,\dot F_\tau), \\
\mb j_{\sigma\tau}&\Vdash \psi(z_\sigma,\dot F_\tau)\land z_\tau=\dot F_\tau\rightarrow \psi(z_\sigma,z_\tau),
\end{align*}
for all $z_\sigma,z_\tau\in \mathrm{V_{ex}}(A)$. Finally, with the aid of $\mb e_\sigma$, $\mb e_\tau$, $\mb u_\sigma$, $\mb u_\tau$, $\mb i_{\sigma\tau}$, $\mb j_{\sigma\tau}$, and of the closed application term $\mb e$ from Theorem \ref{choice}, one can construct $\mb c_{\sigma\tau}$ as desired.
\end{proof}
\begin{corollary}\label{czf choice sound}
For every theorem $\varphi$ of ${\sf CZF}+{\sf AC}_{\ft}$, there is a closed application term $\mb e$ such that ${\sf CZF}$ proves $\mb e\Vdash \varphi$. In particular, ${\sf CZF}+{\sf AC}_{\ft}$ is consistent relative to ${\sf CZF}$.
\end{corollary}
\begin{proof}
By Theorem \ref{czf sound} and Theorem \ref{choice sound}.
\end{proof}
\begin{corollary}
${\sf CZF}+{\sf AC}_{\ft}$ is conservative over ${\sf CZF}$ with respect to $\Pi^0_2$ sentences.
\end{corollary}
\begin{proof}
Let $\varphi(x,y)$ be a bounded formula with displayed free variables and suppose that
\[ \forall x\in\omega\, \exists y\in\omega\, \varphi(x,y)\footnote{Of course we mean that, e.g., $\forall z\, (\vartheta_0(z)\rightarrow \forall x\in z\, \exists y\in z\, \varphi(x,y))$
is provable, where $\vartheta_0(z)$ is a formula defining $\omega$.} \]
is provable in ${\sf CZF}$ plus ${\sf AC}_{\ft}$.
By the corollary above, we can find a closed application term $\mb e$ such that
\[ {\sf CZF}\vdash \mb e\Vdash \forall x\in\dot\omega\, \exists y\in\dot\omega\, \varphi(x,y). \]
In particular,
\[ {\sf CZF}\vdash \forall n\in\omega\, \exists m\in\omega\, (\mb e\bar n)_1\Vdash \varphi(\dot n,\dot m). \]
It is a routine matter (cf.\ also \cite[Chapter 4, Theorem 2.6]{M84}) to show that realizability equals truth for bounded arithmetic formulas, namely,
\[ {\sf CZF}\vdash \forall n_1,\ldots, n_k\in\omega\, (\psi(n_1,\ldots,n_k)\leftrightarrow \exists a,b\in A\, (a=b\Vdash \psi(\dot n_1,\ldots,\dot n_k))), \]
for $\psi(x_1,\ldots,x_k)$ bounded with all the free variables shown.
We can then conclude
\[ {\sf CZF} \vdash \forall x\in\omega\, \exists y\in\omega\, \varphi(x,y). \]
\end{proof}
\section{Soundness for ${\sf IZF}$}\label{sec IZF}
The theory ${\sf IZF}$ (Intuitionistic Zermelo-Fraenkel set theory) shares the logic and language of ${\sf CZF}$. Its axioms are
1. \textbf{Extensionality},
2. \textbf{Pairing},
3. \textbf{Union},
4. \textbf{Infinity},
5. \textbf{Set induction},
6. \textbf{Separation}: $\forall x\, \exists y\, \forall z\, (z\in y\leftrightarrow z\in x\land \varphi(z))$, for all formulae $\varphi$,
7. \textbf{Collection}: $\forall u\in x\, \exists v\, \varphi(u,v)\rightarrow \exists y\, \forall u\in x\, \exists v\in y\, \varphi(u,v)$, for all formulae $\varphi$,
8. \textbf{Powerset}: $\forall x\, \exists y\, \forall z\, (\forall u\in z\, (u\in x)\rightarrow z\in y)$.\\
Thus ${\sf IZF}$ is a strengthening of ${\sf CZF}$ with bounded separation replaced by full separation and subset collection replaced by powerset. Note that powerset implies subset collection and strong collection follows from separation and collection.
Note that in ${\sf IZF}$, due to the presence of powerset, the construction of $\mathrm{V_{ex}}(A)$ can proceed by transfinite recursion along the ordinals (cf.\ \cite{M84}).
\begin{theorem}\label{izf choice sound}
For every theorem $\varphi$ of ${\sf IZF}+{\sf AC}_{\ft}$, there is a closed application term $\mb e$ such that ${\sf IZF}$ proves $\mb e\Vdash \varphi$. In particular, ${\sf IZF}+{\sf AC}_{\ft}$ is consistent relative to ${\sf IZF}$.
\end{theorem}
\begin{proof}
The soundness for theorems of intuitionistic first order logic with equality follows immediately from Theorem \ref{int sound}. As for nonlogical axioms, in view of Corollary \ref{czf choice sound}, it is sufficient to deal with separation and powerset.
The argument for separation is similar to the corresponding argument for bounded separation in the proof of Theorem \ref{czf sound}, employing full separation in the background theory.
It thus remains to address powerset.
Write $z\subseteq x$ for $\forall u\in z\, (u\in x)$. We look for $\mb e$ such that for all $x\in\mathrm{V_{ex}}(A)$ there is a $y\in\mathrm{V_{ex}}(A)$ such that
\[ \mb e\Vdash z\subseteq x \rightarrow z\in y, \]
for all $z\in\mathrm{V_{ex}}(A)$.
On account of powerset, in ${\sf IZF}$, we can define sets $\mathrm{V_{ex}}(A)_\alpha$, with $\alpha$ ordinal (i.e., a transitive set of transitive sets), such that $\mathrm{V_{ex}}(A)=\bigcup_\alpha\mathrm{V_{ex}}(A)_\alpha$ and $\mathrm{V_{ex}}(A)_\alpha=\bigcup_{\beta\in\alpha}\ps(A\times A\times \mathrm{V_{ex}}(A)_\beta)$. Note that in ${\sf CZF}$ the $\mathrm{V_{ex}}(A)_\alpha$'s are just classes.
Given $x\in\mathrm{V_{ex}}(A)_\alpha$, let
\[ y=\{\pair{a,b,z}\in A\times A\times \mathrm{V_{ex}}(A)_\alpha \mid a=b\Vdash z\subseteq x\}. \]
The set $y$ exists by separation. Set
\[ \mb e:=\lambda a.\mb p a\mb {i_r}. \]
It is easy to check that $y$ and $\mb e$ are as desired, once established that if $z\in \mathrm{V_{ex}}(A)$ and
$a=b\Vdash z\subseteq x$ then $z\in\mathrm{V_{ex}}(A)_\alpha$. This is proved by set induction by showing that for all $u,v\in\mathrm{V_{ex}}(A)$:
\begin{itemize}
\item if $a=b\Vdash u\in v$ and $v\in\mathrm{V_{ex}}(A)_\alpha$, then $u\in\mathrm{V_{ex}}(A)_\beta$ for some $\beta\in\alpha$;
\item if $a=b\Vdash u=v$ and $v\in\mathrm{V_{ex}}(A)_\alpha$, then $u\in \mathrm{V_{ex}}(A)_\alpha$.
\end{itemize}
\end{proof}
As before, we obtain the following.
\begin{corollary}
${\sf IZF}+{\sf AC}_{\ft}$ is conservative over ${\sf IZF}$ with respect to $\Pi^0_2$ sentences.
\end{corollary}
\section{Conclusions}
We defined an extensional notion of realizability that validates ${\sf CZF}$ along with the axiom of choice in all finite types ${\sf AC}_{\ft}$, provably in ${\sf CZF}$. We have shown that one can replace ${\sf CZF}$ with ${\sf IZF}$. Presumably, this holds true for many other intuitionistic set theories as well.
There is a sizable number of well-known \emph{extra principles} $P$ that can be added to the mix, in the sense that $T+P$ proves $\mb e\Vdash P$, for some closed application term $\mb e$, where $T$ is either ${\sf CZF}$ or ${\sf IZF}$. This applies to arbitrary pca's in the case of large set axioms such as $\sf REA$ (Regular Extension axiom) by adapting \cite[Theorem 6.2]{R06}.
In the case of choice principles, this also applies to arbitrary pca's for Countable Choice, ${\sf DC}$ (Dependent Choice), ${\sf RDC}$ (Relativized Dependent Choice), and ${\sf PAx}$ (Presentation Axiom) by adapting the techniques of \cite{DR19}. Specializing to the case of the first Kleene algebra, one obtains extensional realizability of ${\sf MP}$ (Markov Principle) and forms of ${\sf IP}$ (Independence of Premise) adapting results from \cite[Section 11]{M84}, \cite{M86} , \cite[Section 7]{R06}.
We claim that realizability combined with truth and the appropriate pca modeled on \cite{R05,R08} yields the closure under the choice rule for finite types, i.e.,
\[ \text{If }T\vdash \forall x^\sigma\, \exists y^\tau\, \varphi(x,y), \text{ then } T\vdash \exists f^{\sigma\tau}\, \forall x^\sigma\, \varphi(x,f(x)) \]
for large swathes of intuitionistic set theories.
Church's thesis,
\[ \tag{{\sf CT}}\forall f\colon\omega\to\omega\, \exists e\in\omega\, \forall x\in\omega\, ( f(x)\simeq \{e\}(x)),\]
where $\{e\}(x)$ is Turing machine application, and the finite type axiom of choice are incompatible in extensional finite type arithmetic \cite{T77} (cf.\ \cite[Chapter 5, Theorem 6.1]{B85}).\footnote{The elementary recursion-theoretic reason that prevents Church's thesis from being extensionally realizable is the usual one: there is no type $2$ extensional index in Kleene's first algebra, that is, there is no $e\in\omega$ such that, for all $a,b\in\omega$, if $\{a\}(n)=\{b\}(n)$ for every $n\in\omega$, i.e., $a=_1b$, then $\{e\}(a)=\{e\}(b)$.} A fortiori, they are incompatible on the basis of ${\sf CZF}$, and thus of ${\sf IZF}$. However, negative versions of Church's thesis can still obtain in a universe in which ${\sf AC}_{\ft}$ holds. The assertion that no function from $\omega$ to $\omega$ is incomputable is known as weak Church's thesis \cite{T73}:
\[ \tag{{\sf WCT}}\forall f\colon\omega\to\omega\, \neg\neg \exists e\in\omega\, \forall x\in\omega\, ( f(x)\simeq \{e\}(x)). \]
Using Kleene's first algebra, one can easily verify that {\sf WCT} is extensionally realizable in ${\sf CZF}$. Therefore, ${\sf CZF}$ augmented with both ${\sf AC}_{\ft}$ and ${\sf WCT}$ is consistent relative to ${\sf CZF}$, and similarly for ${\sf IZF}$.
Continuity principles are a hallmark of Brouwer's intuitionism. They are compatible with finite type arithmetic (see \cite{B85,T73,T77,Oosten97}) and also with set theory (see \cite{B85,M84,R05,R05a}). They are known, though, to invite conflict with ${\sf AC}_{\ft}$ (see \cite[ Theorem 9.6.11]{TvD88}). However, as in the case of {\sf CT}, negative versions of them are likely to be compatible with ${\sf AC}_{\ft}$ on the basis of ${\sf CZF}$ and ${\sf IZF}$. Similar to the case of ${\sf CT}$, one would expect that the assertion that no function from $\mathbb R$ to $\mathbb R$ is discontinuous can go together with ${\sf AC}_{\ft}$. One obvious tool that suggests itself here is extensional generic realizability based on Kleene's second algebra. We shall not venture into this here and add the verification of this claim to the task list.
We conclude with the following remark. It is currently unknown whether one can provide a realizability model for choice principles based on larger type structures. Say that $I$ is a base if for every $I$-indexed family $(X_i)_{i\in I}$ of inhabited sets $X_i$ there exists a function $f\colon I\to \bigcup_{i\in I}X_i$ such that $f(i)\in X_i$ for every $i\in I$. Let $\mathcal C$-${\sf AC}$ say that every set $I$ in the class $\mathcal C$ is a base. The question is whether one can realize $\mathcal C$-${\sf AC}$, where $\mathcal C$ is the smallest $\Pi\Sigma$-closed class, or even the smallest $\Pi\Sigma W$-closed class, without assuming choice in the background theory.
\end{document} |
\begin{document}
\title{Reply to Comment on ``State-independent experimental test of quantum
contextuality in an indivisible system''}
\maketitle
The comment by Amselem et al. \cite{1} misinterprets the logic and
assumption of our experiment \cite{2}. Note that for tests of quantum
contextuality, so far no experiment can be done in a loophole-free
and device-independent manner. We need to make some reasonable assumptions
in experiments to rule out the noncontextual hidden variable models.
What we have assumed in our experiment is about functioning of some
simple linear optical devices: half wave plates (HWP) and polarization
beam splitters (PBS). Basically, we assume that a HWP, set at an angle
$\theta$, transforms the polarization $H,V$ of the incoming light
field by $H\rightarrow \cos(2\theta)H+\sin(2\theta)V$, $V\rightarrow-\sin(2\theta)H+\cos(2\theta)V$
and a PBS transmits the light component in $H$-polarization and reflects
its component in $V$-polarization \cite{2}. This knowledge does
not require assumption of the formalism of quantum mechanics and can be
regarded as basic experimental facts/laws about these well-calibrated
linear optical devices. The linear transformation of these optical
modes is apparently independent of the intensity of the incoming light
and holds in classical optics as well as in quantum case.
A schematic setup of our experiment is shown in Fig. 1. The mode transformer
composed of the PBS and the HWPs link the modes $A_{i},A_{j},A_{k}$ right before
the light detectors with the modes $0,1,2$, which are prepared in
the same state for different experimental trials. The light detector
behaves like a black box, which gives binary measurement outcomes
(click or no-click) for the incoming field/mode. We assume the detectors
are identical and exchangeable as it is the case in experiments. For
test of contextuality, we just need to make sure that the observable
$A_{i}$ before the detector $D_{i}$, expressed in terms of the modes
$0,1,2$, remains the same when we change the observable $A_{j}$
to $A_{j'}$ before the other detector for measurement of the correlations
\cite{3}. With knowledge of functioning of the HWPs and the PBS in the
mode transformer, one can easily check that this is the case in our
experiment when we tune the angles of the HWPs. For some trials of
the experiment, we swap the labeling of the modes $2$ and $0$ ($1$).
Again, with knowledge of functioning of the HWPs and PBS, we are still
measuring the same observable, which, expressed in terms of the relabeled
modes $0,1,2$, is under the same system state.
\begin{figure}
\caption{Illustration of the schematic experimental setup. }
\end{figure}
Note that the functioning of these linear optical devices is also
assumed in previous experiments on test of quantum contextuality.
For instance, in Ref. \cite{4}, the real experimental setup is shown
in Fig. 3 there. To reduce the real setup to the schematic setup shown
in Fig. 1 there for test of quantum contextuality, one has to assume
that the PBS and the HWPs set at the right angles transform the optical
modes as they are supposed to function. So this assumption is not
particular to our experiment at all.
This work was supported by the National Basic
Research Program of China (973 Program) 2011CBA00300 (2011CBA00302) and the
NSFC Grant 61033001.
C. Zu$^{1}$, Y.-X. Wang$^{1}$, D.-L. Deng$^{1,2}$, X.-Y. Chang$^{1}$,
K. Liu$^{1}$, P.-Y. Hou$^{1}$, H.-X. Yang$^{1}$, L.-M. Duan$^{1,2}$
$^{1}$Center for Quantum Information, IIIS, Tsinghua University,
Beijing, China;$^{2}$Department of Physics, University of Michigan,
Ann Arbor, Michigan 48109, USA
\end{document} |
\begin{document}
\title[Bohr neighborhoods]{Bohr neighborhoods in three-fold difference sets}
\author{John T. Griesmer}
\address{Department of Applied Mathematics and Statistics\\ Colorado School of Mines, Golden, Colorado}
\email{[email protected]}
\begin{abstract}
Answering a question of Hegyv{\'a}ri and Ruzsa, we show that if $A$ is a set of integers having positive upper Banach density, then the set $A+A-A:=\{a+b-c: a, b, c\in A\}$ contains Bohr neighborhoods of many elements of $A$, where the radius and dimension of the Bohr neighborhood depend only on $d^{*}(A)$.
\end{abstract}
\maketitle
\section{Introduction}\label{secIntroduction} For a real number $t$, $\|t\|$ denotes distance to the nearest integer. If $k\in \mathbb N$, $\eta>0$, and $s_{1},\dots, s_{k}$ are real numbers, then a \emph{Bohr-$(k,\eta)$ set} is a set of integers of the form $\{n: \max_{i\in \{1,\dots,k\}} \|s_{i}n\|<\eta\}$. A Bohr-$(k,\eta)$ neighborhood of $n$ is a set of the form $n+U$, where $U$ is a Bohr-$(k,\eta)$ set. The parameters $\eta$ and $k$ are called the \emph{radius} and \emph{dimension} of $U$, respectively.
Let $d^{*}(A)$ denote the upper Banach density of a set $A\subseteq \mathbb Z$. Theorem 2.2 of \cite{HegyvariRuzsa} says that if $d^{*}(A)>0$, then $A+A-A$ is a Bohr neighborhood of many $a\in A$. The proof therein does not specify parameters $k, \eta$ for the Bohr neighborhood in terms of $d^{*}(A)$, and Section 3 of \cite{HegyvariRuzsa} asks for a proof which makes those parameters effective. Our main result is the following theorem, which provides the requested effective bounds.
\begin{theorem}\label{thmMainSpecial}
\begin{enumerate}
\item[1.] Let $A\subseteq \mathbb Z$ have $d^{*}(A)>0$. There are constants $k\in \mathbb N$, $\eta>0$, depending only on $d^{*}(A)$, such that $A+A-A-a$ contains a Bohr-$(k,\eta)$ set for some $a\in A$.
\item[2.] For all $\varepsilon>0$, there are constants $k\in \mathbb N, \eta>0$, depending only on $d^{*}(A)$ and $\varepsilon$, such that there is a set $A'\subseteq A$ satisfying $d^{*}(A\setminus A')<\varepsilon$, and $A+A-A-a'$ contains a Bohr-$(k,\eta)$ set for all $a'\in A'$.
\end{enumerate}
\end{theorem}
The estimate $d^{*}(A\setminus A')<\varepsilon$ cannot be improved to $d^{*}(A\setminus A')=0$, but we omit examples to this effect, as the constructions are tedious.
Our proof of Theorem \ref{thmMainSpecial} generalizes without modification to the setting of countable abelian groups, so we work in that context. The next section introduces some terminology and notation for countable abelian groups, and states Theorem \ref{thmMain}, the natural generalization of Theorem \ref{thmMainSpecial} to that setting. The proof of Theorem \ref{thmMain} is supplied by Proposition \ref{propComplete}, which reduces the study of $A+A-A$ to an analogous problem for compact abelian groups, solved in Section \ref{secCompactAbelian}. The proof of Proposition \ref{propComplete} is carried out in Section \ref{secErgodic} using ergodic theoretic methods similar to those of \cite{GrIsr,GrAdv}.
It would be interesting to find a shorter, more elementary proof of Theorem \ref{thmMainSpecial}.
\section{Countable abelian groups}\label{secCountableAbelian}
Let $\Gamma$ be a countable abelian group. If $A, B\subseteq \Gamma$, $\gamma\in \Gamma$, write $A+B$ for $\{a+b:a\in A, b\in B\}$, $A-B$ for $\{a-b: a\in A, b\in B\}$, and $\gamma+A$ for $\{\gamma+a: a\in A\}$.
\subsection{Upper Banach density} A F{\o}lner sequence for $\Gamma$ is a sequence $(\Phi_{n})_{n\in \mathbb N}$ of finite subsets of $\Gamma$ such that $\lim_{n\to \infty} \frac{|\Phi_{n}\triangle (\Phi_{n}+\gamma)|}{|\Phi_{n}|}=0$ for every $\gamma\in \Gamma$. It is well known that every countable abelian group admits a F{\o}lner sequence. If $\mathbf \Phi = (\Phi_{n})_{n\in \mathbb N}$ is a F{\o}lner sequence and $A\subseteq \Gamma$, let $\overline{d}_{\mathbf \Phi}(A):= \limsup_{n\to \infty} \frac{|A\cap \Phi_{n}|}{|\Phi_{n}|}$ be the \emph{upper density of $A$ with respect to $\mathbf \Phi$}. Write $d_{\mathbf \Phi}(A)$ for $\overline{d}_{\mathbf \Phi}(A)$ if the limit exists. For $A\subseteq \Gamma$, the \emph{upper Banach density of $A$} is $d^{*}(A):=\sup\{d_{\mathbf \Phi}(A): \mathbf \Phi \text{ is a F{\o}lner sequence}\}$.
\subsection{Bohr neighborhoods} Let $\mathbb T$ denote the group $\mathbb R/\mathbb Z$. For $t\in \mathbb R$, recall that $\|t\|$ is the distance from $t$ to the nearest integer. For $x\in \mathbb T$, $\|x\|$ is defined to be $\|\tilde{x}\|$, where $\tilde{x}\in \mathbb R$ satisfies $\tilde{x}+\mathbb Z = x$.
If $S$ is a finite set of homomorphisms $\rho: \Gamma\to \mathbb T$, $|S|=k$, and $\eta>0$, then $\{\gamma \in \Gamma: \|\rho(\gamma)\|<\eta \text{ for all } \rho \in S\}$ is called a \emph{Bohr-$(k,\eta)$ set}. A \emph{Bohr-$(k,\eta)$ neighborhood} of $a\in \Gamma$ is a set of the form $a+U$, where $U$ is a Bohr-$(k,\eta)$ set. These definitions of ``Bohr neighborhood" and ``Bohr set" agree with the definitions in Section \ref{secIntroduction}, as the maps $n\mapsto s_{i}n+\mathbb Z$ are homomorphisms from $\mathbb Z$ to $\mathbb T$.
Let $\mathcal S^{1}$ denote the group of complex numbers of modulus $1$, with the group operation of multiplication. The groups $\mathbb T$ and $\mathcal S^{1}$ are isomorphic, via the isomorphism $e_{1}:\mathbb T\to \mathcal S^{1}$, $e_{1}(t):=\exp(2\pi i t)$. This leads to the following observation.
\begin{observation}\label{obsBohrEquiv}
If $S$ is a finite set of homomorphisms $\rho: \Gamma\to \mathcal S^{1}$, $|S|=k$, and $\eta>0$, then the set $\{\gamma \in \Gamma: |\rho(\gamma)-1|<\eta \text{ for all } \rho\in S\}$ contains a Bohr-$(k,\eta/(2\pi))$ set.
\end{observation}
\begin{theorem}\label{thmMain}
Let $\varepsilon, \delta>0$, and let $\Gamma$ be a countable abelian group.
\begin{enumerate}
\item[1.] There are constants $k\in \mathbb N$, $\eta>0$, depending only on $\delta$, such that if $A\subseteq \Gamma$, has $d^{*}(A)\geq \delta$, then $A+A-A-a$ contains a Bohr-$(k,\eta)$ set for some $a\in A$.
\item[2.] There are constants $k'\in \mathbb N, \eta'>0$, depending only on $\delta$ and $\varepsilon$, such that if $A\subseteq \Gamma$ has $d^{*}(A)\geq \delta$, there is a set $A'\subseteq A$ satisfying $d^{*}(A\setminus A')<\varepsilon$, and $A+A-A-a'$ contains a Bohr-$(k',\eta')$ set for all $a'\in A'$.
\end{enumerate}
The constants $k,k',\eta, \eta'$ do not depend on $\Gamma$.
\end{theorem}
Part 2 follows immediately from Part 1, while Part 1 will be derived from Proposition \ref{propComplete} and proved in Section \ref{secReduction}.
\begin{proof}[Proof of Part 2.]
Assume Part 1 holds, and assume, to get a contradiction, that Part 2 fails. Given a set $A\subseteq \Gamma$, $k\in \mathbb N$, $\eta>0$, let
\[E(A,k,\eta):=\{a\in A: A+A-A-a \text{ does not contain a Bohr-$(k,\eta)$ set}\}.\]
Then there are $\delta>0$, $\varepsilon>0$ such that for all $k\in \mathbb N$, $\eta>0$, there exists $A$ having $d^{*}(A)>\delta$, while $d^{*}(E(A,k,\eta))>\varepsilon$. Setting $E:=E(A,k,\eta)$, we have an $\varepsilon>0$ such that for all $k\in \mathbb N, \eta>0$, there is a set $E$ satisfying $d^{*}(E)>\varepsilon$ and for all $e\in E$, $E+E-E-e$ does not contain a Bohr-$(k,\eta)$ set. This contradicts Part 1.
\end{proof}
\section{Compact abelian groups}\label{secCompactAbelian}
\subsection{Bohr neighborhoods in topological abelian groups}
\begin{definition} If $Z$ is a topological abelian group, $S$ is a set of \emph{continuous} homomorphisms $\rho: Z\to \mathbb T$, $|S|=k$, and $\eta>0$, then $\{z: \|\rho(z)\|<\eta \text{ for all } \rho \in S\}$ is a \emph{Bohr-$(k,\eta)$ set}.
A Bohr-$(k,\eta)$ neighborhood of $z\in Z$ is a set of the form $z+U$, where $U$ is a Bohr-$(k,\eta)$ set.
\end{definition}
If $Z$ is a topological abelian group, $\widehat{Z}$ denotes the character group of $Z$, meaning the group of continuous homomorphisms $\chi: Z\to \mathcal S^{1}$ with the group operation of pointwise multiplication.
\begin{observation}\label{obsBohrEquiv2} Following Observation \ref{obsBohrEquiv}, we see that if $S\subseteq \widehat{Z}$, $|S|=k$, and $\eta>0$, then the set $\{z: |\chi(z)-1|< \eta \text{ for all } \chi \in S\}$ contains a Bohr-$(k,\eta/(2\pi))$ set.
\end{observation}
\begin{lemma}\label{lemCompactToCountable}
Let $\Gamma$ be a countable abelian group and $Z$ a topological abelian group. If $\rho:\Gamma \to Z$ is a homomorphism and $U\subseteq Z$ contains a Bohr-$(k,\eta)$ set, then $\rho^{-1}(U)$ contains a Bohr-$(k,\eta)$ set in $\Gamma$.
\end{lemma}
\begin{proof}
Let $S$ be a set of $k$ continuous homomorphisms $\psi:Z\to \mathbb T$ such that $U$ contains $\{z\in Z: \|\psi(z)\|< \eta \text{ for all } \psi\in S\}$. Then $\rho^{-1}(U)$ contains $\{\gamma\in \Gamma: \|\psi \circ \rho(\gamma)\|<\eta \text{ for all } \psi \in S\}$, which is a Bohr-$(k,\eta)$ set.
\end{proof}
\begin{lemma}\label{lemBohrPoly}
Let $Z$ be a topological abelian group and $S$ a finite subset of $\widehat{Z}$. If $p:Z\to \mathbb C$, $p:=\sum_{\chi \in S} c_{\chi}\chi$, where $|c_{\chi}|\leq 1$ for all $\chi$, and $a\in Z$ satisfies $\Re p(a)\geq c$, then $\{x\in Z: \Re p(x)>0\}$ contains a Bohr-$(k,\eta)$ neighborhood of $a$, where $k$ and $\eta$ depend only on $|S|$ and $c$. In fact we can use $k=|S|$ and $\eta=\frac{c}{2\pi|S|}$.
\end{lemma}
\begin{proof}
Let $U:=\{x: |\chi(x)-1|< \frac{c}{|S|} \text{ for all } \chi \in S\}$, so that $U$ is a Bohr-$(|S|, c/(2\pi|S|))$ set, by Observation \ref{obsBohrEquiv2}. Then for $x\in U$,
\begin{align*}
|p(a+x)-p(a)| &\leq \sum_{\chi \in S} |\chi(a+x)-\chi(a)| \\
&= \sum_{\chi\in S} |\chi(a)(\chi(x)-1)| \\
&= \sum_{\chi \in S} |\chi(x)-1| \\
&<c,
\end{align*}
so $\Re p(a+x)>0$ for all $x\in U$, and $a+U \subseteq \{x: \Re p(x)>0\}$.
\end{proof}
The following lemma may be proved similarly.
\begin{lemma}\label{lemBohrIsOpen}
Suppose $U$ is a Bohr-$(k,\eta)$ neighborhood of $a\in Z$. Then there is a neighborhood $V$ of $a$ (in the topology of $Z$) such that $U$ is a Bohr-$(k,\eta/2)$ neighborhood of $z$ for all $z\in V$.
\end{lemma}
\subsection{Fourier identities}\label{secFourier} We summarize some of the basic facts and definitions from harmonic analysis on compact abelian groups, available in standard references such as \cite{Rudin}.
Let $Z$ be a compact abelian group with Haar measure $m$, normalized so that $m(Z)=1$. For $\chi\in \widehat{Z}$ and $f\in L^{2}(m)$, $\hat{f}(\chi):= \int f\cdot \overline{\chi} \,dm$, and the function $\hat{f}:\widehat Z\to \mathbb C$ is the \emph{Fourier transform} of $f$.
For $f, g\in L^{1}(m)$, define the convolution $f*g$ by
\[
f*g(z):=\int f(z-t)g(t) \,dm(t).
\]
Let $f, g\in L^{2}(m)$. Then
\begin{align}
\label{eqnPlancherel} \int f\cdot \bar{g} \,dm &= \sum_{\chi \in \widehat Z} \hat{f}(\chi) \cdot \overline{\hat g(\chi)}\\
\label{eqnIsometry} \int |f|^{2} \,dm &= \sum_{\chi \in \widehat{Z}} |\hat{f}(\chi)|^{2}\\
\label{eqnConvolution} \widehat{f*g}&=\hat{f}\cdot \hat{g}.
\end{align}
Let $g_{-}$ be the function defined by $g_{-}(z)=g(-z)$. Then
\begin{equation}\label{eqnMinus}
\widehat{g_{-}}=\overline{\hat{g}}.
\end{equation}
Convolution is associative, so the three-fold convolution $h:=f*g*g_{-}$ is well-defined. Furthermore, when $f, g\in L^{2}(m)$, $h$ is continuous and its Fourier series $\sum_{\chi \in \widehat{Z}} \hat{h}(\chi)\cdot \chi$ converges uniformly to $h$.
\begin{lemma}\label{lemConvolution}
Let $\delta>0$. There exist $k\in \mathbb N$, $\eta>0$, depending only on $\delta$, such that if $Z$ is a compact abelian group with Haar measure $m$, and $f, g: Z\to [0,1]$ are measurable functions having $\int f \,dm, \int g \,dm \geq \delta$, then $\{z\in Z: f*g*g_{-}(z)>0\}$ contains a Bohr-$(k,\eta)$ neighborhood of some $a\in Z$ having $f(a)>0$.
\end{lemma}
\begin{proof}
Without loss of generality, we assume $\int f \,dm= \int g \,dm$, so let $f, g: Z\to [0,1]$ have $\int f \,dm = \int g \,dm = \delta$.
Let
$h:= f*g*g_{-}$. Then $h: Z\to [0,1]$, $h$ is continuous, and
$\hat{h}=\hat{f}\cdot |\hat{g}|^{2}$, by Equations (\ref{eqnConvolution}) and (\ref{eqnMinus}). Consequently $\hat{h}\in l^{1}(\widehat{Z})$.
Let $S_{1}:=\{\chi\in \widehat{Z}: |\hat{f}(\chi)|\geq \frac{1}{4}\delta^{3}\}$, $S_{2}:=\widehat{Z}\setminus S_{1}$. We claim that
\begin{equation}\label{eqnS1Bound}
|S_{1}| \leq 16\delta^{-5}
\end{equation}
and
\begin{equation}\label{eqnhBound}
h(a) \geq \delta^{4} \text{ for some } a \text{ having } f(a)>0.
\end{equation}
We postpone the proofs of inequalities (\ref{eqnS1Bound}) and (\ref{eqnhBound}) and now prove the conclusion of the lemma. Write the Fourier series of $h$ as $h(x) = p(x)+r(x)$, where
\begin{align*}
p(x):= \sum_{\chi \in S_{1}} \hat{h}(\chi)\chi(x),\ \ r(x):= \sum_{\chi \in S_{2}} \hat{h}(\chi)\chi(x).
\end{align*}
Both series converge uniformly, since $\hat{h}\in l^{1}(\widehat{Z})$.
Estimating $r(x)$, we get
\begin{align*}
\Bigl| \sum_{\chi \in S_{2}} \hat{h}(\chi)\chi(x)\Bigr| & \leq \sum_{\chi \in S_{2}} |\hat{f}(\chi)||\hat{g}(\chi)|^{2}\\
&\leq \tfrac{1}{4}\delta^{3}\sum_{\chi\in S_{2}} |\hat{g}(\chi)|^{2} && \text{by definition of $S_{2}$}\\
&\leq \tfrac{1}{4}\delta^{3}\sum_{\chi\in \widehat{Z}} |\hat{g}(\chi)|^{2}\\
&= \tfrac{1}{4} \delta^{3} \int |g|^{2} \,dm && \text{by (\ref{eqnIsometry})}\\
&\leq \tfrac{1}{4} \delta^{3} \int |g| \,dm && \text{since } 0\leq g \leq 1\\
&\leq \tfrac{1}{4}\delta^{3}\cdot \delta,
\end{align*} so
\begin{equation}\label{eqnRemainderEstimate}
|r(x)|\leq \tfrac{1}{4}\delta^{4} \text{ for all } x.
\end{equation} It follows that $h(x)$ is positive whenever $\Re p(x)> \frac{1}{4}\delta^{4}$, meaning the real part of $q(x):= p(x)-\frac{1}{4}\delta^{4}$ is positive. Choose an $a_{0}$ so that $h(a_{0})\geq \delta^{4}$ and $f(a_{0})>0$. Inequality (\ref{eqnRemainderEstimate}) implies $\Re p(a_{0})\geq \frac{3}{4}\delta^{4}$, so $\Re q(a_{0})\geq \frac{1}{2}\delta^{4}$. By Lemma \ref{lemBohrPoly}, the set $\{x:\Re q(x)>0\}$ contains a Bohr-$(|S_{1}|, c/(2\pi|S_{1}|))$ neighborhood around $a_{0}$, where $c = \Re q(a_{0}) \geq \frac{1}{2}\delta^{4}$. This Bohr neighborhood is contained in $\{x: h(x)>0\}$, so we have proved the lemma.
It remains to prove inequalities (\ref{eqnS1Bound}) and (\ref{eqnhBound}). To prove Inequality (\ref{eqnS1Bound}), consider
\begin{align*}
|S_{1}|\cdot \tfrac{1}{16}\delta^{6}&\leq \sum_{\chi \in S_{1}} |\hat{f}(\chi)|^{2}\\
& \leq \sum_{\chi \in \widehat {Z}} |\hat{f}(\chi)|^{2} \\
&= \int |f|^{2} \,dm && \text{by (\ref{eqnIsometry})} \\
& \leq \int |f| \,dm && \text{since $0\leq f \leq 1$}\\
& = \delta,
\end{align*} so $|S_{1}|\leq 16\delta^{-5}$.
To prove Inequality (\ref{eqnhBound}), consider
\begin{align*}
\int h\cdot f \,dm
&= \int h\cdot \bar{f} \,dm\\
&= \sum_{\chi \in \widehat{Z}} \hat{h}(\chi)\overline{\hat{f}(\chi)} && \text{by (\ref{eqnPlancherel})}\\
&= \sum_{\chi \in \widehat{Z}} |\hat{f}(\chi)|^{2}|\hat{g}(\chi)|^{2}\\
&= \sum_{\chi \in \widehat{Z}} |\widehat{f*g}(\chi)|^{2}&& \text{by (\ref{eqnConvolution})}\\
&= \int |f*g|^{2} \,dm && \text{by (\ref{eqnIsometry})}\\
&\geq \bigl|\int f*g \,dm\bigr|^{2} && \text{by convexity of $t\mapsto t^{2}$}\\
& = \delta^{4},
\end{align*}
so that $\int h\cdot f \,dm\geq \delta^{4}$. Let $A:=\{a\in Z: f(a)>0\}$. Observe that $\sup_{a\in A} h(a) \geq \int h\cdot f \,dm$, and equality holds only if $h$ is constant, so we conclude that $h(a)\geq \delta^{4}$ for some $a\in A$. \end{proof}
\section{From countable to compact}\label{secReduction}
\begin{proposition}\label{propComplete}
Let $\Gamma$ be a countable abelian group and let $A, B\subseteq \Gamma$ have $d^{*}(A)>0$, $d^{*}(B)>0$. Then there are:
\begin{enumerate}
\item[$\bullet$] a compact abelian group $Z$ with normalized Haar measure $m$,
\item[$\bullet$] a homomorphism $\rho:\Gamma\to Z$ such that $\rho(\Gamma)$ is dense in $Z$,
\item[$\bullet$] Borel functions $f, g: Z\to [0,1]$ satisfying $\int f \,dm=d^{*}(A)$, $\int g \,dm=d^{*}(B)$,
\end{enumerate}
such that
\begin{enumerate}
\item[$\bullet$] $f$ is supported on $\overline{\rho(A)}$,
\item[$\bullet$] $\{\gamma\in \Gamma: f*g*g_{-}(\rho(\gamma))>0\}\subseteq A+B-B$,
\end{enumerate}
where $\overline{\rho(A)}$ denotes the topological closure of $\rho(A)$ in $Z$.
\end{proposition}
We now prove Theorem \ref{thmMain} as a consequence of Proposition \ref{propComplete} and Lemma \ref{lemConvolution}.
\begin{proof}[Proof of Theorem \ref{thmMain}.] Let $A, B \subseteq \Gamma$ have $d^{*}(A), d^{*}(B)>0$.
Set $\delta := \min(d^{*}(A),d^{*}(B))$, and let $Z, \rho, f,$ and $g$ be as in Proposition \ref{propComplete}. Let
\begin{align*}
h&:=f*g*g_{-}\\
\tilde{A}&:=\{z\in Z: f(z)>0\}\\
U&:=\{z\in Z: h(z)>0\},
\end{align*} so that $\tilde{A}\subseteq \overline{\rho(A)}$ and $\rho^{-1}(U)\subseteq A+B-B$. By Lemma \ref{lemConvolution}, there is an $\tilde{a}\in \tilde{A}$ such that $U-\tilde{a}$ contains a Bohr-$(k,\eta)$ set, where $k, \eta$ depend only on $\delta$. Lemma \ref{lemBohrIsOpen} provides a neighborhood $V$ of $\tilde{a}$ such that $U-z$ contains a Bohr-$(k,\eta/2)$ set for all $z\in V$. Since $\tilde{a}\in \overline{\rho(A)}$, there is an $a\in A$ such that $\rho(a)\in V$. For such $a$, Lemma \ref{lemCompactToCountable} implies $\rho^{-1}(U)-a$ is a Bohr-$(k,\eta/2)$ set contained in $A+B-B-a$.
\end{proof}
\section{Correspondence principle and proof of Proposition \ref{propComplete}}\label{secErgodic}
In this section we fix a countable abelian group $\Gamma$. We will exploit the theory of measure-preserving actions of $\Gamma$, see \cite{EinsiedlerWard}, \cite{Furstenberg}, or \cite{Glasner} for general references, and \cite{GrIsr} or \cite{GrAdv} for similar applications.
\subsection{Measure preserving systems}
A \emph{measure preserving $\Gamma$-system} (or briefly, \emph{$\Gamma$-system}) is a quadruple $(X,\mathscr X,\mu,T)$, where $(X,\mathscr X,\mu)$ is a probability measure space and $T$ is an action of $\Gamma$ on $X$ preserving $\mu$:
\begin{equation}\label{eqnPreserveMeasure}
\mu(T^{-\gamma} D)=\mu(D)
\end{equation} for all measurable $D\subseteq X$ and all $\gamma\in \Gamma$. Note that Equation (\ref{eqnPreserveMeasure}) yields the identities
\begin{align}
\mu(C\cap T^{-\gamma}D) &= \mu(T^{a}(C\cap T^{-\gamma}D))=\mu(T^{a}C \cap T^{a-\gamma}D),\\
\label{eqnL2Preserved} \int f\cdot g\circ T^{\gamma} \,d\mu &= \int f\circ T^{-\gamma}\cdot g \,d\mu,
\end{align}
for all measurable $C, D\subseteq X$, all $f, g\in L^{2}(\mu)$, and all $a, \gamma\in \Gamma$.
A $\Gamma$-system is \emph{ergodic} if for every $D\subseteq X$ satisfying $\mu(D \triangle T^{\gamma}D)=0$ for all $\gamma\in \Gamma$, we have $\mu(D)=0$ or $\mu(D)=1$.
A \emph{factor} of a $\Gamma$-system $(X,\mathscr X,\mu,T)$ is a $\Gamma$-system $(Y,\mathscr Y,\nu,S)$ together with a \emph{factor map} $\pi:X\to Y$, defined for $\mu$-a.e. $x\in X$, such that
\begin{equation}\label{eqnEquivariant}
\pi(T^{\gamma}x)=S^{\gamma}\pi(x) \ \text{ for $\mu$-a.e.\ $x\in X$ and all $\gamma\in \Gamma$}.
\end{equation}
The space $L^{2}(\nu)$ may be identified with the subspace of $L^{2}(\mu)$ consisting of functions of the form $g\circ \pi$, where $g\in L^{2}(\nu)$. The $\sigma$-algebra $\pi^{-1}(\mathscr Y)$ consists of those sets which are $\mu$-a.e.\ equal to a set of the form $\pi^{-1}(C)$, where $C\subseteq Y$. Note that $L^{2}(\nu)$ may be identified with those elements of $L^{2}(\mu)$ which are $\pi^{-1}(\mathscr Y)$-measurable. Let $P_{\mathscr Y}:L^{2}(\mu)\to L^{2}(\mu)$ denote the orthogonal projection onto the space of $\pi^{-1}(\mathscr Y)$-measurable functions.
If $g\in L^{2}(\mu)$, let $\tilde{g}$ be the element of $L^{2}(\nu)$ satisfying $P_{\mathscr Y}g = \tilde{g}\circ \pi$. Note that if $f$ is $\pi^{-1}(\mathscr Y)$-measurable, we have
\begin{equation}\label{eqnFactorIntegrate}
\int f\cdot g\circ T^{\gamma} \,d\mu = \int \tilde{f}\cdot \tilde{g}\circ S^{\gamma} \,d\nu \ \ \text{ for all } \gamma\in \Gamma,
\end{equation}
where $\tilde{f}\in L^{2}(\nu)$ satisfies $f= \tilde{f}\circ \pi$.
\subsection{Group rotations}
A \emph{group rotation} is a $\Gamma$-system $(Z,\mathcal Z, m,R_{\rho})$, where $Z$ is a compact abelian group with normalized Haar measure $m$, $\rho: \Gamma\to Z$ is a homomorphism, and the action $R_{\rho}$ is given by $R_{\rho}^{\gamma}(z) = z+\rho(\gamma)$. The group rotation $(Z,\mathcal Z, m,R_{\rho})$ is ergodic iff $\rho(\Gamma)$ is dense in $Z$.
The \emph{Kronecker factor} of a $\Gamma$-system $\mathbf X$ is the maximal factor $\mathbf Y$ of $\mathbf X$ such that $\mathbf Y$ is isomorphic to a group rotation. When $\mathbf X$ is ergodic, such a factor always exists and is ergodic (although it may be trivial). See \cite{EinsiedlerWard}, \cite{Furstenberg}, or \cite{Glasner} for the existence of the Kronecker factor and its properties.
\subsection{A correspondence principle}
The following lemma is standard, but we outline the proof for completeness.
\begin{lemma}\label{lemCorrespondence1}
If $B\subseteq \Gamma$ has $d^{*}(B)>0$, there is an ergodic $\Gamma$-system $(X,\mathscr X,\mu,T)$ and a $D\subseteq X$ having $\mu(D)\geq d^{*}(B)$ such that $B-B$ contains $R(D):=\{\gamma: \mu(D\cap T^{\gamma}D)>0\}$.
\end{lemma}
\begin{proof}
Let $\Omega = \{0,1\}^{\Gamma}$ with the product topology, so that $\Omega$ is a compact metrizable space. Let $T$ be the action of $\Gamma$ on $\Omega$ defined by $(T^{\beta}\omega)(\gamma):=\omega(\gamma+\beta)$. Let $\mathbf \Phi=(\Phi_{n})_{n\in \mathbb N}$ be a F{\o}lner sequence for $\Gamma$ such that $d_{\mathbf \Phi}(B)=d^{*}(B)$. Consider $x:=1_{B}\in\Omega$, and let $X$ be the orbit closure of $x$ under $T$, meaning $X$ is the closure of the set $\{T^{\gamma}x: \gamma\in \Gamma\}$. Let $D$ be the set $\{\omega\in X: \omega(0)=1\}$, so that $D$ is a clopen subset of $X$. Note that $B=\{\gamma: T^{\gamma}x\in D\}$.
We will find a $T$-invariant measure $\mu$ on $X$ such that $\mu(D)=d^{*}(B)$. Let $\delta_{x}$ be the Dirac mass concentrated at $x$, and for each $n$, let $\nu_{n}:= \frac{1}{|\Phi_{n}|}\sum_{\gamma\in \Phi_{n}}\delta_{T^{\gamma}x}$. Let $\nu$ be a $\text{weak}^{*}$ limit of the $\nu_{n}$. Then $\nu$ is a $T$-invariant probability measure, while
\[\nu(D) = \lim_{n\to \infty} \frac{1}{|\Phi_{n}|}|\{\gamma\in \Phi_{n}: T^{\gamma}x\in D\}| = \lim_{n\to \infty} \frac{|B\cap \Phi_{n}|}{|\Phi_{n}|} = d^{*}(B).\]
Applying ergodic decomposition (\cite{EinsiedlerWard}, Theorem 8.20), we may find an ergodic $T$-invariant measure $\mu$ on $X$ such that $\mu(D)\geq \nu(D)$.
We now show that $\mu(D\cap T^{\gamma}D)>0$ implies $\gamma\in B-B$. In fact, if $D\cap T^{\gamma}D\neq \varnothing$, then there exists $y\in D$ such that $T^{\gamma}y\in D$. Since $y$ is a limit of points of the form $T^{\beta}x$ and $D$ is open, there exist $\beta\in \Gamma$ such that $T^{\beta}x\in D$ and $T^{\gamma}T^{\beta}x\in D$, which implies $\beta\in B$ and $\gamma+\beta\in B$. We then have $\gamma\in B-B$.
\end{proof}
If $D\subseteq X$ and $A\subseteq \Gamma$, let $A\cdot D:= \bigcup_{a\in A}T^{a}D$.
\begin{lemma}\label{lemCorrespondence2} If $\mathbf X$ is the $\Gamma$-system obtained in Lemma \ref{lemCorrespondence1} and $A\subseteq \Gamma$, then $A+B-B$ contains $\{\gamma\in \Gamma: \mu((A\cdot D)\cap T^{\gamma}D)>0\}$.
\end{lemma}
\begin{proof}
Note that $\mu(T^{a}D\cap T^{\gamma}D)>0$ iff $\mu(D\cap T^{\gamma-a}D)>0$, which implies $\gamma-a \in B-B$, meaning $\gamma\in a+B-B$. It follows that $\mu((A\cdot D) \cap T^{\gamma}D)>0$ implies $\gamma\in A+B-B$.
\end{proof}
\subsection{Proof of Proposition \ref{propComplete}}
From now on we fix:
\begin{enumerate}
\item[$\bullet$] $A$, $B\subseteq \Gamma$ having $d^{*}(A)>0$, $d^{*}(B)>0$,
\item[$\bullet$] a F{\o}lner sequence $\mathbf \Phi$ satisfying $d_{\mathbf \Phi}(A)=d^{*}(A)$,
\item[$\bullet$] an ergodic $\Gamma$-system $\mathbf X= (X,\mathscr X,\mu,T)$ and a set $D\subseteq X$ satisfying the conclusion of Lemma \ref{lemCorrespondence2},
\item[$\bullet$] the Kronecker factor $\mathbf Z=(Z,\mathcal Z, m,R_{\rho})$ of $(X,\mathscr X,\mu,T)$, with factor map $\pi:X\to Z$. Note that $\rho:\Gamma\to Z$ is a homomorphism with $\rho(\Gamma)$ dense in $Z$.
\item[$\bullet$] a function $g:Z\to [0,1]$ satisfying $g\circ \pi = P_{\mathcal Z}1_{D}$ $\mu$-almost everywhere.
\end{enumerate}
Here $P_{\mathcal Z}:L^{2}(\mu)\to L^{2}(\mu)$ is the orthogonal projection onto the space of $\pi^{-1}(\mathcal Z)$-measurable functions. As a special case of Equation (\ref{eqnEquivariant}), we get
\begin{align}\label{eqnKroneckerFactor}
g(\pi(x)+\rho(\gamma))=P_{\mathcal Z}1_{D}(T^{\gamma}x) \text{ for $\mu$-a.e. } x \text{ and all } \gamma\in \Gamma.
\end{align}
Furthermore, if $F$ is $\pi^{-1}(\mathcal Z)$-measurable, meaning $F=\tilde{F}\circ \pi$ for some $\tilde{F}\in L^{2}(m)$, then Equation (\ref{eqnFactorIntegrate}) implies
\begin{equation}\label{eqnKroneckerIntegrate}
\int F\cdot 1_{D}\circ T^{-\gamma} \,d\mu = \int \tilde{F} \cdot g\circ R_{\rho}^{-\gamma}\, dm = \int \tilde{F}(z) \cdot g(z-\rho(\gamma)) \,dm(z)
\end{equation}
for all $\gamma\in \Gamma$.
The lemmas and proofs in the remainder of this section will refer to the objects defined above. Our goal, in light of Lemma \ref{lemCorrespondence2}, is to describe those $\gamma$ for which $\mu((A\cdot D)\cap T^{-\gamma}D)>0$. Our approach is similar to the proofs of Proposition 3.2 of \cite{GrIsr} and Proposition 4.2 of \cite{GrAdv}.
In order to understand $A\cdot D$, we consider averages of functions supported on $A\cdot D$. We have fixed a F{\o}lner sequence $\mathbf \Phi=(\Phi_{n})_{n\in \mathbb N}$ such that $d_{\mathbf \Phi}(A)=d^{*}(A)$. Consider the sets $A_{n}:=A\cap \Phi_{n}$. Observe that $1_{D}\circ T^{-a}$ is supported on $A\cdot D$ for each $a\in A$, so each average
\begin{equation}\label{eqnDefAvg}
F_{n}:=\frac{1}{|\Phi_{n}|}\sum_{a\in A_{n}} 1_{D}\circ T^{-a}
\end{equation}
is supported on $A\cdot D$, and every weak $L^{2}(\mu)$ limit of these averages is also supported on $A\cdot D$. Passing to a subsequence of $\mathbf \Phi$, we may assume that the limit of $(F_{n})_{n\in \mathbb N}$ exists. Analyzing this limit will lead to the following lemma. See Section \ref{secFourier} for the definitions of $f*g$ and $g_{-}.$
\begin{lemma}\label{lemRecurrenceToSumset}
\begin{enumerate}
\item[(i)] The set $A\cdot D$ supports a function of the form $f*g\circ \pi$, where $f:Z\to [0,1]$ has $\int f \,dm = d^{*}(A)$, $f$ is supported on $\overline{\rho(A)}$, and $g$ is defined above.
\item[(ii)] $\{\gamma\in \Gamma: \mu((A\cdot D)\cap T^{\gamma}D)>0\}\supseteq \{\gamma: f*g*g_{-}(\rho(\gamma))>0\}$.
\end{enumerate}
\end{lemma}
\begin{proof} To prove (i), let $F$ be a weak $L^{2}(\mu)$ limit of the sequence $(F_{n})_{n\in \mathbb N}$, defined in Equation (\ref{eqnDefAvg}). Then $F$ is supported on $A\cdot D$, and by Lemma \ref{lemWeakLimits}, $F$ has the form $f*g\circ \pi$, where $f$ is as described in Part (ii) of that lemma.
Proof of Part (ii). We must prove the implication
\begin{equation}\label{eqnTheImplication}
f*g*g_{-}(\rho(\gamma))>0 \implies \mu((A\cdot D)\cap T^{\gamma}D)>0.
\end{equation} Suppose $\gamma\in \Gamma$ satisfies
\begin{equation}\label{eqnIntersectionIntegral}
\int (f*g\circ \pi) \cdot 1_{D}\circ T^{-\gamma} \,d\mu>0.
\end{equation} Equation (\ref{eqnKroneckerIntegrate}) implies the integral in Inequality (\ref{eqnIntersectionIntegral}) is positive if and only if $\int f*g(z)\cdot g(z-\rho(\gamma)) \,dm(z)>0,$ meaning $f*g*g_{-}(\rho(\gamma))>0$. By Part (i), inequality (\ref{eqnIntersectionIntegral}) implies $\mu((A\cdot D)\cap T^{\gamma}D)>0$, so we have proven the implication (\ref{eqnTheImplication}).
\end{proof}
\begin{proof}[Proof of Proposition \ref{propComplete}]
Proposition \ref{propComplete} now follows from Lemmas \ref{lemCorrespondence2} and \ref{lemRecurrenceToSumset}.
\end{proof}
\begin{lemma}\label{lemWeakLimits}
If the weak $L^2(\mu)$ limit $F$ of the sequence \[F_n:=\frac{1}{|\Phi_n|}\sum_{a\in A_{n}} 1_{D}\circ T^{-a}\] exists and $g:Z\to [0,1]$ is the function satisfying Equation \textup{(\ref{eqnKroneckerFactor})}, then
\begin{enumerate}
\item[(i)]
$F$ is $\pi^{-1}(\mathcal Z)$-measurable, and
\begin{align*}
F = \lim_{n\to \infty} \frac{1}{|\Phi_{n}|} \sum_{a \in A_{n}} g\circ R_{\rho}^{-1}\circ \pi \text{ weakly in } L^{2}(\mu).
\end{align*}
\item[(ii)] $F=(f*g) \circ \pi$, where $f:Z\to [0,1]$ satisfies $\int f \,dm = d^{*}(A)$ and $f$ is supported on $\overline{\rho(A)}$. \end{enumerate}
\end{lemma}
\begin{proof}
Part (i) is a consequence of the proof of Corollary 2.7 of \cite{GrAdv}. To prove Part (ii), it suffices to show that
\begin{equation}\label{eqnInnerProd}
\int F\cdot \psi \,d\mu= \int (f*g)\circ \pi \cdot \psi \,d\mu \text{\ \ for every } \psi\in L^{2}(\mu).
\end{equation}Part (i) already proves Equation (\ref{eqnInnerProd}) for those $\psi$ orthogonal to the $\pi^{-1}(\mathcal Z)$-measurable functions, so we may assume that $\psi$ is $\pi^{-1}(\mathcal Z)$-measurable, meaning $\psi = \tilde{\psi}\circ \pi$ for some $\tilde{\psi} \in L^{2}(m)$. It suffices to establish the identity (\ref{eqnInnerProd}) for an $L^{2}(\mu)$-dense set of functions $\psi$, so we may assume that $\psi=\tilde{\psi}\circ \pi$, where $\tilde{\psi}:Z\to \mathbb C$ is continuous. Let $F$ be a weak $L^{2}(\mu)$ limit of the $(F_{n})_{n\in \mathbb N}$, and let $\nu$ be a measure as in the conclusion of Lemma \ref{lemWeakStar}, so that $d\nu = f \,dm$ for some $f$ as in the conclusion of the lemma. Then we apply Part (i) to compute $F$:
\begin{align}
\int F \cdot \psi \,d\mu &= \lim_{n\to \infty} \frac{1}{|\Phi_{n}|} \sum_{a\in A_{n}} \int g(\pi(x)-\rho(a)) \cdot \tilde{\psi}(\pi(x)) \,d\mu(x)\\
&= \lim_{n\to \infty} \frac{1}{|\Phi_{n}|}\sum_{a\in A_{n}} \int g(\pi(x)) \cdot \tilde{\psi}(\pi(x)+\rho(a))\,d\mu(x)\\
&= \lim_{n\to \infty} \int g(z) \cdot \frac{1}{|\Phi_{n}|}\sum_{a\in A_{n}} \tilde{\psi}(z+\rho(a)) \,dm(z). \label{eqnLast}
\end{align}
We used Equations (\ref{eqnL2Preserved}) and (\ref{eqnKroneckerIntegrate}) to get the second and third lines. Applying Lemma \ref{lemWeakStar} to evaluate the limit in Equation (\ref{eqnLast}), we find
\[\lim_{n\to \infty} \frac{1}{|\Phi_{n}|} \sum_{a\in A_{n}}\tilde{\psi}(z+\rho(a)) = \int \tilde{\psi}(z+w)f(w) \,dm(w) \ \text{ for all } z\in Z,
\] where $f$ is as described above. We then evaluate, continuing from Equation (\ref{eqnLast}),
\begin{align*}
\int F\cdot \psi \,d\mu &= \int g(z) \cdot \int \tilde{\psi}(z+w)f(w) \,dm(w) \,dm(z)\\
&= \int \int f(w)g(z-w) \,dm(w)\, \tilde{\psi}(z)\, dm(z)\\
&= \int f*g(z) \cdot \tilde{\psi}(z) \,dm(z)\\
&= \int (f*g)\circ \pi \cdot \psi \,d\mu,
\end{align*}
establishing Equation (\ref{eqnInnerProd}).
The second line uses Fubini and translation invariance of $dm$, the third is just the definition of $f*g$, and the last line is the special case of Equation (\ref{eqnFactorIntegrate}) with $\gamma=0$.
\end{proof}
\begin{lemma}\label{lemWeakStar} Consider the Borel measures $\nu_{n}$ on $Z$ given by
\begin{align*}
\int \psi \,d\nu_{n}= \frac{1}{|\Phi_{n}|} \sum_{\gamma\in A_{n}} \psi(\rho(\gamma)) && \text{ for } \psi:Z\to \mathbb C.
\end{align*}
Let $\nu$ be a $\text{weak}^{*}$ limit of the $\nu_{n}$. Then $\nu$ is absolutely continuous with respect to Haar measure $m$, and
\begin{enumerate}
\item[(i)] $d\nu = f\, dm$, where $f: Z\to [0,1]$,
\item[(ii)] $\nu(Z)=d^{*}(A)$,
\item[(iii)] $f$ is supported on $\overline{\rho(A)}$.
\end{enumerate}
\end{lemma}
\begin{proof} Parts (i) and (ii) are established in the proof of Lemma 2.11 of \cite{GrAdv}. Part (iii) follows from the fact that $\int \psi \,d\nu=0$ whenever $\psi: Z\to \mathbb C$ is a continuous function which vanishes on $\overline{\rho(A)}$. \end{proof}
\end{document} |
\begin{document}
\verbatimfont{\asciifamily}
\frontmatter
\title{\mytitle}
\pagenumbering{roman}
{
\pagebreak
\thispagestyle{empty}
\setcounter{page}{1}
\hspace{0pt}
\begin{center}
\end{center}
}
{
\let\cleardoublepage
\makesecond
}
\chapter*{Abstract of the Dissertation}
In this thesis we use \emph{quasiorders} on words to offer a new perspective on two well-studied problems from \emph{Formal Language Theory}: deciding language inclusion and manipulating the finite automata representations of regular languages.
First, we present a generic quasiorder-based framework that, when instantiated with different quasiorders, yields different algorithms (some of them new) for deciding \emph{language inclusion}.
We then instantiate this framework to devise an efficient algorithm for \emph{searching with regular expressions on grammar-compressed text}.
Finally, we define a framework of quasiorder-based automata constructions to offer a new perspective on \emph{residual automata}.
\paragraph*{The Language Inclusion Problem}
First, we study the \emph{language inclusion problem} \(L_1 \subseteq L_2\) where \(L_1\) is regular or context-free and \(L_2\) is regular.
Our approach relies on checking whether an over-approximation of \(L_1\), obtained by successively over-approximating the Kleene iterates of its least fixpoint characterization, is included in \(L_2\).
We show that a language inclusion problem is decidable whenever the over-approximating function satisfies a completeness condition (i.e.\ its loss of precision causes no false alarm) and prevents infinite ascending chains (i.e.\ it guarantees termination of least fixpoint computations).
Such over-approximation of \(L_1\) can be defined using \emph{quasiorder} relations on words where the over-approximation gives the language of all words ``greater than or equal to'' a given input word for that quasiorder.
We put forward a range of quasiorders that allow us to systematically design decision procedures for different language inclusion problems such as regular languages into regular languages or into trace sets of one-counter nets and context-free languages into regular languages.
Some of the obtained inclusion checking procedures correspond to well-known algorithms like the so-called \emph{antichains} algorithms.
On the other hand, our quasiorder-based framework allows us to derive an equivalent greatest fixpoint language inclusion check which relies on quotients of languages and which, to the best of our knowledge, was \emph{not previously known}.
\paragraph*{Searching on Compressed Text}
Secondly, we instantiate our quasiorder-based framework for the scenario in which \(L_1\) consists of a single word generated by a context-free grammar and \(L_2\) is the regular language generated by an automaton.
The resulting algorithm can be used for deciding whether a grammar-compressed text contains a match for a regular expression.
We then extend this algorithm in order to count the number of lines in the uncompressed text that contain a match for the regular expression.
We show that this extension runs in time \emph{linear} in the size of the \emph{compressed} data, which might be exponentially smaller than the uncompressed text.
Furthermore, we propose efficient data structures that yield \emph{optimal} complexity bounds and an implementation --\tool{zearch}-- that outperforms the state of the art, offering up to $40\pct$ speedup with respect to \emph{highly optimized} implementations of the decompress and search approach.
\paragraph*{Residual Finite-State Automata}
Finally, we present a framework of finite-state automata constructions based on quasiorders over words to provide new insights on residual finite-state automata (RFA for short).
We present a new residualization operation and show that the residual equivalent of the double-reversal method holds, i.e.\ our residualization operation applied to a co-residual automaton generating the language \(L\) yields the canonical RFA for \(L\).
We then present a generalization of the double-reversal method for RFAs along the lines of the one for deterministic automata.
Moreover, we use our quasiorder-based framework to offer a new perspective on NL\(^*\), an on-line learning algorithm for RFAs.
We conclude that \emph{quasiorders} are fundamental to \emph{residual automata} in the same way \emph{congruences} are fundamental for \emph{deterministic automata}.
\chapter*{Resumen de la Tesis Doctoral}
En esta tesis, usamos \emph{preórdenes} para dar un nuevo enfoque a dos problemas fundamentales en \emph{Teoría de Lenguajes Formales}: decidir la inclusión entre lenguajes y manipular la representación de lenguajes regulares como autómatas finitos.
En primer lugar, presentamos un esquema que, dado un preorden que satisface ciertos requisitos, nos permite derivar de manera sistemática algoritmos de decisión para la inclusión entre diferentes tipos de lenguajes.
Partiendo de este esquema desarrollamos un algoritmo de búsqueda con expresiones regulares en textos comprimidos mediante gramáticas.
Por último, presentamos una serie de autómatas, cuya definición depende de un preorden, que nos permite ofrecer un nuevo enfoque sobre la clase de autómatas residuales.
\paragraph*{El Problema de la Inclusión de Lenguajes}
En primer lugar, estudiamos el problema de decidir \(L_1 \subseteq L_2\), donde \(L_1\) es un lenguaje independiente de contexto y \(L_2\) es un lenguaje regular.
Para resolver este problema, sobre-aproximamos los sucesivos pasos de la iteración de punto fijo que define el lenguaje \(L_1\).
Con ello, obtenemos una sobre-aproximación de \(L_1\) y comprobamos si está incluida en el lenguaje \(L_2\).
Esta técnica funciona siempre y cuando la sobre-aproximación sea completa (es decir, la imprecisión de la aproximación no produzca falsas alarmas) y evite cadenas infinitas ascendentes (es decir, garantice que la iteración de punto fijo termina).
Para definir una sobre-aproximación que cumple estas condiciones, usamos un preorden.
De este modo, la aproximación del lenguaje \(L_1\) contiene todas las palabras ``mayores o iguales que'' alguna palabra de \(L_1\).
En concreto, definimos una serie de preórdenes que nos permiten derivar, de manera sistemática, algoritmos de decisión para diferentes problemas de inclusión de lenguajes como la inclusión entre lenguajes regulares o la inclusión de lenguajes independientes de contexto en lenguajes regulares.
Algunos de los algoritmos obtenidos mediante esta técnica coinciden con algoritmos bien conocidos como los llamados \emph{antichains algorithms}.
Por otro lado, nuestra técnica también nos permite derivar algoritmos de punto fijo que, hasta donde sabemos, \emph{no han sido descritos anteriormente}.
\paragraph*{Búsqueda en textos comprimidos}
En segundo lugar, aplicamos nuestro algoritmo de decisión de inclusión entre lenguajes al problema \(L_1 \subseteq L_2\), donde \(L_1\) es un lenguaje descrito por una gramática que genera una única palabra y \(L_2\) es un lenguaje regular definido por un autómata o expresión regular.
De esta manera, obtenemos un algoritmo que nos permite decidir si un texto comprimido mediante una gramática contiene, o no, una coincidencia de una expresión regular dada.
Posteriormente, modificamos este algoritmo para contar las líneas del texto comprimido que contienen coincidencias de la expresión regular.
De este modo, obtenemos un algoritmo que opera en tiempo \emph{lineal} respecto del tamaño del texto \emph{comprimido} el cual, por definición, puede ser exponencialmente más pequeño que el texto original.
Además, describimos las estructuras de datos necesarias para que nuestro algoritmo opere en tiempo \emph{óptimo} y presentamos una implementación --\tool{zearch}-- que resulta hasta un $40\pct$ más rápida que las mejores implementaciones del método estándar de descompresión y búsqueda.
\paragraph*{Autómatas Residuales}
Finalmente, presentamos una serie de autómatas parametrizados por preórdenes que nos permiten mejorar nuestra comprensión de la clase de autómatas residuales (abreviados como RFA).
Estos autómatas parametrizados nos permiten definir una nueva operación de residualization y demostrar que el método de \emph{double-reversal} funciona para RFAs, es decir, residualizar un autómata cuyo reverso es residual da lugar al canonical RFA (un RFA de tamaño mínimo).
Tras esto, generalizamos este método de forma similar a su generalización para el caso de autómatas deterministas.
Por último, damos un nuevo enfoque a NL\(^*\), un algoritmo de aprendizaje de RFAs.
Como conclusión, encontramos que los \emph{preórdenes} juegan el mismo papel para los \emph{autómatas residuales} que las \emph{congruencias} para los \emph{deterministas}.
{
\thispagestyle{empty}
\vspace*{10em}
\begin{flushright}
\textit{To my parents and my wife, for their endless love and support}
\end{flushright}
}
\chapter*{Acknowledgments}
Tras un proyecto tan largo e intenso como un doctorado, la lista de personas a las que quiero dar las gracias es muy extensa.
En general, quiero dar las gracias a todas aquellas personas que, de un modo u otro, han formado parte de mi vida durante estos últimos años.
En las siguientes líneas trataré de nombrarlos a todos, aunque seguramente me deje nombres en el tintero.
En primer lugar, quiero dar las gracias a Pierre quien comenzó siendo mi director de tesis y a quien a día de hoy considero un amigo.
Pierre, gracias por darme la oportunidad de realizar mis primeras prácticas en IMDEA y por ayudarme a realizar mi primera estancia fuera de casa.
Aquella experiencia me hizo descubrir que quería hacer un doctorado y fue tu interés y confianza en mí lo que me llevó a hacerlo en IMDEA.
Gracias por guiarme con paciencia y apoyarme en mis decisiones durante estos 4 años, especialmente en mi interés por realizar estancias para conocer gente y lugares.
Gracias a eso tuve el placer de trabajar con Rupak en Kaiserslautern, con Javier en Munich y con Yann en San Francisco.
Gracias también a ellos tres, y a los compañeros que tuve en esos viajes, en especial a Isa, Harry, Filip, Dmitry, Rayna, Marko, Bimba, Nick y Felix, por hacer de mis visitas grandes experiencias llenas de buenos recuerdos.
Quiero dar las gracias, también, a todo el personal del Instituto IMDEA Software.
Ha sido un placer llevar a cabo mi trabajo rodeado de grandes profesionales en todos los ámbitos.
Gracias Paloma, Álvaro, Felipe, Miguel, Isabel, Kyveli, Joaquín, Germán, Platón y Srdjan, entre otros, por ser los artífices de tantos buenos recuerdos.
Especialmente, quiero agradecer a Ignacio su humor, su ayuda prestada durante estos últimos años y su paciencia al leer múltiples versiones de la introducción de este trabajo.
Gracias por ser ese amigo del despacho de al lado al que ir a molestar siempre que quería comentar alguna idea, por tonta que fuera.
Elena, creo que ha sido una experiencia estupenda haber compartido mis años de universidad y de doctorado con una amiga como tú.
He disfrutado muchísimo de todas las ocasiones en que hemos podido trabajar juntos y creo que hacíamos un equipo estupendo.
A mis profesores de bachillerato Soraya y Mario.
Con vosotros entendí que estudiar era mucho más que aprobar un examen y me hicisteis disfrutar aprendiendo.
Despertasteis en mí la pasión por aprender y por afrontar nuevos retos y fue esa pasión la que me llevó a estudiar el Doble Grado de Matemáticas con Informática y a realizar posteriormente un doctorado.
A mi familia, que recientemente creció en número, por el mero hecho de estar ahí.
Gracias en especial a mi prima, la Dra. Gámez, por ser la pionera, la primera investigadora y Dra. en la familia, que me ahorró el esfuerzo de explicar a todos cómo funciona el mundo de la investigación en que nos movemos.
A mis amigos de siempre y a los más recientes.
Gracias por tantos buenos momentos, por visitarme cuando estaba fuera y por los viajes y planes que aún quedan por hacer.
Creo firmemente que haber sido feliz en mi vida personal ha sido una pieza clave de mis éxitos profesionales.
Quiero dar las gracias por ello a Alberto, Carlos, David, Rubén, Antonio, Victor, Eduardo, Álvaro, Guillermo, Cristina, Lara e Irene, entre muchos otros.
A mis padres, gracias por hacerme ser quien soy y por apoyarme siempre aún sin terminar de entender la aventura en la que me embarcaba al iniciar el doctorado.
Gracias a vosotros he tenido una vida llena de facilidades, que me ha permitido centrarme siempre en mis estudios y mi trabajo.
Cada uno de mis logros es resultado de vuestro esfuerzo.
Por último, quiero dar las gracias a mi mujer.
Jimena, gracias por apoyarme durante este tiempo, por acompañarme en mis viajes siempre que fue posible y por soportar la distancia cuando no. Gracias, en definitiva, por estar ahí.
\hypersetup{citecolor=black, urlcolor=black,linkcolor=black}
{
\thispagestyle{empty}
\tableofcontents
}
{
\thispagestyle{empty}
\listoffigures
}
{
\thispagestyle{empty}
\listoftables
}
\hypersetup{citecolor=blue!80, urlcolor=blue!80,linkcolor=blue!80}
\markboth{List of Publications}{List of Publications}
\chapter*{List of Publications}
This thesis comprises the following four papers for which I am the main author.
The first three have been published in top peer-reviewed academic conferences while the last one has recently been submitted and has not been published yet:
\begin{myEnum}
\item Pedro Valero Mejía\xspace and Dr.~Pierre Ganty\xspace\\
\textbf{Regular Expression Search on Compressed Text}\\
Published in \emph{Data Compression Conference}, March 2019.
\item Pedro Valero Mejía\xspace, Dr.~Pierre Ganty\xspace and Prof. Francesco Ranzato\\
\textbf{Language Inclusion Algorithms as Complete Abstract Interpretations}\\
Published in \emph{Static Analysis Symposium}, October 2019.
\item Pedro Valero Mejía\xspace, Dr.~Pierre Ganty\xspace and Elena Guti\'errez\\
\textbf{A Quasiorder-based Perspective on Residual Automata}\\
Published in \emph{Mathematical Foundations of Computer Science}, August 2020.
\item Pedro Valero Mejía\xspace, Dr.~Pierre Ganty\xspace and Prof. Francesco Ranzato\\
\textbf{Complete Abstractions for Checking Language Inclusion}\\
Submitted to \emph{Transactions on Computational Logic}, \DTMenglishmonthname{\@dtm@month} \@dtm@year\xspace.
\end{myEnum}
Using the techniques presented in the first of the above-mentioned papers, I developed a tool for searching with regular expressions in compressed text.
The implementation is available on-line at \url{https://github.com/pevalme/zearch}.
I have also contributed to the following papers which are not part of this thesis.
\begin{myEnum}
\item Elena Guti\'errez, Pedro Valero Mejía\xspace and Dr.~Pierre Ganty\xspace\\
\textbf{A Congruence-based Perspective on Automata Minimization Algorithms}\\
Published in \emph{International Symposium on Mathematical Foundations of Computer Science}, August 2019
\item Pedro Valero Mejía\xspace, Dr.~Pierre Ganty\xspace and Boris Köpf\\
\textbf{A Language-theoretic View on Network Protocols}\\
Published in \emph{Automated Technology for Verification and Analysis}, October 2017
\end{myEnum}
\mainmatter
\pagenumbering{arabic}
\setcounter{page}{1}
{}
\chapter{Introduction}
\label{chap:introduction}
\emph{Formal languages}, i.e. languages for which we have a \emph{finite formal description}, are used to model possibly infinite sets so that their finite descriptions can be used to reason about these sets.
As a consequence, \emph{Formal Language Theory}, i.e. the study of formal languages and the techniques for manipulating their finite representations, finds applications in several domains in computer science.
For example, the possibly infinite set of assignments that satisfy a given formula in some logic can be seen as a formal language whose finite description is the formula itself.
In some logics, the set of values that satisfy any formula is regular and, therefore, it can be described by means of a finite-state automaton (automaton for short).
When this is the case, it is possible to reason in that logic by manipulating automata as shown in Example~\ref{example:DecisionProcedure}.
\begin{figure}
\caption{Automata accepting the set of binary encodings of numbers divisible by 4 (top left), divisible by two (top right) and the product of these two automata (bottom).}
\label{fig:FAdivisible}
\end{figure}
\begin{exampleC}\label{example:DecisionProcedure}
Consider the formulas \(f_{2} : `` x \text{ mod } 2 = 0"\) and \(f_{4} : `` x \text{ mod } 4 = 0 "\).
Next we show how to reason about the formula \(f_{42} : `` f_4 \land f_2"\) by means of automata.
A binary sequence ``\(x\)'' encodes a number divisible by 4 if{}f the last two digits are 0's.
Similarly, ``\(x\)'' encodes a number divisible by 2 if{}f the last digit is 0.
Therefore, the automata \(A_4\) and \(A_2\) from Figure~\ref{fig:FAdivisible} accept the binary encodings of numbers ``\(x\)'' that satisfy the formulas \(f_4: ``x \text{ mod } 4 = 0"\) and \(f_2: `` x \text{ mod } 2 = 0"\), respectively.
Since the numbers satisfying the formula \(f_{42}\) are, by definition, the ones satisfying both \(f_4\) and \(f_2\), the automaton for \(f_{42}\) is \(A_{42} = A_2 \times A_4\), shown in Figure~\ref{fig:FAdivisible}, which recognizes exactly the encodings accepted by both \(A_2\) and \(A_4\).
Thus, there exists a number satisfying \(f_{42}\) if{}f the language accepted by \(A_{42}\) is not empty.
On the other hand, since the automaton \(A_4\) accepts a language that is included in the one of \(A_2\), we conclude that the encodings satisfying \(f_4\) also satisfy \(f_2\).
Thus, the automaton for \(A_4\) is equivalent to, i.e. it accepts the same language as, the automaton \(A_{42}\) and both are automata for \(f_{42}\).
{\ensuremath{\Diamond}}
\end{exampleC}
This idea led to the development of \emph{automata-based decision procedures} for logical theories such as Presburger arithmetic~\cite{wolper1995automata} and the Weak Second-order theory of One or Two Successors (WS1S/WS2S)~\cite{Klarlund:mona95,Klarlund:mona99} among others~\cite{allouche2003automatic,schaeffer2013deciding}.
A similar idea is used in \emph{regular model checking}~\cite{bouajjani2000regular,abdulla2012regular,clarke18}, where formal languages are used to describe the possibly infinite sets of states that a system might reach during its execution.
A different use of formal languages in computer science is the \emph{lossless compression of textual data} \cite{Charikar2005Smallest,Hucke2016Smallest}.
In this scenario the data is seen as a language consisting of a single word and its finite formal description as a grammar is seen as a \emph{succinct representation} of the language it generates.
As the following example evidences, the grammar might be exponentially smaller than the data.
\begin{exampleC}\label{example:GrammarCompression}
Let \(k\) be an integer greater than 1 and let \(\mathcal{G}\) be the grammar with the set of variables \(\{X_i \mid 0 \leq i \leq k\}\), alphabet \(\{a\}\), axiom \(X_k\) and set of rules \(\{X_i \to X_{i{-}1}X_{i{-}1} \mid 1 \leq i \leq k\} \cup \{X_0 \to a\}\).
Clearly, \(\mathcal{G}\) has size \emph{linear} in \(k\) and produces the word \(a^{2^k}\).
Therefore, the grammar is \emph{exponentially smaller} than the word it generates.
{\ensuremath{\Diamond}}
\end{exampleC}
The idea of using grammars to compress textual data has led to the development of several grammar-based compression algorithms~\cite{ziv1978compression,nevill1997compression,larsson2000off}.
These algorithms offer some advantages with respect to other classes of compression techniques, such as the ones based on the well-known \text{LZ77} algorithm~\cite{ziv1977compression}, in terms of the structure of the compressed representation of the data (which is a grammar).
In particular, they allow us to analyze the uncompressed text, i.e. the language, by looking at the compressed data, i.e. the grammar~\cite{lohrey2012algorithmics}.
\section{The Contributions of This Dissertation}
In this dissertation we focus on three problems from \emph{Formal Language Theory}: deciding language inclusion, searching on grammar-compressed text and building residual automata.
As we describe next, these are well-studied and important problems in computer science for which there are still challenges to overcome.
\paragraph*{The Language Inclusion Problem}
In the first two scenarios described before, i.e. \emph{automata-based decision procedures} and \emph{regular model checking}, the \emph{language inclusion problem}, i.e. deciding whether the language inclusion \(L_1 \subseteq L_2\) holds, is a fundamental operation.
For instance, in Example~\ref{example:DecisionProcedure}, deciding the language inclusion between the languages generated by automata \(A_4\) and \(A_2\) allows us to infer that all values satisfying \(f_4\) also satisfy \(f_2\).
Similarly, in the context of regular model checking, we can define a possibly infinite set of ``good'' states that the system should never leave and solve a language inclusion problem to decide whether the system is confined to the set of good states.
As a consequence, the \emph{language inclusion problem} is a fundamental and classical problem in computer science~\cite[Chapter 11]{HU79}.
In particular, language inclusion problems of the form \(L_1 \subseteq L_2\), where both \(L_1\) and \(L_2\) are regular languages, appear naturally in different scenarios as the ones previously described.
The standard approach for solving such problems consists of reducing them to \emph{emptiness} problems using the fact that \(L_1 \subseteq L_2\Leftrightarrow L_1 \cap L_2^c = \varnothing\).
However, algorithms implementing this approach suffer from a worst case exponential blowup when computing \(L_2^c\) since it requires determinizing the automaton for \(L_2\).
The state of the art approach to overcome this limitation is to keep the computation of the automaton for \(L_2^c\) implicit, thus preventing the exponential blowup for many instances of the problem.
For instance, \citet{DBLP:conf/cav/WulfDHR06} developed an algorithm for deciding language inclusion between regular languages that uses \emph{antichains}, i.e. sets of incomparable elements, to reduce the blowup resulting from building the complement of a given automaton.
Their work was later improved by \citet{Abdulla2010} and \citet{DBLP:conf/popl/BonchiP13} who used \emph{simulations} between the states of the automata to further reduce the blowup associated to the complementation step.
Then, \citet{Holk2015} adapted the use of antichains to decide the inclusion of context-free languages into regular ones.
However, even though these algorithms have a common foundation, i.e. they all reduce the language inclusion problem to an emptiness one through complementation and use antichains to keep the complementation implicit, the relation between them is not well understood.
This is evidenced by the fact that the generalization by \citet{Holk2015} of the antichains algorithm of \citet{DBLP:conf/cav/WulfDHR06} was obtained by rephrasing the inclusion problem as a data flow analysis problem over a relational domain.
\noindent\textbf{Our Contribution.}
We use \emph{quasiorders}, i.e. reflexive and transitive relations, to define a framework from which we systematically derive algorithms for deciding language inclusion such as the ones of \citet{DBLP:conf/cav/WulfDHR06} and \citet{Holk2015}.
Indeed, we show that these two algorithms are conceptually equivalent and correspond to two instantiations of our framework using different quasiorders.
Moreover, by using a quasiorder based on simulations between the states of an automaton, we derive an improved antichains algorithm that partially matches the one of \citet{Abdulla2010}.
Furthermore, our framework goes beyond inclusion into regular languages and allows us to derive an algorithm for deciding the language inclusion \(L_1 \subseteq L_2\) when \(L_1\) is regular and \(L_2\) is the set of traces of a \emph{one counter net}, i.e. an automaton equipped with a counter that cannot test for 0.
Finally, we also derive a \emph{novel} algorithm for deciding inclusion between regular languages.
\paragraph*{Searching on Compressed Text}
The growing amount of information handled by modern systems demands for efficient techniques both for compression, to reduce the storage cost, and for regular expression searching, to speed up querying.
Therefore, the problem of searching on compressed text is of practical interest as evidenced by the fact that state of the art tools for searching with regular expressions, such as \tool{grep}\footnote{\url{https://www.gnu.org/software/grep/manual/grep.html}.} and \tool{ripgrep}\footnote{\url{https://github.com/BurntSushi/ripgrep}.}, provide a method for searching on compressed files by decompressing them on-the-fly.
Due to the high performance of state of the art compressors such as \tool{zstd}\footnote{\url{https://github.com/facebook/zstd}} and \tool{lz4}\footnote{\url{https://github.com/lz4/lz4}}, the performance of searching on the decompressed data as it is recovered by the decompressor is comparable with that of searching on the uncompressed data.
Therefore, the parallel decompress-and-search approach is the state of the art for searching on compressed text.
However, when using a grammar-based compression technique it is possible to manipulate the compressed data, i.e. the grammar, to analyze the uncompressed data, i.e. the language generated by the grammar.
Intuitively, this means that the information about repetitions in the text present in its compressed version can be used to enhance the search.
Therefore, \emph{searching on grammar-compressed text} could be even faster than searching on the uncompressed text.
This idea is exploited by multiple algorithms that perform certain operations directly on grammar-compressed text, i.e. without having to recover the uncompressed data, such as finding given words \cite{navarro2005lzgrep}, finding words that match a given regular expression~\cite{navarro2003regular,bille2009improved} or finding approximate matches~\cite{navarro2001approximateSurvey}.
Nevertheless, the implementations of \citet{navarro2003regular} and \citet{navarro2005lzgrep} (to the best of our knowledge, the only existing tools for searching on compressed text) are not faster than the state of the art decompress and search approach. Partly, this is due to the fact that these algorithms only apply to data compressed with one specific grammar-based compressor, namely \tool{LZ78}~\cite{ziv1978compression}, which, as shown by \citet{Hucke2016Smallest}, cannot achieve exponential compression ratios\footnote{The compression ratio for a file of size \(T\) compressed into size \(t\) is \(T/t\).}.
\noindent\textbf{Our Contribution.}
We improve this situation by rephrasing the problem of searching on compressed text as a language inclusion problem between a context-free language (the text) and a regular one (the expression).
Then, we instantiate our quasiorder-based framework for solving language inclusion and adapt it to the specifics of this scenario, where the context-free grammar generates a single word: the uncompressed text.
The resulting algorithm is not restricted to any class of grammar-based compressors and it reports the number of lines in the text containing a match for a given expression in time \emph{linear} with respect to the size of the compressed data.
We implement this algorithm in a tool --\tool{zearch}\footnote{\url{https://github.com/pevalme/zearch}}-- for searching with regular expressions in\linebreak grammar-compressed text.
The experiments evidence that compression can be used to enhance the search and, therefore, the performance of \tool{zearch} improves with the compression ratio of the data.
Indeed, our tool is as fast as searching on the uncompressed data when the data is well-compressible, i.e. it results in compression ratio above 13, which occurs, for instance, when considering automatically generated \emph{log files}.
\paragraph*{Building Residual Automata}
Clearly,
the problem of finding a concise representation of a regular language is also a fundamental problem in computer science.
There exist two main classes of automata representations for regular languages, both having the same expressive power: non-deterministic (NFA for short) and deterministic (DFA for short) automata.
While DFAs are simpler to manipulate than NFAs\footnote{For instance, in order to build the complement of a DFA it suffices to switch final and non-final states while complementing an NFA requires determinizing it.} they are, in the worst case, exponentially larger.
\begin{exampleC}
The minimal DFA for the set of words of length \(2n{+}2\) with two 1's separated by \(n\) symbols has size exponential in \(n\) since any DFA for that language must have one state for each of the \(2^n\) possible prefixes of length \(n\).
Figure~\ref{fig:NFAvsDFA} shows the minimal DFA and an exponentially smaller NFA for \(n=2\).
{\ensuremath{\Diamond}}
\end{exampleC}
\begin{figure}
\caption{Minimal DFA (left) and NFA (right) accepting the words in the alphabet \(\{0,1\}\) of length \(2n{+}2\) with two 1's separated by \(n\) symbols, for \(n=2\).}
\label{fig:NFAvsDFA}
\end{figure}
Therefore, algorithms relying on determinized automata, such as the standard algorithm for building the complement of an NFA, do not scale despite the existence of different techniques for reducing the size of DFAs~\cite{hopcroft1971,moore1956} and for building DFAs of minimal size~\cite{Sakarovitch,Adamek2012,Brzozowski2014}.
This has led to the introduction of \emph{residual automata}~\cite{denis2001residual,denis2002residual} (RFA for short) as a generalization of DFAs that breaks determinism in favor of conciseness of the representation.
Therefore, RFAs are easier to manipulate than NFAs (there exists a canonical minimal RFA for every regular language, which makes learning easier) and more concise than DFAs (both automata from Figure~\ref{fig:NFAvsDFA} are RFAs).
These properties make RFAs especially appealing in certain domains such as Grammatical Inference~\cite{denis2004learning,bollig2009angluin}.
There exists a clear relationship between RFAs and DFAs as evidenced by the similarities between the \emph{residualization} and \emph{determinization} operations and the fact that a straightforward modification of the double-reversal method for building minimal DFAs yields a method for building minimal RFAs.
However, the connection between these two formalisms is not fully understood as evidenced by the fact that the relation between the generalization of the double-reversal methods for DFAs~\cite{Brzozowski2014} and RFAs~\cite{tamm2015generalization} is not immediate.
\noindent\textbf{Our Contribution.}
We present a framework of quasiorder-based automata constructions that yield residual and co-residual automata.
We find that one of these constructions defines a residualization operation that produces smaller automata than the one of \citet{denis2002residual} and for which the double-reversal method holds: residualizing a co-residual automaton yields the canonical RFA.
Moreover, we derive a generalization of this double-reversal method for RFAs, along the lines of the one of \citet{Brzozowski2014} for DFAs that is more general than the one of \citet{tamm2015generalization}.
Incidentally, we also evidence the connection between the generalized double-reversal method for RFAs of \citet{tamm2015generalization} and the one of \citet{Brzozowski2014} for DFAs.
Finally, we offer a new perspective of the NL\(^*\) algorithm of \citet{bollig2009angluin} for learning RFAs as an algorithm that iteratively refines a quasiorder and uses our automata constructions to build RFAs.
\section{Methodology}
The contributions of this thesis, described in the previous section, are the result of using \emph{monotone well-quasiorders}, i.e. quasiorders that satisfy certain properties with respect to concatenation of words and for which there is no infinite decreasing sequence of elements, as building blocks for tackling problems from \emph{Formal Language Theory}.
Monotone well-quasiorders have proven useful for reasoning about formal languages from a theoretical perspective (see the survey of \citet{d2008well}).
For instance, \citet{ehrenfeucht_regularity_1983} showed that a language is regular if{}f it is closed for a monotone well-quasiorder and \citet{deLuca1994} extended this result by showing that a language is regular if{}f it is closed for a left monotone and for a right monotone well-quasiorder.
On the other hand, \citet{kunc2005regular} used well-quasiorders to show that all maximal solutions of certain systems of inequalities on languages are regular.
Our work evidences that monotone well-quasiorders also have practical applications by placing them at the core of some well-known algorithms.
\paragraph*{Monotone Well-Quasiorders}
\emph{Quasiorders} are binary relations that are \emph{reflexive}, i.e. every word is related to itself, and \emph{transitive}, i.e. if a word ``u'' is related to ``v'' which is related to ``w'' then ``u'' is related to ``w''.
Intuitively, we use quasiorders to group words that behave ``similarly'' (in a certain way) with respect to a given regular language.
This naturally leads to the use of \emph{monotone quasiorders} so that ``similarity'' between words is preserved by concatenation, i.e. when concatenating two ``similar'' words with the same letter the resulting words remain ``similar''.
\begin{exampleC}\label{example:lengthQO}
Consider the \emph{length quasiorder}, which says that ``\(u\)'' is related to ``\(v\)'' if{}f \(\len{u} \leq \len{v}\), where \(\len{u}\) denotes the length of a word ``\(u\)''.
It is straightforward to check that this is a monotone quasiorder since
\begin{myEnumI}
\item \(\len{u} \leq \len{u}\) for every word \(u\), hence it is \emph{reflexive};
\item if \(\len{u} \leq \len{v}\) and \(\len{v} \leq \len{w}\) then \(\len{u} \leq \len{w}\), hence it is \emph{transitive};
\item if \(\len{u} \leq \len{v}\) then \(\len{ua} \leq \len{va}\) for every letter \(a\), hence it is \emph{monotone}.
{\ensuremath{\Diamond}}
\end{myEnumI}
\end{exampleC}
The most basic sets of words that can be formed by using a quasiorder are the so called \emph{principals}, i.e. sets of words that are related to a single one which we refer to as the \emph{generating word} of the principal.
For example, given the length quasiorder, the principal with generating word ``u'' is the set of all words ``w'' with \(\len{\text{u}} \leq \len{\text{w}}\).
Finally, when considering \emph{well-quasiorders} we find that the union of the principals of any (possibly infinite) set of words coincides with the union of the principals of a \emph{finite} subset of words. For instance, the quasiorder from Example~\ref{example:lengthQO} is a monotone well-quasiorder since the union of the principals of any infinite set of words coincides with the principal of the shortest word in the set.
Next, we offer a high-level description of how we use \emph{monotone well-quasiorders} and their induced principals in each of the contributions of this thesis.
\subsection{Quasiorders for Deciding Language Inclusion}
Consider the language inclusion problem \(L_1 \subseteq L_2\) where \(L_1\) is context-free and \(L_2\) is regular.
The principals of a given monotone well-quasiorder can be used to compute an \emph{over-approximation} of \(L_1\) that consists of a \emph{finite} number of elements.
If the quasiorder is such that a principal is included in \(L_2\) if{}f its generating word is in \(L_2\), then we can reduce the language inclusion problem \(L_1 \subseteq L_2\) to the simpler problem of deciding a finite number of membership queries for \(L_2\).
To do that it suffices to compute the over-approximation of \(L_1\) and check membership in \(L_2\) for the generating words of the finitely many principals that form the over-approximation.
This approach is illustrated in Figure~\ref{fig:LangIncl}.
\begin{figure}
\caption{Illustration of our quasiorder-based approach for deciding the language inclusion problems \(L_1 \subseteq L_2\) and \(L_3 \subseteq L_2\).}
\label{fig:LangIncl}
\end{figure}
In order to compute the over-approximation of \(L_1\) we successively over-approximate the Kleene iterates of its least fixpoint characterization.
The following example shows the language equations for a context-free language and the first steps of the Kleene iteration, which converges to the least fixpoint of the equations.
\begin{exampleC}\label{example:KleeneIterate}
Consider the language equations \(\{X = aX \cup Ya \cup bY ,\; Y = a\}\), whose Kleene iterates converge to their least fixpoint:
\[\begin{cases}
X = \varnothing \\
Y = \varnothing
\end{cases} \hspace{-12pt}\Rightarrow \begin{cases}
X = \varnothing \\
Y = \{a\}
\end{cases} \hspace{-12pt}\Rightarrow \begin{cases}
X = \{aa, ba\} \\
Y = \{a\}
\end{cases} \hspace{-12pt}\Rightarrow \ldots \Rightarrow \begin{cases}
X = a^*(aa\,|\,ba) \\
Y = \{a\}
\end{cases} \enspace \tag*{
{\ensuremath{\Diamond}}}\]
\end{exampleC}
This approach for solving language inclusion problems is studied in Chapter~\ref{chap:LangInc}.
In that chapter we present a quasiorder-based framework which, by instantiating it with different monotone well-quasiorders, allows us to systematically derive well-known decision procedures for different language inclusion problems such as the antichains algorithms of \citet{DBLP:conf/cav/WulfDHR06} and \citet{Holk2015}.
Moreover, by switching from least fixpoint equations for computing the over-approximation of \(L_1\) to greatest fixpoint equations, we are able to obtain a \emph{novel} algorithm for deciding language inclusion between regular languages.
\subsection{Quasiorders for Searching on Compressed Text}
Searching with a regular expression in a grammar-compressed text\footnote{By ``searching'' we mean finding subsequences of the uncompressed text that match a regular expression, i.e. that are included in a given regular language.} amounts to deciding whether the language generated by a grammar, which consists of a single word, is included in a regular language.
Therefore, we can apply the quasiorder-based framework described in the previous section, i.e. we can compute an over-approximation of the language generated by the grammar and check inclusion of the over-approximation into the regular language.
However, this approach would only indicate whether there is a subsequence in the text that matches the expression and it would not produce enough information to count the matches let alone recover them.
In order to report the exact lines\footnote{We use the standard definition of \emph{line} as a sequence of characters delimited by ``new line'' symbols.} that contain a match (either count them or recover the actual lines), we need to compute some extra information for each variable of the grammar, beyond the over-approximation of the generated language.
Indeed, we need to compute the following information regarding the language generated by each variable, which consists of a single word\footnote{Recall that, in the context of grammar-based compression, the grammar is a compressed representation of a text, hence it generates a single word: the text. As a consequence, each variable of the grammar generates a single word.}, namely \(w\):
\begin{myEnumI}
\item The number of lines that contain a match.
\item Whether there is a ``new line'' symbol in \(w\).
\item Whether the prefix of \(w\) contains a match.
\item Whether the suffix of \(w\) contains a match.
\end{myEnumI}
This quasiorder-based approach is presented in Chapter~\ref{chap:zearch} where we show that the above mentioned extra information for each variable of the grammar is trivially computed for the terminals and then propagated through all the variables until the axiom.
Furthermore, Chapter~\ref{chap:zearch} includes a detailed description of the implementation and evaluation of the resulting algorithm which, as the experiments show, outperforms the state of the art.
\subsection{Quasiorders for Building Residual Automata}
It is well-known that the construction of the minimal DFA for a language is related to the use of \emph{congruences}, i.e. symmetric monotone quasiorders~\cite{Buchi89,Khoussainov2001}.
Recently, \citet{ganty2019congruence} generalized this idea and offered a congruence-based perspective on minimization algorithms for DFAs.
Intuitively, they build automata by using the principals induced by congruences as states and define the transitions according to inclusions between the principals and the sets obtained by concatenating them with letters.
When the congruence has finite index then it induces a finite number of principals and, therefore, the resulting automata have finitely many states.
Figure~\ref{fig:rho_automata} illustrates this automata construction.
\begin{figure}
\caption{The image on the left shows the principals induced by a quasiorder. Each arrow of the form \(ρ(x)\ggoes{a}ρ(y)\) indicates that the set \(ρ(x)a\) is included in the principal \(ρ(y)\).}
\label{fig:rho_automata}
\end{figure}
Let \(ρ(u)\) denote the principal for a word \(u\).
The monotonicity of congruences ensures that every set \(ρ(u)a\) is included in a principal \(ρ(v)\) and, since congruences are symmetric, the principals induced by a congruence are disjoint and, therefore, the resulting automaton is deterministic.
By switching from congruences to quasiorders we obtain possibly overlapping principals which enables non-determinism and allows us to obtain \emph{residual automata} which, recall, are a generalization of DFAs.
Clearly, the principals shown in Figure~\ref{fig:rho_automata} correspond to a quasiorder rather than a congruence since they are not disjoint.
This quasiorder-based perspective on RFAs is presented in Chapter~\ref{chap:RFA} where we define quasiorder-based automata constructions that yield RFAs or co-RFAs, depending on the properties of the input quasiorder.
Moreover, given two comparable quasiorders, our automata construction instantiated with the coarser quasiorder yields a smaller automaton.
This is to be expected since a coarser quasiorder induces fewer principals which, recall, are the states of the automata.
As a consequence, building the canonical minimal RFA for a given language amounts to instantiating our automata construction with the coarsest quasiorder that satisfies certain requirements.
Interestingly, building the minimal DFA amounts to instantiating the framework of \citet{ganty2019congruence} with the coarsest congruence that satisfies the same requirements.
As we shall see in Chapter~\ref{chap:RFA}, the congruence and the quasiorder used for building the minimal DFA and RFA, respectively, are closely related.
We conclude that \emph{monotone quasiorders} are fundamental for RFAs as \emph{congruences} are fundamental for DFAs, which evidences the relationship between these two
classes of automata.
{}
{}
\chapter{State of the Art}
\label{chap:related}
In this dissertation, we present two quasiorder-based frameworks that allow us to systematically derive algorithms for solving different language inclusion problems and manipulating residual automata, respectively.
Moreover, we show that our algorithms for deciding language inclusion can be adapted for searching on compressed text.
Our theoretical framework allows us to devise some novel algorithms and offer new insights on existing ones.
Therefore, most of the works related to ours are briefly discussed within the following chapters, when explaining them within our quasiorder-based perspective.
This is the case, specially, in Chapters~\ref{chap:LangInc} and~\ref{chap:RFA}.
However, we present in this chapter a detailed description of some previous works in order to provide an overview of the state of the art for these problems before writing this Ph.D. Thesis.
\section{The Language Inclusion Problem}\label{sec:TheLanguageInclusionProblem}
Consider the language inclusion problem \(L_1 \subseteq L_2\).
When the underlying representations of \(L_1\) and \(L_2\) are regular expressions, one can check language inclusion using some rewriting techniques~\cite{Antimirov1995,keil_et_al:LIPIcs:2014:4841}, thus avoiding the translation of the regular expression into an equivalent automaton.
On the other hand, when the languages are given through finite automata, a well known and standard method to solve the language inclusion problem is to reduce it to a disjointness problem via the construction of the language complement: \(L_1 \subseteq L_2\) if{}f \(L_1 \cap L_2^c = \varnothing\).
The bottleneck of this approach is the language complementation since it involves a determinization step which entails a worst case exponential blowup.
In order to alleviate this bottleneck, \citet{DBLP:conf/cav/WulfDHR06} put forward a breakthrough result where complementation was sidestepped by a lazy construction of the determinized NFA, which provided a huge performance gain in practice.
Their algorithm, deemed the \emph{antichains} algorithm, was subsequently enhanced with simulation relations by \citet{Abdulla2010}.
The current state of the art for solving the language inclusion problem between regular languages is the bisimulation up-to approach proposed by \citet{DBLP:conf/popl/BonchiP13}, of which the antichains algorithm and their enhancement with simulations can be viewed as particular cases.
\subsection{Antichains Algorithms}\label{sec:antichainsAlgorithms}
The \emph{antichains} algorithm of \citet{DBLP:conf/cav/WulfDHR06} was originally designed as an algorithm for solving the universality problem for regular languages, i.e. deciding whether \(Σ^* \subseteq L\) holds when \(L\) is regular.
Before the introduction of this algorithm, the standard approach for deciding universality of a regular language given its automaton was to determinize the automaton and check whether all states are final.
The \emph{antichains} algorithm improved this situation by keeping the determinization step implicit.
In their work, \citet{DBLP:conf/cav/WulfDHR06} also adapted their antichains algorithm for solving the language inclusion problem \(L_1 \subseteq L_2\) when both \(L_1\) and \(L_2\) are regular.
Next, we describe this antichains algorithm for solving language inclusion.
Consider the inclusion problem \(L_1 \subseteq L_2\) and let \(\mathcal{N}_1\) and \(\mathcal{N}_2\) be finite-state automata generating the languages \(L_1\) and \(L_2\) respectively.
The intuition behind the \emph{antichains} algorithm is to compute, for each state \(q\) of \(\mathcal{N}_1\), the set \(S_q\) of sets of states of \(\mathcal{N}_2\) from which no final state of \(\mathcal{N}_2\) is reachable by reading words generated from \(q\) in \(\mathcal{N}_1\).\footnote{Note that this is equivalent to finding states of the complement of the determinized version of \(\mathcal{N}_2\) from which a final state is reachable by reading a word generated from \(q\) in \(\mathcal{N}_1\).}
Clearly, the inclusion \(L_1 \subseteq L_2\) holds if{}f none of the sets of states computed for the initial states of \(\mathcal{N}_1\) contain some initial state of \(\mathcal{N}_2\).
In order to prevent the computation of all possible subsets of \(\mathcal{N}_2\) from which the final states are non-reachable, which would be equivalent to determinizing \(\mathcal{N}_2\), the \emph{antichains} algorithm ensures that the set \(S_q\) for each state \(q\) in \(\mathcal{N}_1\) is an antichain, i.e. \(\forall s,s' \in S_q, \; s \nsubseteq s' \land s' \nsubseteq s\).
The idea behind the use of \emph{antichains} is that, given two sets of states of \(\mathcal{N}_2\), namely \(s\) and \(s'\), if \(s \subseteq s'\) then if no final state of \(\mathcal{N}_2\) is reachable from \(s'\) by reading words in a certain set then the same holds for \(s\).
Therefore, discarding the set \(s\) and keeping the set \(s'\) preserves the correctness of the algorithm.
The resulting algorithm is referred to as the \demph{backward antichains algorithm}.
Furthermore, \citet{DBLP:conf/cav/WulfDHR06} also defined a dual of the antichains algorithm described above.
In this case, the algorithm computes the set \(\widetilde{S}_q\) of sets of states of \(\mathcal{N}_2\) reachable from an initial state by reading a word generated from \(q\) in \(\mathcal{N}_1\).
In this case, the inclusion \(L_1 \subseteq L_2\) holds if{}f for every initial state \(q\) of \(\mathcal{N}_1\), all the sets in \(\widetilde{S}_q\) contain a final state.
Again, by ensuring that \(\widetilde{S}_q\) is an \emph{antichain}, we can reduce the number of sets of states of \(\mathcal{N}_2\) that need to be computed since, whenever \(s \subseteq s'\), if a final state is reachable from \(s\) by a word in a given language, the same holds for \(s'\) and, therefore, it is possible to discard \(s'\).
The resulting algorithm is referred to as the \demph{forward antichains algorithm}.
The proof of the correctness of the \emph{antichains} algorithm, as presented by \citet{DBLP:conf/cav/WulfDHR06}, heavily depends on the automata representation of the languages.
We believe that our quasiorder-based framework, presented in Chapter~\ref{chap:LangInc}, offers a better understanding on the \emph{antichains} algorithm and its correctness proof by offering a new explanation of the algorithm from a language perspective.
\paragraph*{Improvements on the Antichains Algorithm}
The \emph{antichains} algorithm of \citet{DBLP:conf/cav/WulfDHR06} was later improved by \citet{Abdulla2010}, who used simulations (between states and between sets of states) for reducing the amount of sets of states considered by the algorithm.
In particular, they found that, for the \emph{forward antichains algorithm}, there is no need to add the set \(s\) of states of \(\mathcal{N}_2\) to the set \(\widetilde{S}_q\) for a certain state \(q\) of \(\mathcal{N}_1\) if there exists a state \(q'\) of \(\mathcal{N}_1\) such that \(q\) simulates \(q'\) and whose associated set \(\widetilde{S}_{q'}\) contains a set \(s'\) that simulates \(s\).
The idea behind this approach is that simulation is a sufficient condition for language inclusion to hold, i.e. if the set of states \(s'\) simulates the set \(s\) then the language generated from \(s'\) is a subset of the language generated from \(s\).
As we show in Chapter~\ref{chap:LangInc}, this improvement on the \emph{antichains} algorithm can be partially accommodated by our quasiorder-based framework by using simulations in the definition of the quasiorder.
By doing so, the resulting algorithm matches the behavior of the one of~\citet{Abdulla2010} when \(q = q'\).
On the other hand, \citet{DBLP:conf/popl/BonchiP13} defined a new type of relation between sets of states, denoted \emph{bisimulation up to congruence}, and used it to define a new algorithm for deciding language equivalence between sets of states of a given automaton.
Intuitively, bisimulations up to congruence are enhanced bisimulations (and, therefore, if they relate two sets of states then both sets generate the same language) that might relate sets of states that are not explicitly related by the underlying bisimulation but are related by its implicit congruence closure.
Since \(L_1 \subseteq L_2 \Leftrightarrow L_1 \cup L_2 = L_2\), the algorithm of \citet{DBLP:conf/popl/BonchiP13} can be used to decide the inclusion \(L_1 \subseteq L_2\) by considering the union automaton \(\mathcal{N}_1 \cup \mathcal{N}_2\) and checking whether a bisimulation up to congruence holds between the union of the initial states of \(\mathcal{N}_1\) and \(\mathcal{N}_2\), which generate \(L_1 \cup L_2\), and the initial states of \(\mathcal{N}_2\), which generate \(L_2\).
Finally, \citet{Holk2015} used \emph{antichains} to solve the language inclusion problem \(L_1 \subseteq L_2\) when \(L_1\) is a context-free language and \(L_2\) is regular.
To do that, they reduced the language inclusion problem to a data flow analysis one.
This allowed them to rephrase the language inclusion problem as an inclusion problem between sets of relations on the states of the automaton.
Then, they applied the antichains principle to reduce the number of relations that need to be manipulated.
As we show in Chapter~\ref{chap:LangInc}, our quasiorder-based framework for deciding the language inclusion \(L_1 \subseteq L_2\) also applies to the case in which \(L_1\) is a context-free grammar.
Indeed, when \(L_1\) is regular we instantiate our framework with left or right monotone quasiorders and obtain the antichains algorithm of \citet{DBLP:conf/cav/WulfDHR06} and its variants, among other algorithms.
Similarly, when \(L_1\) is context-free, we use left and right monotone quasiorders and obtain the antichains algorithm of \citet{Holk2015}, among others.
Therefore, our framework allows us to offer a more direct presentation of the \emph{antichains} algorithm for grammars of \citet{Holk2015} as a straightforward extension of the \emph{antichains} algorithm for regular languages.
\subsection{Solving Language Inclusion through Abstractions}
Our approach draws inspiration from the work of \citet{Hofmann2014}, who considered the language inclusion problem on \emph{infinite words} \(L_1 \subseteq L_2\) where \(L_1\) is represented by a B{\"u}chi automata and \(L_2\) is regular.
They defined a language inclusion algorithm based on fixpoint computations and a language abstraction based on an equivalence relation between states of the underlying automata representation.
Although the equivalence relation is folklore (it can be found in several textbooks on language theory \cite{Khoussainov2001,Sakarovitch}), \citet{Hofmann2014} were the first, to the best of our knowledge, to use it as an abstraction and, in particular, as a complete domain in abstract interpretation.
As we show in Chapter~\ref{chap:LangInc}, our framework for solving the language inclusion problem also relies on computing the language abstraction of a fixpoint computation.
However, we focus on languages on finite words and generalize the language abstractions by relaxing their equivalence relations to quasiorders.
Moreover, by considering quasiorders instead of equivalences, we are able to generalize the fixed point-based approach to check $L_1\subseteq L_2$ when $L_2$ is non-regular.
\section{Searching on Compressed Text}\label{sec:related:Search}
The problem of searching with regular expressions on grammar-compressed text has been extensively studied for the last decades.
Results in this topic can be divided in two main groups:
\begin{myEnumA}
\item[a)] Characterization of the problem's complexity from a theoretical point of view~\cite{plandowski1999complexity,markey2004ptime,Amir2018FineGrained}.
\item[b)] Development of algorithms and data structures to efficiently solve different versions of the problem such as pattern matching~\cite{navarro2005lzgrep,de1998direct,navarro2007compressedIndex}, approximate pattern matching~\cite{bille2009improved,karkkainen2003approximate}, multi-pattern matching~\cite{kida1998multipattern,gawrychowski2014simple}, regular expression matching~\cite{navarro2003regular,bille2009improved} and
subsequence matching~\cite{bille2017compressed}.
\end{myEnumA}
To characterize the complexity of search problems on grammar-compressed text it is common to use \emph{straight line programs} (grammars generating a single string) to represent the output of the compression.
Straight line programs are a natural model for algorithms such as \textsc{LZ78}~\cite{ziv1978compression}, \textsc{LZW}~\cite{welch1984technique}, Recursive Pairing~\cite{larsson2000off} or Sequitur~\cite{nevill1997compression} and, as proven by \citet{Rytter2004Equivalent}, polynomially equivalent to \textsc{LZ77}~\cite{ziv1977compression}.
However, algorithms for searching with regular expressions on grammar-compressed text are typically designed for a specific compression scheme~\cite{navarro2005lzgrep,navarro2003regular,bille2009improved}.
The first algorithm to solve this problem is due to \citet{navarro2003regular} and it is defined for \textsc{LZ78}/\textsc{LZW} compressed text.
His algorithm reports all positions in the uncompressed text at which a substring that matches the expression ends and exhibits $\mathcal{O}(2^s+s\cdot T+\text{occ}\cdot s\cdot \log{s})$ worst case time complexity using $\mathcal{O}(2^s+t\cdot s)$ space, where ``occ'' is the number of occurrences, $s$ is the size of the expression and $T$ is the length of the text compressed to size $t$.
To the best of our knowledge this is the only algorithm for regular expression searching on compressed text that has been implemented and evaluated in practice.
\citet{bille2009improved} improved the result of Navarro by defining a relationship between the time and space required to perform regular expression searching on compressed text.
They defined a data structure of size $o(t)$ to represent \textsc{LZ78} compressed texts and an algorithm that, given a parameter $\tau$, finds all occurrences of a regular expression in a \textsc{LZ78} compressed text in $\mathcal{O}(t\cdot s\cdot (s+\tau)+\text{occ}\cdot s\cdot \log{s})$ time using $\mathcal{O}(t\cdot s^2/\tau + t\cdot s)$ space.
To the best of our knowledge, no implementation of this algorithm was carried out.
We tackle the problem of searching in grammar-compressed text by using our algorithms for deciding language inclusion.
We \emph{adapt} these algorithms to efficiently handle straight line programs and \emph{enhance} them with additional information, that is computed for each variable of the grammar, in order to find the exact matches.
Our approach, presented in Chapter~\ref{chap:zearch}, differs from the previous ones in the generality of its definition since, by working on straight line programs, our algorithm and its complexity analysis apply to any grammar-based compression scheme.
This is a major improvement since, as shown by \citet{Hucke2016Smallest}, the \textsc{LZ78} representation of a text of length $T$ has size $t=\Theta((T/\log(T))^{2/3})$ while its representation as a straight line program has size $t=Ω(\log(T)/(\log\log(T)))$ and $t=\mathcal{O}((T/\log(T))^{2/3})$.
Therefore, our approach allows us to handle much more concise representations of the data.
Moreover, the definition of ``occurrence'' used in previous works, i.e. positions in the uncompressed text from which we can read a match of the expression, is of limited practical interest.
As evidence, state of the art tools for regular expression searching, such as \tool{grep} or \tool{ripgrep}, define an occurrence as a line of text containing a match of the expression and so do we.
As a consequence, our algorithm reports the number of occurrences of a \emph{fixed} regular expression in a compressed text in $\mathcal{O}(t)$ time while previous algorithms require $\mathcal{O}(T)$ since $\text{occ}=\mathcal{O}(T)$.
Even when there are no matches ($\text{occ}=0$), so previous approaches operate in $\mathcal{O}(t)$ time, the result of \citet{Hucke2016Smallest} shows that our algorithm behaves potentially better than the others.
\paragraph*{Deciding the Existence of a Match}
It is worth remarking that the problem of deciding language inclusion between the languages generated by a straight line program and an automaton has been studied before.
In particular \citet{plandowski1999complexity} reduced this problem to a series of matrix multiplications, showing that it can be solved in $\mathcal{O}(t\cdot s^3)$ time ($\mathcal{O}(t\cdot s)$ for deterministic automata) where $t$ is the size of the grammar and $s$ is the size of the automaton.
Note that this problem corresponds to deciding whether a grammar-compressed text contains a match for a given regular expression.
On the other hand, \citet{esparza00} defined an algorithm to solve a number of decision problems involving automata and context-free grammars which, when restricted to grammars generating a single word, results in a particular implementation of Plandowski's approach.
Indeed, this implementation coincides with our Algorithm \AlgSLPIncS, presented in Chapter~\ref{chap:zearch} as a straightforward adaptation of the algorithm given in Chapter~\ref{chap:LangInc} for deciding the inclusion of a context-free language into a regular one.
\section{Building Residual Automata}
Residual automata (RFA for short) were first introduced by \citet{denis2000residual,denis2001residual,denis2002residual}.
We deliberately use the notation RFA for residual automata, instead of the standard RFSA, in order to be consistent with the notation used in this thesis for deterministic (DFA) and non-deterministic (NFA) automata.
When introducing RFAs, \citet{denis2000residual} defined an algorithm for \emph{residualizing} an automaton, which is an adaptation of the well-known subset construction used for determinization.
Moreover, they showed that there exists a \emph{unique} \emph{canonical} RFA, which is minimal in number of states, for every regular language.
Finally, they showed that the residual-equivalent of the double-reversal method holds, i.e.\ residualizing an automaton \(\mathcal{N}\) whose reverse is residual yields the canonical RFA for the language generated by \(\mathcal{N}\).
Later, \citet{tamm2015generalization} generalized the double-reversal method for RFAs by giving a sufficient and necessary condition that guarantees that the residualization operation defined by \citet{denis2002residual} yields the canonical RFA.
This generalization comes in the same lines as that of \citet{Brzozowski2014} for the double-reversal method for DFAs.
In Chapter~\ref{chap:RFA}, we present a quasiorder-based framework of automata constructions inspired by the work of \citet{ganty2019congruence}, who defined a framework of automata constructions based on \emph{equivalences} over words to provide new insights on the relation between well-known methods for computing the minimal \emph{deterministic} automaton of a language.
Intuitively, the shift from equivalences to quasiorders allows us to move from deterministic automata to residual ones.
In their work, \citet{ganty2019congruence} used \emph{congruences}, i.e. monotone equivalences, over words that induce finite partitions over \(\Sigma^*\).
Then, they used well-known automata constructions that yield automata generating a given language \(L\)~\cite{Buchi89,Khoussainov2001} to derive new automata constructions parametrized by a congruence.
As a result, when using the Nerode's congruence for \(L\), their automata construction yields the minimal DFA for \(L\)~\cite{Buchi89,Khoussainov2001} while, when using the so-called \emph{automata-based equivalence} relative to an NFA their construction yields the determinized version of the input NFA.
They also obtained counterpart automata constructions that yield, respectively, the minimal co-deterministic and a co-deterministic automaton for the language.
The relation between the automata constructions resulting from the Nerode's and the automata-based congruences allowed them to relate determinization and minimization operations.
Finally, they re-formulated the generalization of the double-reversal method presented by \citet{Brzozowski2014}, which gives a sufficient and necessary condition that guarantees that determinizing an NFA yields the minimal DFA for the language generated by the NFA.
Our quasiorder-based framework allows us to extend the work of \citet{ganty2019congruence} and devise automata constructions that result in residual automata.
Moreover, we derive a residual-equivalent of the generalized double-reversal method from \citet{Brzozowski2014} that is more general than the one presented by \citet{tamm2015generalization}.
{}
{}
\chapter{Background}
\label{chap:prel}
In this section, we introduce all the concepts and notation that will be used throughout the rest of the thesis.
\section{Words and Languages}
Let \(\Sigma\) be a finite nonempty \demph{alphabet} of symbols.
A \demph{string} or \demph{word} \(w\) is a finite sequence of symbols of \(Σ\) where the empty sequence is denoted \(ε\).
We denote \(w^R\) the \demph{reverse} of \(w\) and use \(\len{w}\) to denote the \demph{length} of \(w\) that we abbreviate to \(†\) when \(w\) is clear from the context.
We define \( (w)_i \) as the \(i\)-th symbol of \(w\) if \(1 ≤ i ≤ †\) and \(ε\) otherwise.
Similarly, \((w)_{i,j}\) denotes the substring, also called \demph{factor}, of $w$ between the $i$-th and the $j$-th symbols, both included.
Clearly, \(w = (w)_{1,\dag}\).
We write \(\Sigma^*\) to denote the set of all finite words on $\Sigma$ and write \(\wp(S)\) to denote the set of all subsets of \(S\), i.e. \(\wp(S) \ud \{S' \mid S' \subseteq S\}\).
Given a language \(L \in \wp(\Sigma^*)\), \(L^R \ud \{w^R \mid w \in L\}\) denotes the \demph{reverse} of \(L\) while \(L^c \ud \{w \in Σ^* \mid w \notin L\}\) denotes its \demph{complement}.
Concatenation in \(\Sigma^*\) is simply denoted by juxtaposition, both for concatenating words \(uv\), languages \(L_1L_2\) and words with languages such as \(uLv\).
We sometimes use the symbol \(\cdot\) to refer explicitly to concatenation.
\begin{definition*}[Quotient]
Let \(L \subseteq Σ^*\) and \(u \in Σ^*\).
The \emph{left quotient} of \(L\) by the word \(u\) is the set of suffixes of the word \(u\) in \(L\), i.e.
\[u^{-1}L \ud \{w \in Σ^* \mid uw \in L\}\enspace .\]
Similarly, the \emph{right quotient} of \(L\) by the word \(u\) is the set of all prefixes of \(u\) in \(L\), i.e.
\[Lu^{-1} \ud \{w \in Σ^* \mid wu \in L\}\enspace . \]
Finally, we lift the notions of left and right quotients by a word to sets \(S\subseteq Σ^*\) as:
\[S^{-1}L \ud \{w \in Σ^* \mid \forall s\in S,\; sw \in L\} \;\text{ and }\; LS^{-1} \ud \{w \in Σ^* \mid \forall s\in S,\; ws \in L\} \tag*{
\rule{0.5em}{0.5em}}\]
\end{definition*}
Note that the definition of quotient by a set is unconventional as it uses the universal quantifier instead of the existential one.
We use this definition since it guarantees that the quotient by a set is the adjoint of concatenation, i.e.
\[XY \subseteq L \Leftrightarrow Y \subseteq X^{-1}L \Leftrightarrow X \subseteq LY^{-1}\enspace .\]
\begin{definitionNI*}[Composite and Prime Quotients]\index{Quotient!composite}\index{Quotient!prime}
A left (resp. right) quotient \(u^{-1}L\) is \emph{composite} if{}f it is the union of all the left (resp. right) quotients that it strictly contains, i.e.
\[u^{-1}L = \hspace{-15pt}\bigcup_{x \in Σ^*, \; x^{-1}L \subsetneq u^{-1}L}\hspace{-15pt} x^{-1}L\qquad\qquad (\text{resp. }Lu^{-1} = \hspace{-15pt}\bigcup_{x \in Σ^*, \; Lx^{-1} \subsetneq Lu^{-1}}\hspace{-15pt} Lx^{-1})\enspace .\]
When a quotient is not composite, we say it is \emph{prime}.\eod
\end{definitionNI*}
\section{Finite-state Automata}\label{sec:FSA}
Throughout this dissertation we consider three different classes of automata: non-deterministic, deterministic and residual.
Next, we define these classes of automata and introduce some basic notions related to them.
\paragraph*{Non-Deterministic Finite-State Automata}
\begin{definition*}[NFA]\index{non-deterministic automaton}
A \emph{non-deterministic finite-state automaton} (NFA for short) is a tuple \(\mathcal{N}=\tuple{Q,Σ,\delta,I,F}\) where \(\Sigma\) is the \emph{alphabet}, \(Q\) is the finite \emph{set of states}, \(I\subseteq Q\) is the subset of \emph{initial states}, \(F\subseteq Q\) is the subset of \emph{final states}, and \(\delta\colon Q\times \Sigma \rightarrow \wp(Q)\) is the \emph{transition relation}. \eod
\end{definition*}
We sometimes use the notation \(q\ggoes{a} q'\) to denote that \(q'\in \delta(q,a)\).
If \(u\in \Sigma^*\) and \(q,q'\in Q\) then \(q \stackrel{u}{\leadsto} q'\) means that the state \(q'\) is reachable
from \(q\) by following the string \(u\).
Formally, by induction on the length of $u\in \Sigma^*$:
\begin{myEnumI}
\item if $u=\epsilon$ then \(q \goes{\epsilon} q'\) if{}f \(q=q'\);
\item if $u=av$ with $a\in \Sigma,v\in \Sigma^*$ then \(q \goes{av} q'\) if{}f $\exists q''\in \delta(q,a),\; q''\goes{v}q'$.
\end{myEnumI}
The \emph{language} generated by an NFA \(\mathcal{N}\), often referred to as the \demph{language accepted} by \(\mathcal{N}\) is \(\lang{\mathcal{N}}\ud\{u \in \Sigma^* \mid \exists q_i\in I, \exists q_f \in F, \; q_i\goes{u}q_f\}\).
We define the successors and the predecessors of a set \(S \subseteq Q\) by a word \(w \in Σ^*\) as:
\begin{align*}
\mindex{\post_w^{\mathcal{N}}}(S) & \ud \{q \in Q \mid \exists q' \in S, \; q' \goes{w} q\} &
\mindex{\pre_w^{\mathcal{N}}}(S) & \ud \{q \in Q \mid \exists q' \in S, \; q \goes{w}q'\} \enspace .
\end{align*}
In general, we omit the automaton \(\mathcal{N}\) from the superscript when it is clear from the context.
Figure~\ref{fig:NFA} shows an example of an NFA.
\begin{figure}
\caption{An NFA \(\mathcal{N}
\label{fig:NFA}
\end{figure}
Given \(S,T \subseteq Q\), define
\[\mindex{W^{\mathcal{N}}_{S,T}} \ud \{w \in \Sigma^* \mid \exists q \in S, q' \in T, \; q \goes{w} q'\}\enspace .\]
When \(S\) or \(T\) are singletons, we abuse notation and write \(W_{q,T}^{\mathcal{N}}\), \(W_{S,q'}^{\mathcal{N}}\) or even \(W_{q,q'}^{\mathcal{N}}\).
In particular, when \(S = \{q\}\) and \(T = F\), we say that \(W^{\mathcal{N}}_{q,F}\) is the \demph{right language} of \(q\).
Likewise, when \(S = I\) and \(T = \{q\}\), we say that \(W^{\mathcal{N}}_{I,q}\) is the \demph{left language} of \(q\).
We say that a state \(q\) is \demph{unreachable} if{}f \(W^{\mathcal{N}}_{I,q} = \varnothing\) and we say that \(q\) is \demph{empty} if{}f \(W^{\mathcal{N}}_{q,F} = \varnothing\).
Finally, note that
\[\lang{\mathcal{N}} = \bigcup_{q \in I} W_{q,F}^{\mathcal{N}} = \bigcup_{q \in F} W_{I,q}^{\mathcal{N}} = W_{I,F}^{\mathcal{N}} \enspace . \]
\begin{definition*}[Sub-automaton]
Let \(\mathcal{N} = \tuple{Q,Σ,δ,I,F}\) be an NFA.
A \emph{sub-automaton} of \(\mathcal{N}\) is an NFA \(\mathcal{N}' = \tuple{Q', Σ, δ', I', F'}\) for which \(Q' \subseteq Q\), \(F' \subseteq F\), \(I' \subseteq I\) and for every \(q,q' \in Q\) and \(a \in Σ\) we have that \(q' \in δ'(q,a) \Rightarrow q' \in δ(q,a)\).\eod
\end{definition*}
Clearly, if \(\mathcal{N}'\) is a sub-automaton of \(\mathcal{N}\) then \(\lang{\mathcal{N}'}\subseteq \lang{\mathcal{N}}\).
\begin{definition*}[Reverse Automaton]
Let \(\mathcal{N} = \tuple{Q,Σ,δ,I,F}\) be an NFA.
The \emph{reverse} of \(\mathcal{N}\) is the NFA \(\mathcal{N}^R \!\ud\! \tuple{Q, \Sigma, \delta^R, F, I}\) where for every \(q,q' \in Q\) and \(a \in Σ\) we have that \(q \!\in\! \delta^R (q',a) \Leftrightarrow q' \!\in\! \delta(q,a)\).\eod
\end{definition*}
It is straightforward to check that \(\lang{\mathcal{N}}^R = \lang{\mathcal{N}^R}\).
\paragraph*{Deterministic Finite-State Automata}
\begin{definitionNI*}[DFA and co-DFA]\index{DFA}\index{co-DFA}\index{deterministic automaton}\index{co-deterministic automaton}
A \emph{deterministic finite-state automaton} (DFA for short) is an NFA such that \(I = \{q_0\}\) and, for every state \(q \in Q\) and every symbol \(a \in \Sigma\), there exists \emph{at most} one state \(q' \in Q\) such that \(q' \in \delta(q,a)\).
A \emph{co-deterministic finite-state automaton} (co-DFA for short) is an NFA \(\mathcal{N}\) such that \(\mathcal{N}^R\) is a DFA.\eod
\end{definitionNI*}
\begin{definition*}[Subset Construction]\index{determinization}
Let \(\mathcal{N} = \tuple{Q, \Sigma, \delta, I, F}\) be an NFA.
The \emph{subset construction} builds a DFA \(\mathcal{N}^D \ud \tuple{Q^D, \Sigma, \delta^D, I^D, F^D}\) where
\begin{align*}
Q^D & \ud \{\post_u^{\mathcal{N}}(I) \mid u \in Σ^*\} \\
I^D & \ud \{I\} \\
F^D & \ud \{S \in \wp(Q) \mid S \cap F \neq \varnothing\}\\
\delta^D(S,a) & \ud \{q' \mid \exists q \in S, \; q' \in \delta(q,a)\} \text{ for every \(S \in Q^D\) and \(a \in \Sigma\)} \tag*{\eod}
\end{align*}
\end{definition*}
Given an NFA \(\mathcal{N}\), we denote by \(\mathcal{N}^D\) the DFA that results from applying the subset construction to \(\mathcal{N}\) where only subsets that are reachable from the initial state of \(\mathcal{N}^D\) are used.
As shown by \citet{Ullman2003}, \(\lang{\mathcal{N}^D} = \lang{\mathcal{N}}\) for every automaton \(\mathcal{N}\).
Figure~\ref{fig:DFA} shows the DFA obtained when applying the subset construction to the NFA from Figure~\ref{fig:NFA}.
\begin{figure}
\caption{DFA \(\mathcal{N}
\label{fig:DFA}
\end{figure}
A DFA for the language \(\lang{\mathcal{N}}\) is \emph{minimal}, denoted by \(\mathcal{N}^{DM}\), if it has no unreachable states and no two states have the same right language.
For instance, the DFA from Figure~\ref{fig:DFA} is not minimal since the states \(\{0,1,3\},\{0,3\},\{0,1,2,3\}\) and \(\{3\}\) all have the same right language.
The minimal DFA for a regular language is \emph{unique} modulo isomorphism and is determined by the right quotients of the generated language.
\begin{definition*}[Minimal DFA]
Let \(L\) be a regular language.
The \emph{minimal DFA} for \(L\) is the DFA \(\mathcal{D} \ud \tuple{Q^D, Σ, δ^D, I^D, F^D}\) where
\begin{align*}
Q^D & \ud \{u^{-1}L \mid u \in Σ^*\}\\
I^D & \ud \{u^{-1}L \in Q^D \mid u^{-1}L \subseteq L\}\\
F^D & \ud \{u^{-1}L \in Q^D \mid \varepsilon \in u^{-1}L\}\\
δ^D(u^{-1}L, a) & \ud \{v^{-1}L \in Q^D \mid v^{-1}L = a^{-1}(u^{-1}L)\} \text{ for every \(u^{-1}L \in Q^D\) and \(a \in Σ\)} \tag*{\eod}
\end{align*}
\end{definition*}
\paragraph*{Residual Finite-State Automata}
\begin{definitionNI*}[RFA and co-RFA]\index{RFA}\index{co-RFA}\index{residual automaton}\index{co-residual automaton}
A \emph{residual finite-state automaton} (RFA for short) is an NFA such that the right language of each state is a left quotient of the language generated by the automaton.
A \emph{co-residual automaton} (co-RFA for short) is an NFA \(\mathcal{N}\) such that \(\mathcal{N}^R\) is residual, i.e. the left language of each state is a right quotient of the language generated by the automaton.\eod
\end{definitionNI*}
Formally, an RFA is an NFA \(\mathcal{N} = \tuple{Q, Σ, δ, I, F}\) satisfying
\[\forall q \in Q, \exists u \in Σ^*, \; W_{q,F} = u^{-1}\lang{\mathcal{N}}\enspace .\]
Similarly, \(\mathcal{N}\) is a co-RFA if{}f it satisfies
\[\forall q \in Q, \exists u \in Σ^*, \; W_{I,q} = \lang{\mathcal{N}}u^{-1}\enspace .\]
The left quotients of the form \(u^{-1}L\), where \(L \subseteq Σ^*\) is a language and \(u \in Σ^*\), are also known as \demph{residuals}, which gives name to RFAs.
We say \(u \in Σ^*\) is a \demph{characterizing word} for \(q \in Q\) if{}f \(W_{q,F}^{\mathcal{N}} = u^{-1}\lang{\mathcal{N}}\) and we say \(\mathcal{N}\) is \emph{consistent}\index{consistent RFA} if{}f each state \(q\) is reachable by a characterizing word for \(q\).
Moreover, \(\mathcal{N}\) is \emph{strongly consistent}\index{strongly consistent RFA} if{}f every state \(q\) is reachable by every characterizing word of \(q\).
Similarly to the case of DFAs, there exists a \emph{residualization} operation~\cite{denis2002residual} that, given an NFA \(\mathcal{N}\), builds an RFA \(\mindex{\mathcal{N}^{\text{res}}}\) such that \(\lang{\mathcal{N}^{\text{res}}} = \lang{\mathcal{N}}\).
This construction can be seen as a determinization followed by the removal of coverable states and the addition of new transitions.
We say that the set \(\post_u^{\mathcal{N}}(I)\) is \demph{coverable} if{}f
\[\post_u^{\mathcal{N}}(I) = \hspace{-25pt}\bigcup_{x\in\Sigma^*, \; \post_x^{\mathcal{N}}(I) \subsetneq \post_u^{\mathcal{N}}(I)}\hspace{-25pt}\post_{x}^{\mathcal{N}}(I) \enspace .\]
\begin{definition*}[Residualization]\index{\(\mathcal{N}^{\text{res}}\)}
Let \(\mathcal{N} = \tuple{Q, Σ, δ, I, F}\) be an NFA.
Then the \emph{residualization} operation builds the RFA \(\mathcal{N}^{\text{res}} \ud \tuple{\widetilde{Q}, Σ, \widetilde{δ}, \widetilde{I}, \widetilde{F}}\) with
\begin{align*}
\widetilde{Q} & \ud \{ \post_u^{\mathcal{N}}(I) \mid u \in Σ^* \land \post_u^{\mathcal{N}}(I) \text{ is not coverable}\}\\
\widetilde{I} & \ud \{S \in \widetilde{Q} \mid S \subseteq I\}\\
\widetilde{F} & \ud \{S \in \widetilde{Q} \mid S \cap F \neq \varnothing\}\\
\widetilde{δ}(S, a) & = \{S' \in \widetilde{Q} \mid S' \subseteq δ(S, a)\} \text{ for every \(S \in \widetilde{Q}\) and \(a \in Σ\)}\tag*{\eod}
\end{align*}
\end{definition*}
Figure~\ref{fig:RFA} shows the RFA obtained by applying the residualization operation to the NFA from Figure~\ref{fig:NFA}.
\begin{figure}
\caption{RFA \(\mathcal{N}
\label{fig:RFA}
\end{figure}
Similarly to the case of DFAs, there exists an RFA for every regular language that is minimal in the number of states and is \emph{unique} modulo isomorphism: the \emph{canonical RFA}.
\begin{definition*}[Canonical RFA]
Let \(L\) be a regular language.
The \emph{canonical RFA} for \(L\) is the RFA \(\mathcal{C} \ud \tuple{Q^C, Σ, δ^C, I^C, F^C}\) with
\begin{align*}
Q^C & \ud \{u^{-1}L \mid u \in Σ^*, \; u^{-1}L \text{ is prime}\}\\
I^C & \ud \{u^{-1}L \in Q^C \mid u^{-1}L \subseteq L\}\\
F^C & \ud \{u^{-1}L \in Q^C \mid \varepsilon \in u^{-1}L\}\\
δ^C(u^{-1}L, a) & \ud \{v^{-1}L \in Q^C \mid v^{-1}L \subseteq a^{-1}(u^{-1}L)\} \text{ for every \(u^{-1}L \in Q^C\) and \(a \in Σ\)} \tag*{\eod}
\end{align*}
\end{definition*}
The canonical RFA is a strongly consistent RFA and it is the \emph{minimal} (in number of states) RFA such that \(\lang{\mathcal{C}} = L\)~\cite{denis2002residual}.
Moreover, by definition, the canonical RFA has the \emph{maximal} number of transitions.
Finally, it is straightforward to check that any DFA \(\mathcal{D}\) is also an RFA since \(W_{q,F}^{\mathcal{D}} = u^{-1}L\) for all \(u \in W_{I,q}^{\mathcal{D}}\).
Therefore, we have the following relations between these classes of automata:
\[\text{{\large DFA}} \subsetneq \text{{\large RFA}} \subsetneq \text{{\large NFA}} \enspace .\]
\section{Context-free Grammars}
\begin{definitionNI*}[CFG]\index{grammar}\index{Context-Free Grammar}\index{CFG}
A \emph{context-free grammar} (grammar or CFG for short) is a tuple \(\mathcal{G} \ud \tuple{\mathcal{V},\Sigma, P}\) where \(\mathcal{V}=\{X_0,\ldots,X_n\}\) is a finite set of \emph{variables} including the \emph{start symbol} \(X_0\) (also denoted \demph{axiom}), \(\Sigma\) is a \emph{finite alphabet} of terminals and \(P\) is the set of \emph{rules} \(X_i\rightarrow \beta\) where \(\beta\in (\mathcal{V}\cup\Sigma)^*\).\eod
\end{definitionNI*}
In the following we assume, for simplicity and without loss of generality, that grammars are always given in Chomsky Normal Form (CNF)\index{Chomsky Normal Form}\index{CNF}~\cite{DBLP:journals/iandc/Chomsky59a}, that is, every rule \(X_i \rightarrow \beta\in P\) is such that \(\beta\in (\mathcal{V}\times \mathcal{V}) \cup \Sigma \cup \{\epsilon\}\) and if $\beta=\epsilon$ then \(i=0\).
We also assume that for all \(X_i \in \mathcal{V}\) there exists a rule \(X_i \rightarrow \beta\in P\), otherwise \(X_i\) can be safely removed from \(\mathcal{V}\).
Given two strings \(w, w' \in (\mathcal{V} \cup \Sigma)^*\) we write \(w \Rightarrow w'\) if{}f there exists two strings \(u, v \in (\mathcal{V} \cup \Sigma)^*\) and a grammar rule \(X \to \beta \in P\) such that \(w = u X v\) and \(w' = u\beta v\).
We denote by \(\Rightarrow^*\) the reflexive-transitive closure of \(\Rightarrow\).
The \emph{language} generated by a grammar \(\mathcal{G}\) is \(\lang{\mathcal{G}} \ud \{w \in \Sigma^* \mid X_0 \Rightarrow^* w\}\).
\paragraph*{Straight-line Programs}
In the context of grammar-based compression we are interested in straight line programs, i.e. grammars generating exactly one word.
\begin{definitionNI*}[SLP]\index{Straight Line Program}\index{SLP}
A \emph{straight line program} (SLP for short), is a CFG $\mathcal{P}=\tuple{\mathcal{V},Σ,P}$ where the set of rules is of the form
\[P \ud \{X_i → α_i β_i \mid 1 \leqslant i \leqslant \len{\mathcal{V}}, \; α_i,β_i \in Σ \cup \{X_1,…,X_{i-1}\}\}\enspace . \]
We refer to $X_{\len{\mathcal{V}}} → \alpha_{\len{\mathcal{V}}}\beta_{\len{\mathcal{V}}}$ as the \emph{axiom rule}.\eod
\end{definitionNI*}
It is straightforward to check that the language generated by an SLP consists of a single string $w ∈ Σ^*$ and, by definition, $\len{w} > 1$.
Since \(\lang{\mathcal{P}}=\{w\}\), we identify \(w\) with \(\lang{\mathcal{P}}\).
\section{Quasiorders}
Let $f:X\rightarrow Y$ be a function between sets and let $S\in \wp(X)$.
We denote the image of \(f\) on $S$ by $f(S) \ud \{f(x) \in Y \mid x\in S\}$.
The composition of two functions $f$ and $g$ is denoted by $fg$ or $f\comp g$.
A \demph{quasiordered set} (qoset for short) is a tuple \(\tuple{D,\mathord{\leqslant}}\) such that \(\mathord{\leqslant}\) is a \demph{quasiorder} (qo for short) relation on $D$, i.e.\ a reflexive and transitive binary relation.
Given a qoset \(\tuple{D,\mathord{\leqslant}}\) we denote by \(\sim_D\) the equivalence relation induced by \(\leqslant\):
\[d \sim_D d' \udrshort d\leqslant d' \:\wedge\: d' \leqslant d, \quad\text{for all $d,d'\in D$}\enspace .\]
Moreover, given a qo \(\leqslant\) we denote its strict version by \(<\):
\[u < v \udiff u \leqslant v \land v \not\leqslant u \enspace. \]
We say that a qoset satisfies the \emph{ascending} (resp.\ \emph{descending}) \emph{chain condition} (\demph{ACC}, resp.\ \demph{DCC}) if there is no countably infinite sequence of distinct elements \(\{x_i\}_{i \in \mathbb{N}}\) such that, for all $i\in\mathbb{N}$, \(x_i \leqslant x_{i{+}1}\) (resp. \(x_{i{+}1} \leqslant x_{i}\)).
If a qoset satisfies the ACC (resp. DCC) we say it is ACC (resp. DCC).
\begin{definitionNI*}[Closure and Principals]\index{Closure}\index{Principal}
Let \({\leqslant}\) be a quasiorder on \(Σ^*\) and let \(S \subseteq Σ^*\).
The \emph{closure} of \(S\) is
\[ρ_{\leqslant}(S) \ud \{w \in Σ^* \mid \exists x \in S, \; x \leqslant w\}\enspace .\]
We say \(ρ_{\leqslant}(S)\) is a \emph{principal} if \(S\) is a singleton.
In that case, we abuse of notation and write \(ρ_{\leqslant}(u)\) instead of \(ρ_{\leqslant}(\{u\})\).\eod
\end{definitionNI*}
Given two quasiorders \(\mathord{\leqslant}\) and \(\mathord{\leqslant'}\) we say that \(\mathord{\leqslant}\) is finer than \(\mathord{\leqslant'}\) (or \(\mathord{\leqslant'}\) is coarser than \(\mathord{\leqslant}\)) and write \(\mathord{\leqslant} \subseteq \mathord{\leqslant'}\) if{}f \(ρ_{\leqslant}(S)\subseteq ρ_{\leqslant'}(S)\) for every set \(S \subseteq \Sigma^*\).
\begin{definitionNI*}[Left and Right Quasiorders]\index{Left quasiorder}\index{Right quasiorder}
Let \(\leqslant\) be a quasiorder.
We say \(\leqslant\) is \emph{right monotone} (or equivalently, \(\leqslant\) is a \emph{right quasiorder}), and denote it by \(\leqslant^{r}\), if{}f
\[u \leqslant^{r} v \Rightarrow ua \leqslant^{r} va, \quad \text{ for all \(u,v \in Σ^*\) and \(a \in Σ\)} \enspace .\]
Similarly, we say \(\leqslant\) is a \emph{left quasiorder}, and denote it by \(\leqslant^{\ell}\), if{}f
\[u \leqslant^{\ell} v \Rightarrow au \leqslant^{\ell} av, \quad \text{ for all \(u,v \in Σ^*\) and \(a \in Σ\)}\enspace \tag*{\eod}\]
\end{definitionNI*}
A qoset \(\tuple{D,\mathord{\leqslant}}\) is a \demph{partially ordered set} (poset for short) when \(\mathord{\leqslant}\) is antisymmetric.
A subset $X\subseteq D$ of a poset is \demph{directed} if{}f $X$ is nonempty and every pair of elements in $X$ has an upper bound in $X$.
\begin{definition*}[Least Upper Bound]\index{lub}
Let \(\tuple{D,\leqslant}\) be a partially ordered set and let \(x, y \in D\).
The \emph{least upper bound} of \(x\) and \(y\) is the element \(z \in D\) such that
\[x \leqslant z \land y \leqslant z \land \left(\forall d \in D, \; (x \leqslant d \land y \leqslant d)\Rightarrow z \leqslant d \right)\enspace . \tag*{\eod}\]
\end{definition*}
\begin{definition*}[Greatest Lower Bound]\index{glb}
Let \(\tuple{D,\leqslant}\) be a partially ordered set and let \(x, y \in D\).
The \emph{greatest lower bound} of \(x\) and \(y\) is the element \(z \in D\) such that
\[z \leqslant x \land z \leqslant y \land \left(\forall d \in D, \; (d \leqslant x \land d \leqslant y)\Rightarrow d \leqslant z \right)\enspace . \tag*{\eod}\]
\end{definition*}
A poset \(\tuple{D,\mathord{\leqslant}}\) is a \demph{directed-complete partial order} (CPO for short) if{}f it has the least upper bound (lub for short) of all its directed subsets.
A poset is a \demph{join-semilattice} if{}f it has the lub of all its nonempty finite subsets (therefore binary lubs are enough).
A poset is a \demph{complete lattice} if{}f it has
the lub of all its arbitrary (possibly empty) subsets; in this case, let us recall that
it also has the greatest lower bound (glb for short) of all its arbitrary subsets.
\paragraph{Well-quasiorders}
\begin{definition*}[Antichain]
Let \(\tuple{D,\mathord{\leqslant}}\) be a qoset.
A subset $X \subseteq D$ is an \emph{antichain} if{}f any two distinct elements in $X$ are incomparable. \eod
\end{definition*}
We denote the set of antichains of a qoset $\tuple{D,\mathord{\leqslant}}$ by
\[\AC_{\tuple{D,\mathord{\leqslant}}} \ud \{X\subseteq D \mid X \text{ is an antichain}\}\enspace .\]
\begin{definition*}[Well-quasiorder]
Let \(\tuple{D,\mathord{\leqslant}}\) be a quasiordered set.
We say it is a \emph{well-quasiordered set} (wqoset for short), and $\mathord{\leqslant}$ is a \emph{well-quasiorder} (wqo for short), if{}f for every countably infinite sequence of elements \(\{x_i\}_{i\in \mathbb{N}}\) there exist \(i,j\in \mathbb{N}\) such that \(i<j\) and \(x_i\leqslant x_j\).
Equivalently, we say \(\tuple{D,\mathord{\leqslant}}\) is a well-quasiordered set if{}f $D$ is DCC and $D$ has no infinite antichain. \eod
\end{definition*}
For every qoset \(\tuple{D,\mathord{\leqslant}}\), we shift the quasiorder \(\leqslant\) to a binary relation $\sqsubseteq_{\leqslant}$ on the powerset as follows.
Given \(X,Y\in \wp(D)\),
\[X\mindex{\sqsubseteq_{\leqslant}} Y \udr \forall x\in X, \exists y\in Y,\; y\leqslant x \enspace .
\]
When the quasiorder is clear from the context, we drop the subindex and write simply \(\sqsubseteq\).
Given a qoset \(\tuple{D, \leqslant}\), we define the set of \demph{minimal elements} of a subset \(X \subseteq D\):
\[\mindex{\minim_{\leqslant}}(X) \ud \{x \in X \mid \forall y \in X, y \leqslant x \Rightarrow y=x\}\enspace .\]
\begin{definition*}[Minor]
Let \(\tuple{D, \leqslant}\) be a qoset.
A \emph{minor} of a subset \(X \subseteq D\), denoted by \(\minor{X}\), is a subset of the minimal elements of \(X\) w.r.t.\ \(\leqslant\), i.e. \(\minor{X}\subseteq\minim_{\leqslant}(X)\), such that
\(X \sqsubseteq \minor{X}\) holds.\eod
\end{definition*}
Clearly, a minor $\minor{X}$ of some set \(X\) is always an antichain.
Let us recall that every subset $X$ of a wqoset \(\tuple{D,\leqslant}\) has at least one minor set, all minor sets of $X$ are finite,
$\minor{\{x\}}=\{x\}$, $\minor{\varnothing}=\varnothing$, and
if \(\tuple{D,\mathord{\leqslant}}\) is additionally a poset then there exists exactly one minor set of $X$.
It turns out that \(\tuple{\AC_{\tuple{D,\mathord{\leqslant}}},\sqsubseteq}\) is a qoset which is ACC if \(\tuple{D,\leqslant}\) is a wqoset and is a poset if \(\tuple{D,\leqslant}\) is a poset.
\paragraph{Nerode Quasiorders}
\begin{definition*}[Nerode's Quasiorders]
Let \(L \subseteq Σ^*\) be a language.
The left and right \emph{Nerode's quasiorders} on \(\Sigma^*\) are, respectively
\begin{align*}
u\mindex{\leqslant^{\ell}L} v &\udrshort\; L u^{-1} \subseteq L v^{-1} \,,&
u\mindex{\leqslant^{r}L} v &\udrshort\; u^{-1} L \subseteq v^{-1} L \tag*{\eod}
\end{align*}
\end{definition*}
As shown by \citet{deLuca1994}, \(\mathord{\leqslant^{\ell}L}\) and \(\mathord{\leqslant^{r}L}\) are, respectively, left and right monotone and, if $L$ is regular then both \(\mathord{\leqslant^{\ell}L}\) and \(\mathord{\leqslant^{r}L}\) are wqos \citep[Theorem~2.4]{deLuca1994}.
Furthermore, \citet{deLuca1994} showed that \(\mathord{\leqslant^{\ell}L}\) is maximum in the set of all left monotone quasiorders \(\leqslant^{\ell}\) that satisfy \(ρ_{\leqslant^{\ell}}(L) = L\).
Therefore, for every left quasiorder \(\leqslant^{\ell}\), if \(ρ_{\leqslant^{\ell}}(L) = L\) then \(x \leqslant^{\ell} y \Rightarrow x \leqslant^{\ell}L y\).
Similarly holds for right quasiorders and the right Nerode quasiorder.
\section{Kleene Iterates}
Let \(\tuple{X,\leqslant}\) be a qoset and \(f:X \rightarrow X\) be a function.
The function $f$ is \emph{monotone} if{}f $x\leqslant y$ implies $f(x) \leqslant f(y)$.
Given \(b\in X\), the trace of values of the variable \(x\in X\) computed by the following iterative procedure:
\[
\mindex{\Kleene}(f,b) \ud \left\{ \begin{array}{l}
x:=b; \\
\textbf{while~} f(x) \neq x \textbf{~do~} x:=f(x);\\
\textbf{return~} x;
\end{array}
\right.
\]
provides the possibly infinite sequence of so-called
\demph{Kleene iterates} of the function \(f\) starting from the basis \(b\).
Whenever \(\tuple{X,\leqslant}\) is an ACC (resp. DCC) CPO, \(b\leqslant f(b)\) (resp. \(f(b)\leqslant b\)) and \(f\) is monotone
then, by Knaster-Tarski-Kleene fixpoint theorem, \(\Kleene(f,b)\) terminates and returns the least (resp.\ greatest) fixpoint of the function \(f\) which is greater (resp.\ lower) than or equal to \(b\).
In particular, if $\bot_X$ (resp.\ $\top_X$) is the least (resp. greatest) element of $X$ then
\(\Kleene(f,\bot_X)\) (resp.\ \(\Kleene(f,\top_X)\)) computes
the sequence of Kleene iterates that finitely converges to the least (resp.\ greatest)
fixpoint of $f$, denoted by \(\lfp(f)\) (resp.\ \(\gfp(f)\)).
\begin{theorem}\label{theorem:Kleene}
Let \(\tuple{X, \leqslant}\) be an ACC CPO and let \(f:X \rightarrow X\) be a monotone function.
Then \(\Kleene(f,\bot_X)\) terminates and returns the least fixpoint of \(f\).
\end{theorem}
\begin{proof}
To simplify the notation, we use \(\bot\) to denote the least element of \(X\), \(\bot_X\).
Next, we show by induction that \(f^n(\bot) \leqslant f^{n{+}1}(\bot)\) for all \(n \geq 0\).
\begin{myItem}
\item \emph{Base case:} The relation \(\bot \leqslant f(\bot)\) holds since \(\bot\) is the least element in \(X\).
\item \emph{Inductive step:} Assume \(f^n(\bot) \leqslant f^{n{+}1}(\bot)\) for some value \(n\).
Then, since \(f\) is a monotone function, we have that \(f^{n{+}1}(\bot) \leqslant f^{n{+}2}(\bot)\).
\end{myItem}
We conclude that \(f^n(\bot) \leqslant f^{n{+}1}(\bot)\) holds for all \(n \geq 0\).
Since the qoset \(\tuple{X,\leqslant}\) satisfies the ACC, there is no infinite sequence of ascending distinct elements and, as a consequence, \(\Kleene(f,\bot)\) terminates and returns a fixpoint of function \(f\).
Next, we show that if \(f^n(\bot) = f^{n{+}1}(\bot)\) for some \(n\) then \(f^n(\bot) = \lfp(f)\).
To do that, we show that \(f^{i}(\bot) \leqslant p\) for every \(i \geq 0\) and for every fixpoint \(p\) of \(f\).
Therefore, the fixpoint \(f^{n}(\bot)\) is below (w.r.t.\ the quasiorder \(\leqslant\)) any other fixpoint, hence \(f^n(\bot)\) is the least fixpoint of \(f\), i.e. \(f^n(\bot) =\lfp(f)\).
Again, we proceed by induction on \(n\).
Let \(p\) be a fixpoint of \(f\), i.e. \(f(p) = p\).
\begin{myItem}
\item \emph{Base case:} The relation \(\bot \leqslant p\) trivially holds by definition of \(\bot\).
\item \emph{Inductive step:} Assume \(f^n(\bot) \leqslant p\) for some value \(n\).
Then, since \(f\) is a monotone function, we have that \(f^{n{+}1}(\bot) \leqslant f(p) = p\), where the last equality follows from the fact that \(p\) is a fixpoint.
\end{myItem}
Clearly, \(f^n(\bot) \leqslant p\) for all \(n \geq 0\) and for every fixpoint \(p\) of \(f\).
Therefore \(\Kleene(f, \bot) = \lfp(f)\).
\end{proof}
For the sake of clarity, we overload the notation and use the same symbol for a function/relation
and its componentwise (i.e.\ pointwise) extension on product domains.
For instance, if $f:X\rightarrow Y$ then $f$ also denotes the standard product function $f:X^n \rightarrow Y^n$ defined by
$\lambda\tuple{x_1,...,x_n}\in X^n.\tuple{f(x_1),...,f(x_n)}$.
A vector \(\vect{Y}\) in some product domain \(D^{|S|}\) is also denoted by \(\tuple{Y_i}_{i \in S}\) and, for some $i\in S$,
\(\vect{Y}_{\!\! i}\) denotes its component \(Y_i\).
\section{Closures and Galois Connections}
We conclude this chapter by recalling some basic notions on closure operators and Galois Connections commonly used in abstract interpretation (see, e.g., \cite{CC79,mine17}).
Closure operators and Galois Connections are equivalent notions \cite{Cousot78-1-TheseEtat} and, therefore, they are both used for defining the notion of \emph{approximation} in abstract interpretation, where closure operators allow us to define and reason on abstract domains independently of a specific representation which is required by
Galois Connections.
\begin{definition*}[Upper Closure Operator]
Let \(\tuple{C,\mathord{\leqslant_C},\vee,\wedge}\) be a complete lattice, where $\vee$ and $\wedge$ denote, respectively, the lub and glb.
An \emph{upper closure operator}, or simply \demph{closure}, on \(\tuple{C,\mathord{\leqslant_C}}\) is a function \(\rho:C\to C\) which is:
\begin{myEnumI}
\item \emph{monotone}, i.e. \(x \leqslant_C y \Rightarrow \rho(x) \leqslant_C \rho(y) \) for all \(x,y \in C\);
\item \emph{idempotent}, i.e. \(\rho(\rho(x)) = \rho(x)\) for all \(x \in C\), and
\item \emph{extensive}, i.e. \(x \leqslant_C \rho(x)\) for all \(x \in C\). \eod
\end{myEnumI}
\end{definition*}
The set of all upper closure operators on \(C\) is denoted by \(\uco(C)\).
We often write \(c \in \rho(C)\), or simply \(c \in \rho\), to denote that
there exists \(c' \in C\) such that \(c = \rho(c')\), and
recall that this happens if{}f $\rho(c) = c$.
If $\rho\in \uco(C)$ then for all \(c_1\in C\), \(c_2\in \rho\) and \(X \subseteq C\),
it turns out that:
\begin{align}
&c_1 \leqslant_C c_2 \Leftrightarrow \rho(c_1)\leqslant_C \rho(c_2) \Leftrightarrow \rho(c_1)\leqslant_C c_2 \label{equation:abstractcheck}\\
&\rho ({\textstyle\vee} X) = \rho({\textstyle\vee}\rho(X)) \quad \text{and}\quad {\textstyle\wedge}\rho (X) = \rho({\textstyle\wedge}\rho(X))\enspace. \label{equation:lubAndGlb}
\end{align}
In abstract interpretation, a closure operator \(\rho\in \uco(C)\) on a concrete domain $C$ plays
the role of abstraction function for objects of $C$. Given two closures \(\rho,\rho' \in \uco(C)\), \(\rho\) is a
\demph{coarser abstraction}
than \(\rho'\) (or, equivalently,
$\rho'$ is a more precise abstraction than $\rho$) if{}f the image of
\(\rho\) is a subset of the image of \(\rho'\), i.e. \(\rho(C) \subseteq \rho'(C)\), and this happens if{}f for any $x\in C$,
$\rho'(x) \leqslant_C \rho(x)$.
\begin{definition*}[Galois Connection]
A \emph{Galois Connection} (GC for short) or \emph{adjunction} between two posets \(\tuple{C,\leqslant_C}\) (a concrete domain) and \(\tuple{A,\leqslant_A}\) (an abstract domain) consists of two monotone functions \(\alpha\colon C\rightarrow A\) and \(\gamma \colon A\rightarrow C\) such that
\[\alpha(c)\leqslant_A a \:\Leftrightarrow\: c\leqslant_C \gamma(a), \quad \text{for all } a\in A, c \in C \enspace .\]
A Galois Connection is denoted by \( \tuple{C,\leqslant_C} \galois{\alpha}{\gamma} \tuple{A,\leqslant_A}\).\eod
\end{definition*}
\pagebreak
\begin{lemma}\label{lemma:propertiesGC}
Let \(\tuple{C,\leqslant_C}\galois{\alpha}{\gamma}\tuple{A,\leqslant_A}\) be a GC.
The following properties hold:
\begin{myEnumA}
\item \(x \leqslant_C \gamma \comp \alpha (x)\) and \(\alpha \comp \gamma(y) \leqslant_A y\).\label{lemma:propertiesGC:gammaalpha}
\item \(\alpha\) and \(\gamma\) are monotonic functions.\label{lemma:propertiesGC:monotone}
\item \(\alpha = \alpha \comp \gamma \comp \alpha\) and \(\gamma = \gamma \comp \alpha \comp \gamma\).
\end{myEnumA}
\end{lemma}
\begin{proof}
\begin{myEnumA}
\item Since \(\leqslant_A\) is reflexive, we have that for all \(x \in A\) \(\alpha(x) \leqslant_A \alpha(x)\) holds and, by definition of GC, \(\alpha(x) \leqslant_A \alpha(x) \Leftrightarrow x \leqslant_C \gamma(\alpha(x))\).
Therefore, \(x \leqslant_C \gamma(\alpha(x))\).
Similarly, since \(\alpha(\gamma(y)) \leqslant_A y \Leftrightarrow \gamma(y) \leqslant_C \gamma(y)\) and \(\gamma(y) \leqslant_C \gamma(y)\), we conclude that \(\alpha(\gamma(y)) \leqslant_A y\).
\item Let \(c,c' \in C\) be such that $c\leqslant_C c'$. Then, by Lemma~\ref{lemma:propertiesGC}~\ref{lemma:propertiesGC:gammaalpha}, we have that $c' \leqslant_C \gamma(\alpha(c'))$ and, by definition of GC, $c \leqslant_C \gamma(\alpha(c')) \Rightarrow \alpha(c) \leqslant_A \alpha(c')$.
Similarly, let \(a,a' \in A\) be such that $a\leqslant_A a'$. Then, by Lemma~\ref{lemma:propertiesGC}~\ref{lemma:propertiesGC:gammaalpha}, we have that $α(γ(a)) \leqslant_A a'$ and, by definition of GC, $α(γ(a)) \leqslant_A a' \Rightarrow γ(a) \leqslant_C γ(a')$.
\item Let \(c \in C\).
By Lemma~\ref{lemma:propertiesGC}~\ref{lemma:propertiesGC:gammaalpha}, we have that \(c\leqslant_C \gamma(\alpha(c))\) which, by Lemma~\ref{lemma:propertiesGC}~\ref{lemma:propertiesGC:monotone}, implies that $\alpha(c) \leqslant_A \alpha(\gamma(\alpha(c)))$.
Moreover, since \(\gamma(\alpha(c))\leqslant_C \gamma(\alpha(c))\), it follows from the definition of GC that \(\alpha(\gamma(\alpha(c))) \leqslant_A \alpha(c)\).
Therefore \(\alpha(\gamma(\alpha(c))) = \alpha(c)\).
Similarly, let \(a \in A\).
By Lemma~\ref{lemma:propertiesGC}~\ref{lemma:propertiesGC:gammaalpha} and~\ref{lemma:propertiesGC:monotone}, we have that $γ(α(γ(a))) \leqslant_C γ(a)$ and, since \(α(γ(a))\leqslant_A α(γ(a))\), it follows from the definition of GC that \(γ(a) \leqslant_C γ(α(γ(a)))\).
Therefore \(γ(a) = γ(α(γ(a)))\).
\end{myEnumA}
\end{proof}
The function $\alpha$ is called the \demph{left-adjoint} of $\gamma$, and, dually, $\gamma$ is called the \demph{right-adjoint} of $\alpha$.
This terminology is justified by the fact that if a function $\alpha:C\rightarrow A$ admits a right-adjoint $\gamma:A\rightarrow C$ then this is unique (and this dually holds for left-adjoints).
It turns out that, in a GC, \(\gamma\) is always \demph{co-additive}, i.e. it preserves arbitrary glb's, while \(\alpha\) is always \demph{additive}, i.e. it preserves arbitrary lub's.
Moreover, an additive function \(\alpha : C\rightarrow A\) uniquely determines its right-adjoint by
\[\gamma\ud \lambda a\ldotp \bigvee_C\{c\in C \mid \alpha(c)\leqslant_A a\}\enspace .\]
Dually, a co-additive function \(\gamma: A\rightarrow C\) uniquely determines its left-adjoint by
\[\alpha \ud \lambda c\ldotp \bigwedge_A\{a\in A \mid c\leqslant_C \gamma(a)\}\enspace .\]
We conclude this chapter with the following lemma, which is folklore in abstract interpretation yet we provide a proof for the sake of completeness.
\begin{lemma}\label{lemma:alpharhoequality}
Let \( \tuple{C,\leqslant_C} \galois{\alpha}{\gamma} \tuple{A,\leqslant_A}\) be a GC between complete lattices and
\(f\colon C\rightarrow C\) be a monotone function. Then,
\(
\gamma( \lfp (\alpha f \gamma )) = \lfp (\gamma \alpha f)
\).
\end{lemma}
\begin{proof}
Let us first show that \(\gamma( \lfp (\alpha f\gamma) ) \geqslant_C \lfp (\gamma\alpha f) \):
\begin{align*}
\gamma(\lfp(\alpha f \gamma)) \leqslant_C \gamma(\lfp(\alpha f\gamma)) &\Leftrightarrow \quad\text{[Since \(g(\lfp(g))=\lfp(g)\)]}\\
\gamma\alpha f(\gamma(\lfp(\alpha f \gamma))) \leqslant_C \gamma(\lfp(\alpha f \gamma))&\Rightarrow \quad\text{[Since $g(x)\leqslant x \Rightarrow \lfp(g)\leqslant x$]}\\
\lfp(\gamma\alpha f)\leqslant_C \gamma(\lfp(\alpha f \gamma)) &
\end{align*}
Then, let us prove that \(\gamma( \lfp (\alpha f\gamma) ) \leqslant_C \lfp (\gamma\alpha f) \):
\begin{align*}
\lfp(\gamma\alpha f)\leqslant_C \lfp(\gamma\alpha f) & \Leftrightarrow \quad\text{[Since $g(\lfp(g))=\lfp(g)$]}\\
\gamma \alpha f(\lfp(\gamma\alpha f)) \leqslant_C \lfp(\gamma\alpha f) & \Rightarrow \quad\text{[Since $\alpha$ is monotone]}\\
\alpha \gamma \alpha f(\lfp(\gamma\alpha f)) \leqslant_A \alpha (\lfp(\gamma\alpha f)) & \Leftrightarrow \quad\text{[Since $\alpha\gamma\alpha=\alpha$ in GCs]}\\
\alpha f(\lfp(\gamma\alpha f)) \leqslant_A \alpha (\lfp(\gamma\alpha f)) & \Leftrightarrow \quad\text{[Since $g(\lfp(g))=\lfp(g)$]} \\
\alpha f \gamma(\alpha(\lfp(\gamma\alpha f)))\leqslant_A \alpha(\lfp(\gamma\alpha f))&\Rightarrow\quad\text{[Since $g(x)\leqslant x \Rightarrow \lfp(g)\leqslant x$]}\\
\lfp(\alpha f\gamma) \leqslant_A \alpha(\lfp (\gamma\alpha f)) &\Rightarrow\quad\text{[Since $\gamma$ is monotone]}\\
\gamma(\lfp(\alpha f \gamma)) \leqslant_C \gamma\alpha(\lfp(\gamma\alpha f)) &\Leftrightarrow\quad\text{[Since $g(\lfp(g))=\lfp(g)$]}\\
\gamma(\lfp (\alpha f\gamma)) \leqslant_C \lfp(\gamma\alpha f)
\end{align*}
\end{proof}
\section{Complexity Notation}
In this thesis we analyze the time and space complexity of some algorithms and constructions.
To do that, we use the standard small-O, big-O and big-Omega notation to compare functions.
Next, we define these notations for the sake of completeness, where, given a real number \(k\), we write \(\len{k}\) to denote its absolute value.
\begin{definitionNI*}[Small-O, Big-O, Big-Omega]\index{Small-O}\index{Big-O}\index{Big-Omega}
Let \(f\) and \(g\) be two functions on the real numbers.
Then
\begin{align*}
f(n) = o(g(n)) & \udiff \forall k > 0, \exists n_0, \; \forall n > n_0, \len{f(n)} \leq k\cdot \len{g(n)} \udiff \lim_{n \to \infty} \frac{f(n)}{g(n)} = 0\\
f(n) = \mathcal{O}(g(n)) & \udiff \exists k > 0, \exists n_0, \; \forall n > n_0, \len{f(n)} \leq k\cdot \len{g(n)} \udiff \limsup_{n \to \infty} \frac{f(n)}{g(n)} < \infty \\
f(n) = Ω(g(n)) & \udiff \exists k >0, \exists n_0, \; \forall n > n_0, \len{f(n)} \geq k \cdot \len{g(n)} \udiff \liminf_{n \to \infty} \frac{f(n)}{g(n)} > 0
\end{align*}
Intuitively, \(f(n) = o(g(n))\) indicates that \(f\) is \emph{asymptotically dominated} by \(g\); \(f(n) = \mathcal{O}(g(n))\) indicates that \(f\) is \emph{asymptotically bounded above} by \(g\) and \(f(n) = Ω(g(n))\) indicates that \(f\) is \emph{asymptotically bounded below} by \(g\).\eod
\end{definitionNI*}
These notations allow us to simplify the complexity analysis by removing all components of low impact in a complexity function.
For instance, let the number of operations performed by an algorithm on an input of size \(n\) be given by a function \(f(n)\) that satisfies
\[n^2 + n\cdot \log n+k \leq f(n) \leq n^3+n^2+\log n + k'\enspace ,\]
where \(k\) and \(k'\) are constants.
Since, by definition, \(n^2 = o(n^3)\), \(\log(n) = o (n^3)\) and \(k' = o(n^3)\) we find that the components \(n^2\), \(\log n\) and \(k'\) have low impact in the behavior of the upper bound of \(f(n)\) for large values of \(n\).
Similarly, the components \(n \cdot \log(n)\) and \(k\) have low impact in the lower bound of \(f(n)\) for large values of \(n\).
Therefore, we find that \(\mathcal{O}(f(n)) = \mathcal{O}(n^3+n^2+\log n + k') = \mathcal{O}(n^3)\) and \(Ω(f(n)) = Ω(n^2 + n\cdot \log n+k) = Ω(n^2)\).
Intuitively, this means that for large values of the parameter \(n\) the function \(f(n)\) is \emph{below} \(n^3\) and \emph{above} \(n^2\).
{}
{}
\chapter{Deciding Language Inclusion}
\label{chap:LangInc}
In this chapter, we present a quasiorder-based framework for deciding language inclusion which is a fundamental and classical problem~\cite[Chapter~11]{HU79} with applications to different areas of computer science.
The basic idea of our approach for solving a language inclusion problem $L_1\subseteq L_2$ is to leverage
Cousot and Cousot's abstract interpretation \cite{CC77,CC79} for checking the inclusion of an over-approximation (i.e.\ a superset) of \(L_1\) into \(L_2\).
This idea draws inspiration from the work of \citet{Hofmann2014}, who used abstract interpretation to decide language inclusion between languages of infinite words.
Assuming that \(L_1\) is specified as least fixpoint of an equation system on $\wp(\Sigma^*)$, an over-approximation of $L_1$ is obtained by applying an over-approximating abstraction function for sets of words \(\rho:\wp(\Sigma^*)\rightarrow \wp(\Sigma^*)\) at each step of the Kleene iterates converging to the least fixpoint $L_1$.
This abstraction map \(\rho\) is an upper closure operator which is used in standard abstract interpretation for approximating an input language by adding words (possibly none) to it.
This abstract interpretation-based approach provides an abstract inclusion check $\rho(L_1) \subseteq L_2$ which is always \emph{sound} by construction because $L_1 \subseteq \rho(L_1)$.
We then give conditions on \(\rho\) which ensure a \emph{complete} abstract inclusion
check, namely, the answer to $\rho(L_1) \subseteq L_2$ is always exact (no ``false alarms'' in abstract interpretation terminology).
These conditions are:
\begin{myEnumIL}
\item \(\rho(L_2)=L_2\) and\label{condition:rhoL}
\item \(\rho\) is a complete abstraction for symbol concatenation $\lambda X\in \wp(\Sigma^*).\,aX$, for all $a\in \Sigma$,
according to the standard notion of completeness in abstract interpretation \cite{CC77,GiacobazziRS00,Ranzato13}.\label{condition:rhobw}
\end{myEnumIL}
This approach leads us to design in Section~\ref{sec:an_algorithmic_framework_for_language_inclusion_based_on_complete_abstractions}
two general algorithmic frameworks for language inclusion problems which are parameterized by an underlying language abstraction (see Theorems~\ref{theorem:FiniteWordsAlgorithmGeneral} and~\ref{theorem:EffectiveAlgorithm}).
Intuitively, the first of these frameworks allows us to decide the inclusion \(L_1 \subseteq L_2\) by manipulating finite sets of words, even if the languages \(L_1\) and \(L_2\) are infinite.
On the other hand, the second framework allows us to decide the inclusion by working on an abstract domain.
We then focus on over-approximating abstractions $\rho$ which are
induced by a quasiorder relation $\mathord{\leqslant}$ on words in $\Sigma^*$. Here, a language \(L\) is over-approximated by adding all the words which are ``greater than or equal to'' some word of \(L\) for $\mathord{\leqslant}$. This allows us to
instantiate the above conditions~\ref{condition:rhoL} and~\ref{condition:rhobw}
for having a complete abstract inclusion check in terms of the quasiorder $\mathord{\leqslant}$.
Termination, which corresponds to having finitely many Kleene iterates in the fixpoint computations,
is guaranteed by requiring that
the relation $\mathord{\leqslant}$ is a well-quasiorder.
We define quasiorders satisfying the above conditions which are directly derived from the standard Nerode equivalence relations on words.
These quasiorders have been first investigated by \citet{ehrenfeucht_regularity_1983} and have been later generalized and extended by \citet{deLuca1994,deluca2011}.
In particular, drawing from a result by \citet{deLuca1994}, we show that the language abstractions induced by the Nerode's quasiorders are the most general ones (thus, intuitively optimal) which fit in our algorithmic framework for checking language inclusion.
While these quasiorder abstractions do not depend on some language representation (e.g., some class of
automata),
we provide quasiorders which instead exploit an underlying language representation given by a finite automaton.
In particular, by selecting suitable well-quasiorders for the class of language inclusion problems at hand, we are able to systematically derive
decision procedures for the inclusion problem
$L_1\subseteq L_2$ when:\begin{myEnumIL}
\item both \(L_1\) and \(L_2\) are regular,
\item \(L_1\) is regular and \(L_2\) is the trace language of a one-counter net and
\item \(L_1\) is context-free and \(L_2\) is regular.
\end{myEnumIL}
These decision procedures that we systematically derive here by instantiating our framework
are then related to existing language inclusion checking algorithms.
We study in detail the case where both languages $L_1$ and $L_2$ are regular and represented by finite-state automata.
When our decision procedure for $L_1\subseteq L_2$ is derived from
a well-quasiorder on $\Sigma^*$ by exploiting the automaton-based representation of \(L_2\), it turns out that
we obtain the well-known ``antichains algorithm'' by \citet{DBLP:conf/cav/WulfDHR06}.
Also, by including a simulation relation in the definition of the well-quasiorder we derive a decision procedure that partially matches the language inclusion algorithm by \citet{Abdulla2010}, and in turn also that by \citet{DBLP:conf/popl/BonchiP13}.
For the case in which \(L_1\) is regular and \(L_2\) is the set of traces of a one-counter net we derive an alternative proof for the decidability of the language inclusion problem~\cite{JANCAR1999476}.
Moreover, for the case in which \(L_1\) is context-free and \(L_2\) is regular, we derive a decision procedure that matches the ``antichains algorithm'' for context-free languages presented by \citet{Holk2015}.
\\
\indent
Finally, we leverage a standard duality result~\cite{cou00} and put forward a \emph{greatest} fixpoint approach (instead of the above \emph{least} fixpoint-based procedures) for the case where both \(L_1\) and \(L_2\) are regular languages.
In this case, we exploit the properties of the over-approximating abstraction induced by the quasiorder in order to show that the Kleene iterates of this greatest fixpoint computation are finitely many.
Interestingly, the Kleene iterates of the greatest fixpoint are finitely many independently of whether the over-approximating abstraction is applied, which we show by relying on so-called forward complete abstract interpretations~\cite{gq01}.
\section{Inclusion Check by Complete Abstractions}
\label{sec:inclusion_checking_by_complete_abstractions}
The language inclusion problem consists in checking whether \(L_1 \!\subseteq\! L_2\) holds where \(L_1\) and \(L_2\) are two languages over a common alphabet \(\Sigma\).
In this section, we show how complete abstractions $\rho$ of $\wp(\Sigma^*)$ can be used to compute
an over-approximation \(\rho(L_1)\) of \(L_1\) such that \(\rho(L_1) \!\subseteq\! L_2 \Leftrightarrow L_1 \!\subseteq\! L_2\).
Closure-based abstract interpretation can be applied to solve a generic inclusion problem by leveraging backward complete abstractions~\cite{CC77,CC79,GiacobazziRS00,Ranzato13}.
An upper closure \(\rho\in \uco(C)\) is called \demph{backward complete}
for a concrete
monotone function \(f:C\rightarrow C\) when \( \rho f=\rho f \rho \) holds. Since $\rho f(c) \leq_C \rho f \rho(c)$ always holds for all $c\in C$,
the intuition is that
backward completeness models an ideal situation where no loss of precision
is accumulated in the computations of $\rho f$ when
its concrete input objects $c$ are over-approximated by $\rho(c)$.
It is well known~\cite{CC79}
that backward completeness implies completeness of least fixpoints, namely
\begin{equation} \label{eqn:lfpcompleteness}
\rho f=\rho f \rho \;\Rightarrow\;
\rho(\lfp(f))=\lfp(\rho f) = \lfp(\rho f \rho)
\end{equation}
provided that these least fixpoints exist (this is the case, for instance, when $C$ is a CPO).
Theorem~\ref{theorem:inc-check-comp-abs} states how
a concrete inclusion check \(\lfp(f) \leq_C c_2\) can be equivalently performed
in a backward complete abstraction \(\rho\) when \(c_2\in \rho\).
\begin{theorem}\label{theorem:inc-check-comp-abs}
If $C$ is a CPO, \(f: C\rightarrow C\) is monotone, $\rho\in \uco(C)$ is backward complete for \(f\) and \(c_2\in \rho\), then
\[\lfp(f) \leq_C c_2 \Leftrightarrow \lfp(\rho f) \leq_C c_2 \enspace .\]
In particular, if \(\tuple{C,\leq_C}\) is ACC then the Kleene iterates of \ \(\lfp(\rho f)\) are finitely many.
\end{theorem}
\begin{proof}
First, we show that \(\lfp(f) \leq_C c_2 \Leftrightarrow \lfp(\rho f) \leq_C c_2\).
\begin{align*}
\lfp(f) \leq_C c_2 &\Leftrightarrow \quad\text{[Since $c_2\in \rho$]}\\
\lfp(f) \leq_C \rho(c_2) &\Leftrightarrow \quad\text{[Since $x\leq \rho(y) \Leftrightarrow \rho(x)\leq \rho(y)$]}\\
\rho(\lfp(f)) \leq_C \rho(c_2) &\Leftrightarrow \quad\text{[By Equation~\eqref{eqn:lfpcompleteness}]}\\
\lfp(\rho f) \leq_C \rho(c_2) &\Leftrightarrow \quad\text{[Since $c_2\in \rho$]}\\
\lfp(\rho f) \leq_C c_2 &
\end{align*}
It remains to prove that the Kleene iterates of \(\lfp(ρf)\) are finitely many.
Observe that, since \(ρ\) and \(f\) are monotone and \(\bot \leq_C ρf(\bot)\), we have that
\[(ρf)^n(\bot) \leq_C (ρf)^{n{+}1}(\bot) \text{ for all \(n \geq 1\)}\enspace .\]
If \(\tuple{C,\leq_C}\) is ACC then, by definition, there are no infinite ascending chains, hence the sequence of Kleene iterates \[\bot \leq_C ρf(\bot) \leq_C (ρf)^2(\bot) \leq_C \ldots \leq_C (ρf)^n(\bot)\]
converges in finitely many steps.
\end{proof}
In the following, we will apply this general abstraction scheme to a number of
different language inclusion problems, by designing inclusion algorithms which rely on
several different backward complete abstractions of \(\wp(\Sigma^*)\).
\section{An Algorithmic Framework for Language Inclusion}
\label{sec:an_algorithmic_framework_for_language_inclusion_based_on_complete_abstractions}
\subsection{Languages as Fixed Points}
\label{sub:languages_as_fixpoints}
Let \(\mathcal{N}=\tuple{Q,Σ,\delta,I,F}\) be an NFA.
Recall that the language accepted by \(\mathcal{N}\) is given by \(\lang{\mathcal{N}} \ud W^{\mathcal{N}}_{I,F}\) and, therefore,
\begin{equation}
\label{eq:unionofrightlg}
\lang{\mathcal{N}}={\textstyle\bigcup_{q\in I}} W^\mathcal{N}_{q,F}={\textstyle\bigcup_{q\in F}} W^\mathcal{N}_{I,q}\enspace
\end{equation}
\noindent
where, as usual, \(\textstyle{\bigcup \varnothing} = \varnothing\).
Let us recall how to define the language accepted by an automaton as a solution of a set of equations~\cite{Schutzenberger63}.
To do that, given a generic boolean predicate \(p(x)\) (typically a membership predicate) on some set and two generic sets $T$ and $F$, we define the following parametric
choice function:\[
\nullable{p(x)}{T}{F} \ud \begin{cases}
T & \text{if \(p(x)\) holds} \\
F & \text{otherwise}
\end{cases} \enspace .\]
The NFA \(\mathcal{N}\) induces the following set of equations, where the $X_q$'s
are variables of type $X_q\in \wp(\Sigma^*)$ and are indexed by states $q\in Q$:
\begin{equation}\label{leftEqn}
\Eqn(\mathcal{N}) \ud \{ X_q = \nullable{q \in F}{\lbrace\epsilon\rbrace}{\varnothing} \cup {\textstyle \bigcup_{a\in \Sigma, q'\in\delta(q,a)}} a X_{q'} \mid q\in Q\} \enspace .
\end{equation}
It follows that the functions in the right-hand side of the equations in
\(\Eqn(\mathcal{N})\) have
type \(\wp(\Sigma^*)^{|Q|} \rightarrow \wp(\Sigma^*)\).
Since \(\tuple{\wp(\Sigma^*)^{|Q|},\subseteq}\) is a (product) complete lattice (because \(\tuple{\wp(\Sigma^*),\subseteq}\) is a complete lattice) and all the right-hand side functions in \(\Eqn(\mathcal{N})\) are clearly monotone,
the least solution \(\tuple{Y_q}_{q\in Q}\in \wp(\Sigma^*)^{|Q|}\) of \(\Eqn(\mathcal{N})\) does exist and it is easy to check
that for every \(q\in Q\), \(Y_q = W^{\mathcal{N}}_{q,F}\) holds, hence, by Equation~\eqref{eq:unionofrightlg}, \(\lang{\mathcal{N}} = {\textstyle\bigcup_{q_i \in I}} Y_{q_i}\).
It is worth noticing that, by relying on right concatenations rather than left ones
$aX_{q'}$ used
in \(\Eqn(\mathcal{N})\), one could also define a
set of symmetric equations whose least solution coincides with \(\tuple{W_{I,q}^{\mathcal{N}}}_{q\in Q}\) instead of \(\tuple{W_{q,F}^{\mathcal{N}}}_{q\in Q}\).
\begin{figure}
\caption{An NFA \(\mathcal{N}\).}
\label{fig:A}
\end{figure}
\begin{example}\label{ex-first}
Let us consider the automaton \(\mathcal{N}\) in Figure~\ref{fig:A}.
The set of equations induced by \(\mathcal{N}\) are as follows:
\[
\Eqn(\mathcal{N})=\begin{cases}
X_1 = \{\epsilon\} \cup aX_1 \cup bX_2\\
X_2 = \varnothing \cup aX_1 \cup b X_2
\end{cases} \enspace . \tag*{
{\ensuremath{\Diamond}}}
\]
\end{example}
It is convenient
to state the equations in \(\Eqn(\mathcal{N})\) by exploiting
an ``initial'' vector \(\vectarg{\epsilon}{F} \in \wp(\Sigma^*)^{|Q|}\) and a predecessor
function \(\Pre_\mathcal{N} \colon \wp(\Sigma^*)^{|Q|} {\rightarrow} \wp(\Sigma^*)^{|Q|}\) defined as follows:
\begin{align*}
\vectarg{\epsilon}{F} &\ud \tuple{\nullable{q \in F}{\{\epsilon\}}{\varnothing}}_{q\in Q}\,, &\qquad
\Pre_\mathcal{N}(\tuple{X_{q}}_{q\in Q}) &\ud \tuple{ {\textstyle \bigcup_{a\in \Sigma, q'\in\delta(q,a)}} aX_{q'}}_{q\in Q} \enspace .
\end{align*}
The intuition for the function \(\Pre_{\mathcal{N}}\) is that given the language \(W_{q',F}^{\mathcal{N}}\) and a transition \(q' \!\in\! \delta(q,a) \), we have that \(aW^{\mathcal{N}}_{q',F} \subseteq W^{\mathcal{N}}_{q,F}\) holds, i.e. given a subset $X_{q'}$ of the language generated by \(\mathcal{N}\) from some state \(q'\), the function \(\Pre_{\mathcal{N}}\) computes a subset $X_q$ of the language generated by \(\mathcal{N}\) for its predecessor state \(q\).
Since \(\epsilon \in W_{q,F}^{\mathcal{N}}\) for all \(q \in F\), the least fixpoint computation can start from the vector
\(\vectarg{\epsilon}{F}\) and iteratively apply $\Pre_\mathcal{N}$.
Therefore
\begin{equation}\label{eq:WqFAequalslfp}
\tuple{W^{\mathcal{N}}_{q,F}}_{q\in Q} = \lfp(\lambda \vect{X}\ldotp \vectarg{\epsilon}{F} \cup \Pre_\mathcal{N}(\vect{X})) \enspace .
\end{equation}
Together with Equation~\eqref{eq:unionofrightlg}, it follows that \(\lang{\mathcal{N}}\) equals the union of the component languages of the vector
\(\lfp(\lambda \vect{X}\ldotp \vectarg{\epsilon}{F} \cup \Pre_\mathcal{N}(\vect{X}))\) indexed by the initial states in $I$.
\begin{example}[Continuation of Example~\ref{ex-first}]
The fixpoint characterization of \(\tuple{W_{q,F}^{\mathcal{N}}}_{q\in Q}\) is:
\[
\left( \begin{array}{c}
W^{\mathcal{N}}_{q_1,q_1} \\ W^{\mathcal{N}}_{q_2,q_1}
\end{array} \right) =
\lfp\biggl(\lambda \left( \begin{array}{c}
X_1 \\ X_2
\end{array} \right) .
\left(\begin{array}{c}
\{\epsilon\} \cup a X_1 \cup b X_2 \\
\varnothing \cup a X_1 \cup b X_2
\end{array}\right)\biggr) = \left( \begin{array}{c}
(a+(b^+ a))^* \\ (a+b)^*a
\end{array} \right) \enspace .\tag*{
{\ensuremath{\Diamond}}}
\]
\end{example}
\paragraph{Fixpoint-based Inclusion Check}
Consider the language inclusion problem \(L_1 \subseteq L_2\), where \(L_1=\lang{\mathcal{N}}\) for some NFA \(\mathcal{N}=\tuple{Q,Σ,\delta,I,F}\).
The language \(L_2\) can be formalized as a vector in \(\wp(\Sigma^*)^{|Q|}\) as follows:
\begin{equation}\label{eq:elltwo}
\vectarg{L_2}{I} \ud \tuple{\nullable{q \in I}{L_2}{\Sigma^*}}_{q\in Q}
\end{equation}
whose components indexed by initial states are $L_2$ and those indexed by non-initial states are $\Sigma^*$. Then, as a consequence of Equations~\eqref{eq:unionofrightlg},~\eqref{eq:WqFAequalslfp} and~\eqref{eq:elltwo}, we have that
\begin{equation}\label{eq:lfp}
\lang{\mathcal{N}}\subseteq L_2 \Leftrightarrow
\lfp(\lambda \vect{X}\ldotp\vectarg{\epsilon}{F} \cup \Pre_{\mathcal{N}}(\vect{X})) \subseteq \vectarg{L_2}{I} \enspace .
\end{equation}
\subsection{Abstract Inclusion Check using Closures}
In what follows, we will apply Theorem~\ref{theorem:inc-check-comp-abs} for solving the language inclusion problem where:
\(C=\tuple{\wp(\Sigma^*)^{|Q|},\subseteq}\), \(f=\lambda \vect{X}\ldotp\vectarg{\epsilon}{F} \cup \Pre_{\mathcal{N}}(\vect{X})\) and
\(\rho\in \uco(\wp(\Sigma^*))\), so that $\rho\in \uco\left(\wp(\Sigma^*)^{|Q|}\right)$.
\begin{theorem}\label{theorem:backComplete}
Let \(\rho \in \uco(\wp(\Sigma^*))\) be backward complete for \(\lambda X\in \wp(\Sigma^*)\ldotp aX\) for all \(a\in \Sigma\) and let \(\mathcal{N}=\tuple{Q,Σ,\delta,I,F}\) be an NFA. Then the extension of \(ρ\) to vectors, $\rho\in \uco\left(\wp(\Sigma^*)^{|Q|}\right)$, is backward complete for \(\Pre_{\mathcal{N}}\) and \(\lambda \vect{X}\ldotp\vectarg{\epsilon}{F} \cup \Pre_{\mathcal{N}}(\vect{X})\).
\end{theorem}
\begin{proof}
First, it turns out that:
\begin{align*}
\rho( \Pre_\mathcal{N}(\tuple{X_q}_{q\in Q})) &= \quad\text{[By definition of \(\Pre_{\mathcal{N}}\)]}\\
\rho ({\textstyle \bigcup_{a\in \Sigma, q'\in \delta(q,a)}} aX_{q'}) &=
\quad\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho ({\textstyle \bigcup_{a\in \Sigma, q'\in \delta(q,a)}} \rho(aX_{q'})) &= \quad\text{[By backward completeness of $\rho$ for \(\lambda X\ldotp aX\)]}\\
\rho ({\textstyle \bigcup_{a\in \Sigma, q'\in \delta(q,a)}} \rho(a \rho(X_{q'}))) &=\quad\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho ({\textstyle \bigcup_{a\in \Sigma, q'\in \delta(q,a)}} a \rho(X_{q'})) &=\quad\text{[By definition of \(\Pre_{\mathcal{N}}\)]}\\
\rho( \Pre_\mathcal{N}(\rho(\tuple{X_q}_{q\in Q}))) & \enspace .
\end{align*}
Next, we show backward completeness of \(\rho\) for \(\lambda \vect{X}\ldotp \vectarg{\epsilon}{F} \cup {\Pre}_\mathcal{N} (\vect{X})\):
\begin{align*}
\rho (\vectarg{\epsilon}{F} \cup \Pre_\mathcal{N} (\rho(\vect{X}))) & =
\quad\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho (\rho (\vectarg{\epsilon}{F}) \cup \rho (\Pre_\mathcal{N} (\rho (\vect{X})))) & =
\quad\text{[By backward completeness of $\rho$ for \(\Pre_{\mathcal{N}}\)]}\\
\rho (\rho (\vectarg{\epsilon}{F}) \cup \rho (\Pre_\mathcal{N} (\vect{X}))) & =
\quad\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho (\vectarg{\epsilon}{F} \cup\: \Pre_\mathcal{N} (\vect{X})) \enspace .&
\end{align*}
\end{proof}
Then, by Equation~\eqref{eqn:lfpcompleteness}, we obtain the following result.
\begin{corollary}\label{corol:rholfp}
If \(\rho \in \uco(\wp(\Sigma^*))\)
is backward complete for \(\lambda X\in \wp(\Sigma^*)\ldotp aX\) for all \(a\in\Sigma\) then
\[\rho (\lfp(\lambda \vect{X}\ldotp\vectarg{\epsilon}{F} \cup \Pre_\mathcal{N}(\vect{X}))) = \lfp(\lambda \vect{X}\ldotp \rho (\vectarg{\epsilon}{F} \cup \Pre_\mathcal{N}(\vect{X})))\enspace . \]
\end{corollary}
Note that if \(\rho\) is backward complete for \(\lambda X. aX\) for all \(a \in \Sigma\) and \(L_2\in \rho\) then, as a
consequence of Theorem~\ref{theorem:inc-check-comp-abs} and Corollary~\ref{corol:rholfp}, we find that Equivalence~\eqref{eq:lfp} becomes
\begin{equation}\label{equation:checklfpRLintoRL}
\lang{\mathcal{N}}\subseteq L_2 \Leftrightarrow \lfp(\lambda \vect{X}\ldotp\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X}))) \subseteq \vectarg{L_2}{I} \enspace.
\end{equation}
\subsubsection{Right Concatenation}\label{rightwqo}
Let us consider the symmetric case of right concatenation.
Recall that, given an NFA \(\mathcal{N} = \tuple{Q,Σ,δ,I,F}\), we have that
\[W^{\mathcal{N}}_{I,q} = \nullable{q\in I}{\{\epsilon\}}{\varnothing} \cup {\textstyle\bigcup_{a\in\Sigma,a\in W^{\mathcal{N}}_{q',q}}} W^{\mathcal{N}}_{I,q'}a \enspace .\]
Correspondingly, we can define a set of fixpoint equations on \(\wp(\Sigma^*)\) which is based on right concatenation
and is symmetric to Equation~\eqref{leftEqn}:
\[\Eqnr(\mathcal{N}) \ud \{X_q = \nullable{q\in I}{\{\epsilon\}}{\varnothing} \cup {\textstyle\bigcup_{a\in\Sigma,q\in \delta(q',a)}} X_{q'}a \mid q \in Q\} \enspace .\]
In this case, if \(\vect{Y}=\tuple{Y_q}_{q\in Q}\) is the
least fixpoint solution of \(\Eqnr(\mathcal{N})\) then we have that \(Y_q = W^{\mathcal{N}}_{I,q}\) for every \(q\in Q\).
Also, by defining \(\vectarg{\epsilon}{I} \in \wp(\Sigma^*)^{|Q|}\) and \(\Post_\mathcal{N} \colon \wp(\Sigma^*)^{|Q|} {\rightarrow} \wp(\Sigma^*)^{|Q|}\) as follows:
\begin{align*}
\vectarg{\epsilon}{I} &\ud \tuple{\nullable{q \in I}{\{\epsilon\}}{\varnothing}}_{q\in Q}, \quad & \Post_\mathcal{N}(\tuple{X_q}_{q\in Q}) &\ud \tuple{ {\textstyle \bigcup_{a\in \Sigma, q\in \delta(q',a)}} X_{q'}a}_{q\in Q} \enspace ,
\end{align*}
we have that
\begin{equation}\label{eq:WIqAequalslfp}
\tuple{W_{I,q}}_{q\in Q} = \lfp(\lambda \vect{X}\ldotp \vectarg{\epsilon}{I} \cup \Post_\mathcal{N}(\vect{X})) \enspace .
\end{equation}
Since, by Equation~\eqref{eq:unionofrightlg}, we have that \(\lang{\mathcal{N}} = {\textstyle\bigcup_{q \in F}} W_{I,q}\), it follows that \(\lang{\mathcal{N}}\) is the union of the component languages of the vector \(\lfp(\lambda \vect{X}\ldotp \vectarg{\epsilon}{I} \cup \Post_\mathcal{N}(\vect{X}))\) indexed by the final states in \(F\).
\begin{example}\label{ex-firstPost}
Consider again the NFA \(\mathcal{N}\) in Figure~\ref{fig:A}.
The set of right equations for \(\mathcal{N}\) is:
\[
\Eqnr(\mathcal{N})=\begin{cases}
X_1 = \{\epsilon\} \cup X_1a \cup X_2 a\\
X_2 = \varnothing \cup X_1b \cup X_2b
\end{cases}
\]
so that
\[
\left( \begin{array}{c}
W_{q_1,q_1} \\ W_{q_1,q_2}
\end{array} \right)=
\lfp\biggl(\lambda \left( \begin{array}{c}
X_1 \\ X_2
\end{array} \right) .
\left(\begin{array}{c}
\{\epsilon\} \cup X_1 a \cup X_2 a \\
\varnothing \cup X_1 b\cup X_2 b
\end{array}\right)\biggr) = \left( \begin{array}{c}
(a+(b^+ a))^* \\ a^*b(b+a^+b)^*
\end{array} \right) \enspace .\tag*{
{\ensuremath{\Diamond}}}
\]
\end{example}
Finally, given a language inclusion problem \(\lang{\mathcal{N}} \subseteq L_2\),
the language \(L_2\) can be formalized as the vector
\[\vectarg{L_2}{F} \ud \tuple{\nullable{q \in F}{L_2}{\Sigma^*}}_{q\in Q} \in \wp(\Sigma^*)^{|Q|} \enspace ,\]
so that, by Equation~\eqref{eq:WIqAequalslfp}, it turns out that
\begin{align*}
\lang{\mathcal{N}}\subseteq L_2 \:\Leftrightarrow\:
\lfp(\lambda \vect{X}\ldotp\vectarg{\epsilon}{I} \cup \Post_{\mathcal{N}}(\vect{X})) \subseteq \vectarg{L_2}{F}
\end{align*}
We therefore have the following symmetric version
of Theorem~\ref{theorem:backComplete} for right concatenation.
\begin{theorem}\label{theorem:backCompleteRight}
Let \(\rho \in \uco(\wp(\Sigma^*))\) be backward complete for \(\lambda X\in \wp(\Sigma^*)\ldotp Xa\) for all \(a\in \Sigma\) and let $\mathcal{N}=\tuple{Q,Σ,\delta,I,F}$ be an NFA.
Then the extension of \(ρ\) to vectors, \(\rho\in \uco\left(\wp(\Sigma^*)^{|Q|}\right)\), is backward complete for \(\Post_{\mathcal{N}}(\vect{X})\) and \(\lambda \vect{X}\ldotp\vectarg{\epsilon}{I} \cup \Post_{\mathcal{N}}(\vect{X})\).
\end{theorem}
\begin{proof}
First, it turns out that:
\begin{align*}
\rho( \Post_\mathcal{N}(\tuple{X_q}_{q\in Q})) &= \quad\text{[By definition of \(\Post_{\mathcal{N}}\)]}\\
\rho ({\textstyle \bigcup_{a\in \Sigma, q\in \delta(q',a)}} X_{q'}a) &=
\quad\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho ({\textstyle \bigcup_{a\in \Sigma, q\in \delta(q',a)}} \rho(X_{q'}a)) &= \quad\text{[By backward completeness of $\rho$ for \(\lambda X\ldotp Xa\)]}\\
\rho ({\textstyle \bigcup_{a\in \Sigma, q\in \delta(q',a)}} \rho(\rho(X_{q'})a)) &=\quad\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho ({\textstyle \bigcup_{a\in \Sigma, q\in \delta(q',a)}} \rho(X_{q'})a) &=\quad\text{[By definition of \(\Post_{\mathcal{N}}\)]}\\
\rho( \Post_\mathcal{N}(\rho(\tuple{X_q}_{q\in Q}))) & \enspace .
\end{align*}
Next, we show backward completeness of \(\rho\) for \(\lambda \vect{X}\ldotp \vectarg{\epsilon}{I} \cup {\Post}_\mathcal{N} (\vect{X})\):
\begin{myAlignEP}
\rho (\vectarg{\epsilon}{I} \cup \Post_\mathcal{N} (\rho(\vect{X}))) & =
\quad\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho (\rho (\vectarg{\epsilon}{I}) \cup \rho (\Post_\mathcal{N} (\rho (\vect{X})))) & =
\quad\text{[By backward completeness of $\rho$ for \(\Post_{\mathcal{N}}\)]}\\
\rho (\rho (\vectarg{\epsilon}{I}) \cup \rho (\Post_\mathcal{N} (\vect{X}))) & =
\quad\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho (\vectarg{\epsilon}{I} \cup\: \Post_\mathcal{N} (\vect{X})) \enspace .&
\end{myAlignEP}
\end{proof}
\subsection{Solving the Abstract Inclusion Check}\label{sec:SolvingAbstractInclusionCheck}
In this section we present two techniques for solving the language inclusion problem $\lang{\mathcal{N}} \subseteq L_2$ by relying on Equivalence~\eqref{equation:checklfpRLintoRL}.
The first of these techniques leads to algorithms for solving the inclusion problem by using \emph{finite languages}.
Intuitively, given a closure \(ρ\), we show that it is possible to work on the domain \(\tuple{\{ρ(S) \mid S \in \wp(Σ^*)\}, \subseteq}\) while considering only languages \(S\) that are finite.
On the other hand, we present a second technique that relies on the use of Galois Connections in order to solve the language inclusion problem in a different domain.
This technique allows us to decide the inclusion \(\lang{\mathcal{N}} \subseteq L_2\) by manipulating the underlying automata representation of the language \(L_2\).
\subsubsection{Using Finite Languages}
The following result shows that the successive steps of the fixpoint iteration for computing \(\lfp(\lambda \vect{X}\ldotp\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})))\) can be replicated by iterating on a function \(f\), instead of \(\lambda \vect{X}\ldotp ρ(\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X}))\), and then abstracting the result, provided that \(f\) meets a set of requirements.
\begin{lemma}\label{lemma:FiniteWordsAlgorithm}
Let \(\mathcal{N}\!=\!\tuple{Q,Σ,\delta,I,F}\) be an NFA, let \(ρ \!\in\! \uco(Σ^*)\) be backward complete for \(\lambda X\!\in\!\wp(\Sigma^*)\ldotp aX\) for all \(a\in \Sigma\) and let \(f: \wp(Σ^*)^{|Q|} \to \wp(Σ^*)^{|Q|}\) be a function such that
\(\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})) = ρ(f(\vect{X}))\).
Then, for all \(0 \leq n\),
\[(ρ(\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})))^n = ρ(f^n(\vect{X})) \enspace .\]
\end{lemma}
\begin{proof}
We proceed by induction on \(n\).
\begin{myItem}
\item \emph{Base case:} Let \(n = 0\).
Then \(f^0(\vect{X}) = (ρ(\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})))^0 = \vect{\varnothing}\).
\item \emph{Inductive step:} Assume that \(ρ(f^n(\vect{X})) = (ρ(\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})))^n\) holds for some value \(n \geq 0\).
To simplify the notation, let \(\mathcal{P}(\vect{X}) = \vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})\) so that \(ρ f^n = (ρ\mathcal{P})^n\).
Then
\begin{align*}
ρf^{n{+}1}(\vect{X}) & = \quad \text{[Since \(f^{n{+}1} = f^n f\)]} \\
ρf^nf(\vect{X}) & = \quad \text{[By Inductive Hypothesis]} \\
(ρ\mathcal{P})^nf(\vect{X}) & = \quad \text{[By Theorem~\ref{theorem:backComplete}, \(ρ\) is bw. complete for \(\mathcal{P}\)]}\\
(ρ\mathcal{P})^nρf(\vect{X}) & = \quad \text{[Since \(ρ f = ρ \mathcal{P}\)]} \\
(ρ\mathcal{P})^nρ\mathcal{P}(\vect{X}) & = \quad \text{[Since \((ρ \mathcal{P})^{n{+}1} = (ρ \mathcal{P})^n ρ \mathcal{P}\)]} \\
(ρ\mathcal{P})^{n{+}1}(\vect{X})
\end{align*}
\end{myItem}
We conclude that \((ρ(\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})))^n = ρ(f^n(\vect{X}))\) for all \(0 \leq n\).
\end{proof}
Lemma~\ref{lemma:FiniteWordsAlgorithm} shows that the iterates of \(\lfp(\lambda \vect{X}\ldotp\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})))\) can be computed by abstracting the iterates of a function \(f\), which might manipulate only finite languages.
Moreover, it is straightforward to check that Lemma~\ref{lemma:FiniteWordsAlgorithm} remains valid when considering a different function \(f\) at each step of the iteration as long as all the considered functions satisfy the requirements.
To simplify the notation, given a set of functions \(\mathcal{F}\) and a function \(f\), we write \(\mathcal{F}f\) to denote the composition of one arbitrary function from \(\mathcal{F}\) with \(f\).
Similarly, \(f\mathcal{F} \) denotes the composition of \(f\) with an arbitrary function from \(\mathcal{F}\).
Finally, we write \(\mathcal{F}^2 = f\), for instance, to indicate that any composition of two functions in \(\mathcal{F}\) equals \(f\).
\begin{corollary}\label{corol:FiniteWordsAlgorithm}
Let \(\mathcal{N}=\tuple{Q,Σ,\delta,I,F}\) be an NFA, let \(ρ \in \uco(Σ^*)\) be backward complete for \(\lambda X\in \wp(\Sigma^*)\ldotp aX\) for all \(a\in \Sigma\) and let \(\mathcal{F}\) be a set of functions such that every function \(f \in \mathcal{F}\) is of the form \(f: \wp(Σ^*)^{|Q|} \to \wp(Σ^*)^{|Q|}\) and satisfies \(\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})) = ρ(f(\vect{X}))\).
Then, for all \(0 \leq n\),
\[(ρ(\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})))^n = ρ(\mathcal{F}^n(\vect{X})) \enspace .\]
\end{corollary}
Observe that, in particular, Corollary~\ref{corol:FiniteWordsAlgorithm} holds when considering the set \(\mathcal{F} = \{f\}\) with \(f = \lambda \vect{X}\ldotp \vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})\).
Intuitively, this means that we can compute the least fixpoint for \(\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X}))\) by iterating on \(\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})\) until we reach an \emph{abstract fixpoint}, i.e.\ the abstractions of two consecutive iterates coincide.
The idea of recursively applying a function \(f\) until its abstraction reaches a fixpoint is captured by the following definition of the \emph{abstract Kleene procedure}:
\[
\KleeneQO(\abseq,f,b) \ud \left\{ \begin{array}{l}
x:=b; \\
\textbf{while~} \neg \abseq(f(x), x) \textbf{~do~} x:=f(x);\\
\textbf{return~} x;
\end{array}
\right. \enspace ,
\]
where \(\abseq(x,y)\) is a function that returns \(\text{\emph{true}}\) if{}f the abstraction of \(x\) and \(y\) coincide, i.e. \(ρ(x) = ρ(y)\).
Clearly, \(\KleeneQO(id,f,b) = \Kleene(f,b)\) where \(id(x,y)\) returns \(\text{\emph{true}}\) if{}f \(x = y\).
For simplicity, we abuse notation and write \(\KleeneQO(\abseq,\mathcal{F},b)\) to denote the abstract \(\KleeneQO\) iteration where, at each step, an arbitrary function from the set \(\mathcal{F}\) is applied.
\iffalse
As the following lemma shows, whenever the domain \(\tuple{\{ρ(S) \mid S \in \wp(Σ^*)\}, \subseteq}\) is ACC and the abstraction \(ρ\) is backward complete for all the functions in the set \(\mathcal{F}\), i.e. \(ρ \mathcal{F} = ρ \mathcal{F} ρ\), the procedure \(\KleeneQO(\abseq,\mathcal{F},b)\) terminates and
the abstraction of the returned value is the least abstraction that is a fixpoint of \(ρ \mathcal{F}\).
\begin{lemma}\label{lemma:KleeneQO}
Let \(ρ \in \uco(Σ^*)\) be such that \(\tuple{\{ρ(S) \mid S \in \wp(Σ^*)\}, \subseteq}\) is an ACC with least element \(\varnothing\) and let \(\mathcal{F}\) be a set of functions such that every \(f \in \mathcal{F}\) is a monotone function of the form
\(f: \wp(Σ^*) \rightarrow \wp(Σ^*)\).
If \(ρ\) is backward complete for every \(f \in \mathcal{F}\), i.e. \(ρ\mathcal{F} = ρ \mathcal{F} ρ\), then \(\KleeneQO(\abseq,\mathcal{F},\varnothing)\) terminates and returns a set \(S \in \wp(Σ^*)\) such that
\begin{myEnumA}
\item \(ρ(S) = ρ\mathcal{F}ρ(S)\) and\label{lemma:KleeneQO:fp}
\item if there exists \(S'\) such that \(ρ(S') = ρ\mathcal{F}ρ(S')\) then \(ρ(S) \subseteq ρ(S')\)\label{lemma:KleeneQO:least}
\end{myEnumA}
\end{lemma}
\begin{proof}
\begin{myEnumA}
\item First, we show by induction that \(\mathcal{F}^n(\varnothing) \subseteq \mathcal{F}^{n{+}1}(\varnothing)\) for all \(n \geq 0\).
\begin{myItem}
\item \emph{Base case:} The relation \(\varnothing \subseteq \mathcal{F}(\varnothing)\) trivially holds since \(\varnothing\) is the least element in \(\{ρ(S) \mid S \in \wp(Σ^*)\}\).
\item \emph{Inductive step:} Assume \(\mathcal{F}^n(\varnothing) \subseteq \mathcal{F}^{n{+}1}(\varnothing)\) for some value \(n\).
Since every \(f \in \mathcal{F}\) is monotone, we have that \(\mathcal{F}^{n{+}1}(\varnothing) \subseteq \mathcal{F}^{n{+}2}(\varnothing)\).
\end{myItem}
We conclude that \(\mathcal{F}^n(\varnothing) \subseteq \mathcal{F}^{n{+}1}(\varnothing)\) holds for all \(n \geq 0\) and, as a consequence, \(ρ(\mathcal{F}^n(\varnothing)) \subseteq ρ(\mathcal{F}^{n{+}1}(\varnothing))\).
Since \(\tuple{\{ρ(S) \mid S \in \wp(Σ^*)\},\subseteq}\) is an ACC, there is no infinite sequence of ascending elements and, as a consequence, \(\KleeneQO(\abseq,\mathcal{F},\varnothing)\) terminates and returns a set \(S \in \wp(Σ^*)\) such that \(ρ(S) = ρ\mathcal{F}(S)\).
Finally, since \(ρ\) is backward complete for every \(f \in \mathcal{F}\), i.e. \(ρ\mathcal{F} = ρ \mathcal{F} ρ\), we have that \(ρ(S) = ρ\mathcal{F}ρ(S)\).
\item Next, we show that \(ρ\mathcal{F}^{i}(\varnothing) \subseteq ρ(S')\) for every \(i \geq 0\) and for every \(S'\) such that \(ρ(S') = ρ\mathcal{F}ρ(S')\).
Again, we proceed by induction on \(n\).
\begin{myItem}
\item \emph{Base case:} The relation \(\varnothing \subseteq S'\) trivially holds by definition of \(\varnothing\).
\item \emph{Inductive step:} Assume \(ρ\mathcal{F}^n(\varnothing) \subseteq ρ(S')\) for some value \(n\).
Then, since all functions in \(ρ\mathcal{F}\) are monotone, we have that
\[ρ\mathcal{F}(ρ\mathcal{F}^n(\varnothing)) \subseteq ρ\mathcal{F}(ρ(S'))\]
which, given that \(ρ\mathcal{F}ρ = ρ\mathcal{F}\), and \(ρ(\mathcal{F}(S')) = ρ(S')\), results in \(ρ\mathcal{F}^{n{+}1}(\varnothing) \subseteq ρ(S')\).
\end{myItem}
\end{myEnumA}
\end{proof}
\pagebreak
\fi
As the following lemma shows, whenever the domain \(\tuple{\{ρ(S) \mid S \in \wp(Σ^*)\}, \subseteq}\) is ACC and the abstraction \(ρ\) is backward complete for all the functions in the set \(\mathcal{F}\), i.e. \(ρ \mathcal{F} = ρ \mathcal{F} ρ\), the procedure \(\KleeneQO(\abseq,\mathcal{F},b)\) can be used to compute \(\lfp(\lambda \vect{X}\ldotp\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})))\).
\begin{lemma}\label{lemma:KleeneQOLfp}
Let \(\mathcal{N}=\tuple{Q,Σ,\delta,I,F}\) be an NFA, let \(ρ \in \uco(Σ^*)\) be backward complete for \(\lambda X\in \wp(\Sigma^*)\ldotp aX\) for all \(a\in \Sigma\) such that \(\tuple{\{ρ(S) \mid S \in \wp(Σ^*)\}, \subseteq}\) is an ACC CPO.
Let \(\mathcal{F}\) be a set of monotone functions such that every \(f \in \mathcal{F}\) is of the form \(f: \wp(Σ^*)^{|Q|} \to \wp(Σ^*)^{|Q|}\) and satisfies
\(\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})) = ρ(f(\vect{X}))\).
Then,
\[\lfp(\lambda \vect{X}\ldotp\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X}))) = ρ\left(\KleeneQO(\abseq,\mathcal{F},\vect{\varnothing})\right) \enspace .\]
Moreover, the iterates of \(\,\Kleene(\lambda \vect{X}\ldotp\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})), \vect{\varnothing})\) coincide in lockstep with the abstraction of the iterates of \(\,\KleeneQO(\abseq,\mathcal{F},\vect{\varnothing})\).
\end{lemma}
\begin{proof}
Since \(\tuple{\{ρ(S) \mid S \in \wp(Σ^*)\}, \subseteq}\) is an ACC CPO, by Theorem~\ref{theorem:Kleene}, we have that
\[\lfp(\lambda \vect{X}\ldotp\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X}))) = \Kleene(\lambda \vect{X}\ldotp\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})), \vect{\varnothing})\]
On the other hand, by Corollary~\ref{corol:FiniteWordsAlgorithm}, the iterates of the above Kleene iteration coincide in lockstep with the abstraction of the iterates of \(\KleeneQO(\abseq,\mathcal{F},\vect{\varnothing})\) and, therefore,
\[\Kleene(\lambda \vect{X}\ldotp\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})), \vect{\varnothing}) = ρ\left(\KleeneQO(\abseq,\mathcal{F},\vect{\varnothing})\right) \enspace .\]
As a consequence,
\[\lfp(\lambda \vect{X}\ldotp\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X}))) = ρ\left(\KleeneQO(\abseq,\mathcal{F},\vect{\varnothing})\right) \enspace .\]
\end{proof}
The following result relies on the \(\KleeneQO\) procedure to design an algorithm that solves the language inclusion problem \(\lang{\mathcal{N}} \subseteq L_2\) whenever the abstraction \(ρ\) and the set of functions \(\mathcal{F}\) satisfy a list of requirements in terms of backward completeness and computability.
\begin{theorem}\label{theorem:FiniteWordsAlgorithmGeneral}
Let \(\mathcal{N}=\tuple{Q,Σ,\delta,I,F}\) be an NFA, let \(L_2\) be a regular language, let \(ρ \in \uco(Σ^*)\) and let \(\mathcal{F}\) be a set of functions.
Assume that the following properties hold:
\begin{myEnumI}
\item The abstraction \(ρ\) is backward complete for \(\lambda X\in \wp(\Sigma^*)\ldotp aX\) for all \(a\in \Sigma\) and satisfies \(ρ(L_2) = L_2\).\label{theorem:FiniteWordsAlgorithmGeneral:rho}
\item The set \(\tuple{\{ρ(S) \mid S \in \wp(Σ^*)\}, \subseteq}\) is an ACC CPO.\label{theorem:FiniteWordsAlgorithmGeneral:ACC}
\item Every function \(f\) in the set \(\mathcal{F}\) is of the form \(f: \wp(Σ^*)^{|Q|} \to \wp(Σ^*)^{|Q|}\), it is computable and satisfies \(\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})) = ρ(f(\vect{X}))\).\label{theorem:FiniteWordsAlgorithmGeneral:F}
\item There is an algorithm, say \(\abseq^{\sharp}(\vect{X}, \vect{Y})\), which decides the abstraction equivalence \(ρ(\vect{X}) = ρ(\vect{Y})\), for all \(\vect{X}, \vect{Y} \in \wp(Σ^*)^{|Q|}\).\label{theorem:FiniteWordsAlgorithmGeneral:EQ}
\item There is an algorithm, say \(\absincl(\vect{X})\), which decides the inclusion \(ρ(\vect{X}) \subseteq \vectarg{L_2}{I}\), for all \(\vect{X} \in \wp(Σ^*)^{|Q|}\).\label{theorem:FiniteWordsAlgorithmGeneral:INC}
\end{myEnumI}
Then, the following is an algorithm which decides whether \(\lang{\mathcal{N}} \subseteq L_2\):
\(\tuple{Y_q}_{q\in Q} := \KleeneQO (\abseq^{\sharp},\mathcal{F}, \vect{\varnothing})\)\emph{;}
\emph{\textbf{return}} \(\absincl(\tuple{Y_q}_{q\in Q})\)\emph{;}
\end{theorem}
\begin{proof}
It follows from hypotheses~\ref{theorem:FiniteWordsAlgorithmGeneral:rho},~\ref{theorem:FiniteWordsAlgorithmGeneral:ACC} and~\ref{theorem:FiniteWordsAlgorithmGeneral:F}, by Lemma~\ref{lemma:KleeneQOLfp}, that
\begin{equation}\label{eq:lfpKleeneQO}
\lfp(\lambda \vect{X}\ldotp\rho (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X}))) = ρ\left(\KleeneQO(\abseq,\mathcal{F},\vect{\varnothing})\right)
\end{equation}
The function \(\abseq\) can be replaced by function \(\abseq^{\sharp}\) due to hypothesis~\ref{theorem:FiniteWordsAlgorithmGeneral:EQ}.
Moreover, by Equivalence~\eqref{equation:checklfpRLintoRL}, which holds by hypothesis~\ref{theorem:FiniteWordsAlgorithmGeneral:rho}, and Equation~\eqref{eq:lfpKleeneQO} we have that
\[\lang{\mathcal{N}}\subseteq L_2 \Leftrightarrow ρ\left(\KleeneQO (\abseq^{\sharp}, \mathcal{F}, \vect{\varnothing})\right) \subseteq \vectarg{L_2}{I}\enspace .\]
Finally, hypotheses~\ref{theorem:FiniteWordsAlgorithmGeneral:EQ} and~\ref{theorem:FiniteWordsAlgorithmGeneral:INC} guarantee, respectively, the decidability of the equivalence check \(ρ\mathcal{F}(\vect{X}) = ρ(\vect{X})\) performed at each step of the \(\KleeneQO\) iteration and the decidability of the inclusion of the abstraction of the lfp in \(\vectarg{L_2}{I}\).
\end{proof}
Note that Theorem~\ref{theorem:FiniteWordsAlgorithmGeneral} can also be stated in a symmetric version for right concatenation similarly to Theorem~\ref{theorem:backCompleteRight}.
\subsubsection{Using Galois Connections}
The next result reformulates Equivalence~\eqref{equation:checklfpRLintoRL} by using Galois Connections rather than closures, and shows how to design an algorithm that solves a language inclusion problem $\lang{\mathcal{N}} \subseteq L_2$ on
an \emph{abstraction} $D$ of the concrete domain $\tuple{\wp(\Sigma^*),\subseteq}$ whenever $D$ satisfies a list of requirements related to backward completeness and computability.
\begin{theorem}\label{theorem:EffectiveAlgorithm}
Let \(\mathcal{N}=\tuple{Q,Σ,\delta,I,F}\) be an NFA and \(L_2\) be a language over \(\Sigma\).
Let \(\tuple{\wp(\Sigma^*),\subseteq} \galois{\alpha}{\gamma}\tuple{D,\leq_D}\) be a GC where \( \tuple{D,\leq_D}\) is a poset.
Assume that the following properties hold:
\begin{myEnumI}
\item \(L_2\in\gamma(D)\) and for every \( a \in \Sigma\) and \(X \in \wp(\Sigma^*)\), \(\gamma\alpha(a X) = \gamma\alpha(a \gamma\alpha(X))\).\label{theorem:EffectiveAlgorithm:prop:rho}
\item \((D,\leq_D,\sqcup,\bot_D)\) is an effective domain, meaning that: \((D,\leq_D,\sqcup,\bot_D)\) is an ACC join-semilattice with bottom $\bot_D$,
every element of \(D\) has a finite representation, the binary relation
\(\leq_D\) is decidable and the binary lub \(\sqcup\) is computable.\label{theorem:EffectiveAlgorithm:prop:absdecidable}
\item There is an algorithm, say \(\Pre^{\sharp}(\vect{X}^\sharp)\), which computes \(\alpha(\Pre_{\mathcal{N}}(\gamma(\vect{X}^\sharp)))\),
for all \(\vect{X}^\sharp\in \alpha(\wp(\Sigma^*))^{|Q|}\).\label{theorem:EffectiveAlgorithm:prop:abspre}
\item There is an algorithm, say \(\epsilon^{\sharp}\), which computes \(\alpha(\vectarg{\epsilon}{F})\).\label{theorem:EffectiveAlgorithm:prop:abseps}
\item There is an algorithm, say \(\absincl(\vect{X}^\sharp)\), which decides the abstract inclusion
\(\vect{X}^\sharp \leq_D \alpha(\vectarg{L_2}{I})\), for all \(\vect{X}^\sharp\in \alpha(\wp(\Sigma^*))^{|Q|}\).
\label{theorem:EffectiveAlgorithm:prop:absincl}
\end{myEnumI}
Then, the following is an algorithm which decides whether \(\lang{\mathcal{N}} \subseteq L_2\):
\(\tuple{Y^\sharp_q}_{q\in Q} := \Kleene (\lambda \vect{X}^\sharp\ldotp\epsilon^{\sharp} \sqcup \Pre^{\sharp}(\vect{X}^\sharp), \vect{\bot_D})\)\emph{;}
\emph{\textbf{return}} \(\absincl(\tuple{Y^\sharp_q}_{q\in Q})\)\emph{;}
\end{theorem}
\begin{proof}
Let \(\rho \ud \gamma \alpha\in \uco(\wp(\Sigma^*))\), so that hypothesis~\ref{theorem:EffectiveAlgorithm:prop:rho} can be stated as
\(L_2 \in \rho\) and \(\rho(aX) = \rho(a\rho(X))\).
It turns out that:
\begin{align*}
\lang{\mathcal{N}}\subseteq L_2 &\Leftrightarrow\quad
\text{[By Equivalence~\eqref{equation:checklfpRLintoRL}]}\\
\lfp(\lambda \vect{X}\ldotp\rho (\vectarg{\epsilon}{F} \cup \Pre_{\mathcal{N}}(\vect{X}))) \subseteq \vectarg{L_2}{I} &\Leftrightarrow \quad
\text{[By Lemma~\ref{lemma:alpharhoequality}]}\\
\gamma(\lfp (\lambda \vect{X}^\sharp\ldotp \alpha (\vectarg{\epsilon}{F} \cup \Pre_{\mathcal{N}}(\gamma(\vect{X}^\sharp))))) \subseteq \vectarg{L_2}{I} &\Leftrightarrow
\quad \text{[By GC]}\\
\gamma(\lfp (\lambda \vect{X}^\sharp\ldotp\alpha(\vectarg{\epsilon}{F}) \sqcup \alpha(\Pre_{\mathcal{N}}(\gamma(\vect{X}^\sharp))))) \subseteq \vectarg{L_2}{I} &\Leftrightarrow
\quad \text{[By GC since $L_2\in \gamma(D)$]}\\
\lfp (\lambda \vect{X}^\sharp\ldotp\alpha(\vectarg{\epsilon}{F}) \sqcup \alpha(\Pre_{\mathcal{N}}(\gamma(\vect{X}^\sharp)))) \leq_D \alpha(\vectarg{L_2}{I}) &
\end{align*}
By hypotheses~\ref{theorem:EffectiveAlgorithm:prop:absdecidable}, \ref{theorem:EffectiveAlgorithm:prop:abspre} and \ref{theorem:EffectiveAlgorithm:prop:abseps},
\(\Kleene (\lambda \vect{X}^\sharp\ldotp\epsilon^{\sharp} \sqcup \Pre^{\sharp}(\vect{X}^\sharp), \vect{\bot_D})\) is an algorithm computing the least fixpoint
$\lfp (\lambda \vect{X}^\sharp\ldotp\alpha(\vectarg{\epsilon}{F}) \sqcup \alpha(\Pre_{\mathcal{N}}(\gamma(\vect{X}^\sharp))))$. In particular, the hypotheses
\ref{theorem:EffectiveAlgorithm:prop:absdecidable}, \ref{theorem:EffectiveAlgorithm:prop:abspre} and \ref{theorem:EffectiveAlgorithm:prop:abseps} ensure that the Kleene iterates of
$\lambda \vect{X}^\sharp\ldotp\epsilon^{\sharp} \sqcup \Pre^{\sharp}(\vect{X}^\sharp)$
starting from $\vect{\bot_D}$
are in $\alpha(\wp(\Sigma^*))^{|Q|}$, computable and finitely many and that
it is decidable whether the iterates have reached the fixpoint.
Finally, hypothesis~\ref{theorem:EffectiveAlgorithm:prop:absincl} ensures the
decidability of the $\leq_D$-inclusion check of this least fixpoint
in $\alpha(\vectarg{L_2}{I})$.
\end{proof}
It is also worth noticing that, analogously to what has been done in Theorem~\ref{theorem:backCompleteRight},
the above Theorem~\ref{theorem:EffectiveAlgorithm} can be also stated in a symmetric version
for right (rather than left) concatenation.
\section{Instantiating the Framework}
\label{sec:instantiating_the_framework_language_based_well_quasiorders}
We instantiate the general algorithmic framework of
Section~\ref{sec:an_algorithmic_framework_for_language_inclusion_based_on_complete_abstractions} to the class of closure operators induced by quasiorder relations on words.
\subsection{Word-based Abstractions}
Let \(\mathord{\leqslant} \subseteq \Sigma^* \times \Sigma^*\) be a quasiorder relation on words in $\Sigma^*$.
Recall that the corresponding closure operator \(\rho_\leqslant \in \uco(\wp(\Sigma^*))\) is defined as follows:
\begin{equation}\label{eq:qo-up-closure}
\rho_\leqslant(X) \ud
\{v\in \Sigma^* \mid \exists u\in X, \;u \leqslant v \} \enspace .
\end{equation}
Thus, $\rho_\leqslant(X)$ is the $\leqslant$-upward closure
of $X$ and it is easy to check that $\rho_\leqslant$ is indeed a closure
on the complete lattice $\tuple{\wp(\Sigma^*),\subseteq}$.
As described in Chapter~\ref{chap:prel}, the quasiorder \(\leqslant\) is left-monotone (resp.\ right-monotone) if{}f
\begin{equation}\label{def-leftmon}
\forall x_1,x_2 \in \Sigma^*,\forall a\in \Sigma,\: x_1\leqslant x_2 \,\Rightarrow\, a x_1 \leqslant a x_2 \quad \text{(resp.\ $x_1 a \leqslant x_2 a$)}
\end{equation}
In fact, if $x_1\leqslant x_2$ then Equation~\eqref{def-leftmon} implies
that for all $y\in \Sigma^*$, $y x_1 \leqslant y x_2$ since, by induction
on the length $|y|\in\mathbb{N}$, we have that:
\begin{myEnumI}
\item if $y=\epsilon$ then $y x_1 \leqslant y x_2$;
\item if $y=av$ with $a\in \Sigma,v\in \Sigma^*$ then, by inductive hypothesis, $v x_1 \leqslant v x_2$, so that by \eqref{def-leftmon}, $yx_1=av x_1 \leqslant av x_2=yx_2$
\end{myEnumI}
\begin{definition}[$L$-Consistent Quasiorder]\label{def:LConsistent}\rm
Let $L\in$ $\wp(\Sigma^*)$. A quasiorder \(\mathord{\leqslant_L}\) on \(\Sigma^*\) is called \emph{left} (resp.\ \emph{right}) \(L\)\emph{-consistent} if{}f
\begin{myEnumA}
\item \(\mathord{\leqslant}_L \cap (L\times \neg L) = \varnothing \);\label{eq:LConsistentPrecise}
\item \(\mathord{\leqslant}_L\) is left-monotone (resp.\ right-monotone). \label{eq:LConsistentmonotone}
\end{myEnumA}
Also, \(\mathord{\leqslant}_L\) is called \emph{\(L\)-consistent} when it is both left and right \(L\)-consistent.
{\rule{0.5em}{0.5em}}
\end{definition}
As the following lemma shows, it turns out that a quasiorder is $L$-consistent if{}f it induces a closure which includes $L$ in its image and it is
backward complete for concatenation.
\begin{lemma}\label{lemma:properties}
Let \(L\in \wp(\Sigma^*)\) and \(\mathord{\leqslant_L}\) be a quasiorder on \(\Sigma^*\).
Then, \(\mathord{\leqslant_L}\) is a
left (resp.\ right) \(L\)-consistent quasiorder on \(\Sigma^*\) if and only if
\begin{myEnumA}
\item \(\rho_{\leqslant_L}(L) = L\), and \label{lemma:properties:L}
\item \(\rho_{\leqslant_L}\) is backward complete for \(\lambda X\ldotp a X\) (resp.\ \(\lambda X\ldotp Xa\)) for all \(a\in \Sigma\).\label{lemma:properties:bw}
\end{myEnumA}
\end{lemma}
\begin{proof}
We consider the left case, the right case is symmetric.
\begin{myEnumA}
\item The inclusion
\(L\subseteq \rho_{\leqslant_L}(L)\) always
holds because \(\rho_{\leqslant_L}\) is an upper closure.
For the reverse inclusion we have that
\begin{align*}
\rho_{\leqslant_L}(L)\subseteq L & \Leftrightarrow \quad \text{[By definition of \(ρ_{\leqslant_L}(L)\)]}\\
\forall v\in \Sigma^*, \; (\exists u\in L,\, u \leqslant_L v) \:\Rightarrow\: v\in L & \Leftrightarrow \quad \\
\mathord{\leqslant}_L \cap (L\times \neg L) = \varnothing \enspace .
\end{align*}
Thus, $\rho_{\leqslant_L}(L)= L$ iff
condition~\ref{eq:LConsistentPrecise} of Definition~\ref{def:LConsistent} holds.
\item We first prove that if \(\mathord{\leqslant}_L\) is left-monotone then for all $X\in \wp(\Sigma^*)$ we have that
\(\rho_{\leqslant_L}(a X) = \rho_{\leqslant_L}(a \rho_{\leqslant_L}(X))\) for all \(a\in\Sigma\).
Monotonicity of concatenation together with monotonicity and extensivity of
$\rho_{\leqslant_L}$ imply that the inclusion \(\rho_{\leqslant_L}(a X) \subseteq \rho_{\leqslant_L}(a \rho_{\leqslant_L}(X))\) holds.
For the reverse inclusion, we have that:
\begin{align*}
\rho_{\leqslant_L}(a \rho_{\leqslant_L}(X))
&= \quad \text{[By definition of \(\rho_{\leqslant_L}\)]}\\
\rho_{\leqslant_L}\left( \{ a y \mid \exists x\in X, x \leqslant_L y \} \right)
&= \quad \text{[By definition of \(\rho_{\leqslant_L}\)]}\\
\{ z \mid \exists x\in X, y\in \Sigma^*,\, x\leqslant_L y \land a y \leqslant_L z \}
&\subseteq \quad \text{[By monotonicity of \(\leqslant_L\)]}\\
\{ z \mid \exists x\in X, y\in \Sigma^*,\, ax\leqslant_L ay \land a y \leqslant_L z \}
&= \quad \text{[By transitivity of \(\leqslant_L\)]}\\
\{ z \mid \exists x\in X , a x\leqslant_L z\}
&= \quad \text{[By definition of \(\rho_{\leqslant_L}\)]}\\
\rho_{\leqslant_L}(a X) &\enspace .
\end{align*}
Next, we show that if \(\rho_{\leqslant_L}(a X) = \rho_{\leqslant_L}(a \rho_{\leqslant_L}(X))\) for all $X\in \wp(\Sigma^*)$ and \(a\in\Sigma\) then \(\leqslant_L\) is left-monotone.
Let $x_1,x_2\in \Sigma^*$ and $a\in \Sigma$.
If $x_1 \leqslant_L x_2$ then
$\{x_2\} \subseteq \rho_{\leqslant_L}(\{x_1 \})$, hence
$a\{x_2\} \subseteq a\rho_{\leqslant_L}(\{x_1 \})$.
Then, by applying the monotone function
$\rho_{\leqslant_L}$ we have that
$\rho_{\leqslant_L}(a\{x_2\}) \subseteq \rho_{\leqslant_L}(a\rho_{\leqslant_L}(\{x_1 \}))$, so that, by backward completeness,
$\rho_{\leqslant_L}(a\{x_2\}) \subseteq \rho_{\leqslant_L}(a\{x_1 \})$.
Thus, $a\{x_2\} \subseteq \rho_{\leqslant_L}(a\{x_1 \})$, namely,
$ax_1 \leqslant_L ax_2$. By~\eqref{def-leftmon}, this shows that $\leqslant_L$ is left-monotone.
\end{myEnumA}
\end{proof}
Since \(ρ_{\leqslant} (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})) = ρ_{\leqslant}(\minor{\vectarg{\epsilon}{F} \! \cup \Pre_{\mathcal{N}}(\vect{X})})\) for every quasiorder then, by Lemma~\ref{lemma:properties}, we can apply Theorem~\ref{theorem:FiniteWordsAlgorithmGeneral} with the abstraction \(ρ_{\leqslant_{L_2}}\) induced by a left \(L_2\)-consistent well-quasiorder and \(\mathcal{F} =\minor{\vectarg{\epsilon}{F} \! \cup \Pre_{\mathcal{N}}(\vect{X})}\) interpreted as the set of functions of the form \(f_i = \minor{\vectarg{\epsilon}{F} \! \cup \Pre_{\mathcal{N}}(\vect{X})}_i\) where each \(\minor{\cdot}_i\) is a function mapping each set \(X \in \wp(Σ^*)\) into a minor \(\minor{X}_i\).
Intuitively, this means that we can manipulate \(\leqslant\)-upward closed sets in \(\wp(Σ^*)\) using their finite minors, as already shown by \citet{ACJT96}.
As a consequence, we obtain Algorithm~\AlgRegularW which, given a left \(L_2\)-consistent well-quasiorder, solves the language inclusion problem \(\lang{\mathcal{N}} \subseteq L_2\) for any automaton \(\mathcal{N}\).
The algorithm is called ``word-based'' because the vector \(\tuple{Y_q}_{q \in Q}\) consists of finite sets of words in \(Σ^*\).
We write \(\mathord{\sqsubseteq_{\leqslant^{\ell}_{L_2}}}\hspace{-4pt} \cap \mathord{(\sqsubseteq_{\leqslant^{\ell}_{L_2}}\hspace{-2pt})^{-1}}\) as the first argument of \(\KleeneQO\) to denote the function \(f(X,Y)\) that returns \(\text{\emph{true}}\) if{}f \(X \sqsubseteq_{\leqslant^{\ell}_{L_2}} Y\) and \(Y \sqsubseteq_{\leqslant^{\ell}_{L_2}} X\).
\begin{figure}
\caption{Word-based algorithm for \(\lang{\mathcal{N}} \subseteq L_2\) based on a left \(L_2\)-consistent wqo.}
\label{alg:RegIncW}
\end{figure}
\begin{theorem}\label{theorem:quasiorderAlgorithm}
Let \(\mathcal{N}=\tuple{Q,Σ,\delta,I,F}\) be an NFA and let \(L_2\in \wp(\Sigma^*)\) be a language such that:
\begin{myEnumIL}
\item membership in $L_2$ is decidable; \label{theorem:quasiorderAlgorithm:membership}
\item there exists a decidable left \(L_2\)-consistent wqo on $\Sigma^*$.\label{theorem:quasiorderAlgorithm:decidableL}
\end{myEnumIL}
Then, Algorithm \AlgRegularW decides the inclusion problem \(\lang{\mathcal{N}} \subseteq L_2\).
\end{theorem}
\begin{proof}
Let $\leqslant_{L_2}^{\ell}$ be a decidable left $L_2$-consistent well-quasiorder on $\Sigma^*$.
Then, we check that hypotheses~\ref{theorem:FiniteWordsAlgorithmGeneral:rho}--\ref{theorem:FiniteWordsAlgorithmGeneral:INC} of Theorem~\ref{theorem:FiniteWordsAlgorithmGeneral} are satisfied.
\begin{myEnumA}
\item It follows from hypothesis~\ref{theorem:quasiorderAlgorithm:decidableL} and Lemma~\ref{lemma:properties} that \(\leqslant_{L_2}^{\ell}\) is backward complete for left concatenation and satisfies \(ρ_{\leqslant_{L_2}^{\ell}}(L_2) = L_2\).
\item Since \(\leqslant_{L_2}^{\ell}\) is a wqo, then \(\tuple{\{ρ_{\leqslant_{L_2}^{\ell}}(S) \mid S \in \wp(Σ^*)\}, \subseteq}\) is an ACC CPO.
\item Let \(\lfloor\vectarg{\epsilon}{F} \cup \Pre_{\mathcal{N}}(\vect{X})\rfloor\) be the set of functions \(f_i\) each of which maps each set \(X \in \wp(Σ^*)\) into a minor of \(\vectarg{\epsilon}{F} \cup \Pre_{\mathcal{N}}(\vect{X})\).
Since \(\rho_{\leqslant_{L_2}^{\ell}}(X) = ρ_{\leqslant_{L_2}^{\ell}}(\minor{X})\) for all \(X \in \wp(Σ^*)^{|Q|}\),
we have that all functions \(f_i\) satisfy
\[\rho_{\leqslant_{L_2}^{\ell}} (\vectarg{\epsilon}{F} \!\cup \Pre_{\mathcal{N}}(\vect{X})) = ρ_{\leqslant_{L_2}^{\ell}}(f_i(\vect{X}))\enspace .\]
\item The equality \(ρ_{\leqslant_{L_2}^{\ell}}(S_1) = ρ_{\leqslant_{L_2}^{\ell}}(S_2)\) is decidable for every \(S_1, S_2 \in \wp(Σ^*)^{|Q|}\) since
\[ρ_{\leqslant_{L_2}^{\ell}}(S_1) = ρ_{\leqslant_{L_2}^{\ell}}(S_2) \Leftrightarrow S_1 \sqsubseteq_{\leqslant_{L_2}^{\ell}} S_2 \land S_2 \sqsubseteq_{\leqslant_{L_2}^{\ell}} S_1\]
and, by hypothesis~\ref{theorem:quasiorderAlgorithm:decidableL}, \(\leqslant_{L_2}^{\ell}\) is decidable.
\item Since \(\vectarg{L_2}{I} = \tuple{\nullable{q \in I}{L_2}{\Sigma^*}}_{q \in Q}\), the inclusion trivially holds for all components \(Y_q\) with \(q \notin I\).
Therefore, it suffices to check whether \(Y_q \subseteq L_2\) holds for \(q \in I\) which, since \(Y_q = \minor{S}\) with \(S \in \wp(Σ^*)\), can be decided by performing finitely many membership tests as done by lines 2-5 of Algorithm~\AlgRegularW.
By hypothesis~\ref{theorem:quasiorderAlgorithm:membership}, this check is decidable.
\end{myEnumA}
\end{proof}
\subsubsection{Right Concatenation}
Following Section~\ref{rightwqo},
a symmetric version, called \AlgRegularWr, of Algorithm \AlgRegularW and of Theorem~\ref{theorem:quasiorderAlgorithm} for \emph{right} \(L_2\)-consistent wqos can be easily derived as follows.
\begin{figure}
\caption{Word-based algorithm for deciding \(\lang{\mathcal{N}} \subseteq L_2\) using a right \(L_2\)-consistent wqo.}
\label{alg:RegIncWr}
\end{figure}
\begin{theorem}\label{theorem:quasiorderAlgorithmR}
Let \(\mathcal{N}=\tuple{Q,Σ,\delta,I,F}\) be an NFA and let \(L_2\in \wp(\Sigma^*)\) be a language such that
\begin{myEnumIL}
\item membership in $L_2$ is decidable;
\item there exists a decidable right \(L_2\)-consistent wqo on $\Sigma^*$.
\end{myEnumIL}
Then, Algorithm \AlgRegularWr decides the inclusion problem \(\lang{\mathcal{N}} \subseteq L_2\).
\end{theorem}
In the following, we will consider different quasiorders on $\Sigma^*$ and we will show that they fulfill the requirements of Theorem~\ref{theorem:quasiorderAlgorithm}, so that they yield algorithms for solving a language inclusion problem.
\subsection{Nerode Quasiorders}\label{sec:nerode}
\label{sub:the_left_nerode_quasi_order_relative_to_a_language}
Recall from Chapter~\ref{chap:prel} that the \emph{left} and \emph{right}
\emph{Nerode's quasiorders} on \(\Sigma^*\) are defined in the standard way:
\begin{align*}
u\leqslant^{\ell}_{L} v &\udiff\; L u^{-1} \subseteq L v^{-1} \,,&
u\leqslant^{r}_{L} v &\udiff\; u^{-1} L \subseteq v^{-1} L \enspace .
\end{align*}
The following result shows that Nerode's quasiorders are the weakest (i.e. greatest w.r.t.\ set inclusion of binary relations) \(L_2\)-consistent quasiorders for which the algorithm \AlgRegularW can be instantiated to decide a language inclusion \(\lang{\mathcal{N}}\subseteq L_2\).
\begin{lemma}\label{lemma:leftrightnerodegoodqo}
Let $L\in \wp(\Sigma^*)$.
Then
\begin{myEnumA}
\item \(\mathord{\leqslant^{\ell}_{L}}\) and \(\mathord{\leqslant^{r}_{L}}\) are, respectively, left and right \(L\)-consistent quasiorders.
If $L$ is regular then, additionally, \(\mathord{\leqslant^{\ell}_{L}}\) and \(\mathord{\leqslant^{r}_{L}}\) are, respectively, decidable wqos. \label{lemma:leftrightnerodegoodqo:Consistent}
\item Let \(\leqslant^{\ell}\) and \(\leqslant^{r}\) be, respectively, a left and a right \(L\)-consistent quasiorder on $\Sigma^*$.
Then \( \rho_{\mathord{\leqslant^{\ell}_{L}}} \subseteq \rho_{\leqslant^{\ell}} \) and \( \rho_{\leqslant^{r}_{L}} \subseteq \rho_{\leqslant^{r}} \).\label{lemma:leftrightnerodegoodqo:Incl}
\end{myEnumA}
\end{lemma}
\begin{proof}
\begin{myEnumA}
\item As explained in Chapter~\ref{chap:prel}, \citet[Section 2]{deLuca1994} show that \(\leqslant^{\ell}_{L}\) and \(\leqslant^{r}_{L}\) are, respectively, left and right monotone quasiorders.
On the other hand, note that given \(u \in L\) and \(v \notin L\) we have that \(\epsilon \in Lu^{-1}\) and \(\epsilon \in u^{-1}L\) while \(\epsilon \notin Lv^{-1}\) and \(\epsilon \notin v^{-1}L\).
Hence, \(\mathord{\leqslant^{\ell}_{L}}\) (resp. \(\mathord{\leqslant^{r}_{L}}\)) is a left (resp. right) \(L\)-consistent quasiorder.
Finally, if $L$ is regular then both relations are
clearly decidable.
\item We consider the left case (the right case is symmetric).
As shown by \citet[Section~2, point~4]{deLuca1994}, \(\mathord{\leqslant^{\ell}_{L}}\) is maximum in the set of all the left \(L\)-consistent quasiorders, i.e.\ every left \(L\)-consistent quasiorder \(\leqslant^{\ell}\) is such that \(x \leqslant^{\ell} y \Rightarrow x \leqslant^{\ell}_{L} y \).
As a consequence, \(\rho_{\leqslant^{\ell}}(X) \subseteq \rho_{\leqslant^{\ell}_{L}}(X)\) holds for all \(X\in \wp(\Sigma^*)\), since
\(\mathord{\leqslant^{\ell}} \subseteq \mathord{\leqslant^{\ell}_{L}}\).\qedhere
\end{myEnumA}
\end{proof}
We then derive a first instantiation of Theorem~\ref{theorem:quasiorderAlgorithm}.
Because membership is decidable for regular languages $L_2$, Lemma~\ref{lemma:leftrightnerodegoodqo}~\ref{lemma:leftrightnerodegoodqo:Consistent} for \(\leqslant^{\ell}_{L_2}\) implies that the hypotheses \ref{theorem:quasiorderAlgorithm:membership} and \ref{theorem:quasiorderAlgorithm:decidableL} of Theorem~\ref{theorem:quasiorderAlgorithm} are satisfied, so that Algorithm \AlgRegularW instantiated to \(\leqslant^{\ell}_{L_2}\)
decides the inclusion \(\lang{\mathcal{N}} \subseteq L_2\) when $L_2$ is regular.
Furthermore, under these hypotheses,
Lemma~\ref{lemma:leftrightnerodegoodqo}~\ref{lemma:leftrightnerodegoodqo:Incl} shows that \(\leqslant_{L_2}^{\ell}\) is the weakest (i.e. greatest for set inclusion)
left \(L_2\)-consistent quasiorder for which the algorithm \AlgRegularW can be instantiated
for deciding the inclusion $\lang{\mathcal{N}}\subseteq L_2$.
\begin{figure}
\caption{Two automata \(\mathcal{N}_1\) (left) and \(\mathcal{N}_2\) (right).}
\label{fig:B}
\end{figure}
\begin{example}\label{example:Word_Regular_LInc}
We illustrate the use of the left Nerode's quasiorder in the algorithm \AlgRegularW for solving the language inclusion \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\), where \(\mathcal{N}_1\) and \(\mathcal{N}_2\) are the automata shown in Figure~\ref{fig:B}.
The equations for \(\mathcal{N}_1\) are as follows:
\[
\Eqn(\mathcal{N}_1)=\begin{cases}
X_1 = \varnothing \cup aX_1 \cup aX_2 \cup bX_2 \cup cX_2\\
X_2 = \{\epsilon\}
\end{cases} \enspace .
\]
\noindent
We have the following quotients (among others) for \(L = \lang{\mathcal{N}_2}\).
\begin{align*}
L \epsilon^{-1} = \; & a^* (a(a+b)^*a+a^+c+ab+bb) & L b^{-1} = \; & a^* (a + b) \\
L a^{-1} = \; & a^* a(a+b)^* & L c^{-1} = \; & a^* a^+\\
L w^{-1} = \; & a^* \text{ if{}f } w \in (a(a+b)^*a+ac+ab+bb) \span
\end{align*}
\noindent
It is straightforward to check that, among others, the following relations hold between different alphabet symbols: \(b \leqslant^{\ell}_{L} a\),
\(c \leqslant^{\ell}_{L} a\)
and \(c \leqslant^{\ell}_{L} b\).
Then, let us show the computation of the Kleene iterates performed by Algorithm \AlgRegularW.
\begin{align*}
\vect{Y}^{(0)} &= \vect{\varnothing}\\
\vect{Y}^{(1)} &= \vectarg{\epsilon}{F} = \tuple{\varnothing, \{\epsilon\}} \\
\vect{Y}^{(2)} &= \lfloor\vectarg{\epsilon}{F}\rfloor {\sqcup} \lfloor\Pre_{\mathcal{N}_1}(\vect{Y}^{(1)})\rfloor = \tuple{\varnothing, \{\epsilon\}} {\sqcup} \tuple{\minor{\varnothing \cup a\varnothing \cup a\{\epsilon\} \cup b\{\epsilon\} \cup c\{\epsilon\}}, \minor{\{\epsilon\}}}\\
&= \tuple{\minor{\{a,b,c\}}, \minor{\{\epsilon\}}} = \tuple{\{c\}, \{\epsilon\}}\\
\vect{Y}^{(3)} &= \lfloor\vectarg{\epsilon}{F}\rfloor {\sqcup} \lfloor\Pre_{\mathcal{N}_1}(\vect{Y}^{(2)})\rfloor =
\tuple{\varnothing, \{\epsilon\}} {\sqcup} \tuple{\minor{\varnothing {\cup} a\{c\} {\cup} a\{\epsilon\} {\cup} b\{\epsilon\} {\cup} c\{\epsilon\}}, \minor{\{\epsilon\}}}\\
&=
\tuple{\minor{\{ac, a, b, c\}}, \minor{\{\epsilon\}}} = \tuple{\{c\}, \{\epsilon\}}
\end{align*}
The least fixpoint is thus \(\vect{Y} = \tuple{\{c\}, \{\epsilon\}}\).
Since $c\in \vect{Y}_1$ and \(c \notin \lang{\mathcal{N}_2}\), Algorithm \AlgRegularW concludes that the language inclusion \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\) does not hold.
{\ensuremath{\Diamond}}
\end{example}
\subsubsection{On the Complexity of Nerode's quasiorders}
For the inclusion problem between languages generated by finite automata, deciding the
(left or right) Nerode's quasiorder can be easily shown to be as hard as the language inclusion problem, which is PSPACE-complete.
In fact, given the automata \(\mathcal{N}_1=\tuple{Q_1,\delta_1,I_1,F_1,\Sigma}\) and \(\mathcal{N}_2=\tuple{Q_2,\delta_2,I_2,F_2,\Sigma}\), one can define the union automaton \(\mathcal{N}_3\ud \tuple{Q_1\cup Q_2\cup\{q^{\iota}\}, \delta_3, \{q^{\iota}\}, F_1\cup F_2, \Sigma}\) where \(\delta_3 \) maps \((q^\iota,a)\) to \(I_1\), \( (q^\iota,b) \) to \(I_2\) and behaves like \(\delta_1\) or \(\delta_2\) elsewhere. Then, it turns out that
\[a \leqslant^{r}_{\lang{\mathcal{N}_3}} b \Leftrightarrow a^{-1}\lang{\mathcal{N}_3} \subseteq b^{-1}\lang{\mathcal{N}_3} \Leftrightarrow \lang{\mathcal{N}_1}\subseteq \lang{\mathcal{N}_2}\enspace .\]
It follows that deciding the right Nerode's quasiorder \(\leqslant^{r}_{\lang{\mathcal{N}_3}}\) is as hard as deciding \(\lang{\mathcal{N}_1}\subseteq \lang{\mathcal{N}_2}\).
Also, for the inclusion problem of a language generated by an
automaton within the trace set of a one-counter net (see Section~\ref{sub:containment_in_one_counter_languages}), the right Nerode's quasiorder is a right language-consistent well-quasiorder but it turns out to be undecidable (see Lemma~\ref{lemma:RightNerodeOcnwqo}).
\subsection{State-based Quasiorders}\label{subsec:state-qos}
Consider the inclusion problem \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\) where \(\mathcal{N}_1\) and \(\mathcal{N}_2\) are NFAs.
In the following, we study a class of well-quasiorders based on \(\mathcal{N}_2\), called state-based quasiorders.
These quasiorders are strictly stronger (i.e. lower w.r.t.\ set inclusion of binary relations) than the Nerode's quasiorders and sidestep the untractability or undecidability of Nerode's quasiorders yet allowing to define an algorithm solving the language inclusion problem.
\subsubsection{Inclusion in Regular Languages.}
\label{sub:automata_based}
We define the quasiorders \(\leqslant^{\ell}_{\mathcal{N}}\) and \(\leqslant^{r}_{\mathcal{N}}\) induced by an NFA \(\mathcal{N}=\tuple{Q,Σ,\delta,I,F}\)
as follows:
\begin{align}\label{eqn:state-qo}
u \mindex{\leqslant^{\ell}_{\mathcal{N}}} v & \udiff \pre^{\mathcal{N}}_{u}(F) \subseteq \pre^{\mathcal{N}}_{v}(F)\,,
&
u \mindex{\leqslant^{r}_{\mathcal{N}}} v & \udiff \post^{\mathcal{N}}_{u}(I) \subseteq \post^{\mathcal{N}}_{v}(I) \,.
\end{align}
The superscripts in $\leqslant^{\ell}_{\mathcal{N}}$ and $\leqslant^{r}_{\mathcal{N}}$ stand, respectively, for left/right because they are, respectively, left and right well-quasiorders as the following result shows.
\begin{lemma}\label{lemma:LAconsistent}
The relations \(\mathord{\leqslant^{\ell}_{\mathcal{N}}}\) and \(\mathord{\leqslant^{r}_{\mathcal{N}}}\) are, respectively, decidable left and right
\(\lang{\mathcal{N}}\)-consistent wqos.
\end{lemma}
\begin{proof}
Since, for every \(u \in \Sigma^*\), \(\pre^{\mathcal{N}}_u(F)\) is a computable subset of the finite set of states of \(\mathcal{N}\), it turns out that \(\mathord{\leqslant^{\ell}_{\mathcal{N}}}\) is a decidable wqo.
Let us check that \(\mathord{\leqslant^{\ell}_{\mathcal{N}}}\) is left \(\lang{\mathcal{N}}\)-consistent according to Definition~\ref{def:LConsistent}~\ref{eq:LConsistentPrecise}-\ref{eq:LConsistentmonotone}.
\begin{myEnumA}
\item Let \(u\in \lang{\mathcal{N}}\) and \(v\notin \lang{\mathcal{N}}\).
We have that \(\pre^{\mathcal{N}}_u(F)\) contains some initial state while \(\pre^{\mathcal{N}}_v(F)\) does not, hence \(u \nleq^{\ell}_{\mathcal{N}} v\).
Therefore, \(\mathord{\leqslant^{\ell}_{\mathcal{N}}} \cap (L\times L^c) = \varnothing\).
\item Let us check that $\leqslant^{\ell}_{\mathcal{N}}$ is left monotone.
Observe that $\pre^\mathcal{N}_x$ is a monotone function and that
\begin{equation}
\pre^{\mathcal{N}}_{uv} = \pre^{\mathcal{N}}_{u} \comp \pre^{\mathcal{N}}_v \enspace .\label{eq:prepre}
\end{equation}
Therefore, for all $x_1,x_2\in \Sigma^*$ and $a\in \Sigma$,
\begin{align*}
x_1 \leqslant^{\ell}_{\mathcal{N}} x_2 & \Rightarrow \quad\text{[By definition of \(\leqslant^{\ell}_{\mathcal{N}}\)]} \\
\pre^{\mathcal{N}}_{x_1}(F) \subseteq \pre^{\mathcal{N}}_{x_2}(F) & \Rightarrow \quad\text{[Since $\pre^\mathcal{N}_a$ is monotone]} \\
\pre^{\mathcal{N}}_{a}(\pre^{\mathcal{N}}_{x_1}(F)) \subseteq \pre^{\mathcal{N}}_{a}(\pre^{\mathcal{N}}_{x_2}(F)) & \Leftrightarrow \quad\text{[By Equation~\eqref{eq:prepre}]} \\
\pre^{\mathcal{N}}_{ax_1}(F) \subseteq \pre^{\mathcal{N}}_{ax_2}(F) & \Leftrightarrow \quad\text{[By definition of \(\leqslant^{\ell}_{\mathcal{N}}\)]} \\
ax_1 \leqslant^{\ell}_{\mathcal{N}} ax_2 & \enspace .
\end{align*}
\end{myEnumA}
The proof that \(\leqslant^{r}_{\mathcal{N}}\) is a decidable right \(\lang{\mathcal{N}}\)-consistent quasiorder is symmetric.
\end{proof}
As a consequence, Theorem~\ref{theorem:quasiorderAlgorithm} applies to the wqo \(\mathord{\leqslant^{\ell}_{\mathcal{N}_2}}\) (and
\(\mathord{\leqslant^{r}_{\mathcal{N}_2}}\)), so that one can instantiate Algorithm~\AlgRegularW with $\mathord{\leqslant^{\ell}_{\mathcal{N}_2}}$ for deciding $\lang{\mathcal{N}_1}\subseteq \lang{\mathcal{N}_2}$.
Turning back to the left Nerode wqo
$\leqslant^{\ell}_{\lang{\mathcal{N}_2}}$, it turns out that:
\begin{align*}
u \leqslant^{\ell}_{\lang{\mathcal{N}_2}} v \Leftrightarrow \lang{\mathcal{N}_2}u^{-1} \subseteq \lang{\mathcal{N}_2} v^{-1}
\Leftrightarrow W_{I,\pre^{\mathcal{N}_2}_u(F)} \subseteq W_{I,\pre^{\mathcal{N}_2}_v(F)} \enspace .
\end{align*}
Since \(\pre^{\mathcal{N}_2}_u(F) \subseteq \pre^{\mathcal{N}_2}_v(F) \Rightarrow W_{I,\pre^{\mathcal{N}_2}_u(F)} \subseteq W_{I,\pre^{\mathcal{N}_2}_v(F)}\), it follows that
\[u \leqslant^{\ell}_{\mathcal{N}_2} v \Rightarrow u \leqslant^{\ell}_{\lang{\mathcal{N}_2}} v \enspace .\]
\begin{example}\label{example:Word_Regular_LInc:states}
We illustrate the left state-based quasiorder by using it to solve the language inclusion \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\) from Example~\ref{example:Word_Regular_LInc}.
We have, among others, the following set of predecessors of $F_{\mathcal{N}_2}$:
\begin{align*}
\pre_{\epsilon}^{\mathcal{N}_2}(F_{\mathcal{N}_2}) & \,{=}\, \{5\} & \pre_{a}^{\mathcal{N}_2}(F_{\mathcal{N}_2}) & \,{=}\, \{3\} & \pre_{b}^{\mathcal{N}_2}(F_{\mathcal{N}_2}) & \,{=}\, \{4\} & \pre_{c}^{\mathcal{N}_2}(F_{\mathcal{N}_2}) & \,{=}\, \{2\} \\
\pre_{aa}^{\mathcal{N}_2}(F_{\mathcal{N}_2}) & \,{=}\, \{1, 3\} & \pre_{ab}^{\mathcal{N}_2}(F_{\mathcal{N}_2}) & \,{=}\, \{1\} & \pre_{ac}^{\mathcal{N}_2}(F_{\mathcal{N}_2}) & \,{=}\, \{1, 2\} & \pre_{aab}^{\mathcal{N}_2}(F_{\mathcal{N}_2}) & \,{=}\, \{1\}
\end{align*}
Recall from Example~\ref{example:Word_Regular_LInc} that, for the Nerode's quasiorder, we have that \(b \leqslant^{\ell}_{\lang{\mathcal{N}_2}} a\) and \(c \leqslant^{\ell}_{\lang{\mathcal{N}_2}} a\) while none of these relations hold for \(\leqslant^{\ell}_{\mathcal{N}_2}\).
Let us next show the Kleene iterates computed by Algorithm \AlgRegularW when using \(\leqslant^{\ell}_{\mathcal{N}_2}\).
\begin{align*}
\vect{Y}^{(0)} &=\vect{\varnothing}\\
\vect{Y}^{(1)} &= \vectarg{\epsilon}{F} = \tuple{\varnothing, \{\epsilon\}} \\
\vect{Y}^{(2)} &= \lfloor\vectarg{\epsilon}{F}\rfloor {\sqcup} \lfloor\Pre_{\mathcal{N}_1}(\vect{Y}^{(1)})\rfloor = \tuple{\minor{\{a, b, c\}}, \minor{\{\epsilon\}}} = \tuple{\{a, b, c\}, \{\epsilon\}}\\
\vect{Y}^{(3)} &= \lfloor\vectarg{\epsilon}{F}\rfloor {\sqcup} \lfloor\Pre_{\mathcal{N}_1}(\vect{Y}^{(2)})\rfloor = \tuple{\minor{\{aa, ab, ac, a, b, c\}}, \minor{\{\epsilon\}}} = \tuple{\{ab, a, b, c\}, \{\epsilon\}} \\
\vect{Y}^{(4)} &= \lfloor\vectarg{\epsilon}{F}\rfloor {\sqcup} \lfloor\Pre_{\mathcal{N}_1}(\vect{Y}^{(3)})\rfloor = \tuple{\minor{\{aab, aa, ab, ac, a, b, c\}}, \minor{\{\epsilon\}}} = \tuple{\{ab, a, b, c\}, \{\epsilon\}}
\end{align*}
The least fixpoint is therefore \(\vect{Y}=\tuple{\{ab, a, b, c\}, \{\epsilon\}} \).
Since $c\in \vect{Y}_1$ and \(c \notin \lang{\mathcal{N}_2}\), Algorithm \AlgRegularW concludes that the inclusion \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\) does not hold.
{\ensuremath{\Diamond}}
\end{example}
\subsubsection{Simulation-based Quasiorders.}\label{sec:simulation_basedQO}
Recall that a \demph{simulation} on an NFA $\mathcal{N}= \tuple{Q,Σ,\delta,I,F}$ is a binary relation on the states of \(\mathcal{N}\), i.e. \(\mathord{\preceq} \subseteq Q\times Q\), such that for all $p,q\in Q$ if \(p\preceq q\) then the following two conditions hold:
\begin{myEnumI}
\item if \(p\in F\) then \(q\in F\);
\item for every transition \(p \xrightarrow{a} p'\), there exists a transition \(q \xrightarrow{a} q'\) such that \(p'\preceq q'\).
\end{myEnumI}
It is well known that simulation implies language inclusion, i.e. if $\preceq$ is a simulation on $\mathcal{N}$ then
\[ q \preceq q' \Rightarrow W^{\mathcal{N}}_{q,F}\subseteq W^{\mathcal{N}}_{q',F} \enspace .\]
A relation \(\mathord{\preceq}\subseteq Q\times Q\) can be lifted in the standard universal-existential way to a relation $\preceq^{\forall\exists}\subseteq \wp(Q)\times \wp(Q)$ on sets of states as follows:
\[ X \preceq^{\forall\exists} Y \:\udiff\: \forall x\in X, \exists y\in Y,\: x\preceq y \enspace.\]
In particular, if $\preceq$ is a qo then $\preceq^{\forall\exists}$ is a qo as well.
Also, if $\preceq$ is a simulation relation then its lifting $\preceq^{\forall\exists}$ is such
that \(X \preceq^{\forall\exists} Y \Rightarrow W^{\mathcal{N}}_{X,F} \subseteq W^{\mathcal{N}}_{Y,F}\) holds. This leads us to
define a \emph{right simulation-based quasiorder} \(\preceq^{r}_{\mathcal{N}}\) on $\Sigma^*$ induced by a simulation $\preceq$ on $\mathcal{N}$ as follows:
\begin{equation}\label{eq:sim-qo}
u \mindex{\preceq^{r}_{\mathcal{N}}} v \:\udiff\: \post^{\mathcal{N}}_u(I) \preceq^{\forall\exists} \post^{\mathcal{N}}_v(I) \enspace .
\end{equation}
\begin{lemma}\label{lemma:simulationLConsistent}
Let \(\mathcal{N}\) be an NFA and let \(\mathord{\preceq}\) be a simulation on $\mathcal{N}$.
Then the right simulation-based quasiorder \(\mathord{\preceq^r_{\mathcal{N}}}\) is a decidable right \(\lang{\mathcal{N}}\)-consistent well-quasiorder.
\end{lemma}
\begin{proof}
Since, for every \(u \in \Sigma^*\), \(\post^{\mathcal{N}}_u(I)\) is a computable subset of the finite set of states of \(\mathcal{N}\), it turns out that \(\mathord{\preceq^r_{\mathcal{N}}}\) is a decidable wqo.
Next, we show that \(\mathord{\preceq^r_{\mathcal{N}}}\) is right \(\lang{\mathcal{N}}\)-consistent according to Definition~\ref{def:LConsistent}~\ref{eq:LConsistentPrecise}-\ref{eq:LConsistentmonotone}.
\begin{myEnumA}
\item Let \(u\in \lang{\mathcal{N}}\) and \(v\notin \lang{\mathcal{N}}\).
We have that \(\post^{\mathcal{N}}_u(I)\) contains some final state while \(\post^{\mathcal{N}}_v(I)\) does not.
Let \(q \in \post^{\mathcal{N}}_u(I) \cap F\).
We have that \(q \preceq q'\) for no \(q' \in \post^{\mathcal{N}}_v(I)\) since, by simulation, this would imply \(q' \in \post^{\mathcal{N}}_v(I) \cap F\), which contradicts the fact that \(F \cap \post^{\mathcal{N}}_v(I) = \varnothing\).
We conclude that \(u \npreceq^r_{\mathcal{N}} v\), hence \(\mathord{\preceq^r_{\mathcal{N}}} \cap (L\times L^c) = \varnothing\).
\item Next we show that \(\preceq^r_{\mathcal{N}}\) is right monotone. By Equation~\eqref{def-leftmon}, we check that for all $u,v\in \Sigma^*$ and $a\in \Sigma$,
\(u\preceq^r_{\mathcal{N}} v \Rightarrow ua \preceq^r_{\mathcal{N}} va\):
\begin{adjustwidth}{-0.8cm}{}
\begin{myAlign}{-10pt}{-5pt}
u \preceq^r_{\mathcal{N}} v & \Leftrightarrow \; \text{[By def. of \(\preceq^r_{\mathcal{N}}\)]}\\
\post^{\mathcal{N}}_u(I) \preceq^{\forall\exists} \post^{\mathcal{N}}_v(I) & \Leftrightarrow \; \text{[By def.\ of \(\preceq^{\forall\exists}\)]} \\
\forall x \in \post^{\mathcal{N}}_u(I), \exists y \in \post^{\mathcal{N}}_v(I), x \preceq y & \Rightarrow \; \text{[By def.\ of \(\preceq\)]} \\
\forall x \ggoes{a} x' ,\; x \in \post^{\mathcal{N}}_u(I),\; \exists y \ggoes{a} y' ,\; y\in \post^{\mathcal{N}}_v(I), x' \preceq y' & \Leftrightarrow \\
\span\specialcell{
\text{[Since \(\post^{\mathcal{N}}_{a}\circ \post^{\mathcal{N}}_{u} = \post^{\mathcal{N}}_{ua}\)]}}\\
\forall x' \in \post^{\mathcal{N}}_{ua}(I), \exists y' \in \post^{\mathcal{N}}_{va}(I), \; x' \preceq y' & \Leftrightarrow \;\text{[By def.\ of \(\preceq^{\forall\exists}\)]}\\
\post^{\mathcal{N}}_{ua}(I) \preceq^{\forall\exists} \post^{\mathcal{N}}_{va}(I) & \Leftrightarrow \;\text{[By def.\ of \(\preceq_{\mathcal{N}}^r\)]}\\
ua \preceq_{\mathcal{N}}^r va & \enspace .
\end{myAlign}
\end{adjustwidth}
\end{myEnumA}
\end{proof}
Thus, once again, Theorem~\ref{theorem:quasiorderAlgorithmR} applies to
\(\mathord{\preceq^r_{\mathcal{N}_2}}\) and this allows us to instantiate the
algorithm \AlgRegularWr to the quasiorder
$\mathord{\preceq^r_{\mathcal{N}_2}}$ for deciding the inclusion $\lang{\mathcal{N}_1}\subseteq \lang{\mathcal{N}_2}$.
On the other hand, note that it is possible to define a left simulation \(\preceq^{\forall\exists}_{R}\) on an automaton \(\mathcal{N}\) by applying \(\preceq^{\forall\exists}\) on the reverse of \(\mathcal{N}\).
This left simulation induces a \emph{left simulation-based quasiorder} on \(\Sigma^*\) as follows:
\begin{equation}\label{eq:sim-qo:left}
u \mindex{\preceq^{\ell}_{\mathcal{N}}} v \:\udiff\: \pre^{\mathcal{N}}_u(F) \preceq^{\forall\exists}_R \pre^{\mathcal{N}}_v(F) \enspace .
\end{equation}
It is straightforward to check that Theorem~\ref{theorem:quasiorderAlgorithm} applies to \(\mathord{\preceq^{\ell}_{\mathcal{N}_2}}\) and, therefore, we can instantiate Algorithm~\AlgRegularW for deciding \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\).
\begin{example}\label{example:Word_Regular_LInc:sim}
Finally, let us illustrate the use of the left simulation-based quasiorder to solve the language inclusion \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\) of Example~\ref{example:Word_Regular_LInc}.
For the set $F_{\mathcal{N}_2}$ of final states of \(\mathcal{N}_2\)
we have the same set of predecessors computed in Example~\ref{example:Word_Regular_LInc:states} and, among others, the following left simulations between these sets (for clarity, we omit the argument of the function \(\pre\), which is always \(F_{\mathcal{N}_2}\)):
\begin{align*}
\pre_{c}^{\mathcal{N}_2}() = \{2\} & \preceq^{\forall\exists}_R \{3\} = \pre_{a}^{\mathcal{N}_2}() & \pre_{b}^{\mathcal{N}_2}() = \{4\} & \npreceq^{\forall\exists}_R \{3\} = \pre_{a}^{\mathcal{N}_2}() \\
\pre_{ac}^{\mathcal{N}_2}() = \{1\} & \preceq^{\forall\exists}_R \{4\} = \pre_{b}^{\mathcal{N}_2}() & \pre_{ac}^{\mathcal{N}_2}() = \{1\} & \npreceq^{\forall\exists}_R \{2\} = \pre_{c}^{\mathcal{N}_2}()
\end{align*}
As expected, the simulation-based quasiorder lies in between the Nerode and the state-based quasiorders.
As shown in Examples~\ref{example:Word_Regular_LInc} and~\ref{example:Word_Regular_LInc:states}, we have \(b \leqslant_{\lang{\mathcal{N}_2}}^{\ell} a\), \(c \leqslant_{\lang{\mathcal{N}_2}}^{\ell} a\), \(b \not\leqslant^{\ell}_{\mathcal{N}_2} a\) and \(c \not\leqslant^{\ell}_{\mathcal{N}_2} a\) while \(c \preceq_{\mathcal{N}_2}^{\ell} a\), but \(b \npreceq_{\mathcal{N}_2}^{\ell} a\).
Let us show the computation of the Kleene iterates performed by Algorithm \AlgRegularW when using the quasiorder \(\mathord{\preceq_{\mathcal{N}_2}^{\ell}}\).
\begin{align*}
\vect{Y}^{(0)} &= \vect{\varnothing}\\
\vect{Y}^{(1)} &= \vectarg{\epsilon}{F} = \tuple{\varnothing, \{\epsilon\}} \\
\vect{Y}^{(2)} &= \lfloor\vectarg{\epsilon}{F}\rfloor \sqcup \lfloor\Pre_{\mathcal{N}_1}(\vect{Y}^{(1)})\rfloor = \tuple{\minor{\{a, b, c\}}, \minor{\{\varepsilon\}}} = \tuple{\{c\}, \{\varepsilon\}}\\
\vect{Y}^{(3)} &= \lfloor\vectarg{\epsilon}{F}\rfloor \sqcup \lfloor\Pre_{\mathcal{N}_1}(\vect{Y}^{(2)})\rfloor = \tuple{\minor{\{ac, a, b, c\}}, \minor{\{\varepsilon\}}} = \tuple{\{c\}, \{\varepsilon\}}
\end{align*}
The least fixpoint is therefore \(\vect{Y} = \tuple{\{c\}, \{\varepsilon\}}\).
Since $c\in \vect{Y}_1$ and \(c \notin \lang{\mathcal{N}_2}\), Algorithm~\AlgRegularW concludes that the inclusion \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\) does not hold.
{\ensuremath{\Diamond}}
\end{example}
Let us observe that \(u \preceq^{r}_{\mathcal{N}_2} v\) implies \(W_{\post^{\mathcal{N}_2}_u(I),F} \subseteq W_{\post^{\mathcal{N}_2}_v(I),F}\), which is equivalent to the right Nerode's quasiorder \(u\leqslant^{r}_{\lang{\mathcal{N}_2}} v\) for $\lang{\mathcal{N}_2}$.
Furthermore, for the state-based quasiorder defined in
\eqref{eqn:state-qo}, we have that
\(u \leqslant^{r}_{\mathcal{N}_2} v \Rightarrow u\preceq^r_{\mathcal{N}_2} v\) trivially holds.
Summing up, given an NFA \(\mathcal{N}\) with \(\lang{\mathcal{N}} = L\), the following containments relate the state-based,
simulation-based and Nerode's quasiorders:
\[\mathord{\leqslant^{r}_{\mathcal{N}}} \,\subseteq\, \mathord{\preceq^r_{\mathcal{N}}} \,\subseteq\, \mathord{\leqslant^{r}_{L}}, \qquad \mathord{\leqslant^{\ell}_{\mathcal{N}}} \,\subseteq\, \mathord{\preceq^{\ell}_{\mathcal{N}}} \,\subseteq\, \mathord{\leqslant^{\ell}_{L}}\enspace .\]
Recall that these are decidable \(\lang{\mathcal{N}_2}\)-consistent well-quasiorders so that Algorithm \AlgRegularW can be instantiated for each of them for deciding an inclusion $\lang{\mathcal{N}_1}\subseteq \lang{\mathcal{N}_2}$.
Examples~\ref{example:Word_Regular_LInc}, \ref{example:Word_Regular_LInc:states} and~\ref{example:Word_Regular_LInc:sim} show how the algorithm behaves for each of the three quasiorders considered in this section.
Despite their simplicity, the examples evidence the differences in the behavior of the algorithm when considering the different quasiorders.
In particular, we observe that the fixpoint computation for \(\mathord{\leqslant^{\ell}_{\lang{\mathcal{N}_2}}}\) coincides with the one for \(\mathord{\preceq^{\ell}_{\mathcal{N}_2}}\) which, as expected, converges faster than the one for \(\mathord{\leqslant^{\ell}_{\mathcal{N}_2}}\).
As shown by \citet{deLuca1994}, \(\mathord{\leqslant^{\ell}_{\lang{\mathcal{N}_2}}}\) is the coarsest well-quasiorder for which Algorithm~\AlgRegularW works (i.e. Theorem~\ref{theorem:quasiorderAlgorithm} holds), hence its corresponding fixpoint computation exhibits optimal behavior in terms of the number of closed sets considered.
However, Nerode's quasiorder is not practical since it requires checking language inclusion, which is the PSPACE-complete problem we are trying to solve, in order to decide whether two words are related.
Therefore, the coincidence of the fixpoint computations for \(\mathord{\leqslant^{\ell}_{\lang{\mathcal{N}_2}}}\) and \(\mathord{\preceq^{\ell}_{\mathcal{N}_2}}\) is of special interest since it evidences that Algorithm~\AlgRegularW might exhibit optimal behavior while using a ``simpler'' well-quasiorder such as \(\mathord{\preceq^{\ell}_{\mathcal{N}_2}}\), which is a polynomial under-approximation of \(\mathord{\leqslant^{\ell}_{\lang{\mathcal{N}_2}}}\).
\subsection{Inclusion in Traces of One-Counter Nets.}
\label{sub:containment_in_one_counter_languages}
We show that our framework can be instantiated to systematically derive an algorithm for deciding the inclusion \(\lang{\mathcal{N}} \subseteq L_2\) where \(L_2\) is the trace set of a one-counter net.
This is accomplished by defining a decidable \(L_2\)-consistent quasiorder so that Theorem~\ref{theorem:quasiorderAlgorithm} can be applied.
Intuitively, a \emph{one-counter net} is an NFA endowed with a nonnegative integer counter which
can be incremented, decremented or
left unchanged by a transition.
\begin{definition*}[One-Counter Net]
A One-Counter Net (OCN)~\cite{hofman_trace_2018} is a tuple $\mathcal{O}=\tuple{Q,\Sigma,\delta}$ where $Q$ is a finite set of states, $\Sigma$ is an alphabet and $\delta\subseteq Q\times \Sigma\times \{-1,0,1\}\times Q$ is a set of transitions.\eod
\end{definition*}
A \demph{configuration of an OCN} \(\mathcal{O} = \tuple{Q,\Sigma,\delta}\) is a pair $qn$ consisting of a state \(q\in Q\) and a value \(n\in\mathbb{N}\) for the counter.
Given two configurations of an OCN, \(qn, q'n'\in Q\times \mathbb{N}\), we write \(qn \xrightarrow{a} q'n'\) and call it an \(a\)-step (or simply step) if there exists a transition \( (q,a,d,q')\in\delta \) such that \(n'=n+d\).
Given \(qn\in Q\times\mathbb{N}\), the \demph{trace set} \(T(qn)\subseteq \Sigma^*\) of an OCN is defined as follows:
\begin{align*}
T(qn) & \ud \{u \in \Sigma^* \mid Z_u^{qn} \neq \varnothing\} \quad \text{ where } \\
Z_u^{qn} & \ud \{ q_k n_k \in Q\times \mathbb{N} \mid qn=q_0n_0 \xrightarrow{a_1} q_1n_1\xrightarrow{a_2}\cdots \xrightarrow{a_k} q_kn_k,\: a_1\cdots a_k=u \}\enspace .
\end{align*}
Observe that \(Z_{\epsilon}^{qn}= \{ qn \}\) and \(Z_u^{qn}\) is a finite set for every word \(u\in\Sigma^*\).
Let us consider the poset $\tuple{\mathbb{N}_{\bot}\ud \mathbb{N}\cup\{\bot\},\leq_{\mathbb{N}_{\bot}}}$ where \(\bot\leq_{\mathbb{N}_{\bot}} n\) holds for all \(n\in\mathbb{N}_{\bot}\), while for all $n,n'\in
\mathbb{N}$, $n\leq_{\mathbb{N}_{\bot}}n'$ is the standard ordering relation between numbers.
For a finite set of states \(S \subseteq Q\times\mathbb{N}\) define the so-called macro state \(M_S \colon Q \rightarrow \mathbb{N}_{\bot}\) as follows:
\[M_S( q ) \ud \max \{ n\in \mathbb{N} \mid q n \in S \}\,,\]
\noindent
where $\max\varnothing\ud\bot$.
Define the following quasiorder on $\Sigma^*$:
\begin{equation}\label{eq:ocnleq}
u \leq_{{qn}}^r v \:\udiff\:\forall q\in Q,\, M_{Z_u^{qn}}(q) \leq_{\mathbb{N}_{\bot}} M_{Z_v^{qn}}(q) \enspace .
\end{equation}
\begin{lemma}\label{lemma:ocnwqo}
Let \(\mathcal{O}\) be an OCN. For any configuration $q n$ of $\mathcal{O}$, \(\mathord{\leq_{{qn}}^r}\) is a right \(T(qn)\)-consistent decidable well-quasiorder.
\end{lemma}
\begin{proof}
It follows from Dickson's Lemma \citep[Section~II.7.1.2]{Sakarovitch} that \(\mathord{\leq_{{qn}}^r}\) is a wqo.
Next, we show that \(\mathord{\leq_{{qn}}^r}\) is \(T(qn)\)-consistent according to Definition~\ref{def:LConsistent}~\ref{eq:LConsistentPrecise}-\ref{eq:LConsistentmonotone}.
\begin{myEnumA}
\item Since \(Z_u^{qn}\) and \(Z_v^{qn}\) are finite sets, we have that the macro state functions
\(M_{Z_u^{qn}}\) and \(M_{Z_v^{qn}}\) are computable, hence the relation \(\mathord{\leq_{{qn}}^r}\) is decidable.
Let \(u\in T(qn)\) and \(v\notin T(qn)\).
Then \(M_{Z_u^{qn}}(q')\neq \bot\) for some \(q'\in Q\) and \(M_{Z_v^{qn}}(q') = \bot\) since \(Z_v^{qn} = \varnothing\).
It follows that \(u \not\leq_{{qn}}^r v \) and, therefore, \(\mathord{\leq_{{qn}}^r} \cap (T(qn) \times (T(qn))^c) = \varnothing\).
\item Next we show that
\(u \leq_{{qn}}^r v\) implies \(ua \leq_{{qn}}^r va\)
for all \(a\in \Sigma\), since, by Equation~\eqref{def-leftmon}, this is equivalent to the fact that $\leq_{{qn}}^r$ is right monotone.
We proceed by contradiction.
Assume that \(u \leq_{{qn}}^r v\) and that there exists \(q' \in Q\) such that \(M_{Z^{qn}_{ua}}(q') \not\leq_{\mathbb{N}_{\bot}} M_{Z^{qn}_{va}}(q')\).
Then we have that \(m_1\ud\max\{n \mid q'n \in Z^{qn}_{ua}\} \not\leq_{\mathbb{N}_{\bot}} m_2\ud\max\{n \mid q'n \in Z^{qn}_{va}\}\), which implies, since
$m_1\neq \bot$, that
$m_1,m_2\in \mathbb{N}$ and $m_1 > m_2$.
On the other hand, there exists a transition \( (p,a,d,q') \in \delta\) such that \(p(m_1-d) \in Z_u^{qn}\) and \(p(m_2-d) \in Z_v^{qn}\).
Observe that \(\max\{n \mid pn \in Z_u^{qn}\} = m_1-d\) since otherwise we would have \(\max\{n \mid pn \in Z_u^{qn}\} +d > m_1\) which contradicts the definition of \(m_1\).
Similarly, \(\max\{n \mid pn \in Z_v^{qn}\} = m_2-d\).
Since \(m_1 > m_2\) we have that \(m_1-d > m_2-d\) and, as a consequence, \(\max\{n \mid pn \in Z_u^{qn}\} > \max\{n \mid pn \in Z_v^{qn}\}\), which contradicts \(u \leq_{{qn}}^r v\).
\end{myEnumA}
\end{proof}
Thus, as a consequence of Theorem~\ref{theorem:quasiorderAlgorithm},
Lemma~\ref{lemma:ocnwqo} and the decidability of membership \(u\in T(qn)\),
the following known decidability result for language inclusion of regular languages into traces of OCNs~\citep[Theorem 3.2]{JANCAR1999476} is systematically derived within our framework.
\begin{corollary}\label{theorem:ocncontainment}
Let \(\mathcal{N}\) be an NFA and \(\mathcal{O}\) be an OCN. For any configuration \(qn \) of $\mathcal{O}$, the language inclusion
\(\lang{\mathcal{N}} \subseteq T(qn)\) is decidable.
\end{corollary}
The following result closes a conjecture made by \citet[Section 6]{deLuca1994}.
\begin{lemma}\label{lemma:RightNerodeOcnwqo}
Let \(\mathcal{O}\) be an OCN.
Then the right Nerode's quasiorder \(\mathord{\leqslant^{r}_{T(qn)}}\) is an undecidable well-quasiorder.
\end{lemma}
\begin{proof}
Recall that \(\mathord{\leqslant^{r}_{T(qn)}}\) is maximum in the set of all right \(T(qn)\)-consistent quasiorders~\citep[Section~2, point~4]{deLuca1994}.
As a consequence, \(u\leq_{{qn}}^{r} v\) $\Rightarrow$ \(u\leqslant^{r}_{T(qn)} v\), for all \(u,v\in\Sigma^*\).
By Lemma~\ref{lemma:ocnwqo}, \(\leq_{{qn}}^{r}\) is a wqo, so that \(\mathord{\leqslant^{r}_{T(qn)}}\) is a wqo as well.
Undecidability of \(\mathord{\leqslant^{r}_{T(qn)}}\) follows from the undecidability of the trace inclusion problem for nondeterministic OCNs \citep[Theorem 20]{Hofman:2013:DWS:2591370.2591405} since given the OCNs \(\mathcal{O}_1=(Q_1,Σ,\delta_1)\) and \(\mathcal{O}_2=(Q_2,Σ,\delta_2)\), we can define the union OCN \(\mathcal{O}_3\ud (Q_1\cup Q_2\cup\{q\}, Σ, \delta_3)\) where \(\delta_3 \) maps \((q,a,0)\) to \(q_1 \in Q_1\), \( (q,b,0) \) to \(q_2 \in Q_2\) and behaves like \(\delta_1\) or \(\delta_2\) elsewhere. Then, it turns out that
\[a \leqslant^{r}_{T_3(qn)} b \Leftrightarrow a^{-1}T_3(qn) \subseteq b^{-1}T_3(qn) \Leftrightarrow T_1(q_1n)\subseteq T_2(q_2n)\enspace .\]
Therefore, deciding the right Nerode's quasiorder \(\leqslant^{r}_{T_3(qn)}\) is as hard as deciding \(T_1(q_1n)\subseteq T_2(q_2n)\).
\end{proof}
It is worth remarking that, by Lemma~\ref{lemma:leftrightnerodegoodqo}~\ref{lemma:leftrightnerodegoodqo:Consistent}, the left and right Nerode's quasiorders \(\mathord{\leqslant^{\ell}_{T(qn)}}\) and \(\mathord{\leqslant^{r}_{T(qn)}}\) are \(T(qn)\)-consistent.
However, the left Nerode's quasiorder does not need to be a wqo, otherwise \(T(qn)\) would be regular.
We conclude this section by conjecturing that our framework could be instantiated for extending
Corollary~\ref{theorem:ocncontainment} to traces of Petri Nets, a result
which is already known to be true~\cite{JANCAR1999476}.
\section{A Novel Perspective on the Antichain Algorithm}
\label{sec:novel_perspective_AC}
Let \(\mathcal{N}_1 = \tuple{Q_1,\delta_1,I_1,F_1,\Sigma}\) and \(\mathcal{N}_2 = \tuple{Q_2,\delta_2,I_2,F_2,\Sigma}\) be two NFAs
and consider
the state-based left \(\lang{\mathcal{N}_2}\)-consistent wqo
\(\mathord{\leqslant_{\mathcal{N}_2}^{\ell}}\) defined by Equivalence~\eqref{eqn:state-qo}.
Theorem~\ref{theorem:quasiorderAlgorithm} shows that Algorithm \AlgRegularW decides the language inclusion \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\) by manipulating finite sets of words.
Since \(u \leqslant_{\mathcal{N}_2}^{\ell} v \Leftrightarrow Pe^{\mathcal{N}_2}_u(F_2) \subseteq Pe^{\mathcal{N}_2}_v(F_2)\), we could equivalently consider
the
set of states \(Pe^{\mathcal{N}_2}_u(F_2)\in \wp(Q_2)\) rather than
a word $u\in \Sigma^*$.
This observation suggests the design of an algorithm analogous to \AlgRegularW but computing on the poset
\(\tuple{\AC_{\tuple{\wp(Q_2),\subseteq}},\sqsubseteq}\) of antichains
of sets of states of the complete lattice $\tuple{\wp(Q_2),\subseteq}$.
To that end, the poset \(\tuple{\AC_{\tuple{\wp(Q_2),\subseteq}},\sqsubseteq}\) is viewed as an abstraction of the poset \(\tuple{\wp(\Sigma^*), \subseteq}\) by using the abstraction and concretization functions \(\alpha\colon \wp(\Sigma^*) \rightarrow \AC_{\tuple{\wp(Q_2),\subseteq}}\) and \(\gamma\colon \AC_{\tuple{\wp(Q_2),\subseteq}}\rightarrow\wp(\Sigma^*)\) and using the abstract function \({\Pre}_{\mathcal{N}_1}^{\mathcal{N}_2}:(\AC_{\tuple{\wp(Q_2),\subseteq}})^{|Q_1|}\rightarrow (\AC_{\tuple{\wp(Q_2),\subseteq}})^{|Q_1|}\) defined as follows:
\begin{align}
& \alpha(X) \ud \lfloor \{ Pe_u^{\mathcal{N}_2}(F_2) \in \wp(Q_2) \mid u\in X\} \rfloor \,,\nonumber\\
& \gamma(Y) \ud \{v \in \Sigma^* \mid \exists u\in \Sigma^*,\, Pe_{u}^{\mathcal{N}_2}(F_2) \in Y \,\land\, Pe_{u}^{\mathcal{N}_2}(F_2) \subseteq Pe_{v}^{\mathcal{N}_2}(F_2)\} \,,\label{def-antichain-state-abs}\\
&\mindex{\Pre_{\mathcal{N}_1}^{\mathcal{N}_2}}(\tuple{X_q}_{q\in Q_1}) \ud \langle \lfloor \big\{ Pe_a^{\mathcal{N}_2}(S) \in \wp(Q_2) \mid \exists a\in \Sigma, q'\in Q_1, q'\in\delta_1(q,a) \wedge S \in X_{q'} \big\} \rfloor \rangle_{q\in Q_1} \nonumber .
\end{align}
Observe that the functions $\alpha$ and ${\Pre}_{\mathcal{N}_1}^{\mathcal{N}_2}$ are well-defined because minors are antichains.
\begin{lemma}\label{lemma:rhoisgammaalpha}
The following properties hold:
\begin{myEnumA}
\item \(\tuple{\wp(\Sigma^*),\subseteq}\galois{\alpha}{\gamma}\tuple{\AC_{\tuple{\wp(Q_2),\subseteq}},\sqsubseteq}\) is a GC.
\label{lemma:rhoisgammaalpha:GC}
\item \(\gamma \comp \alpha = \rho_{\leqslant^{\ell}_{\mathcal{N}_2}}\).\label{lemma:rhoisgammaalpha:rho}
\item For all \(\vect{X}\in \alpha(\wp(\Sigma^*))^{|Q_1|}\), \(\Pre_{\mathcal{N}_1}^{\mathcal{N}_2}(\vect{X}) = {\alpha\comp \Pre_{\mathcal{N}_1} \comp \gamma(\vect{X})}\). \label{lemma:rhoisgammaalpha:pre}
\end{myEnumA}
\end{lemma}
\begin{proof}
\begin{myEnumA}
\item
Let us first observe that $\alpha$ and $\gamma$ are well-defined.
First, $\alpha(X)$ is an antichain of $\tuple{\wp(Q_2),\subseteq}$ since it is a minor for the well-quasiorder \(\subseteq\) and, therefore, it is finite.
On the other hand, $\gamma(Y)$ is clearly an element of $\tuple{\wp(\Sigma^*), \subseteq}$ by definition.
Then, for all $X\in \wp(\Sigma^*)$ and
$Y\in \AC_{\tuple{\wp(Q_2),\subseteq}}$,
it turns out that:
\begin{adjustwidth}{-0.5cm}{}
\begin{myAlign}{0pt}{}
\alpha(X) \sqsubseteq Y & \Leftrightarrow \quad\text{[By definition of \(\sqsubseteq\)]} \\
\forall z \in \alpha(X), \exists y \in Y, \; y \subseteq z &\Leftrightarrow \quad\text{[By definition of \(\alpha\) and \(\minor{\cdot}\)]} \\
\forall v \in X, \exists y \in Y, \; y \subseteq Pe^{\mathcal{N}_2}_v(F_2) &\Leftrightarrow \quad\text{[By definition of \(\gamma\)]} \\
\forall v \in X, \; v \in \gamma(Y) & \Leftrightarrow \quad\text{[By definition of \(\subseteq\)]} \\
X \subseteq \gamma(Y) &\enspace .
\end{myAlign}
\end{adjustwidth}
\item For all \(X \in \wp(\Sigma^*)\) we have that
\begin{adjustwidth}{-0.95cm}{}
\begin{myAlign}{0pt}{}
\gamma(\alpha(X)) &=\quad\text{[By definition of $\alpha,\gamma$]}\\
\{v \,{\in}\, \Sigma^* \mid \exists u\,{\in}\, \Sigma^*, Pe_{u}^{\mathcal{N}_2}(F_2) \,{\in}\, \lfloor \{ Pe_w^{\mathcal{N}_2}(F_2) \mid w\in X\} \rfloor
\span \land Pe_{u}^{\mathcal{N}_2}(F_2) \subseteq Pe_{v}^{\mathcal{N}_2}(F_2)\} \\
&=\quad\text{[By definition of minor]} \\
\{v \in \Sigma^* \mid \exists u\in X,\, Pe_{u}^{\mathcal{N}_2}(F_2) \subseteq Pe_{v}^{\mathcal{N}_2}(F_2)\} &=\quad\text{[By definition of \(\mathord{\leqslant^{\ell}_{\mathcal{N}_2}}\)]}\\
\{v \in \Sigma^* \mid \exists u \in X ,\, u \leqslant^{\ell}_{\mathcal{N}_2} v\}&=\quad\text{[By definition of\ \(\rho_{\leqslant^{\ell}_{\mathcal{N}_2}}\)]}\\
\rho_{\leqslant^{\ell}_{\mathcal{N}_2}}(X) &\enspace .
\end{myAlign}
\end{adjustwidth}
\item For all \(\vect{X}\in \alpha(\wp(\Sigma^*))^{|Q_1|}\) we have that
\begin{adjustwidth}{-0.8cm}{}
\begin{myAlign}{0pt}{0pt}
\alpha(\Pre_{\mathcal{N}_1}(\gamma(\vect{X}))) &= \quad \text{[By def. of \(\Pre_{\mathcal{N}_1}\)]} \\
\tuple{\alpha({\textstyle \bigcup_{a \in \Sigma, q\ggoes{a}_{\mathcal{N}_1} q'}} a\gamma(\vect{X}_{q'}))}_{q \in Q_1} &=\quad \text{[By definition of \(\alpha\)]} \\
\langle\lfloor \{ Pe^{\mathcal{N}_2}_u(F_2) \mid u \in {\textstyle \bigcup_{a \in \Sigma, q\ggoes{a}_{\mathcal{N}_1} q'}} a\gamma(\vect{X}_{q'})\}\rfloor\rangle_{q\in Q_1} &=\\
\span\specialcell{
\text{[By \(Pe^{\mathcal{N}_2}_{av} = Pe^{\mathcal{N}_2}_a\comp Pe^{\mathcal{N}_2}_v\)]}}\\
\langle\lfloor \{ Pe^{\mathcal{N}_2}_a(\{ Pe^{\mathcal{N}_2}_u(F_2) \mid u \in {\textstyle\bigcup_{q\ggoes{a}_{\mathcal{N}_1} q'}}\gamma(\vect{X}_{q'})\}) \mid a \in \Sigma\}\rfloor\rangle_{q\in Q_1} &= \quad \text{[By rewriting]}\\
\langle\lfloor \{ Pe^{\mathcal{N}_2}_a(S) \mid a \in \Sigma, q\ggoes{a}_{\mathcal{N}_1} q', S\in \{ Pe^{\mathcal{N}_2}_u(F_2) \mid u \in \gamma(\vect{X}_{q'})\}\}\rfloor\rangle_{q\in Q_1} &=\\
\span\specialcell{
\text{[By \( \minor{Pe^{\mathcal{N}_2}_a(X)} = \minor{Pe^{\mathcal{N}_2}_a(\minor{X})}\)]}}\\
\langle\lfloor \{ Pe^{\mathcal{N}_2}_a(S) \mid a \in \Sigma, q\ggoes{a}_{\mathcal{N}_1} q', S\in \minor{\{ Pe^{\mathcal{N}_2}_u(F_2) \mid u \in \gamma(\vect{X}_{q'})\}}\}\rfloor\rangle_{q\in Q_1} & =\quad \text{[By definition of \(\alpha\)]} \\
\langle\lfloor \{ Pe^{\mathcal{N}_2}_a(S) \mid a \in \Sigma, q\ggoes{a}_{\mathcal{N}_1} q', S\in \alpha(\gamma(\vect{X}_{q'}))\}\rfloor\rangle_{q\in Q_1} &=\\
\span\specialcell{
\text{[Since \(\vect{X} \in \alpha\), \(\alpha(\gamma(\vect{X}_{q'})) = \vect{X}_{q'}\)]}}\\
\langle \lfloor \{ Pe^{\mathcal{N}_2}_a(S) \mid a\in\Sigma, q\ggoes{a}_{\mathcal{N}_1}q', S \in \vect{X}_{q'} \} \rfloor \rangle_{q\in Q_1} &=\quad \text{[By def. of ${\Pre}_{\mathcal{N}_1}^{\mathcal{N}_2}$]} \\
{\Pre}_{\mathcal{N}_1}^{\mathcal{N}_2}(\vect{X}) & \enspace .
\end{myAlign}
\end{adjustwidth}
\end{myEnumA}
\end{proof}
It follows from Lemma~\ref{lemma:rhoisgammaalpha} that the GC \(\tuple{\wp(\Sigma^*),\subseteq}\galois{\alpha}{\gamma}\tuple{\AC_{\tuple{\wp(Q_2),\subseteq}},\sqsubseteq}\) and the abstract
function \(\Pre_{\mathcal{N}_1}^{\mathcal{N}_2}\) satisfy the hypotheses~\ref{theorem:EffectiveAlgorithm:prop:rho}-\ref{theorem:EffectiveAlgorithm:prop:abseps} of Theorem~\ref{theorem:EffectiveAlgorithm}.
Thus, in order to obtain an algorithm for deciding \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\) it remains to show that requirement~\ref{theorem:EffectiveAlgorithm:prop:absincl} of Theorem~\ref{theorem:EffectiveAlgorithm} holds, i.e. there is an algorithm to decide whether \(\vect{Y} \sqsubseteq \alpha(\vectarg{L_2}{I_2})\) for every \(\vect{Y} \in \alpha(\wp(\Sigma^*))^{|Q_1|}\).
In order to do that, we first provide some intuitions on how the resulting algorithm works.
First, observe that the Kleene iterates of the function \(\lambda \vect{X}\ldotp\alpha(\vectarg{\epsilon}{F_1}) \sqcup \Pre_{\mathcal{N}_1}^{\mathcal{N}_2}(\vect{X})\) of Theorem~\ref{theorem:EffectiveAlgorithm} are vectors of antichains in \(\tuple{\AC_{\tuple{\wp(Q_2),\subseteq}},\sqsubseteq}\), where
each component is indexed by some \(q\in Q_1\) and represents (through its minor set) a set of sets of states that are predecessors of \(F_2\) in \(\mathcal{N}_2\) by a word $u$ generated by \(\mathcal{N}_1\) from that state \(q\), i.e. \(Pe_u^{\mathcal{N}_2}(F_2)\) with \(u \in W^{\mathcal{N}_1}_{q,F_1}\).
Since \(\epsilon \in W_{q,F_1}^{\mathcal{N}_1}\) for all \(q \in F_1\) and \(Pe_\epsilon^{\mathcal{N}_2}(F_2) = F_2\) the
iterations of the procedure $\Kleene$ begin with the initial vector \(\alpha(\vectarg{\epsilon}{F_1}) = \tuple{\nullable{q\in F_1}{F_2}{\varnothing}}_{q\in Q_1}\).
On the other hand, note that by taking the minor of each vector component, we are considering smaller sets which still preserve the relation \(\sqsubseteq\) since
\begin{equation*}
A \sqsubseteq B \Leftrightarrow \minor{A} \sqsubseteq B \Leftrightarrow A \sqsubseteq \minor{B} \Leftrightarrow \minor{A} \sqsubseteq \minor{B}\enspace .
\end{equation*}
Let \(\tuple{Y_q}_{q\in Q_1}\) be the fixpoint computed by the \(\Kleene\) procedure.
It turns out that, for each component $q\in Q_1$, \(Y_q = \minor{\{Pe_u^{\mathcal{N}_2}(F_2)\mid u \in W_{q,F_1}^{\mathcal{N}_1}\}}\) holds.
Whenever the inclusion \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\) holds, all the sets of states in \(Y_q\) for some initial state \(q \in I_1\) are predecessors of \(F_2\) in \(\mathcal{N}_2\) by words in \(\lang{\mathcal{N}_2}\), so that they all contain at least one initial state in \(I_2\).
As a result, we obtain Algorithm \AlgRegularA, that is,
a ``state-based'' inclusion algorithm for deciding \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\).
\begin{figure}
\caption{State-based algorithm \AlgRegularA for deciding the inclusion \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\).}
\label{alg:RegIncA}
\end{figure}
\begin{theorem}\label{theorem:statesQuasiorderAlgorithm}
Let \(\mathcal{N}_1,\mathcal{N}_2\) be NFAs.
The algorithm \AlgRegularA decides the inclusion \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\).
\end{theorem}
\begin{proof}
We show that all the conditions~\ref{theorem:EffectiveAlgorithm:prop:rho}-\ref{theorem:EffectiveAlgorithm:prop:absincl} of Theorem~\ref{theorem:EffectiveAlgorithm} are satisfied for the abstract domain \(\tuple{D,\leqslant_D}=\tuple{\AC_{\tuple{\wp(Q_2),\subseteq}},\sqsubseteq}\) as defined by the Galois Connection of Lemma~\ref{lemma:rhoisgammaalpha}~\ref{lemma:rhoisgammaalpha:GC}.
\begin{myEnumA}
\item Since, by Lemma~\ref{lemma:rhoisgammaalpha}~\ref{lemma:rhoisgammaalpha:rho}, \(\rho_{\leqslant^{\ell}_{\mathcal{N}_2}}(X) = \gamma(\alpha(X))\) it follows from Lemmas~\ref{lemma:properties} and~\ref{lemma:LAconsistent} that \(\gamma(\alpha(L_2)) = L_2\).
Moreover, for all \(a\in\Sigma\), \(X\in\wp(\Sigma^*)\) we have that:
\begin{align*}
\gamma\alpha(a X) & = \quad \text{[In GCs \(\gamma = \gamma \alpha \gamma\)]} \\
\gamma\alpha\gamma\alpha(a X) & = \quad \text{[By Lemma~\ref{lemma:properties}~\ref{lemma:properties:bw} with \(\rho_{\leqslant^{\ell}_{\mathcal{N}_2}} = \gamma\alpha\)]} \\
\gamma\alpha\gamma \alpha(a\gamma \alpha(X)) &= \quad \text{[In GCs
\(\gamma = \gamma \alpha \gamma\)]} \\
\gamma\alpha(a\gamma\alpha(X)) & \enspace.
\end{align*}
\item \( (\AC_{\tuple{\wp(Q_2),\subseteq}},\sqsubseteq) \) is effective because
$Q_2$ is finite.
\item By Lemma~\ref{lemma:rhoisgammaalpha}~\ref{lemma:rhoisgammaalpha:pre} we have that
\(\alpha(\Pre_{\mathcal{N}_1}(\gamma(\vect{X}))) = {\Pre}_{\mathcal{N}_1}^{\mathcal{N}_2}(\vect{X})\) for all \(\vect{X}\in \alpha(\wp(\Sigma^*))^{|Q_1|}\).
\item \(\alpha(\{\epsilon\}) = \{F_2\}\) and \(\alpha({\varnothing})=\varnothing\), hence \(\minor{\alpha(\vectarg{\epsilon}{F_1})}\) is trivial to compute. \label{prop:alphaepsilon}
\item Since \(\alpha(\vectarg{L_2}{I_1})=\tuple{\alpha(\nullable{q\in I_1}{L_2}{\Sigma^*})}_{q\in Q_1}\), for all $\vect{Y}\in\alpha(\wp(\Sigma^*))^{|Q_1|}$ the relation \(\vect{Y} \sqsubseteq \alpha(\vectarg{L_2}{I_1})\) trivially holds for all components \(q \notin I_1\).
For the components $q\in I_1$, it suffices to show that
\(Y_q \sqsubseteq \alpha(L_2) \Leftrightarrow \forall S \in Y_q, \; S \cap I_2 \neq \varnothing\), which is the check performed by lines~2--5 of algorithm \AlgRegularA.
\begin{align*}
Y_q \sqsubseteq \alpha(L_2) & \Leftrightarrow \quad \text{[Because \(Y_q = \alpha(U)\) for some \(U \in \wp(\Sigma^*)\)]} \\
\alpha(U) \sqsubseteq \alpha(L_2) & \Leftrightarrow \quad \text{[By GC]} \\
U \subseteq \gamma(\alpha(L_2)) & \Leftrightarrow \quad \text{[By L.~\ref{lemma:properties},~\ref{lemma:LAconsistent} and~\ref{lemma:rhoisgammaalpha}, $\gamma(\alpha(L_2))=L_2$]} \\
U \subseteq L_2 & \Leftrightarrow \quad \text{[By definition of \(Pe_u^{\mathcal{N}_2}\)]} \\
\forall u \in U, Pe_u^{\mathcal{N}_2}(F_2) \cap I_2 \neq \varnothing & \Leftrightarrow \quad \text{[Since \(Y_q =\alpha(U) = \lfloor \{ Pe_u^{\mathcal{N}_2}(F_2) \mid u\in U\} \rfloor \)]} \\
\forall S \in Y_q, S \cap I_2 \neq \varnothing &\enspace .
\end{align*}
\end{myEnumA}
Thus, by Theorem~\ref{theorem:EffectiveAlgorithm}, Algorithm \AlgRegularA decides \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\).
\end{proof}
\subsection{Relationship to the Antichains Algorithm}
\label{sub:relationship_to_the_antichain_algorithm}
\citet{DBLP:conf/cav/WulfDHR06} introduced two so-called antichains algorithms, denoted
\emph{forward} and \emph{backward}, for deciding the universality of the language accepted by an NFA, i.e. whether the language is $\Sigma^*$ or not.
Then, they extended the backward algorithm to decide the inclusion between the languages accepted by two NFAs.
In what follows we show that Algorithm \AlgRegularA is equivalent to the corresponding extension of the forward algorithm and, therefore, dual to the backward antichains algorithm for language inclusion by \citet[Theorem 6]{DBLP:conf/cav/WulfDHR06}.
To do that, we first define the poset of antichains in which the forward antichains algorithm computes its fixpoint.
Then, we give a formal definition of the forward antichains algorithm for deciding language inclusion and show that this algorithm coincides with \AlgRegularA when applied to the reverse automata.
Since language inclusion between the languages generated by two NFAs holds if{}f inclusion holds between the languages generated by their reverse NFAs, we conclude that the algorithm \AlgRegularA is equivalent to the forward antichains algorithm.
Finally, we show how the different variants of the antichains algorithm, including the original backward antichains algorithm~\cite[Theorem 6]{DBLP:conf/cav/WulfDHR06}, can be derived within our framework by considering the adequate quasiorders.
\paragraph*{Forward Antichains Algorithm}
Let \(\mathcal{N}_1=\tuple{Q_1,\Sigma,\delta_1,I_1,F_1}\) and \(\mathcal{N}_2=\tuple{Q_2,\Sigma,\delta_2,I_2,F_2}\) be two NFAs and consider the language inclusion problem \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\).
Let us consider the following poset of antichains
\( \tuple{\AC_{\tuple{\wp(Q_2),\subseteq}},\wsqsubseteq} \) where
\[X \wsqsubseteq Y \udiff \forall y \in Y, \exists x \in X, \; x \subseteq y\enspace \]
and notice that \(\wsqsubseteq\) coincides with the reverse
relation \(\sqsubseteq^{-1}\).
As observed by \citet[Lemma 1]{DBLP:conf/cav/WulfDHR06}, it turns out that \( \tuple{\AC_{\tuple{\wp(Q_2),\subseteq}},\wsqsubseteq, \wsqcup, \wsqcap, \{\varnothing\}, \varnothing} \) is a finite lattice, where \(\wsqcup\) and \(\wsqcap\) denote, resp., lub and glb, and $\{\varnothing\}$ and $\varnothing$ are, resp., the least and greatest elements.
This lattice \( \tuple{\AC_{\tuple{\wp(Q_2),\subseteq}},\wsqsubseteq} \) is the domain in which the forward antichains algorithm computes on for deciding universality \citep[Theorem~3]{DBLP:conf/cav/WulfDHR06}.
The following result extends this forward algorithm in order to decide language inclusion.
\begin{theorem}[\textbf{{\citep[Theorems~3 and 6]{DBLP:conf/cav/WulfDHR06}}}] \label{theorem:antichainpaper}
Let
\begin{align*}
\vect{\mathcal{FP}} \ud \textstyle{\wbigsqcup}\{\vect{X} \in (\AC_{\tuple{\wp(Q_2),\subseteq}})^{|Q_1|} \mid \vect{X} = \Post_{\mathcal{N}_1}^{\mathcal{N}_2}(\vect{X})\;\wsqcap\; \tuple{\nullable{q \in I_1}{\{I_2\}}{\varnothing}}_{q\in Q_1}\}
\end{align*}
where
\begin{align*}
\mindex{\Post_{\mathcal{N}_1}^{\mathcal{N}_2}}(\tuple{X_q}_{q\in Q_1}) \ud \langle \lfloor\{\post_a^{\mathcal{N}_2}(x) {\in} \wp(Q_2) \mid \exists a {\in} \Sigma, q'{\in} Q_1, & q{\in}\delta_1(q',a) \wedge x \in X_{q'} \}\rfloor \rangle_{q \in Q_1}\enspace .
\end{align*}
Then, \(\lang{\mathcal{N}_1} \nsubseteq \lang{\mathcal{N}_2}\) if and only if there exists \(q \in F_1\) such that \(\vect{\mathcal{FP}}_q \,\wsqsubseteq\, \{F_2^c\} \).
\end{theorem}
\begin{proof}
Let us first introduce some notation necessary to describe the forward antichains algorithm by \citet{DBLP:conf/cav/WulfDHR06} for deciding \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\).
In the following, we consider the poset
\(\tuple{Q_1\times \wp(Q_2),\subseteq_\times}\) where
\[(q_1,x_1) \subseteq_\times
(q_2,x_2) \udiff q_1=q_2 \wedge x_1 \subseteq x_2 \enspace . \]
Then, let
\(\tuple{\AC_{\tuple{Q_1\times \wp(Q_2),\subseteq_\times}},\wsqsubseteq_\times,\wsqcup_\times, \wsqcap_\times}\) be the lattice of antichains over the poset \(\tuple{Q_1\times \wp(Q_2),\subseteq_\times}\) where:
\begin{align*}
X \wsqsubseteq_\times Y & \udiff \forall (q,y) \in Y, \exists (q,x) \in X , x \subseteq y \\
\textstyle{\min_{\times}}(X) &\ud \{(q,x) \in X \mid \forall (q',x') \in X, q=q' \Rightarrow x' \nsubset x\} \\
X \wsqcup_\times Y & \ud \textstyle{\min_{\times}}(\{(q,x \cup y) \mid (q,x) \in X,\, (q,y) \in Y\}) \\
X \wsqcap_\times Y & \ud \textstyle{\min_{\times}}(\{(q,z) \mid (q,z) \in X \cup Y \}) \enspace .
\end{align*}
Also, let $\Post: \AC_{\tuple{Q_1\times \wp(Q_2),\subseteq_\times}} \rightarrow \AC_{\tuple{Q_1\times \wp(Q_2),\subseteq_\times}}$ be defined as follows:
\begin{align*}
\Post(X) \ud \textstyle{\min_{\times}}(\{ (q,\post_a^{\mathcal{N}_2}(x)) \in Q_1\times \wp(Q_2) \mid \exists a \in \Sigma, q'\in Q_1,
(q',x) \in X , q' \ggoes{a}_{\mathcal{N}_1} q\}) \enspace .
\end{align*}
Then, it turns out that the dual of the backward antichains algorithm of \citet[Theorem~6]{DBLP:conf/cav/WulfDHR06} states that \(\lang{\mathcal{N}_1} \nsubseteq \lang{\mathcal{N}_2}\) if{}f there exists \(q \in F_1\) such that \(\mathcal{FP} \mathrel{\wsqsubseteq_\times} \{(q,F_2^c)\}\) where
\[\mathcal{FP} = {\textstyle\wbigsqcup_\times}\{X \in \AC_{\tuple{Q_1\times \wp(Q_2),\subseteq_\times}} \mid X = \Post(X)\;\wsqcap_\times\; (I_1 \times \{I_2\})\}\enspace .\]
\noindent
We observe that for every \(X\in\AC_{\tuple{Q_1\times \wp(Q_2),\subseteq_\times}}\), a pair \((q,x) \in Q_1\times \wp(Q_2)\) such that $(q,x)\in X$ is used by
\citet[Theorem~6]{DBLP:conf/cav/WulfDHR06} simply as
a way to associate states $q$ of \(\mathcal{N}_1\) with sets $x$ of states of \(\mathcal{N}_2\).
In fact, every antichain
\(X\in\AC_{\tuple{Q_1\times \wp(Q_2),\subseteq_\times}}\)
can be equivalently formalized
by a vector
\[\tuple{\{x \in \wp(Q_2) \mid (q,x)\in X\}}_{q \in Q_1}
\in (\AC_{\tuple{\wp(Q_2),\subseteq}})^{|Q_1|}\]
indexed by states \(q\in Q_1\) and whose components are antichains in $\AC_{\tuple{\wp(Q_2),\subseteq}}$.
Correspondingly, we consider
the lattice \(\tuple{\AC_{\tuple{\wp(Q_2),\subseteq}},\wsqsubseteq}\), where for
every pair of elements $X,Y\in \AC_{\tuple{\wp(Q_2),\subseteq}}$ we have that
\begin{align*}
X \wsqsubseteq Y &\udiff \forall y \in Y, \exists x \in X , x \subseteq y&
\textstyle{\min}(X) &\ud \{x \in X \mid \forall x' \in X, x' \nsubset x\} \\
X \wsqcup Y & \ud \textstyle{\min}(\{x \cup y \in \wp(Q_2) \mid x \in X, y \in Y\}) &
X \wsqcap Y & \ud \textstyle{\min}(\{z \in \wp(Q_2) \mid z \in X \cup Y\}) \enspace .
\end{align*}
Then, \(\Post\) can be replaced by \(\Post_{\mathcal{N}_1}^{\mathcal{N}_2}: (\AC_{\tuple{\wp(Q_2),\subseteq}})^{|Q_1|} \rightarrow (\AC_{\tuple{\wp(Q_2),\subseteq}})^{|Q_1|}
\), its equivalent formulation on vectors defined as follows:
\begin{align*}
\Post_{\mathcal{N}_1}^{\mathcal{N}_2}(\tuple{X_q}_{q\in Q_1}) \ud \langle\textstyle{\min}(\{\post_a^{\mathcal{N}_2}(x) \in \wp(Q_2) \mid \exists a \in \Sigma, q'\in Q_1,
x \in X_{q'} , q' \ggoes{a}_{\mathcal{N}_1} q \})\rangle_{q \in Q_1}\enspace .
\end{align*}
In turn, \(\mathcal{FP}\in \AC_{\tuple{Q_1\times \wp(Q_2),\subseteq_\times}}\) is replaced by the
following vector:
\[\vect{\mathcal{FP}} \ud \textstyle{\wbigsqcup}\{\vect{X}\in (\AC_{\tuple{\wp(Q_2),\subseteq}})^{|Q_1|} \mid \vect{X}=
\Post_{\mathcal{N}_1}^{\mathcal{N}_2}(\vect{X})\;\wsqcap\; \tuple{\nullable{q \in I_1}{\{I_2\}}{\varnothing}}_{q\in Q_1}\} \enspace .\]
Finally, the check \(\exists q \in F_1 , \mathcal{FP} \mathrel{\wsqsubseteq_\times} \{(q,F_2^c)\}\) becomes \(\exists q \in F_1 , \vect{\mathcal{FP}}_q \mathrel{\wsqsubseteq} \{F_2^c\} \).
\end{proof}
Let \(\mathcal{N}^R\) denote the reverse automaton of \(\mathcal{N}\), where arrows are flipped and the initial/final states become final/initial.
Note that language inclusion can be decided by considering the reverse automata since
\[\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2} \Leftrightarrow \lang{\mathcal{N}_1^R} \subseteq \lang{\mathcal{N}_2^R}\enspace . \]
Furthermore, it is straightforward to check that \(\Post_{\mathcal{N}_1}^{\mathcal{N}_2} = \Pre_{\mathcal{N}_1^R}^{\mathcal{N}_2^R}\).
We therefore obtain the following result as a consequence of Theorem~\ref{theorem:antichainpaper}.
\begin{corollary}\label{theorem:antichainpaperReverse}
Let
\begin{align*}
\vect{\mathcal{FP}} \ud \textstyle{\wbigsqcup}\{\vect{X} \in (\AC_{\tuple{\wp(Q_2),\subseteq}})^{|Q_1|} \mid \vect{X} = \Pre_{\mathcal{N}_1}^{\mathcal{N}_2}(\vect{X})\;\wsqcap\; \tuple{\nullable{q \in F_1}{\{F_2\}}{\varnothing}}_{q\in Q_1}\} \enspace .
\end{align*}
Then, \(\lang{\mathcal{N}_1} \nsubseteq \lang{\mathcal{N}_2}\) if{}f there exists \(q \in I_1\) such that \(\vect{\mathcal{FP}}_q \,\wsqsubseteq\, \{I_2^c\} \).
\end{corollary}
\paragraph*{From the Forward Antichains Algorithm to \textsc{FAIncS}}
Since \(\wsqsubseteq = \mathord{\sqsubseteq^{-1}}\), we have that \(\wsqcap = \sqcup\), \(\wsqcup = \sqcap\) and the greatest element $\varnothing$ for $\wsqsubseteq$ is the least element for $\mathord{\sqsubseteq}$.
Moreover, by~\eqref{def-antichain-state-abs}, $\alpha(\vectarg{\epsilon}{F_1}) = \tuple{\nullable{q \in F_1}{\{F_2\}}{\varnothing}}_{q\in Q_1}$.
Therefore, we can rewrite the vector
$\vect{\mathcal{FP}}$ of
Corollary~\ref{theorem:antichainpaperReverse} as
\[
\vect{\mathcal{FP}} = {\textstyle\bigsqcap}\{\vect{X} \in (\AC_{\tuple{\wp(Q_2),\subseteq}})^{|Q_1|} \mid \vect{X} = \Pre_{\mathcal{N}_1}^{\mathcal{N}_2}(\vect{X})\;\sqcup\; \alpha(\vectarg{\epsilon}{F_1})\}
\]
which is precisely the lfp in $\tuple{(\AC_{\tuple{\wp(Q_2),\subseteq}})^{|Q_1|}, \sqsubseteq}$ of $\Pre_{\mathcal{N}_1}^{\mathcal{N}_2}$ above
$\alpha(\vectarg{\epsilon}{F_1})$.
Hence, it turns out that the Kleene iterates of the least fixpoint computation
that converge to \(\vect{\mathcal{FP}}\) exactly coincide with the iterates computed by the $\Kleene$ procedure of the state-based algorithm
\AlgRegularA.
In particular, if \(\vect{Y}\) is the output vector of the call to $\Kleene$ at line~1 of
\AlgRegularA then \(\vect{Y} = \vect{\mathcal{FP}}\).
Furthermore,
\[\exists q\in I_1, \vect{\mathcal{FP}}_q \:\wsqsubseteq\: \{I_2^c\} \Leftrightarrow \exists q\in I_1, \exists S \in \vect{\mathcal{FP}}_q, \; S \cap I_2 = \varnothing\enspace .\]
Summing up, the \(\sqsubseteq\)-lfp algorithm \AlgRegularA coincides with the \(\wsqsubseteq\)-gfp antichains algorithm given by Corollary~\ref{theorem:antichainpaperReverse}.
\paragraph*{Backward Antichains Algorithm}
We can also derive an antichains algorithm for deciding language inclusion fully equivalent to the backward one of \citet[Theorem 6]{DBLP:conf/cav/WulfDHR06} by considering the
lattice \(\tuple{\AC_{\tuple{\wp(Q_2),\supseteq}},\sqsubseteq}\) for the dual lattice $\tuple{\wp(Q_2),\supseteq}$
and by replacing the functions \(\alpha\), \(\gamma\) and \(\Pre_{\mathcal{N}_1}^{\mathcal{N}_2}\) of Lemma~\ref{lemma:rhoisgammaalpha}, respectively, with:
\begin{align*}
& \alpha^c(X) \ud \lfloor \{ \cpre_u^{\mathcal{N}_2}(F_2^c) \in \wp(Q_2)\mid u\in X\} \rfloor \, ,\hspace{-13pt} \\
& \gamma^c(Y) \ud \{u \in \Sigma^* \mid \exists y \in Y , y \supseteq \cpre_{u}^{\mathcal{N}_2}(F^c_2) \}, \\
& {\CPre}_{\mathcal{N}_1}^{\mathcal{N}_2}(\tuple{X_q}_{q\in Q_1}) \ud \langle \lfloor \{ \cpre_a^{\mathcal{N}_2}(S) \in \wp(Q_2) \mid \exists a\in \Sigma, q'\in Q_1,
q'\in\delta_1(q,a) \wedge S \in X_{q'} \} \rfloor \rangle_{q\in Q_1} \enspace .
\end{align*}
where \(\cpre_u^{\mathcal{N}_2}(S) \ud (Pe_u^{\mathcal{N}_2}(S^c))^c\) for $u\in \Sigma^*$.
When instantiating Theorem~\ref{theorem:EffectiveAlgorithm} using these functions, we obtain an lfp algorithm computing on the lattice \(\tuple{\AC_{\tuple{\wp(Q_2),\supseteq}},\sqsubseteq}\).
Indeed, it turns out that
\[\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2} \Leftrightarrow \Kleene\big(\lambda \vect{X} \ldotp \CPre_{\mathcal{N}_1}^{\mathcal{N}_2}(\vect{X}) \sqcup \alpha^c(\vectarg{\epsilon}{F_1}),\vect{\varnothing}\big) \sqsubseteq \alpha^c(\vectarg{L_2}{I_1})\enspace .\]
It is easily seen that this algorithm coincides with the backward antichains algorithm defined by \citet[Theorem 6]{DBLP:conf/cav/WulfDHR06} since both compute on the same lattice, \(\minor{X}\) corresponds to the maximal (w.r.t.\ set inclusion) elements of \(X\), \(\alpha^c(\{\epsilon\}) = \{F_2^c\}\) and for all \(X \in \alpha^c(\wp(\Sigma^*))\), we have that \(X \sqsubseteq \alpha^c(L_2) \Leftrightarrow \forall S \in X, \; I_2 \nsubseteq S\).
\paragraph*{Variants of the Antichains Algorithm}
We have shown that the two forward/backward antichains algorithms introduced by \citet{DBLP:conf/cav/WulfDHR06} can be systematically derived by instantiating our framework and (possibly) considering the reverse automata.
Similarly, we can derive within our framework an algorithm equivalent to the backward antichains algorithm applied to the reverse automata and an algorithm equivalent to the forward antichains algorithm (without reverting the automata).
Table~\ref{table:antichainsAlgorithms} summarizes the relation between our framework and the antichains algorithms given (explicitly or implicitly) by \citet{DBLP:conf/cav/WulfDHR06}.
\begin{table}[!ht]
\centering
\setlength{\tabcolsep}{4pt}
\setlength{\extrarowheight}{1ex}
\begin{tabular}{c?c|c}
& \emph{Backward} & \emph{Forward} \\
\toprule
\(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\) & \(\cpre_u^{\mathcal{N}_2}(F_2^c) \subseteq \cpre_v^{\mathcal{N}_2}(F_2^c)\) & \(\post_u^{\mathcal{N}_2}(I_2) \subseteq \post_v^{\mathcal{N}_2}(I_2)\)\\
\(\lang{\mathcal{N}_1^R} \subseteq \lang{\mathcal{N}_2^R}\) & \(\cpost_u^{\mathcal{N}_2}(I_2^c) \subseteq \cpost_v^{\mathcal{N}_2}(I_2^c)\) & \(Pe_u^{\mathcal{N}_2}(F_2) \subseteq Pe_v^{\mathcal{N}_2}(F_2)\)
\end{tabular}
\caption{Summary of the quasiorders that should be used within our framework, i.e. using Theorem~\ref{theorem:EffectiveAlgorithm}, to derive the different antichains algorithms that are (explicitly or implicitly) given by \citet{DBLP:conf/cav/WulfDHR06}.
Each cell of the form \(f(u) \subseteq f(v)\) is the definition of the quasiorder \(u \leqslant v \ud f(u) \subseteq f(v)\) that should be used to derive the antichains algorithm given by the column for solving the language inclusion given by the row.}\label{table:antichainsAlgorithms}
\end{table}
The original antichains algorithms were later improved by \citet{Abdulla2010} and, subsequently, by \citet{DBLP:conf/popl/BonchiP13}. Among their improvements, they showed how to exploit a precomputed binary relation between pairs of states of the input automata such that language inclusion holds for all the pairs in the relation.
When that binary relation is a simulation relation, our framework allows us to partially match their results by using the quasiorder \(\preceq^{r}_{\mathcal{N}}\) defined in Section~\ref{subsec:state-qos}.
However, this quasiorder relation \(\preceq^{r}_{\mathcal{N}}\) does not consider pairs of states \(Q_1 \times Q_1\) whereas the aforementioned algorithms do.
\section{Inclusion for Context Free Languages}
\label{sec:context_free_languages}
In Section~\ref{sec:an_algorithmic_framework_for_language_inclusion_based_on_complete_abstractions} we used the general abstraction scheme presented in Section~\ref{sec:inclusion_checking_by_complete_abstractions} to derive two techniques (Theorems~\ref{theorem:FiniteWordsAlgorithmGeneral} and~\ref{theorem:EffectiveAlgorithm}) for defining algorithms for solving language inclusion problems.
Then, in Sections~\ref{sec:instantiating_the_framework_language_based_well_quasiorders} and~\ref{sec:novel_perspective_AC} we applied these techniques on different scenarios and derived algorithms for solving language inclusion problems \(L_1 \subseteq L_2\) where \(L_1\) and \(L_2\) are regular languages.
In this section, we show that the abstraction scheme from Section~\ref{sec:inclusion_checking_by_complete_abstractions} is general enough to cover language inclusion problems \(L_1 \subseteq L_2\) where \(L_1\) is context-free.
In particular, we replicate the developments from Sections~\ref{sec:an_algorithmic_framework_for_language_inclusion_based_on_complete_abstractions},~\ref{sec:instantiating_the_framework_language_based_well_quasiorders} and~\ref{sec:novel_perspective_AC} in order to extend our quasiorder-based framework for deciding the inclusion \(L_1 \subseteq L_2\) where \(L_1\) is a context-free language and \(L_2\) is regular.
\subsection{Extending the Framework to CFGs}
Similarly to the case of automata, a CFG \(\mathcal{G} = (\mathcal{V},\Sigma,P)\) in CNF induces the following set of equations:
\[\Eqn(\mathcal{G}) \ud \{X_i = {\textstyle \bigcup_{X_i \to \beta_j \in P}} \beta_j \mid i \in [0,n]\} \enspace .\]
Given a subset of variables \(S \subseteq \mathcal{V}\) of a grammar, the set of words generated from some variable in \(S\) is defined as
\[\mindex{W_{S}^{\mathcal{G}}} \ud \{w \in \Sigma^* \mid \exists X \in S, \; X \rightarrow^* w\} \enspace .\]
When \(S = \{X\}\) we slightly abuse the notation and write \(W_{X}^{\mathcal{G}}\).
Also, we drop the superscript \(\mathcal{G}\) when the grammar is clear from the context.
The language generated by \(\mathcal{G}\) is therefore \(\lang{\mathcal{G}} = W^{\mathcal{G}}_{X_0}\).
Next, we define the function \(\Fn_{\mathcal{G}}: \wp(\Sigma^*)^{|\mathcal{V}|}\to \wp(\Sigma^*)^{|\mathcal{V}|}\) and the vector \(\vect{b} \in \wp(\Sigma^*)^{|\mathcal{V}|}\), which are used to formalize the equations in \(\Eqn(\mathcal{G})\), as follows:
\begin{align*}
\vect{b} & \ud\tuple{b_i}_{i\in[0,n]} \in \wp(\Sigma^*)^{|\mathcal{V}|} &&\text{with } b_i \ud \{ \beta \mid X_i\rightarrow \beta\in P,\:\beta\in \Sigma\cup \{ \epsilon \}\}, \\
\Fn_{\mathcal{G} }(\vect{X}) & \ud \tuple{\beta_1^{(i)}\cup\ldots\cup\beta_{k_i}^{(i)}}_{i\in[0,n]} &&\text{with } \beta_j^{(i)}\in\mathcal{V}^2 \text{ and } X_i\rightarrow\beta_j^{(i)}\in P \enspace .
\end{align*}
Notice that function \(\lambda \vect{X}\ldotp \vect{b}\mathrel{\cup} \Fn_{\mathcal{G}}(\vect{X})\) is a well-defined monotone function in \(\wp(\Sigma^*)^{|\mathcal{V}|}\rightarrow \wp(\Sigma^*)^{|\mathcal{V}|}\), which therefore has the least fixpoint
\begin{equation}\label{eq:CFGFixpoint}
\tuple{Y_i}_{i\in[0,n]} = \lfp (\lambda \vect{X}\ldotp \vect{b}\cup \Fn_{\mathcal{G}}(\vect{X}))
\end{equation}
It is known \cite{ginsburg} that the language generated by \(\mathcal{G}\) is such that \(\lang{\mathcal{G}} = Y_{0}\).
\begin{example}\label{example:cfg}
Consider the following grammar in CNF:
\[\mathcal{G} = \tuple{\{X_0, X_1\}, \{a,b\}, \{X_0\rightarrow X_0X_1 \mid X_1X_0 \mid b,\: X_1 \rightarrow a\}}\enspace .\]
The corresponding equation system is
\[\Eqn(\mathcal{G}) = \begin{cases}
X_0 = X_0X_1 \cup X_1X_0 \cup \{b\}\\
X_1 =\{a\}
\end{cases}\]
so that
\begin{equation*}
\left( \begin{array}{c}
W_{X_0} \\ W_{X_1}
\end{array} \right)=
\lfp\biggl(\lambda \left( \begin{array}{c}
X_0 \\ X_1
\end{array} \right) .
\left(\begin{array}{c}
X_0X_1 \cup X_1X_0 \cup \{b\} \\
\{a\}
\end{array}\right)\biggr) = \left( \begin{array}{c}
a^*ba^* \\ a
\end{array} \right) \enspace .
\end{equation*}
\noindent
Moreover, we have that \(\vect{b} \in \wp(\Sigma^*)^2\) and \(\Fn_{\mathcal{G} }:\wp(\Sigma^*)^2 \rightarrow \wp(\Sigma^*)^2\) are given by
\begin{align*}
\vect{b} & =\tuple{\{b\},\{a\}} & \Fn_{\mathcal{G} }(\tuple{X_0,X_1}) &=\tuple{X_0X_1 \cup X_1X_0, \varnothing} \tag*{
{\ensuremath{\Diamond}}}
\end{align*}
\end{example}
Thus, it follows from Equation~\eqref{eq:CFGFixpoint} that
\begin{equation}\label{eq:CFGIncLfp}
\lang{\mathcal{G}} \subseteq L_2 \:\Leftrightarrow\:
\lfp (\lambda\vect{X}\ldotp \vect{b}\cup \Fn_{\mathcal{G}}(\vect{X})) \subseteq \vectarg{L_2}{X_0}
\end{equation}
where \(\vectarg{L_2}{X_0} \ud \tuple{\nullable{i=0}{L_2}{\Sigma^*}}_{i\in[0,n]}\).
As we did for the automata case in Section~\ref{sec:an_algorithmic_framework_for_language_inclusion_based_on_complete_abstractions}, we next apply Theorem~\ref{theorem:inc-check-comp-abs} in order to derive algorithms for solving the language inclusion problem \(\lang{\mathcal{G}} \subseteq L_2\) by using backward complete abstractions of \(\wp(Σ^*)\).
\begin{theorem}\label{theorem:rhoCFG}
Let \(\rho \!\in\! \uco(\wp(\Sigma^*))\) be backward complete for both \(\lambda X. Xa\) and \(\lambda X. aX\), for all \(a \!\in\! \Sigma\) and let \(\mathcal{G}=(\mathcal{V},\Sigma,P)\) be a CFG in CNF.
Then \(\rho\) is backward complete for \(\Fn_{\mathcal{G}}\) and \(\lambda\vect{X}\ldotp \vect{b}\cup \Fn_{\mathcal{G}}(\vect{X})\).
\end{theorem}
\begin{proof}
Let us first show that backward completeness for left and right concatenation can be extended from letter to words.
We give the proof for the concatenation to the left, the case of the concatenation to the right is symmetric.
We prove that \(\rho(w X) = \rho(w \rho(X))\) for every \(w\in\Sigma^*\).
We proceed by induction on the length of \(w\).
The base case is trivial because \(\rho\) is idempotent.
For the inductive case \(|w| > 0\) let \(w = a u\) for some
\(u\in\Sigma^*\) and \(a\in \Sigma\), so that:
\begin{align*}
\rho(a u X) &= \quad\text{[By backward completeness for \(\lambda X\ldotp a X\)]}\\
\rho(a \rho(u X)) &= \quad\text{[By inductive hypothesis]}\\
\rho(a \rho(u \rho(X))) &= \quad\text{[By backward completeness for \(\lambda X\ldotp a X\)]}\\
\rho(a u \rho(X)) &\enspace .
\end{align*}
Next we turn to the binary concatenation case, i.e. we prove that \(\rho(Y Z) = \rho(\rho(Y)\rho(Z))\) for all \(Y, Z \in \wp(\Sigma^*)\):
\begin{align*}
\rho(\rho(Y)\rho(Z)) &=\quad \text{[By definition of concatenation]}\\
\rho(\textstyle{\bigcup_{u\in\rho(Y)}} u \rho(Z)) &=\quad \text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho(\textstyle{\bigcup_{u\in\rho(Y)}} \rho(u \rho(Z)) ) &=\quad \text{[By backward completeness of \(\lambda X\ldotp w X\)]}\\
\rho(\textstyle{\bigcup_{u\in\rho(Y)}} \rho(u Z))
&=\quad\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho(\textstyle{\bigcup_{u\in\rho(Y)}} u Z) &=\quad\text{[By definition of concatenation]}\\
\rho(\rho(Y) Z) &=\quad\text{[By definition of concatenation]}\\
\rho(\textstyle{\bigcup_{v\in Z}} \rho(Y) v)&=\quad
\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho(\textstyle{\bigcup_{v\in Z}} \rho(\rho(Y) v))&=\quad
\text{[By backward completeness of \(\lambda X\ldotp X w\)]}\\
\rho(\textstyle{\bigcup_{v\in Z}} \rho(Y v))&=\quad
\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho(\textstyle{\bigcup_{v\in Z}} Y v)&=\quad\text{[By definition of concatenation]}\\
\rho(Y Z) & \enspace .
\end{align*}
Then, the proof follows the same lines of the proof of Theorem~\ref{theorem:backComplete}.
Indeed, it follows from the definition of \(\Fn_{\mathcal{G}}(\tuple{X_i}_{i\in[0,n]})\)
that: \begin{align*}
\rho({\textstyle\bigcup_{j=1}^{k_i}}\beta^{(i)}_j) &=\quad \text{[By definition of \(\beta^{(i)}_j\)]}\\
\rho({\textstyle\bigcup_{j=1}^{k_i}}X^{(i)}_j Y^{(i)}_j) &=\quad
\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho({\textstyle\bigcup_{j=1}^{k_i}}\rho(X^{(i)}_j Y^{(i)}_j)) &=\quad
\text{[By backward comp. of \(\rho\) for concatenation]}\\
\rho({\textstyle\bigcup_{j=1}^{k_i}}\rho( \rho(X^{(i)}_j) \rho(Y^{(i)}_j))) &=\quad
\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho({\textstyle\bigcup_{j=1}^{k_i}}\rho(X^{(i)}_j) \rho(Y^{(i)}_j)) & \enspace .
\end{align*}
\noindent
Hence, by a straightforward
componentwise application on vectors in \(\wp(\Sigma^*)^{|\mathcal{V}|}\), we obtain that \(\rho\) is backward complete for \(\Fn_\mathcal{G}\).
Finally, \(\rho\) is backward complete for
\(\lambda\vect{X}\ldotp (\vect{b}\cup \Fn_{\mathcal{G}}(\vect{X}))\),
because:
\begin{myAlignEP}
\rho(\vect{b}\cup \Fn_{\mathcal{G}}(\rho(\vect{X}))) &=
\quad\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho(\rho(\vect{b})\cup\rho(\Fn_{\mathcal{G}}(\rho (\vect{X})))) &=
\quad\text{[By backward comp. for \(\Fn_{\mathcal{G}}\)]}\\
\rho(\rho(\vect{b})\cup\rho(\Fn_{\mathcal{G}}(\vect{X}))) &=
\quad\text{[By Equation~\eqref{equation:lubAndGlb}]}\\
\rho(\vect{b}\cup \Fn_{\mathcal{G}}(\vect{X})) &\enspace .
\end{myAlignEP}
\end{proof}
As a consequence, by backward completeness of $\rho$ for \(\lambda\vect{X}\ldotp (\vect{b}\cup \Fn_{\mathcal{G}}(\vect{X}))\), by \eqref{eqn:lfpcompleteness}
it turns out that:
\[\rho (\lfp(\lambda\vect{X}\ldotp \vect{b}\cup \Fn_{\mathcal{G}}(\vect{X}))) =
\lfp(\lambda\vect{X}\ldotp \rho(\vect{b}\cup \Fn_{\mathcal{G}}(\vect{X}))) \enspace.\]
Note that if \(\rho\) is backward complete for both left and right concatenation and \(\rho(L_2)=L_2\) then, as a straightforward consequence of Equivalence~\eqref{eq:CFGIncLfp} and Theorems~\ref{theorem:inc-check-comp-abs} and~\ref{theorem:rhoCFG}, we have that:
\begin{equation}\label{equation:CFGcheck}
\lang{\mathcal{G}}\subseteq L_2 \Leftrightarrow \lfp(\lambda\vect{X}\ldotp \rho(\vect{b}\cup \Fn_{\mathcal{G}}(\vect{X}))) \subseteq \vectarg{L_2}{X_0} \enspace .
\end{equation}
Next, we present two techniques for solving the language inclusion problem \(\lang{\mathcal{G}} \subseteq L_2\) by relying on Equivalence~\eqref{equation:CFGcheck}.
As with the two techniques presented in Section~\ref{sec:SolvingAbstractInclusionCheck}, the first of these techniques allows us to define algorithms for deciding the inclusion by working on finite languages while the second one relies on the use of Galois Connections.
\subsection{Solving the Abstract Inclusion Check using Finite Languages}
The following result, which is an adaptation of Corollary~\ref{corol:FiniteWordsAlgorithm} for grammars, shows that the fixpoint iteration for \(\lfp(\rho (\vect{b}\cup \Fn_{\mathcal{G}}(\vect{X})))\) can be replicated by iterating on a set of functions \(\mathcal{F}\), and then abstracting the result, provided that all functions in \(\mathcal{F}\) meet a set of requirements.
\begin{lemma}\label{lemma:FiniteWordsAlgorithmCFG}
Let \(\mathcal{G}=\tuple{\mathcal{V},\Sigma,P}\) be a CFG in CNF, let \(ρ \in \uco(Σ^*)\) be backward complete for \(\lambda X\in \wp(\Sigma^*)\ldotp aX\) and \(\lambda X\in \wp(\Sigma^*)\ldotp Xa\) for all \(a\in \Sigma\) and let \(\mathcal{F}\) be a set of functions such that every \(f \in \mathcal{F}\) is of the form \(f: \wp(Σ^*)^{|\mathcal{V}|} \to \wp(Σ^*)^{|\mathcal{V}|}\) and satisfies
\(\rho (\vect{b}\cup \Fn_{\mathcal{G}}(\vect{X})) = ρ(f(\vect{X}))\).
Then, for all \(0 \leq n\),
\[(ρ(\vect{b}\cup \Fn_{\mathcal{G}}(\vect{X})))^n = ρ(\mathcal{F}^n(\vect{X})) \enspace .\]
\end{lemma}
\begin{proof}
We proceed by induction on \(n\).
\begin{myItem}
\item \emph{Base case:} Let \(n = 0\).
Then \(\mathcal{F}^0(\vect{X}) = (ρ(\vect{b} \!\cup \Fn_{\mathcal{G}}(\vect{X})))^0 = \vect{\varnothing}\).
\item \emph{Inductive step:} Assume that \(ρ(\mathcal{F}^n(\vect{X})) = (ρ(\vect{b} \!\cup \Fn_{\mathcal{G}}(\vect{X})))^n\) holds for some value \(n \geq 0\).
To simplify the notation, let \(\mathcal{P}(\vect{X}) = \vect{b} \!\cup \Fn_{\mathcal{G}}(\vect{X})\) so that \(ρ\mathcal{F}^n = (ρ\mathcal{P})^n\).
Then
\begin{align*}
ρ\mathcal{F}^{n{+}1}(\vect{X}) & = \quad \text{[Since \(\mathcal{F}^{n{+}1} = \mathcal{F}^n\mathcal{F}\)]} \\
ρ\mathcal{F}^n\mathcal{F}(\vect{X}) & = \quad \text{[By Inductive Hypothesis]} \\
(ρ\mathcal{P})^n\mathcal{F}(\vect{X}) & = \quad \text{[By Theorem~\ref{theorem:rhoCFG}, \(ρ\) is bw. complete for \(\mathcal{P}\)]}\\
(ρ\mathcal{P})^nρ\mathcal{F}(\vect{X}) & = \quad \text{[By Inductive Hypothesis]} \\
(ρ\mathcal{P})^nρ\mathcal{P}(\vect{X}) & = \quad \text{[By definition of \((ρ(\mathcal{P}))^n\)]} \\
(ρ\mathcal{P})^{n{+}1}(\vect{X})
\end{align*}
\end{myItem}
We conclude that \((ρ(\vect{b} \!\cup \Fn_{\mathcal{G}}(\vect{X})))^n = ρ(\mathcal{F}^n(\vect{X}))\) for all \(0 \leq n\).
\end{proof}
We are now in position to show that the procedure \(\KleeneQO(\abseq,\mathcal{F},b)\) can be used to compute \(\lfp(\lambda\vect{X}\ldotp \rho(\vect{b}\cup \Fn_{\mathcal{G}}(\vect{X})))\).
\begin{lemma}\label{lemma:KleeneQOLfp:CFG}
Let \(ρ \in \uco(Σ^*)\) be backward complete for \(\lambda X\in \wp(\Sigma^*)\ldotp aX\) and \(\lambda X\in \wp(\Sigma^*)\ldotp Xa\) for all \(a\in \Sigma\) such that \(\tuple{\{ρ(S) \mid S \in \wp(Σ^*)\}, \subseteq}\) is an ACC CPO and let \(\mathcal{G}=\tuple{\mathcal{V},\Sigma,P}\) be a CFG in CNF.
Let \(\mathcal{F}\) be a set of functions such that every \(f \in \mathcal{F}\) is of the form \(f: \wp(Σ^*)^{|\mathcal{V}|} \to \wp(Σ^*)^{|\mathcal{V}|}\) and satisfies
\(\rho (\vect{b}\cup \Fn_{\mathcal{G}}(\vect{X})) = ρ(f(\vect{X}))\).
Then,
\[\lfp(\lambda \vect{X}\ldotp\rho (\vect{b} \!\cup \Fn_{\mathcal{G}}(\vect{X}))) = ρ\left(\KleeneQO(\abseq,\mathcal{F},\vect{\varnothing})\right) \enspace .\]
Moreover, the iterates of \(\,\Kleene(\lambda \vect{X}\ldotp\rho (\vect{b} \!\cup \Fn_{\mathcal{G}}(\vect{X})),\vect{\varnothing})\) coincide in lockstep with the abstraction of the iterates of \(\,\KleeneQO(\abseq,\mathcal{F},\vect{\varnothing})\).
\end{lemma}
\begin{proof}
Since \(\tuple{\{ρ(S) \mid S \in \wp(Σ^*)\}, \subseteq}\) is an ACC CPO, by Theorem~\ref{theorem:Kleene}, we have that
\[\lfp(\lambda \vect{X}\ldotp\rho (\vect{b} \!\cup \Fn_{\mathcal{G}}(\vect{X}))) = \Kleene(\lambda \vect{X}\ldotp\rho (\vect{b} \!\cup \Fn_{\mathcal{G}}(\vect{X})), \vect{\varnothing})\]
On the other hand, by Lemma~\ref{lemma:FiniteWordsAlgorithmCFG}, the iterates of the above Kleene iteration coincide in lockstep with the abstraction of the iterates of \(\KleeneQO(\abseq,\mathcal{F},\vect{\varnothing})\) and, therefore,
\[\Kleene(\lambda \vect{X}\ldotp\rho (\vect{b} \!\cup \Fn_{\mathcal{G}}(\vect{X})), \vect{\varnothing}) = ρ\left(\KleeneQO(\abseq,\mathcal{F},\vect{\varnothing})\right)\]
As a consequence,
\[\lfp(\lambda \vect{X}\ldotp\rho (\vect{b} \!\cup \Fn_{\mathcal{G}}(\vect{X}))) = ρ\left(\KleeneQO(\abseq,\mathcal{F},\vect{\varnothing})\right) \enspace .\]
\end{proof}
We are now in position to introduce the equivalent of Theorem~\ref{theorem:FiniteWordsAlgorithmGeneral} for grammars.
\begin{theorem}\label{theorem:FiniteWordsAlgorithmGeneral:CFG}
Let \(\mathcal{G}=\tuple{\mathcal{V},\Sigma,P}\) be a CFG in CNF, let \(L_2\) be a regular language, let \(ρ \in \uco(Σ^*)\) and let \(\mathcal{F}\) be a set of functions.
Assume that the following properties hold:
\begin{myEnumI}
\item The abstraction \(ρ\) satisfies \(ρ(L_2) = L_2\) and it is backward complete for both \(\lambda X\in \wp(\Sigma^*)\ldotp aX\) and \(\lambda X\in \wp(\Sigma^*)\ldotp Xa\) for all \(a\in \Sigma\).\label{theorem:FiniteWordsAlgorithmGeneral:CFG:rho}
\item The set \(\tuple{\{ρ(S) \mid S \in \wp(Σ^*)\}, \subseteq}\) is an ACC CPO.\label{theorem:FiniteWordsAlgorithmGeneral:CFG:ACC}
\item Every function \(f_i\) in the set \(\mathcal{F}\) is of the form \(f_i: \wp(Σ^*)^{|\mathcal{V}|} \to \wp(Σ^*)^{|\mathcal{V}|}\), it is computable and satisfies \(\rho (\vect{b} \!\cup \Fn_{\mathcal{G}}(\vect{X})) = ρ(f_i(\vect{X}))\).\label{theorem:FiniteWordsAlgorithmGeneral:CFG:F}
\item There is an algorithm, say \(\abseq^{\sharp}(\vect{X}, \vect{Y})\), which decides the abstraction equivalence \(ρ(\vect{X}) = ρ(\vect{Y})\), for all \(\vect{X}, \vect{Y} \in \wp(Σ^*)^{|\mathcal{V}|}\).\label{theorem:FiniteWordsAlgorithmGeneral:CFG:EQ}
\item There is an algorithm, say \(\absincl(\vect{X})\), which decides the inclusion \(ρ(\vect{X}) \subseteq \vectarg{L_2}{X_0}\), for all \(\vect{X} \in \wp(Σ^*)^{|\mathcal{V}|}\).\label{theorem:FiniteWordsAlgorithmGeneral:CFG:INC}
\end{myEnumI}
Then, the following is an algorithm which decides whether \(\lang{\mathcal{G}} \subseteq L_2\):
\(\tuple{Y_i}_{i \in [0,n]} := \KleeneQO (\abseq^{\sharp},\mathcal{F}, \vect{\varnothing})\)\emph{;}
\emph{\textbf{return}} \(\absincl(\tuple{Y_i}_{i \in [0,n]})\)\emph{;}
\end{theorem}
\begin{proof}
It follows from hypotheses~\ref{theorem:FiniteWordsAlgorithmGeneral:CFG:rho},~\ref{theorem:FiniteWordsAlgorithmGeneral:CFG:ACC} and~\ref{theorem:FiniteWordsAlgorithmGeneral:CFG:F}, by Lemma~\ref{lemma:KleeneQOLfp:CFG}, that
\begin{equation}\label{eq:lfpKleeneQO:CFG}
\lfp(\lambda \vect{X}\ldotp\rho (\vect{b} \!\cup \Fn_{\mathcal{G}}(\vect{X}))) = ρ\left(\KleeneQO(\abseq,\mathcal{F},\vect{\varnothing})\right)
\end{equation}
Observe that function \(\abseq\) can be replaced by function \(\abseq^{\sharp}\) due to hypothesis~\ref{theorem:FiniteWordsAlgorithmGeneral:CFG:EQ}.
Moreover, it follows from Equivalence~\eqref{equation:CFGcheck}, which holds by hypothesis~\ref{theorem:FiniteWordsAlgorithmGeneral:CFG:rho}, and Equation~\eqref{eq:lfpKleeneQO:CFG} that
\[\lang{\mathcal{G}}\subseteq L_2 \Leftrightarrow ρ\left(\KleeneQO (\abseq^{\sharp}, \mathcal{F}, \vect{\varnothing})\right) \subseteq \vectarg{L_2}{X_0}\enspace .\]
Finally, hypotheses~\ref{theorem:FiniteWordsAlgorithmGeneral:CFG:EQ} and~\ref{theorem:FiniteWordsAlgorithmGeneral:CFG:INC} guarantee, respectively, the decidability of the inclusion check \(ρ\mathcal{F}(X) \subseteq ρ(X)\) performed at each step of the \(\KleeneQO\) iteration and the decidability of the inclusion of the lfp in \(\vectarg{L_2}{X_0}\).
\end{proof}
\subsection{Solving the Abstract Inclusion Check using Galois Connections}
The following result is the equivalent of Theorem~\ref{theorem:EffectiveAlgorithm} for context-free languages.
It shows that the language inclusion problem \(\lang{\mathcal{G}} \subseteq L_2\) can be solved by working on an abstract domain.
\begin{theorem}\label{theorem:EffectiveAlgorithmCFG}
Let \(\mathcal{G}=\tuple{\mathcal{V},\Sigma,P}\) be a CFG in CNF and let \(L_2\) be a language over \(\Sigma\).
Let \(\tuple{\wp(\Sigma^*),\subseteq} \galois{\alpha}{\gamma}\tuple{D,\leq_D}\) be a GC where \(\tuple{D,\leq_D}\) is a poset.
Assume that the following properties hold:
\begin{myEnumI}
\item \(L_2\in\gamma(D)\) and for every \( a \in \Sigma\), \(X \in \wp(\Sigma^*)\), \(\gamma\alpha(a X) = \gamma\alpha(a \gamma\alpha(X))\) and \(\gamma\alpha(Xa) = \gamma\alpha(\gamma\alpha(X)a)\).\label{theorem:EffectiveAlgorithmCFG:prop:rho}
\item \((D,\leq_D,\sqcup,\bot_D)\) is an effective domain, meaning that: \((D,\leq_D,\sqcup,\bot_D)\) is an ACC join-semilattice with bottom $\bot_D$,
every element of \(D\) has a finite representation, the binary relation
\(\leq_D\) is decidable and the binary lub \(\sqcup\) is computable.\label{theorem:EffectiveAlgorithmCFG:prop:absdecidable}
\item There is an algorithm, say \(\Fn^{\sharp}(\vect{X}^\sharp)\), which computes \(\alpha(\Fn_{\mathcal{G}}(\gamma(\vect{X}^\sharp)))\),
for all \(\vect{X}^\sharp\in \alpha(\wp(\Sigma^*))^{|\mathcal{V}|}\).
\label{theorem:EffectiveAlgorithmCFG:prop:abspre}
\item There is an algorithm, say \(\base^\sharp\), which computes \(\alpha(\vect{b})\).\label{theorem:EffectiveAlgorithmCFG:prop:abseps}
\item There is an algorithm, say \(\absincl(\vect{X}^\sharp)\), which decides the abstract inclusion \(\vect{X}^\sharp \leq_D \alpha(\vectarg{L_2}{X_0})\), for all \(\vect{X}^\sharp\in \alpha(\wp(\Sigma^*)^{|\mathcal{V}|})\).
\label{theorem:EffectiveAlgorithmCFG:prop:absincl}
\end{myEnumI}
Then, the following is an algorithm which decides whether \(\lang{\mathcal{G}} \subseteq L_2\):
\(\tuple{Y_i^\sharp}_{i \in [0,n]} := \Kleene (\lambda \vect{X}^\sharp\ldotp\base^\sharp \sqcup \Fn^{\sharp}(\vect{X}^\sharp), \vect{\bot_D})\)\emph{;}
\emph{\textbf{return}} \(\absincl(\tuple{Y_i^\sharp}_{i \in [0,n]})\)\emph{;}
\end{theorem}
\begin{proof}
Let \(\rho = \gamma\alpha\in \uco(\wp(\Sigma^*))\).
Then, it follows from property~\ref{theorem:EffectiveAlgorithmCFG:prop:rho} that \(L_2 \in \rho\), \(\rho(aX) = \rho(a\rho(X))\) and \(\rho(Xa) = \rho(\rho(X)a)\).
Therefore
\begin{align*}
\lang{\mathcal{G}}\subseteq L_2 &\Leftrightarrow
\quad\text{[By~\eqref{equation:CFGcheck}]}\\
\lfp(\lambda \vect{X}\ldotp\rho (\vect{b} \cup \Fn_{\mathcal{G}}(\vect{X}))) \subseteq \vectarg{L_2}{X_0} &\Leftrightarrow
\quad\text{[By Lemma~\ref{lemma:alpharhoequality}]}\\
\gamma(\lfp (\lambda \vect{X}^\sharp\ldotp\alpha(\vect{b}) \sqcup \alpha(\Fn_{\mathcal{G}}(\gamma(\vect{X}^\sharp))))) \subseteq \vectarg{L_2}{X_0} &\Leftrightarrow
\quad\text{[By GC and since $L_2\in \rho$]}\\
\lfp (\lambda \vect{X}^\sharp\ldotp\alpha(\vect{b}) \sqcup \alpha(\Fn_{\mathcal{G}}(\gamma(\vect{X}^\sharp)))) \leq_D \alpha(\vectarg{L_2}{X_0}) \enspace .
\end{align*}
\noindent
By hypotheses~\ref{theorem:EffectiveAlgorithmCFG:prop:absdecidable},~\ref{theorem:EffectiveAlgorithmCFG:prop:abspre} and~\ref{theorem:EffectiveAlgorithmCFG:prop:abseps} it turns out that \(\Kleene (\lambda \vect{X}^\sharp\ldotp\base^\sharp \sqcup \Fn^{\sharp}(\vect{X}^\sharp), \vect{\bot_D})\) is an algorithm computing \(\lfp (\lambda \vect{X}^\sharp\ldotp\alpha(\vect{b}) \sqcup \alpha(\Fn_{\mathcal{G}}(\gamma(\vect{X}^\sharp))))\).
In particular, these hypotheses ensure that the Kleene iterates of \(\lfp (\lambda \vect{X}^\sharp\ldotp\alpha(\vect{b}) \sqcup \alpha(\Fn_{\mathcal{G}}(\gamma(\vect{X}^\sharp))))\) starting from \(\vect{\bot_D}\) are computable, finitely many and that it is decidable whether the iterates have reached the fixpoint.
The hypothesis~\ref{theorem:EffectiveAlgorithmCFG:prop:absincl} ensures decidability of the required \(\leq_D\)-inclusion check of this least fixpoint in \(\alpha(\wp(\Sigma^*))^{|\mathcal{V}|}\).
\end{proof}
\subsection{Instantiating the Framework}
Let us instantiate the general algorithmic framework provided by Theorem~\ref{theorem:FiniteWordsAlgorithmGeneral:CFG} to the class of closure operators induced by quasiorder relations on words.
Recall that a quasiorder \(\leqslant\) on \(\Sigma^*\) is monotone if
\begin{equation}\label{def-mon}
\forall x_1, x_2 \in \Sigma^*, \forall a,b \in \Sigma, \; x_1 \leqslant x_2 \Rightarrow ax_1 b \leqslant ax_2b \enspace .
\end{equation}
\noindent
It follows that \(x_1 \leqslant x_2 \Rightarrow \forall u,v \in \Sigma^*, \; ux_1 v \leqslant ux_2 v\).
The following result is the equivalent to Lemma~\ref{lemma:properties} for \(L\)-consistent quasiorders and it allows us to characterize \(L\)-consistent quasiorders in terms of the induced closure.
\begin{lemma}\label{lemma:propertiesCFG}
Let \(L\in \wp(\Sigma^*)\) and \(\mathord{\leqslant_L}\) be a quasiorder on \(\Sigma^*\).
Then, \(\mathord{\leqslant_L}\) is an \(L\)-consistent quasiorder on \(\Sigma^*\) if and only if
\begin{myEnumA}
\item \(\rho_{\leqslant_L}(L) = L\), and \label{lemma:propertiesCFG:L}
\item \(\rho_{\leqslant_L}\) is backward complete for \(\lambda X\ldotp a X b\) for all \(a,b\in \Sigma\).\label{lemma:propertiesCFG:bw}
\end{myEnumA}
\end{lemma}
\begin{proof}
\begin{myEnumA}
\item It follows from Lemma~\ref{lemma:properties}~\ref{lemma:properties:L} since, by Definition~\ref{def:LConsistent}, a quasiorder is \(L\)-consistent if{}f it is left and right \(L\)-consistent.
\item We first prove that if \(\mathord{\leqslant_L}\) is monotone then, for all $X\in \wp(\Sigma^*)$ and all \(a, b\in\Sigma\), we have that
\(\rho_{\leqslant_L}(a X b) = \rho_{\leqslant_L}(a \rho_{\leqslant_L}(X) b)\).
Monotonicity of concatenation together with monotonicity and extensivity of the closure $\rho_{\leqslant_L}$ imply that \(\rho_{\leqslant_L}(a X b) \subseteq \rho_{\leqslant_L}(a \rho_{\leqslant_L}(X) b)\) holds.
For the reverse inclusion, we have that:
\begin{align*}
\rho_{\leqslant_L}(a \rho_{\leqslant_L}(X)b) &= \; \text{[By definition of \(\rho_{\leqslant_L}\)]}\\
\rho_{\leqslant_L}\left( \{ a y b\mid \exists x\in X, x \leqslant_L y \} \right)
&= \; \text{[By definition of \(\rho_{\leqslant_L}\)]}\\
\{ z \mid \exists x\in X, y\in \Sigma^*,\, x\leqslant_L y \land a y b\leqslant_L z \}
&\subseteq \; \text{[By monotonicity of \(\leqslant_L\)]}\\
\{ z \mid \exists x\in X, y\in \Sigma^*,\, axb\leqslant_L ayb \land a y b\leqslant_L z \}
&= \; \text{[By transitivity of \(\leqslant_L\)]}\\
\{ z \mid \exists x\in X , a xb\leqslant_L z\}
&= \; \text{[By definition of \(\rho_{\leqslant_L}\)]}\\
\rho_{\leqslant_L}(a X b) &\enspace .
\end{align*}
Next, we show that if \(\rho_{\leqslant_L}(a X b) = \rho_{\leqslant_L}(a \rho_{\leqslant_L}(X) b)\) for all $X\in \wp(\Sigma^*)$ and \(a,b\in\Sigma\) then \(\leqslant_L\) is monotone.
Let $x_1,x_2\in \Sigma^*$, $a,b\in \Sigma$.
If $x_1 \leqslant_L x_2$ then
$\{x_2\} \subseteq \rho_{\leqslant_L}(\{x_1 \})$, and in turn
$a\{x_2\}b \subseteq a\rho_{\leqslant_L}(\{x_1 \})b$.
Since $\rho_{\leqslant_L}$ is monotone, we have that
$\rho_{\leqslant_L}(a\{x_2\}b) \subseteq \rho_{\leqslant_L}(a\rho_{\leqslant_L}(\{x_1 \})b)$, so that, by backward completeness,
$\rho_{\leqslant_L}(a\{x_2\}b) \subseteq \rho_{\leqslant_L}(a\{x_1 \}b)$.
It follows that, $a\{x_2\}b \subseteq \rho_{\leqslant_L}(a\{x_1\}b)$, namely,
$ax_1b \leqslant_L ax_2b$. By Equation~\eqref{def-mon}, this shows that $\leqslant_L$ is monotone.
\end{myEnumA}
\end{proof}
Analogously to the case of regular languages presented in Section~\ref{sec:instantiating_the_framework_language_based_well_quasiorders}, Theorem~\ref{theorem:FiniteWordsAlgorithmGeneral:CFG} induces an algorithm for deciding the language inclusion \(\lang{\mathcal{G}} \subseteq L_2\) for any CFG \(\mathcal{G}\) and regular language \(L_2\).
Indeed, we can apply Theorem~\ref{theorem:FiniteWordsAlgorithmGeneral:CFG} with \(\minor{\vect{b} \! \cup \Fn_{\mathcal{G}}(\vect{X})}\) interpreted as the set of functions \(f_i \ud \minor{\vect{b} \! \cup \Fn_{\mathcal{G}}(\vect{X})}_i\) where, again, each \(\minor{\cdot}_i\) is a function mapping each set \(X \in \wp(Σ^*)\) into a minor \(\minor{X}_i\).
As a consequence, we obtain Algorithm~\AlgGrammarW which, given a language \(L_2\) whose membership problem is decidable and a decidable \(L_2\)-consistent well-quasiorder, determines whether \(\lang{\mathcal{G}} \subseteq L_2\) holds.
\begin{figure}
\caption{Word-based algorithm for deciding \(\lang{\mathcal{G}} \subseteq L_2\).}
\label{alg:CFGIncW}
\end{figure}
\begin{theorem}\label{theorem:quasiorderAlgorithmGr}
Let \(\mathcal{G}=\tuple{\mathcal{V},\Sigma,P}\) be a CFG in CNF and let \(L_2\in \wp(\Sigma^*)\) be a language such that:
\begin{myEnumIL}
\item membership $u\in L_2$ is decidable; \label{theorem:quasiorderAlgorithmGr:membership}
\item there exists a decidable \(L_2\)-consistent well-quasiorder on $\Sigma^*$.\label{theorem:quasiorderAlgorithmGr:decidableL}
\end{myEnumIL}
Then, Algorithm \AlgGrammarW decides the inclusion \(\lang{\mathcal{G}} \subseteq L_2\).
\end{theorem}
\begin{proof}
Let $\leqslant_{L_2}$ be a decidable $L_2$-consistent well-quasiorder on $\Sigma^*$.
Then, we check that hypotheses~\ref{theorem:FiniteWordsAlgorithmGeneral:CFG:rho}-\ref{theorem:FiniteWordsAlgorithmGeneral:CFG:INC} of Theorem~\ref{theorem:FiniteWordsAlgorithmGeneral:CFG} are satisfied.
\begin{myEnumA}
\item It follows from hypothesis~\ref{theorem:quasiorderAlgorithmGr:decidableL} and Lemma~\ref{lemma:propertiesCFG} that \(\leqslant_{L_2}\) is backward complete for left and right concatenation and satisfies \(ρ_{\leqslant_{L_2}}(L_2) = L_2\).
\item Since \(\leqslant_{L_2}\) is a well-quasiorder, it follows that \(\tuple{\{ρ_{\leqslant_{L_2}}(S) \mid S \in \wp(Σ^*)\}, \subseteq}\) is an ACC CPO.
\item Let \(\lfloor\vect{b} \cup \Fn_{\mathcal{G}}(\vect{X})\rfloor\) be the set of functions \(f_i\) each of which maps each set \(X \in \wp(Σ^*)\) into a minor of \(\vect{b} \cup \Fn_{\mathcal{G}}(\vect{X})\).
Since \(\rho_{\leqslant_{L_2}}(X) = ρ_{\leqslant_{L_2}}(\minor{X})\) for all \(X \in \wp(Σ^*)^{|\mathcal{V}|}\) then all functions \(f_i\) satisfy
\[\rho (\vect{b} \!\cup \Fn_{\mathcal{G}}(\vect{X})) = ρ(f_i(\vect{X}))\enspace . \]
\item The equality \(ρ_{\leqslant_{L_2}}(S_1) = ρ_{\leqslant_{L_2}}(S_2)\) is decidable for every \(S_1, S_2 \in \wp(Σ^*)^{|\mathcal{V}|}\) since \(ρ_{\leqslant_{L_2}}(S_1) = ρ_{\leqslant_{L_2}}(S_2) \Leftrightarrow S_1 \sqsubseteq_{\leqslant_{L_2}} S_2 \land S_2 \sqsubseteq_{\leqslant_{L_2}} S_1\) and \(\leqslant_{L_2}\) is decidable.
\item Since \(\vectarg{L_2}{X_0} = \tuple{\nullable{i = 0}{L_2}{\Sigma^*}}_{i \in [0,n]}\), the inclusion trivially holds for all components \(Y_i\) with \(i \neq 0\).
Therefore, it suffices to check whether \(Y_0 \subseteq L_2\) holds.
Since \(Y_0 = \minor{S}\) for some set \(S \in \wp(Σ^*)\), the inclusion \(Y_0 \subseteq L_2\) can be decided by performing finitely many membership tests, which is exactly the check performed by lines 2-4 of Algorithm~\AlgGrammarW.
By hypothesis~\ref{theorem:quasiorderAlgorithmGr:membership}, this check is decidable.
\end{myEnumA}
\end{proof}
\subsubsection{Myhill and State-based Quasiorders}
In the following, we will consider two quasiorders on $\Sigma^*$ and we will show that they fulfill the requirements of Theorem~\ref{theorem:quasiorderAlgorithmGr}, so that they yield algorithms for deciding the inclusion \(\lang{\mathcal{G}} \subseteq L_2\) for every CFG \(\mathcal{G}\) and regular language \(L_2\).
The \emph{context} of a word \(w\in \Sigma^*\) w.r.t a given language \(L \in \wp(\Sigma^*)\) is defined as:
\begin{align*}
\mindex{\ctx_L}(w) &\ud \{(u, v) \in \Sigma^* \times \Sigma^* \mid uwv\in L\}\enspace .
\end{align*}
Correspondingly, let us define the following quasiorder relation on \(\Sigma^*\):
\begin{align}\label{def-Myhillqo}
u\mindex{\leqslant_L} v &\udiff\; \ctx_L(u) \subseteq \ctx_L(v) \enspace .
\end{align}
\citet[Section 2]{deLuca1994} call \(\leqslant_L\) the \emph{Myhill quasiorder relative to \(L\)}.
The following result is the analogue
of Lemma~\ref{lemma:leftrightnerodegoodqo} for $L$-consistent and Myhill's quasiorders:
it shows that the Myhill's quasiorder is the weakest (i.e. greatest w.r.t.\ set inclusion between binary relations) \(L\)-consistent quasiorder for which Algorithm \AlgGrammarW can be instantiated to decide the inclusion \(\lang{\mathcal{G}}\subseteq L\).
\begin{lemma}\label{lemma:myhillgoodqo}
Let $L\in \wp(\Sigma^*)$.
\begin{myEnumA}
\item \(\mathord{\leqslant_L}\) is an \(L\)-consistent quasiorder.
If $L$ is regular then, additionally, \(\mathord{\leqslant_L}\) is a decidable well-quasiorder. \label{lemma:myhillgoodqo:Consistent}
\item If \(\mathord{\leqslant}\) is an \(L\)-consistent quasiorder on $\Sigma^*$ then \( \rho_{\leqslant_L} \subseteq \rho_{\leqslant} \).\label{lemma:myhillgoodqo:Incl}
\end{myEnumA}
\end{lemma}
\begin{proof}
The proof follows the same lines of the proof of Lemma~\ref{lemma:leftrightnerodegoodqo}.
\begin{myEnumA}
\item
\citet[Section 3]{deLuca1994} observe that \(\mathord{\leqslant_L}\) is monotone.
Moreover, if
$L$ is regular then \(\mathord{\leqslant_L}\) is a wqo \citep[Proposition~2.3]{deLuca1994}.
Let us observe that given \(u \in L\) and \(v \notin L\) we have that \((\epsilon, \epsilon) \in \ctx_L(u)\) while \((\epsilon, \epsilon) \notin \ctx_L(v)\).
Hence, \(\mathord{\leqslant_L} \cap (L \times L^c) = \varnothing\) and, therefore, \(\mathord{\leqslant_L}\) is an \(L\)-consistent quasiorder.
Finally, if $L$ is regular then \(\leqslant_L\) is clearly decidable.
\item
As shown by \citet{deLuca1994}, \(\mathord{\leqslant_L}\) is maximum in the set of all \(L\)-consistent quasiorders, i.e.\ every \(L\)-consistent quasiorder \(\leqslant\) is such that
\(x \leqslant y \Rightarrow x \leqslant_L y \).
As a consequence, \(\rho_{\leqslant}(X) \subseteq \rho_{\leqslant_L}(X)\) holds for all \(X\in \wp(\Sigma^*)\), namely \(\mathord{\leqslant} \subseteq \mathord{\leqslant_L}\).
\end{myEnumA}
\end{proof}
\begin{figure}
\caption{A finite automaton \(\mathcal{N}\).}
\label{fig:C}
\end{figure}
\begin{example}\label{example:CFGIncL}
Let us illustrate the use of the Myhill quasiorder $\leqslant_{\lang{\mathcal{N}}}$ in Algorithm \AlgGrammarW
for solving the language inclusion \(\lang{\mathcal{G}} \subseteq \lang{\mathcal{N}}\), where \(\mathcal{G}\) is the CFG in Example~\ref{example:cfg} and \(\mathcal{N}\) is the NFA depicted in Figure~\ref{fig:C}.
Recall that the equations for \(\mathcal{G}\) are:
\[\Eqn(\mathcal{G}) = \begin{cases}
X_0 = X_0X_1 \cup X_1X_0 \cup \{b\}\\
X_1 =\{a\}
\end{cases} \enspace .\]
\noindent
We write \(\{(S,T)\} \cup \{(X,Y)\}\) to denote the set \(\{(u,v) \mid (u,v) \in S\times T \cup X \times Y\}\).
Then, we have the following contexts (among others) for $L=\lang{\mathcal{N}}=(b+ab^*a)(a+b)^*$:
\begin{align*}
\ctx_L(\epsilon) & = \{(\epsilon, L)\} \cup \{(ab^*, b^*a\Sigma^*)\} \cup \{(L, \Sigma^*)\}&
\ctx_L(a) & = \{(\epsilon, b^*a\Sigma^*)\} \cup \{(ab^*, \Sigma^*)\} \cup \{(L,\Sigma^*)\} \\
\ctx_L(b) &= \{(\epsilon, \Sigma^*)\} \cup \{(ab^*, b^*a\Sigma^*)\} \cup \{(L, \Sigma^*)\}&
\ctx_L(ba) & = \{(\epsilon, \Sigma^*)\} \cup \{(ab^*, \Sigma^*)\} \cup \{(L, \Sigma^*)\}
\end{align*}
Moreover, \(\ctx_L(ab) = \ctx_L(a)\) and \(\ctx_L(ba) = \ctx_L(aa) = \ctx_L(aaa) = \ctx_L(aab) = \ctx_L(aba)\) and, since \(a \leqslant_{L} ba\) and \(\varepsilon \leqslant_L b\), it follows that \(\minor{\Sigma^*} = \{\epsilon, a\}\).
Recall that, as shown in Example~\ref{example:cfg}, $\vect{b}=\tuple{\{b\}, \{a\}}$ and $\Fn_{\mathcal{G} }(\tuple{X_0,X_1}) =\tuple{X_0X_1 \cup X_1X_0, \varnothing}$.
Next, we show the computation of the Kleene iterates according to Algorithm \AlgGrammarW when using the quasiorder \(\mathord{\leqslant_L}\).
\begin{align*}
\vect{Y}^{(0)} &= \vect{\varnothing}\\
\vect{Y}^{(1)} &= \lfloor\vect{b}\rfloor = \tuple{\{b\}, \{a\}} \\
\vect{Y}^{(2)} &= \lfloor\vect{b}\rfloor \sqcup \lfloor\Fn_{\mathcal{G}}(\vect{Y}^{(1)})\rfloor
= \tuple{\{b\}, \{a\}} \sqcup \tuple{\minor{\{ba,ab\}},\minor{\varnothing}} = \tuple{\minor{\{ba,ab,b\}}, \minor{\{a\}}} = \tuple{\{ab, b\}, \{a\}}\\
\vect{Y}^{(3)} &= \lfloor\vect{b}\rfloor \sqcup \lfloor\Fn_{\mathcal{G}}(\vect{Y}^{(2)})\rfloor = \tuple{\{b\}, \{a\}} \sqcup \tuple{\minor{\{aba, ba, aab, ab\}},\minor{\varnothing}}\\
&=\tuple{\minor{\{aba, ba, aab, ab, b\}}, \minor{\{a\}}} = \tuple{\{ab, b\}, \{a\}}
\end{align*}
The least fixpoint is therefore \(\vect{Y} = \tuple{\{ab, b\}, \{a\}}\).
Since $ab\in \vect{Y}_0$ but \(ab \notin \lang{\mathcal{N}}\) then Algorithm \AlgGrammarW concludes that the inclusion \(\lang{\mathcal{G}} \subseteq \lang{\mathcal{N}}\) does not hold.
{\ensuremath{\Diamond}}
\end{example}
Similarly to Section~\ref{sec:instantiating_the_framework_language_based_well_quasiorders}, we also consider a state-based quasiorder that can be used with Algorithm \AlgGrammarW.
First, given an NFA \(\mathcal{N} = \tuple{Q, \delta, I, F, \Sigma}\) we define the state-based equivalent of the context of a word \(w \in \Sigma^*\) as follows:
\[\ctx_{\mathcal{N}}(w) \ud \{(q,q') \in Q \times Q \mid q \stackrel{w}{\leadsto} q' \} \enspace .\]
Then, the quasiorder \(\leqslant_{\mathcal{N}}\) on $\Sigma^*$ is defined as follows: for all $u,v\in \Sigma^*$,
\begin{equation}\label{eqn:state-qo:CFG}
u \leqslant_{\mathcal{N}} v \udiff \ctx_{\mathcal{N}}(u) \subseteq \ctx_{\mathcal{N}}(v) \enspace .
\end{equation}
The following result is the analogue of Lemma~\ref{lemma:LAconsistent} and
shows that \(\mathord{\leqslant_{\mathcal{N}}}\) is a \(\lang{\mathcal{N}}\)-consistent well-quasiorder, hence it can be used with Algorithm~\AlgGrammarW to decide the inclusion \(\lang{\mathcal{G}} \subseteq \lang{\mathcal{N}}\).
\begin{lemma}\label{lemma:LAconsistent:CFG}
The relation \(\mathord{\leqslant_{\mathcal{N}}}\) is a decidable \(\lang{\mathcal{N}}\)-consistent wqo.
\end{lemma}
\begin{proof}
For every \(u \in \Sigma^*\), \(\ctx_{\mathcal{N}}(u)\) is a finite and computable set, so that \(\mathord{\leqslant_{\mathcal{N}}}\) is a decidable wqo.
Next, we show that \(\mathord{\leqslant_{\mathcal{N}}}\) is \(\lang{\mathcal{N}}\)-consistent according to Definition~\ref{def:LConsistent}~\ref{eq:LConsistentPrecise}-\ref{eq:LConsistentmonotone}.
\begin{myEnumA}
\item By picking \(u\!\in\! \lang{\mathcal{N}}\) and \(v\!\notin\! \lang{\mathcal{N}}\) we have that \(\ctx_{\mathcal{N}}(u)\) contains a pair \((q_i, q_f)\) with \(q_i \!\in\! I\) and \(q_f \in F\) while \(\ctx_{\mathcal{N}}(v)\) does not, hence \(u \not\leqslant_{\mathcal{N}} v\).
Therefore, \(\leqslant_{\mathcal{N}} \cap (\lang{\mathcal{N}} \times \lang{\mathcal{N}}^c) = \varnothing\).
\item Let us check that $\leqslant_{\mathcal{N}}$ is monotone.
To that end, observe that $\ctx_{\mathcal{N}}: \tuple{\Sigma^*,\leqslant_{\mathcal{N}}} \rightarrow \tuple{\wp(Q^2),\subseteq}$ is a monotone function.
Therefore, for all $x_1,x_2\in \Sigma^*$ and $a,b\in \Sigma$ we have that
\begin{align*}
x_1 \leqslant_{\mathcal{N}} x_2 & \Rightarrow \quad\text{[By def.\ of \(\leqslant_{\mathcal{N}}\)]} \\
\ctx_{\mathcal{N}}(x_1) \subseteq \ctx_{\mathcal{N}}(x_2) & \Rightarrow \quad\text{[Since $\ctx_\mathcal{N}$ is monotone]} \\
\ctx_{\mathcal{N}}(ax_1 b) \subseteq \ctx_{\mathcal{N}}(a x_2 b) & \Rightarrow \quad \text{[By def.\ of \(\leqslant_{\mathcal{N}}\)]} \\
ax_1b \leqslant_{\mathcal{N}} ax_2b & \enspace . \tag*{\qedhere}
\end{align*}
\end{myEnumA}
\end{proof}
For the Myhill wqo
$\leqslant_{\lang{\mathcal{N}}}$, it turns out that for all $u,v\in \Sigma^*$,
\begin{align*}
u \leqslant_{\lang{\mathcal{N}}} v \Leftrightarrow \begin{array}{c}
\ctx_{\lang{\mathcal{N}}}(u)\\
\subseteq \\
\ctx_{\lang{\mathcal{N}}}(v) \end{array} \Leftrightarrow \begin{array}{c}
\{(x, y) \mid x \in W_{I,q} \land y \in W_{q', F} \land q \stackrel{u}{\leadsto} q'\} \\
\subseteq\\
\{(x, y) \mid x \in W_{I,q} \land y \in W_{q', F} \land q \stackrel{v}{\leadsto} q'\}
\end{array}
\end{align*}
Therefore, \(u \leqslant_{\mathcal{N}} v \Rightarrow u \leqslant_{\lang{\mathcal{N}}} v\), hence \(\mathord{\leqslant_{\mathcal{N}}} \subseteq \mathord{\leqslant_{\lang{\mathcal{N}}}}\) holds.
\begin{example}\label{example:CFGIncA}
Let us illustrate the use of the state-based quasiorder $\leqslant_{\mathcal{N}}$ to solve the language inclusion \(\lang{\mathcal{G}} \subseteq \lang{\mathcal{N}}\) of Example~\ref{example:CFGIncL}.
Here, we have the following contexts (among others):
\begin{align*}
\ctx_{\mathcal{N}}(\epsilon) & = \{(q_1, q_1), (q_2, q_2), (q_3,q_3)\} & \ctx_{\mathcal{N}}(a) & = \{(q_1, q_2), (q_2, q_3), (q_3,q_3)\} \\
\ctx_{\mathcal{N}}(b) &= \{(q_1,q_3), (q_2,q_2), (q_3, q_3)\} & \ctx_{\mathcal{N}}(aa) & = \{(q_1,q_3), (q_2,q_3), (q_3,q_3)\}
\end{align*}
Moreover, \(\ctx_{\mathcal{N}}(ab) = \ctx_{\mathcal{N}}(a)\) and \(\ctx_{\mathcal{N}}(ba) = \ctx_{\mathcal{N}}(aa) = \ctx_{\mathcal{N}}(baa) = \ctx_{\mathcal{N}}(aab) = \ctx_{\mathcal{N}}(aba)\).
Recall from Example~\ref{example:CFGIncL} that for the Myhill wqo we have that \(a \leqslant_{\lang{\mathcal{N}}} ba\), while for the state-based qo \(a \not\leqslant_{\mathcal{N}} ba\).
Next, we show the Kleene iterates computed by Algorithm \AlgGrammarW when using the wqo \(\mathord{\leqslant_{\mathcal{N}}}\).
\begin{align*}
\vect{Y}^{(0)} &= \vect{\varnothing}\\
\vect{Y}^{(1)} &= \minor{\vect{b}} = \tuple{\{b\}, \{a\}} \\
\vect{Y}^{(2)} &= \lfloor\vect{b}\rfloor \sqcup \lfloor\Fn_{\mathcal{G}}(\vect{Y}^{(1)})\rfloor = \tuple{\minor{\{ba,ab,b\}}, \minor{\{a\}}} = \tuple{\{ba, ab, b\}, \{a\}}\\
\vect{Y}^{(3)} &= \lfloor\vect{b}\rfloor \sqcup \lfloor\Fn_{\mathcal{G}}(\vect{Y}^{(2)})\rfloor = \tuple{\minor{\{aba, aab, ab, baa, aba, ba, b\}}, \minor{\{a\}}} = \tuple{\{ba, ab, b\}, \{a\}}
\end{align*}
The least fixpoint is therefore \(\vect{Y} = \tuple{\{ba, ab, b\}, \{a\}}\).
Since $ab\in \vect{Y}_0$ but \(ab \notin \lang{\mathcal{N}}\), Algorithm \AlgGrammarW concludes that the inclusion \(\lang{\mathcal{G}} \subseteq \lang{\mathcal{N}}\) does not hold.
{\ensuremath{\Diamond}}
\end{example}
\subsection{A Systematic Approach to the Antichain Algorithm}\label{sec:ACGrammar}
Consider a CFG \(\mathcal{G}=\tuple{\mathcal{V},\Sigma,P}\) and an NFA \(\mathcal{N}=\tuple{Q,\Sigma,\delta,I,F}\) and let \(\leqslant_{\mathcal{N}}\) be the \(\lang{\mathcal{N}}\)-consistent wqo defined in~\eqref{eqn:state-qo:CFG}.
Theorem~\ref{lemma:FiniteWordsAlgorithmCFG} shows that the algorithm \AlgGrammarW solves the inclusion problem \(\lang{\mathcal{G}}\subseteq \lang{\mathcal{N}}\) by working with finite languages.
Similarly to the case of the quasiorder \(\leqslant_{\mathcal{N}}^{\ell}\) (Section~\ref{sec:novel_perspective_AC}) it suffices to keep the sets \(\ctx_{\mathcal{N}}(u)\) of pairs of states of $Q$ for each word \(u\) instead of the words themselves.
Therefore, we can systematically derive a ``state-based'' algorithm analogous to \AlgGrammarW but working on the antichain poset \(\tuple{\AC_{\tuple{\wp(Q\times Q),\subseteq}},\sqsubseteq}\) viewed as an abstraction of \(\tuple{\wp(\Sigma^*), \subseteq}\).
Let us define the abstraction and concretization maps
\(\alpha\colon \wp(\Sigma^*) \rightarrow \AC_{\tuple{\wp(Q\times Q),\subseteq}}\) and
\(\gamma\colon \AC_{\tuple{\wp(Q\times Q),\subseteq}}\rightarrow\wp(\Sigma^*)\) and the abstract function
\({\Fn}_{\mathcal{G}}^{\mathcal{N}}(\tuple{X_i}_{i \in [0,n]}):{\wp(Q\times Q)^{|\mathcal{V}|}}\rightarrow \wp(Q \times Q)^{|\mathcal{V}|}\) as follows:
\begin{align*}
\alpha(X)&\ud \minor{\{\ctx_{\mathcal{N}}(u) \mid u \in X\}} \\
\gamma(Y) & \ud \{u \in \Sigma^* \mid \exists y \in Y, y \subseteq \ctx_{\mathcal{N}}(u)\} \\
\Fn_{\mathcal{G} }^{\mathcal{N}}(\tuple{X_i}_{i\in[0,n]}) & \ud \langle \minor{\{X_j {\comp} X_k \mid X_i {\to} X_j X_k \in P\}} \rangle_{i \in [0,n]}
\end{align*}
where \(X \comp Y \ud \{(q,q') \mid (q,q'') \in X \land (q'',q') \in Y\}\) is standard composition of
relations $X,Y\subseteq Q\times Q$.
\begin{lemma}\label{lemma:rhoisgammaalphaCFG}
The following hold:
\begin{myEnumA}
\item \(\tuple{\wp(\Sigma^*),\subseteq}\galois{\alpha}{\gamma}\tuple{\AC_{\tuple{\wp(Q\times Q),\subseteq}},\sqsubseteq}\) is a GC.\label{lemma:rhoisgammaalpha:GCCFG}
\item \(\gamma \comp \alpha = \rho_{\leqslant_{\mathcal{N}}}\)\label{lemma:rhoisgammaalpha:rhoCFG}
\item \(\Fn_{\mathcal{G}}^{\mathcal{N}}(\vect{X}) = \alpha \comp \Fn_{\mathcal{G}} \comp \gamma(\vect{X})\) for all \(\vect{X}\in \alpha(\wp(\Sigma^*)^{|\mathcal{V}|})\)\label{lemma:rhoisgammaalpha:preCFG}
\end{myEnumA}
\end{lemma}
\begin{proof}
\begin{myEnumA}
\item Let us first observe that $\alpha$ and $\gamma$ are well-defined.
First, $\alpha(X)$ is an antichain of $\tuple{\wp(Q\times Q),\subseteq}$ since it is a minor for the well-quasiorder \(\subseteq\) and, therefore, it is finite.
On the other hand, $\gamma(Y)$ is clearly an element of $\tuple{\wp(\Sigma^*), \subseteq}$ by definition.
Then, for all $X\in \wp(Σ^*)$ and
$Y\in \AC_{\tuple{\wp(Q \times Q),\subseteq}}$,
it turns out that:
\begin{align*}
\alpha(X) \sqsubseteq Y & \Leftrightarrow \quad\text{[By definition of \(\sqsubseteq\)]} \\
\forall z \in \alpha(X), \exists y \in Y, \; y \subseteq z &\Leftrightarrow \quad\text{[By definition of \(\alpha\) and \(\minor{\cdot}\)]} \\
\forall v \in X, \exists y \in Y, \; y \subseteq \ctx_{\mathcal{N}}(v) &\Leftrightarrow \quad\text{[By definition of \(\gamma\)]} \\
\forall v \in X, \; v \in \gamma(Y) & \Leftrightarrow \quad\text{[By definition of \(\subseteq\)]} \\
X \subseteq \gamma(Y) &\enspace .
\end{align*}
\item For all \(X \in \wp(\Sigma^*)\) we have that:
\begin{adjustwidth}{-0.5cm}{}
\begin{myAlign}{0pt}{}
\gamma(\alpha(X)) &= \quad\text{[By definition of $\alpha,\gamma$]}\\
\{v \in \Sigma^* \mid \exists u\in \Sigma^*, \ctx_{\mathcal{N}}(u) \in \lfloor \{ \ctx_{\mathcal{N}}(w) \mid w\in X\} \rfloor \land \ctx_{\mathcal{N}}(u) \subseteq \ctx_{\mathcal{N}}(v)\} \span \\
&= \quad \text{[By definition of minor]} \\
\{v \in \Sigma^* \mid \exists u\in X,\, \ctx_{\mathcal{N}}(u) \subseteq \ctx_{\mathcal{N}}(v)\} &= \quad \text{[By definition of \(\mathord{\leqslant_{\mathcal{N}}}\)]}\\
\{v \in \Sigma^* \mid \exists u \in X ,\, u \leqslant_{\mathcal{N}} v\}&= \quad\text{[By definition of\ \(\rho_{\leqslant_{\mathcal{N}}}\)]}\\
\rho_{\leqslant_{\mathcal{N}}}(X) &\enspace .
\end{myAlign}
\end{adjustwidth}
\item First, we show that \(\ctx_{\mathcal{N}}(uv) = \ctx_{\mathcal{N}}(u)\comp \ctx_{\mathcal{N}}(v)\) for every pair of words \(u,v \in \Sigma^*\).
\begin{align*}
\ctx_{\mathcal{N}}(uv) & = \;\text{[By def. of \(\ctx_{\mathcal{N}}\)]} \\
\{(q,q') \in Q^2 \mid q\goes{uv}q'\} & = \\
\span\specialcell{
\text{[Since \(q \goes{uv}q' \Leftrightarrow \exists q'' \in Q, \; q \goes{u}q'' \land q''\goes{v}q'\)]}} \\
\{(q,q') \in Q^2 \mid \exists q'' \in Q, q\goes{u}q'' \land q''\goes{v}q'\} &= \\
\span\specialcell{
\text{[By definition of \(\comp\) for binary relations]}} \\
\{(q,q'') \in Q^2 \mid q\goes{u}q''\} \comp \{(q'',q') \in Q^2 \mid q''\goes{v}q'\} &=\; \text{[By definition of \(W_{q,q'}\) and \(\ctx_{\mathcal{N}}\)]} \\
\ctx_{\mathcal{N}}(u)\comp \ctx_{\mathcal{N}}(v)
\end{align*}
Secondly, we show that \(\minor{X \comp Y} = \minor{\minor{X}\comp \minor{Y}}\) for every \(X, Y \in \wp(Q\times Q)\).
It is straightforward to check that \(\minor{X} \comp \minor{Y} \subseteq X \comp Y\) and, therefore, \(\minor{\minor{X}\comp \minor{Y}} \subseteq \minor{X \comp Y}\).
Next, we prove the reverse inclusion by contradiction.
Let \(x\comp y \in \minor{X \comp Y}\) with \(x \in X\) and \(y \in Y\).
Assume \(x \comp y \notin \minor{\minor{X}\comp \minor{Y}}\).
Then, there exists \(\tilde{x} \in \minor{X}\) and \(\tilde{y} \in \minor{Y}\) such that \(\tilde{x} \comp \tilde{y} \in \minor{\minor{X} \comp \minor{Y}}\) and \(\tilde{x} \comp \tilde{y} \subseteq x \comp y\) which contradicts the fact that \(x \comp y \in \minor{X \comp Y}\) unless \(\tilde{x} \comp \tilde{y} = x \comp y\), in which case \(x \comp y \in\minor{\minor{X} \comp \minor{Y}} \).
Therefore, \(\minor{X \comp Y} \subseteq \minor{\minor{X} \comp \minor{Y}}\).
Finally, we show that \(\alpha(\Fn_{\mathcal{G}}(\gamma(\vect{X}))) = {\Fn}_{\mathcal{G}}^{\mathcal{N}}(\vect{X})\) for all \(\vect{X}\in \alpha(\wp(\Sigma^*))^{|\mathcal{V}|}\).
\begin{adjustwidth}{-0.7cm}{}
\begin{myAlign}{0pt}{0pt}
\alpha(\Fn_{\mathcal{G}}(\gamma(\vect{X}))) &= \hspace{35pt}\\
\span\specialcell{
\text{[By definition of \(\Fn_{\mathcal{G}}\)]}}\\
\tuple{\alpha({\textstyle \bigcup_{X_i \to X_jX_k \in P}} \gamma(\vect{X}_{j})\gamma(\vect{X}_k))}_{i\in [0,n]} &= \\
\span\specialcell{
\text{[By definition of \(\alpha\)]}} \\
\langle\lfloor \{ \ctx_{\mathcal{N}}(w) \mid w \in {\textstyle \bigcup_{X_i \to X_jX_k \in P}} \gamma(\vect{X}_{j})\gamma(\vect{X}_k)\}\rfloor\rangle_{i\in [0,n]} &= \\
\langle\lfloor \{ \ctx_{\mathcal{N}}(w) \mid \exists X_i \to X_j X_k \in P, \; w\in \gamma(\vect{X}_{j})\gamma(\vect{X}_k)\}\rfloor\rangle_{i\in [0,n]} &= \\
\span\specialcell{
\text{[By definition of concatenation]}}\\
\langle\lfloor \{ \ctx_{\mathcal{N}}(uv) \mid \exists X_i \to X_j X_k \in P, \; u\in \gamma(\vect{X}_{j}) \land v \in \gamma(\vect{X}_k)\}\rfloor\rangle_{i\in [0,n]} &= \\
\span\specialcell{
\text{[Since \(\ctx_{\mathcal{N}}(uv) = \ctx_{\mathcal{N}}(u)\comp \ctx_{\mathcal{N}}(v)\)]}} \\
\langle\lfloor \{ \ctx_{\mathcal{N}}(u)\comp \ctx_{\mathcal{N}}(v) \mid \exists X_i \to X_j X_k \in P, \; u\in \gamma(\vect{X}_{j}) \land v \in \gamma(\vect{X}_k)\}\rfloor\rangle_{i\in [0,n]} & \\
\span\specialcell{
\text{[By definition of \(X \comp Y\)]}}\\
\langle\lfloor \{\ctx_{\mathcal{N}}(u) \mid u\in \gamma(\vect{X}_{j}),X_i\to X_jX_k\}\comp \{\ctx_{\mathcal{N}}(v) \mid v \in \gamma(\vect{X}_k),X_i{\rightarrow} X_jX_k\}\rfloor\rangle_{i\in [0,n]} & \\
\span\specialcell{
\text{[Since \(\minor{X{\comp} Y} = \minor{\minor{X}{\comp}\minor{Y}}\)]}}\\
\langle\lfloor \minor{\{\ctx_{\mathcal{N}}(u) \mid u\in \gamma(\vect{X}_{j}),X_i{\rightarrow} X_jX_k\}}\comp \minor{\{\ctx_{\mathcal{N}}(v) \mid v \in \gamma(\vect{X}_k),X_i{\rightarrow} X_jX_k\}}\rfloor\rangle_{i\in [0,n]} &= \\
\span\specialcell{
\text{[Since \(\alpha(\gamma(X))=\minor{X}\)]}}\\
\langle\lfloor \minor{\{\vect{X}_j \mid X_i \to X_jX_k\} }\comp \minor{\{\vect{X}_k \mid X_i \to X_jX_k\} } \rfloor\rangle_{i\in [0,n]} & \\
\span\specialcell{
\text{[Since \(\minor{X{\comp} Y} = \minor{\minor{X}{\comp}\minor{Y}}\)]}} \\
\langle\lfloor \{\vect{X}_j \mid X_i \to X_jX_k\} \comp \{\vect{X}_k \mid X_i \to X_jX_k\} \rfloor\rangle_{i\in [0,n]} & \\
\span\specialcell{
\text{[By definition of \(\comp\)]}}\\
\langle\lfloor\{ \vect{X}_j\comp \vect{X}_k \mid X_i \to X_jX_k\}\rfloor\rangle_{i\in [0,n]} &= \\
\span\specialcell{
\text{[By definition of \(\Fn_{\mathcal{G}}^{\mathcal{N}}\)]}} \\
{\Fn}_{\mathcal{G}}^{\mathcal{N}}(\vect{X}) \enspace .
\end{myAlign}
\end{adjustwidth}
\end{myEnumA}
\end{proof}
\RemoveAlgoNumber
\begin{algorithm}[!ht]
\SetAlgorithmName{\AlgGrammarA}{}
\caption{State-based algorithm for \(L(\mathcal{G}) \subseteq L(\mathcal{N})\)}\label{alg:CFGIncA}
\KwData{CFG \(\mathcal{G} = \tuple{\mathcal{V},\Sigma,P}\) and NFA \(\mathcal{N} = \tuple{Q,\Sigma,\delta,I,F}\)}
\(\tuple{Y_i}_{i\in[0,n]} := \Kleene (\lambda \vect{X}\ldotp\lfloor\vect{b}\rfloor \sqcup \Fn_{\mathcal{G}}^{\mathcal{N}}(\vect{X}), \vect{\varnothing})\)\;
\ForAll{\(y \in Y_0\)}{
\lIf{\(y \cap (I \times F) = \varnothing\)}{\Return \textit{false}}
}
\Return \textit{true}\;
\end{algorithm}
\begin{theorem}\label{theorem:statesQuasiorderAlgorithmCFG}
Let \(\mathcal{G}\) be a CFG and $\mathcal{N}$ be an NFA.
The algorithm \AlgGrammarA decides \(L(\mathcal{G}) \subseteq L(\mathcal{N})\).
\end{theorem}
\begin{proof}
We show that all the hypotheses~\ref{theorem:EffectiveAlgorithmCFG:prop:rho}-\ref{theorem:EffectiveAlgorithmCFG:prop:absincl} of Theorem~\ref{theorem:EffectiveAlgorithmCFG} are satisfied for the abstract domain \(\tuple{D,\leq_D}=\tuple{\AC_{\tuple{\wp(Q \times Q),\subseteq}},\sqsubseteq}\) as defined by the GC of Lemma~\ref{lemma:rhoisgammaalphaCFG}~\ref{lemma:rhoisgammaalpha:GCCFG}.
\begin{myEnumI}
\item Since, by Lemma~\ref{lemma:rhoisgammaalphaCFG}~\ref{lemma:rhoisgammaalpha:rhoCFG}, we have that \(\rho_{\leqslant_{\mathcal{N}}}(X) = \gamma(\alpha(X))\), it follows from Lemmas~\ref{lemma:propertiesCFG}~\ref{lemma:propertiesCFG:L} and~\ref{lemma:LAconsistent:CFG} that \(\gamma(\alpha(L_2)) = L_2\).
Moreover, for every \(a\in\Sigma\) and \(X\in\wp(\Sigma^*)\) we have \(\gamma\alpha(a X) = \gamma\alpha(a\gamma\alpha(X))\):
\begin{align*}
\gamma\alpha(a X) & = \quad \text{[In GCs \(\gamma = \gamma \alpha \gamma\)]} \\
\gamma\alpha\gamma\alpha(a X) & = \quad \text{[By Lemma~\ref{lemma:propertiesCFG}~\ref{lemma:propertiesCFG:bw} with \(\rho_{\leqslant_{\mathcal{N}}} = \gamma\alpha\)]} \\
\gamma\alpha\gamma \alpha(a\gamma \alpha(X)) &= \quad \text{[In GCs \(\gamma = \gamma \alpha \gamma\)]} \\
\gamma\alpha(a\gamma\alpha(X)) & \enspace.
\end{align*}
\item \( (\AC_{\tuple{\wp(Q\times Q),\subseteq}},\sqsubseteq) \) is effective because $Q$ is finite.
\item By Lemma~\ref{lemma:rhoisgammaalphaCFG}~\ref{lemma:rhoisgammaalpha:preCFG} we have that
\(\alpha(\Fn_{\mathcal{G}}(\gamma(\vect{X}))) = {\Fn}_{\mathcal{G}}^{\mathcal{N}}(\vect{X})\) for all vectors \(\vect{X}\in \alpha(\wp(\Sigma^*))^{|\mathcal{V}|}\).
\item \(\alpha(\{b\}) = \{\{(q,q') \mid q\ggoes{b}q'\}\}\) and \(\alpha({\varnothing})=\varnothing\), hence \(\minor{\alpha(\vect{b})}\) is trivial to compute.
\item Since \(\alpha(\vectarg{L_2}{X_0})=\tuple{\alpha(\nullable{i = 0}{L_2}{\Sigma^*})}_{i \in [0,n]}\), for all $\vect{Y}\in\alpha(\wp(\Sigma^*))^{|\mathcal{V}|}$ the relation \(\vect{Y} \sqsubseteq \alpha(\vectarg{L_2}{X_0})\) trivially holds for all components \(Y_i\) with \(i \neq 0\).
For $Y_0$, it suffices to show that
\(Y_0 \sqsubseteq \alpha(L_2) \Leftrightarrow \forall S \in Y_0, \; S \cap (I \times F) \neq \varnothing\), which is the check performed by lines 2-5 of algorithm \AlgGrammarA.
\begin{adjustwidth}{-0.5cm}{}
\begin{myAlign}{-\baselineskip}{0pt}
Y_0 \sqsubseteq \alpha(L_2) & \Leftrightarrow \quad \text{[Since \(Y_0 = \alpha(U)\) for some \(U \in \wp(\Sigma^*)\)]} \\
\alpha(U) \sqsubseteq \alpha(L_2) & \Leftrightarrow \quad \text{[By GC]} \\
U \subseteq \gamma(\alpha(L_2)) & \Leftrightarrow \quad \text{[By Lemmas~\ref{lemma:propertiesCFG},~\ref{lemma:LAconsistent:CFG} and~\ref{lemma:rhoisgammaalphaCFG}, $\gamma(\alpha(L_2))=L_2$]} \\
U \subseteq L_2 & \Leftrightarrow \quad \text{[Since \(Y_0 =\alpha(U) = \lfloor \{ \ctx_{\mathcal{N}}(u) \mid u\in U\} \rfloor \)]} \\
\forall u \in U, \ctx_{\mathcal{N}}(u) \cap (I \times F) \neq \varnothing & \Leftrightarrow \quad \text{[By definition of \(\ctx_{\mathcal{N}}(u)\)]} \\
\forall S \in Y_0, S \cap (I \times F) \neq \varnothing &\enspace .
\end{myAlign}
\end{adjustwidth}
\end{myEnumI}
Thus, by Theorem~\ref{theorem:EffectiveAlgorithmCFG}, Algorithm \AlgGrammarA decides \(\lang{\mathcal{G}} \subseteq \lang{\mathcal{N}}\).
\end{proof}
The resulting algorithm \AlgGrammarA shares some features with two previous works.
On the one hand, it is related to the work of \citet{Hofmann2014} which defines an abstract interpretation-based language inclusion decision procedure similar to ours.
Even though Hofmann and Chen's algorithm and ours both manipulate sets of pairs of states of an automaton,
their abstraction is based on equivalence relations and not quasiorders.
Since quasiorders are strictly more general than equivalences our framework can be instantiated to
a larger class of abstractions, most importantly coarser ones.
Finally, it is worth pointing out
that the approach of \citet{Hofmann2014} aims at including languages of finite and also infinite words.
A second related work is that of \citet{Holk2015} who define an antichain like algorithm manipulating sets of pairs of states.
\citet{Holk2015} tackle the language inclusion problem \(\lang{\mathcal{G}} \subseteq \lang{\mathcal{N}}\), where \(\mathcal{G}\) is a grammar and \(\mathcal{N}\) an automaton, by rephrasing the problem as a data flow analysis problem over a relational domain.
In this scenario, the solution of the problem requires the computation of a least fixpoint on the relational domain, followed by an inclusion check between sets of relations.
Then, they use the ``antichains principle'' to improve the performance of the fixpoint computation and, finally, they move from manipulating relations to manipulating pairs of states.
As a consequence, \citet{Holk2015} obtain an antichains algorithm for deciding \(\lang{\mathcal{G}} \subseteq \lang{\mathcal{N}}\).
By contrast, our approach is direct and systematic, since we derive \AlgGrammarA starting from the well-known Myhill quasiorder.
We believe our approach evidences the relationship between the original antichains algorithm of \citet{DBLP:conf/cav/WulfDHR06} for regular languages and the one of \citet{Holk2015} for context-free languages, which is the relation between Algorithms~\AlgRegularA and~\AlgGrammarA.
Specifically, we show that these two algorithms are conceptually identical and differ in the quasiorder used to define the abstraction in which the computation takes place.
\section{An Equivalent Greatest Fixpoint Algorithm}
\label{sec:greatest_fixpoint_based_algorithm}
Let us recall from \citet[Theorem~4]{cou00} that if \(g \colon C\rightarrow C\) is a monotone function on a complete lattice $\tuple{C,\leq,\vee,\wedge}$ which admits
a unique \demph{right-adjoint} \(\widetilde{g} \colon C\rightarrow C\), i.e., for every $c,c'\in C$, $g(c)\leq c' \Leftrightarrow c\leq \widetilde{g}(c')$ holds,
then the following equivalence holds
for all \(c,c'\in C\)
\begin{equation}\label{eqn:duality}
\lfp(\lambda x\ldotp c \vee g(x)) \leq c' \;\Leftrightarrow\;
c\leq \gfp(\lambda y\ldotp c' \wedge \widetilde{g}(y)) \enspace .
\end{equation}
This property has been used by \citet{cou00} to derive equivalent least/greatest fixpoint-based invariance proof methods for programs.
In the following, we use Equivalence~\eqref{eqn:duality} to derive an algorithm for deciding the language inclusion \(\lang{\mathcal{N}_1}\subseteq \lang{\mathcal{N}_2}\), which relies on the computation of a greatest fixpoint rather than a least fixpoint.
This can be achieved by exploiting the following simple observation, which provides an adjunction between concatenation and quotients
of sets of words.
\begin{lemma}\label{lemma:adjointbinary}
For all \(X,Y \in \wp(\Sigma^*)\) and \(w\in \Sigma^*\), \(wY \subseteq Z \Leftrightarrow Y \subseteq w^{-1}Z\) and \(Yw \subseteq Z \Leftrightarrow Y \subseteq Zw^{-1}\).
\end{lemma}
\begin{proof}
By definition, for all \(u\in \Sigma^*\), \(u \in w^{-1}Z\) if{}f \( wu \in Z\).
Hence,
\[Y\subseteq w^{-1}Z \Leftrightarrow \forall u\in Y,\: wu \in Z \Leftrightarrow wY\subseteq Z \enspace . \]
Symmetrically, \(Yw\subseteq Z\) \(\Leftrightarrow\) \(Y\subseteq Zw^{-1}\) holds.
\end{proof}
Given an NFA \(\mathcal{N} = \tuple{Q,\Sigma,\delta,I,F}\), we define $\widetilde{\Pre}_\mathcal{N}:\wp(\Sigma^*)^{|Q|} \rightarrow \wp(\Sigma^*)^{|Q|}$ as a function on $Q$-indexed vectors of sets of words as follows:
\[
\widetilde{\Pre}_\mathcal{N}(\tuple{X_q}_{q\in Q}) \ud \langle {\textstyle\bigcap_{a\in \Sigma,\, q \in Q,\, q'\in \delta(q,a)}}\; a^{-1} X_q
\rangle_{q'\in Q} \enspace ,
\]
where, as usual, \(\bigcap \varnothing = \Sigma^*\). It turns out that $\widetilde{\Pre}_\mathcal{N}$ is the usual weakest liberal precondition which is the
right-adjoint
of $\Pre_\mathcal{N}$.
\begin{lemma}\label{lemma:FnAdjoint}
For all \(\vect{X},\vect{Y}\in \wp(\Sigma^*)^{|Q|}\), \(\Pre_{\mathcal{N}}(\vect{X})\subseteq \vect{Y}\Leftrightarrow \vect{X}\subseteq \widetilde{\Pre}_{\mathcal{N}}(\vect{Y})\).
\end{lemma}
\begin{proof}
For all \(\vect{X},\vect{Y}\in \wp(\Sigma^*)^{|Q|}\),
\begin{align*}
\Pre_{\mathcal{N}}(\tuple{X_q}_{q\in Q}) \subseteq \tuple{Y_q}_{q\in Q} &\Leftrightarrow
\quad\text{[By definition of $\Pre_{\mathcal{N}}$]} \\
\forall q\in Q, \; {\textstyle \bigcup_{q\ggoes{a}{q'}}} a X_{q'} \subseteq Y_q &\Leftrightarrow
\\
\forall q,{q'}\in Q, \; q\ggoes{a} q' \Rightarrow a X_{q'} \subseteq Y_q &\Leftrightarrow
\quad\text{[By Lemma~\ref{lemma:adjointbinary}]}\\
\forall q,{q'}\in Q, \; q\ggoes{a} q' \Rightarrow X_{q'} \subseteq a^{-1} Y_q &\Leftrightarrow \quad\text{[\((\forall i \in I, \; X \subseteq Y_i) \Leftrightarrow X \subseteq {\textstyle\bigcap_{i\in I}} Y_i\)]}
\\
\forall {q'}\in Q, X_{q'} \subseteq {\textstyle\bigcap_{q\ggoes{a} q'}} a^{-1} Y_q&\Leftrightarrow
\quad\text{[By definition of $\widetilde{\Pre}_{\mathcal{N}}$]} \\
\tuple{X_q}_{q\in Q} \subseteq \widetilde{\Pre}_{\mathcal{N}}(\tuple{Y_q}_{q\in Q}) & \enspace . \tag*{\qedhere}
\end{align*}
\end{proof}
Hence, from Equivalences~\eqref{eq:lfp} and~\eqref{eqn:duality} we obtain:
\begin{equation}
\lang{\mathcal{N}_1} \subseteq L_2 \:\Leftrightarrow\:
\vectarg{\epsilon}{F_1} \subseteq \gfp(\lambda \vect{X}\ldotp \vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1}(\vect{X})) \enspace . \label{eq:inclgfplfp}
\end{equation}
The following algorithm \AlgRegularGfp decides the inclusion \(\lang{\mathcal{N}_1} \subseteq L_2\) by implementing the greatest fixpoint
computation from Equivalence~\eqref{eq:inclgfplfp}.
\begin{figure}
\caption{Greatest fixpoint algorithm for \(\lang{\mathcal{N}_1} \subseteq L_2\)}
\label{alg:RegIncGfp}
\end{figure}
\noindent
The intuition behind algorithm~\AlgRegularGfp is that
\[\lang{\mathcal{N}_1} \subseteq L_2 \Leftrightarrow \epsilon \in {\textstyle\bigcap_{w \in \lang{\mathcal{N}_1}}} w^{-1}L_2 \enspace .\]
Therefore, \AlgRegularGfp computes the set \({\textstyle\bigcap_{w \in \lang{\mathcal{N}_1}}} w^{-1}L_2\) by using the automaton \(\mathcal{N}_1\) and by
considering prefixes of \(\lang{\mathcal{N}_1}\) of increasing lengths. This means that
after \(n\) iterations of the \(\Kleene\) procedure, Algorithm \AlgRegularGfp has computed, for every state \(q \in Q_1\), the set
\[\bigcap_{wu\in \lang{\mathcal{N}_1}, |w| \leq n, q_0 \in I_1, q_0 \stackrel{w}{\leadsto} q} \hspace{-30pt}w^{-1}L_2 \enspace . \]
The regularity of \(L_2\) together with the property of regular languages of being closed under intersections and quotients show that each iterate computed by $\Kleene (\lambda \vect{X}\ldotp\vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1}(\vect{X}), \vect{{\Sigma^*}})$ is a (computable) regular language.
To the best of our knowledge, this language inclusion algorithm \AlgRegularGfp has never been described in the literature before.
\\
\indent
Next, we discharge the fundamental assumption on which the correctness of Algorithm \AlgRegularGfp depends on: the Kleene iterates computed by \AlgRegularGfp are finitely many.
In order to do that, we consider an abstract version of the greatest fixpoint computation exploiting
a closure operator which guarantees that the abstract Kleene iterates are finitely many.
This closure operator $\rho_{\leqslant_{\mathcal{N}_2}}$ will be defined by using an ordering relation $\leqslant_{\mathcal{N}_2}$
induced by an NFA $\mathcal{N}_2$ such that
\(L_2=\lang{\mathcal{N}_2}\) and will be shown to be
\emph{forward complete} for the function \(\lambda \vect{X}\ldotp \vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1}(\vect{X})\)
used by \AlgRegularGfp.
\\
\indent
Forward completeness of abstract interpretations \cite{gq01}, also called
exactness \citep[Definition~2.15]{mine17}, is different
from and orthogonal to backward completeness introduced in Section~\ref{sec:inclusion_checking_by_complete_abstractions}
and crucially used in Sections~\ref{sec:an_algorithmic_framework_for_language_inclusion_based_on_complete_abstractions}-\ref{sec:context_free_languages}.
In particular, a remarkable consequence
of exploiting a forward complete abstraction is that the Kleene iterates of the concrete and abstract greatest fixpoint computations coincide.
The intuition here is that this forward complete closure $\rho_{\leqslant_{\mathcal{N}_2}}$ allows us to establish that all Kleene iterates of \(\gfp(\vect{X}\ldotp \vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1}(\vect{X}))\) belong to the image of the closure $\rho_{\leqslant_{\mathcal{N}_2}}$.
More precisely, every Kleene iterate is a language which is upward closed for \(\leqslant_{\mathcal{N}_2}\).
Interestingly, a similar phenomenon occurs in well-structured transition systems~\cite{ACJT96,Finkel2001}.
\\
\indent
Let us now describe in detail this abstraction.
A closure \(\rho\in\uco(C)\) on a concrete domain $C$ is forward complete for a monotone function \(f:C\rightarrow C\) if{}f \(\rho f \rho = f \rho\) holds.
The intuition here is that forward completeness means that no loss of precision
is accumulated when the output of a computation of $f\rho$ is approximated by $\rho$, or, equivalently, $f$ maps abstract elements
of $\rho$ into abstract elements of $\rho$.
Dually to the case of backward completeness, forward completeness implies that \(\gfp(f)=\gfp(f\rho) = \gfp(\rho f \rho)\), when these greatest fixpoints exist (this is the case, e.g., when $C$ is a complete lattice).
It turns out that forward and backward completeness are related by the following duality on function $f$.
\begin{lemma}[\textbf{\citep[Corollary~1]{gq01}}]\label{lemma:forwardbackwardtransfer}
Let $\tuple{C,\leq}$ be a complete lattice and assume that \(f\colon C\rightarrow C\) admits the right-adjoint \(\widetilde{f}\colon C\rightarrow C\), i.e.
$f(c) \leq c' \Leftrightarrow c \leq \widetilde{f}(c')$ holds.
Then, \(\rho\) is backward complete for \(f\) if{}f \(\rho\) is forward complete for \(\widetilde{f}\).
\end{lemma}
Thus, by Lemma~\ref{lemma:forwardbackwardtransfer}, in the following result instead of
assuming the hypotheses implying that a closure $\rho$ is forward complete for the right-adjoint $\widetilde{\Pre}_{\mathcal{N}_1}$ we
state some hypotheses which guarantee that $\rho$ is backward complete for its left-adjoint, which, by Lemma~\ref{lemma:FnAdjoint}, is ${\Pre}_{\mathcal{N}_1}$.
\begin{theorem}\label{theorem:dualalgorithm}
Let \(\mathcal{N}_1 = \tuple{Q_1,\delta_1,I_1,F_1,\Sigma}\) be an NFA, let \(L_2\) be a regular language and let \(\rho \in \uco(\tuple{\wp(\Sigma^*),\subseteq})\). Let us assume that:
\begin{myEnumA}
\item \(\rho(L_2) = L_2\);
\item \(\rho\) is backward complete for \(\lambda X\ldotp a X\) for all \(a\in \Sigma\).
\end{myEnumA}
Then
\[\lang{\mathcal{N}_1}\subseteq L_2 \Leftrightarrow \vectarg{\epsilon}{F_1} \subseteq \gfp(\vect{X}\ldotp \rho(\vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1}(\rho(\vect{X}))))\enspace .\]
Moreover, the Kleene iterates computed by $\gfp(\vect{X}\ldotp \rho(\vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1}(\rho(\vect{X}))))$ coincide in lockstep with those of \(\gfp(\vect{X}\ldotp \vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1}(\vect{X}))\).
\end{theorem}
\begin{proof}
Theorem~\ref{theorem:backComplete} shows that if \(\rho\) is backward complete for \(\lambda X\ldotp a X\) for every \(a\in\Sigma\) then it is backward complete for \(\Pre_{{\mathcal{N}_1}}\).
Thus, by Lemma~\ref{lemma:forwardbackwardtransfer}, \(\rho\) is forward complete for \(\widetilde{\Pre}_{\mathcal{N}_1}\), hence it is forward complete for \(\lambda \vect{X}\ldotp \vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1} (\vect{X})\) since:
\begin{align*}
\rho (\vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1} (\rho(\vect{X}))) & =
\quad\text{[By forward comp. for $\widetilde{\Pre}_{\mathcal{N}_1}$ and \(\rho(L_2)=L_2\)]}\\
\rho (\rho( \vectarg{L_2}{I_1}) \cap \rho(\widetilde{\Pre}_{\mathcal{N}_1} (\rho(\vect{X})))) & =
\quad\text{[Since \(\rho(\rho(X) \cap \rho(Y)) = \rho(X) \cap \rho(Y)\)]}\\
\rho (\vectarg{L_2}{I_1}) \cap \rho (\widetilde{\Pre}_{\mathcal{N}_1} (\rho(\vect{X}))) & =
\quad\text{[By forward comp. for $\widetilde{\Pre}_{\mathcal{N}_1}$ and \(\rho (L_2)=L_2\)]}\\
\vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1} (\rho (\vect{X}))&\enspace .
\end{align*}
Since, by forward completeness, we have that
\[\gfp(\vect{X}\ldotp \vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1}(\vect{X})) = \gfp(\vect{X}\ldotp \rho(\vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1}(\rho(\vect{X}))))\enspace, \] by Equivalence \eqref{eq:inclgfplfp}, we conclude
that
\[\lang{\mathcal{N}_1}\subseteq L_2 \Leftrightarrow \vectarg{\epsilon}{F_1} \subseteq \gfp(\vect{X}\ldotp \rho(\vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1}(\rho(\vect{X})))) \enspace . \]
\noindent
Finally, we observe that the Kleene iterates computing \(\gfp(\lambda \vect{X}\ldotp \vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1} (\vect{X}))\) and those computing $\gfp(\vect{X}\ldotp \rho(\vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1}(\rho(\vect{X}))))$ coincide in lockstep since
$\rho (\vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1} (\rho(\vect{X}))) =
\vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1} (\rho (\vect{X}))$ and
\(\rho(\vectarg{L_2}{I_1})=\vectarg{L_2}{I_1}\).
\end{proof}
We can now establish that the sequence of Kleene iterates computed by \(\gfp(\vect{X}\ldotp \vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1}(\vect{X}))\) is finite.
Let \(L_2=\lang{\mathcal{N}_2}\), for some NFA \(\mathcal{N}_2\), and consider the corresponding left
state-based quasiorder \(\mathord{\leqslant_{\mathcal{N}_2}^{l}}\) on $\Sigma^*$ as defined by~\eqref{eqn:state-qo}.
Lemma~\ref{lemma:LAconsistent} tells us that \(\mathord{\leqslant_{\mathcal{N}_2}^{l}}\) is a left \(L_2\)-consistent wqo.
Furthermore, since \(Q_2\) is finite we have that both \(\mathord{\leqslant_{\mathcal{N}_2}^{l}}\) and \((\mathord{\leqslant_{\mathcal{N}_2}^{l}})^{-1}\) are wqos, so that, in turn, \( \tuple{\rho_{\leqslant_{\mathcal{N}_2}^{l}},\subseteq}\) is a poset which is both ACC and DCC.
In particular, the definition of \(\mathord{\leqslant_{\mathcal{N}_2}^{l}}\) implies that every chain in \( \tuple{\rho_{\leqslant_{\mathcal{N}_2}^{l}},\subseteq}\) has at most \(2^{|Q_2|}\) elements, so that
if we compute \(2^{|Q_2|}\) Kleene iterates then we have surely computed the greatest fixpoint.
Moreover, as a consequence of the DCC, the Kleene iterates of
\(\gfp(\lambda \vect{X}\ldotp\rho_{\leqslant_{\mathcal{N}_2}^{l}}(\vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1}(\rho_{\leqslant_{\mathcal{N}_2}^{l}}(\vect{X}))))\) are finitely many, hence so are the iterates of \(\gfp(\lambda \vect{X}\ldotp \vectarg{L_2}{I_1} \cap \widetilde{\Pre}_{\mathcal{N}_1} (\vect{X}))\) because they go in lockstep as stated by Theorem~\ref{theorem:dualalgorithm}.
\begin{corollary}
Let \(\mathcal{N}_1\) be an NFA and let \(L_2\) be a regular language.
Then, Algorithm \AlgRegularGfp decides the inclusion \(\lang{\mathcal{N}_1} \subseteq L_2\).
\end{corollary}
Finally, it is worth citing that \citet{fiedor2019nested} put forward an algorithm for deciding WS1S formulae which relies on the same lfp computation used in \AlgRegularA.
Then, they derive a dual gfp computation by relying on Park's duality~\cite{park1969fixpoint}: \(\lfp (\lambda X \ldotp f(X)) = (\gfp (\lambda X \ldotp (f(X^c))^c))^c\).
Their approach differs from ours since we use the Equivalence~\eqref{eqn:duality} to compute a gfp, different from the lfp, which still allows us to decide the inclusion problem.
Furthermore, their algorithm decides whether a given automaton accepts \(\epsilon\) and it is not clear how their algorithm could be extended for deciding language inclusion.
{}
{}
\chapter{Searching on Compressed Text}
\label{chap:zearch}
In this chapter, we show how to instantiate the quasiorder-based framework from Chapter~\ref{chap:LangInc} to search on compressed text.
Specifically, we adapt Algorithm \AlgGrammarA to report the number of lines in a grammar-compressed text containing a match for a given regular expression.
The problem of searching in compressed text is of practical interest due to the growing amount of information handled by modern systems, which demands efficient techniques both for compression, to reduce the storage cost, and for regular expression searching, to speed up querying.
As evidence of the importance of this problem, note that state of the art tools for searching with regular expressions, such as \tool{grep} and \tool{ripgrep}, provide a method to search on compressed files by decompressing them on-the-fly.
In the following, we focus on the problem of \emph{counting}, i.e. finding the number of lines of the input text that contain a match for the expression.
This type of query is supported out of the box by many tools\footnote{Tools such as \tool{grep}, \tool{ripgrep}, \tool{awk} and \tool{ag}, among others, can be used to report the number of matching lines in a text.}, which evidences its practical interest.
However, when the text is given in compressed form, the fastest approach in practice is the \emph{decompress and search approach}, i.e. querying the uncompressed text as it is recovered by the decompressor.
In this chapter, we challenge this approach.
Lossless compression of textual data is achieved by finding repetitions in the input text and replacing them by references.
We focus on grammar-based compression schemes in which each tuple ``reference → repeated text'' is considered as a rule of a context-free grammar.
The resulting grammar, produced as the output of the compression, generates a language consisting of a single word: the uncompressed text.
Figure~\ref{fig:compress} depicts the output of a grammar-based compression algorithm.
\begin{figure}
\caption{List of grammar rules (left) generating the string
``\textrm{a\hspace{1pt}
\label{fig:compress}
\end{figure}
Intuitively, the decompress and search approach prevents the searching algorithm from taking advantage of the repetitions in the data found by the compressor.
For instance, in the grammar shown in Figure~\ref{fig:compress}, the decompress and search approach results in processing the subsequence ``$\textrm{ab\$a\$b}$'' twice.
By working on the compressed data, our algorithm would process that subsequence once and reuse the information each time it finds the variable \(X_5\).
Given a grammar-compressed text and a regular expression, deciding whether the compressed text matches the expression amounts to deciding the emptiness of the intersection of the languages generated by the grammar and an automaton built for the regular expression.
In order to solve this emptiness problem, we reduce it to an inclusion problem.
Note that this reduction is possible since the grammar generates a single word and, therefore, \(\{w\} \cap L \neq \varnothing \Leftrightarrow w \in L\), where \(L\) is the language generated by the regular expression.
Then, we could instantiate the quasiorder-based framework described in Chapter~\ref{chap:LangInc} with different quasiorders to decide the inclusion.
However, in order to go beyond a yes/no answer and report or count the exact matches, we need to compute some extra information for each variable of the grammar.
This extra information is computed for the terminals of the grammar and then propagated through the variables according to the grammar rules in a bottom-up fashion.
To do that, we iterate thorough the grammar rules and compose, for each of them, the information previously computed for the variables on the right hand side.
For example, when processing rule \(X_3 {\to} X_1\$\) of Figure~\ref{fig:compress} our algorithm composes the information for \(X_1\) with the one for \(\$\).
The information computed for the string ``$\textrm{ab\$}$'', will be reused every time the variable \(X_3\) appears in the right hand side of a rule.
Following this idea, we present an algorithm for counting the lines in a grammar-compressed text containing a match for a regular expression whose runtime does not depend on the size $T$ of the uncompressed text.
Instead, it runs in time \emph{linear} in the size of its \emph{compressed version}.
Furthermore, the information computed for counting can be used to perform an \emph{on-the-fly}, \emph{lazy} decompression to recover the matching lines from the compressed text.
Note that, for reporting the matching lines, the dependency on $T$ is unavoidable.
The salient features of our approach are:
\paragraph{Generality} Our algorithm is not tied to any particular grammar-based compressor.
Instead, we assume the compressed text is given by a straight line program (SLP for short), i.e. a grammar generating the uncompressed text and nothing else.
Finding the smallest SLP $g$ generating a text of length $T$ is an NP-hard problem, as shown by \citet{Charikar2005Smallest}, for which grammar-based compressors such as LZ78~\cite{ziv1978compression}, LZW~\cite{welch1984technique}, RePair~\cite{larsson2000off} and Sequitur~\cite{nevill1997compression} produce different approximations.
For instance, \citet{Hucke2016Smallest} showed that the LZ78 algorithm produces a representation of size $Ω\bigl(\len{g}{\cdot} (T/\log{T})^{2/3}\bigr)$ and the representation produced by the RePair algorithm has size $Ω\bigl(\len{g}{\cdot} \log{T}/\log(\log{T})\bigr)$.
Since it is defined over SLPs, our algorithm applies to all such approximations, including $g$ itself.
\paragraph{Nearly optimal data structures} We define data structures enabling the algorithm to run in time linear in the size of the compressed text.
With these data structures our algorithm runs in $\mathcal{O}(t {\cdot} s^3)$ time using $\mathcal{O}(t {\cdot} s^2)$ space where $t$ is the size of the compressed text, i.e. the grammar, and $s$ is the size of the automaton built from the regular expression.
When the automaton is deterministic, the complexity drops to $\mathcal{O}(t{\cdot}s)$ time and $\mathcal{O}(t{\cdot}s)$ space.
As shown by \citet{Amir2018FineGrained}, there is no combinatorial\footnote{Interpreted as
any \emph{practically efficient} algorithm that does not suffer from the issues of Fast Matrix Multiplication such as large constants and inefficient memory usage.} algorithm improving these time complexity bounds beyond \emph{polylog} factors, hence our algorithm is \emph{nearly optimal}.
\paragraph{Efficient implementation} We present \tool{zearch}, a purely \emph{sequential} implementation of our algorithm which uses the above mentioned data structures.\footnote{\tool{zearch} can optionally report the matching lines.}
The experiments show that \tool{zearch} requires up to $25\pct$ less time than the state of the art: running \tool{hyperscan} on the uncompressed text as it is recovered by \tool{lz4} (in \emph{parallel}).
Furthermore, when the grammar-based compressor achieves high compression ratio (above 13:1), running \tool{zearch} on the compressed text is as fast as running \tool{hyperscan} directly on the uncompressed text.
Such compression ratios are achieved, for instance, when working with automatically generated log files.
\section{Finding the Matches}\label{sec:findingMatches}
Recall that the problem of deciding whether a grammar-compressed text contains a match for a regular expression can be reduced to an emptiness problem for the intersection of the languages generated by a grammar and an automaton.
Indeed, given an SLP \(\mathcal{P}\) generating a text \(T\) over an alphabet \(Σ\), i.e. \(\lang{\mathcal{P}}=\{T\}\) where \(T \in Σ^*\), and an automaton \(\mathcal{N} = \tuple{Q, Σ, δ, I, F}\) representing a regular expression, we find that:
\[\text{There exists a substring of } T \text{ in }\lang{\mathcal{N}} \Leftrightarrow \lang{\mathcal{P}} \cap \bigl(Σ^* \cdot \lang{\mathcal{N}}\cdot Σ^*\bigr) \neq \varnothing \enspace .\]
\noindent On the other hand, since \(\lang{\mathcal{P}}\) contains exactly one word we have that
\[\lang{\mathcal{P}} \cap \bigl(Σ^* \cdot \lang{\mathcal{N}}\cdot Σ^*\bigr) \neq \varnothing \Leftrightarrow \lang{\mathcal{P}} \subseteq \bigl(Σ^* \cdot \lang{\mathcal{N}}\cdot Σ^*\bigr) \enspace .\]
As a consequence, the problem of deciding whether a grammar-compressed text contains a match for a regular expression can be solved by using Algorithm \AlgGrammarA with the quasiorder \(\leqslant_{\mathcal{N}}\) as described in Chapter~\ref{chap:LangInc}.
Observe that, as the following example evidences, when restricting to SLPs the iteration of the \(\Kleene\) procedure updates the abstraction for each variable of the grammar \emph{exactly once} since there are no loops in SLPs.
As a consequence, it is enough to process the rules in an orderly manner and compute the abstraction for each variable, i.e. \(α(X)\), exactly once.
\begin{example}\label{example:search}
Let \(\mathcal{P}\) be the SLP from Figure~\ref{fig:compress} and let \(\mathcal{N}\) and \(\mathcal{N}'\) be the automata from Figure~\ref{fig:NFAsearch}.
Next, we show the Kleene iterates computed by Algorithm \AlgGrammarA which, as shown in Chapter~\ref{chap:LangInc}, works on the abstract domain \(\tuple{\AC_{\tuple{\wp(Q\times Q),\subseteq}},\sqsubseteq}\) with the abstraction function defined as \(α(X) = \minor{\{\ctx_{\mathcal{N}}(u) \mid u \in X\}}\).
To simplify the notation, we denote the pair \((q_i,q_j)\) by \(ij\).
{\small
\begin{align*}
\left(\hspace{-5pt}\begin{array}{c}
α(W_{X_6}^{\mathcal{P}}) \\[2pt]
α(W_{X_5}^{\mathcal{P}}) \\[2pt]
α(W_{X_4}^{\mathcal{P}}) \\[2pt]
α(W_{X_3}^{\mathcal{P}})\\[2pt]
α(W_{X_2}^{\mathcal{P}})\\[2pt]
α(W_{X_1}^{\mathcal{P}})\end{array}\hspace{-5pt}\right) \,{=}\, \left(\hspace{-5pt}\begin{array}{c}
\varnothing \\[2pt]
\varnothing \\[2pt]
\varnothing \\[2pt]
\varnothing \\[2pt]
\minor{\{{11},{33}\}} \\[2pt]
\minor{\{{11},{33},{13}\}}\end{array}\hspace{-5pt}\right) \,{\Rightarrow}\,
\left(\hspace{-5pt}\begin{array}{c}
\varnothing \\[2pt]
\varnothing \\[2pt]
\minor{\{{11},{33}\}} \\[2pt]
\minor{\{{11},{33},{13}\}}\\[2pt]
\minor{\{{11},{33}\}} \\[2pt]
\minor{\{{11},{33},{13}\}}\end{array}\hspace{-5pt}\right) \,{\Rightarrow}\,
\left(\hspace{-5pt}\begin{array}{c}
\varnothing \\[2pt]
\minor{\{{11},{33},{13}\}} \\[2pt]
\minor{\{{11},{33}\}} \\[2pt]
\minor{\{{11},{33},{13}\}}\\[2pt]
\minor{\{{11},{33}\}} \\[2pt]
\minor{\{{11},{33},{13}\}}\end{array}\hspace{-5pt}\right) \,{\Rightarrow}\,
\left(\hspace{-5pt}\begin{array}{c}
\minor{\{{11},{33},{13}\}} \\[2pt]
\minor{\{{11},{33},{13}\}} \\[2pt]
\minor{\{{11},{33}\}} \\[2pt]
\minor{\{{11},{33},{13}\}}\\[2pt]
\minor{\{{11},{33}\}} \\[2pt]
\minor{\{{11},{33},{13}\}}\end{array}\hspace{-5pt}\right)
\end{align*}}
Since for every variable \(X_n\) the value of \(α(X_n)\) is computed by combining the values of \(α(X_i)\) and \(α(X_j)\) for some \(i,j < n\), the \(\Kleene\) procedure is equivalent to computing, sequentially, the values of \(α(X_1), α(X_2), \ldots, \linebreak α(X_5)\).
In this case, since \((q_1,q_3) \in α(X_5)\) then Algorithm~\AlgGrammarA concludes that the language inclusion \(\lang{\mathcal{P}} \subseteq \lang{\mathcal{N}}\) holds, i.e. there exists a substring \(w\) of the uncompressed text such that \(w \in \lang{\mathcal{N}'}\).
{\ensuremath{\Diamond}}
\end{example}
\begin{figure}
\caption{NFAs \(\mathcal{N}
\label{fig:NFAsearch}
\end{figure}
Furthermore, in an SLP each variable generates exactly one word and, therefore, the abstraction of a variable consists of a single set, i.e. \(α(X) \in \AC_{\tuple{\wp(Q\times Q),\subseteq}}\) is a singleton as shown in Example~\ref{example:search}.
As a consequence, we can drop the \(\minor{\cdot}\) from function \(\Fn^{\mathcal{N}}_{\mathcal{P}}\) defined in Section~\ref{sec:ACGrammar}, since \(\minor{\{\ctx_{\mathcal{N}}(w)\}} = \{\ctx_{\mathcal{N}}(w)\}\) for any word, and write:
\[\Fn_{\mathcal{P} }^{\mathcal{N}}(\tuple{X_i}_{i\in[0,n]}) \ud \langle \{X_j \comp X_k \mid X_i {\to} X_j X_k \in P\} \rangle_{i \in [0,n]}\]
Recall that, by definition, for all \(X_j, X_k \in \wp(Q \times Q)^{|\mathcal{V}|}\),
\[X_j \comp X_k = \{(q_1,q_2) \mid \exists q' \in Q, \; (q_1,q') \in X_j \land (q',q_2) \in X_k\} \enspace .\]
Finally, given an NFA \(\mathcal{N}'\) it is straightforward to build an automaton \(\mathcal{N}\) generating the language \(Σ^* \cdot \lang{\mathcal{N}} \cdot Σ^*\) by adding self-loops reading each letter of the alphabet to every initial and every final state of \(\mathcal{N}'\) as shown in Figure~\ref{fig:NFAsearch}.
Instead of adding these transitions to \(\mathcal{N}\), which, as shown in Example~\ref{example:search}, results in adding the pairs \(\{(q,q) \mid q \in I \cup F\}\) to \(\ctx_{\mathcal{N}}(w)\) for every word \(w \in Σ^*\), we consider them as implicit.
As a consequence, when the input grammar is an SLP and we are interested in deciding whether \(\lang{\mathcal{P}} \subseteq Σ^* \cdot \lang{\mathcal{N}} \cdot Σ^*\), Algorithm~\AlgGrammarA can be written as Algorithm~\AlgSLPIncS.
Observe that Algorithm~\AlgSLPIncS uses the transition function \(δ\) to store and manipulate the sets \(\ctx_{\mathcal{N}}(X_i)\) for each variable \(X_i\) of the grammar, i.e.
\[(q_1,X_i,q_2) \in δ \Leftrightarrow (q_1,q_2) \in \ctx_{\mathcal{N}}(X_i)\enspace .\]
\begin{figure}
\caption{Algorithm for deciding $\lang{\mathcal{P}
\label{alg:SLPIncS}
\label{alg:algorithmTheoryDecide:step}
\label{alg:algorithmTheoryDecide:loopl}
\label{alg:algorithmTheoryDecide:loopa}
\label{alg:algorithmTheoryDecide:loopb}
\label{alg:algorithmTheoryDecide:add}
\label{alg:algorithmTheoryDecide:looplend}
\label{alg:algorithmTheoryDecide:return}
\end{figure}
\section{Counting Algorithm}\label{sec:provingCorrectness}
State of the art tools for regular expression search are equipped with a number of features\footnote{\url{https://beyondgrep.com/feature-comparison/}} to perform different operations beyond deciding the existence of a match in the text.
Among the most relevant of these features we find \emph{counting}.
Tools like \tool{grep}\footnote{\url{https://www.gnu.org/software/grep}}, \tool{rg}\footnote{\url{https://github.com/BurntSushi/ripgrep}}, \tool{ack}\footnote{\url{https://github.com/beyondgrep/ack2}} or \tool{ag}\footnote{\url{https://geoff.greer.fm/ag/}} report the number of lines containing a match, ignoring matches across lines.
Next we extend Algorithm~\AlgSLPIncS to perform this sort of counting.
Let $\NL$ denote the new-line delimiter and let $\widehat{Σ} = Σ {\setminus} \{\NL\}$.
Given a string $w \in Σ^+$ compressed as an SLP $\mathcal{P}=\tuple{V,Σ,P}$ and an automaton $\mathcal{N}=\tuple{Q,\widehat{Σ},δ,I,F}$ built from a regular expression, Algorithm \AlgCountLines reports the number of lines in $w$ containing a match for the expression.
Note that, as the tools mentioned in the previous paragraph, we deliberately ignore matches across lines.
As an overview, our algorithm computes some \emph{counting information} for each alphabet symbol of the grammar (procedure \textsc{init\_automaton}) which is then propagated, in a bottom-up manner, to the axiom rule.
Such propagation is achieved by iterating through the grammar rules (loop in line~\ref{alg:algorithmTheoryCount:step}) and combining, for each rule, the information for the symbols on the right hand side to obtain the information for the variable on the left (procedure \textsc{count}).
Finally, the output of the algorithm is computed from the information propagated to the axiom symbol (line~\ref{alg:algorithmTheoryCount:return}).
\begin{figure}
\caption{Algorithm for counting the lines in $\lang{\mathcal{P}
\label{alg:CountLines}
\label{alg:algorithmTheoryCount:step}
\label{alg:algorithmTheoryCount:loopl}
\label{alg:algorithmTheoryCount:loopa}
\label{alg:algorithmTheoryCount:loopb}
\label{alg:algorithmTheoryCount:add}
\label{alg:algorithmTheoryCount:nm}
\label{alg:algorithmTheoryCount:looplend}
\label{alg:algorithmTheoryCount:return}
\end{figure}
Define a \demph{line} as a maximal factor of $w$ each symbol of which belongs to $\widehat{Σ}$, a \demph{closed line} as a line which is neither a prefix nor a suffix of $w$ and a \demph{matching line} as a line in $\widehat{\lang{\mathcal{N}}}$, where $\widehat{\lang{\mathcal{N}}} = \widehat{Σ}^*\cdot\lang{\mathcal{N}}\cdot\widehat{Σ}^*$.
\begin{example}
Consider the word \(w = ``\textrm{a\hspace{1pt}b\hspace{1pt}\NL\hspace{1pt}a\hspace{1pt}\NL\hspace{1pt}b\hspace{1pt}a\hspace{1pt}b\hspace{1pt}\NL}"\) and an NFA \(\mathcal{N}\) with \(\lang{\mathcal{N}} = \{ba\}\).
Then the strings ``\(ab\)'', ``\(a\)'' and ``\(bab\)'' are \emph{lines} of which only the strings ``\(ab\)'' and ``\(a\)'' are \emph{closed lines} and ``\(bab\)'' is the only \emph{matching line}.
{\ensuremath{\Diamond}}
\end{example}
\begin{definition}[Counting Information]
Let \(\mathcal{N}\) be an NFA and let \(\mathcal{P}=\tuple{\mathcal{V},Σ,P}\) be an SLP.
The \emph{counting information of} $τ \in (V \cup Σ)$, with $τ \Produces^* u$ and $u \in Σ^+$, is the tuple $\counting{τ}\ud\tuple{\varnl{τ},\varleft{τ},\varright{τ},\varcount{τ}}$ where
\begin{align*}
\varnl{τ} &\ud \exists k, \; (u)_k = \NL &
\varleft{τ} & \ud \exists i, \; (u)_{1,i} \in \widehat{Σ}^*\cdot\lang{\mathcal{N}} \\
\varright{τ} &\ud \exists j, \; (u)_{j,\dag} \in \lang{\mathcal{N}}\cdot\widehat{Σ}^*&
\varcount{τ} & \ud \len{\{(i{+}1,j{-}1) \mid (u)_{i,j} \in \NL\cdot\widehat{\lang{\mathcal{N}}}\cdot\NL\}}\tag*{\rule{0.5em}{0.5em}}
\end{align*}
\end{definition}
Note that $\varnl{τ}$, $\varleft{τ}$ and $\varright{τ}$ are boolean values while $\varcount{τ}$ is an integer.
It follows from the definition that the number of \emph{matching lines} in $u$, with $τ \Produces^* u$, is given by the number of \emph{closed matching lines} ($\varcount{τ}$) plus the prefix of $u$ if{}f it is a \emph{matching line} ($\varleft{τ}$) and the suffix of $u$ if{}f it is a \emph{matching line} ($\varright{τ}$) different from the prefix ($\varnl{τ}$).
Since whenever $\varnl{τ} = \text{\emph{false}}$ we have $\varleft{τ} = \varright{τ}$, it follows that
\[\sharp\text{\emph{matching lines} in } u = \varcount{τ} + \left\{
\begin{array}{ll}
1 & \text{if } \varleft{τ}\\
0 & \text{otherwise} \end{array}
\right. + \left\{
\begin{array}{ll}
1 & \text{if } \varnl{τ} \land \varright{τ}\\
0 & \text{otherwise} \end{array}
\right.
\]
Computing the counting information of $τ$ requires deciding membership of certain factors of $u$ in $\widehat{\lang{\mathcal{N}}}$.
As explained before, we reduce these membership queries to language inclusion checks which are solved by Algorithm~\AlgSLPIncS.
This operation corresponds to lines~\ref{alg:algorithmTheoryCount:loopa} to \ref{alg:algorithmTheoryCount:add} of Algorithm \AlgCountLines.
As a result, after processing the rule for $τ$, we have $(q_1,τ,q_2) \in δ$ if{}f the automaton moves from $q_1$ to $q_2$ reading\begin{myEnumAL}
\item $u$,
\item a suffix of $u$ and $q_1 \in I$, or
\item a prefix of $u$ and $q_2 \in F$.
\end{myEnumAL}
Procedures \textsc{count} and \textsc{init\_automaton} are quite straightforward, the main difficulty being the computation of $\algvarcount{X}$ which we explain next.
Let $x,y \in Σ^+$ be the strings generated by $α$ and $β$, respectively.
Given rule $X \to α β$, $X$ generates all the matching lines generated by $α$ and $β$ plus, possibly, a ``new'' matching line of the form $z = (x)_{i,\dag}(y)_{1,j}$ with $1< i \leq \len{x}$ and $1 \leq j < \len{y}$.
Such an extra matching line appears if{}f both $α$ and $β$ generate a $\NL$\, symbol and one of the following holds:
\begin{myEnumA}
\item The suffix of $x$ matches the expression.
\item The prefix of $y$ matches the expression.
\item There is a new match $m \in z$ with $m \notin x$, $m \notin y$ (line~\ref{alg:algorithmTheoryCount:nm}).
\end{myEnumA}
\begin{example}
Let $\mathcal{N}$ be an automaton with $\lang{\mathcal{N}}=\{ab,ba\}$ and let $X\to α β$ be a grammar rule with $α \Produces^* ba\NL a$ and $β \Produces^* b \NL aba$.
Then $X \Produces^* ba\NL ab \NL aba$.
The matching lines generated by $α$, $β$ and $X$ are, respectively, $\{ba\}$, $\{aba\}$ and $\{ba,ab,aba\}$.
Moreover
\[\counting{α} = \tuple{\text{\emph{true}},\text{\emph{true}},\text{\emph{false}},0} \quad \text{ and } \quad \counting{β} = \tuple{\text{\emph{true}},\text{\emph{false}},\text{\emph{true}},0}\enspace .\]
Therefore, applying function \textsc{count} we find that $\counting{X}=\tuple{\text{\emph{true}},\text{\emph{true}},\text{\emph{true}},1}$ so the number of matching lines is $1{+}1{+}1{=}3$, as expected.
{\ensuremath{\Diamond}}
\end{example}
Note that the counting information computed by Algorithm \AlgCountLines can be used to uncompress \emph{only} the matching lines by performing a top-down processing of the SLP.
For instance, given $X \to α β$ with $\counting{X}=\tuple{\text{\emph{true}},\text{\emph{true}},\text{\emph{false}},0}$ and $\counting{α}=\tuple{\text{\emph{true}},\text{\emph{true}},\text{\emph{false}},0}$, there is no need to decompress the string generated by $β$ since we are certain it is not part of any matching line (otherwise we should have $\varcount{X}>0$ or $\varright{X}=\text{\emph{true}}$).
Next, we describe the data structures that we use to implement Algorithm \AlgCountLines with \emph{nearly optimal} complexity.
\subsection{Data Structures}\label{sec:DataStructures}
We assume the alphabet symbols, variables and states are indexed and use the following data structures, illustrated in Figure~\ref{fig:datastructure}: an array $\mathcal{A}$ with $t{+}\len{Σ}$ elements, where $t$ is the number of rules of the SLP, and two $s \times s$ matrices $\mathcal{M}$ and $\mathcal{K}$ where $s$ is the number of states of the automaton.
\begin{figure}
\caption{Data structures enabling nearly optimal running time for Algorithm \AlgCountLines. The image shows the contents of $\mathcal{M}
\label{fig:datastructure}
\end{figure}
Each element $\mathcal{A}[i]$ contains the information related to variable $X_i$, i.e. $\counting{X_i}$ and the list of transitions labeled with $X_i$, denoted $\edges{X_i}$.
We store $\counting{X}$ using one bit for each $\algvarnl{X}$, $\algvarleft{X}$ and $\algvarright{X}$ and an integer for $\algvarcount{X}$.
For each rule $X_\ell → α_\ell β_\ell$ the matrix $\mathcal{K}$ is set so that row $i$ contains the set of states reachable from the state $q_i$ by reading the string generated by $β_\ell$, i.e. \(\mathcal{K}[i] = \{q_j \mid (q_i,β_\ell,q_j) \in δ\}\).
If there are fewer than $s$ such states we use a sentinel value (${-}1$ in Figure~\ref{fig:datastructure}).
Finally, each element $\mathcal{M}[i][j]$ stores the index $\ell$ of the last variable for which $(q_i,X_\ell,q_j)$ was added to $δ$.
Note that since rules are processed one at a time, matrices $\mathcal{K}$ and $\mathcal{M}$ can be reused for all rules.
Observe that it is straightforward to update the matrices \(\mathcal{M}\) and \(\mathcal{K}\) in \(\mathcal{O}(s^2)\) time for each rule $X_\ell → α_\ell β_\ell$ since there are up to \(s^2\) transitions \((q_i,β_\ell,q_j) \in δ\).
These data structures provide $\mathcal{O}(1)$ runtime for the following operations:
\begin{myEnum}
\item[-] Accessing the information corresponding to $α_\ell$ and $β_\ell$ at line~\ref{alg:algorithmTheoryCount:loopl} (using $\mathcal{A}$).
\item[-] Accessing the list of pairs $(q,q')$ with $(q,α_\ell,q') \in δ$ at line~\ref{alg:algorithmTheoryCount:loopa} (using $\edges{X_i}$).
\item[-] Accessing the list of states $q_2$ with $(q',β_\ell,q_2) \in δ$ at line~\ref{alg:algorithmTheoryCount:loopb} (using $\mathcal{K}$).
\item[-] Inserting a pair $(q,q')$ in $\edges{X_i}$ (avoiding duplicates) at line~\ref{alg:algorithmTheoryCount:add} (using $\mathcal{M}$).
\end{myEnum}
As a result, Algorithm \AlgCountLines runs in $\mathcal{O}(t{\cdot}s^3)$\footnote{The algorithm performs \(t\) iterations of loop in line~\ref{alg:algorithmTheoryCount:loopl}, up to \(s^2\) iterations of loop in line~\ref{alg:algorithmTheoryCount:loopa} and up to \(s\) iterations for loop in line~\ref{alg:algorithmTheoryCount:loopb}.} time using $\mathcal{O}(t{\cdot}s^2)$ space when the automaton built from the regular expression is an NFA and it runs in $\mathcal{O}(t{\cdot}s)$ time and $\mathcal{O}(t{\cdot}s)$ space when the automaton is a DFA (each row of $\mathcal{K}$ stores up to one state, hence the loop in line~\ref{alg:algorithmTheoryCount:loopb} results in, at most, one iteration).
\citet[Thm.~3.2]{Amir2018FineGrained} proved that, under the Strong Exponential Time Hypothesis, there is no combinatorial algorithm for deciding whether a grammar-compressed text contains a match for a DFA running in $\mathcal{O}((t {\cdot} s)^{1-ε})$ time with $ε{>}0$.
For NFAs, they proved~\citep[Thm.~4.2]{Amir2018FineGrained} that, under the $k$-Clique Conjecture, there is no combinatorial algorithm running in $\mathcal{O}((t {\cdot} s^3)^{1-ε})$ time.
Therefore, our algorithm is \emph{nearly optimal} in both scenarios.
\section{Implementation}\label{sec:implementation}
We implemented Algorithm \AlgCountLines, using the data structures described in the previous section, in a tool named \tool{zearch}\footnote{\url{https://github.com/pevalme/zearch}}.
Our tool works on \tool{repair}\footnote{\url{https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/re-pair/repair110811.tar.gz}}-compressed text and, beyond counting the matching lines, it can also report them by partially decompressing the input file.
The implementation consists of less than 2000 lines of C code.
The choice of this particular compressor, which implements the RePair algorithm of \citet{larsson2000off}, is due to the little effort required to adapt Algorithm \AlgCountLines to the specifics of the grammar built by \tool{repair} and the compression it achieves (see Table~\ref{table:compression}).
However \tool{zearch} can handle any grammar-based compression scheme by providing a way to recover the SLP from the input file.
Recall that we assume the alphabet symbols, variables and states are indexed.
For text compressed with \tool{repair}, the indexes of the alphabet symbols are $0…255$ ($Σ$ is fixed\footnote{Our algorithm also applies to larger alphabets, such as UTF8, without altering its complexity.}) and the indexes of the variables are $256…t{+}256$.
Typically, grammar-based compressors such as \tool{repair} encode the grammar so that rule $X \to α β$ appears always after the rules with $α$ and $β$ on the left hand side.
Thus, each iteration of the loop in line~\ref{alg:algorithmTheoryCount:loopl} reads a subsequent rule from the compressed input.
We translate the input regular expression into an $ε$-free NFA using the automata library \tool{libfa}\footnote{\url{http://augeas.net/libfa/index.html}} which applies Thompson's algorithm~\cite{thompson1968programming} with on-the-fly $ε$-removal.
\section{Empirical Evaluation}\label{sec:experimental}
Next we present a \emph{summary} of the experiments carried out to assess the performance of \tool{zearch}.
The details of the experiments, including the runtime and number of matching lines reported for each expression on each file and considering more tools, file sizes and regular expressions are available on-line\footnote{\url{https://pevalme.github.io/zearch/graphs/index.html}}, where we report graphs as the ones shown in Figure~\ref{fig:webGraphs}.
The following explanations about how the experiments reported in this thesis were carried out also apply to the larger set of experiments available on-line.
\begin{figure}
\caption{The \emph{first graph} as reported in the on-line version of the experiments.}
\label{fig:webGraphs}
\end{figure}
All tools for regular expression searching considered in this benchmark are used to count the matching lines without reporting them.
As expected, all tools report the exact same result for all benchmarks.
To simplify the terminology, we refer to counting the matching lines as \emph{searching}, unless otherwise stated.
\subsection{Tools}
Our benchmark compares the performance of \tool{zearch} against the fastest implementations we found for the following operations:
\begin{myEnumI}
\item Searching the compressed text without decompression.
\item Searching the uncompressed text.
\item Decompressing the text without searching.
\item Searching the uncompressed text as it is recovered by the decompressor.
\end{myEnumI}
For searching the compressed text we consider \tool{GNgrep}, the tool developed by \citet{navarro2003regular} for searching on text compressed with the grammar-based compressor \tool{LZW} defined by \citet{welch1984technique}.
To the best of our knowledge, this is the only existing tool departing from the \emph{decompress and search} approach.
For searching uncompressed text we consider \tool{grep} and \tool{hyperscan}.
We improve the performance of \tool{grep} by compiling it without \emph{perl regular expression} compatibility, which is not supported by \tool{zearch}.
We used the library \tool{hyperscan} by means of the tool (provided with the library) \tool{simplegrep}, which we modified\footnote{\url{https://gist.github.com/pevalme/f94bedc9ff08373a0301b8c795063093}} to \emph{efficiently} read data either from stdin or an input file.
These tools are top of the class\footnote{\url{https://rust-leipzig.github.io/regex/2017/03/28/comparison-of-regex-engines/}} for regular expression searching.
For (de)compressing the files we use \tool{zstd} and \tool{lz4} which are among the best lossless compressors\footnote{\url{https://quixdb.github.io/squash-benchmark/}}, being \tool{lz4} considerably faster while \tool{zstd} achieves better compression.
We use both tools with the highest compression level, which has little impact on the time required for decompression.
We use versions \tool{grep v3.3}, \tool{hyperscan v5.0.0}, \tool{lz4 v1.8.3} and \tool{zstd v1.3.6} running in an Intel Xeon E5640 CPU 2.67 GHz with 20 GB RAM which supports SIMD instructions up to SSE4-2.
We restrict to ASCII inputs and set \verb!LC_ALL=C! for all experiments, which significantly improves the performance of \tool{grep}.
Since both \tool{hyperscan} and \tool{GNgrep} count positions of the text where a match ends, we extend each regular expression (when used with these tools) to match the whole line.
We made this decision to ensure all tools solve the same counting problem and produce the \emph{same output}.
\subsection{Files and Regular Expressions}
Our benchmark consists of an automatically generated \emph{Log}\footnote{\url{http://ita.ee.lbl.gov/html/contrib/NASA-HTTP.html}} of HTTP requests, English \emph{Subtitles}~\cite{openSubtitles}, and a concatenation of English \emph{Books}\footnote{\url{https://web.eecs.umich.edu/~lahiri/gutenberg_dataset.html}}.
Table~\ref{table:compression} shows how each compressor behaves on these files.
\begin{table}[!ht]
\centering
\renewcommand{\arraystretch}{0.8}
\setlength{\tabcolsep}{5pt}
\setlength{\extrarowheight}{.2ex}
\resizebox{\textwidth}{!}{
\begin{tabular}{rr|r?r|r|r|r?r|r|r|r?r|r|r|r}
\toprule
& & & \multicolumn{4}{c?}{\textbf{Compressed size}} & \multicolumn{4}{c?}{\textbf{Compression time}} & \multicolumn{4}{c}{\textbf{Decompression time}} \\
& & \multicolumn{1}{c?}{\textbf{File}} & \multicolumn{1}{c|}{\tool{LZW}} & \multicolumn{1}{c|}{\tool{repair}} & \multicolumn{1}{c|}{\tool{zstd}} & \multicolumn{1}{c?}{\tool{lz4}} & \multicolumn{1}{c|}{\tool{LZW}} & \multicolumn{1}{c|}{\tool{repair}} & \multicolumn{1}{c|}{\tool{zstd}} & \multicolumn{1}{c?}{\tool{lz4}} & \multicolumn{1}{c|}{\tool{LZW}} & \multicolumn{1}{c|}{\tool{repair}} & \multicolumn{1}{c|}{\tool{zstd}} & \multicolumn{1}{c}{\tool{lz4}} \\
\midrule
\parbox[t]{2mm}{\multirow{6}{*}{\rotatebox[origin=c]{90}{\resizebox{65pt}{!}{\textbf{Uncompressed}}}}} & \parbox[t]{2mm}{\multirow{3}{*}{\rotatebox[origin=c]{90}{\small\textbf{1 MB}}}} & \textit{Logs} & \loser{0.19} & \second{0.08} & \winner{0.07} & 0.12 & \second{0.04} & 0.19 & \loser{0.51} & \winner{0.03} & \loser{0.02} & 0.01 & \second{0.01} & \winner{0.004} \\
& & \textit{Subtitles} & \loser{0.36} & \second{0.13} & \winner{0.11} & 0.15 & \second{0.04} & 0.25 & \loser{0.3} & \winner{0.03} & \loser{0.02} & 0.01 & \second{0.01} & \winner{0.004} \\
& & \textit{Books} & 0.42 & \second{0.34} & \winner{0.27} & \loser{0.43} & \winner{0.04} & 0.29 & \loser{0.42} & \second{0.08} & \loser{0.02} & 0.02 & \second{0.01} & \winner{0.004} \\
\cmidrule{2-15}
& \parbox[t]{2mm}{\multirow{3}{*}{\rotatebox[origin=c]{90}{\small\textbf{500 MB}}}} & \textit{Logs} & \loser{96} & \second{38} & \winner{33} & 65 & \second{16.9} & 123.2 & \loser{819.1} & \winner{13.3} & \loser{7.8} & 5.5 & \second{1.1} & \winner{0.64} \\
& & \textit{Subtitles} & \loser{191} & \second{66} & \winner{55} & 114 & \winner{19.9} & 169.3 & \loser{415.2} & \second{22.8} & \loser{8.6} & 8.2 & \second{1.2} & \winner{0.81} \\
& & \textit{Books} &206 & \second{153} & \winner{129} & \loser{216} & \winner{20.2} & 198.6 & \loser{646.3} & \second{40.6} & 8.6 & \loser{9.7} & \second{2.0} & \winner{0.8} \\
\bottomrule
\end{tabular}}
\caption{Sizes (in MB) of the compressed files and (de)compression times (in seconds). Maximum compression levels enabled.
(Blue = best; bold black = second best; red = worst).}
\label{table:compression}
\end{table}
We first run each experiment 3 times as warm up so that the files are loaded in memory.
Then we measure the running time 30 times and compute the \emph{confidence interval} (with 95\% confidence) for the running time required to count the number of matching lines for a regular expression in a certain file using a certain tool.
We consider the \emph{point estimate} of the confidence interval and omit the \emph{margin of error}, which never exceeds $9\pct$ of the point estimate for the reported experiments.
The on-line version of these experiments \emph{does report} the margin of error as a black mark on top of each bar.
The height of the bar is the point estimate computed for the given experiment while the black mark denotes the confidence interval (see Figure~\ref{fig:webGraphs}).
Figure~\ref{fig:comparison} summarizes the obtained results when considering, for all files, the regular expressions: ``\verb!what!'', ``\verb!HTTP!'', ``\verb!.!'', ``\verb!I .* you !'', ``\verb! [a-z]{4} !'', ``\verb! [a-z]*[a-z]{3} !'', ``\verb![0-9]{4}!'', ``\verb![0-9]{2}/(Jun|Jul|Aug)/[0-9]{4}!''.
For clarity, we report only on the most relevant tools among the ones considered.
For \tool{lz4} and \tool{zstd}, we report the time required to decompress the file and send the output to \tool{/dev/null}.
\begin{figure}
\caption{Average running time required to count the lines matching a regular expression in a file and time required for decompression.
Colors indicate whether the tool performs the search on the uncompressed text (blue); the compressed text (black); the output of the decompressor (green); or decompresses the file without searching (red).
}
\label{fig:comparison}
\end{figure}
\subsection{Analysis of the Results.}
Figure~\ref{fig:comparison} and Table~\ref{table:compression} show that the performance of \tool{zearch} improves with the compression ratio.
This is to be expected since \tool{zearch} processes each grammar rule exactly once and better compression results in fewer rules to be processed.
In consequence, \tool{zearch} is the fastest tool for counting matching lines in compressed \emph{Log} files while it is the second slowest one for the \emph{Books}.
In particular, \tool{zearch} is more than $25\pct$ faster than any other tool working on compressed \emph{Log} files.
Actually \tool{zearch} is competitive with \tool{grep} and \tool{hyperscan}, even though these tools operate on the uncompressed text.
These results are remarkable since \tool{hyperscan}, unlike \tool{zearch}, uses algorithms specifically designed to take advantage of SIMD parallelization.\footnote{According to the documentation, \tool{hyperscan} \emph{requires}, at least, support for SSSE3.}
Finally, the fastest tool for counting matching lines in compressed \emph{Subtitles} and \emph{Books}, i.e. \linebreak\tool{lz4|hyperscan}, applies to files larger than the ones obtained when compressing the data with \tool{repair} (see Table~\ref{table:compression}).
However, when considering a better compressor such as \tool{zstd}, which achieves slightly more compression than \tool{repair}, the decompression becomes slower.
As a result, \tool{zearch} outperforms \tool{zstd|hyperscan} by more than $7\pct$ for \emph{Subtitles} files and $50\pct$ for \emph{Logs}.
\paragraph{Contrived Example}
Next, we showcase the full potential of our approach by considering a contrived experiment in which the data is highly repetitive.
In particular, we consider a file where all lines are identical and consist of the sentence ``\textrm{This is a contrived experiment.\NL}''.
Table~\ref{table:compressionContrived} shows the compression achieved on this data for each of the compressors.
\begin{table}[!ht]
\centering
\renewcommand{\arraystretch}{0.8}
\setlength{\tabcolsep}{4pt}
\setlength{\extrarowheight}{.2ex}
\resizebox{\textwidth}{!}{
\begin{tabular}{r?r|r|r|r?r|r|r|r?r|r|r|r}
\toprule
& \multicolumn{4}{c?}{\textbf{Compressed size}} & \multicolumn{4}{c?}{\textbf{Compression time}} & \multicolumn{4}{c}{\textbf{Decompression time}} \\
\multicolumn{1}{c?}{\textbf{File size}} & \multicolumn{1}{c|}{\tool{LZW}} & \multicolumn{1}{c|}{\tool{repair}} & \multicolumn{1}{c|}{\tool{zstd}} & \multicolumn{1}{c?}{\tool{lz4}} & \multicolumn{1}{c|}{\tool{LZW}} & \multicolumn{1}{c|}{\tool{repair}} & \multicolumn{1}{c|}{\tool{zstd}} & \multicolumn{1}{c?}{\tool{lz4}} & \multicolumn{1}{c|}{\tool{LZW}} & \multicolumn{1}{c|}{\tool{repair}} & \multicolumn{1}{c|}{\tool{zstd}} & \multicolumn{1}{c}{\tool{lz4}} \\
\midrule
1MB & \loser{13} & \winner{0.072} & \second{0.135} & 4.1 & 0.01 & \loser{0.08} & \second{0.01} & \winner{0.004} & \loser{0.01} & 0.01 & \second{0.003} & \winner{0.003} \\
\midrule
500MB & 950 & \second{0.09} & \winner{44} & \loser{2000} & 14.5 & \loser{53.1} & \second{0.99} & \winner{0.28} & \loser{3.9} & 3.3 & \second{0.24} & \winner{0.2} \\
\bottomrule
\end{tabular}}
\caption{Sizes (in KB) of the compressed files and (de)compression times (in seconds). Maximum compression levels enabled.
(Blue = best; bold black = second best; red = worst).}
\label{table:compressionContrived}
\end{table}
As expected this contrived file results in really high compression ratios.
As we show next, this scenario evidences the virtues of \tool{zearch} which is capable of searching in 500MB of data by processing a grammar consisting of 57 rules.
Table~\ref{table:comparisonContrived} summarizes the results obtained when searching the 500 MB contrived file for different regular expressions.\footnote{We run each experiment 30 times and report the point estimate of the confidence interval with 95\pct\, confidence.}
For each expression, we report the time required to\begin{myEnumIL}
\item search on the compressed data without decompression,
\item search on the uncompressed data and
\item search with the best implementation of the parallel decompress and search approach.\footnote{The best implementation might vary depending on the expression.}
\end{myEnumIL}
\begin{table}[!ht]
\centering
\renewcommand{\arraystretch}{0.8}
\setlength{\tabcolsep}{4pt}
\setlength{\extrarowheight}{.2ex}
\resizebox{!}{50pt}{
\begin{tabular}{l?r|r?r|r?r}
\toprule
\multicolumn{1}{c?}{\textbf{Expression}} & \multicolumn{1}{c|}{\tool{zearch}} & \multicolumn{1}{c?}{\tool{GNgrep}} & \multicolumn{1}{c|}{\tool{grep}} & \multicolumn{1}{c?}{\tool{hyperscan}} & \multicolumn{1}{c}{\tool{decompress and search}} \\
\midrule
``\tool{experiment}'' & \winner{2.267} & \loser{14K} & \second{1352} & \loser{1784} & 1652 \\
\cmidrule{1-6}
``\tool{This}'' & \winner{2.533} & \loser{14K} & \second{764} & 2166 & 959 \\
\cmidrule{1-6}
``\tool{.}'' & \winner{2.467} & \loser{14K} & \second{703} & \loser{1276} & 886 \\
\cmidrule{1-6}
``\tool{[a-z]\{4\}}'' & \winner{2.667} & \loser{14K} & \second{1138} & 1270 & 1360 \\
\cmidrule{1-6}
``\tool{[a-z]\{11\}}'' & \winner{2.233} & \second{37} & \loser{1690} & 1312 & 1397 \\
\cmidrule{1-6}
``\tool{That}'' & \winner{2.433} & \second{37.2} & \loser{607} & 239 & 444 \\
\bottomrule
\end{tabular}}
\caption{Time (ms) required to report the number of lines matching a regular expression in the 500 MB large contrived file.
(Blue = fastest; bold black = second fastest; red = slowest).}
\label{table:comparisonContrived}
\end{table}
As shown by Tables~\ref{table:compressionContrived} and~\ref{table:comparisonContrived}, our tool is about \emph{10 times faster} at searching than \tool{lz4} at decompression.
Therefore, \tool{zearch} clearly outperforms any decompress and search approach, even if decompression and search are done in parallel.
This is to be expected since \tool{zearch} only needs to process 90 Bytes of data (the size of the grammar) while the rest of the tools need to process 500 MB.
Similarly, \tool{GNgrep} processes 950 KB of data (the size of the \tool{LZW}-compressed data).
As a consequence, when there are no matches of the expression, \tool{GNgrep} is faster than decompression as evidenced by the last two rows of Table~\ref{table:comparisonContrived}.
However, \tool{GNgrep} reports the number of matching lines by explicitly finding the positions in the data where the match begins, which is rather inefficient when all lines of the file contain a match, as evidenced by the first 4 rows of Table~\ref{table:comparisonContrived}.
\section{Fine-Grained Analysis of the Implementation}\label{sec:complexity}
The grammars produced by \tool{repair} break the definition of SLP for the sake of compression by allowing the axiom rule to have more than two symbols on the right hand side.
This is due to the fact that the axiom rule is built with the remains of the input text after creating all grammar rules.
Typically, the length of the axiom is greater than or equal to the number of rules in the SLP, so the way in which the axiom is processed heavily influences the performance of \tool{zearch}.
On the other hand, our experiments show that the performance of \tool{zearch} is typically far from its worst case complexity.
This is because the worst case scenario assumes each string generated by a grammar variable labels a path between each pair of states of the automaton.
However, we only observed such behavior in contrived examples.
\subsection{Processing the Axiom Rule.}
Algorithm \AlgCountLines could process the axiom rule $X_{\len{V}} \to σ$ by building an SLP with the set of rules
\[\{S_{1} \to (σ)_1(σ)_2\} \cup \{S_i \to S_{i{-}1}(σ)_{i{+}1} \mid i = 2\ldots \len{σ}{-}2\} \cup \{X_{\len{V}} \to S_{\len{σ}{-}2}(σ)_\dag\} \enspace .\]
However it is more efficient to compute the set of states reachable from the initial ones when reading the string generated with $S_1$ and update this set for each symbol $(σ)_i$.
To perform the counting note that $\counting{S_i}$ is only used to compute $\counting{S_{i+1}}$ and can be discarded afterwards.
This yields an algorithm running in $\mathcal{O}\left(\len{V}\cdot s^3{+}\len{σ}\cdot s^2\right)$ time using $\mathcal{O}\left(\len{V}\cdot s^2\right)$ space where $\len{V}$ is the number of rules of the input grammar and $X_{\len{V}} \to σ$ its axiom.
\subsection{Number of Operations Performed by the Algorithm}
Define $s_{τ,q}=\len{\{q' \mid (q,τ,q') \in δ\}}$ and $s_τ = \sum_{q \in Q}s_{τ,q}$ and let us recall the complexity of Algorithm \AlgCountLines according to the data structures described in Section~\ref{sec:DataStructures}.
The algorithm iterates over the $\len{V}$ rules of the grammar and, for each of them:
\begin{myEnumI}
\item Initializes matrix $\mathcal{K}$ with $s_{β_\ell}$ elements\footnote{We need to set up to $s$ sentinel values for the rows in $\mathcal{K}$ not used for storing $s_{β_\ell}$}
\item Iterates through $\mathcal{K}[q'][0…s_{β_\ell,q'}]$ for each pair $(q_1,q') \in \edges{α_\ell}$.
\end{myEnumI}
Then it processes the axiom rule iterating, for each symbol $(σ)_i$, through $s_{(σ)_i}$ transitions.
These are all the operations performed by the algorithm with running time dependent on the size of the input.
Hence, Algorithm \AlgCountLines runs in
\[\mathcal{O}\left(\sum_{\ell=1}^{\len{V}}\tilde{s}_\ell+\sum_{i=1}^{\len{σ}}s_{(σ)_i} \right) \text{ time, where } \tilde{s}_\ell=s_{β_\ell}+s+\hspace{-10pt}\sum_{(q_1,q') \in \edges{α_\ell}}\hspace{-10pt}\left(1{+}s_{β_\ell,q'}\right)\enspace .\]
Note that $\tilde{s}_\ell \leq s^3$, $s_{(σ)_i} \leq s^2$ so the worst case time complexity of the algorithm is $\mathcal{O}(\len{V}\cdot s^3+\len{σ}\cdot s^2)$.
However, in the experiments we observed that $\tilde{s}_\ell$ and $s_{(σ)_i}$ are usually much smaller than $s^3$ and $s^2$, respectively, as reported in Table~\ref{table:Behavior}.
\begin{table}[!ht]
\centering
\resizebox{\textwidth}{!}{
\renewcommand{\arraystretch}{0.60}
\setlength{\tabcolsep}{4pt}
\setlength{\extrarowheight}{1ex}
\small
\begin{tabular}{lr?r|rrrrr?r|rrrrr}
\toprule
\multicolumn{1}{c}{\multirow{2}{*}{\textbf{Expression}}} & \multicolumn{1}{c?}{\multirow{2}{*}{$s$}} & \multicolumn{1}{c|}{\multirow{2}{*}{$s^3$}} & \multicolumn{5}{c?}{percentiles for $\tilde{s}_{\ell}$} & \multicolumn{1}{c|}{\multirow{2}{*}{$s^2$}} & \multicolumn{5}{c}{percentiles for $s_{(σ)_i}$} \\
\multicolumn{1}{c}{} & \multicolumn{1}{c?}{} & \multicolumn{1}{c|}{} & {\footnotesize $50$\pct} & {\footnotesize $75$\pct} & {\footnotesize $95$\pct} & {\footnotesize $98$\pct} & {\footnotesize$100$\pct} & \multicolumn{1}{c|}{} & {\footnotesize $50$\pct} & {\footnotesize $75$\pct} & {\footnotesize $95$\pct} & {\footnotesize $98$\pct} & {\footnotesize$100$\pct}\\
\midrule
{\small ``\tool{what}''} & 5 & 125 & 0 & 0 & 1 & 1 & 9 & 25 & 0 & 0 & 1 & 1 & 2 \\
{\small ``\tool{HTTP}''} & 5 & 125 & 0 & 0 & 0 & 0 & 10 & 25 & 0 & 0 & 0 & 0 & 2 \\
{\small ``\tool{.}''} & 2 & 8 & 0 & 0 & 0 & 0 & 4 & 4 & 0 & 0 & 0 & 0 & 1 \\
{\small ``\tool{I .* you} ''} & 9 & 729 & 3 & 13 & 16 & 18 & 29 & 81 & 3 & 3 & 5 & 5 & 9 \\
{\small ``\tool{ [a-z]{4} }''} & 7 & 343 & 2 & 10 & 11 & 12 & 18 & 49 & 1 & 1 & 2 & 2 & 4 \\
{\small ``\tool{ [a-z]*[a-z]{3} }''} & 7 & 343 & 3 & 11 & 14 & 18 & 31 & 49 & 1 & 3 & 3 & 4 & 8 \\
{\small ``\tool{ [0-9]\{4\}}''} & 6 & 216 & 8 & 8 & 8 & 8 & 18 & 36 & 1 & 1 & 1 & 1 & 5 \\
{\small ``\tool{.*[A-Za-z ]\{5\}}''} & 7 & 343 & 14 & 25 & 48 & 48 & 48 & 49 & 11 & 14 & 14 & 14 & 14 \\
{\small ``\tool{.*[A-Za-z ]\{10\}}''} & 12 & 1728 & 29 & 51 & 86 & 95 & 98 & 144 & 16 & 26 & 29 & 29 & 29 \\
{\small ``\tool{.*[A-Za-z ]\{20\}}''} & 22 & 10648 & 57 & 87 & 132 & 153 & 198 & 484 & 23 & 38 & 52 & 58 & 59 \\
{\small ``\tool{((((.)*.)*.)*.)*}''} & 6 & 216 & 12 & 29 & 209 & 209 & 209 & 36 & 29 & 29 & 29 & 29 & 29 \\
{\small ``\tool{(((((.)*.)*.)*.)*.)*}''} & 7 & 343 & 14 & 34 & 249 & 249 & 249 & 49 & 34 & 34 & 34 & 34 & 34 \\
\bottomrule
\end{tabular}
}
\caption{Analysis of the values $\tilde{s}_\ell$ and $s_{(σ)_i}$ obtained when considering different regular expressions to search \emph{Subtitles} (100 MB uncompressed long).
The fifth column of the fourth row indicates that when considering the expression ``\tool{I .* you}'', for 75\% of the grammar rules we have $\tilde{s}_\ell \leq 13$ while $s^3=729$.}
\label{table:Behavior}
\end{table}
As the experiments show, \tool{zearch} exhibits almost linear behavior with respect to the size of the automaton built from the expression.
Nevertheless, there are regular expressions that trigger the worst case behavior (last two rows in Table~\ref{table:Behavior}), which cannot be avoided due to the result of \citet{Amir2018FineGrained} described before.
\section{Fine-Grained Complexity}
In Section~\ref{sec:provingCorrectness} we obtained upper bounds for the worst-case time complexity of our algorithm depending on whether the automaton built from the expression is an NFA or a DFA.
However, we observed in Section~\ref{sec:complexity} that the actual behavior of our implementation is, in general, far from its worst-case scenario (see Table~\ref{table:Behavior}).
This is due to the fact that the worst-case scenario assumes an NFA where each pair of states are connected by a transition for each symbol in the alphabet but this is rarely the type of automata obtained from non-contrived regular expressions.
This difference between the worst-case time complexity of the algorithm and its behavior in practice also appears when considering the problem of searching with regular expressions on plain text.
Indeed, this problem led \citet{Backurs2016Hard} to analyze the complexity of searching on plain text for different classes of regular expressions.
In their work, \citet{Backurs2016Hard} restrict themselves to \demph{homogeneous regular expressions}, i.e. regular expressions in which operators at the same level of the formula are equal\footnote{Write the regular expression as a tree. The expression is homogeneous if all non-leaf nodes at the same depth have the same label.}, which are grouped in classes depending on the sequence of the operators involved.
Then, they obtain a lower bound for the search complexity for each class of expressions by building reductions from the \demph{Orthogonal Vector Problem}\index{OVP} (OVP for short) which, given two sets of vectors \(A,B \subseteq \{0,1\}^d\) in \(d\) dimensions, with \(N\) and \(M\) elements respectively, asks whether there exists \(a\in A\) and \(b \in B\) such that \(a\cdot b =0\).
\begin{conjecture*}[OV Conjecture~\cite{bringmann2015quadratic}]
There are no reals \(\varepsilon, d > 0\) such that the OVP in \(d < N^{o(1)}\) dimensions with \(M = \Theta(N^α)\) for \(α \in (0,1]\) can be solved in \(\mathcal{O}((N\cdot M)^{1-\varepsilon})\) time.
\end{conjecture*}
The idea behind the conjecture is that any algorithm defying it would yield an algorithm for SAT violating the Strong Exponential Time Hypothesis.
\citet{Backurs2016Hard} relied on the OV conjecture to determine whether a search problem is \emph{easy}\index{easy searching problem}, i.e. there is an algorithm running in $\mathcal{O}(T+s)$ time where $T$ is the size of the input text and $s$ is the number of states of the automaton, or \emph{hard}\index{hard searching problem}, i.e. assuming the Strong Exponential Time Hypothesis (SETH) any algorithm has $Ω((T\cdot s)^{1-ε})$ time complexity with $ε > 0$.
This analysis can be extended to consider searching on compressed text and decide whether our implementation is optimal on different classes of homogeneous regular expressions.
To do that, we apply the following remark, inherited from~\citet{Amir2018FineGrained}, who used the OVP to analyze whether the decompress and solve approach can be outperformed by manipulating the compressed text for different problems.
\begin{remark}\label{remark:OVP}
Let $A=\{a_1,…,a_{N}\}\subseteq\{0,1\}^d$ and $B=\{b_1,…,b_{M}\} \subseteq\{0,1\}^d$ be an instance of the OVP in $d \leq N^{o(1)}$ dimensions with \(M = \mathcal{O}(N)\).
We define a string \(T\) with a representation as an SLP of size $t = \mathcal{O}(N\cdot d)$ and a regular expression \(π\) of size \(s = \mathcal{O}(M\cdot d)\) such that the string contains a match for the expression if{}f we have a solution for the OVP.
If there is an algorithm for regular expression searching on compressed text that operates, for a class of regular expressions that includes \(π\), in $\mathcal{O}((t\cdot s)^{1{-}ε})$ with \(ε > 0\) then it would solve the OVP in $\mathcal{O}((N\cdot M)^{1{-}ε})$ (since the dimension is fixed) which contradicts the OV conjecture.
\end{remark}
\subsection{Complexity of Searching on Compressed Text}
Given a regular expression, we say it is \emph{homogeneous of type}\index{homogeneous expression} ``\verb!|+!'' if{}f the regular expression is a disjunction of \verb!+! operators and terminals.
We extend this notation to any combination of operators.
For instance, the expressions ``\tool{a+b+}'' and ``\tool{a+b}'' are homogeneous of type ``\tool{·+}'' while ``\tool{a+b*}'' is not homogeneous.
Recall that the \emph{size} of a regular expression is the number of operators and terminals used to define the expression.
For instance, ``\tool{a+b+}'' and ``\tool{a+b}'' have size 4 and 3, respectively.
The following three results use Remark~\ref{remark:OVP} to show that the time complexity of regular expression searching on compressed text is \(Ω(t\cdot s)\), where \(t\) is the size of the SLP and \(s\) is the size of the expression, when the regular expression is homogeneous of type ``\verb!·+!'', ``\verb!·*!'' or ``\verb!·|!''.
\begin{theorem}\label{theorem:HomogeneousDotPlus}
There is no algorithm for searching with a regular expression on grammar-compressed text that operates in \(\mathcal{O}((t\cdot s)^{1-\varepsilon})\) time with \(\varepsilon > 0\), where \(t\) is the size of the compressed text and \(s\) is the size of the regular expression, when the expression is homogeneous of type ``\verb!·+!''.
\end{theorem}
\begin{proof}
Let $A=\{a_1,…,a_{N}\}\subseteq\{0,1\}^d$ and $B=\{b_1,…,b_{M}\} \subseteq\{0,1\}^d$ be an instance of the OVP in $d \leq N^{o(1)}$ dimensions with \(M = \mathcal{O}(N)\).
Without loss of generality, assume the dimension is even.
Consider the regular expression
\[π \ud \text{``} F(b_1)zF(b_2)z…zF(b_M)z\text{''}\]
on the alphabet \(Σ = \{x,y,z\}\) with
\(\def\arraystretch{0.3}
F(b_i) \ud f(b_i,1)f(b_i,2)…f(b_i,d)\) and
\[f(b,j) \ud \left\{ \begin{array}{lcc}
xx^+ & \text{if} & (b)_j = 1 \text{ and \(j\) is even} \\
x^+ & \text{if} & (b)_j = 0 \text{ and \(j\) is even} \\
yy^+ & \text{if} & (b)_j = 1 \text{ and \(j\) is odd} \\
y^+ & \text{if} & (b)_j = 0 \text{ and \(j\) is odd}
\end{array}
\right. \enspace ,\]
where \((b)_j\) is the \(j\)-th component of the vector \(b\).
Clearly, \(π\) is homogeneous of type ``\verb!·+!'' and has size \(s = \mathcal{O}(M \cdot d)\).
Now, we define an SLP $\mathcal{P}$ on \(Σ = \{x,y,z\}\) such that $\lang{\mathcal{P}}=\{w\}$ with
\[w \ud \left((xxyy)^{\frac{d}{2}}z\right)^{M{-}1}\tilde{F}(a_1)z…\left((xxyy)^{\frac{d}{2}}z\right)^{M{-}1}\tilde{F}(a_N)z\left((xxyy)^{\frac{d}{2}}z\right)^{M{-}1} \enspace\]
where \(\tilde{F}(a_i) \ud \tilde{f}(a_i,1)\tilde{f}(a_i,2)…\tilde{f}(a_i,d)\) and
\[\tilde{f}(a,j) \ud \left\{ \begin{array}{lcc}
x & \text{if} & (a)_j = 1 \text{ and \(j\) is even} \\
xx & \text{if} & (a)_j = 0 \text{ and \(j\) is even} \\
y & \text{if} & (a)_j = 1 \text{ and \(j\) is odd} \\
yy & \text{if} & (a)_j = 0 \text{ and \(j\) is odd}
\end{array}
\right. \enspace .\]
The substring $\left((xxyy)^{\frac{d}{2}}z\right)^{M{-}1}$ can be generated with an SLP of size $\mathcal{O}(d+\log M)$, hence $w$ can be compressed as an SLP $\mathcal{P}$ of size $t =\mathcal{O}(N\cdot d + d + \log M)$ and, since \(d \leq N^{o(1)}\) is a constant and \(M = \mathcal{O}(N)\), we find that $t = \mathcal{O}(N)$.
Clearly, \(π\) and \(\mathcal{P}\) can be built in \(\mathcal{O}(M \cdot d)\) and \(\mathcal{O}(N \cdot d)\) time, respectively.
Finally, we show that there exists $a \in A$, $b \in B$ such that $a⋅b = 0$ if{}f there is a factor of $w$ that matches $π$.
Let $a_{i_1} \in A$ and $b_{i_2} \in B$.
Then $a_{i_1}⋅b_{i_2} = 0$ if{}f
\begin{myEnumI}
\item The factor \(\left((xxyy)^{\frac{d}{2}}z\right)^{i_2-1}\) of \(w\) that \emph{precedes} the factor \(\tilde{F}(a_{i_1})z\) matches the subexpression \linebreak``\(F(b_1)z\ldots F(b_{i_2{-}1})z\)''.
\item The factor \(\tilde{F}(a_{i_1})z\) of \(w\) matches the subexpression ``\(F(b_{i_2})z\)''.
\item The factor \(\left((xxyy)^{\frac{d}{2}}z\right)^{M{-}i_2}\) of \(w\) that \emph{succeeds} the factor \(\tilde{F}(a_{i_1})z\) matches the subexpression ``\(F(b_{i_2{+}1})z\ldots F(b_M)z\)''.
\end{myEnumI}
It follows from Remark~\ref{remark:OVP} that there is no algorithm for searching with an homogeneous regular expression of type ``\verb!·+!'' working on $\mathcal{O}((t\cdot s)^{1{-}ε})$ time.
Finally, note that if the dimension of the OVP is odd then it suffices to replace the \(\left((xxyy)^{\frac{d}{2}}z\right)^{M{-}1}\) factors from \(w\) by \(\left((xxyy)^{\frac{d{-}1}{2}}xxz\right)^{M{-}1}\).
\end{proof}
Note that for any homogeneous regular expression of type ``\verb!·+!'' of size \(s\), we can build in \(\mathcal{O}(s)\) time an equivalent homogeneous regular expression of type ``\verb!·*!'' and size \(\mathcal{O}(s)\).
Therefore, we obtain the following corollary from Theorem~\ref{theorem:HomogeneousDotPlus}.
\begin{corollary}\label{corol:HomogeneousDotStar}
There is no algorithm for searching with a regular expression on grammar-compressed text that operates in \(\mathcal{O}((t\cdot s)^{1-\varepsilon})\) with \(\varepsilon > 0\), where \(t\) is the size of the compressed text and \(s\) is the size of the regular expression, when the expression is homogeneous of type ``\verb!·*!''.
\end{corollary}
\begin{theorem}\label{theorem:HomogeneousDotOr}
There is no algorithm for searching with regular expressions on grammar-compressed text that operates in \(\mathcal{O}((t\cdot s)^{1-\varepsilon})\) time with \(\varepsilon > 0\), where \(t\) is the size of the compressed text and \(s\) is the size of the regular expression, when the expression is homogeneous of type ``\verb!·|!''.
\end{theorem}
\begin{proof}
The proof is identical to that of Theorem~\ref{theorem:HomogeneousDotPlus} but considering the expression
\[π \ud \text{``}F(b_1)zF(b_2)z…zF(b_M)z\text{''}\]
on the alphabet \(Σ=\{0,1,z\}\) with
\[F(b_i)=f(b_i,1)f(b_i,2)…f(b_i,d) \;\text{ and }\; f(b, j) = \left\{ \begin{array}{lcc}
0 & \text{if} & (b)_j = 1 \\
0|1 & \text{if} & (b)_j = 0
\end{array}
\right.\]
and the word
\[w \ud \left(0^dz\right)^{M{-}1}a_1z\left(0^dz\right)^{M{-}1}a_2z…\left(0^dz\right)^{M{-}1}a_Nz\left(0^dz\right)^{M{-}1} \enspace .\]
Note that, unlike the proof of Theorem~\ref{theorem:HomogeneousDotPlus}, this proof does not depend on the parity of the dimension of the OVP.
\end{proof}
\subsection{Complexity of Our Implementation}
In the following, we analyze the complexity of the implementation of Algorithm \AlgCountLines described in Section~\ref{sec:implementation} when the input regular expression is homogeneous of type ``\verb!·+!'', ``\verb!·*!'' or ``\verb!·|!''.
As explained in Section~\ref{sec:implementation}, \tool{zearch} uses \tool{libfa}, which applies Thompson's algorithm~\cite{thompson1968programming} with on-the-fly $ε$-removal, to build an NFA for the input regular expression.
However, given a regular expression of size \(s\) we can decide in \(\mathcal{O}(s)\) time whether the expression is homogeneous of type ``\verb!·+!'', ``\verb!·*!'' or ``\verb!·|!'' and, as we show next, use a specialized algorithm for building a DFA with \(\mathcal{O}(s)\) states in \(\mathcal{O}(s)\) time for the given expression.
Therefore, \tool{zearch} admits a straightforward modification that allows it to search on grammar-compressed text with homogeneous regular expressions of type ``\verb!·+!'', ``\verb!·*!'' or ``\verb!·|!'' in \(\mathcal{O}(t \cdot s)\) time, where \(s\) is the size of the expression.
Next, we show how to build such DFAs from the given regular expressions.
First, observe that every homogeneous regular expression of type ``\verb!·+!'' of size \(s\) such that it contains no concatenation of the form ``\verb!a+a+!'' can be captured by a DFA with \(s{+}1\) states as we show next.
Let \(a_1,\ldots,a_n\) be the sequence of letters that appear in an homogeneous expression of type ``\verb!·+!''.
Then,
\[\mathcal{D} = \tuple{\{q_i \mid 0 \leq i \leq n\}, Σ, \{(q_i,a_i,q_i), (q_{i{-}1},a_{i},q_{i}) \mid 1 \leq i \leq n\}, \{q_0\}, \{q_{n}\}}\]
is a DFA for the given expression.
If the expression contains a concatenation of the form ``\tool{a+a+}'' then \(\mathcal{D}\) is no longer deterministic.
In that case, we can replace ``\tool{a+a+}'' by ``\tool{aa+}'' and, therefore, remove from \(\mathcal{D}\) the self-loop corresponding to the first \(a\).
It is straightforward to check that this change results in a deterministic automaton and it does not alter the generated language, hence it does not alter the result of the search.
Figure~\ref{fig:DFAc+} shows the DFA for an homogeneous regular expression of type ``\verb!·+!''.
\begin{figure}
\caption{DFA for the regular expression ``\tool{a+b+b+a+c+}''.}
\label{fig:DFAc+}
\end{figure}
On the other hand, let \(a_1,\ldots,a_n\) be the sequence of letters that appear in an homogeneous expression of type ``\verb!·*!''.
For every \(a\), let \(j_a^k\) be the smallest index such that \(k \leq j_a^k \leq n \) and \(a = a_{j_a^k}\).
Then, the DFA obtained by making every state of \(\mathcal{D}\) final and adding the transitions \(\{(q_{i-1},a_k,q_{j_{a_k}^i}) \mid 1 \leq i \leq k \leq n\}\), is an automaton for the given expression.
Note that, if the expression contains a concatenation of the form ``\tool{a*a*}'', which will break the determinism of our automata, then we can safely replace it by ``\tool{a*}''.
Figure~\ref{fig:DFAc*} shows the DFA for an homogeneous regular expression of type ``\verb!·*!''.
\begin{figure}
\caption{DFA for the regular expression ``\tool{a*b*b*a*c*}''.}
\label{fig:DFAc*}
\end{figure}
Finally, it is straightforward to build a DFA for an homogeneous regular expression of the type ``\verb!·|!'' with \(n{+}1\) states where \(n\) is the number of concatenations.
Figure~\ref{fig:DFAco} shows the DFA for an homogeneous regular expression of type ``\verb!·|!''.
\begin{figure}
\caption{DFA for the regular expression ``\tool{(a|b)(a|c)(b|c)(a|c)}''.}
\label{fig:DFAco}
\end{figure}
It is worth remarking that the DFA of Figure~\ref{fig:DFAco} is the result of applying Thompson's construction on the input expression.
As a consequence, \tool{zearch} already builds a DFA when the input expression is homogeneous of type ``\verb!·|!'' and, therefore, it performs the search in \(\mathcal{O}(t \cdot s)\) time.
We conclude that \tool{zearch} admits a straightforward modification to exhibit \(\mathcal{O}(t\cdot s)\) time complexity when working on homogeneous regular expressions of types ``\verb!·+!'', ``\verb!·*!'' and ``\verb!·|!'' and, therefore, be nearly optimal for these classes of regular expressions.
{}
{}
\chapter{Building Residual Automata}
\label{chap:RFA}
As shown in Chapter~\ref{chap:prel}, residual automata (RFAs for short) are a class of automata that lies between deterministic (DFAs) and nondeterministic automata (NFAs).
They share with DFAs a significant property: the existence of a canonical minimal form for any regular language.
On the other hand, they share with NFAs the existence of automata that are exponentially smaller (in the number of states) than the corresponding minimal DFA for the language.
These properties make RFAs specially appealing in certain areas of computer science such as Grammatical Inference~\cite{denis2004learning,Kasprzik2011Inference}.
RFAs were first introduced by \citet{denis2000residual,denis2002residual} who defined an algorithm for \emph{residualizing} an automaton (see Section~\ref{sec:FSA}), showed that there exists a \emph{unique} \emph{canonical} RFA for every regular language and proved that the residual-equivalent of double-reversal method for DFAs~\cite{brzozowski1962canonical} holds for RFAs, i.e.\ residualizing an automaton \(\mathcal{N}\) whose reverse is residual yields the canonical RFA for \(\lang{\mathcal{N}}\).
Later, \citet{tamm2015generalization} generalized the double-reversal method for RFAs in the same lines as that of \citet{Brzozowski2014} for the double-reversal method for DFAs.
The similarities between the determinization and residualization (see Section~\ref{sec:FSA}) operations and between the double-reversal methods for DFAs and RFAs evidence the existence of a relationship between these two classes of automata.
However, the connection between them is not clear and, as a consequence, the relation between the generalization by \citet{Brzozowski2014} of the double-reversal method for DFAs and the one by \citet{tamm2015generalization} for RFAs is not immediate.
In this chapter, we show that \emph{quasiorders} are fundamental to RFAs as \emph{congruences} are for DFAs, which evidences the relation between these two classes of automata.
To do that, we define a framework of finite-state automata constructions based on \emph{quasiorders} over words.
As explained in Chapter~\ref{chap:related}, \citet{ganty2019congruence} studied the problem of building DFAs using congruences, i.e., equivalence relations over words with good properties w.r.t. concatenation, and derived several well-known results about minimization of DFAs, including the double-reversal method and its generalization by \citet{Brzozowski2014}.
While the use of congruences over words suited for the construction of a subclass of residual automata, namely, \emph{deterministic} automata, these are no longer useful to describe the more general class of \emph{nondeterministic} residual automata.
By moving from \emph{congruences} to \emph{quasiorders}, we are able to introduce nondeterminism in our automata constructions.
We consider quasiorders with good properties w.r.t. \emph{right} and \emph{left} concatenation.
In particular, we define the so-called right \emph{language-based} quasiorder, whose definition relies on a given regular language; and the right \emph{automata-based} quasiorder, whose definition relies on a finite representation of the language, i.e., an automaton.
We also give counterpart definitions for quasiorders that behave well with respect to \emph{left} concatenation.
When instantiating our automata constructions using the right language-based quasiorder, we obtain the canonical RFA for the given language; while using the right automata-based quasiorder yields an RFA for the language generated by the automaton that has, at most, as many states as the RFA obtained by the residualization operation defined by \citet{denis2002residual}.
Similarly, \emph{left} automata-based and language-based quasiorders yield co-residual automata, i.e., automata whose reverse is residual.
Our quasiorder-based framework allows us to give a simple correctness proof of the double-reversal method for building the canonical RFA.
Moreover, it allows us to generalize this method in the same fashion as \citet{Brzozowski2014} generalized the double-reversal method for building the minimal DFA.
Specifically, we give a characterization of the class of automata for which our automata-based quasiorder construction yields the canonical RFA.
We compare our characterization with the class of automata, defined by \citet{tamm2015generalization}, for which the residualization operation of \citet{denis2002residual} yields the canonical RFA and show that her class of automata is strictly contained in the class we define.
Furthermore, we highlight the connection between the generalization of \citet{Brzozowski2014} and the one of \citet{tamm2015generalization} for the double-reversal methods for DFAs and RFAs, respectively.
Finally, we revisit the problem of learning RFAs from a quasiorder-based perspective.
Specifically, we observe that the NL\(^*\) algorithm defined by \citet{bollig2009angluin}, inspired by the popular Angluin's L\(^*\) algorithm for learning DFAs~\cite{angluin1987learning}, can be seen as an algorithm that starts from a quasiorder and refines it at each iteration.
At the end of each iteration, the automaton built by NL\(^*\) coincides with our quasiorder-based automata construction applied to the refined quasiorder.
\section{Automata Constructions from Quasiorders}
\label{sec:automataConstructions}
In this chapter, we consider monotone quasiorders on \(\Sigma^*\) (and their corresponding closures) and we use them to define RFAs constructions for regular languages.
The following lemma gives a characterization of right and left quasiorders.
\begin{lemma}
\label{lemma:QObwComplete}
The following properties hold:
\begin{myEnumA}
\item \(\leqslant^{r}\) is a right quasiorder if{}f \(ρ_{\leqslant^{r}}(u)\, v \subseteq ρ_{\leqslant^{r}}(uv)\), for all \(u,v \in \Sigma^*\).
\item\(\leqslant^{\ell}\) is a left quasiorder if{}f \(v\, ρ_{\leqslant^{\ell}}(u) \subseteq ρ_{\leqslant^{\ell}}(vu)\), for all \(u,v \in \Sigma^*\).
\end{myEnumA}
\end{lemma}
\begin{proof}
\begin{myEnumA}
\item To simplify the notation, we denote \(ρ_{\leqslant^{r}}\), the closure induced by \(\leqslant^{r}\), by \(ρ\).
\begin{myEnumA}
\item[(\(\Rightarrow\))]
Let \(x \in ρ(v)u\), i.e. \(x = \tilde{v}u\) with \(v \leqslant^{r} \tilde{v}\).
Since \(\leqslant^{r}\) is a right quasiorder and \(v \leqslant^{r} \tilde{v}\) then \(vu \leqslant^{r} \tilde{v}u\).
Therefore \(x \in ρ(vu)\).
\item[(\(\Leftarrow\))]
Assume that for each \(u,v \in \Sigma^*\) and \(\tilde{v} \in ρ(v)\) we have that \(\tilde{v}u \in ρ(vu)\).
Then, \(v \leqslant^{r} \tilde{v} \Rightarrow vu \leqslant^{r} \tilde{v}u\).
\end{myEnumA}
\item To simplify the notation we denote \(ρ_{\leqslant^{\ell}}\), the closure induced by \(\leqslant^{\ell}\), by \(ρ\).
\begin{myEnumA}
\item[(\(\Rightarrow\))]
Let \(x \in uρ(v)\), i.e. \(x = u\tilde{v}\) with \(v \leqslant^{\ell} \tilde{v}\).
Since \(\leqslant^{\ell}\) is a left quasiorder and \(v \leqslant^{\ell} \tilde{v}\) then \(uv \leqslant^{\ell} u\tilde{v}\).
Therefore \(x \in ρ(uv)\).
\item[(\(\Leftarrow\))]
Assume that for each \(u,v \in \Sigma^*\) and \(\tilde{v} \in ρ(v)\) we have that \(u\tilde{v} \in ρ(uv)\).
Then \(v \leqslant^{\ell} \tilde{v} \Rightarrow uv \leqslant^{\ell} u\tilde{v}\).
\end{myEnumA}
\end{myEnumA}
\end{proof}
Given a regular language \(L\), we are interested in left and right \(L\)-consistent quasiorders.
We use the principals of these quasiorders as states of automata constructions that yield RFAs and co-RFAs generating the language \(L\).
Therefore, in the sequel, we only consider quasiorders that induce a finite number of principals, i.e., quasiorders \(\leqslant\) such that the equivalence \(\mathord{\sim} \ud \mathord{\leqslant} \cap (\mathord{\leqslant})^{-1}\) has finite index.
Next, we introduce the notion of \emph{\(L\)-composite principals} which, intuitively, correspond to states of our automata constructions that can be removed without altering the generated language.
\begin{definition}[\(L\)-Composite Principal]
\label{def:CompositeClosed}
Let \(L\) be a regular language and let \(\leqslant^{r}\) (resp. \(\leqslant^{\ell}\)) be a right (resp.\ left) quasiorder on \(Σ^*\).
Given \(u \in \Sigma^*\), the principal \(ρ_{\leqslant^{r}}(u)\) (resp. \(ρ_{\leqslant^{\ell}}(u)\)) is \(L\)-\emph{composite} if{}f
\begin{align*}
u^{-1}L & = \hspace{-10pt}\bigcup_{x\in\Sigma^*,\; x \lneq^{r} u }\hspace{-10pt} x^{-1}L &
\text{\emph{(}resp. }Lu^{-1} & = \hspace{-10pt}\bigcup_{x\in\Sigma^*,\; x \lneq^{\ell} u }\hspace{-10pt} Lx^{-1}\text{\emph{)}}
\end{align*}
If \(ρ_{\leqslant^{r}}(u)\) (resp. \(ρ_{\leqslant^{\ell}}(u)\)) is not \(L\)-composite then it is \emph{\(L\)-prime}.\eod
\end{definition}
We sometimes use the terms \emph{composite} and \emph{prime principal} when the language \(L\) is clear from the context.
Observe that, if \(ρ_{\leqslant^{r}}(u)\) is \(L\)-composite, for some \(u \in \Sigma^*\), then so is \(ρ_{\leqslant^{r}}(v)\), for every \(v \in \Sigma^*\) such that \(u \sim^{r} v\).
The same holds for a left quasiorder \(\leqslant^{\ell}\).
Given a regular language \(L\) and a right \(L\)-consistent quasiorder \(\leqslant^{r}\), the following automata construction yields an RFA that generates exactly \(L\).
\begin{definition}[Automata construction \(\mathsf{H}^{r}(\leqslant^{r}, L)\)]
\label{def:right-const:qo}
Let \(\leqslant^{r}\) be a right quasiorder and let \(L \subseteq \Sigma^*\) be a language.
Define the automaton \(\mathsf{H}^{r}(\leqslant^{r}, L) \ud \tuple{Q, \Sigma, \delta, I, F}\) where \(Q = \{ρ_{\leqslant^{r}}(u) \mid u \in Σ^*, \; ρ_{\leqslant^{r}}(u) \text{ is \(L\)-prime}\}\), \(I = \{ρ_{\leqslant^{r}}(u) \in Q \mid \varepsilon \in ρ_{\leqslant^{r}}(u)\}\), \(F = \{ρ_{\leqslant^{r}}(u) \in Q \mid u \in L\}\) and \( \delta(ρ_{\leqslant^{r}}(u), a) = \{ ρ_{\leqslant^{r}}(v) \in Q \mid ρ_{\leqslant^{r}}(u) \cdot a \subseteq ρ_{\leqslant^{r}}(v)\}\) for all \(ρ_{\leqslant^{r}}(u) \in Q, a \in Σ\).
\rule{0.5em}{0.5em}
\end{definition}
\begin{lemma}\label{lemma: HrGeneratesL}
Let \(L\subseteq \Sigma^*\) be a regular language and let \(\leqslant^{r}\) be a right \(L\)-consistent quasiorder.
Then, \(\mathsf{H}^r(\leqslant^{r},L)\) is an RFA such that \(\lang{\mathsf{H}^r(\leqslant^{r},L)} = L\).
\end{lemma}
\begin{proof}
To simplify the notation, we denote \(ρ_{\leqslant^{r}}\), the closure induced by the quasiorder \(\leqslant^{r}\), simply by \(ρ\).
Let \(\mathcal{H} = \mathsf{H}^{r}(\leqslant^{r}, L) = \tuple{Q, \Sigma, \delta, I, F}\).
We first show that \(\mathcal{H}\) is an RFA, i.e.
\begin{equation}\label{eq:right-langsUco}
W_{ρ(u), F}^{\mathcal{H}} = u^{-1}L, \quad \text{for each } ρ(u)\in Q \enspace .
\end{equation}
Let us prove that \(w \in u^{-1}L \Rightarrow w \in W_{ρ(u), F}^{\mathcal{H}}\).
We proceed by induction on the length of \(w\).
\begin{myItem}
\item \emph{Base case:}
Assume \(w = \varepsilon\).
Then,
\[\varepsilon \in u^{-1}L \Rightarrow u \in L \Rightarrow ρ(u) \in F \Rightarrow \varepsilon \in W_{ρ(u), F}^{\mathcal{H}}\enspace .\]
\item \emph{Inductive step:}
Assume that the hypothesis holds for each word \(x \in \Sigma^*\) with \(\len{x} \leq n\), where \(n \geq 1\), and let \(w \in \Sigma^*\) be such that \(\len{w} = n{+}1\).
Then \(w = a x\) with \(\len{x} = n\) and \(a \in Σ\).
\begin{adjustwidth}{-0.8cm}{}
\begin{myAlign}{0pt}{}
ax \in u^{-1} L & \Rightarrow \quad \text{[By definition of quotient]} \\
x \in (ua)^{-1}L & \Rightarrow \; \\
\hspace{-5pt}\span \text{[By Def.~\ref{def:CompositeClosed}, \(ρ(ua)\) is \(L\)-prime (so \(z \ud ua\)) or \((ua)^{-1}L = \hspace{-5pt}\bigcup_{x_i \lneq^{r} ua}\hspace{-5pt}x_i^{-1}L\) (so \(z \ud x_i\))]}\\
\exists ρ(z) \in Q, \; x \in z^{-1}L \land ρ(ua) \subseteq ρ(z) & \Rightarrow \quad \text{[By I.H., Lemma~\ref{lemma:QObwComplete} and Def.~\ref{def:right-const:qo}]} \\
x \in W_{ρ(z), F}^{\mathcal{H}} \land ρ(z) \in δ(ρ(u), a) & \Rightarrow \quad \text{[By definition of \(W_{S,T}\)]}\\
ax \in W_{ρ(u), F}^{\mathcal{H}} \enspace .
\end{myAlign}
\end{adjustwidth}
\end{myItem}
We now prove the other side of the implication, \(w \in W_{ρ(u), F}^{\mathcal{H}} \Rightarrow w \in u^{-1}L\).
\begin{myItem}
\item \emph{Base case:}
Let \(w = \varepsilon\).
By Definition~\ref{def:right-const:qo},
\[\varepsilon \in W_{ρ(u), F}^{\mathcal{H}} \Rightarrow \exists ρ(x) \in Q, \; x \in L \land ρ(u) \varepsilon \subseteq ρ(x) \enspace .\]
Since \(ρ(L) = L\), we have that \(u\,\varepsilon \in L \), hence \(\varepsilon \in u^{-1}L\).
\item \emph{Inductive step:}
Assume the hypothesis holds for each \(x \in \Sigma^*\) with \(\len{x} \leq n\), where \(n \geq 1\), and let \(w \in \Sigma^*\) be such that \(\len{w} = n{+}1\).
Then \(w = a x\) with \(\len{x} = n\) and \(a \in Σ\).
\begin{align*}
ax \in W_{ρ(u), F}^{\mathcal{H}} & \Rightarrow \quad \text{[By Definition~\ref{def:right-const:qo}]} \\
x \in W_{ρ(y), F}^{\mathcal{H}} \land ρ(u) a \subseteq ρ(y) & \Rightarrow \quad \text{[By I.H. and since \(ρ\) is induced by \(\leqslant^{r}\)]} \\
x \in y^{-1}L \land y \leqslant^{r} ua & \Rightarrow \quad \text{[By \citet{deLuca1994}]} \\
x \in y^{-1}L \land y^{-1}L \subseteq (ua)^{-1}L & \Rightarrow \quad \text{[Since \(x \in (ua)^{-1} L \Rightarrow ax \in u^{-1}L\)]} \\
ax \in u^{-1}L \enspace .
\end{align*}
\end{myItem}
We have shown that \(\mathcal{H}\) is an RFA.
Finally, we show that \(\lang{\mathcal{H}} = L\).
First note that
\[\lang{\mathcal{H}} = \bigcup_{ρ(u) \in I} W_{ρ(u), F}^{\mathcal{H}} = \bigcup_{ρ(u) \in I} u^{-1}L \enspace ,\]
where the first equality holds by definition of \(\lang{\mathcal{H}}\) and the second by Equation~\eqref{eq:right-langsUco}.
On one hand, we have that \(\bigcup_{ρ(u) \in I} u^{-1}L \subseteq L\) since, by Definition~\ref{def:right-const:qo}, \(\varepsilon \in ρ(u)\) for each \(ρ(u) \in I\), hence \(u \leqslant^{r} \varepsilon\) which, as shown by \citet{deLuca1994}, implies that \(u^{-1}L \subseteq \varepsilon^{-1}L = L\).
Let us now show that \(L \subseteq \bigcup_{ρ(u) \in I} u^{-1}L\).
First, let us assume that \(ρ(\varepsilon) \in I\).
Then,
\[L = \varepsilon^{-1}L \subseteq \bigcup_{ρ(u) \in I} u^{-1}L \enspace .\]
Now suppose that \(ρ(\varepsilon)\notin I\), i.e. \(ρ(\varepsilon)\) is \(L\)-composite.
Then,
\[ L = \varepsilon^{-1}L = \bigcup_{u \lneq^{r} \varepsilon} u^{-1}L = \bigcup_{ρ(u) \in I} u^{-1}L \enspace .\]
where the last equality follows from \(ρ(u) \in I \Leftrightarrow \varepsilon \in ρ(u)\).
\end{proof}
Given a regular language \(L\) and a left \(L\)-consistent quasiorder \(\leqslant^{\ell}\), we can give a similar automata construction of a co-RFA that recognizes exactly \(L\).
\begin{definition}[Automata construction \(\mathsf{H}^{\ell}(\leqslant^{\ell}, L)\)]
\label{def:left-const:qo}
Let \(\leqslant^{\ell}\) be a left quasiorder and let \(L \subseteq \Sigma^*\) be a language.
Define the automaton \(\mathsf{H}^{\ell}(\leqslant^{\ell}, L)\ud \tuple{Q, \Sigma, \delta, I, F}\) where \(Q = \{ρ_{\leqslant^{\ell}}(u) \mid u \in Σ^*, \; ρ_{\leqslant^{\ell}}(u) \text{ is \(L\)-prime}\}\), \(I = \{ ρ_{\leqslant^{\ell}}(u) \in Q \mid u \in L \}\), \(F = \{ρ_{\leqslant^{\ell}}(u) \in Q \mid \varepsilon \in ρ_{\leqslant^{\ell}}(u)\}\), and \(\delta(ρ_{\leqslant^{\ell}}(u), a) = \{ρ_{\leqslant^{\ell}}(v)\in Q \mid a\cdot ρ_{\leqslant^{\ell}}(v) \subseteq ρ_{\leqslant^{\ell}}(u)\}\) for all \(ρ_{\leqslant^{\ell}}(u) \in Q, a \in \Sigma\).
\rule{0.5em}{0.5em}
\end{definition}
\begin{lemma}
\label{lemma:HlgeneratesL}
Let \(L\subseteq \Sigma^*\) be a language and let \(\leqslant^{\ell}\) be a left \(L\)-consistent quasiorder.
Then \(\mathsf{H}^{\ell}(\leqslant^{\ell},L)\) is a co-RFA such that \(\lang{\mathsf{H}^{\ell}(\leqslant^{\ell},L)} = L\).
\end{lemma}
\begin{proof}
To simplify the notation we denote \(ρ_{\leqslant^{\ell}}\), the closure induced by the quasiorder \(\leqslant^{\ell}\), simply by \(ρ\).
Let \(\mathcal{H} = \mathsf{H}^{\ell}(\leqslant^{\ell}, L) = \tuple{Q, \Sigma, \delta, I, F}\).
We first show that \(\mathcal{H}\) is a co-RFA.
\begin{equation}\label{eq:left-langsUco}
W_{I, ρ(u)}^{\mathcal{H}} = Lu^{-1}, \quad \text{for each } ρ(u)\in Q \enspace .
\end{equation}
Let us prove that \(w \in Lu^{-1} \Rightarrow w \in W_{I, ρ(u)}^{\mathcal{H}}\).
We proceed by induction.
\begin{myItem}
\item \emph{Base case:}
Let \(w = \varepsilon\).
Then
\[\varepsilon \in Lu^{-1} \Rightarrow u \in L \Rightarrow ρ(u) \in I \Rightarrow \varepsilon \in W_{I, ρ(u)}^{\mathcal{H}}\enspace .\]
\item \emph{Inductive step:}
Assume the hypothesis holds for all \(x \in \Sigma^*\) with \(\len{x} \leq n\), where \(n \geq 1\), and let \(w \in \Sigma^*\) be such that \(\len{w} = n{+}1\).
Then \(w = x a\) with \(\len{x} = n\) and \(a \in Σ\).
\begin{adjustwidth}{-0.8cm}{}
\begin{myAlign}{0pt}{}
xa \in Lu^{-1} & \Rightarrow \quad \text{[By definition of quotient]} \\
x \in L(au)^{-1} & \Rightarrow \; \\
\hspace{-5pt}\span \text{[By Def.~\ref{def:CompositeClosed}, \(ρ(au)\) is \(L\)-prime (so \(z \ud au\)) or \(L(au)^{-1} = \hspace{-5pt}\bigcup_{x_i \lneq^{\ell} au}\hspace{-5pt}Lx_i^{-1}\) (so \(z \ud x_i\))]}\\
\exists ρ(z) \in Q, \; x \in L z^{-1} \land ρ(au) \subseteq ρ(z) & \Rightarrow \quad \text{[By I.H., Lemma~\ref{lemma:QObwComplete} and Def.~\ref{def:left-const:qo}]} \\
x \in W_{I, ρ(z)}^{\mathcal{H}} \land ρ(u) \in δ(ρ(z), a) & \Rightarrow \quad \text{[By definition of \(W_{S,T}\)]}\\
xa \in W_{I, ρ(u)}^{\mathcal{H}} \enspace .
\end{myAlign}
\end{adjustwidth}
\end{myItem}
We now prove the other side of the implication, \(w \in W_{I, ρ(u)}^{\mathcal{H}} \Rightarrow w \in Lu^{-1}\).
\begin{myItem}
\item \emph{Base case:}
Let \(w = \varepsilon\).
Then
\[\varepsilon \in W_{I, ρ(u)}^{\mathcal{H}} \Rightarrow \exists ρ(x) \in Q,\; x \in L \land \varepsilon ρ(u) \subseteq ρ(x)\enspace . \]
Since \(ρ(L) = L\), we have that \( \varepsilon u \in L\), hence \(\varepsilon \in Lu^{-1}\).
\item \emph{Inductive step:}
Assume the hypothesis holds for each \(x \in \Sigma^*\) with \(\len{x} \leq n\), where \(n \geq 1\), and let \(w \in \Sigma^*\) be such that \(\len{w} = n{+}1\).
Then \(w = x\cdot a\) with \(\len{x} = n\) and \(a \in Σ\).
\begin{align*}
xa \in W_{I, ρ(u)}^{\mathcal{H}} & \Rightarrow \quad \text{[By Definition~\ref{def:left-const:qo}]} \\
a\cdot ρ(u) \subseteq ρ(y) \land x \in W_{I, ρ(y)}^{\mathcal{H}} & \Rightarrow \quad \text{[By I.H. and since \(ρ\) is induced by \(\leqslant^{\ell}\)]} \\
y \leqslant^{\ell} au \land x \in Ly^{-1} & \Rightarrow \quad \text{[By \citet{deLuca1994}]} \\
Ly^{-1} \subseteq L(au)^{-1} \land x \in Ly^{-1} & \Rightarrow \quad \text{[Since \(x \in L(au)^{-1} \Rightarrow xa \in Lu^{-1}\)]} \\
xa \in Lu^{-1} \enspace .
\end{align*}
\end{myItem}
We have shown that \(\mathcal{H}\) is a co-RFA.
Finally, we show that \(\lang{\mathcal{H}} = L\).
First note that
\[\lang{\mathcal{H}} = \bigcup_{ρ(u) \in F} W_{I, ρ(u)}^{\mathcal{H}} = \bigcup_{ρ(u) \in F} Lu^{-1} \enspace ,\]
where the first equality holds by definition of \(\lang{\mathcal{H}}\) and the second by Equation~\eqref{eq:left-langsUco}.
On one hand, we have that \(\bigcup_{ρ(u) \in F} Lu^{-1} \subseteq L\) since, by Definition~\ref{def:left-const:qo}, \(\varepsilon \in ρ(u)\) for each \(ρ(u) \in F\), hence \(u \leqslant^{\ell} \varepsilon\) which, as shown by \citet{deLuca1994}, implies that \(Lu^{-1} \subseteq L\varepsilon^{-1} = L\).
Let us now show that \(L \subseteq \bigcup_{ρ(u) \in F} Lu^{-1}\).
First, let us assume that \(ρ(\varepsilon) \in F\).
Then,
\[L = L\varepsilon^{-1} \subseteq \bigcup_{ρ(u) \in F} Lu^{-1} \enspace .\]
Now suppose that \(ρ(\varepsilon)\notin F\), i.e. \(ρ(\varepsilon)\) is \(L\)-composite.
Then,
\[ L = L\varepsilon^{-1} = \bigcup_{u \lneq^{\ell} \varepsilon} Lu^{-1} = \bigcup_{ρ(u) \in F} Lu^{-1}\enspace .\]
where the last equality follows from \(ρ(u) \in F \Leftrightarrow \varepsilon \in ρ(u)\).
\end{proof}
Observe that the automaton \(\mathcal{H}^r = \mathsf{H}^{r}(\leqslant^{r}, L)\) (resp. \(\mathcal{H}^{\ell} = \mathsf{H}^{\ell}(\leqslant^{\ell}, L)\)) is \emph{finite}, since we assume \(\leqslant^{r}\) (resp. \(\leqslant^{\ell}\)) induces a finite number of principals.
Note also that \(\mathcal{H}^{r}\) (resp. \(\mathcal{H}^{\ell}\)) possibly contains empty (resp.\ unreachable) states but no state is unreachable (resp.\ empty).
Moreover, notice that by keeping all principals of \(\leqslant^{r}\) (resp. \(\leqslant^{\ell}\)) as states, instead of only the \(L\)-prime ones as in Definition~\ref{def:right-const:qo} (resp. Definition~\ref{def:left-const:qo}), we would obtain an RFA (resp.\ a co-RFA) with (possibly) more states that also recognizes \(L\).
Finally, Lemma~\ref{lemma:leftRightReverse} shows that \(\mathcal{H}^{\ell}\) and \(\mathcal{H}^r\) inherit the left-right duality between \(\leqslant^{\ell}\) and \(\leqslant^{r}\) through the reverse operation.
\begin{lemma}\label{lemma:leftRightReverse}
Let \(\leqslant^{r}\) and \(\leqslant^{\ell}\) be a right and a left quasiorder, respectively, and let \(L \subseteq \Sigma^*\) be a language.
If the following property holds
\begin{equation}
\label{eq:leftRightReverse}
u \leqslant^{r} v \Leftrightarrow u^R \leqslant^{\ell} v^R
\end{equation}
then \(\mathsf{H}^{r}(\leqslant^{r}, L) \) is isomorphic to \( \left(\mathsf{H}^{\ell}(\leqslant^{\ell}, L^R)\right)^R\).
\end{lemma}
\begin{proof}
Let \(\mathsf{H}^{r}(\leqslant^{r}, L) = \tuple{Q, \Sigma, \delta, I, F}\) and \((\mathsf{H}^{\ell}(\leqslant^{\ell}, L^R))^R = \tuple{\widetilde{Q}, \Sigma, \widetilde{\delta}, \widetilde{I}, \widetilde{F}}\).
We will show that \(\mathsf{H}^{r}(\leqslant^{r}, L)\) is isomorphic to \((\mathsf{H}^{\ell}(\leqslant^{\ell}, L^R))^R\).
Let \(\varphi: Q \rightarrow \widetilde{Q}\) be a mapping assigning to each state \(ρ_{\leqslant^{r}}(u) \in Q\) with \(u \in \Sigma^*\), the state \(ρ_{\leqslant^{\ell}}(u^R) \in \widetilde{Q}\).
Next, we show that \(\varphi\) is an NFA isomorphism between \(\mathsf{H}^{r}(\leqslant^{r}, L)\) and \((\mathsf{H}^{\ell}(\leqslant^{\ell}, L^R))^R\).
Observe that:
\begin{align*}
u^{-1}L = \bigcup_{x \lneq^{r} u}x^{-1}L & \Leftrightarrow \quad\text{[Since \(\left(\bigcup S_i\right)^R = \bigcup S_i^R\)]} \\
(u^{-1}L)^R = \bigcup_{x \lneq^{r} u}(x^{-1}L)^R & \Leftrightarrow \quad\text{[Since \((u^{-1}L)^R = L^R(u^R)^{-1} \)]}\\
L^R(u^R)^{-1} = \bigcup_{x \lneq^{r} u} L^R(x^R)^{-1} & \Leftrightarrow \quad\text{[By Equation~\eqref{eq:leftRightReverse}]}\\
L^R(u^R)^{-1} = \bigcup_{x^R \lneq^{\ell} u^R} L^R(x^R)^{-1} & \enspace .
\end{align*}
Therefore \(ρ_{\leqslant^{r}}(u)\) is \(L\)-composite if{}f \(ρ_{\leqslant^{\ell}}(u^R)\) is \(L^R\)-composite, hence \(\varphi(Q) = \widetilde{Q}\).
Since
\[\varepsilon \in ρ_{\leqslant^{r}}(u) \Leftrightarrow u \leqslant^{r} \varepsilon \Leftrightarrow u^R \leqslant^{\ell} \varepsilon \Leftrightarrow \varepsilon \in ρ_{\leqslant^{\ell}}(u^R) \enspace ,\]
we have that \(ρ_{\leqslant^{r}}(u)\) is an initial
state of \(\mathsf{H}^{r}(\leqslant^{r}, L)\) if{}f \(ρ_{\leqslant^{\ell}}(u^R)\) is a
final state of \(\mathsf{H}^{\ell}(\leqslant^{\ell}, L^R)\), i.e.\ an initial state of \((\mathsf{H}^{\ell}(\leqslant^{\ell}, L^R))^R\).
Therefore, \(\varphi(I) = \widetilde{I}\).
Since
\[ρ_{\leqslant^{r}}(u) \subseteq L \Leftrightarrow u \in L \Leftrightarrow u^R \in L^R \enspace ,\]
we have that \(ρ_{\leqslant^{r}}(u)\) is a final state of \(\mathsf{H}^{r}(\leqslant^{r}, L)\) if{}f \(ρ_{\leqslant^{\ell}}(u^R)\) is an initial state of \(\mathsf{H}^{\ell}(\leqslant^{\ell}, L^R)\), i.e. a final state of \((\mathsf{H}^{\ell}(\leqslant^{\ell}, L^R))^R\).
Therefore, \(\varphi(F) = \widetilde{F}\).
It remains to show that \(q' \in \delta(q, a)\Leftrightarrow \varphi(q') \in \widetilde{\delta}(\varphi(q),a)\), for all \(q, q' \in Q\) and \(a \in \Sigma\).
Assume that \(q = ρ_{\leqslant^{r}}(u)\) for some \(u \in \Sigma^*\), \(q' = ρ_{\leqslant^{r}}(v)\) for some \(v \in Σ^*\) and \(q' \in \delta(q, a)\) with \(a \in \Sigma\).
Then,
\begin{myAlignEP}
ρ_{\leqslant^{r}}(v) \in δ(ρ_{\leqslant^{r}}(u), a) & \Leftrightarrow \quad \text{[By Definition~\ref{def:right-const:qo}]} \\
ρ_{\leqslant^{r}}(u)a \subseteq ρ_{\leqslant^{r}}(v) & \Leftrightarrow \quad \text{[By definition of \(ρ_{\leqslant^{r}}\) and Lemma~\ref{lemma:QObwComplete}]} \\
v \leqslant^{r} ua & \Leftrightarrow \quad \text{[By Equation~\eqref{eq:leftRightReverse} and \((ua)^R = au^R\)]} \\
v^R \leqslant^{\ell} au^R & \Leftrightarrow \quad \text{[By definition of \(ρ_{\leqslant^{\ell}}\) and Lemma~\ref{lemma:QObwComplete}]} \\
aρ_{\leqslant^{\ell}}(u^R) \subseteq ρ_{\leqslant^{\ell}}(v^R) & \Leftrightarrow \quad \text{[By Definition~\ref{def:left-const:qo}]} \\
ρ_{\leqslant^{\ell}}(v^R) \in \widetilde{δ}(ρ_{\leqslant^{\ell}}(u^R), a) & \Leftrightarrow \quad \text{[Definition of \(q, q'\) and \(\varphi\)]} \\
\varphi(q') \in \widetilde{δ}(\varphi(q), a) \enspace .\tag*{\qedhere}
\end{myAlignEP}
\end{proof}
\subsection{On the Size of \texorpdfstring{\(\mathsf{H}^{r}(\leqslant^{r},L)\)}{Hr} and \texorpdfstring{\(\mathsf{H}^{\ell}(\leqslant^{\ell},L)\)}{Hl}}
We conclude this section with a note on the sizes of the automata constructions \(\mathsf{H}^{r}(\leqslant^{r},L)\) and \(\mathsf{H}^{\ell}(\leqslant^{\ell},L)\) when applied to quasiorders satisfying \(\mathord{\leqslant^{r}_1} \subseteq \mathord{\leqslant^{r}_2}\) and \(\mathord{\leqslant^{\ell}_1} \subseteq \mathord{\leqslant^{\ell}_2}\), respectively.
The following result establishes a relationship between the \(L\)-composite principals for two comparable right quasiorders \(\mathord{\leqslant^{r}_1} \subseteq \mathord{\leqslant^{r}_2}\).
This result is used in Theorem~\ref{theorem:numLPrimePrincipals} to show that the number of \(L\)-prime principals induced by \(\leqslant^{r}_1\) is greater or equal than the number of \(L\)-prime principals induced by \(\leqslant^{r}_2\).
As a consequence, if \(\mathord{\leqslant^{r}_1} \subseteq \mathord{\leqslant^{r}_2}\) then the automaton \(\mathsf{H}^{r}(\leqslant^{r}_1,L)\) has, at least, as many states as \(\mathsf{H}^{r}(\leqslant^{r}_2,L)\).
The same holds for left quasiorders and \(\mathsf{H}^{\ell}\).
\begin{lemma}\label{lemma:numprincipals}
Let \(L \subseteq Σ^*\) be a regular language and let \(u \in Σ^*\).
Let \(\leqslant^{r}_1\) and \(\leqslant^{r}_2\) be two \(L\)-consistent right quasiorders such that \(\mathord{\leqslant^{r}_1} \subseteq \mathord{\leqslant^{r}_2}\).
Then
\[ρ_{\leqslant^{r}_1}(u) \text{ is \(L\)-composite} \Rightarrow \left( ρ_{\leqslant^{r}_2}(u) \text{ is \(L\)-composite} \lor \exists x \lneq^{r}_1 u, \; ρ_{\leqslant^{r}_2}(u) = ρ_{\leqslant^{r}_2}(x)\right)\enspace .\]
Similarly holds for left quasiorders.
\end{lemma}
\begin{proof}
Let \(u \in Σ^*\) be such that \(ρ_{\leqslant^{r}_1}(u)\) is \(L\)-composite.
Then we have that \(u^{-1}L = \bigcup_{x \in Σ^*, x \leqslant^{r}n_1 u} x^{-1}L\).
On the other hand, since \(\leqslant^{r}_2\) is a right \(L\)-consistent quasiorder, we have that \(\mathord{\leqslant^{r}_2} \subseteq \mathord{\leqslant^{r}L}\), as shown by \citet{deLuca1994}.
Therefore \(u^{-1}L \supseteq \bigcup_{x \in Σ^*, x \leqslant^{r}n_2 u} x^{-1}L\).
There are now two possibilities:
\begin{myItem}
\item For all \(x \in Σ^*\) such that \(x \leqslant^{r}n_1 u\) we have that \(x \leqslant^{r}n_2 u\).
In that case we have that \(u^{-1}L = \bigcup_{x\inΣ^*, \; x \leqslant^{r}n_2 u} x^{-1}L\), hence \(ρ_{\leqslant^{r}_2}(u)\) is \(L\)-composite.
\item There exists \(x \in Σ^*\) such that \(x \leqslant^{r}n_1 u\), hence \(x \leqslant^{r}_2 u\), but \(x \not\leqslant^{r}n_2u\).
In that case, it follows that \(ρ_{\leqslant^{r}_2}(x) = ρ_{\leqslant^{r}_2}(u)\).
\end{myItem}
The proof for left quasiorders is symmetric.
\end{proof}
\begin{theorem}\label{theorem:numLPrimePrincipals}
Let \(L\subseteq Σ^*\) and let \(\leqslant_1\) and \(\leqslant_2\) be two right or two left \(L\)-consistent quasiorders such that \(\mathord{\leqslant_1} \subseteq \mathord{\leqslant_2}\).
Then
\[\len{\{ρ_{\leqslant_1}(u) \mid u \in Σ^* \land ρ_{\leqslant_1}(u) \text{ is \(L\)-prime}\}} \geq \len{\{ρ_{\leqslant_2}(u) \mid u \in Σ^* \land ρ_{\leqslant_2}(u) \text{ is \(L\)-prime}\}}\]
\end{theorem}
\begin{proof}
We proceed by showing that for every \(L\)-prime \(ρ_{\leqslant_2}(u)\) there exists an \(L\)-prime \(ρ_{\leqslant_1}(x)\) such that \(ρ_{\leqslant_2}(x) = ρ_{\leqslant_2}(u)\).
Clearly, this entails that there are, at least, as many \(L\)-prime principals for \(\leqslant_1\) as there are for \(\leqslant_2\).
Let $ρ_{\leqslant_2}(u)$ be $L$-prime.
If \(ρ_{\leqslant_1}(u)\) is \(L\)-prime, we are done.
Otherwise, by Lemma~\ref{lemma:numprincipals}, we have that there exists $x \leqslantn_1 u$ such that
$ρ_{\leqslant_2}(u) = ρ_{\leqslant_2}(x)$.
We repeat the reasoning with $x$. If $ρ_{\leqslant_1}(x)$ is $L$-prime, we are done. Otherwise, there exists $x_1 \leqslantn_1 x$ such that
$ρ_{\leqslant_2}(u) = ρ_{\leqslant_2}(x) = ρ_{\leqslant_2}(x_1)$.
Since \(\leqslant_1\) induces finitely many principals, there are no infinite strictly descending chains and, therefore, there exists \(x_n\) such that \(ρ_{\leqslant_2}(u)=ρ_{\leqslant_2}(x)=ρ_{\leqslant_2}(x_1)=\ldots = ρ_{\leqslant_2}(x_n)\) and \(ρ_{\leqslant_1}(x_n)\) is \(L\)-prime.
\end{proof}
\section{Language-based Quasiorders and their Approximation using NFAs}
\label{sec:Instantiation}
In this section we instantiate our automata constructions using two classes of quasiorders, namely, the so-called \emph{Nerode's} quasiorders~\cite{deLuca1994}, whose definition is based on a given regular language; and the \emph{automata-based} quasiorders, whose definition is based on a finite representation of the language, i.e, an automaton.
Both quasiorders have been used previously in Chapter~\ref{chap:LangInc} in order to derive algorithms for solving the language inclusion problem between regular languages.
We recall their definitions next:
\begin{align}
u \leqslant^{r}L v & \udiff u^{-1}L \subseteq v^{-1}L & \quad \text{\emph{Right-}language-based Quasiorder}\label{eq:Rlanguage} \\
u \leqslant^{\ell}L v & \udiff Lu^{-1} \subseteq Lv^{-1} & \quad \text{\emph{Left-}language-based Quasiorder}
\label{eq:Llanguage} \\
u \leqslant^{r}N v & \udiff \post^{\mathcal{N}}_u(I) \subseteq \post^{\mathcal{N}}_v(I) & \quad \text{\emph{Right-}Automata-based Quasiorder}\label{eq:RState} \\
u \leqslant^{\ell}N v & \udiff Pe^{\mathcal{N}}_u(F) \subseteq Pe^{\mathcal{N}}_v(F) & \quad \text{\emph{Left-}Automata-based Quasiorder} \label{eq:LState}
\end{align}
As explained in Chapter~\ref{chap:LangInc}, \citet{deluca2011} showed that for every regular language \(L\) there exists a finite number of quotients \(u^{-1}L\) and, therefore, \(\leqslant^{r}L\) and \(\leqslant^{\ell}L\) are well-quasiorders.
On the other hand, the automata-based quasiorders are also well-quasiorders.
Therefore, all the quasiorders defined above induce a finite number of principals.
\begin{remark} \label{remark:LRDual}
The pairs of quasiorders \(\leqslant^{r}L\)~-~\(\leqslant^{\ell}L\) and \(\leqslant^{r}N\)~-~\(\leqslant^{\ell}N\) are dual, i.e.
\[u \leqslant^{r}L v \Leftrightarrow u^R \leqslant^{\ell}L v^R \quad \text{and} \quad u \leqslant^{r}N v \Leftrightarrow u^R \leqslant^{\ell}N v^R\enspace .\]
\end{remark}
The following result shows that the principals of \(\leqslant^{r}N\) and \(\leqslant^{\ell}N\) can be described, respectively, as intersections of left and right languages of the states of \(\mathcal{N}\) while the principals of \(\leqslant^{r}L\) and \(\leqslant^{\ell}L\) can be described as intersections of left and right quotients of \(L\).
\begin{lemma}\label{lemma:positive_atoms}
Let \(\mathcal{N} = \tuple{Q,Σ,δ,I,F}\) be an NFA with \(\lang{\mathcal{N}}=L\).
Then, for every \(u \in \Sigma^*\),
\begin{align*}
ρ_{\leqslant^{r}N}(u) &= \bigcap\textstyle{_{q \in \post_u^{\mathcal{N}}(I)}} W_{I,q}^{\mathcal{N}} & ρ_{\leqslant^{r}L}(u) &= \bigcap\textstyle{_{w \in \Sigma^*, \; w \in u^{-1}L}} Lw^{-1} \\
ρ_{\leqslant^{\ell}N}(u) & = \bigcap\textstyle{_{q \in Pe_u^{\mathcal{N}}(F) }} W_{q,F}^{\mathcal{N}} & ρ_{\leqslant^{\ell}L}(u) &= \bigcap\textstyle{_{w \in \Sigma^*, \; w \in Lu^{-1} }} w^{-1}L\enspace .
\end{align*}
\end{lemma}
\begin{proof}
We prove the lemma for the principals induced by \(\leqslant^{r}L\) and \(\leqslant^{r}N\).
The proofs for the left quasiorders are symmetric.
For each \(u \in \Sigma^*\) we have that
\begin{align*}
ρ_{\leqslant^{r}N}(u) &= \quad \text{[By definition of \(ρ_{\leqslant^{r}N}\)]}\\
\{v \in Σ^* \mid \post_u^{\mathcal{N}}(I) \subseteq \post_v^{\mathcal{N}}(I)\} & = \quad \text{[By definition of set inclusion]} \\
\{v \in \Sigma^* \mid \forall q \in \post^{\mathcal{N}}_u(I), \; q\in \post^{\mathcal{N}}_v(I)\} & = \quad \text{[Since \(q \in \post^{\mathcal{N}}_v(I) \Leftrightarrow v \in W^{\mathcal{N}}_{I,q}\)]}\\
\{v \in \Sigma^* \mid \forall q \in \post^{\mathcal{N}}_u(I), \; v \in W^{\mathcal{N}}_{I,q}\} & = \quad \text{[By definition of intersection]}\\
\bigcap\textstyle{_{q \in \post^{\mathcal{N}}_u(I)}} W_{I,q}^{\mathcal{N}} & \enspace .
\end{align*}
On the other hand,
\begin{align*}
v \in \bigcap\textstyle{_{w \in \Sigma^*, \; w \in u^{-1}L}} Lw^{-1} & \Leftrightarrow \quad \text{[By definition of intersection]} \\
\forall w \in Σ^*, \; w \in u^{-1}L \Rightarrow v \in Lw^{-1} & \Leftrightarrow \quad \text{[Since \(\forall x,y \in Σ^*, \; x \in Ly^{-1} \Leftrightarrow y \in x^{-1}L\)]} \\
\forall w \in Σ^*, \; w \in u^{-1}L \Rightarrow w \in v^{-1}L & \Leftrightarrow \quad \text{[By definition of set inclusion]} \\
u^{-1}L \subseteq v^{-1}L & \Leftrightarrow \quad \text{[By definition of \(ρ_{\leqslant^{r}L}(u)\)]} \\
v \in ρ_{\leqslant^{r}L}(u) \enspace .
\end{align*}
\end{proof}
As shown by Lemma~\ref{lemma:LAconsistent}, given an NFA \(\mathcal{N}\) with \(L = \lang{\mathcal{N}}\), the quasiorders \(\leqslant^{r}L\) and \(\leqslant^{r}N\) are right \(L\)-consistent, while the quasiorders \(\leqslant^{\ell}L\) and \(\leqslant^{\ell}N\) are left \(L\)-consistent.
Therefore, by Lemma~\ref{lemma: HrGeneratesL} and~\ref{lemma:HlgeneratesL}, our automata constructions applied to these quasiorders yield automata for \(L\).
Finally, recall that, as shown by \citet{deLuca1994}, \(\leqslant^{r}N\) is finer than \(\leqslant^{r}L\), i.e., \(\mathord{\leqslant^{r}N} \subseteq \mathord{\leqslant^{r}L}\).
In that sense we say \(\leqslant^{r}N\) \emph{approximates} \(\leqslant^{r}L\).
As the following lemma shows, the approximation is precise, i.e., \(\mathord{\leqslant^{r}N} = \mathord{\leqslant^{r}L}\), whenever \(\mathcal{N}\) is a co-RFA with no empty states\@.
\begin{lemma}\label{lemma:coResidual_qrL=qrN}
Let \(\mathcal{N} = \tuple{Q, Σ, δ, I, F}\) be a co-RFA with no empty states such that \(L = \lang{\mathcal{N}}\).
Then \(\mathord{\leqslant^{r}L} = \mathord{\leqslant^{r}N}\).
Similarly, if \(\mathcal{N}\) is an RFA with no unreachable states and \(L = \lang{\mathcal{N}}\) then \(\mathord{\leqslant^{\ell}L} = \mathord{\leqslant^{\ell}N}\).
\end{lemma}
\begin{proof}
It is straightforward to check that the following holds for every NFA \(\mathcal{N}\) and \(u, v \in Σ^*\).
\[\post_u^{\mathcal{N}}(I) \subseteq \post_v^{\mathcal{N}}(I) \Rightarrow W_{\post_{u}^{\mathcal{N}}(I),F}^{\mathcal{N}} \subseteq W_{\post_{v}^{\mathcal{N}}(I),F}^{\mathcal{N}}\]
Next we show that the reverse implication also holds when \(\mathcal{N}\) is a co-RFA with no empty states.
Let \(u, v\in Σ^*\) be such that \(W_{\post_{u}^{\mathcal{N}}(I),F}^{\mathcal{N}} \subseteq W_{\post_{v}^{\mathcal{N}}(I),F}^{\mathcal{N}}\).
Then,
\begin{align*}
q \in \post_{u}^{\mathcal{N}}(I) & \Rightarrow \quad \text{[Since \(\mathcal{N}\) is co-RFA with no empty states]}\\
\exists x \in Σ^*, \; u \in W_{I,q} = Lx^{-1} & \Rightarrow \quad \text{[Since \(u \in Lx^{-1} \Rightarrow x \in u^{-1}L\)]} \\
x \in W_{\post_{u}^{\mathcal{N}}(I), F} & \Rightarrow \quad \text{[Since \(W_{\post_{u}^{\mathcal{N}}(I),F}^{\mathcal{N}} \subseteq W_{\post_{v}^{\mathcal{N}}(I),F}^{\mathcal{N}}\)]} \\
x \in W_{\post_{v}^{\mathcal{N}}(I), F} & \Rightarrow \quad \text{[By definition of \(W_{S,T}^{\mathcal{N}}\)]} \\
\exists q' \in Q, \; x \in W_{q',F} \land v \in W_{I, q'} & \Rightarrow \quad \text{[Since \(x \in W_{q',F} \Rightarrow W_{I,q'} \subseteq Lx^{-1}\)]}\\
v \in Lx^{-1} & \Rightarrow \quad \text{[Since \(Lx^{-1} = W_{I,q}\)]} \\
v \in W_{I, q} & \Rightarrow \quad \text{[By definition of \(\post_v^{\mathcal{N}}(I)\)]} \\
q \in \post_v^{\mathcal{N}}(I) \enspace .
\end{align*}
Therefore, \(W_{\post_{u}^{\mathcal{N}}(I),F}^{\mathcal{N}} \subseteq W_{\post_{v}^{\mathcal{N}}(I),F}^{\mathcal{N}} \Rightarrow \post_u^{\mathcal{N}}(I) \subseteq \post_v^{\mathcal{N}}(I)\).
The proof for RFAs with no unreachable states and left quasiorders is symmetric.
\end{proof}
Finally, the following lemma shows that, for the Nerode's quasiorders, the \(L\)-composite principals can be described as intersections of \(L\)-prime principals.
\begin{lemma}\label{lemma:CompositeIntersection}
Let \(\mathcal{N} = \tuple{Q, Σ, δ, I, F}\) be an NFA with \(\lang{\mathcal{N}} = L\).
Then,
\begin{equation}\label{eq:rhoCompRaIntersection}
u^{-1}L = \hspace{-10pt}\bigcup_{x\in\Sigma^*,\; x \leqslant^{r}n_L u}\hspace{-10pt} x^{-1}L \implies ρ_{\leqslant^{r}L}(u) = \hspace{-10pt}\bigcap_{x\in\Sigma^*,\; x \leqslant^{r}n_L u}\hspace{-10pt} ρ_{\leqslant^{r}L}(x) \enspace .
\end{equation}
Similarly holds for the left Nerode's quasiorder \(\leqslant^{\ell}L\).
\end{lemma}
\begin{proof}
Observe that the inclusion \(ρ_{\leqslant^{r}L}(u) \subseteq \bigcap_{x\in\Sigma^*, x \leqslant^{r}n_L u} ρ_{\leqslant^{r}L}(x)\) always holds since \(x \leqslant^{r}n_L u \Rightarrow ρ_{\leqslant^{r}L}(u) \subseteq ρ_{\leqslant^{r}L}(x)\).
Next, we prove the reverse inclusion.
Let \(w \in \bigcap_{x\in\Sigma^*, x \leqslant^{r}n_L u} ρ_{\leqslant^{r}L}(x)\) and assume that the left hand side of Equation~\eqref{eq:rhoCompRaIntersection} holds.
Then, by definition of intersection and \(ρ_{\leqslant^{r}L}\), we have that \(x \leqslant^{r}L w\) for every \(x \in Σ^*\) such that \(x \leqslant^{r}n_L u\), i.e., \(x^{-1}L \subseteq w^{-1}L\) for every \(x \in Σ^*\) such that \(x^{-1}L \subsetneq u^{-1}L\).
Since, by hypothesis, \(u^{-1}L = \bigcup_{x\in\Sigma^*,\; x \leqslant^{r}n_L u} x^{-1}L\), it follows that \(u^{-1}L \subseteq w^{-1}L\) and, therefore, \(w \in ρ_{\leqslant^{r}L}(u)\).
We conclude that \(\bigcap_{x\in\Sigma^*, x \leqslant^{r}n_L u} ρ_{\leqslant^{r}L}(x) \subseteq ρ_{\leqslant^{r}L}(u)\).
\end{proof}
\subsection{Automata Constructions}
In what follows, we will use \(\cF{}\) and \(\cG{}\) to denote the construction \(\mathsf{H}\) when applied, respectively, to the language-based quasiorders induced by a regular language and the automata-based quasiorders induced by an NFA\@.
\begin{definitionNI}[\(\cG{}\) and \(\cF{}\)]
\label{def:FG}
Let \(\mathcal{N}\) be an NFA with \(L = \lang{\mathcal{N}}\).
Define:
\begin{align*}
\mindex{\cF{r}}(L) & \ud \mathsf{H}^{r}(\leqslant^{r}L, L) & \mindex{\cG{r}}(\mathcal{N}) & \ud \mathsf{H}^{r}(\leqslant^{r}N, L) \\
\mindex{\cF{\ell}}(L) & \ud \mathsf{H}^{\ell}(\leqslant^{\ell}L, L) & \mindex{\cG{\ell}}(\mathcal{N}) & \ud \mathsf{H}^{\ell}(\leqslant^{\ell}N, L) \enspace . \tag*{\rule{0.5em}{0.5em}}
\end{align*}
\end{definitionNI}
Given an NFA \(\mathcal{N}\) generating the language \(L=\lang{\mathcal{N}}\), all constructions in the above definition yield automata generating \(L\).
However, while the constructions using the right quasiorders result in RFAs, those using left quasiorders result in co-RFAs.
Furthermore, it follows from Remark~\ref{remark:LRDual} and Lemma~\ref{lemma:leftRightReverse} that \(\cF{\ell}(L)\) is isomorphic to \((\cF{r}(L^R))^R\) and \(\cG{\ell}(\mathcal{N})\) is isomorphic to \((\cG{r}(\mathcal{N}^R))^R\).
It follows from Theorem~\ref{theorem:numLPrimePrincipals} that the automata \(\cG{r}(\mathcal{N})\) and \(\cG{\ell}(\mathcal{N})\) have, at least, as many states as \(\cF{r}(L)\) and \(\cF{\ell}(L)\), respectively.
Intuitively, \(\cF{r}(L)\) is the minimal RFA for \(L\), i.e. it is isomorphic to the canonical RFA for \(L\), since, as shown by~\citet{deLuca1994}, \(\leqslant^{r}L\) is the coarsest right \(L\)-consistent quasiorder.
On the other hand, as we shall see in Example~\ref{example:residualization}, \(\cG{r}(\mathcal{N})\) is a sub-automaton of \(\mathcal{N}^{\text{res}}\)~\cite{denis2002residual} for every NFA \(\mathcal{N}\).
Finally, it follows from Lemma~\ref{lemma:coResidual_qrL=qrN} that residualizing (\(\cG{r}\)) a co-RFA with no empty states (for instance, \(\cG{\ell}(\mathcal{N})\)) results in the canonical RFA for \(\lang{\mathcal{N}}\) (\(\cF{r}(\lang{\mathcal{N}})\)).
We formalize all these notions in Theorem~\ref{theoremF}.
\pagebreak
\begin{theorem}\label{theoremF}
Let \(\mathcal{N}\) be an NFA with \(L = \lang{\mathcal{N}}\).
Then the following hold:
\begin{myEnumA}
\item \(\lang{\cF{r}(L)} =\lang{\cF{\ell}(L)} = L = \lang{\cG{r}(\mathcal{N})} = \lang{\cG{\ell}(\mathcal{N})}\).
\label{lemma:language-F}
\item \(\cF{\ell}(L)\) is isomorphic to \((\cF{r}(L^R))^R\).
\label{lemma:FlisomorphicRfrR}
\item \(\cG{\ell}(\mathcal{N})\) is isomorphic to \((\cG{r}(\mathcal{N}^R))^R\).
\label{lemma:AlRequalArNR}
\item \(\cF{r}(L)\) is isomorphic to the canonical RFA for \(L\).
\label{theorem:CanonicalRFAlanguage}
\item \(\cG{r}(\mathcal{N})\) is isomorphic to a sub-automaton of \(\mathcal{N}^{\text{res}}\).
\label{lemma:rightNRes}
\item \(\cG{r}(\cG{\ell}(\mathcal{N}))\) is isomorphic to \(\cF{r}(L)\).\label{lemma:LS+RS=RN}
\end{myEnumA}
\end{theorem}
\begin{proof}
In the following, let \(\mathcal{N} = \tuple{Q, \Sigma, \delta, I, F}\).
\begin{myEnumA}
\item
By Definition~\ref{def:FG}, \(\cF{r}(L) = \mathsf{H}^{r}(\leqslant^{r}L, L)\) and \(\cG{r}(\mathcal{N}) = \mathsf{H}^{r}(\leqslant^{r}N, L)\).
On the other hand, by Lemma \ref{lemma: HrGeneratesL}, \(\lang{\mathsf{H}^{r}(\leqslant^{r}L, L)} = \lang{\mathsf{H}^{r}(\leqslant^{r}N, L)} = L\).
Therefore, \(\lang{\cF{r}(L)} = \lang{\cG{r}(\mathcal{N})} = L\).
Similarly, it follows from Lemma~\ref{lemma:HlgeneratesL} that \(\lang{\cF{\ell}(L)} = \lang{\cG{\ell}(\mathcal{N})} =L\).
\item For each \(u, v \in Σ^*\):
\begin{align*}
u \leqslant^{\ell}L v & \Leftrightarrow \quad \text{[By Definition~\eqref{eq:Llanguage}]} \\
Lu^{-1} \subseteq Lv^{-1} & \Leftrightarrow \quad \text{[\(A \subseteq B\Leftrightarrow A^R \subseteq B^R\)]}\\
(Lu^{-1})^R \subseteq (Lv^{-1})^R & \Leftrightarrow \quad\text{[Since \((Lu^{-1})^R = (u^R)^{-1}L^R\)]} \\
(u^R)^{-1}L^R \subseteq (v^R)^{-1}L^R & \Leftrightarrow\quad\text{[By Definition~\eqref{eq:Rlanguage}]} \\
u^R \leqslant^{r}_{L^R} v^R \enspace .
\end{align*}
Therefore, by Lemma~\ref{lemma:leftRightReverse}, \(\cF{\ell}(L)\) is isomorphic to \((\cF{r}(L^R))^R\).
\item
For each \(u,v \in \Sigma^*\):
\begin{align*}
u \leqslant^{\ell}N v & \Leftrightarrow \quad\text{[By Definition~\eqref{eq:LState}]}\\
Pe_u^{\mathcal{N}}(F) \subseteq Pe_v^{\mathcal{N}}(F) & \Leftrightarrow\quad \text{[Since \(q \in Pe^{\mathcal{N}}_{x}(F)\) if{}f \(q \in \post^{\mathcal{N}^R}_{x^R}(I) \)]}\\
\post_{u^R}^{\mathcal{N}^R}(I) \subseteq \post_{v^R}^{\mathcal{N}^R}(I) & \Leftrightarrow \quad\text{[By Definition~\eqref{eq:RState}]}\\
u^R \leqslant^{r}_{\mathcal{N}^R} v^R \enspace .
\end{align*}
It follows from Lemma~\ref{lemma:leftRightReverse} that \(\cG{\ell}(\mathcal{N})\) is isomorphic to \(\cG{r}(\mathcal{N}^R)^R\).
\item
Let \(ρ\) be the closure induced by \(\leqslant^{r}L\).
Let \(\mathcal{C} = \tuple{\widetilde{Q}, \Sigma, \eta, \widetilde{I}, \widetilde{F}}\) be the canonical RFA for \(L\) and let \(\cF{r}(L) = \tuple{Q, \Sigma, \delta, I, F}\).
Let \(\varphi: \widetilde{Q} \rightarrow Q\) be the mapping assigning to each state \(\widetilde{q}_i \in \widetilde{Q}\) of the form \(u^{-1}L\), the state \(ρ(u) \in Q\), with \(u \in \Sigma^*\).
We show that \(\varphi\) is an NFA isomorphism between \(\mathcal{C}\) and \(\cF{r}(L)\).
Since
\[u^{-1}L \subseteq L \Leftrightarrow u \leqslant^{r}L \varepsilon \Leftrightarrow \varepsilon \in ρ(u)\enspace ,\]
we have that \(u^{-1}L\) is an initial state of \(\mathcal{C}\) if{}f \(ρ(u)\) is an initial state of \(\cF{r}(L)\), hence \(\varphi(\widetilde{I}) = I\).
On the other hand,
since
\[\varepsilon \in u^{-1}L \Leftrightarrow u \in L \enspace ,\]
we have that \(u^{-1}L\) is a final state of \(\mathcal{C}\) if{}f \(ρ(u)\) is a final state of \(\cF{r}(L)\), hence \(\varphi(\widetilde{F}) = F\).
Moreover, since
\[ρ(u)\cdot a \subseteq ρ(v) \Leftrightarrow v \leqslant^{r}L ua \Leftrightarrow v^{-1}L \subseteq (ua)^{-1}L\enspace ,\]
we have that \(v^{-1}L \in \eta(u^{-1}L, a)\) if and only if \(ρ(v) \in δ(ρ(u),a)\), for all \(u^{-1}L, v^{-1}L \in \widetilde{Q}\) and \(a \in \Sigma\).
Finally, we need to show that \(\forall u \in Σ^*, \; ρ(u) \in Q \Leftrightarrow \exists q_i \in \widetilde{Q}, \; q_i = u^{-1}L\).
Observe that:
\begin{align*}
u^{-1}L = \bigcup_{x \leqslant^{r}n_L u} x^{-1}L & \Leftrightarrow \quad \text{[By Definition~\eqref{eq:Rlanguage}]} \\
u^{-1}L = \bigcup_{x^{-1}L \subset u^{-1}L} x^{-1}L \enspace .
\end{align*}
Therefore, \(\forall u \in Σ^*, ρ(u) \text{ is \(L\)-prime} \Leftrightarrow u^{-1}L \text{ is prime}\), hence \(\varphi(\widetilde{Q}) = Q\).
\item
Recall that \(\mathcal{N}^{\text{res}} = \tuple{Q_r, Σ, δ_r, I_r, F_r}\) is the RFA built by the residualization operation defined by \citet{denis2002residual} (see Chapter~\ref{chap:prel}).
Let \(\cG{r}(\mathcal{N}) = \tuple{\widetilde{Q}, \Sigma, \widetilde{\delta}, \widetilde{I}, \widetilde{F}}\).
Next, we show that there is a surjective mapping \(\varphi\) that associates states and transitions of \(\cG{r}(\mathcal{N})\) with states and transitions of \(\mathcal{N}^{\text{res}}\).
Moreover, if \(q \in \widetilde{Q}\) is initial (resp.\ final) then \(\varphi(q) \in Q_r\) is initial (resp.\ final) and \(q' \in \widetilde{δ}(q,a) \Leftrightarrow \varphi(q') \in δ_r(\varphi(q),a)\).
In this way, we conclude that \(\cG{r}(\mathcal{N})\) is isomorphic to a sub-automaton of \(\mathcal{N}^{\text{res}}\).
Finally, since \(\lang{\mathcal{N}^{\text{res}}} = \lang{\mathcal{N}}\) then it follows from Lemma~\ref{lemma: HrGeneratesL} that \(\lang{\mathcal{N}^{\text{res}}} = \lang{\mathcal{N}} = \lang{\cG{r}(\mathcal{N})}\).
Let \(ρ\) be the closure induced by \(\leqslant^{r}N\) and let \(\varphi: \widetilde{Q} \rightarrow Q_{r}\) be the mapping assigning to each state \(ρ(u) \in \widetilde{Q}\), the set \(\post_u^{\mathcal{N}}(I) \in Q_{r}\) with \(u \in \Sigma^*\).
Since
\[\varepsilon \in ρ(u) \Leftrightarrow u \leqslant^{r}N \varepsilon \Leftrightarrow \post_u^{\mathcal{N}}(I) \subseteq \post_{\varepsilon}^{\mathcal{N}}(I)\enspace ,\]
we have that the initial states of \(\cG{r}(\mathcal{N})\) are mapped to the initial states of \(\mathcal{N}^{\text{res}}\), hence \(\varphi(\widetilde{I}) = I_r\).
On the other hand, since
\[ρ(u) \subseteq L \Leftrightarrow u \in L \Leftrightarrow (\post_u^{\mathcal{N}}(I) \cap F) \neq \varnothing \enspace ,\]
we have that the final states of \(\cG{r}(\mathcal{N})\) are mapped to the final states of \(\mathcal{N}^{\text{res}}\), hence \(\varphi(\widetilde{F}) = F_r\).
Moreover, since
\[ρ(u)\cdot a \subseteq ρ(v) \Leftrightarrow v \leqslant^{r}N ua \Leftrightarrow \post_{v}^{\mathcal{N}}(I) \subseteq \post_{ua}^{\mathcal{N}}(I)\enspace ,\]
it follows that \(\forall u, v \in Σ^*\) such that \(\post_u^{\mathcal{N}}(I), \post_v^{\mathcal{N}}(I) \in Q_r\), we have
\[\post_v^{\mathcal{N}}(I) \in δ_r(\post_u^{\mathcal{N}}(I),a) \Leftrightarrow ρ(v) \in \widetilde{δ}(ρ(u), a)\enspace . \]
Finally, we show that \(\forall u \in Σ^*, \; ρ(u) \in \widetilde{Q} \Rightarrow \post_u^{\mathcal{N}}(I) \in Q_r\).
By definition of \(\widetilde{Q}\) and \(Q_r\), this is equivalent to showing that for every word \(u \in Σ^*\), if \(\post_u^{\mathcal{N}}(I)\) is coverable then \(ρ(u)\) is \(L\)-composite.
Observe that:
\begin{align*}
\post_u^{\mathcal{N}}(I) = \hspace{-10pt}\bigcup_{\post_x^{\mathcal{N}}(I) \subset \post_u^{\mathcal{N}}(I)}\hspace{-10pt} \post_x^{\mathcal{N}}(I) & \Leftrightarrow \quad \text{[\(x \leqslant^{r}n_{\mathcal{N}} u \Leftrightarrow \post_x^{\mathcal{N}}(I) \subset \post_u^{\mathcal{N}}(I)\)]} \\
\post_u^{\mathcal{N}}(I) = \bigcup_{x \leqslant^{r}n_{\mathcal{N}} u} \post_x^{\mathcal{N}}(I) & \Rightarrow \quad \text{[Since \(W_{\post_u^{\mathcal{N}}(I),F}^{\mathcal{N}} = u^{-1}L\)]} \\
u^{-1}L = \bigcup_{x \leqslant^{r}n_{\mathcal{N}} u} x^{-1}L \enspace .
\end{align*}
It follows that if \(\post_u^{\mathcal{N}}(I)\) is coverable then \(ρ(u)\) is \(L\)-composite, hence \(\varphi(\widetilde{Q}) \subseteq Q_r\).
\item
As shown by Lemma~\ref{lemma:HlgeneratesL}, \(\cG{\ell}(\mathcal{N})\) is a co-RFA with no empty states and \(\lang{\cG{\ell}(\mathcal{N})} = \lang{\mathcal{N}}\).
Therefore, it follows from Lemma~\ref{lemma:coResidual_qrL=qrN} that \(\cG{r}(\cG{\ell}(\mathcal{N}))\) is isomorphic to \linebreak\(\cF{r}(\lang{\cG{\ell}(\mathcal{N})}) = \cF{r}(\lang{\mathcal{N}})\).\qedhere
\end{myEnumA}
\end{proof}
Figure~\ref{Figure:diagramAutomata} summarizes all the connections between the automata constructions from Definition~\ref{def:FG}.
\begin{figure}
\caption{Relations between the constructions \(\cG{\ell}
\label{Figure:diagramAutomata}
\end{figure}
It is well-known that determinizing a deterministic automaton yields the same automaton, i.e. \(\mathcal{D}^D = \mathcal{D}\) for every DFA \(\mathcal{D}\).
As a consequence, determinizing an automaton twice is the same as doing it once, i.e. \((\mathcal{N}^{D})^{D} = \mathcal{N}^D\).
However, it is not clear that the same holds for our residualization operation, i.e. it is not clear whether \(\cG{r}(\cG{r}(\mathcal{N})) = \cG{r}(\mathcal{N})\).
The following lemma gives a sufficient condition on an RFA \(\mathcal{H}\) built with our right automata construction so that applying our residualization operation yields the same automaton, i.e. \(\cG{r}(\mathcal{H}) \!=\! \mathcal{H}\).
In particular, we find that \(\cF{r}(L)\) is invariant to our residualization operation \(\cG{r}\).
\begin{lemma}
\label{lemma:qrHEqualqrifHsc}
Let \(L\) be a regular language and let \(\leqslant^{r}\) be a right \(L\)-consistent quasiorder.
Let \(\mathcal{H}\!=\!\mathsf{H}^{r}(\leqslant^{r},L)\).
If \(\mathcal{H}\) is a strongly consistent RFA then \(\mathord{\leqslant^{r}_{\mathcal{H}}} = \mathord{\leqslant^{r}}\).
\end{lemma}
\begin{proof}
Let \(\mathcal{N} \!=\! \tuple{Q, Σ, δ, I, F}\) and \(\mathcal{H} \!=\! \tuple{\widetilde{Q}, Σ, \widetilde{δ}, \widetilde{I}, \widetilde{F}}\).
As shown by Lemma~\ref{lemma: HrGeneratesL}, \(\mathcal{H} \!=\! \mathsf{H}^{r}(\leqslant^{r}, L)\) is an RFA generating \(L\), hence each state of \(\mathcal{H}\) is an \(L\)-prime principal \(ρ_{\leqslant^{r}}(u)\) whose right language is the quotient \(u^{-1}L\) for some \(u \in Σ^*\).
Observe that, by definition,
\(\mathord{\leqslant^{r}_{\mathcal{H}}} = \mathord{\leqslant^{r}} \Leftrightarrow \left( \forall u,v \in Σ^*, \; \post_u^{\mathcal{H}}(\widetilde{I}) \subseteq \post_v^{\mathcal{H}}(\widetilde{I}) \Leftrightarrow u \leqslant^{r} v\right)\).
Next we prove that:
\begin{equation}
\label{eq:PostQuotient}
\post_u^{\mathcal{H}}(\widetilde{I}) = \{ρ_{\leqslant^{r}}(x) \in \widetilde{Q} \mid x \leqslant^{r} u\} \enspace .
\end{equation}
First, we show that \(\post_u^{\mathcal{H}}(\widetilde{I}) \subseteq \{ρ_{\leqslant^{r}}(x) \in \widetilde{Q} \mid x \leqslant^{r} u\}\).
To simplify the notation, let \(ρ\) denote \(ρ_{\leqslant^{r}}\).
\begin{align*}
ρ(x) \in \post_u^{\mathcal{H}}(\widetilde{I}) & \Leftrightarrow \quad \text{[By definition of \(\post_u^{\mathcal{H}}(\widetilde{I})\)]}\\
\exists ρ(x_0) \in \widetilde{I}, \; u \in W^{\mathcal{H}}_{ρ(x_0),ρ(x)} & \Rightarrow \quad \text{[By Definition~\ref{def:right-const:qo}]} \\
\exists ρ(x_0) \in \widetilde{Q}, \;\varepsilon \in ρ(x_0) \land ρ(x_0) \cdot u \subseteq ρ(x) & \Leftrightarrow \quad \text{[By definition of \(ρ\)]} \\
\exists ρ(x_0) \in \widetilde{Q}, \; x_0 \leqslant^{r} \varepsilon \land x \leqslant^{r} u\cdot x_0 & \Rightarrow \quad \text{[By mon. and trans. of \(\leqslant^{r}\)]} \\
x \leqslant^{r} u \enspace .
\end{align*}
We now prove the reverse inclusion.
Let \(ρ(u), ρ(x) \in \widetilde{Q}\) be such that \(x \leqslant^{r} u\).
Then,
\begin{align*}
ρ(u) \in \widetilde{Q} & \Rightarrow \quad \text{[By Lemma~\ref{lemma: HrGeneratesL}]}\\
W^{\mathcal{H}}_{ρ(u),F} = u^{-1}L & \Rightarrow \quad \text{[Since \(\mathcal{H}\) is str. cons.]} \\
u \in W_{I,ρ(u)}^{\mathcal{H}} & \Rightarrow \quad \text{[By def. \(W_{S,T}^{\mathcal{H}}\), \(u = za\)]} \\
\exists ρ(y) \in \widetilde{Q}, ρ(u_0) \in \widetilde{I},\; z \in W_{ρ(u_0), ρ(y)} \land a \in W_{ρ(y), ρ(u)}& \Rightarrow \quad \text{[By Definition~\ref{def:right-const:qo}]} \\
z \in W_{ρ(u_0), ρ(y)} \land ρ(y) \cdot a \subseteq ρ(u)& \Rightarrow \quad \text{[By def. \(ρ = ρ_{\leqslant^{r}}\)]} \\
z \in W_{ρ(u_0), ρ(y)} \land u\leqslant^{r} y\cdot a & \Rightarrow \quad \text{[Since \(x \leqslant^{r} u\)]} \\
z \in W_{ρ(u_0), ρ(y)} \land x \leqslant^{r} ya & \Rightarrow \quad \text{[By Def.~\ref{def:right-const:qo}]} \\
z \in W_{ρ(u_0), ρ(y)} \land ρ(x) \in δ(ρ(y), a) & \Rightarrow \quad \text{[By def. \(\post_u(I)\)]} \\
ρ(x) \in \post_u(I) \enspace .
\end{align*}
It follows from Equation~\eqref{eq:PostQuotient} that \(\post_u^{\mathcal{H}}(I) \subseteq \post^{\mathcal{H}}_v(I) \Leftrightarrow u \leqslant^{r} v\), i.e. \(\mathord{\leqslant^{r}_{\mathcal{H}}} = \mathord{\leqslant^{r}}\).
\end{proof}
Finally, note that if \(\mathord{\leqslant^{r}L}=\mathord{\leqslant^{r}N}\) then clearly the automata \(\cF{r}(L)\) and \(\cG{r}(\mathcal{N})\) coincide for any NFA \(\mathcal{N}\) with \(L = \lang{\mathcal{N}}\).
The following result shows that the reverse implication also holds.
\begin{lemma}\label{lemma:qrlEqualqrNResEqualCan}
Let \(\mathcal{N}\) be an NFA with \(L = \lang{\mathcal{N}}\).
Then \(\mathord{\leqslant^{r}L} = \mathord{\leqslant^{r}N}\) if{}f \(\cG{r}(\mathcal{N})\) is isomorphic to \(\cF{r}(L)\).
\end{lemma}
\begin{proof}
As shown by Theorem~\ref{theoremF}~\ref{theorem:CanonicalRFAlanguage}, \(\cF{r}(L)\) is the canonical RFA for \(L\), hence it is strongly consistent and, by Lemma~\ref{lemma:qrHEqualqrifHsc}, we have that \(\mathord{\leqslant^{r}_{\cF{r}(L)}} = \mathord{\leqslant^{r}L}\).
On the other hand, if \(\cG{r}(\mathcal{N})\) is isomorphic to \(\cF{r}(L)\) we have that \(\mathord{\leqslant^{r}_{\cG{r}(\mathcal{N})}} = \mathord{\leqslant^{r}_{\cF{r}(L)}}\), and by Lemma~\ref{lemma:qrHEqualqrifHsc}, \(\mathord{\leqslant^{r}_{\cG{r}(\mathcal{N})}} =~ \leqslant^{r}N\).
It follows that if \(\cG{r}(\mathcal{N})\) is isomorphic to \(\cF{r}(L)\) then \(\mathord{\leqslant^{r}L} = \mathord{\leqslant^{r}N}\).
Finally, if \(\mathord{\leqslant^{r}L} = \mathord{\leqslant^{r}N}\) then \(\mathsf{H}^{r}(\leqslant^{r}L, L) = \mathsf{H}^{r}(\leqslant^{r}N, \lang{\mathcal{N}})\), in other words, \( \cF{r}(L) = \cG{r}(\mathcal{N})\).
\end{proof}
The following example illustrates the differences between our residualization operation, \linebreak\(\cG{r}(\mathcal{N})\), and the one defined by~\citet{denis2002residual}, \(\mathcal{N}^{\text{res}}\), on a given NFA \(\mathcal{N}\): the automaton \(\cG{r}(\mathcal{N})\) has, at most, as many states as \(\mathcal{N}^{\text{res}}\).
This follows from the fact that for every \(u \in Σ^*\), if \(\post_u^{\mathcal{N}}(I)\) is coverable then \(ρ_{\leqslant^{r}N}(u)\) is composite but \emph{not} vice-versa.
\begin{example}
\label{example:residualization}
Let \(\mathcal{N} = \tuple{Q, Σ, δ, I, F}\) be the automata on the left of Figure~\ref{fig:Residuals} and let \(L = \lang{\mathcal{N}}\).
In order to build \(\mathcal{N}^{\text{res}}\) we compute \(\post_u^{\mathcal{N}}(I)\), for all \(u \in Σ^*\).
Let \(C \ud L^c \setminus \{\varepsilon, a, b, c\}\).
\begin{align*}
\post_{\varepsilon}^{\mathcal{N}}(I) & = \{0\} & \post_a^{\mathcal{N}}(I) & = \{1,2\}& \forall w \in L, \;\post_{w}^{\mathcal{N}}(I) & = \{5\} \\
\post_c^{\mathcal{N}}(I) & = \{1, 2, 3, 4\} & \post_b^{\mathcal{N}}(I) & = \{1,3\} &\forall w \in C,\; \post_{w}^{\mathcal{N}}(I) & = \varnothing
\end{align*}
Since none of these sets is coverable by the others, they are all states of \(\mathcal{N}^{\text{res}}\).
The resulting RFA \(\mathcal{N}^{\text{res}}\) is shown in the center of Figure~\ref{fig:Residuals}.
On the other hand, let us denote \(ρ_{\leqslant^{r}N}\) simply by \(ρ\).
In order to build \(\cG{r}(\mathcal{N})\) we need to compute the principals \(ρ(u)\), for all \(u \!\in\! Σ^*\).
By definition of \(\leqslant^{r}N\), we have that \(w \!\in\! ρ(u) \Leftrightarrow \post_u^{\mathcal{N}}(I) \!\subseteq\! \post_w^{\mathcal{N}}(I)\).
Therefore, we obtain:
\begin{align*}
ρ(\varepsilon) & = \{\varepsilon\} & ρ(a) &= \{a,c\} & ρ(c) & = \{c\} & ρ(b) & = \{b,c\} & \forall w \in L,\; ρ(w) & = L & \forall w \in C,\;ρ(w) & = \Sigma^*
\end{align*}
Since \(a \leqslant^{r}n_{\mathcal{N}} c\), \(b \leqslant^{r}n_{\mathcal{N}} c\) and \(\forall w\in Σ^*, \; cw \in L \Leftrightarrow \big(aw \in L \lor bw \in L\big)\), it follows that \(ρ(c)\) is \(L\)-composite.
The resulting RFA \(\cG{r}(\mathcal{N})\) is shown on the right of Figure~\ref{fig:Residuals}.
{\ensuremath{\Diamond}}
\end{example}
\begin{figure}
\caption{Left to right: an NFA \(\mathcal{N}
\label{fig:Residuals}
\end{figure}
\section{Double-Reversal Method for Building the Canonical RFA}
\label{sec:Novel}
\citet{denis2002residual} show that their residualization operation satisfies the residual-equivalent of the double-reversal method for building the minimal DFA\@.
More specifically, they prove that if an NFA \(\mathcal{N}\) is a co-RFA with no empty states then their residualization operation applied to \(\mathcal{N}\) results in the canonical RFA for \(\lang{\mathcal{N}}\).
As a consequence,
\[(((\mathcal{N}^R)^{\text{res}})^R)^{\text{res}} \text{ is the canonical RFA for } \lang{\mathcal{N}}\enspace .\]
In this section we show that the residual-equivalent of the double-reversal method works when using our automata constructions based on quasiorders, i.e.
\[\cG{r}((\cG{r}(\mathcal{N}^R))^R) \text{ is isomorphic to } \cF{r}(\lang{\mathcal{N}})\enspace .\]
Then, we generalize this method along the lines of the generalization of the double-reversal method for building the minimal DFA given by \citet{Brzozowski2014}.
To this end, we extend the work of \citet{ganty2019congruence} where they use congruences to offer a new perspective on the generalization of \citet{Brzozowski2014}.
By switching from congruences to monotone quasiorders, we are able to give a \emph{necessary} and \emph{sufficient} condition on an NFA \(\mathcal{N}\) that guarantees that our residualization operation yields the canonical RFA for \(\lang{\mathcal{N}}\).
Finally, we compare our generalization with the one given by \citet{tamm2015generalization}.
\subsection{Double-reversal Method}
We give a simple proof of the double-reversal method for building the canonical RFA for the language generated by a given NFA \(\mathcal{N}\)\@.
\begin{theorem}[Double-Reversal]
\label{theorem:DoubleReversal}
Let \(\mathcal{N}\) be an NFA\@.
Then \(\cG{r}((\cG{r}(\mathcal{N}^R))^R)\) is isomorphic to the canonical RFA for \(\lang{\mathcal{N}}\).
\end{theorem}
\begin{proof}
It follows from Theorem~\ref{theoremF}~\ref{lemma:AlRequalArNR}, \ref{theorem:CanonicalRFAlanguage} and \ref{lemma:LS+RS=RN}.
\end{proof}
Note that Theorem~\ref{theorem:DoubleReversal} can be inferred from Figure~\ref{Figure:diagramAutomata} by following the path starting at \(\mathcal{N}\), labeled with \(R-\cG{r}-R-\cG{r}\) and ending in \(\cF{r}(\lang{\mathcal{N}})\).
\subsection{Generalization of the Double-reversal Method}
Next we show that residualizing an automaton yields the canonical RFA if{}f the left language of every state is closed w.r.t. the right Nerode quasiorder.
\begin{theorem}
\label{theorem:canonicalreverserestic}
Let \(\mathcal{N} = \tuple{Q,\Sigma,\delta,I,F}\) be an NFA with \(L=\lang{\mathcal{N}}\).
Then \(\cG{r}(\mathcal{N})\) is the canonical RFA for \(L\) if{}f \(\forall q \in Q,\; ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}}) = W_{I,q}^{\mathcal{N}}\).
\end{theorem}
\begin{proof}
We first show that \(\forall q \in Q,\ ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}}) = W_{I,q}^{\mathcal{N}}\) is a \emph{necessary} condition, i.e.\ if \(\cG{r}(\mathcal{N})\) is the canonical RFA for \(L\) then \(\forall q \in Q,\ ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}}) = W_{I,q}^{\mathcal{N}}\) holds.
By Lemma~\ref{lemma:qrlEqualqrNResEqualCan} we have that if \(\cG{r}(\mathcal{N})\) is the canonical RFA for \(L\) then \(\mathord{\leqslant^{r}L} = \mathord{\leqslant^{r}N}\).
Moreover:
\begin{align*}
ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}}) & = \; \text{[By definition of \(ρ_{\leqslant^{r}L}\)]}\\
\{w \in \Sigma^* \mid \exists u \in W_{I,q}^{\mathcal{N}}, \; u^{-1}L \subseteq w^{-1}L\} & = \; \text{[Since \(\mathord{\leqslant^{r}L} = \mathord{\leqslant^{r}N}\)]} \\
\{w \in \Sigma^* \mid \exists u \in W_{I,q}^{\mathcal{N}}, \; \post_u^{\mathcal{N}}(I) \subseteq \post_w^{\mathcal{N}}(I)\} & \subseteq \; \text{[Since \( u \in W_{I,q}^{\mathcal{N}} \Leftrightarrow q \in \post_u^{\mathcal{N}}(I)\)]}\\
\{w \in \Sigma^* \mid q \in \post_w^{\mathcal{N}}(I)\} & = \; \text{[By definition of \(W_{I,q}^{\mathcal{N}}\)]}\\
W_{I,q}^{\mathcal{N}} \enspace .
\end{align*}
By reflexivity of \(\leqslant^{r}L\), we conclude that \(ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}}) = W_{I,q}^{\mathcal{N}}\).
Next, we show that \(\forall q \in Q,\; ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}}) = W_{I,q}^{\mathcal{N}}\) is also a \emph{sufficient} condition.
By Lemma~\ref{lemma:positive_atoms} and condition \(\forall q \in Q,\; ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}}) = W_{I,q}^{\mathcal{N}}\), we have that
\begin{equation}\label{eq:rhoNIntersectOfrhoL}
ρ_{\leqslant^{r}N}(u) = \bigcap{\textstyle_{q \in \post^{\mathcal{N}}_u(I)}} W_{I,q}^{\mathcal{N}} = \bigcap{\textstyle_{q \in \post^{\mathcal{N}}_u(I)}} ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}})\enspace .
\end{equation}
Since \(u \in ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}})\) for all \(q \in \post_u^{\mathcal{N}}(I)\), it follows that \(ρ_{\leqslant^{r}L}(u) \subseteq ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}})\) for all \(q \in \post_u^{\mathcal{N}}(I)\) and, since \(ρ_{\leqslant^{r}N}(u) = \bigcap\textstyle{_{q \in \post^{\mathcal{N}}_u(I)}} ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}})\), we have that \(ρ_{\leqslant^{r}L}(u) \subseteq ρ_{\leqslant^{r}N}(u)\) for every \(u \in Σ^*\), i.e., \(\mathord{\leqslant^{r}L} \subseteq \mathord{\leqslant^{r}N}\).
On the other hand, as shown by \citet{deLuca1994}, we have that \(\mathord{\leqslant^{r}N}\subseteq \mathord{\leqslant^{r}L}\).
We conclude that \(\mathord{\leqslant^{r}N} = \mathord{\leqslant^{r}L}\), hence \(\cG{r}(\mathcal{N}) = \cF{r}(L)\).
\end{proof}
It is worth remarking that Theorem~\ref{theorem:canonicalreverserestic} does not hold when considering the residualization operation \(\mathcal{N}^{\text{res}}\)~\cite{denis2002residual}.
As a counterexample we have the automaton \(\mathcal{N}\) in Figure~\ref{fig:Residuals} where \(\cG{r}(\mathcal{N})\) is the canonical RFA for \(\lang{\mathcal{N}}\), hence \(\mathcal{N}\) satisfies the condition of Theorem~\ref{theorem:canonicalreverserestic}, while \(\mathcal{N}^{\text{res}}\) is not canonical.
\subsubsection{Co-atoms and co-rests}
The condition of Theorem~\ref{theorem:canonicalreverserestic} is analogous to the one \citet[Theorem 16]{ganty2019congruence} give for building the minimal DFA, except that the latter is formulated in terms of congruences instead of quasiorders.
In that case they prove that, given an NFA \(\mathcal{N}=\tuple{Q,Σ,δ,I,F}\) with \(L = \lang{\mathcal{N}}\),
\[\mathcal{N}^D \text{ is the minimal DFA for \(L\) if{}f} \forall q \in Q, \; ρ_{\sim^{r}L}(W^{\mathcal{N}}_{I,q}) = W^{\mathcal{N}}_{I,q}\enspace ,\]
where \(\mathord{\sim^{r}L} \ud \mathord{\leqslant^{r}L} \cap \mathord{(\leqslant^{r}L)^{-1}}\) is the right Nerode's congruence.
Moreover, \citet{ganty2019congruence} show that the principals of \(\sim^{r}L\) coincide with the so-called \emph{co-atoms}, which are non-empty intersections of complemented and uncomplemented right quotients of the language.
This allowed them to connect their result with the generalization of the double-reversal method for DFAs of \citet{Brzozowski2014}, who establish that determinizing an NFA \(\mathcal{N}\) yields the minimal DFA for \(\lang{\mathcal{N}}\) if{}f the left languages of the states of \(\mathcal{N}\) are unions of co-atoms of \(\lang{\mathcal{N}}\).
Next, we give a formulation of the condition from Theorem~\ref{theorem:canonicalreverserestic} along the lines of the one given by \citet{Brzozowski2014} for their generalization of the double-reversal method for building the minimal DFA.
To do that, let us call the intersections used in Lemma~\ref{lemma:positive_atoms} to describe the principals of \(\leqslant^{\ell}L\) and \(\leqslant^{r}L\) as \emph{rests} and \emph{co-rests} of \(L\), respectively.
\begin{definitionNI}[Rest and Co-rest]\index{Rest}\index{Co-rest}
\label{def:rest}
Let \(L\) be a regular language.
A \emph{rest} (resp. \emph{co-rest}) is any non-empty intersection of left (resp.\ right) quotients of \(L\).
\rule{0.5em}{0.5em}
\end{definitionNI}
As shown by Theorem~\ref{theorem:canonicalreverserestic}, residualizing an NFA \(\mathcal{N}\) yields the canonical RFA for \(\lang{\mathcal{N}}\) if{}f the left language of every state of \(\mathcal{N}\) satisfies \(ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}}) = W_{I,q}^{\mathcal{N}}\).
By definition, \(ρ_{\leqslant^{r}L}(S) = S\) if{}f \(S\) is a union of principals of \(\leqslant^{r}L\).
Therefore we derive the following statement, equivalent to Theorem~\ref{theorem:canonicalreverserestic}, that we consider as the residual-equivalent of the generalization of the double-reversal for building the minimal DFA~\cite{Brzozowski2014}.
\begin{corollary}
Let \(\mathcal{N}\) be an NFA with \(L=\lang{\mathcal{N}}\).
Then \(\cG{r}(\mathcal{N})\) is the canonical RFA for \(L\) if{}f the left languages of \(\mathcal{N}\) are unions of co-rests.
\end{corollary}
\subsubsection{Tamm's Generalization of the Double-reversal Method for RFAs}\label{sec:TammGen}
\citet{tamm2015generalization} generalized the double-reversal method of \citet{denis2002residual} by showing that \(\mathcal{N}^{\text{res}}\) is the canonical RFA for \(\lang{\mathcal{N}}\) if{}f the left languages of \(\mathcal{N}\) are unions of the left languages of the canonical RFA for \(\lang{\mathcal{N}}\).
In this section, we compare the generalization of \citet{tamm2015generalization} with ours.
The two approaches differ in the definition of the residualization operation they consider and, as we show next, the sufficient and necessary condition from Theorem~\ref{theorem:canonicalreverserestic} is more general than that of \citet[Theorem 4]{tamm2015generalization}.
\begin{lemma}\label{lemma:WeRaTamm}
Let \(\mathcal{N} = \tuple{Q, Σ, δ, I, F}\) be an NFA with \(L = \lang{\mathcal{N}}\) and let \(\mathcal{C} = \cF{r}(\leqslant^{r}L, L) = \tuple{\widetilde{Q}, Σ, \widetilde{δ}, \widetilde{I}, \widetilde{F}}\) be the canonical RFA for \(L\).
Then
\[W_{I,q}^{\mathcal{N}} = \bigcup_{q \in \widetilde{Q}}W_{\widetilde{I},q}^{\mathcal{C}} \implies ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}}) = W_{I,q}^{\mathcal{N}}\enspace .\]
\end{lemma}
\begin{proof}
Since the canonical RFA \(\mathcal{C}\) is strongly consistent, it follows from Lemma~\ref{lemma:qrHEqualqrifHsc} that \(\mathord{\leqslant^{r}_{\mathcal{C}}} = \mathord{\leqslant^{r}L}\) and, consequently, \(\cG{r}(\mathcal{C})\) is isomorphic to \(\cF{r}(L)\).
It follows from Theorem~\ref{theorem:canonicalreverserestic} that \(ρ_{\leqslant^{r}L}(W_{\widetilde{I},q}^{\mathcal{C}}) = W_{\widetilde{I},q}^{\mathcal{C}}\) for every \(q \in \widetilde{Q}\).
Therefore,
\begin{align*}
ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}}) & = \quad \text{[Since \(W_{I,q}^{\mathcal{N}} = {\textstyle\bigcup_{q \in \widetilde{Q}}W_{\widetilde{I},q}^{\mathcal{C}}}\) and \(ρ_{\leqslant^{r}L}(\cup S_i) = \cup ρ_{\leqslant^{r}L}(S_i)\)]} \\
{\textstyle\bigcup_{q \in \widetilde{Q}}ρ_{\leqslant^{r}L}(W_{\widetilde{I},q}^{\mathcal{C}})} & = \quad \text{[Since \(ρ_{\leqslant^{r}L}(W_{\widetilde{I},q}^{\mathcal{C}}) = W_{\widetilde{I},q}^{\mathcal{C}}\) for every \(q \in \widetilde{Q}\)]}\\
{\textstyle\bigcup_{q \in \widetilde{Q}}W_{\widetilde{I},q}^{\mathcal{C}}}\enspace .\tag*{\qedhere}
\end{align*}
\end{proof}
Observe that, since the canonical RFA \(\mathcal{C} = \tuple{\widetilde{Q}, Σ, \widetilde{δ}, \widetilde{I}, \widetilde{F}}\) for a language \(L\) is strongly consistent, the left language of each state is a principal of \(\leqslant^{r}L\).
In particular, if the right language of a state is \(u^{-1}L\) then its left language is the principal \(ρ_{\leqslant^{r}L}(u)\).
Therefore, if \(W_{I,q}^{\mathcal{N}} = \bigcup_{q \in \widetilde{Q}}W_{\widetilde{I},q}^{\mathcal{C}}\) then \(W_{I,q}^{\mathcal{N}}\) is a closed set in \(ρ_{\leqslant^{r}L}\).
However, the reverse implication does not hold since \emph{only the \(L\)-prime principals are left languages of states of \(\mathcal{C}\)}.
On the other hand, Lemma~\ref{lemma:CompositeIntersection} shows that \(L\)-composite principals can be described as intersections of \(L\)-prime principals when we consider the Nerode's quasiorder \(\leqslant^{r}L\).
As a consequence, our residualization operation applied on an NFA \(\mathcal{N}\) yields the canonical RFA for \(\lang{\mathcal{N}}\) if{}f the left languages of states of \(\mathcal{N}\) are \emph{unions of non-empty intersections of left languages of the canonical RFA}, while \citet{tamm2015generalization} proves that \(\mathcal{N}^{\text{res}}\) yields the canonical RFA if{}f the left languages of states of \(\mathcal{N}\) are \emph{unions of left languages of the canonical RFA}.
\section{Learning Residual Automata}\label{sec:LearningNL:qo}
\citet{bollig2009angluin} devised the NL\(^*\) algorithm for learning the canonical RFA for a given regular language.
The algorithm describes the behavior of a \demph{Learner} that infers a language \(L\) by performing membership queries on \(L\) (which are answered by a \demph{Teacher}) and equivalence queries between the language generated by a candidate automaton and \(L\) (which are answered by an \demph{Oracle}).
The algorithm terminates when the \emph{Learner} builds an RFA generating the language \(L\).
For the sake of completeness, we offer an overview of the NL\(^*\) algorithm as presented by~\citet{bollig2009angluin}.
\subsection{The \texorpdfstring{NL\(^*\)}{NL*} Algorithm~\texorpdfstring{\cite{bollig2009angluin}}{}}
The \emph{Learner} maintains a prefix-closed finite set \(\Pref \subseteq Σ^*\) and a suffix-closed finite set \(\Suf \subseteq Σ^*\).
The \emph{Learner} groups the words in \(\Pref\) by building a \demph{table} \(\mathcal{T} = (T, \Pref, \Suf)\) where \(T: (\Pref \cup \Pref \cdot Σ) \times \Suf \to \{{+}, {-}\}\) is a function such that for every \(u \in \Pref \cup \Pref \cdot Σ\) and \(v \in \Suf\) we have that \(T(u,v) = {+} \Leftrightarrow uv \in L\).
Otherwise \(T(u,v) = {-}\).
For every word \(u \in \Pref \cup \Pref \cdot Σ\), define the function \(\row(u): \Suf \to \{{+}, {-}\}\) as \(\row(u)(v) \ud T(u,v)\).
The set of all rows of a table \(\mathcal{T}\) is denoted by \(\Rows(\mathcal{T})\).
The algorithm uses the table \(\mathcal{T} = (T, \Pref, \Suf)\) to build an automaton whose states are some of the rows of \(\mathcal{T}\).
In order to do that, it is necessary to define the notions of \emph{union} of rows, \emph{prime} row and \emph{composite} row.
\begin{definition}[Join Operator]
\label{def:join}
Let \(\mathcal{T} = (T, \Pref, \Suf)\) be a table.
For every pair of rows \(r_1, r_2 \in \Rows(\mathcal{T})\), define the \emph{join} \(r_1 \sqcup r_2: \Suf \to \{{+},{-}\}\) as:
\[\forall x \in \Suf, \; (r_1 \sqcup r_2)(x) \ud \left\{\begin{array}{ll}
{+} & \text{if } r_1(x) = + \lor r_2(x) = + \\
{-} & \text{otherwise}\end{array}\right. \tag*{\rule{0.5em}{0.5em}}\]
\end{definition}
Note that the join operator is associative, commutative and idempotent.
However, the join of two rows is not necessarily a row of \(\mathcal{T}\).
\begin{definition}[Covering Relation]
\label{def:coverRow}
Let \(\mathcal{T} = (T, \Pref, \Suf)\) be a table.
Then, for every pair of rows \(r_1, r_2 \in \Rows(\mathcal{T})\) we have that
\[r_1 \sqsubseteq r_2 \udiff \forall x \in \Suf, \; r_1(x) = {+} \Rightarrow r_2(x) = {+}\enspace .\]
We write \(r_1 \sqsubset r_2\) to denote \(r_1 \sqsubseteq r_2 \) and \(r_1 \neq r_2\).
\rule{0.5em}{0.5em}
\end{definition}
\begin{definition}[Composite and Prime Rows]
\label{def:PrimeRow}
Let \(\mathcal{T} = (T, \Pref, \Suf)\) be a table.
We say a row \(r \in \Rows(\mathcal{T})\) is \(\mathcal{T}\)-\emph{composite} if it is the join of all the rows that it strictly covers, i.e. \(r = \bigsqcup_{r' \in \Rows(\mathcal{T}), \; r' \sqsubset r} r'\).
Otherwise, we say \(r\) is \(\mathcal{T}\)-\emph{prime}.
\rule{0.5em}{0.5em}
\end{definition}
\begin{definition}[Closed and Consistent Table]
\label{def:Table}
Let \(\mathcal{T} = (T, \Pref, \Suf)\) be a table.
Then
\begin{myEnumA}
\item \(\mathcal{T}\) is \emph{closed} if{}f \label{def:Table:closed}
\(\forall u \in \Pref, a \in Σ, \; \row(ua) = \bigsqcup \{\row(v) \mid v \in \Pref, \; \row(v) \sqsubseteq \row(ua) \land \row(v) \text{ is \(\mathcal{T}\)-prime}\}\).
\item \(\mathcal{T}\) is \emph{consistent} if{}f \label{def:Table:Consistent} \(\row(u) \sqsubseteq \row(v) \Rightarrow \row(ua) \sqsubseteq \row(va) \text{ for every \(u,v \in \Pref\) and \(a \in Σ\)}\)\eod
\end{myEnumA}
\end{definition}
At each iteration of the algorithm, the \emph{Learner} checks whether the current table \(\mathcal{T} = (T, \Pref, \Suf)\) is closed and consistent.
If \(\mathcal{T}\) is not closed, then it finds \(\row(ua)\) with \(u \in \Pref, a \in Σ\) such that \(\row(ua)\) is \(\mathcal{T}\)-prime and it is not equal to any \(\row(v)\) with \(v \in \Pref\).
Then the \emph{Learner} adds \(ua\) to \(\Pref\) and updates the table \(\mathcal{T}\).
Similarly, if \(\mathcal{T}\) is not consistent, the \emph{Learner} finds \(u, v \in \Pref, a \in Σ, x \in \Suf\) such that \(\row(u) \sqsubseteq \row(v)\) but \(\row(ua)(x) = {+} \land \row(va)(x) = {-}\).
Then the \emph{Learner} adds \(ax\) to \(\Suf\) and updates \(\mathcal{T}\).
When the table \(\mathcal{T}\) is closed and consistent, the \emph{Learner} builds the RFA \(\mathsf{R}(\mathcal{T})\).
\begin{definition}[Automata Construction \(\mathsf{R}(\mathcal{T})\)]
Let \(\mathcal{T} = (T, \Pref, \Suf)\) be a closed and consistent table.
Define the automaton \(\mathsf{R}(\mathcal{T}) \ud \tuple{Q, Σ, δ, I, F}\) with \(Q = \{\row(u) \mid u \in \Pref \land \row(u) \text{ is \(\mathcal{T}\)-prime}\}\), \(I {=} \{\row(u) \in Q \mid \row(u) \sqsubseteq \row(\varepsilon)\}\), \(F {=} \{\row(u) \in Q\mid \row(u)(\varepsilon) = +\}\) and \(δ(\row(u),a) = \{\row(v) \in Q \mid \row(v) \sqsubseteq \row(ua)\}\) for all \(\row(u) \in Q, a \in Σ\).
\rule{0.5em}{0.5em}
\end{definition}
The \emph{Learner} asks the \emph{Oracle} whether \(\lang{\mathsf{R}(\mathcal{T})} = L\).
If the \emph{Oracle} answers \emph{yes} then the algorithm terminates.
Otherwise, the \emph{Oracle} returns a counterexample \(w\) for the language equivalence.
Then the \emph{Learner} adds every suffix of \(w\) to \(\Suf\), updates the table \(\mathcal{T}\) and repeats the process.
\subsection{The \texorpdfstring{NL\(^{\leqslant}\)}{NLqo} Algorithm}
In this section we present a quasiorder-based perspective on the NL\(^*\) algorithm in which the \emph{Learner} iteratively refines a quasiorder on \(Σ^*\) by querying the \emph{Teacher} and uses an adaptation of the automata construction from Definition~\ref{def:right-const:qo} to build an RFA that is used to query the \emph{Oracle}.
We capture this approach in the so-called \emph{NL\(^{\leqslant}\) algorithm}.
\RemoveAlgoNumber
\begin{algorithm}[!ht]
\caption{NL\(^\leqslant\): A quasiorder-based version of NL\(^*\)}\label{alg:NL-star}
\SetAlgorithmName{Algorithm NL\(^{\leqslant}\)}{}
\SetSideCommentRight
\KwData{A \emph{Teacher} that answers membership queries in \(L\)}
\KwData{An \emph{Oracle} that answers equivalence queries between the language generated by an RFA and \(L\)}
\KwResult{The canonical RFA for the language \(L\).}
\(\mathcal{P}, \mathcal{S} := \{\varepsilon\}\)\;
\While{True\label{step:teacher-yes}}{
\label{step:loop}
\While{\(\qr_{L_{\Suf}}\) not closed or not consistent:}{
\If{\(\qr_{L_{\Suf}}\) is not closed}{
Find \(u \in \Pref, a \in \Sigma\) with \(ρ_{\qr_{L_{\Suf}}}(u)\) \(L_{\Suf}\)-prime for \(\Pref\) and \(\forall v \in \Pref, \; ρ_{\qr_{L_{\Suf}}}(u) \neq ρ_{\qr_{L_{\Suf}}}(v)\)\;
Let \(\mathcal{P} := \mathcal{P} \cup \{ua\}\)\;
}
\If{\(\qr_{L_{\Suf}}\) is not consistent}{
Find \(u, v \in \Pref, a \in \Sigma\) with \(u \qr_{L_{\Suf}} v\) s.t. \(u a \not\qr_{L_{\Suf}} v a \)\;
Find \(x \in (ua)^{-1}L \cap ((va)^{-1}L)^c \cap \Suf\) \;
Let \(\mathcal{S} := \mathcal{S} \cup \{ax\}\)\;
}
}
\label{step:DFA-const}Build \(\mathsf{R}(\qr_{L_{\Suf}}, \Pref)\)\;
Ask the \emph{Oracle} whether \(L = \lang{\mathsf{R}(\qr_{L_{\Suf}},\Pref)}\)\;
\If{the \emph{Oracle} replies with a counterexample \(w\)}{
Let \(\Suf := \Suf \cup \{x \in \Sigma^* \mid w = w'x \text{ with } w' \in \Sigma^*\}\)\;
}\Else{\Return{\(\mathsf{R}(\qr_{L_{\Suf}}, \Pref)\)}\;}
}
\end{algorithm}
Next we explain the behavior of algorithm NL\(^{\leqslant}\) and give the necessary definitions in order to understand it and its relation with the algorithm NL\(^*\).
The \emph{Learner} maintains a prefix-closed finite set \(\Pref \subseteq Σ^*\) and a suffix-closed finite set \(\Suf \subseteq Σ^*\).
The set \(\Suf\) is used to \emph{approximate} the principals in \(\leqslant^{r}L\) for the words in \(\Pref\).
In order to manipulate these approximations, we define the following two operators.
\begin{definitionNI}\label{def:subsetS}
Let \(L\) be a language, \(\Suf \subseteq \Sigma^*\) and \(u \in Σ^*\).
Define:
\begin{align*}
u^{-1}L =_{\Suf} v^{-1}L & \udiff \left(u^{-1}L \cap \Suf\right) = \left(v^{-1}L \cap \Suf\right) &
u^{-1}L \subseteq_{\Suf} v^{-1}L & \udiff \left(u^{-1}L \cap \Suf\right) \subseteq \left(v^{-1}L \cap \Suf\right) .\tag*{\eod}
\end{align*}
\end{definitionNI}
These operators allow us to define an over-approximation of Nerode's quasiorder that can be decided with finitely many membership tests.
\begin{definition}[Right-language-based quasiorder w.r.t. \(\Suf\)]
\label{def:finiteNerode}
Let \(L\) be a language, \(\Suf \subseteq \Sigma^*\) and \(u,v \in Σ^*\).
Define \(u \qr_{L_{\Suf}} v \udiff u^{-1}L \subseteq_{\Suf} v^{-1}L\).\eod
\end{definition}
Recall that the \emph{Learner} only manipulates the principals for the words in \(\Pref\).
Therefore, we need to adapt the notion of composite principal for \(\qr_{L_{\Suf}}\).
\begin{definition}[\(L_{\Suf}\)-Composite Principal w.r.t. \(\Pref\)]
Let \(\Pref, \Suf \subseteq \Sigma^*\) with \(u \in \Pref\) and let \(L \subseteq Σ^*\) be a language.
We say \(ρ_{\qr_{L_{\Suf}}}(u)\) is \emph{\(L_{\Suf}\)-composite w.r.t. \(\Pref\)} if{}f
\[u^{-1}L =_{\Suf} \bigcup_{x \in \Pref, \; x \qr_{L_{\Suf}}n u} x^{-1}L\enspace .\]
Otherwise, we say it is \(L_{\Suf}\)-\emph{prime} w.r.t. \(\Pref\).\eod
\end{definition}
The \emph{Learner} uses the quasiorder \(\qr_{L_{\Suf}}\) to build an automaton by adapting the construction from Definition~\ref{def:right-const:qo} in order to use only the information that is available by means of the sets \(\Suf\) and \(\Pref\).
Building such an automaton requires the quasiorder to satisfy two conditions: it must be \emph{closed} and \emph{consistent} w.r.t. \(\Pref\).
\begin{definitionNI}[Closedness and Consistency of \(\qr_{L_{\Suf}}\) w.r.t. \(\Pref\)]\index{Closedness w.r.t. \(\Pref\)}\index{Consistency w.r.t. \(\Pref\)}\label{def:ClosedCons}
\begin{myEnumA}
\item \(\qr_{L_{\Suf}}\) is \emph{closed w.r.t. \(\Pref\)} if{}f \label{def:ClosedCons:Closed}
\begin{adjustwidth}{-0.5cm}{}
\begin{myAlign}{0pt}{}
\forall u \in \mathcal{P}, a \in \Sigma,\; ρ_{\qr_{L_{\Suf}}}(ua) \text{ is \(L_{\Suf}\)-prime w.r.t. \(\Pref\)}\Rightarrow \exists v \in \mathcal{P}, ρ_{\qr_{L_{\Suf}}}(ua) = ρ_{\qr_{L_{\Suf}}}(v) .
\end{myAlign}
\end{adjustwidth}
\item \(\qr_{L_{\Suf}}\) is \emph{consistent w.r.t. \(\Pref\)} if{}f \label{def:ClosedCons:Cons}
\(\forall u, v \in \Pref, a \in Σ: \; u \qr_{L_{\Suf}} v \Rightarrow ua \qr_{L_{\Suf}} va\). \eod
\end{myEnumA}
\end{definitionNI}
At each iteration, the \emph{Learner} checks whether the quasiorder \(\qr_{L_{\Suf}}\) is closed and consistent w.r.t. \(\Pref\).
If \(\qr_{L_{\Suf}}\) is not closed w.r.t. \(\Pref\), then it finds \(ρ_{\qr_{L_{\Suf}}}(ua)\) with \(u \in \Pref, a \in Σ\) such that \(ρ_{\qr_{L_{\Suf}}}(ua)\) is \(L_{\Suf}\)-prime w.r.t. \(\Pref\) and it is not equal to any \(ρ_{\qr_{L_{\Suf}}}(v)\) with \(v \in \Pref\).
Then it adds \(ua\) to \(\Pref\).
Similarly, if \(\qr_{L_{\Suf}}\) is not consistent w.r.t. \(\Pref\) then the \emph{Learner} finds \(u, v \in \Pref\), \(a \in Σ, x \in \Suf\) such that \(u \qr_{L_{\Suf}} v\) but \(uax \in L \land vax \notin L\).
Then the \emph{Learner} adds \(ax\) to \(\Suf\).
When the quasiorder \(\qr_{L_{\Suf}}\) is closed and consistent w.r.t. \(\Pref\), the \emph{Learner} builds the RFA \(\mathsf{R}(\qr_{L_{\Suf}}, \Pref)\).
Definition~\ref{def:right-const:qo:S} is an adaptation of the automata construction \(\mathsf{H}^{r}\) from Definition~\ref{def:right-const:qo}.
Instead of considering all principals, it considers only those that correspond to words in \(\Pref\).
Moreover, the notion of \(L\)-primality is replaced by \(L_{\Suf}\)-primality w.r.t. \(\Pref\) since the algorithm does not manipulate quotients of \(L\) by words in \(Σ^*\) but the approximation through \(\Suf\) of the quotients of \(L\) by words in \(\Pref\) (see Definition~\ref{def:subsetS}).
\begin{definition}[Automata construction \(\mathsf{R}(\qr_{L_{\Suf}}, \Pref)\)]
\label{def:right-const:qo:S}
Let \(L\!\subseteq\! Σ^*\) be a regular language and let \(\Pref,\Suf\!\subseteq\! Σ^*\).
Define the automaton \(\mathsf{R}(\qr_{L_{\Suf}}, \Pref)\!=\! \tuple{Q, \Sigma, \delta, I, F}\) with \(Q \!=\! \{ρ_{\qr_{L_{\Suf}}}(u) \mid u\!\in\! \Pref, ρ_{\qr_{L_{\Suf}}}(u) \text{ is \(L_{\Suf}\)-prime w.r.t. \(\Pref\)}\}\), \(I = \{ρ_{\qr_{L_{\Suf}}}(u) \in Q \mid \varepsilon \in ρ_{\qr_{L_{\Suf}}}(u)\}\), \(F = \{ρ_{\qr_{L_{\Suf}}}(u) \in Q \mid u \in L\}\) and \( \delta(ρ_{\qr_{L_{\Suf}}}(u), a) = \{ ρ_{\qr_{L_{\Suf}}}(v) \in Q \mid ρ_{\qr_{L_{\Suf}}}(u) a \subseteq ρ_{\qr_{L_{\Suf}}}(v)\}\) for all \(ρ_{\qr_{L_{\Suf}}}(u) \in Q\) and \(a \in Σ\).\eod
\end{definition}
Finally, the \emph{Learner} asks the \emph{Oracle} whether \(\lang{\mathsf{R}(\qr_{L_{\Suf}}, \Pref)} = L\).
If the \emph{Oracle} answers \emph{yes} then the algorithm terminates.
Otherwise, the \emph{Oracle} returns a counterexample \(w\) for the language equivalence.
Then, the \emph{Learner} adds every suffix of \(w\) to \(\Suf\) and repeats the process.
Theorem~\ref{theorem:NLqo} shows that the NL\(^{\leqslant}\) algorithm exactly coincides with NL\(^*\).
\begin{theorem}\label{theorem:NLqo}
NL\(^{\leqslant}\) builds the same sets \(\Pref\) and \(\Suf\), performs the same queries to the \emph{Oracle} and the \emph{Teacher} and returns the same RFA as NL\(^*\), provided that both algorithms perform the same non-deterministic choices.
\end{theorem}
\begin{proof}
Let \(\Pref, \Suf \subseteq Σ^*\) be a prefix-closed and a suffix-closed finite set, respectively, and let \(\mathcal{T} = (T, \Pref, \Suf)\) be the table built by algorithm NL\(^*\).
Observe that for every \(u,v \in \Pref\):
\begin{align}
u \qr_{L_{\Suf}} v & \Leftrightarrow \quad \text{[By Definition~\ref{def:finiteNerode}]} \nonumber\\
{u}^{-1}L \subseteq_{\Suf} {v}^{-1}L & \Leftrightarrow \quad \text{[By definition of quotient w.r.t.\ \(\Suf\)]} \nonumber\\
\forall x \in \Suf, \; ux \in L \Rightarrow vx \in L & \Leftrightarrow \quad \text{[By definition of \(\mathcal{T}\)]} \nonumber\\
\forall x \in \Suf, \; (\row(u)(x) = {+}) \Rightarrow (\row(v)(x) = {+}) & \Leftrightarrow \quad \text{[By Definition~\ref{def:coverRow}]} \nonumber\\
\row(u) \sqsubseteq \row(v) \enspace .
\label{eq:QOIffRowsSubset}
\end{align}
Moreover, for every \(u,v \in \Pref\) we have that \({u}^{-1}L =_{\Suf} {v}^{-1}L\) if{}f \(\row(u) = \row(v)\).
Next, we show that the join operator applied to rows corresponds to the set union applied to quotients w.r.t.\ \(\Suf\).
Let \(u,v \in \Pref\) and let \(x \in \Suf\).
Then,
\begin{align}
(\row(u) \sqcup \row(v))(x) = {+} & \Leftrightarrow \quad \text{[By Definition~\ref{def:join}]} \nonumber\\
(\row(u)(x) = {+}) \lor (\row(v)(x) = {+}) & \Leftrightarrow \quad \text{[By definition of row]} \nonumber\\
(ux \in L )\lor (vx \in L) & \Leftrightarrow \quad \text{[By definition of quotient w.r.t \(\Suf\)]} \nonumber\\
(x \in {u}^{-1}L) \lor (x \in {v}^{-1}L )& \Leftrightarrow \quad \text{[By definition of \(\cup\)]}\nonumber \\
x \in {u}^{-1}L \cup {v}^{-1}L \enspace .
\label{eq:joinUnion}
\end{align}
Therefore, we can prove that \(\row(u)\) is \(\mathcal{T}\)-\emph{prime} if{}f \(ρ_{\qr_{L_{\Suf}}}(u)\) is \(L_{\Suf}\)-prime w.r.t. \(\Pref\).
\begin{align*}
\row(u) = {\textstyle\bigsqcup_{v \in \Pref, \; \row(v) \sqsubset \row(u)}} \row(v) & \Leftrightarrow \quad \text{[By Equation~\eqref{eq:QOIffRowsSubset}]} \\
\row(u) = {\textstyle\bigsqcup_{v \in \Pref, \; {v}^{-1}L \subsetneq_{\Suf} {u}^{-1}L}} \row(v) & \Leftrightarrow \quad \text{[By Equation~\eqref{eq:joinUnion}]} \\
{u}^{-1}L = {\textstyle\bigcup_{v \in \Pref, \; {v}^{-1}L \subsetneq_{\Suf} {u}^{-1}L}} {v}^{-1}L & \Leftrightarrow \quad \text{[\({v}^{-1}L \subsetneq_{\Suf} {u}^{-1}L \Leftrightarrow u \qr_{L_{\Suf}}n v\)]} \\
{u}^{-1}L = {\textstyle\bigcup_{v \in \Pref, \; u \qr_{L_{\Suf}}n v}} {v}^{-1}L \enspace .
\end{align*}
It follows from Definitions~\ref{def:ClosedCons} \ref{def:ClosedCons:Closed} and~\ref{def:Table} \ref{def:Table:closed} and Equation~\eqref{eq:joinUnion} that \(\mathcal{T}\) is closed if{}f \(\qr_{L_{\Suf}}\) is closed.
Moreover, it follows from Definitions~\ref{def:ClosedCons} \ref{def:ClosedCons:Cons} and~\ref{def:Table} \ref{def:Table:Consistent} that \(\mathcal{T}\) is consistent if{}f \(\qr_{L_{\Suf}}\) is consistent.
On the other hand, for every \(u,v \in \Pref, a \in Σ\) and \(x \in \Suf\) we have that:
\begin{align*}
(\row(u) \sqsubseteq \row(v)) \land (\row(ua)(x) = {+}) \land (\row(va)(x) = {-} )& \Leftrightarrow \quad \text{[By Equation~\eqref{eq:QOIffRowsSubset}]} \\
(u \qr_{L_{\Suf}} v )\land (uax \in L) \land (vax \notin L) &
\end{align*}
It follows that if \(\mathcal{T}\) and \(\qr_{L_{\Suf}}\) are not consistent then both NL\(^*\) and NL\(^{\leqslant}\) can find the same word \(ax \in Σ \Suf\) and add it to \(\Suf\).
Similarly, it is straightforward to check that if \(\row(ua)\) with \(u \in \Pref\) and \(a \in Σ\) breaks closedness, i.e.\ it is \(\mathcal{T}\)-prime and it is not equal to any \(\row(v)\) with \(v \in \Pref\), then \(ρ_{\qr_{L_{\Suf}}}(ua)\) is \(L_{\Suf}\)-prime for \(\Pref\) and not equal to any \(ρ_{\qr_{L_{\Suf}}}(v)\) with \(v \in \Pref\).
Thus, if \(\mathcal{T}\) and \(\qr_{L_{\Suf}}\) are not closed then both NL\(^*\) and NL\(^{\leqslant}\) can find the same word \(ua\) and add it to \(\Pref\).
It remains to show that both algorithms build the same automaton modulo isomorphism, i.e., \(\mathsf{R}(\mathcal{T}) = \tuple{\widetilde{Q}, Σ, \widetilde{δ}, \widetilde{I}, \widetilde{F}}\) is isomorphic to \(\mathsf{R}(\qr_{L_{\Suf}}, \Pref) = \tuple{Q, Σ, δ, I, F}\).
Define the mapping \(\varphi: Q \to \widetilde{Q}\) as \(\varphi(ρ_{\qr_{L_{\Suf}}}(u)) = \row(u)\).
Then:
\begin{align*}
\varphi(Q) & = \{\varphi(ρ_{\qr_{L_{\Suf}}}(u)) \mid u \in \mathcal{P} \land ρ_{\qr_{L_{\Suf}}}(u) \text{ is \(L_{\Suf}\)-prime w.r.t. \(\Pref\)}\} \\
& = \{\row(u) \mid u \in \mathcal{P} \land \row(u) \text{ is \(\mathcal{T}\)-prime}\} = \widetilde{Q} \enspace .\\
\varphi(I) & = \{\varphi(ρ_{\qr_{L_{\Suf}}}(u)) \mid \varepsilon \in ρ_{\qr_{L_{\Suf}}}(u)\} = \{\row(u) \mid u \qr_{L_{\Suf}} \varepsilon\} = \{\row(u) \mid \row(u) \sqsubseteq \row(\varepsilon)\} = \widetilde{I} \enspace .\\
\varphi(F) & = \{\varphi(ρ_{\qr_{L_{\Suf}}}(u)) \mid u \in L \cap \mathcal{P}\} = \{\row(u) \mid u \in L \cap \mathcal{P}\} = \{\row(u) \mid \row(u)(\varepsilon) = {+}\} = \widetilde{F}\enspace .\\
\varphi(\delta(ρ_{\qr_{L_{\Suf}}}(u),a)) &= \varphi(ρ_{\qr_{L_{\Suf}}}(ua)) = \{\row(v) \mid ρ_{\qr_{L_{\Suf}}}(u) \in Q \land ρ_{\qr_{L_{\Suf}}}(u)a \subseteq ρ_{\qr_{L_{\Suf}}}(v)\} \\
& = \{\row(v) \mid \row(v) \in \widetilde{Q} \land v \qr_{L_{\Suf}} ua\} = \{\row(v) \mid \row(v) \in \widetilde{Q} \land \row(v) \sqsubseteq \row(ua)\} \\
& = \widetilde{\delta}(\row(u),a) = \widetilde{\delta}(\varphi(ρ_{\qr_{L_{\Suf}}}(u)),a) \enspace .
\end{align*}
Finally, we show that \(\varphi\) is an isomorphism.
Clearly, the function \(\varphi\) is surjective since, for every \(u \in \Pref\), we have that \(\row(u) = \varphi(ρ_{\qr_{L_{\Suf}}}(u))\).
Moreover \(\varphi\) is injective since for every \(u,v \in \Pref\), \(\row(u) = \row(v) \Leftrightarrow {u}^{-1}L =_{\Suf} {v}^{-1}L\), hence \(\row(u) = \row(v) \Leftrightarrow ρ_{\qr_{L_{\Suf}}}(u) = ρ_{\qr_{L_{\Suf}}}(v)\).
We conclude that \(\varphi\) is an NFA isomorphism between \(\mathsf{R}(\qr_{L_{\Suf}},\Pref)\) and \(\mathsf{R}(\mathcal{T})\).
Therefore NL\(^*\) and NL\(^{\leqslant}\) exhibit the same behavior, provided that both algorithms perform the same non-deterministic choices, as they both maintain the same sets \(\Pref\) and \(\Suf\) and build the same automata at each step.
\end{proof}
\paragraph*{Termination of NL\(^*\) and NL\(^{\leqslant}\)}
At each iteration of the NL\(^{\leqslant}\) algorithm, it either terminates or the counterexample \(w\) given by the \emph{Oracle} refines the quasiorder \(\qr_{L_{\Suf}}\) which results in having, at least, one new principal \(ρ_{\qr_{L_{\Suf}}}(w)\).
Since
\[ρ_{\qr_{L_{\Suf}}}(u) \neq ρ_{\qr_{L_{\Suf}}}(v) \Rightarrow \exists s \in \Suf, \; us \in L \land vs \notin L \Rightarrow ρ_{\leqslant^{r}L}(u) \neq ρ_{\leqslant^{r}L}(v)\enspace ,\]
we conclude that the number of principals for \(\qr_{L_{\Suf}}\) is smaller than or equal to the number of principals for \(\leqslant^{r}L\).
Given that \(\leqslant^{r}L\) induces finitely many principals, algorithm NL\(^{\leqslant}\) can only add finitely many principals to \(\qr_{L_{\Suf}}\) and, therefore, the algorithm terminates.
It is worth remarking that, in order to prove the termination of the NL\(^*\) algorithm, \citet{bollig2009angluin} first had to show that the number of rows built during the computation of the NL\(^*\) algorithm is a lower bound for the number of rows computed during an execution of the L\(^*\) algorithm of \citet{angluin1987learning}.
Then, the termination of the NL\(^*\) algorithms follows from the termination of L\(^*\).
Finally, observe that, by replacing the right quasiorder \(\qr_{L_{\Suf}}\) by its corresponding right congruence \(\mathord{\sim_{L_{\Suf}}} \ud \mathord{\mathord{\qr_{L_{\Suf}}}} \cap \mathord{\mathord{(\qr_{L_{\Suf}})^{-1}}}\) in the above algorithm (precisely, in Definitions~\ref{def:ClosedCons} and \ref{def:right-const:qo:S}), the resulting algorithm corresponds to the L\(^*\) algorithm of \citet{angluin1987learning}.
Note that, in that case, all principals \(ρ_{\sim_{L_{\Suf}}}(u)\), with \(u\in\Sigma^*\), are \(L_{\Suf}\)-prime w.r.t. \(\Pref\).
{}
{}
\chapter{Future Work}
\label{chap:future}
We believe that we have only scratched the surface on the use of \emph{well-quasiorders} on words for solving problems from \emph{Formal Language Theory}.
In this section, we present some directions for further developments that show how our work can be extended to\begin{myEnumIL}\item take full advantage of simulation relations, \item better understand, and possibly improve, the performance of \tool{zearch} and \item develop new algorithms for building smaller residual automata.\end{myEnumIL}
\section{The Language Inclusion Problem}
Consider the inclusion problem \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\), where \(\mathcal{N}_1\) and \(\mathcal{N}_2\) are NFAs.
Even though we have shown in Chapter~\ref{chap:LangInc} that simulations can be used to derive an algorithm for solving this language inclusion problem, we are not on par with the thoughtful use of simulation relations made by \citet{Abdulla2010} and \citet{DBLP:conf/popl/BonchiP13}.
The main reason for which we are not able to accommodate within our framework their use of simulations is that our abstraction only manipulates sets of states of \(\mathcal{N}_2\).
As a consequence, any use of simulations that involves states of \(\mathcal{N}_1\) is out of reach.
However, it is possible to overcome this limitation by using \emph{alternating automata} as we show next.
Intuitively, since alternating automata can be complemented without altering their number of states, we can reduce any language inclusion problem \(\lang{\mathcal{A}_1} \subseteq \lang{\mathcal{A}_2}\), where \(\mathcal{A}_1\) and \(\mathcal{A}_2\) are alternating automata, into a universality problem \(Σ^* \subseteq \lang{\mathcal{A}_3}\), where \(\mathcal{A}_3 = \mathcal{A}_1^c \cup \mathcal{A}_2\).
Since \(\mathcal{A}_3\) is built by combining the two input automata its states are the union of the states of \(\mathcal{A}_1\) and \(\mathcal{A}_2\).
Therefore, simulations applicable within our framework to decide \(Σ^* \subseteq \lang{\mathcal{A}_3}\), which only involve states of \(\mathcal{A}_3\), now involve states of \(\mathcal{A}_1\) and \(\mathcal{A}_2\).
\subsection{Language Inclusion Through Alternating Automata}
Let \(S\) be a set.
We denote by \(\mathcal{B}p(S)\) the set of \demph{positive Boolean formulas} over \(S\) which are of the form \(\Phi \ud s \;|\; \Phi_1 \lor \Phi_2 \;|\; \Phi_1 \land \Phi_2 \;|\; \text{\emph{false}}\), where \(s \in S\) and \(\Phi_1,\Phi_2 \in \mathcal{B}p(S)\).
We say \(S' \subseteq S\) \emph{satisfies} a formula \(\Phi \in \mathcal{B}p(S)\) if{}f \(\Phi\) is \(\text{\emph{true}}\) when assigning the value \(\text{\emph{true}}\) to all elements in \(S'\) and \(\text{\emph{false}}\) to the elements in \(S \setminus S'\).
Given \(\Phi \in \mathcal{B}p(S)\), we denote \(\eval{\Phi}\) the set of all subsets of \(S\) that satisfy \(\Phi\).
Clearly, if \(S'\) satisfies a formula \(\Phi\), any set \(S'' \subseteq S\) such that \(S' \subseteq S''\) also satisfies \(\Phi\).
Therefore, the set \(\eval{\Phi}\) is an \(\subseteq\)-upward closed set, i.e. \(ρ_{\subseteq}(\eval{\Phi}) = \eval{\Phi}\).
Finally, if a formula \(\Phi\) is not satisfiable, i.e. no set \(S' \subseteq S\) satisfies \(\Phi\), then \(\eval{\Phi} = \varnothing\).
\begin{definition*}[AFA]
An \emph{alternating finite-state automata}\index{alternating automata} (AFA for short) is a tuple \(\mathcal{A} \ud \tuple{Q,\Sigma,δ,I,F}\) where \(Q\) is the finite set of \emph{states}, \(\Sigma\) is the finite alphabet, \(\delta \colon Q \times \Sigma \to \mathcal{B}p(Q)\) is the transition function, \(I \subseteq Q\) are the initial states and \(F \subseteq Q\) are the final states.\eod
\end{definition*}
Intuitively, given an active state \(q \in Q\) and an alphabet symbol \(a \in Σ\) an AFA can activate any set of states in \(\eval{δ(q,a)}\).
Figure~\ref{fig:AFA} shows an example of an AFA.
\begin{figure}
\caption{Alternating automaton \(\mathcal{A}
\label{fig:AFA}
\end{figure}
Given an AFA \(\mathcal{A} = \tuple{Q,\Sigma,δ,I,F}\), we extend the transition function \(\delta\) to sets of states obtaining \(\Delta: \wp(Q)\times \Sigma \to \mathcal{B}p(Q)\) defined as \(Δ(S,a) \ud \bigwedge_{s \in S} δ(s,a)\).
Intuitively, \(\Delta(S,a)\) indicates the states that will be activated after reading \(a\) when all states in \(S\) are active.
Let \(X \uplus Y \ud \{x \cup y \mid x \in X, y \in Y\}\).
Then
\begin{equation}\label{eq:Delta}
\eval{\Delta(S,a)} = \left\{\begin{array}{ll}
\varnothing & \text{ if } \exists s \in S \text{ s.t. } \delta(s,a) = \text{\emph{false}} \\
\biguplus_{s \in S} \eval{\delta(s,a)} & \text{ otherwise }
\end{array}\right.
\end{equation}
We say a word \(w\) is accepted by an AFA \(\mathcal{A}=\tuple{Q,Σ,δ,I,F}\) if{}f there exists a sequence of sets of active states \(S_0,\ldots,S_{\len{w}}\) such that \(S_0 = \{q_i\}\) with \(q_i \in I\), \(S_{\len{w}} \subseteq F\), \(S_{\len{w}} \neq \varnothing\) and \(S_{i} \in \eval{Δ(S_{i{-}1},(w)_i)}\) for \(1 \leq i \leq \len{w}\).
\begin{example}
Let us consider the alternating automaton \(\mathcal{A}\) in Figure~\ref{fig:AFA}.
Then, we have that
\begin{align*}
\Delta(\{q_0\},a) & = δ(q_0,a) = (q_1 \land q_2) \lor q_3 \enspace .\\
\eval{\Delta(\{q_0\},a)} & = \eval{δ(q_0,a)} = ρ_{\subseteq}\left(\{\{q_1,q_2\},\{q_3\}\}\right) \enspace .\\[8pt]
\Delta(\{q_1,q_2\},b) & = δ(q_1,b) \land δ(q_2,b) = q_2\land q_3 \land \text{\emph{false}} = \text{\emph{false}}\\
\eval{\Delta(\{q_1,q_2\},b)} & = \eval{δ(q_1,b)} \biguplus \eval{δ(q_2,b)} = ρ_{\subseteq}\left(\{\{q_2,q_3\}\}\right) \uplus \varnothing = \varnothing \enspace . \\[8pt]
\Delta(\{q_1,q_2\},a) & = δ(q_1,a) \land δ(q_2,a) = \text{\emph{false}} \land q_3 = \text{\emph{false}}\\
\eval{\Delta(\{q_1,q_2\},a)} & = \eval{δ(q_1,a)} \biguplus \eval{δ(q_2,a)} = \varnothing \uplus ρ_{\subseteq}\left(\{\{q_3\}\}\right) = \varnothing \enspace . \\[8pt]
Δ(\{q_3\},a) & = Δ(\{q_3\},b) = q_3 \\
\eval{Δ(\{q_3\},a)} & = \eval{Δ(\{q_3\},b)} = ρ_{\subseteq}\left(\{\{q_3\}\}\right)\enspace .
\end{align*}
Since \(q_0\) is the only initial state and \(F = \{q_3\}\), it follows that the language generated by the automaton is \(\lang{\mathcal{A}} = a(a+b)^*\).
{\ensuremath{\Diamond}}
\end{example}
We denote the reflexive-transitive closure of \(\eval{\Delta}\) as \(\goes{}\).
Thus, the \emph{language} of an AFA, \(\mathcal{A}\), is \(\lang{\mathcal{A}} = \{w \in \Sigma^* \mid \exists q_i \in I, S \subseteq F, \; S \neq \varnothing \land \{q_i\} \goes{w} S\}\).
One of the most interesting properties of AFAs is that their complement, i.e. an AFA generating the complement language, can be built in polynomial time.
\begin{definition}[Complement of an AFA]
Let \(\mathcal{A} = \tuple{Q,\Sigma,δ,I,F}\) be an AFA with \(L = \lang{\mathcal{A}}\).
Its \emph{complement AFA}, denoted \(\mathcal{A}^c\) is the AFA \(\mathcal{A}^c \ud \tuple{Q,\Sigma,δ^c,I,Q \setminus F}\) where
\(δ^c(q,a)\) is the result of switching \(\land\) and \(\lor\) operators in \(δ(q,a)\).\eod
\end{definition}
The simplicity of the computation of the complement for AFAs, \emph{which does not alter the number of states of the automaton}, allows us to use them in order to solve the language inclusion problem \(\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2}\), where \(\mathcal{N}_1\) and \(\mathcal{N}_2\) are NFAs, by reducing it to universality of alternating automata as follows:
\begin{align}
\lang{\mathcal{N}_1} \subseteq \lang{\mathcal{N}_2} & \Leftrightarrow \quad \text{[Since NFAs \(\subseteq\) AFAs]}\nonumber \\
\lang{\mathcal{A}_1} \subseteq \lang{\mathcal{A}_2} & \Leftrightarrow \quad \text{[\(A \subseteq B \Leftrightarrow A \cap B^c = \varnothing\)]}\nonumber\\
\lang{\mathcal{A}_1} \cap (\lang{\mathcal{A}_2})^c = \varnothing & \Leftrightarrow \quad \text{[\((A\cap B)^c = A^c \cup B^c\) and \(\varnothing^c = Σ^*\)]}\nonumber\\
(\lang{\mathcal{A}_1})^c \cup \lang{\mathcal{A}_2} = \Sigma^* & \Leftrightarrow \quad\text{[AFAs are closed under complement]}\nonumber\\
\lang{\mathcal{A}_1^c} \cup \lang{\mathcal{A}_2} = \Sigma^* & \Leftrightarrow \quad \text{[\(A = \Sigma^* \Leftrightarrow \Sigma^* \subseteq A\)]} \nonumber\\
\Sigma^* \subseteq \lang{\mathcal{A}_1^c} \cup \lang{\mathcal{A}_2} & \Leftrightarrow \quad \text{[With \(\mathcal{A}_3 = \mathcal{A}_1^c \cup \mathcal{A}_2\)]} \nonumber\\
Σ^* \subseteq \lang{\mathcal{A}_3}\label{eq:universality}
\end{align}
On the other hand, \(\Sigma^*\) is the \(\lfp\) of the equation \(\lambda X. \{\varepsilon\} \cup \bigcup_{a \in \Sigma}aX\).
Therefore
\[\Sigma^* \subseteq \lang{\mathcal{A}_3} \Leftrightarrow \lfp (\lambda X. \{\varepsilon\} \cup {\textstyle\bigcup_{a \in \Sigma} aX}) \subseteq \lang{\mathcal{A}_3}\enspace .\]
We are now in position to leverage our quasiorder-based framework from Chapter~\ref{chap:LangInc} to derive an algorithm for deciding the universality of a regular language given by an AFA \(\mathcal{A}\).
To do that, we adapt our right state-based quasiorder from Equation~\ref{eqn:state-qo}, which requires defining the successor operator for AFAs \(\post_w^{\mathcal{A}}: \wp(\wp(Q)) \to \wp(\wp(Q))\), where \(w \in \Sigma^*\), as follows:
\begin{equation}\label{eq:postAFAwords}
\post_{w}^{\mathcal{A}}(X) \ud \{S' \in \wp(Q) \mid \exists S \in X, S \goes{w} S'\} \enspace .
\end{equation}
It is straightforward to check that \(\post_{wa}^{\mathcal{A}}(X) = \post_a^{\mathcal{A}}(\post_w^{\mathcal{A}}(X))\).
The following example illustrates the behavior of the function \(\post_w^{\mathcal{A}}\) on the AFA from Figure~\ref{fig:AFA}.
\begin{example}
Consider again the AFA \(\mathcal{A}\) from Figure~\ref{fig:AFA}.
We have that
\begin{align*}
\post_{a}^{\mathcal{A}}(\{\{q_0\}\}) & = ρ_{\subseteq}\left(\{\{q_1,q_2\},\{q_3\}\}\right) \\
\post_{aa}^{\mathcal{A}}(\{\{q_0\}\}) & = \post_{a}^{\mathcal{A}}(\{\{q_1,q_2\},\{q_3\}\}) = ρ_{\subseteq}\left(\{\{q_3\}\}\right) \\
\post_{ab}^{\mathcal{A}}(\{\{q_0\}\}) & = \post_b^{\mathcal{A}}(\{\{q_1,q_2\},\{q_3\}\}) = ρ_{\subseteq}\left(\{\{q_3\}\}\right)\enspace . \tag*{
{\ensuremath{\Diamond}}}
\end{align*}
\end{example}
Similarly to what we did in Section~\ref{subsec:state-qos} for NFAs, we next define a state-based quasiorder for AFAs, \(\leqslant_{\mathcal{A}}\).
To do that, let \(I_{\{\}}\) be the set of singleton subsets of \(I\), i.e. \(I_{\{\}} \ud \{\{q\} \mid q \in I\}\).
Then
\begin{equation}\label{eq:qoAFAState}
u \leqslant_{\mathcal{A}} v \Leftrightarrow \post_u^{\mathcal{A}}(I_{\{\}}) \subseteq \post_v^{\mathcal{A}}(I_{\{\}})
\end{equation}
\begin{lemma}\label{lemma:leqStateLcWQP}
Let \(\mathcal{A} = \tuple{Q,\Sigma,\delta, I,F}\) be an AFA with \(L=\lang{\mathcal{A}}\).
Then \(\leqslant_{\mathcal{A}}\) is a right \(L\)-consistent well-quasiorder.
\end{lemma}
\begin{proof}
First, we show that \(\leqslant_{\mathcal{A}}\) is right monotone.
Let \(u,v \in \Sigma^*\) and \(a \in \Sigma\).
Recall that $\post^\mathcal{A}_a$ is a monotonic function and that
\begin{equation}
\post^{\mathcal{A}}_{uv} = \post^{\mathcal{A}}_{v} \comp \post^{\mathcal{A}}_u \enspace .\label{eq:postpost}
\end{equation}
Then
\begin{myAlign}{0pt}{0pt}
u \leqslant_{\mathcal{A}} v & \Rightarrow \quad\text{[By definition of \(\leqslant_{\mathcal{A}}\)]} \\
\post^{\mathcal{A}}_{u}(I_{\{\}}) \subseteq \post^{\mathcal{A}}_{v}(I_{\{\}}) & \Rightarrow \quad\text{[Since $\post^\mathcal{A}_a$ is monotonic]} \\
\post^{\mathcal{A}}_{a}(\post^{\mathcal{A}}_{u}(I_{\{\}})) \subseteq \post^{\mathcal{A}}_{a}(\post^{\mathcal{A}}_{v}(I_{\{\}})) & \Leftrightarrow \quad\text{[By Equation~\eqref{eq:postpost}]} \\
\post^{\mathcal{A}}_{ua}(I_{\{\}}) \subseteq \post^{\mathcal{A}}_{va}(I_{\{\}}) & \Leftrightarrow \quad\text{[By definition of \(\leqslant_{\mathcal{A}}\)]} \\
ua \leqslant_{\mathcal{A}} va \enspace .
\end{myAlign}
On the other hand, \(\leqslant_{\mathcal{A}}\) is \(L\)-consistent since, by definition
\[\forall w \in Σ^*, \; w \in L \Leftrightarrow \exists S \in \post_w^{\mathcal{A}}(I_{\{\}}), \; S \neq \varnothing \land S \subseteq F\enspace .\]
Therefore, if \(u \in L\) and \(u \leqslant_{\mathcal{A}} v\) then it follows that \(v \in L\).
Finally, it is straightforward to check that \(\leqslant_{\mathcal{A}}\) is a well-quasiorder since \(\wp(\wp(Q))\) is finite.
\end{proof}
Since membership in AFAs is decidable, it follows from Lemma~\ref{lemma:leqStateLcWQP} and Theorem~\ref{theorem:quasiorderAlgorithmR} that Algorithm \AlgRegularWr instantiated with the wqo \(\leqslant_{\mathcal{A}}\) decides the inclusion \(Σ^* \!\subseteq\! \lang{\mathcal{A}}\), where \(\mathcal{A}\) is an AFA.
Following the developments of Chapter~\ref{chap:LangInc}, given an AFA \(\mathcal{A} = \tuple{Q,Σ,δ,I,F}\), we could define a Galois Connection \(\tuple{\wp(\Sigma^*),\subseteq}\galois{\alpha}{\gamma}\tuple{\AC_{\tuple{\wp(\wp(Q)),\subseteq}},\sqsubseteq}\) that yields an antichains algorithm for deciding the universality of AFAs by manipulating sets of sets of states.
By doing so, we would obtain an algorithm that computes the set \(Y = \minor{\{\post_w^{\mathcal{A}}(I_{\{\}}) \mid w \in Σ^*\}}\) and checks whether all elements \(y \in Y\) satisfy \(\exists s \in y, \; s \neq \varnothing \land s \subseteq F\).
Moreover, we could enhance the state-based quasiorder for AFAs by using simulations between the states of \(\mathcal{A}\) which, recall, are the union of the states of the input automata \(\mathcal{N}_1\) and \(\mathcal{N}_2\).
This would allow us to use the simulations that relate states of both automata, similarly to \citet{Abdulla2010} and \citet{DBLP:conf/popl/BonchiP13}.
Therefore, we believe that the full development of an antichains algorithm for AFAs is an interesting line for future work since it will allow us to understand how close our framework can get to the results of \citet{Abdulla2010} and \citet{DBLP:conf/popl/BonchiP13}.
\section{The Complexity of Searching on Compressed Text}
We believe the good results obtained during the evaluation of \tool{zearch} (see Figure~\ref{fig:comparison}) invite for a deeper study of our algorithm in order to better understand its behavior and improve its performance.
For instance, it is yet to be considered how the performance of \tool{zearch} is affected by the choice of the grammar-based compression algorithm.
By using different heuristics to build the grammar, the resulting SLP will have different properties, such as depth, width or length of the rules, which would definitely affect \tool{zearch}'s performance.
Figure~\ref{fig:grammars} shows the grammars built by different compression algorithms for the same string.
\begin{figure}
\caption{From left to right, grammars built by the compression algorithms \tool{sequitur}
\label{fig:grammars}
\end{figure}
In particular, there are grammar-based compression algorithms such as \tool{Sequitur}~\cite{nevill1997compression} that produce SLPs which are not in CNF, i.e. in which rules might have more than two symbols on the right hand side.
Processing such a grammar, instead of the one built by \tool{repair} reduces the number of rules to be processed at the expense of a greater cost for processing each rule.
It is worth considering whether adapting \tool{zearch} to work on such SLPs will have a positive impact on its performance.
On the other hand, Algorithm \AlgCountLines allows for a conceptually simple parallelization since any set of rules such that no variable appearing on the left hand side of a rule appears on the right hand side of another, can be processed simultaneously.
Indeed, a theoretical result by \citet{ullman1988parallel} on the parallelization of Datalog queries can be used to show that counting the number of lines in a grammar-compressed text containing a match for a regular expression is in $\mathcal{NC}^2$, i.e. it is solvable in \emph{polylogarithmic time} on a \emph{parallel} computer with a polynomial number of processors, when the automaton built from the expression is acyclic.
Therefore, we are optimistic about the possibilities of a parallel version of \tool{zearch}.
Finally, \emph{patterns} are a commonly used subclass of regular expressions for which specific searching algorithms have been developed~\cite{kida1998multipattern,navarro2005lzgrep,gawrychowski2013optimal,gawrychowski2014simple}.
Since the standard automata construction from regular expressions yields a DFA when the expression is a pattern, our algorithm allows us to search for patterns in \(\mathcal{O}(t\cdot s)\) time, where \(t\) is the size of the grammar and \(s\) is the length of the pattern.
However, as shown by \citet{gawrychowski2013optimal}, it is possible to decide the existence of a pattern in an \tool{LZW}-compressed text in \(\mathcal{O}(t + s)\) time.
It is yet to be considered whether the algorithm of \citet{gawrychowski2013optimal} can be adapted to the more general scenario of searching on grammar-compressed text and whether it can be extended to report the number of matching lines without altering its complexity as we did with Algorithm \AlgCountLines.
\section{The Performance of Residualization}
In Chapter~\ref{chap:RFA} we presented the automata construction \(\cG{r}(\mathcal{N})\) as an alternative to \(\mathcal{N}^{\text{res}}\), the residualization operation defined by \citet{denis2002residual}.
We have shown in Theorem~\ref{theoremF}~\ref{lemma:rightNRes} that given an NFA \(\mathcal{N}\), the automaton \(\cG{r}(\mathcal{N})\) is a sub-automaton of \(\mathcal{N}^{\text{res}}\), meaning that our construction yields smaller automata.
On the other hand, it is clear that, given an NFA \(\mathcal{N}=\tuple{Q,Σ,δ,I,F}\), finding the coverable sets in \(\{\post_u^{\mathcal{N}}(I) \mid u \in Σ^*\}\) is easier than finding the \(L\)-composite principals in \(\{ρ_{\leqslant^{r}N}(u) \mid u \in Σ^*\}\).
However, the performance of both algorithms, as well as the actual difference in size between the RFAs \(\cG{r}(\mathcal{N})\) and \(\mathcal{N}^{\text{res}}\), is yet to be evaluated.
\subsection{Reducing RFAs with Simulations}
Let \(\mathcal{N}\) be an NFA with \(L = \lang{\mathcal{N}}\).
As shown by Lemma~\ref{lemma:simulationLConsistent}, the simulation-based quasiorder \(\preceq^r_{\mathcal{N}}\) is an \(L\)-consistent right well-quasiorder.
Therefore, it follows from Lemma~\ref{lemma: HrGeneratesL} that \(\mathsf{H}^r(\preceq^r_{\mathcal{N}},L)\) is an RFA generating the language \(L\).
Moreover, as shown in Section~\ref{sec:simulation_basedQO}, we have the following relation between the state-based, the simulation-based and the Nerode's right quasiorders:
\[
\mathord{\leqslant^{r}_{\mathcal{N}}} \,\subseteq\, \mathord{\preceq^r_{\mathcal{N}}} \,\subseteq\, \mathord{\leqslant^{r}_{\lang{\mathcal{N}}}}\enspace .
\]
Therefore, by Theorem~\ref{theorem:numLPrimePrincipals}, we have that
\[\begin{array}{c}
\len{\{ρ_{\leqslant^{r}N}(u) \mid u \in Σ^* \text{ and } ρ_{\leqslant^{r}N}(u) \text{ is \(L\)-prime}\}} \\
\rotatebox[origin=c]{270}{\(\geq\)} \\
\len{\{ρ_{\preceq^r_{\mathcal{N}}}(u) \mid u \in Σ^* \text{ and } ρ_{\preceq^r_{\mathcal{N}}}(u)\text{ is \(L\)-prime}\}} \\
\rotatebox[origin=c]{270}{\(\geq\)} \\
\len{\{ρ_{\leqslant^{r}L}(u) \mid u \in Σ^* \text{ and } ρ_{\leqslant^{r}L}(u)\text{ is \(L\)-prime}\}} \enspace .
\end{array}\]
One promising direction for future work is to fully develop this idea of using simulation-based quasiorders to build even smaller RFAs.
Such technique should be implemented and evaluated in practice in comparison with the residualization operations \(\cG{r}(\mathcal{N})\) and \(\mathcal{N}^{\text{res}}\).
{}
{}
\chapter{Conclusions}
\label{chap:conclusions}
In this thesis, we have shown that well-quasiorders are the right tool for addressing different problems from \emph{Formal Language Theory}.
Indeed, we presented two quasiorder-based frameworks in Chapters~\ref{chap:LangInc} and~\ref{chap:RFA} that allowed us to offer a new perspective on \emph{The Language Inclusion Problem} and \emph{Residual Automata}, respectively.
In both cases, our frameworks allowed us to\begin{myEnumIL}
\item offer a \emph{new perspective on known algorithms} that facilitates their understanding and evidences the relationships between them and
\item \emph{systematically derive new algorithms}, some of which proved to be of practical interest due to their performance.
\end{myEnumIL}
\paragraph{The Language Inclusion Problem}
We have been able to systematically derive well-known algorithms such as the antichains algorithms for regular languages of \citet{DBLP:conf/cav/WulfDHR06}, with its multiple variants (see Section~\ref{sec:novel_perspective_AC}), and the antichains algorithm for grammars of \citet{Holk2015}.
These systematic derivations result in a simpler presentation of the antichains algorithm for grammars of \citet{Holk2015} as a straightforward extension of the antichains algorithm for regular languages.
Indeed, we have shown that the antichains algorithm for regular languages and for grammars are conceptually identical and correspond to two instantiations of our framework with different quasiorders.
Recall that, previously, the use of antichains for grammars was justified through a reduction to data flow analysis.
Our framework has also allowed us to derive algorithms for deciding the inclusion of a regular language in the trace set of a one-counter net.
In doing so, we have shown that the right Nerode quasiorder for the trace set of a one-counter net is an undecidable well-quasiorder, thereby settling a conjecture made by \citet[Section 6]{deLuca1994}.
Finally, our quasiorder-based framework also allowed us to derive novel algorithms, such as gfp-based Algorithm \AlgRegularGfp, for deciding the inclusion between regular languages.
It is yet to be considered the performance of this algorithm in order to decide whether it is of practical interest.
\paragraph*{Searching on Compressed Text}
We then adapted the antichains algorithm for grammars to the problem of \emph{searching with regular expressions in grammar compressed text}.
As a result, we have presented the first algorithm for \emph{counting} the number of lines in a grammar-compressed text containing a match for a regular expression.
It is worth remarking that our algorithm applies to any grammar-based compression scheme while being nearly optimal.
Together with the presentation of our algorithm, we described in Chapter~\ref{chap:zearch} the data structures required to achieve nearly optimal complexity for searching in compressed text and used them to implement a \emph{sequential} tool --\tool{zearch}-- that significantly outperforms the \emph{parallel} state of the art to solve this problem.
Indeed, when the grammar-based compressor achieves high compression ratio, which is the case, for instance, for automatically generated \emph{Log} files, \tool{zearch} uses up to $25\pct$ less time than \tool{lz4|hyperscan}, even outperforming \tool{grep} and being competitive with \tool{hyperscan}.
Our results evidence that compression of textual data and regular expression searching, two problems considered independent in practice, are connected.
Intuitively, the search can take advantage of the information about repetitions in the text, highlighted by the compressor, to skip parts of the uncompressed text.
\paragraph{Residual Automata}
\citet{denis2002residual} introduced the notion of RFA and canonical RFA for a regular language and devised a procedure, similar to the subset construction for DFAs, to build the RFA \(\mathcal{N}^{\text{res}}\) from a given automaton \(\mathcal{N}\).
Furthermore, they showed that the \emph{double-reversal method} holds for RFAs with their \emph{residualization} operation, i.e. \(\mathcal{N}^{\text{res}}\) is isomorphic to the canonical RFA \(\mathcal{C}\) for \(\lang{\mathcal{N}}\) for every co-RFA \(\mathcal{N}\).
Later, \citet{tamm2015generalization} proved the following result:
\begin{lemmaC}[\citet{tamm2015generalization}]\label{lemma:tammGen}
Let \(\mathcal{N}\) be an NFA and let \(\mathcal{C}\) be the \emph{canonical RFA} for \(\lang{\mathcal{N}}\).
Then, \(\mathcal{N}^{\text{res}}\) is\linebreak isomorphic to \(\mathcal{C}\) if{}f the \emph{left language} of every state of \(\mathcal{N}\) is a \emph{union of left languages} of states of \(\mathcal{C}\).
\end{lemmaC}
The result of \citet{tamm2015generalization} generalizes the double-reversal method for RFAs along the lines of the generalization by \citet{Brzozowski2014} of the double-reversal method for DFAs which we state next.
\begin{lemmaC}[\citet{Brzozowski2014}]
Let \(\mathcal{N}\) be an NFA and let \(\mathcal{M}\) be the \emph{minimal DFA} for \(\lang{\mathcal{N}}\).
Then \(\mathcal{N}^D\) is isomorphic to \(\mathcal{M}\) if{}f the \emph{left language} of each state of \(\mathcal{N}\) is a \emph{union of co-atoms} of \(\lang{\mathcal{N}}\).
\end{lemmaC}
Although the two generalizations have a common foundation, the connection between the two resulting characterizations is not immediate.
Our work, together with the work of \citet{ganty2019congruence} allows us to clarify the relation between these two results and our Theorem~\ref{theorem:canonicalreverserestic}.
Indeed, \citet{ganty2019congruence} offered a congruence-based perspective of the generalized double-reversal method for building the minimal DFA which led to the following result.
\begin{lemmaC}[\citet{ganty2019congruence}]\label{lemma:congruenceDFA}
Let \(\mathcal{N}\) be an NFA and let \(\mathcal{M}\) be the minimal DFA for \(\lang{\mathcal{N}}\).
Then \(\mathcal{N}^D\) is isomorphic to \(\mathcal{M}\) if{}f
\[ρ_{\sim^{r}L}(W_{I,q}^{\mathcal{N}}) = W_{I,q}^{\mathcal{N}}\enspace ,\]
where \(\mathord{\sim^{r}L} \ud \mathord{\leqslant^{r}L} \cap \mathord{(\leqslant^{r}L)^{-1}}\) is the right Nerode's congruence.
\end{lemmaC}
We believe that the similarity between the generalizations of the double-reversal methods for the minimal DFA (Lemma~\ref{lemma:congruenceDFA}) and for the canonical RFA (Theorem~\ref{theorem:canonicalreverserestic}), which says that
\[\cG{r}(\mathcal{N}) \text{ is isomorphic to \(\mathcal{C}\)} \Leftrightarrow ρ_{\leqslant^{r}L}(W_{I,q}^{\mathcal{N}}) = W_{I,q}^{\mathcal{N}}\enspace ,\]
evidences that \emph{quasiorders are for RFAs as congruences are for DFAs}.
Figure~\ref{fig:conclusions} summarizes the existing results about these double-reversal methods.
\begin{figure}
\caption{Summary of the existing results about the generalized double-reversal method for building the minimal DFA (first row) and the canonical RFA (second row) for a given language. The results on the first column are based on the notion of \emph{atoms}
\label{fig:conclusions}
\end{figure}
Moreover, as shown by Lemma~\ref{lemma:qrlEqualqrNResEqualCan}, our residualization operation \(\cG{r}(\mathcal{N})\) offers a desirable property that \(\mathcal{N}^{\text{res}}\) lacks: residualizing \(\mathcal{N}\) yields the canonical RFA for \(\lang{\mathcal{N}}\) if{}f \(\mathord{\leqslant^{r}L} = \mathord{\leqslant^{r}N}\).
Again, this property is equivalent to the one presented by \citet{ganty2019congruence} for DFAs.
\begin{lemmaC}[\citet{ganty2019congruence}]
Let \(\mathcal{N}\) be an NFA and let \(\mathcal{M}\) be the minimal DFA for \(\lang{\mathcal{N}}\).
Then \(\mathcal{N}^D\) is isomorphic to \(\mathcal{M}\) if{}f \(\mathord{\sim^{r}N} = \mathord{\sim^{r}L}\), where \(\mathord{\sim^{r}N} \ud \mathord{\leqslant^{r}N} \cap (\leqslant^{r}N)^{-1}\) is the right state-based congruence.
\end{lemmaC}
On the other hand, since \citet{ganty2019congruence} showed that the left languages of the minimal DFA for a regular language are the blocks of the partition \(ρ_{\sim^{r}L}\), Lemma~\ref{lemma:congruenceDFA} can be equivalently stated as follows.
\begin{lemmaC}[\citet{ganty2019congruence}]\label{lemma:congruenceTamm}
Let \(\mathcal{N}\) be an NFA and let \(\mathcal{M}\) be the minimal DFA for \(\lang{\mathcal{N}}\).
Then \(\mathcal{N}^D\) is isomorphic to \(\mathcal{M}\) if{}f the left language of each state of \(\mathcal{N}\) is a union of left languages of states of the minimal DFA.
\end{lemmaC}
Therefore, Lemma~\ref{lemma:congruenceTamm} can be seen as the DFA-equivalent of Tamm's condition for RFAs (Lemma~\ref{lemma:tammGen}).
Hence, Lemma~\ref{lemma:congruenceTamm}, together with Lemma~\ref{lemma:congruenceDFA}, evidences the connection between the generalization of the double reversal for RFAs of \citet{tamm2015generalization} and the one for DFAs of \citet{Brzozowski2014}.
Finally, we further support the idea that quasiorders are natural to residual automata by observing that the NL\(^*\) algorithm proposed by \citet{bollig2009angluin} for learning RFAs can be interpreted within our framework as an algorithm that, at each step, refines an approximation of the Nerode's quasiorder and builds an RFA using our automata construction.
{}
\pagestyle{plain}
\chapter*{Funding Acknowledgments}
This research was partially supported by:
\begin{myItem}
\item The Spanish Ministry of Economy and Competitiveness project No.\ PGC2018-102210-B-I00.
\item The Spanish Ministry of Science and Innovation project No. TIN2015-71819-P.
\item The Madrid Regional Government project No.\ S2018/TCS-4339.
\item The Madrid Regional Government project No.\ S2013/ICE-2731.
\item The Ramón y Cajal fellowship RYC-2016-20281.
\item German Academic Exchange Service (DAAD) program ``Research Grants - Short-Term Grants 2018 (57378443)''.
\end{myItem}
\backmatter
\pagestyle{plain}
\addcontentsline{toc}{chapter}{Bibliography}
\printindex
\end{document} |
\begin{document}
\title[]{Quasismooth hypersurfaces in toric varieties}
\author{Michela Artebani}
\address{
Departamento de Matem\'atica, \newline
Universidad de Concepci\'on, \newline
Casilla 160-C,
Concepci\'on, Chile}
\email{[email protected]}
\author{Paola Comparin}
\address{
Departamento de Matem\'atica y Estad\'istica, \newline
Universidad de La Frontera, \newline
Av. Francisco Salazar 1145,
Temuco, Chile}
\email{[email protected]}
\author{Robin Guilbot}
\address{Faculty of Mathematics, \newline
Computer Science and Mechanics,\newline
University of Warsaw, \newline
ul. Banacha 2, 02-097 Warszawa, Poland }
\email{[email protected]}
\subjclass[2010]{14M25, 14J32, 14J17, 32S25}
\keywords{Toric variety, quasismooth, Newton polytope, Calabi-Yau variety}
\thanks{The first author has been partially
supported by Proyecto Fondecyt Regular N. 1130572
and Proyecto Anillo ACT 1415 PIA Conicyt,
the second author has been partially
supported by Proyecto Fondecyt Postdoctorado N. 3150015 and Proyecto Anillo ACT 1415 PIA Conicyt,
the third author was supported by the NCN project 2013/08/A/ST1/00804.}
\begin{abstract}
We provide a combinatorial characterization of monomial linear systems on toric varieties
whose general member is quasismooth. This is given both in terms of the Newton polytope
and in terms of the matrix of exponents of a monomial basis.
\end{abstract}
\maketitle
\section*{Introduction}
Let $X$ be a normal projective toric variety over ${\mathbb C}$ with
homogeneous coordinate ring $R(X)$.
According to Cox's construction, there exists a GIT quotient
$p:\hat X\to X$ by the action of a quasi-torus,
where $\hat X$ is an open subvariety of $\operatorname{Spec} R(X)$
obtained by removing a closed subset of codimension at least two,
the irrelevant locus \cite[Chapter 5]{CLS}.
This description makes it possible to describe the geometry of $X$
by means of homogeneous coordinates, as for the usual projective space.
In particular, a hypersurface $Y$ of $X$ can be defined as (the image by $p$ of)
the zero set of a homogeneous element $f\in R(X)$.
Such a hypersurface $Y$ is called {\em quasismooth} if $V(f)$ is smooth in $\hat X$,
i.e. the singular locus of $f$ is contained in the irrelevant locus.
This implies that the singularities of $Y$ are induced by those of the ambient space,
that is they are due to the isotropy in the action of the quasi-torus on $\hat X$.
In particular $Y$ is smooth if $X$ is smooth.
To the authors' knowledge, the concept of quasismoothness
first appeared in the work of Danilov \cite{Da2} and later in the work of Batyrev and Cox \cite{BC}
for simplicial toric varieties. Under such hypothesis $X$ has abelian quotient singularities and $p$
is a geometric quotient. In \cite[Proposition 3.5]{BC} it is proved that if $X$ is simplicial,
then a hypersurface $Y$ is quasismooth if and only if it is a $V$-submanifold of $X$.
In particular $Y$ has abelian quotient singularities as well.
In the general case, by \cite{Bo}, quasismooth hypersurfaces have rational singularities.
Our motivation for the study of this regularity condition is the fact that
quasismooth hypersurfaces with trivial canonical class give examples of
Calabi-Yau varieties with canonical singularities \cite{ACG}.
In fact, quasismoothness appears as a regularity condition in
the literature on mirror symmetry, for example it is required
for the Berglund-H\"ubsch-Krawitz duality \cite{BH}, \cite{Kr}.
A characterization of quasismooth hypersurfaces in weighted projective spaces
is given in \cite{kreuzerskarke} (see also \cite[Theorem 8.1]{F}). As a consequence, this gives a nice description
of quasismooth polynomials having the same number of monomials as variables
(called of {\em Delsarte type}), see Corollary \ref{cor-simpl}.
In this paper we define and study quasismoothness for hypersurfaces in any normal toric variety $X$.
Given a monomial linear system $\mathcal L$ on $X$, we provide combinatorial conditions
for the quasismoothness of a general member $Y$ of $\mathcal L$
in terms both of its Newton polytope and of the matrix of exponents of a defining
equation of $Y$ in homogeneous coordinates.
The paper is organized as follows. In section \ref{sec-pre} we provide some background on the Cox construction
for toric varieties and on monomial linear systems.
Section \ref{sec-qs} contains the definition and some preliminary observations on quasismoothness.
In section \ref{sec-newt} we characterize quasismoothness in terms of a geometric condition on
the Newton polytope of the linear system, based on results by Khovanskii.
In section \ref{sec-matrix} we characterize quasismoothness in terms of the matrix of exponents of the linear system,
translating the previous result into a linear algebra condition.
In section \ref{sec-wps} we recover the known results on quasismoothness in fake weighted projective spaces.
In section \ref{sec-cy} we use quasismoothness to define families of Calabi-Yau hypersurfaces associated
to good pairs of polytopes (see \cite{ACG}) and we discuss the behavior of quasismoothness under polar duality.
Finally, section \ref{sec-app} contains some applications of the previous results for hypersurfaces of low dimension.
\noindent {\em Acknowledgments.}
We would like to thank Antonio Laface for several enlightening discussions and the anonymous referee for
his/her comments.
\section{Preliminaries} \label{sec-pre}
In this section we recall some basic facts about the Cox ring of a toric variety
and we recall the construction due to D.~Cox which presents any toric variety
as a GIT quotient of an open subset of an affine space by the action of a quasi-torus.
Let $X=X_{\Sigma}$ be a toric variety associated to a fan $\Sigma$ in $N_{\mathbb Q}$,
where $N$ is a lattice.
As usual we denote by $\Sigma(1)$ the set of one dimensional cones in $\Sigma$,
which are in bijection with the integral torus-invariant divisors of $X$.
The {\em Cox ring}, or {\em homogeneous coordinate ring}, of $X$ is the
polynomial ring
\[
R(X)={\mathbb C}[x_\rho: \rho\in \Sigma(1)],
\]
graded by the divisor class group $\operatorname{Cl}(X)$ in the natural way:
the degree of $x_{\rho}$ is the class in $\operatorname{Cl}(X)$
of the integral torus-invariant divisor corresponding to $\rho$.
Let $\bar X:={\rm Spec}\, R(X)\cong {\mathbb C}^{|\Sigma(1)|}$,
$J$ be the {\em irrelevant ideal} in $R(X)$, i.e.
the ideal generated by the monomials
\[
x^{\hat \sigma}:=\prod_{\rho\not\in \sigma(1)}x_\rho,\quad \sigma\in \Sigma,
\]
and $\hat X:=\bar X-V(J)$.
The ${\rm Cl}(X)$-grading of $ R(X)$ induces an action of the quasi-torus
$G={\rm Spec}\, {\mathbb C}[{\rm Cl}(X)]$ on $\bar X$ which preserves $\hat X$.
Moreover, there exists a morphism
\[
p:\hat X\to X,
\]
called {\em characteristic map}, which is a GIT quotient
by the action of $G$ \cite[Chapter 5]{CLS}.
The quotient is geometric if and only if $\Sigma$ is simplicial \cite[Theorem 5.1.11]{CLS}.
Given a hypersurface $Y$ of $X$, we define its {\em pull-back} $p^*(Y)$ in $\hat X$
in the following way.
Let $X^0$ be the smooth locus of $X$, whose complement has codimension
at least two in $X$ since $X$ is normal, and let $Y^0=Y\cap X^0$.
Since $Y^0$ is a Cartier divisor one can consider the usual pullback $(p_{|p^{-1}(X^0)})^*(Y^0)$
obtained composing the local defining functions of $Y^0$ with $p$.
We define $p^*(Y)$ to be the closure of $p^*(Y^0)$ in $\hat X$.
Given a linear system $\mathcal L$ on $X$ let $p^*\mathcal L$ be its pull-back,
obtained taking the pull-back of each element of $\mathcal L$.
Observe that $p^*\mathcal L$ is a linear system on $\hat X$ since
the complement of $p^{-1}(X^0)$ has codimension at least two in $\hat X$.
Moreover $p^*\mathcal L$ is associated to a homogeneous subspace $V_{\mathcal L}$ of $R(X)$:
\[
p^*\mathcal L=\{{\rm div}(f): f\in V_{\mathcal L} \}.
\]
Let $B(\mathcal L)$ and $B^*(\mathcal L)$ be the base loci of $\mathcal L$ and
$p^*\mathcal L$ respectively. Clearly $p(B^*(\mathcal L))\subseteq B(\mathcal L)$.
A linear system $\mathcal L$ on $X$ will be called {\em monomial linear system}
if the associated subspace $V_{\mathcal L}\subset R(X)$ is generated by monomials of the same degree in the variables $x_\rho, \rho\in\Sigma(1)$.
We define the {\em Newton polytope} $\Delta(\mathcal L)$ of a monomial linear system $\mathcal L$
to be the convex hull of the set of exponents of a monomial basis of $V_{\mathcal L}$
in ${\mathbb R}^{|\Sigma(1)|}$. Observe that $\Delta(\mathcal L)$ is contained in the intersection of the positive
orthant with the affine space $Q^{-1}(w)$, where $w\in \operatorname{Cl}(X)$ is the degree of
the elements in $V_{\mathcal L}$ and
\[
Q:{\mathbb R}^{|\Sigma(1)|}\to \operatorname{Cl}(X)
\]
is the homomorphism which associates
to $e_\rho$ the degree of $x_\rho$.
If $\mathcal L$ is a monomial linear system, then
the base locus of $p^*\mathcal L$ is the vanishing locus of all the monomials generating $V_{\mathcal L}$.
In particular it is a union of strata of the following form
\[
D_{\sigma}=\{x\in \hat X: x_{\rho}=0, \rho\in \sigma(1)\},
\]
where $\sigma\in \Sigma$. The following result implies that monomial linear systems
with the same Newton polytope have the same base locus.
\begin{lemma}\label{bs}
Let $\mathcal L$ be a monomial linear system on a complete toric variety and $\Delta(\mathcal L) \subset {\mathbb R}^{|\Sigma(1)|}$ be
its Newton polytope.
Then $B^*(\mathcal L)$ is the vanishing locus of all the monomials $x^a=\prod_{\rho \in \Sigma(1)} x_{\rho}^{a_\rho}$, where
$a$ is a vertex of $\Delta(\mathcal L)$.
\end{lemma}
\begin{proof}
Let $S$ be the set of exponents of a monomial basis of $V_{\mathcal L}$.
It is sufficient to show that the vanishing locus of all the monomials $x^b$ for $b \in S$
is the vanishing locus of all the monomials $x^a$ for $a$ a vertex of $\Delta(\mathcal L)$.
One inclusion is obvious, let us prove the other. For all $b \in S$ there exist vertices $a_1, \ldots, a_k$ of $\Delta(\mathcal L)$
and positive rational numbers $\lambda_1, \ldots, \lambda_k \in \ (0,1] \cap {\mathbb Q}$ such that $\sum_{i=1}^k \lambda_i a_i=b$
and $\sum_{i=1}^k \lambda_i =1$. It follows that all the variables appearing in the $x^{a_i}$'s also appear in $x^b$.
So the vanishing of all the $x^{a_i}$'s implies the vanishing of $x^b$, which proves the second inclusion.
\end{proof}
\section{Quasismoothness}\label{sec-qs}
\begin{definition}
Let $X=X_\Sigma$ be a toric variety and $p:\hat X\to X$ be its characteristic map.
A hypersurface $Y\subset X$ is {\em quasismooth at a point} $y\in Y$ if $p^*(Y)$ is smooth
at $p^{-1}(y)\cap p^*(Y)$;
it is {\em quasismooth}
if $p^*(Y)\subset \hat X$ is smooth.
\end{definition}
Observe that $p^*(Y)$ is the zero set of a homogeneous element $f_Y\in R(X)$ in $\hat X$,
since it is $G$-invariant. We will denote by $S(f_Y)$ the singular locus of $f_Y$ in $\bar X$,
that is the zero set of all partial derivatives of $f_Y$ in $\bar X$.
Thus $Y$ is quasismooth if and only if $S(f_Y)$ is contained
in the irrelevant locus $V(J)$ of $X$.
We start considering the behaviour of quasismoothness in linear systems.
\begin{proposition} \label{zariski}
Let $\mathcal L$ be a linear system of a complete toric variety $X_\Sigma$.
The set of quasismooth elements in $\mathcal L$ is an open Zariski subset
of $\mathcal L$.
\end{proposition}
\begin{proof}
Let $p:\hat X\to X$ be the characteristic map of $X$ and let $r=|\Sigma(1)|$.
We will denote by $f_Y\in R(X)$ a defining polynomial for a hypersurface $Y$ of $X$,
as explained above.
Consider the set
\[
S=\{(x,Y)\in \hat X\times \mathcal L: f_Y(x)=\frac{\partial f_Y}{\partial x_j}(x)=0,\ j=1,\dots,r\}
\]
and the projection $\pi:S\to \mathcal L$. The fiber over $Y\in \mathcal L$ is empty
exactly when $Y$ is quasismooth. Observe that $\pi$ factors through
$\bar\pi: (p\times{\rm id})(S)\to \mathcal L$.
Since $ (p\times{\rm id})(S)$ is complete, then the image of $\bar\pi$ is Zariski closed in $\mathcal L$ \cite[Exercise 4.4, Chapter II]{Ha}.
This gives the statement.
\end{proof}
As a direct consequence of the previous Proposition, one has the following.
\begin{corollary} Let $X=X_{\Sigma}$ be a complete toric variety and $\mathcal L$ be a
linear system on $X$.
Then the following are equivalent:
\begin{enumerate}
\item the general element of $\mathcal L$ is quasismooth;
\item there exists a quasismooth element in $\mathcal L$.
\end{enumerate}
\end{corollary}
In what follows we will say that a linear system is quasismooth if its general element is quasismooth.
\begin{proposition}
The general element of $\mathcal L$ is quasismooth outside of $p(B^*(\mathcal L))$.
In particular $\mathcal L$ is quasismooth if $B(\mathcal L)=\emptyset$.
\end{proposition}
\begin{proof}
By the first Bertini's theorem applied to the linear system $p^*\mathcal L$ on $\hat X$,
we have that the singular locus of the general element of $p^*\mathcal L$ is contained
in its base locus. Taking the image in $X$, this gives the statement.
\end{proof}
Finally, we observe that quasismoothness behaves well with respect to finite coverings which are the identity
in Cox coordinates (see \cite[Lemma 1.1]{ACG}).
\begin{proposition}\label{fq}
Let $X$ be a toric variety associated to a fan $\Sigma\subset N_{\mathbb Q}$,
$\iota: N\to N'$ be a lattice monomorphism with finite cokernel and $\pi : X \to X'$
be the associated finite quotient. If the primitive generators in $N$
of the rays of the fan of $X$ are primitive in $N'$,
then the homomorphism $\pi^*: R(X') \to R(X)$ can be taken to be the identity.
In particular, if $\mathcal L$ is a monomial linear system on $X'$, then
$\mathcal L$ is quasismooth if and only if $\pi^*\mathcal L$ is quasismooth.
\end{proposition}
\section{In terms of the Newton polytope}\label{sec-newt}
In this section we provide a first characterization of quasismoothness
based on results in \cite{K}.
We will denote by $T^r\cong ({\mathbb C}^*)^r$ the $r$-dimensional torus
with coordinates $x=(x_1,\dots,x_r)$.
A Laurent polynomial on $T^r$ can be written as a finite sum
$P=\sum_{m}a_mx^m$ with $m\in {\mathbb Z}^r$ and $a_m\in {\mathbb C}$.
The {\em support} of $P$ is the set of $m\in {\mathbb Z}^r$ such that $a_m\not=0$ and
the convex hull of the support of $P$ in ${\mathbb R}^r$ is the {\em Newton polytope} $\Delta(P)$ of $P$.
\begin{definition}
A collection of $\ell>0$ non-empty polytopes in ${\mathbb R}^n$ is {\em degenerate}
if it is possible to
translate all the polytopes in an $(\ell-1)$-dimensional subspace.
A collection of polytopes in ${\mathbb R}^n$ is called {\em dependent} if it contains a degenerate subcollection.
By convention, the empty collection of polytopes is independent.
\end{definition}
Given a set of Laurent polynomials $P_1,\dots,P_k\in {\mathbb C}[x_1,x_1^{-1},\dots,x_r,x_r^{-1}]$,
we will say that they are {\em general with fixed support} if each $P_i$ is general in the family
of polynomials having the same support as $P_i$.
The following comes from \cite[Theorem 1, \S 2.5]{K} and \cite[Lemmas 1,2, \S 2.2]{K}.
\begin{theorem}\label{khov}
Let $P_1 = \dots = P_k = 0$ be a system of equations
where $P_i\in {\mathbb C}[x_1,x_1^{-1},\dots,x_r,x_r^{-1}]$ are general with fixed support.
If the Newton polyhedra $\Delta(P_1),\dots, \Delta(P_k)$ are dependent,
then the system is not compatible in $T^r$.
Otherwise it defines an analytic $(r - k)$-dimensional manifold in $T^r$.
\end{theorem}
We now consider hypersurfaces in toric varieties.
Let $\mathcal L$ be a monomial linear system on a toric variety $X=X_{\Sigma}$ with $r=|\Sigma(1)|$
and let $f\in R(X)$ be a defining element for a general hypersurface in the linear system.
The stratum $D_\sigma$, $\sigma\in \Sigma$, is contained in $B^*(\mathcal L)$
if and only if one can write $f=\sum_{\rho\in \sigma(1)} x_\rho f_\rho$.
We now define a set of polytopes associated to the pair $(\mathcal L,\sigma)$.
\begin{definition} With the previous notation, we define $\Delta_\sigma^\rho(\mathcal L)$
to be the Newton polytope of the restriction of $f_{\rho}$ to $D_{\sigma}$.
\end{definition}
It is easy to see that the definition of $\Delta_\sigma^\rho(\mathcal L)$ does not depend
on the choice of the writing for $f$, which is not unique in general (this also follows from the description in
Remark \ref{polytopes}).
\begin{remark}\label{ambient}
All polytopes $\Delta_\sigma^{\rho}(\mathcal L)$, $\rho\in \sigma(1)$,
are contained in the linear subspace
$Q^{-1}(w)\cap \{m_\rho=0: \rho\in \sigma(1)\}\subset {\mathbb R}^r$ (see Section \ref{sec-pre}),
whose dimension is equal to $\dim p(D_\sigma)$.
Of course some of the polytopes can be empty.
\end{remark}
\begin{remark}\label{polytopes} After restricting to $D_{\sigma}$, the only monomials left in $f_{\rho}$
are those in the variables $x_{\tau}$ with $\tau\not\in \sigma(1)$.
Thus we have
\[
\Delta_\sigma^\rho(\mathcal L)+e_{\rho}={\rm conv}(m\in \Delta(f)\cap {\mathbb Z}^r: m_{\rho}=1 \text{ and } m_\gamma=0 \text{ for } \gamma\in \sigma(1)\backslash\{\rho\}).
\]
These polytopes, if not empty, are disjoint faces of the face of $\Delta(f)$ defined by $\sum_{\rho\in \sigma(1)}m_{\rho}=1$.
\end{remark}
\begin{theorem}\label{thm1}
Let $X=X_{\Sigma}$ be a toric variety and $\mathcal L$ be a monomial linear system on $X$.
The general element of $\mathcal L$ is quasismooth if and only if for any $\sigma\in \Sigma$
such that $D_\sigma\subseteq B^*(\mathcal L)$,
the set of non-empty polyhedra $\Delta_\sigma^\rho(\mathcal L)$, $\rho\in \sigma(1)$, is dependent.
\end{theorem}
\begin{proof}
Let $f\in R(X)$ be a defining element for a general hypersurface in the linear system.
Assume $f$ to be not quasismooth, that is the singular locus $S(f)$ of $f$ intersects
$\hat X$. Let $S$ be an irreducible component of $S(f)\cap \hat X$ and
let $D_\sigma$ be the smallest toric stratum containing $S$, $\sigma\in\Sigma$.
Observe that $D_\sigma \subseteq B^*(\mathcal L)$, thus we can write $f=\sum_{\rho\in \sigma(1)}x_\rho f_\rho$.
By the minimality condition on $D_\sigma$,
$S$ contains a point in the torus $T_\sigma$ of coordinates $x_\tau, \tau \not\in \sigma(1)$.
Thus
\[S(f)\cap D_\sigma=\cap_{\rho\in \sigma(1)} V(f_\rho)\cap D_\sigma
\]
intersects the torus $T_\sigma$.
By Theorem \ref{khov} the set of Newton polyhedra of the restrictions of the
$f_\rho$'s to $D_\sigma$ is independent. Observe that the polynomials $f_\rho$
are generic with fixed support by the generality assumption on $f$.
Conversely, if there is a stratum $D_{\sigma}$, $\sigma\in\Sigma$, such that
$D_\sigma\subseteq B^*(\mathcal L)$ and the set of polytopes
$\Delta_\sigma^\rho(\mathcal L)$ with $\rho\in \sigma(1)$ is independent, then by Theorem \ref{khov}
the $f_\rho$'s have a common zero in $T_\sigma$.
Thus $S(f)\cap T_\sigma$ is not empty. This implies that $S(f)\cap \hat X$ is non empty as well.
\end{proof}
\begin{corollary}\label{empty}
If there exists $\sigma\in\Sigma$ such that $D_\sigma\subseteq B^*(\mathcal L)$
and
$\Delta_\sigma^\rho(\mathcal L)=\emptyset$ for all $\rho\in\sigma(1)$,
then the general element of $\mathcal L$ is not quasismooth.
\end{corollary}
Given $D_\sigma\subseteq B^*(\mathcal L)$ we denote by
$k_{\sigma}(\mathcal L)$ the number of $\rho\in \sigma(1)$ such that ${f_\rho}_{|D_\sigma}$ is not zero,
that is the number of non-empty polytopes $\Delta_\sigma^{\rho}(\mathcal L)$, $\rho\in \sigma(1)$.
\begin{corollary}\label{cor-qs}
If $k_\sigma(\mathcal L)>\dim p(D_\sigma)$ for all $\sigma\in \Sigma$ such that $D_\sigma\subseteq B^*(\mathcal L)$,
then the general element of $\mathcal L$ is quasismooth.
\end{corollary}
\begin{proof}
This is an immediate consequence of Remark \ref{ambient} and the fact that
any set containing at least $\ell+1$ polyhedra in a $\ell$-dimensional space is dependent.
\end{proof}
\begin{example}\label{blowup}
The following example shows that the converse of Corollary \ref{cor-qs} is false.
Let us consider the linear system $\mathcal L$ in $X=\mathbb P^3$ generated by the following monomials:
\[
x_1^3x_2,\ x_2^4,\ x_3^3x_4,\ x_4^4.
\]
Consider the blow up $\tilde X$ of $X$ at the points
$(0,0,1,0)$ and $(1,0,0,0)$.
The Cox ring of $\tilde X$ is ${\mathbb C}[y_1,\dots,y_6]$, where the degrees of the variables
are the columns of the matrix
\[
\left(
\begin{array}{cccccc}
1 & 1 & 1 & 1 & 0 & 0\\
0 & 0 & 1 & 0 & 1 & 0\\
1 & 0 &0 & 0 & 0 & 1
\end{array}
\right)
\]
and the variables $y_5, y_6$ define the two exceptional divisors.
Moreover, a computation using the following Magma \cite{Magma} program
\begin{verbatim}
P<[x]> := ProjectiveSpace(Rationals(),3);
B1,f1 := Blowup(P,&+[Rays(Fan(P))[i]: i in [1,2,4]]);
B<[y]>,f2 := Blowup(B1,&+[Rays(Fan(B1))[i]: i in [2,3,4]]);
f2*f1;
\end{verbatim}
gives that the blow up map in Cox coordinates is given by
\[
(y_1,y_2,y_3,y_4,y_5,y_6)\mapsto (y_1y_5, y_2y_5y_6,y_3y_6,y_4y_5y_6).
\]
Thus an easy computation shows the proper transform $\tilde{\mathcal L}$ of $\mathcal L$ in $\tilde X$ is generated by
\[
y_1^3y_2y_5^3,\ y_2^4y_5^3y_6^3,\ y_3^3y_4y_6^3,\ y_4^4y_5^3y_6^3.
\]
The closure of the base locus of $p^*\tilde{\mathcal L}$ is given by
\[
\{y_2=y_4=0\}\cup\{y_2=y_6=0\}\cup\{y_4=y_5=0\}.
\]
An easy check of the criterion in Theorem \ref{thm1} shows that
the general element of $\tilde{\mathcal L}$ is quasismooth (all polytopes $\Delta_{\sigma}^{\rho}(\mathcal{\tilde L})$ are either points or empty).
On the other hand, when $D_\sigma=\{y_2=y_6=0\}$, one has $k_\sigma(\tilde{\mathcal L})=1\leq3=\dim p(D_\sigma)$.
\end{example}
\begin{example}\label{not-max-comp}
The following example shows that the condition in Theorem \ref{thm1}
has to be checked for all subsets $D_\sigma$ in the base locus, not only for
the maximal ones.
Let $X={\mathbb P}^4$ and $\mathcal L$ be the linear system
generated by $x_1^3,\ x_2^2x_1, x_3^2x_1,\ x_2x_3x_4$.
The base locus of $\mathcal L$ is
\[
\{x_1=x_2=0\}\cup\{x_1=x_3=0\}\cup\{x_1=x_4=0\}.
\]
Observe that $\mathcal L$ is not quasismooth since
the polytopes $\Delta_{\sigma}^{\rho}$ are all empty
for $D_\sigma=\{x_1=x_2=x_3=0\}$.
On the other hand, the condition of Theorem \ref{thm1}
holds for the three maximal components of the base locus.
\end{example}
The theorem easily implies that quasismoothness only depends on the Newton polytope of $\mathcal L$.
In particular it is enough to consider the linear system generated by the monomials associated to the vertices
of the Newton polytope.
\begin{corollary}\label{vert}
Let $X$ be a toric variety and let $\mathcal L, \mathcal L'$
be two monomial linear systems on $X$ such that $\Delta(\mathcal L)=\Delta(\mathcal L')$.
The general element of $\mathcal L$ is quasismooth if and only if the same holds for the general element of
$\mathcal L'$.
\end{corollary}
\begin{proof}
By Lemma \ref{bs}, $B^*(\mathcal L)=B^*(\mathcal L')$.
Moreover, given $\sigma\in \Sigma$,
the polytopes $\Delta_\sigma^\rho(\mathcal L)$ and $\Delta_\sigma^\rho(\mathcal L')$ are the same by Remark \ref{polytopes}. Thus we conclude by Theorem \ref{thm1}.
\end{proof}
\begin{example}\label{ex1}
Let $X={\mathbb P}^2\times{\mathbb P}^1$ and $\mathcal L$ be the monomial linear system generated by
\[ x_0^2x_1y_0^2,\
x_0^2x_1y_1^2,\
x_0^2x_2y_0^2,\
x_0^2x_2y_1^2,\
x_1^3y_0^2,\
x_1^3y_1^2,\
x_2^3y_0^2,\
x_2^3y_1^2.\]
The only strata $D_\sigma$ such that $D_\sigma$ is contained in $B^*(\mathcal L)$
are $D_{\sigma}=\{x_1=x_2=0\}$
and its two substrata
\[
D_{\sigma_0}=\{x_1=x_2=y_0=0\},\quad D_{\sigma_1}=\{x_1=x_2=y_1=0\}.
\]
Considering $\sigma$, one has that
\[
\Delta_\sigma^{x_1}(\mathcal L)={\Delta_\sigma^{x_2}}(\mathcal L)={\rm conv}((2,0,0,2,0), (2,0,0,0,2)).
\]
Thus the two polytopes form a dependent set.
The same can be repeated for $D_{\sigma_0}$ and $D_{\sigma_1}$
so that the general element of $\mathcal L$ is quasismooth.
\end{example}
\begin{example}\label{p1}
Let $X={\mathbb P}^1\times{\mathbb P}^1\times{\mathbb P}^1$ with coordinates $(x_0,x_1,y_0,y_1,z_0,z_1)$.
Let $\mathcal L$ be the linear system generated by the monomials
\[
x_1^2y_0^2z_0^2,\
x_1^2y_1^2z_0^2,\
x_1^2y_0^2z_1^2,\
x_0^2y_1^2z_1^2,\
x_1^2y_1^2z_1^2,\
x_0^2y_1^2z_0^2,\
x_0x_1y_0^2z_1^2,\
x_0x_1y_0^2z_0^2
.\]
The base locus is given by $D_{\sigma}=\{x_1=y_1=0\}$ and
the only non-empty polytope
$\Delta_\sigma^\rho(\mathcal L),\rho\in\sigma(1)$ is a segment.
Thus Theorem \ref{thm1} proves that $\mathcal L$ is not quasismooth.
\end{example}
\section{In terms of the exponents matrix}\label{sec-matrix}
Let $X = X_{\Sigma}$ be a projective toric variety,
$\mathcal L$ be a monomial linear system on $X$
of degree $w\in \operatorname{Cl}(X)$ and $\Delta(\mathcal L)$ be its Newton polytope.
We define the {\em matrix of exponents of } $\mathcal L$ to be the matrix
$A$ whose rows are the vectors of exponents
of an ordered monomial basis of $V_{\mathcal L}$.
Moreover, we will denote by $A_{I,J}$ the submatrix of $A$
whose rows are indexed by elements of $I \subseteq \Delta(\mathcal L)\cap{\mathbb Z}^r$
and columns are indexed by elements of $J \subseteq \Sigma(1)$.
For any such matrix we denote by ${\mathbb R} A_{I,J}$ the linear span of the columns of $A_{I,J}$.
Given a non-empty subset $\gamma\subseteq \Sigma(1)$ we define
\[
M_{\gamma}=\{m\in \Delta(\mathcal L)\cap{\mathbb Z}^r: \sum_{i\in \gamma} m_i=1\}.
\]
\begin{theorem}\label{propqs}
Let $X = X_{\Sigma}$ be a projective toric variety with characteristic map $p:\hat X\to X$ and
$\mathcal L$ be a monomial linear system on $X$ with matrix of exponents $A$.
The following are equivalent
\begin{enumerate}
\item $\mathcal L$ is quasismooth;
\item for all $\sigma\in \Sigma$ such that $D_{\sigma}\subseteq B^*(\mathcal L)$ there exists a non-empty
subset $\gamma \subseteq \sigma(1)$
such that $M_{\gamma}$ is not empty and
\begin{equation}\label{eq}
2\,{\rm rk} (A_{M_\gamma,\gamma})>{\rm rk}(A_{M_{\gamma},\Sigma(1)}).
\end{equation}
\end{enumerate}
\end{theorem}
\begin{proof}
Let $\sigma\in \Sigma$ and $\gamma\subseteq \sigma(1)$
as in the statement, $k:=|\gamma|$ and $s:=|M_\gamma|$.
We can assume that $\Delta_{\sigma}^{\rho}(\mathcal L)$ is not empty for all $\rho\in \gamma$
since the empty polytopes give zero columns in both $A_{M_\gamma,\gamma}$
and $A_{M_{\gamma},\Sigma(1)}$.
We will now prove that the collection of polytopes $\{\Delta_{\sigma}^{\rho}(\mathcal L)\}_{\rho\in \gamma}$
is degenerate if and only if $2\,{\rm rk} (A_{M_\gamma,\gamma})>{\rm rk}(A_{M_{\gamma},\Sigma(1)})$.
This implies the thesis by Theorem \ref{thm1}.
The minimal dimension $d$ of a linear space containing translates of the polytopes $\Delta_{\sigma}^{\rho}(\mathcal L)$, $\rho\in \gamma$,
is the dimension of the linear span of any set of translates of such polytopes that all contain the origin. In particular if we pick a
lattice point $m(\rho) \in \Delta_{\sigma}^{\rho}(\mathcal L)\cap {\mathbb Z}^r$ for each $\rho\in \gamma$, we have
\[
d=\dim\,{\rm Span}(\Delta_{\sigma}^{\rho}(\mathcal L)-m(\rho): \rho\in \gamma).
\]
The columns of $A_{M_\gamma ,\gamma}$ are of the form $A_\rho = (A_{m,\rho })_{m\in M_\gamma}$ with $A_{m,\rho} = 1$ if $m\in \Delta_{\sigma}^{\rho}(\mathcal L)$ and 0 otherwise.
In particular the columns of $A_{M_\gamma ,\gamma}$ are linearly independent
since for $\rho\not= \rho'$ the polytopes $\Delta_{\sigma}^{\rho}(\mathcal L)$ and $\Delta_{\sigma}^{\rho'}(\mathcal L)$
have no common lattice point. It follows
that ${\rm rk}(A_{M_\gamma,\gamma}) = k$ and that the linear map
\[
\phi_{\gamma} : {\mathbb R}^k \to {\mathbb R} A_{M_\gamma,\Sigma(1)}\subseteq {\mathbb R}^s,\quad x\mapsto A_{M_\gamma,\gamma}\cdot x
\]
is injective.
We now show that $d=\dim({\rm coker}\,\phi_\gamma)$.
This gives the thesis since the degeneracy condition $k > d$ is thus equivalent to
$k>{\rm rk}(A_{M_\gamma,\Sigma(1)})-k$.
We consider the following commutative diagram with exact rows
\[
\xymatrix{
0\ar[r] & {\mathbb R}^k\ar@{=}[d]\ar[r]^{\tilde \phi_\gamma} & {\mathbb R}^ {s}\ar[r]^{\tilde \alpha} & \tilde C\ar[r] & 0\\
0\ar[r] & {\mathbb R}^k\ar[r]^-{\phi_\gamma} & {\mathbb R} A_{M_\gamma,\Sigma(1)}\ar[r]^{\alpha}\ar[u]^{i} & C\ar[u]\ar[r] & 0,\\
}
\]
where the vertical arrows are inclusions.
In order to compute $\dim(C)$ we compute the rank of the map $\tilde \alpha \circ i$.
The map $\tilde\alpha$ is given by the $(s-k)\times s$
matrix $B$ which is the vertical join of the matrices $B_\rho$, with $\rho\in \gamma$, where the rows of $B_\rho$
are indexed by the elements $m\in \Delta_{\sigma}^{\rho}(\mathcal L)\cap{\mathbb Z}^n-\{m(\rho)\}$
and have $1$ in the position $m(\rho)$, $-1$ in the position $m$ and $0$ in all other positions.
The rows of the matrix $B\cdot A_{M_\gamma,\Sigma(1)}$ are the vectors of exponents of the monomials $x^{m(\rho)}/x^{m}$ for $\rho\in\gamma$, $m\in\Delta_{\sigma}^{\rho}(\mathcal L)\cap{\mathbb Z}^n-\{m(\rho)\}$.
It follows that the rank of $\tilde \alpha\circ i$ is equal to the dimension of
the linear span of the differences $m(\rho)-m$, and hence is $d$, which proves the theorem.
\end{proof}
\begin{remark} In Theorem \ref{propqs} one can replace $M_{\gamma}$
with the set $V$ of vertices of all the polytopes $\Delta_{\sigma}^{\rho}(\mathcal L)$, $\rho\in \gamma$.
In fact ${\rm rk} (A_{V,\gamma})={\rm rk} (A_{M_\gamma,\gamma})=k$ and
${\rm rk}(A_{V,\Sigma(1)})={\rm rk}(A_{M_{\gamma},\Sigma(1)})$ since any lattice point
in $M_\gamma$ is in the linear span of $V$.
\end{remark}
\begin{example}
Let $\mathcal L$ be as in Example \ref{ex1}. The matrix $A$ is of the form
\[A=\left(\begin{array}{ccccc}
2&1&0&2&0\\
2&1&0&0&2\\
2&0&1&2&0\\
2&0&1&0&2\\
0&3&0&2&0\\
0&3&0&0&2\\
0&0&3&2&0\\
0&0&3&0&2
\end{array}
\right).
\]
When considering $D_\sigma=\{x_1=x_2=0\}\subset B^*(\mathcal L)$ one can
choose $\gamma=\{\rho_1,\rho_2\}$, where $\rho_i$ is the ray
corresponding to the variable $x_i$, since we already observed that $\Delta_\sigma^{\rho_j}(\mathcal L), j=1,2$ are not empty.
With this choice, \[M_\gamma= \{(2,1,0,2,0),(2,1,0,0,2),(2,0,1,2,0),(2,0,1,0,2)\}\] and
the matrices appearing in the proof of Theorem \ref{propqs} are
\[A_{V,\gamma}=\left(\begin{array}{cc}
1&0\\
1&0\\
0&1\\
0&1\\
\end{array}
\right),\
A_{V,\Sigma(1)}=\left(\begin{array}{ccccc}
2&1&0&2&0\\
2&1&0&0&2\\
2&0&1&2&0\\
2&0&1&0&2
\end{array}
\right),\
B=
\left(\begin{array}{cccc}
1&-1&0&0\\
0&0&1&-1
\end{array}
\right).
\]
We have ${\rm rk}(A_{M_\gamma,\gamma})= {\rm rk}(A_{V,\gamma})=2$ and ${\rm rk}(
A_{M_\gamma,\Sigma(1)})={\rm rk}(
A_{V,\Sigma(1)})=3$, so that the condition is satisfied for this cone $\sigma$.
The same can be repeated for $D_{\sigma_0}=\{x_1=x_2=y_0=0\}$ and $D_{\sigma_1}=\{x_1=x_2=y_1=0\}$, proving thus quasismoothness of $\mathcal L$.
\end{example}
\section{The case of fake weighted projective spaces}\label{sec-wps}
In this section we
show how our results allow us to recover the
known classification of quasismooth hypersurfaces in fake weighted projective
spaces (see \cite{Kou1, Kou2, kreuzerskarke, F} and \cite[Remark 2.3]{HK} for further references).
A fake weighted projective space is a complete simplicial toric variety $X$ with Picard number one (see also \cite{Bu} and \cite[Lemma 2.11]{BC}).
In particular $X$ is $\mathbb Q$-factorial and $\bar X\backslash \hat X=\{0\}$.
We start giving an easy necessary condition for quasismoothness.
We denote by $r_\sigma$ the dimension of $(\bar X\backslash \hat X)\cap D_\sigma$.
\begin{proposition}\label{thm-qs}
Let $X=X_{\Sigma}$ be a projective toric variety
and $\mathcal L$ be a quasismooth monomial linear system on $X$
whose monomial basis does not contain any generator of $R(X)$.
Then for any $D_\sigma\subseteq B^*(\mathcal L)$ one has
\begin{equation}
\dim(D_\sigma)-k_\sigma(\mathcal L)\leq r_\sigma.
\label{eq-qs}
\end{equation}
In particular $2\dim(D_\sigma)\leq r_\sigma+|\Sigma(1)|$.
\end{proposition}
\begin{proof}
Let $R(X)={\mathbb C}[x_1,\ldots,x_r]$ and $D_\sigma$ be a subset of $B^*(\mathcal L)$
such that $\dim(D_\sigma)-k_\sigma(\mathcal L)>r_\sigma$.
Thus the general $f$ in $\mathcal L$ can be written as $f=\sum_{\rho\in \sigma(1)}x_\rho f_\rho$ and
$S(f)\cap D_\sigma$ is equal to $D_\sigma\cap V(f_\rho: \rho \in \sigma(1))$.
Observe that all $f_{\rho}$ are not constant by the hypothesis on $\mathcal L$.
Let $S$ be the closure of an irreducible component of
$S(f)\cap D_\sigma$ and
$V=(\bar X\backslash \hat X)\cap S$.
We have that
\[
\dim(S)\geq \dim(D_\sigma)-k_\sigma(\mathcal L)>r_\sigma\geq \dim(V).
\]
Thus $V$ is properly contained in $S$,
so that $\mathcal L$ is not quasismooth.
The last statement follows from \eqref{eq-qs} since $k_\sigma(\mathcal L)\leq |\Sigma(1)|-\dim(D_\sigma)$.
\end{proof}
\begin{remark} Let $\mathcal L$ be a monomial linear system
on a toric variety $X$ whose monomial basis contains a generator $x_0$ of $R(X)$.
In this case, a general element of $\mathcal L$
is defined by an equation of the form $f=\alpha x_0+g$, where $\alpha$ is a constant and $g$ is a polynomial not containing $x_0$.
Either by a direct computation or applying Theorem \ref{thm1}
one easily shows that $\mathcal L$ is quasismooth.
\end{remark}
\begin{corollary}\label{wps}
Let $X=X_{\Sigma}$ be an $n$-dimensional fake weighted projective space and
$\mathcal L$ be a monomial linear system on $X$ whose monomial basis does not contain any generator of $R(X)$.
Then $\mathcal L$ is quasismooth if and only if
$\dim(D_\sigma)-k_\sigma(\mathcal L)\leq 0$ for any $\sigma\in \Sigma$ such that $D_\sigma\subseteq B^*(\mathcal L)$.
\end{corollary}
\begin{proof}
This follows from Corollary \ref{cor-qs} and Proposition \ref{thm-qs}
since $r_\sigma=0$ and, since $X$ is ${\mathbb Q}$-factorial,
then $\dim p(D_\sigma)=\dim(D_\sigma)-{\rm rk}\, {\rm Cl}(X)=\dim(D_\sigma)-1$.
\end{proof}
A different formulation is the following (see also \cite[Theorem 8.1]{F}).
\begin{corollary}\label{cor:wps}
Let $X=X_{\Sigma}$ be an $n$-dimensional fake weighted projective space and
$\mathcal L$ be a monomial linear system on $X$ with matrix of exponents $A$
whose monomial basis does not contain any generator of $R(X)$.
Then $\mathcal L$ is quasismooth if and only if
for any subset $\gamma\subseteq \{0,\dots,n\}$ either $A$ has a row whose entries indexed by $\gamma$ are all zero,
or there exists a non-empty submatrix $S=A_{I,\gamma}$ of $A$ such that $\sum_{j\in \gamma}S_{ij}= 1$ for all $i \in I$ and such that ${\rm rk} (S)\geq n+1-|\gamma|$.
\end{corollary}
\begin{proof}
Given $\gamma$ as in the statement, this identifies $D_{\gamma}=\{x_i=0: i\in \gamma\}\cap \hat X$.
Observe that $D_{\gamma}$ is not contained in the base locus of $p^*\mathcal L$
if and only if there exists a monomial in $V_{\mathcal L}$ not using the variables indexed by $\gamma$,
i.e. $A$ has a row whose entries indexed by $\gamma$ are all zero.
Moreover, since $\sum_{j\in \gamma}S_{ij}= 1$ for all $i \in I$, there is exactly one non zero
entry in each row of $S$, so that ${\rm rk}(S)$ equals the number of non zero columns, which is $k_{\gamma}(\mathcal L)$.
The proof thus follows from Corollary \ref{wps} observing that $\dim(D_{\gamma})=n-|\gamma|$.
\end{proof}
We now consider the case when the general element of $\mathcal L$
is of Delsarte type, i.e. with the number of monomials equal to the number of variables.
In particular $\Delta(\mathcal L)$ is a simplex if it is full-dimensional.
We give an alternative proof of the following known result (see \cite{kreuzerskarke, HK}).
\begin{corollary}\label{cor-simpl}
Let $\mathcal L$ be a monomial linear system of Delsarte type
on a fake weighted projective space $X$ whose degree is bigger
than the degree of each variable in $R(X)$.
Then $\mathcal L$ is quasismooth if and only if the general element $f$ of $\mathcal L$
can be written as sum of disjoint invertible polynomials of the following form
(called atomic types):
\begin{align*}
f_{Fermat}&:=x^a,\\
f_{chain}&:=x_1^{a_1}x_2+x_2^{a_2}x_3+\ldots+x_k^{a_k},\\
f_{loop}&:=x_1^{a_1}x_2+x_2^{a_2}x_3+\ldots+x_k^{a_k}x_1,
\end{align*}
with $a,a_i>1$.
\end{corollary}
\begin{proof}
Let $A=(a_{ij})$ be the matrix of exponents associated to $\mathcal L$.
We will show that if $\mathcal L$ is quasismooth then the matrix $A$, after
reordering its rows, satisfies
\begin{enumerate}[1.]
\item $a_{ii}>1$ for all $i$,
\item for all $i_0$, $\sum_{j\neq i_0} a_{i_0 j}\leq 1$,
\item for all $j_0$, $\sum_{i\neq j_0} a_{i j_0}\leq 1$.
\end{enumerate}
An easy algorithm thus gives the necessary part of the proof.
The sufficiency part is obvious.
Let $\gamma=\{0,\dots, n\}\backslash \{i_0\}$. By Corollary \ref{cor:wps}
there exists a row $j$ with $\sum_{i\not=i_0} a_{ji}\leq 1$.
By the hypothesis on the degree, this implies that $a_{ji_{0}}>1$
and this is the only row having an entry bigger than $1$ in position $i_0$.
This gives $1.$ and $2.$ up to reordering the rows of $A$.
Now assume that $\sum_{i\neq j_0} a_{ij_0}> 1$ for some index $j_0$.
This implies that $a_{rj_0}=a_{sj_0}=1$ for some $r,s\not=j_0$
and that $j_0$ is the only column containing $1$ in both positions $r$ and $s$
by 2.
This contradicts Corollary \ref{cor:wps} when $\gamma=\{0,\dots,n\}\backslash \{r,s\}$.
\end{proof}
\begin{remark}
Since fake weighted projective spaces are finite quotients of weighted projective spaces
satisfying the hypothesis of Proposition \ref{fq} by \cite[Theorem 6.4]{Bu},
then quasismoothness in a fake weighted projective space can be checked in its
weighted projective space covering.
\end{remark}
\section{Quasismooth Calabi-Yau hypersurfaces}\label{sec-cy}
The concept of quasismoothness appeared in the literature
on Calabi-Yau varieties since it provides a sufficient condition
to have good singularities. We recall that a normal projective variety $Y$ of
dimension $n$ is a {\em Calabi-Yau variety} if it has canonical singularities,
$K_Y \cong {\mathcal O}_Y$ and $h^i(Y,{\mathcal O}_Y ) = 0$ for $0 < i < n$.
Moreover, a hypersurface $Y$ of a projective toric variety $X$ is called
{\em well-formed} if ${\rm codim}_Y(Y\cap {\rm Sing}(X))\geq 2$,
where ${\rm Sing}(X)$ is the singular locus of $X$.
\begin{proposition}
Let $Y$ be an anticanonical hypersurface of a projective toric variety $X$.
If $Y$ is quasismooth and well-formed, then $Y$ is a Calabi-Yau variety.
\end{proposition}
\begin{proof}
By \cite[Proposition 2.12]{ACG}, $Y$ has canonical singularities and $K_Y \cong {\mathcal O}_Y$.
Moreover the exact sequence of sheaves
\[
\xymatrix{
0\ar[r] & {\mathcal O}_{X}(K_X)\ar[r] & {\mathcal O}_X\ar[r] & {\mathcal O}_Y\ar[r]& 0,
}
\]
gives the exact sequence
\[
\xymatrix{
\dots\ar[r] & H^i(X,{\mathcal O}_X)\ar[r] & H^i(Y,{\mathcal O}_Y)\ar[r] &H^{i+1}(X,{\mathcal O}_X(K_X))\ar[r] &\dots.
}
\]
Since $h^i(X,{\mathcal O}_X)=0$ for $i>0$ and
$h^{i+1}(X,{\mathcal O}_X(K_X))=h^{\dim(X)-i-1}(X,{\mathcal O}_X)$ for $i<\dim(X)-1$
by Serre-Grothendieck duality, we have that
$h^i(Y,{\mathcal O}_Y)=0$ for $0<i<\dim(Y)$.
Thus $Y$ is a Calabi-Yau variety.
\end{proof}
A natural question, which is the original motivation of the present work,
is how quasismoothness behaves with respect to known dualities between families
of Calabi-Yau varieties.
For example, we recall the following fact, which is the basis of the
Berglund-H\"ubsch-Krawitz duality \cite{BH, Kr}.
\begin{proposition}
Let $\mathcal L$ be a monomial linear system
of Delsarte type in a weighted projective space $X$
with matrix of exponents $A$.
There exist a weighted projective space $X'$
and a monomial linear system $\mathcal L'$ on $X'$
whose matrix of exponents is $A^T$.
Moreover, $\mathcal L$ is quasismooth if and only if the same holds for $\mathcal L'$.
\end{proposition}
\begin{proof}
The existence of $X'$ and $\mathcal L'$ is constructive, see \cite[Section 4.1]{ACG}.
Quasismoothness of $\mathcal L'$ follows from (the proof of) Corollary \ref{cor-simpl}.
\end{proof}
In \cite{ACG} Berglund-H\"ubsch-Krawitz duality has been generalized
to give a duality between pairs of polytopes. We recall the main definitions and results.
Given a lattice $M$ and a polytope $P\subset M_{{\mathbb Q}}$ containing the origin in its interior, we will denote by
$P^*$ the polar of $P$ as in \cite[\S 2.2]{CLS}.
\begin{definition} A polytope $P\subset M_{{\mathbb Q}}$ with vertices in $M$
is \emph{canonical} if ${\rm Int}(P)\cap M=\{0\}$.
A pair of polytopes $(P_1,P_2)$ with $P_1\subseteq P_2\subset M_{{\mathbb Q}}$ is a \emph{good pair}
if $P_1$ and $P_2^*$ are canonical.
\end{definition}
To any good pair $(P_1,P_2)$ we can associate a monomial linear system $\mathcal L_1$ in a toric variety $X$
as follows. Let $X=X_{P_2}$ be the toric variety defined by the normal fan of $P_2$.
Then $P_2$ is the anticanonical polytope of $X$ and its lattice points give a monomial basis of the anticanonical linear system $|-K_X|$.
The polytope $P_1$ defines a monomial linear subsystem $\mathcal L_1$ of $|-K_X|$ generated by the monomials
corresponding to its lattice points. Moreover the following holds.
\begin{theorem}\cite[Theorem 1]{ACG}\label{thm1ACG}
Let $(P_1,P_2)$ be a good pair of polytopes and let $X=X_{P_2}$ be the
toric variety defined by the normal fan of $P_2$.
Then $X$ is a ${\mathbb Q}$-Fano toric variety and the general element of the
monomial linear system associated to $P_1$ is a Calabi-Yau variety.
\end{theorem}
\begin{remark}
Observe that the polytope $P_1$ in the statement of Theorem \ref{thm1ACG} is not exactly
the Newton polytope $\Delta(\mathcal L_1)$ of the monomial linear system as defined in Section \ref{sec-pre}
but $\Delta(\mathcal L_1)=\alpha(P_1)+(1,\dots,1)$, where $\alpha$ is the dual of the map ${\mathbb Z}^r\to N$, sending $e_i$ to the primitive generator of the $i$-th ray of the fan of $X$.\end{remark}
The following shows that quasismooth monomial linear systems
in ${\mathbb Q}$-Fano toric varieties give rise to good pairs.
\begin{proposition} \label{gp}
Let $P_2\subset M_{\mathbb Q}$ be a polytope such that $P_2^*$ is canonical and
let $P_1\subseteq P_2$ be a lattice polytope.
If the monomial linear system $\mathcal L_1$ associated to $P_1$
is quasismooth, then $(P_1,P_2)$ is a good pair (i.e. $P_1$ is a canonical polytope).
\end{proposition}
\begin{proof}
By \cite[Corollary 1.6]{ACG} it is enough to prove
that the origin is an interior point of $P_1$.
Assume the contrary, i.e. that the origin is contained
in a facet of $P_1$. Then there exists $n\in N_{{\mathbb Q}}$
such that $(n,m)\geq 0$ for all $m\in P_1$.
The vector $n$ is contained in a cone over the faces
of $P_2^*$, thus we can write
$n=\sum_{i\in I}\alpha_i\rho_i$ with $\alpha_i\in {\mathbb Q}$
positive coefficients.
Thus
\[
(n,m)\geq 0\Leftrightarrow \sum_{i\in I}\alpha_i(m,\rho_i)\geq 0\Leftrightarrow \sum_{i\in I}\beta_i(m,\rho_i)\geq 0,
\]
where $\beta_i$ are positive integers.
We recall that $(m,\rho_i)+1=a_i$ is the exponent of $x_i$ in the monomial corresponding to $m$.
Thus, if $\beta=\max\{\beta_i\}_{i\in I}$, the above inequality gives
\[
\sum_{i\in I}a_i\geq \frac{1}{\beta}\sum_{i\in I}\beta_i.
\]
Observe that $D_I=\{x_i=0: i \in I\}$ is contained in the base locus of
$\mathcal L_1$ since
the above inequality implies that at least one of the exponents
$a_i, i\in I$ is positive in any monomial of the basis of $\mathcal L_1$.
If $|I|=1$, then $\mathcal L_1$ contains the divisor $x_i=0$
in its base locus, thus clearly it is not quasismooth.
On the other hand, if $|I|>1$, the right hand side of the inequality is bigger than $1$.
This implies that $\mathcal L_1$ is not quasismooth by Corollary \ref{empty}.
\end{proof}
A good pair $(P_1,P_2)$ has a natural dual $(P_2^*,P_1^*)$, which is still a good pair.
This gives rise to a duality between the corresponding linear systems $\mathcal L_1$
and $\mathcal L_2^*$ of Calabi-Yau varieties on $X_{P_2}$ and $X_{P_1^*}$ respectively.
In \cite[Theorem 2]{ACG} it is proved that, in case $P_1,P_2$ are both simplices, this
duality is exactly Berglund-H\"ubsch-Krawitz duality.
With this in mind, we asked ourselves: is quasismoothness preserved by this duality? More precisely:
does $\mathcal L_1$ quasismooth imply $\mathcal L_2^*$ quasismooth?
Unfortunately the answer is no, as the following example shows.
\begin{example}
Let $X={\mathbb P}^2\times{\mathbb P}^1$ and let $P_2$ be its anticanonical polytope. Let $\mathcal L_1$ be the quasismooth linear system as in
Example \ref{ex1}. Thus $(\Delta(\mathcal L_1),P_2)$ is a good pair by Proposition \ref{gp}.
The toric variety $X_{\Delta(\mathcal L_1)^*}$ has Cox ring with variables $y_1,\dots, y_8$
with integer grading given by the matrix
\[
\begin{pmatrix*}[c]
0 & 1 & 0 & 2& 3& 0& 0& 0\\
0& 1& 1& 0& 0& 1& 1& 0\\
0& 1& 1& 0& 1& 0& 0& 1\\
0& 1& 1& 1& 2& 1& 0& 0\\
1& 0& 0& 1& 0& 1& 1& 0
\end{pmatrix*},
\]
quotient grading $1/2( 0, 0, 0, 0, 0, 0, 1, 1 )$ and
the components of its irrelevant ideal are:
\[
\begin{array}{c}
(y_7, y_6, y_4), (y_5, y_4), (y_6, y_3), (y_8, y_5, y_3), (y_7, y_2),\\
(y_8, y_5, y_2), (y_8, y_3, y_2), (y_5, y_3, y_2), (y_8, y_1),\\
(y_7, y_6, y_1), (y_7, y_4, y_1), (y_6, y_4, y_1).
\end{array}
\]
The dual pair $(P_2^*,\Delta(\mathcal L_1)^*)$ gives a monomial linear system $\mathcal L_2^*$ generated by
\[
y_2^2y_4^2y_6^2y_8^2,\
y_3^3y_4^3y_7y_8, \
y_5^2y_6^2y_7^2y_8^2,\
y_1^3y_2^3y_5y_6, \
y_1^2y_3^2y_5^2y_7^2.
\]
One can observe that $\mathcal L_2^*$ is not quasismooth at the point $(1, 1, 1, 1, 0, 0, 0, 0)$.
\end{example}
\section{Applications in low dimension}\label{sec-app}
\begin{theorem} A monomial linear system $\mathcal L$ of
curves in a projective toric surface $X$ with monomial basis $S$
is quasismooth if and only if either $\mathcal L$ is base point free or
the following hold
\begin{enumerate}
\item if $\{x_i=0\}\subseteq B^*(\mathcal L)$ then $S$ contains a unique monomial where $x_i$ appears with exponent one;
\item if $\{x_i=x_j=0\}\subseteq B^*(\mathcal L)$ then $S$ contains a monomial $x^a$ where $a_i+a_j=1$.
\end{enumerate}
\end{theorem}
\begin{proof}
Assume $\{x_i=0\}\subseteq B^*(\mathcal L)$ and let $\rho_i$ be the ray of the fan of $X$ corresponding to $x_i$.
By Theorem \ref{thm1} the polytope $\Delta_{\rho_i}^{\rho_i}(\mathcal L)$ must be a point, i.e.
$S$ contains a unique monomial where $x_i$ appears with exponent one.
If $\{x_i=x_j=0\}\subseteq B^*(\mathcal L)$ and $\sigma=\langle \rho_i,\rho_j\rangle$,
the dimension of $\Delta_\sigma^{\rho_i},\Delta_\sigma^{\rho_j}$ is at most 0 by Remark \ref{ambient}.
By Theorem \ref{thm1} one of them has to be not empty.
Thus there must be a monomial in $S$
containing one variable among $x_i,x_j$ with exponent one
and not containing the other one.
\end{proof}
Observe that a quasismooth curve is smooth,
since quasismooth implies normal (see the proof of \cite[Proposition 2.4]{ACG}).
However the converse is false, as the following example shows.
\begin{example}
Let $X={\mathbb P}(2,3,5)$, whose fan can be taken to have rays $e_1,e_1+5e_2, -e_1-3e_2$,
and let $Y$ be the curve defined by $x^3-y^2=0$.
Clearly the curve is not quasismooth. In order to prove that $Y$ is smooth, it is enough
to check smoothness at the point $(0,0,1)$.
The affine chart containing such point is $U_{\sigma}$,
where $\sigma={\rm cone}(e_1,e_1+5e_2)$, thus it is given by the closure of the
image of the map
\[
\varphi_{\sigma}:({\mathbb C}^*)^2\to {\mathbb C}^3,\ (t_1,t_2)\mapsto (t_1,t_2, t_1^5t_2^{-1}).
\]
An easy computation shows that the curve $Y$ is the closure of the image of the
one parameter subgroup $t\mapsto (t,t^3)$,
thus the curve $Y\cap U_{\sigma}$ is the closure of the image of the map $t\mapsto (t,t^3,t^2)$,
which is smooth.
\end{example}
\begin{theorem} A monomial linear system $\mathcal L$ of
surfaces in a simplicial projective toric threefold $X$ with monomial basis $S$
is quasismooth if and only if either $\mathcal L$ is base point free or
the following hold
\begin{enumerate}
\item if $\{x_i=0\}\subseteq B^*(\mathcal L)$ then $S$ contains a unique monomial where $x_i$ appears with exponent one;
\item if $\{x_i=x_j=0\}\subseteq B^*(\mathcal L)$ then
\begin{itemize}
\item either $S$ contains a monomial $x^{a}$ with $a_i=1, a_j=0$
and a monomial $x^b$ with $b_i=0,b_j=1$,
\item or
the exponent of $x_i$ is either $0$ or $\geq 2$ in each monomial of $S$ and
there exists a unique monomial $m=x^a$ with $a_i=0, a_j=1$.
\end{itemize}
\item if $\{x_i=x_j=x_k=0\}\subseteq B^*(\mathcal L)$ then $S$ contains a monomial $x^a$ where $a_i+a_j+a_k=1$.
\end{enumerate}
\end{theorem}
\begin{proof}
Assume $\{x_i=0\}\subseteq B^*(\mathcal L)$ and let $\rho_i$ be the ray of the fan of $X$ corresponding to $x_i$.
By Theorem \ref{thm1} the polytope $\Delta_{\rho_i}^{\rho_i}(\mathcal L)$ must be a point, i.e.
$S$ contains a unique monomial where $x_i$ appears with exponent one.
If $\{x_i=x_j=0\}\subseteq B^*(\mathcal L)$ and $\sigma=\langle \rho_i,\rho_j\rangle$,
we distinguish whether $\Delta_\sigma^{\rho_i},\Delta_\sigma^{\rho_j}$ are both not empty
or one of them is empty, say $\Delta_\sigma^{\rho_i}=\emptyset$.
The first situation implies the existence of a monomial containing $x_i$ with exponent one and not containing $x_j$
and of a monomial containing $x_j$ with exponent one and not containing $x_i$.
In the second case by Theorem \ref{thm1} the only non-empty polytope $\Delta_\sigma^{\rho_j}$
must be a point, i.e. $S$ contains a unique monomial where $x_j$ appears with exponent one.
If $\{x_i=x_j=x_k=0\}\subseteq B^*(\mathcal L)$ and $\sigma=\langle \rho_i,\rho_j,\rho_k\rangle$,
by Theorem \ref{thm1} one of the polytopes $\Delta_{\sigma}^{\rho_i}(\mathcal L)$, which are either points or empty by Remark \ref{ambient},
must be not empty.
\end{proof}
\end{document} |
\begin{document}
\begin{abstract}
It is well known that in every inverse semigroup the binary operation and the unary operation of inversion satisfy the following three identities:
\[
\quad x=(xx')x \qquad
\quad (xx')(y'y)=(y'y)(xx') \qquad
\quad (xy)z=x(yz'')\,.
\] The goal of this note is to prove the converse, that is, we prove that an algebra of type $\langle 2,1\rangle$ satisfying these three identities is an inverse semigroup and the unary operation coincides with the usual inversion on such semigroups.
\end{abstract}
\title{An Elegant 3-Basis for Inverse Semigroups}
\section{Introduction}
\seclabel{intro}
In the language of a binary operation $\cdot$ and a unary operation ${}'$, a set of $n$ independent identities is an $n$-basis
for inverse semigroups, if those identities define the variety of inverse semigroups considered as algebras $(S,\cdot,{}')$
of type $\langle 2,1\rangle$, where the unary operation coincides with the natural inversion.
Denoting by $x'$ the inverse of an element $x$ in an inverse semigroup, we then have $x=(xx')x$ (as inverse semigroups are
regular semigroups) and $(xx')(y'y)=(y'y)(xx')$ (as both $xx'$ and $y'y$ are idempotents, and idempotents commute in inverse
semigroups). Thus we might be tempted to think that the following identities provide a $3$-basis for inverse semigroups:
\begin{equation}
\eqnlabel{candidates}
x=(xx')x, \qquad (xx')(y'y)=(y'y)(xx') \qquad \text{and}\qquad (xy)z=x(yz)\,.
\end{equation}
However, for $S=\{0,1\}$ with $xy=0$, except for $11=1$, and defining $x'=1$, we have the previous identities satisfied,
but $0'\neq 0'00'$ and hence $'$ does not coincide with the natural inversion in $(S,\cdot)$.
B.M. Schein \cite{Schein} repaired the {\em defect} of \eqnref{candidates} by adjoining two additional identities: $x''=x$ and $(xy)'=y'x'$. The resulting set of five identities indeed provides a $4$-basis for inverse semigroups. (The identity $(xy)'=y'x'$ is dependent upon the others, and hence can be discarded. However it is worth observing that in the same paper Schein also provided a $5$-basis using $xx'x'x=x'xxx'$ instead of $xx'y'y=y'yxx'$; see \cite[Theorem 1.6]{Schein} and
\cite[p. 15, Ex. 20(b)]{higgins}.) Therefore the natural question to ask would be: \emph{is it possible to find a 3-basis for inverse semigroups?} This question was first answered in the affirmative in \cite{AM}, but the $3$-basis given there requires an extremely complicated proof (it is still an open problem to provide a reasonable proof for that result).
The aim of this note is to repair \eqnref{candidates} by providing an easy, transparent and
\emph{elegant} $3$-basis for inverse semigroups.
\begin{main}
Let $(S,*,')$ be an algebra of type $\langle 2,1\rangle$. Then this algebra is an inverse semigroup and the unary operation coincides with the usual inversion on such semigroups if and only if
\[
(\mathbf{E}_1)\quad x=(xx')x, \qquad
(\mathbf{E}_2)\quad (xx')(y'y)=(y'y)(xx'), \qquad
(\mathbf{E}_3)\quad (xy)z=x(yz'')\,.
\]
\end{main}
\section{Proof of the Theorem}
\seclabel{Proof}
In this section we prove that the identities ($\mathbf{E}_1$)--($\mathbf{E}_3$) imply Schein's $4$-basis for inverse semigroups. As the converse is obvious, the equivalence of the two bases will follow.
Throughout this section let $(S,\cdot,{}')$ be an algebra of type $\langle 2,1\rangle$ satisfying ($\mathbf{E}_1$)--($\mathbf{E}_3$). We start by proving a few handy identities.
\begin{lemma} The following identities hold.
\begin{align}
x'x''&= x'x \eqnlabel{lemma1} \\
(xy')y&=x(y'y) \eqnlabel{lemma5}\\
x & =x(x'x) \eqnlabel{lemma3b}\\
x''&= (x''x')x = x''(x'x) \eqnlabel{lemma6}\\
x'''x&=x'''x''=x''' x^{(4)} \eqnlabel{lemma50}
\end{align}
\end{lemma}
\begin{proof}
Firstly, for \eqnref{lemma1}, we have
\[
x'x'' \byx{(\mathbf{E}_1)} x'[(x''x''')x'']
\byx{(\mathbf{E}_3)} [x'(x''x''')]x \byx{(\mathbf{E}_3)} [(x'x'')x']x \byx{(\mathbf{E}_1)} x'x\,.
\]
Next, for \eqnref{lemma5}, we compute $(xy')y \byx{(\mathbf{E}_3)} x(y'y'') \by{lemma1} x(y'y)$.
Regarding \eqnref{lemma3b}, we have $x(x'x) \by{lemma5} (xx')x \byx{(\mathbf{E}_1)} x$.
Then for \eqnref{lemma6}, we compute
$x'' \by{lemma3b} x''(x'''x'') \byx{(\mathbf{E}_3)} (x''x''')x \by{lemma1} (x''x')x \by{lemma5} x''(x'x)$.
Finally, for \eqnref{lemma50}, we have
\[
x'''x \by{lemma3b} [x'''(x''''x''')]x \byx{(\mathbf{E}_3)} x'''[(x''''x''')x'']
\by{lemma6} x''' x'''' \by{lemma1} x''' x''\,.
\]
\end{proof}
The next two lemmas are the key tools in the proof that the identities ($\mathbf{E}_1$)--($\mathbf{E}_3$)
imply $x''=x$.
\begin{lemma}
\label{396}
$(x'x)x'''=x'''$.
\end{lemma}
\begin{proof}
We start with two observations. Firstly,
as
\[
[x(y''' y)]y' \byx{(\mathbf{E}_3)} x[(y'''y)y''']
\by{lemma50} x[(y'''y'''')y'''] \byx{(\mathbf{E}_1)} xy'''\,,
\]
we have
\begin{equation}
\eqnlabel{136}
(x(y''' y))y' = xy'''\,.
\end{equation}
Secondly,
\[
(x'x)(x'''x) \by{lemma50} (x'x)(x'''x'''')
\byx{(\mathbf{E}_2)} (x'''x'''')(x'x) \by{lemma50} (x'''x'')(x'x)
\by{lemma5} [(x'''x'')x']x \by{lemma6} x'''x\,,
\]
so that
\begin{align}
(x'x)(x'''x)&=x'''x. \eqnlabel{148}
\end{align}
Now we have all we need to prove the lemma.
\[
x'''\by{lemma6}(x'''x'')x'\by{lemma50}(x'''x)x'\by{148}[(x'x)(x'''x)]x'\by{136}(x'x)x'''.
\]
\end{proof}
\begin{lemma}\label{lemma16}
$(xy)z'=x(yz')$.
\end{lemma}
\begin{proof}
We start by proving that
\begin{equation}
\eqnlabel{455}
x'''=x'\,.
\end{equation}
In fact we have $xx' \by{lemma3b} [x(x'x)]x' \byx{(\mathbf{E}_3)} x[(x'x)x'''] = xx'''$, using
Lemma \ref{396} in the last equality. Thus
\begin{equation}
\eqnlabel{452}
xx'''=xx'\,.
\end{equation}
Now, by Lemma \ref{396},
\[
x'''=(x'x)x''' \by{lemma1} (x'x'')x'''
\byx{(\mathbf{E}_3)} x'(x''x^{(5)}) \by{452} x'(x''x''') \byx{(\mathbf{E}_3)}
(x'x'')x' \byx{(\mathbf{E}_1)} x'\,.
\]
Replacing $z$ by $z'$ in ($\mathbf{E}_3$), we get
\[
(xy)z'=x(yz''')=x(yz')\,,
\]
where the last equality follows from \eqnref{455}. The lemma is proved.
\end{proof}
We have everything we need to prove our main result.
\begin{theorem}
The identities \emph{(}$\mathbf{E}_1$\emph{)}--\emph{(}$\mathbf{E}_3$\emph{)} imply $x'' = x$ and the associative law.
\end{theorem}
\begin{proof}
First, we have
\begin{align*}
x''x' &\by{lemma6} [(x''x')x]x' = (x''x')(xx') \byx{(\mathbf{E}_2)} (xx')(x''x')\\
&= [(xx')x'']x' = [x(x'x'')]x' = x[(x'x'')x'] \byx{(\mathbf{E}_1)} xx'\,,
\end{align*}
where we have used Lemma \ref{lemma16} in the unlabeled equalities. Thus
\begin{equation}
\eqnlabel{hmph}
x''x' = xx'\,.
\end{equation}
Now $x'' \by{lemma6} (x''x')x \by{hmph} (xx')x \byx{(\mathbf{E}_1)} x$, as claimed.
Associativity now follows easily: $(xy)z \byx{(\mathbf{E}_1)} x(yz'') = x(yz)$.
\end{proof}
\section{Other Sets of Axioms}
It is natural to ask how sensitive the axioms ($\mathbf{E}_1$)--($\mathbf{E}_3$) are
to certain modifications, such as shifting the parentheses in ($\mathbf{E}_1$)
or changing the placement of the double inverse in ($\mathbf{E}_3$).
If, for instance,
we leave ($\mathbf{E}_2$) intact, replace ($\mathbf{E}_1$) with $x(x'x) = x$ and replace ($\mathbf{E}_3$) with
$(x''y)z = x(yz)$, then we obtain a set of identities which are dual to
($\mathbf{E}_1$)--($\mathbf{E}_3$). By an argument dual to that in \S\secref{Proof},
this set of identities is another $3$-basis for inverse semigroups.
Thus to dispense with these sorts of obvious dualities, we will assume that
both ($\mathbf{E}_1$) and ($\mathbf{E}_2$) are left intact, and consider only alternative placement
of the double inverse in ($\mathbf{E}_3$). Using \textsc{Prover9}, we found that each of
the following identities can substitute for ($\mathbf{E}_3$) to give another $3$-basis for
inverse semigroups:
\begin{align*}
(xy)z &= x''(yz) \hspace{2cm} (xy)z = x(y''z) \\
x(yz) &= (xy'')z \hspace{2cm} x(yz) = (xy)z''.
\end{align*}
The remaining possibility, $x(yz) = (x''y)z$, does not work. Using \textsc{Mace4}, we found
the counterexample given by the following tables. It satisfies ($\mathbf{E}_1$), ($\mathbf{E}_2$) and
$x(yz) = (x''y)z$, but the binary operation is not associative
($(0\cdot 0)\cdot 0 = 1\cdot 0 = 7\neq 6 = 0\cdot 1 = 0\cdot (0\cdot 0)$), and
the unary operation clearly fails to satisfy $x'' = x$.
\begin{table}[htb]
\centering
\begin{tabular}{r|cccccccccccc}
$\cdot$ & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & 11\\
\hline
0 & 1 & 6 & 5 & 7 & 3 & 8 & 4 & 2 & 0 & 4 & 4 & 4 \\
1 & 7 & 2 & 6 & 0 & 8 & 4 & 5 & 1 & 3 & 5 & 5 & 5 \\
2 & 5 & 8 & 3 & 6 & 1 & 7 & 0 & 4 & 2 & 0 & 0 & 0 \\
3 & 8 & 0 & 7 & 4 & 6 & 2 & 1 & 3 & 5 & 1 & 1 & 1 \\
4 & 3 & 7 & 1 & 8 & 5 & 6 & 2 & 0 & 4 & 2 & 2 & 2 \\
5 & 6 & 4 & 8 & 2 & 7 & 0 & 3 & 5 & 1 & 3 & 3 & 3 \\
6 & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 6 & 6 & 6 \\
7 & 4 & 3 & 0 & 5 & 2 & 1 & 7 & 8 & 6 & 7 & 7 & 7 \\
8 & 2 & 5 & 4 & 1 & 0 & 3 & 8 & 6 & 7 & 8 & 8 & 8 \\
9 & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & 6 \\
10 & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 10 & 9 & 6 \\
11 & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 6 & 6 & 11
\end{tabular}
\begin{tabular}{r|cccccccccccc}
${}'$ & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & 11 \\
\hline
& 1 & 2 & 3 & 4 & 5 & 0 & 6 & 8 & 7 & 9 & 10 & 11
\end{tabular}
\end{table}
\section{Problem}
\emph{Does there exist a $2$-basis for inverse semigroups?}
We guess that the answer is no.
\begin{acknowledgment}
We are pleased to acknowledge the assistance of the automated deduction tool
\textsc{Prover9} and the finite model builder \textsc{Mace4}, both developed by
McCune \cite{McCune}.
The first author was partially supported by FCT and FEDER, Project POCTI-ISFL-1-143 of Centro de Algebra da Universidade de Lisboa, and by FCT and PIDDAC through the project PTDC/MAT/69514/2006.
\end{acknowledgment}
\end{document} |
\begin{document}
\setcounter{page}{1}
\title[ The Stein restriction problem on the torus ]{A note on the Stein restriction conjecture and the restriction problem on the torus }
\author[D. Cardona]{Duv\'an Cardona}
\address{
Duv\'an Cardona:
\endgraf
Department of Mathematics
\endgraf
Pontificia Universidad Javeriana.
\endgraf
Bogot\'a
\endgraf
Colombia
\endgraf
{\it E-mail address} {\rm [email protected]}
}
\subjclass[2010]{42B37.}
\keywords{Stein Restriction Conjecture, Fourier Analysis, Clifford's torus}
\begin{abstract} In this note we discuss the Stein restriction problem on an arbitrary $n$-torus, $n\geq 2$. In contrast with the usual cases of the sphere, the parabola and the cone, we provide necessary and sufficient conditions on the Lebesgue indices, by finding conditions which are independent of the dimension $n$.
\end{abstract} \maketitle
\section{Introduction}
This note is devoted to the Stein restriction problem on the torus $\mathbb{T}^n,$ $n\geq 2.$ In harmonic analysis, the Stein restriction problem for a smooth hypersurface $S\subset \mathbb{R}^n,$ asks for the conditions on $p$ and $q,$ $1\leq p,q<\infty,$ satisfying
\begin{equation}\label{Stein}
\Vert \hat{f}|_{S}\Vert_{L^q({S},d\sigma)}:= \left(\int\limits_{S}|\hat{f}(\omega)|^qd\sigma(\omega)\right)^{\frac{1}{q}}\leq C\Vert f\Vert_{L^p(\mathbb{R}^n)},
\end{equation} where $d\sigma$ is a surface measure associated to $S,$ the constant $C>0$ is independent of $f,$ and $\widehat{f}|_{S}$ denotes the Fourier restriction of $f$ to $S,$ where
\begin{equation}
\hat{f}(\xi)=\int\limits_{\mathbb{R}^n}e^{-i2\pi x\cdot \xi} f(x)dx,
\end{equation} is the Fourier transform of $f.$ Let us note that for $p=1,$ the Riemann-Lebesgue theorem implies that $\hat{f}$ is a continuous function on $\mathbb{R}^n$ and we can restrict $\hat{f}$ to every subset $S\subset \mathbb{R}^n.$ On the other hand, if $f\in L^2(\mathbb{R}^n),$ the Plancherel theorem gives $\Vert f \Vert_{L^2(\mathbb{R}^n)}=\Vert \hat{f} \Vert_{L^2(\mathbb{R}^n)}$ and the Stein restriction problem is trivial by considering that every hypersurface is a subset in $\mathbb{R}^n$ with vanishing Lebesgue measure. So, for $1<p<2,$ a general problem is to find those hypersurfaces $S,$ where the Stein restriction problem has sense. However, the central problem in the restriction theory is the following conjecture (due to Stein). It is of particular interest because it is related to Bochner-Riesz multipliers and the Kakeya conjecture.
\begin{conjecture}\label{ConjectureofStein}
Let $S=\mathbb{S}^{n-1}=\{x\in \mathbb{R}^n:|x|=1\}$ be the $(n-1)$-sphere and let $d\sigma$ be the corresponding surface measure. Then \eqref{Stein} holds true if and only if $1\leq p<\frac{2n}{n+1}$ and $q\leq p'\cdot \frac{n-1}{n+1},$ where $p'=p/(p-1).$
\end{conjecture}
That the inequalities $1\leq p<\frac{2n}{n+1}$ and $q\leq p'\cdot \frac{n-1}{n+1},$ are necessary conditions for Conjecture \ref{ConjectureofStein} is a well known fact. In this setting, a celebrated result by Tomas and Stein (see e.g. Tomas \cite{Tomas}) shows that
\begin{equation}
\Vert \hat{f}|_{\mathbb{S}^{n-1}} \Vert_{L^2(\mathbb{S}^{n-1},d\sigma)}\leq C_{p,n}\Vert f\Vert_{L^{p}(\mathbb{R}^n)}
\end{equation}
holds true for every $1\leq p\leq \frac{2n+2}{n+3}.$ Surprisingly, a theorem due to Bourgain shows that the Stein restriction conjecture is true for $1<p<p_n$ where $p_n$ is defined inductively and $\frac{2n+2}{n+3}<p_n<\frac{2n}{n+1}.$ For instance,
$p_3 = 31/23$. We refer the reader to Tao \cite{Tao2003} for a good introduction and some advances to the restriction theory.
In this paper we will consider the $n$-dimensional torus $\mathbb{T}^n=(\mathbb{S}^1)^n$ modelled on $\mathbb{R}^{2n},$ this means that
\begin{equation}\label{DefiCliffordTorues}
\mathbb{T}^n=\{(x_{1,1},x_{1,2},x_{2,1},x_{2,2},\cdots,x_{n,1},x_{n,2}):x_{\ell,1}^2+x_{\ell,2}^2=1,\,1\leq \ell\leq n\}.
\end{equation} In this case $$\mathbb{T}^n\subset {\sqrt{n}}\,\mathbb{S}^{2n-1} \subset \mathbb{R}^{2n}.$$
In order to illustrate our results, we will discuss the case $n=2,$ where
\begin{equation}\label{DefiCliffordTorues2} \mathbb{T}^2\subset {\sqrt{2}}\,\mathbb{S}^{3} \subset \mathbb{R}^{4}.\end{equation}
As it is well known, the $n$-dimensional torus can be understood in different ways. Topologically, $\mathbb{T}^n\sim \mathbb{S}^1\times \cdots \times \mathbb{S}^1,$ where the circle $\mathbb{S}^1$ can be identified with the unit interval $[0,1),$ where we have identified $0\sim 1.$ The case $n=2$ gives $\mathbb{T}^2\sim \mathbb{S}^1\times \mathbb{S}^1.$ From differential geometry, a stereographic projection $\pi$ from $\mathbb{S}^3\setminus \{N\}$ into $\mathbb{R}^3$ gives the following embedding of $(1/\sqrt{2})\mathbb{T}^2\subset \mathbb{S}^3,$
\begin{equation}\label{anothertorus}
\dot{\mathbb{T}}^2=\{((\sqrt{2}+\cos(\phi))\cos(\theta),(\sqrt{2}+\cos(\phi))\sin(\theta),\sin(\phi))\in \mathbb{R}^3: 0\leq \theta,\phi<2\pi \},
\end{equation} of the 2-torus in $\mathbb{R}^3.$ At the same time, the Fourier analysis and the geometry on the torus can be understood better by the description of the torus given in \eqref{DefiCliffordTorues}. So, we will investigate the restriction problem on the torus by using \eqref{DefiCliffordTorues2} instead of \eqref{anothertorus}.
In this case, the Stein restriction conjecture for $S=\mathbb{S}^3$ assures that \eqref{Stein} holds true for every $1\leq p<\frac{8}{5}$ and $q\leq \frac{3}{5}p'.$ However, we will prove the following result, where we characterise the Stein restriction problem on $\mathbb{T}^2$.
\begin{theorem}\label{ThrcardonaRest2018}
Let $f\in L^{p}(\mathbb{R}^4).$ Then there exists $C>0,$ independent of $f$ and satisfying the estimate
\begin{equation}\label{cardonaRest2018}
\Vert \hat{f}|_{\mathbb{T}^2}\Vert_{L^q({\mathbb{T}^2},d\sigma)}:= \left(\int\limits_{\mathbb{T}^2}|\hat{f}(\xi_1,\xi_2,\eta_1,\eta_2)|^qd\sigma(\xi_1,\xi_2,\eta_1,\eta_2)\right)^{\frac{1}{q}}\leq C\Vert f\Vert_{L^{p}(\mathbb{R}^4)},
\end{equation} if and only if $1\leq p< \frac{4}{3}$ and $q\leq p'/3.$ Here, $d\sigma(\xi_1,\xi_2,\eta_1,\eta_2)$ is the usual surface measure associated to $\mathbb{T}^2.$
\end{theorem}
An important difference between the restriction problem on the $n$-torus, $n\geq 2,$ and the Stein restriction conjecture comes from the notion of curvature. For example, the sphere $\mathbb{S}^2$ has non-vanishing Gaussian curvature, in contrast with the 2-torus $\mathbb{T}^2,$ where the Gaussian curvature vanishes identically.
In the general case, let us observe that the Stein conjecture for $S=\mathbb{S}^{2n-1}$ asserts that \eqref{Stein} holds true for all $1\leq p<\frac{ 4n}{2n+1}$ and $q\leq \frac{2n-1}{2n+1}p'.$ Curiously, the situation for the $n$-dimensional torus is very different, as we will see in the following theorem.
\begin{theorem}\label{ThrcardonaRestTn2018}
Let $f\in L^{p}(\mathbb{R}^{2n}),$ $n\geq 2.$ Then there exists $C_n>0,$ depending only on $n$ and independent of $f,$ satisfying
\begin{equation}\label{cardonaRestTn2018}
\Vert \hat{f}|_{\mathbb{T}^{n}}\Vert_{L^q({\mathbb{T}^{n}},d\sigma_n)}\leq C_n\Vert f\Vert_{L^{p}(\mathbb{R}^{2n})},
\end{equation} if and only if $1\leq p< \frac{4}{3}$ and $q\leq p'/3.$ Here, $d\sigma_n$ is the usual surface measure associated to $\mathbb{T}^{n}.$
\end{theorem}
\begin{remark}
By a duality argument we conclude the following fact: if
$F\in L^{q'}(\mathbb{T}^{n},d\sigma_n),$ then there exists $C_n>0,$ depending only on $n$ and independent of $F,$ satisfying
\begin{equation}
\Vert (Fd\sigma_n)^{\vee} \Vert_{L^{p'}(\mathbb{R}^{2n})}=\left\Vert \int\limits_{ \mathbb{T}^n } e^{i2\pi x\cdot \xi}F(\xi) d\sigma_n(\xi) \right\Vert_{L^{p'}({\mathbb{R}^{2n}})}\leq C_n\Vert F\Vert_{L^{q'}{(\mathbb{T}^n,d\sigma_n)}},
\end{equation} if and only if $p'>4$ and $q'\geq (p'/3)'.$ We have denoted by $(Fd\sigma_n)^{\vee}$ the inverse Fourier transform of the measure $\mu:=Fd\sigma_n.$
\end{remark}
We end this introduction by summarising the progress on the restriction conjecture as follows. We refer the reader to:
\begin{itemize}
\item Fefferman \cite{Fefferman1970} and
Zygmund \cite{Zygmund74} for the proof of the restriction conjecture in the case $n=2$ (which is \eqref{cardonaRestTn2018} for $n=1$).
\item Stein \cite{Stein1986}, Tomas \cite{Tomas} and Strichartz \cite{Strichartz1977}, for the restriction problem in higher dimensions, with sharp $(L^q,L^2)$ results
for hypersurfaces with nonvanishing
Gaussian curvature.
Some more general classes of surfaces were treated by A. Greenleaf \cite{Greenlaf1981}.
\item Bourgain \cite{Bourgain1991,Bourgain1995b}, Wolff \cite{Wolff1995}, Moyua, Vargas, Vega and Tao \cite{Moyua1996,Moyua1999,Tao1998} who established the so-called bilinear approach.
\item
Bourgain and Guth \cite{BoutgainGuth2011}, Bennett, Carbery
and Tao \cite{BeCarTao2006}, by the progress on the case of
nonvanishing curvature, by making use of multilinear restriction estimates.
\item Finally, Buschenhenke, M\"uller and Vargas \cite{Muller}, for a complete list of references as well as the progress on the restriction theory on surfaces of finite type.
\end{itemize}
The main goal of this note is to give a simple proof of the restriction problem on the torus. This work is organised as follows. In Section \ref{Sec2} we prove Theorem \ref{ThrcardonaRest2018}. We end this note with the proof of Theorem \ref{ThrcardonaRestTn2018}. Sometimes we will use $(\mathscr{F}f)$ for the 2-dimensional Fourier transform of $f$ and $(\mathscr{F}_{\mathbb{R}^n}u)$ for the Fourier transform of a function $u$ defined on $\mathbb{R}^n.$
\section{Proof of Theorem \ref{ThrcardonaRest2018}}\label{Sec2}
In this note we will use the standard notation used for the Fourier analysis on $\mathbb{R}^n$ and the torus (see e.g. Ruzhansky and Turunen \cite{Ruz}).
Throughout this section we will consider the 2-torus $\mathbb{T}^2,$
\begin{equation}
\mathbb{T}^2=\{(x_{1},x_{2},y_{1},y_{2}):x_{1}^2+x_{2}^2=1,\,y_{1}^2+y_{2}^2=1 \}=\mathbb{S}^1_{(x_1,x_2)}\times \mathbb{S}^1_{(y_1,y_2)}\subset \mathbb{R}^4.
\end{equation} Here, $\mathbb{T}^2$ will be endowed
with the surface measure $$d\sigma(\xi_1,\xi_2,\eta_1,\eta_2)=d\sigma(\xi_1,\xi_2)d\sigma(\eta_1,\eta_2),$$
where $d\sigma(\xi_1,\xi_2)$ is the usual `surface measure' defined on $\mathbb{S}^1.$ Indeed, if $(\xi_1,\xi_2)\equiv (\xi_1(\varkappa),\xi_2(\varkappa))=(\cos(2\pi \varkappa),\sin(2\pi\varkappa)),$ $0\leq \varkappa< 1 ,$ then $d\varkappa=d\sigma(\xi_1,\xi_2).$
Conjecture \ref{ConjectureofStein} has been proved by Fefferman for $n=2$; the corresponding statement is the following (see Fefferman \cite{Fefferman1970} and Zygmund \cite{Zygmund74}). \begin{theorem}[Fefferman restriction theorem]\label{FeffermanStein}
Let $S=\mathbb{S}^{1}=\{x\in \mathbb{R}^2:|x|=1\}$ be the $1$-sphere and let $d\sigma$ be the corresponding `surface measure'. Then \eqref{Stein} holds true if and only if $1\leq p<\frac{4}{3}$ and $q\leq p'/3,$ where $p'=p/(p-1).$
\end{theorem}
In order to prove Theorem \ref{ThrcardonaRest2018}, let us consider $1\leq p<\frac{4}{3}$, $q\leq p'/3$ and $f\in L^{p}(\mathbb{R}^4) .$ By a density argument we can assume that $f\in C^\infty_{c}(\mathbb{R}^4).$ If $(\xi_1,\xi_2,\eta_1,\eta_2)\in \mathbb{T}^2,$ then
\begin{equation}
\widehat{f}(\xi_1,\xi_2,\eta_1,\eta_2)=\int\limits_{\mathbb{R}^4}e^{-i2\pi (x\cdot\xi+y\cdot \eta)}f(x,y)dy\,dx,\,\,x=(x_1,x_2),\,y=(y_1,y_2).
\end{equation}
By the Fubini theorem we can write
\begin{align*}
\widehat{f}(\xi_1,\xi_2,\eta_1,\eta_2)
=\int\limits_{\mathbb{R}^2}e^{-i2\pi x\cdot \xi}(\mathscr{F}_{y\rightarrow \eta}{f}(x,\cdot))(\eta)dx,\,\,\eta=(\eta_1,\eta_2),
\end{align*} where $(\mathscr{F}_{y\rightarrow \eta}{f}(x,\cdot))(\eta)=\widehat{f}(x,\eta)$ is the 2-dimensional Fourier transform of the function $f(x,\cdot),$ for every $x\in \mathbb{R}^2.$ By writing
\begin{equation}
\widehat{f}(\xi_1,\xi_2,\eta_1,\eta_2)=\mathscr{F}_{x\rightarrow \xi}\big((\mathscr{F}_{y\rightarrow \eta}{f}(x,\cdot))(\eta)\big)(\xi),
\end{equation}
for $1\leq p<\frac{4}{3}$ and $q\leq p'/3,$ the Fefferman restriction theorem gives,
\begin{equation}
\Vert \widehat{f}(\xi_1,\xi_2,\eta_1,\eta_2)\Vert_{L^q(\mathbb{S}^1,d\sigma(\xi))}\leq C\Vert \widehat{f}(x,\eta) \Vert_{L^p(\mathbb{R}^2_x)}.
\end{equation}
Now, let us observe that
\begin{align*}
\Vert \widehat{f}|_{\mathbb{T}^2}\Vert_{L^q({\mathbb{T}^2},d\sigma)} &=\Vert\widehat{f}(\xi,\eta) \Vert_{L^q((\mathbb{S}^1,d\sigma(\eta));L^q(\mathbb{S}^1,d\sigma(\xi)))} \\
&\leq C\Vert \Vert \widehat{f}(x,\eta) \Vert_{L^p(\mathbb{R}^2_x)} \Vert_{L^q(\mathbb{S}^1,d\sigma(\eta))}=: C \Vert \widehat{f}(x,\eta) \Vert_{L^q((\mathbb{S}^1,d\sigma(\eta)); L^p(\mathbb{R}^2_x) )}\\
&=:I.
\end{align*}
Now, we will estimate the right hand side of the previous inequality. First, if we assume that $4/3\leq q\leq p'/3,$ then $p\leq q$ and the Minkowski integral inequality gives,
\begin{align*}
I&=\left(\int\limits_{\mathbb{S}^1}\left(\int\limits_{\mathbb{R}^2}|\widehat{f}(x,\eta)|^pdx\right)^{\frac{q}{p}} d\sigma(\eta )\right)^{\frac{1}{q}} \leq \left(\int\limits_{\mathbb{R}^2}\left(\int\limits_{\mathbb{S}^1}|\widehat{f}(x,\eta)|^q d\sigma(\eta)\right)^\frac{p}{q} dx\right)^{\frac{1}{p}}\\
& \lesssim \left(\int\limits_{\mathbb{R}^2} \int\limits_{\mathbb{R}^2}|{f}(x,y)|^p dy dx\right)^{\frac{1}{p}}=\Vert f\Vert_{L^{p}(\mathbb{R}^4)},
\end{align*} where in the last inequality we have used the Fefferman restriction theorem. So we have proved that \eqref{cardonaRest2018} holds true for $4/3\leq q\leq p'/3.$ Now, if $q<\frac{4}{3},$ then we can use the finiteness of the measure $d\sigma(\xi,\eta)$ to deduce that
\begin{equation}
\Vert \widehat{f}|_{\mathbb{T}^2}\Vert_{L^q({\mathbb{T}^2},d\sigma)}\lesssim \Vert \widehat{f}|_{\mathbb{T}^2}\Vert_{L^\frac{4}{3}({\mathbb{T}^2},d\sigma)} \leq C\Vert f\Vert_{L^{p}(\mathbb{R}^4)}
\end{equation} holds true for $1\leq p<\frac{4}{3}.$ Now, we will prove the converse statement. So, let us assume that $p$ and $q$ are Lebesgue exponents satisfying \eqref{cardonaRest2018} with a constant $C>0$ independent of $f\in L^p(\mathbb{R}^4).$ If $g\in C^\infty_{c}(\mathbb{R}^2),$ let us define the function $f$ by $f(x,y)=g(x)g(y).$
The inequality,
\begin{equation}
\Vert \widehat{f}|_{\mathbb{T}^2}\Vert_{L^q({\mathbb{T}^2},d\sigma)}:= \left(\int\limits_{\mathbb{T}^2}|\widehat{f}(\xi_1,\xi_2,\eta_1,\eta_2)|^qd\sigma(\xi_1,\xi_2,\eta_1,\eta_2)\right)^{\frac{1}{q}}\leq C\Vert f\Vert_{L^{p}(\mathbb{R}^4)},
\end{equation} implies that
\begin{equation}
\Vert \widehat{g}|_{\mathbb{S}^1}\Vert_{L^q({\mathbb{S}^1},d\sigma)}:= \left(\int\limits_{\mathbb{S}^1}|\widehat{g}(\xi_1,\xi_2)|^qd\sigma(\xi_1,\xi_2)\right)^{\frac{1}{q}}\leq C\Vert g\Vert_{L^{p}(\mathbb{R}^2)}.
\end{equation} But, according to the Fefferman restriction theorem, the previous inequality is only possible for arbitrary $g\in C^\infty_{c}(\mathbb{R}^2),$ if $1\leq p<\frac{4}{3}$ and $q\leq p'/3.$
\section{Proof of Theorem \ref{ThrcardonaRestTn2018}}
Let us consider the $n$-dimensional torus
\begin{equation}
\mathbb{T}^n=\{(x_{1,1},x_{1,2},x_{2,1},x_{2,2},\cdots,x_{n,1},x_{n,2}):x_{\ell,1}^2+x_{\ell,2}^2=1,\,1\leq \ell\leq n\}.
\end{equation}
We endow $\mathbb{T}^n$ with the surface measure
\begin{equation}
d\sigma_n (\xi_{1,1},\xi_{1,2},\xi_{2,1},\xi_{2,2},\cdots,\xi_{n,1},\xi_{n,2})=\bigotimes_{j=1}^n d\sigma(\xi_{j,1},\xi_{j,2}),
\end{equation}
where $d\sigma$ is the `surface measure' on $\mathbb{S}^1.$ In order to prove Theorem \ref{ThrcardonaRestTn2018} we will use induction on $n.$ The case $n=2$ is precisely Theorem \ref{ThrcardonaRest2018}. So, let us assume that for some $n\in\mathbb{N},$ there exists $C_n$ depending only on the dimension $n,$ such that
\begin{equation}
\Vert (\mathscr{F}_{\mathbb{R}^n}{u})|_{\mathbb{T}^{n}}\Vert_{L^q({\mathbb{T}^{n}},d\sigma_n)}\leq C_n\Vert u\Vert_{L^{p}(\mathbb{R}^{2n})},
\end{equation}
for every function $u\in L^{p}(\mathbb{R}^{2n}).$ If $f\in C^\infty_{c}(\mathbb{R}^{2n+2})\subset L^p(\mathbb{R}^{2n+2}),$ $1\leq p<\frac{4}{3}$ and $q\leq p'/3,$ by using the approach of the previous section, we can write
\begin{align*}
\widehat{f}(\xi_1,\xi_2,\eta)
=\int\limits_{\mathbb{R}^2}e^{-i2\pi x\cdot \xi}(\mathscr{F}_{y\rightarrow \eta}{f}(x,\cdot))(\eta)dx,\,\,\eta\in \mathbb{R}^{2n}.
\end{align*} By applying the Fefferman restriction theorem we deduce
\begin{equation}
\Vert\widehat{f}(\cdot,\cdot,\eta) \Vert_{L^q(\mathbb{S}^1,d\sigma(\xi))}\leq \Vert (\mathscr{F}_{y\rightarrow \eta}{f}(x,\cdot))(\eta) \Vert_{L^p(\mathbb{R}^2_x)}.
\end{equation} Now, by using that
\begin{align*}
\Vert \widehat{f}|_{\mathbb{T}^{n+1}}\Vert_{L^q({\mathbb{T}^{n+1}},d\sigma_{n+1})} &=\Vert\widehat{f}(\xi_1,\xi_2,\eta) \Vert_{L^q((\mathbb{T}^n,d\sigma_n(\eta));L^q(\mathbb{S}^1,d\sigma(\xi)))} \\
&\leq C\Vert \Vert \widehat{f}(x_{1,1},x_{1,2},\eta) \Vert_{L^p(\mathbb{R}^2_x)} \Vert_{L^q(\mathbb{T}^n,d\sigma_n(\eta))}\\
&=: C \Vert \widehat{f}(x,\eta) \Vert_{L^q((\mathbb{T}^n,d\sigma_n(\eta)); L^p(\mathbb{R}^2_x) )}\\
&=:II,
\end{align*} for $4/3\leq q\leq p'/3,$ $p\leq q,$ and the Minkowski integral inequality, we have
\begin{align*}
II&=\left(\int\limits_{\mathbb{T}^n}\left(\int\limits_{\mathbb{R}^2}|\widehat{f}(x,\eta)|^pdx\right)^{\frac{q}{p}} d\sigma_n(\eta )\right)^{\frac{1}{q}} \leq \left(\int\limits_{\mathbb{R}^2}\left(\int\limits_{\mathbb{T}^n}|\widehat{f}(x,\eta)|^q d\sigma_n(\eta)\right)^\frac{p}{q} dx\right)^{\frac{1}{p}}\\
& \lesssim_n \left(\int\limits_{\mathbb{R}^{2}} \int\limits_{\mathbb{R}^{2n}}|{f}(x,y)|^p dy dx\right)^{\frac{1}{p}}=\Vert f\Vert_{L^{p}(\mathbb{R}^{2n+2})},
\end{align*} where in the last inequality we have used the induction hypothesis. So, we have proved Theorem \ref{ThrcardonaRestTn2018} for $4/3\leq q\leq p'/3.$ The case $q<\frac{4}{3}$ now follows from the finiteness of the measure $d\sigma_{n+1}$. That $1\leq p<\frac{4}{3}$ and $q\leq p'/3$ are necessary conditions for \eqref{cardonaRestTn2018} can be proved if we replace $f$ in \eqref{cardonaRestTn2018} by a function of the form
\begin{equation}
f(x_{1,1},x_{1,2},x_{2,1},x_{2,2},\cdots,x_{n,1},x_{n,2})=\prod_{j=1}^n g(x_{j,1},x_{j,2}),\,\,g \in C^\infty_c(\mathbb{R}^2).
\end{equation} Indeed, we automatically have \begin{equation}
\Vert \widehat{g}|_{\mathbb{S}^1}\Vert_{L^q({\mathbb{S}^1},d\sigma)}:= \left(\int\limits_{\mathbb{S}^1}|\widehat{g}(\xi_1,\xi_2)|^qd\sigma(\xi_1,\xi_2)\right)^{\frac{1}{q}}\leq C\Vert g\Vert_{L^{p}(\mathbb{R}^2)}.
\end{equation} Consequently, the Fefferman restriction theorem shows that the previous inequality is only possible for arbitrary $g\in C^\infty_{c}(\mathbb{R}^2),$ if $1\leq p<\frac{4}{3}$ and $q\leq p'/3.$\\
A standard duality argument applied to Theorem \ref{ThrcardonaRestTn2018} allows us to deduce the following result.
\begin{corollary}\label{ThrcardonaRestTn2018dual}
Let $F\in L^{q'}(\mathbb{T}^{n},d\sigma_n).$ Then there exists $C_n>0,$ depending only on $n$ and independent of $F,$ satisfying
\begin{equation}\label{cardonaRestTn2018dual}
\Vert (Fd\sigma_n)^{\vee} \Vert_{L^{p'}(\mathbb{R}^{2n})}=\left\Vert \int\limits_{ \mathbb{T}^n } e^{i2\pi x\cdot \xi}F(\xi) d\sigma_n(\xi) \right\Vert_{L^{p'}({\mathbb{R}^{2n}})}\leq C_n\Vert F\Vert_{L^{q'}{(\mathbb{T}^n,d\sigma_n)}},
\end{equation} if and only if $p'>4$ and $q'\geq (p'/3)'.$ Here, $d\sigma_n$ is the usual surface measure associated to $\mathbb{T}^{n}$ and $r':=r/(r-1).$
\end{corollary}
\noindent {\bf Acknowledgement}. I would like to thank Felipe Ponce from the \textit{Universidad Nacional de Colombia}, who introduced me to the restriction problem.
\end{document} |
\begin{document}
\title[]{Finding singularly cospectral graphs}
\author[C.M. Conde]{Cristian M. Conde${}^{1,3}$}
\author[E. Dratman]{Ezequiel Dratman${}^{1,2}$}
\author[L.N. Grippo]{Luciano N. Grippo${}^{1,2}$}
\address{${}^{1}$Instituto de Ciencias\\Universidad Nacional de General Sarmiento}
\address{${}^{2}$Consejo Nacional de Investigaciones Cient\'ificas y T\'ecnicas, Argentina}
\address{${}^{3}$Instituto Argentino de Matem\'atica ``Alberto Calder\'on'' - Consejo Nacional de Investigaciones Cient\'ificas y T\'ecnicas, Argentina}
\email{[email protected]}
\email{[email protected]}
\email{[email protected]}
\keywords{Almost cospectral graphs, cospectral graphs, singularly cospectral graphs}
\subjclass[2010]{05 C50}
\date{}
\maketitle
\begin{abstract}
Two graphs having the same spectrum are said to be cospectral. A pair of singularly cospectral graphs is formed by two graphs such that the absolute values of their nonzero eigenvalues coincide. Clearly, a pair of cospectral graphs is also singularly cospectral, but the converse may not be true. Two graphs are almost cospectral if their nonzero eigenvalues and their multiplicities coincide. In this paper, we present necessary and sufficient conditions for a pair of graphs to be singularly cospectral, giving an answer to a problem posed by Nikiforov. In addition, we construct an infinite family of pairs of noncospectral singularly cospectral graphs with an unbounded number of vertices. It is clear that almost cospectral graphs are also singularly cospectral but the converse is not necessarily true; we present families of graphs where both concepts, almost cospectrality and singular cospectrality, agree.
\end{abstract}
\section{Introduction}\label{s1}
Two graphs are said to be cospectral if they have the same spectrum. The problem of finding families of pairs of nonisomorphic cospectral graphs has attracted the attention of many researchers. Probably, the first relevant result on this subject is due to Schwenk, who proved that almost all trees are cospectral~\cite{schwenk1973}. Since then, many articles, presenting constructions to either generate pairs of cospectral graphs or find families of graphs which do not have a mate, known as graphs determined by their spectrum, have been published. See for instance~\cite{GodsilM82,vanDamH09,Quietal20}. The energy of a graph was defined by Gutman in 1978 as the sum of the absolute values of its eigenvalues, counted with their multiplicity~\cite{Gutman78}. Two graphs, with the same number of vertices, are said to be equienergetic if they have the same energy. Clearly, two cospectral graphs are also equienergetic. Nevertheless, there are examples of pairs of noncospectral equienergetic graphs~\cite{Stevanovic05,BonifacioVA08}. Indeed, finding noncospectral equienergetic pairs of graphs is a very active and current topic of research. The energy of a graph is nothing but the trace norm of its adjacency matrix; i.e., the sum of the singular values of its adjacency matrix. Notice that, when a matrix is symmetric, its singular values are precisely the absolute values of its eigenvalues. In~\cite{Nikiforov2016}, Nikiforov defines two graphs as singularly cospectral if their nonzero singular values, counted with their multiplicity, coincide. Hence any two singularly cospectral graphs are equienergetic. In that article, he also posed the following problem.
\begin{problem}~\cite{Nikiforov2016}\label{Niki}
Find necessary and sufficient conditions for two graphs to be singularly cospectral.
\end{problem}
We give a characterization in Section~\ref{sec: main result} that answers Problem~\ref{Niki}. In addition, we formulate the following two natural problems in connection with the notion of singular cospectrality.
\begin{problem}~\label{problem 1}
Find infinite pairs of noncospectral singularly cospectral graphs.
\end{problem}
\begin{problem}~\label{problem 2}
Find families of graphs where singular cospectrality implies cospectrality.
\end{problem}
In Section~\ref{sec: construction sc noncospectral} we deal with Problem~\ref{problem 1} by presenting a construction based on the spectral decomposition of a symmetric matrix. Finally, in Section~\ref{sec: c implies sc}, we define families of graphs by imposing certain constraints on the spectrum of their graphs that make the notions of cospectrality and singular cospectrality equivalent, answering Problem~\ref{problem 2}.
\section{Preliminaries}\label{s2}
All graphs mentioned in this article are finite and have no loops or multiple edges. Let $G$ be a graph. We use $V(G)$ and $E(G)$ to denote the set of $n=|V(G)|$ vertices and the set of $m=|E(G)|$ edges of $G$, respectively.
The adjacency matrix associated with the graph $G$ is defined by
$A_G= (a_{ij})_{n\times n}$, where $a_{ij} = 1$; if $v_i \sim v_j$ i.e., the vertex $v_i$ is adjacent to the vertex
$v_j$ and $a_{ij} = 0$ otherwise. We often denote $A_G$ simply by $A.$ We use $d_G(v)$ to denote the degree of $v$ in $G$ (the number of edges incident to $v$), or $d(v)$ provided the context is clear. A $d$-regular graph is a graph such that $d_G(v)=d$ for every $v\in V(G)$.
For a matrix $M \in \mathbb{R}^{n\times n}$, the
spectrum, $\sigma(M)$ is the
multiset $
\sigma(M)=\{[\lambda_1(M)]^{m_1},\cdots, [\lambda_{r}(M)]^{m_{r}}\},
$ where $\lambda_1(M)>\lambda_2(M)>\cdots >\lambda_r(M)$ are the distinct eigenvalues of $M$ and $m_i$
is the multiplicity
of $\lambda_i(M)$ for any $i \in \{1,\cdots, r\}$ and spectral radius, $\rho(M)$ is the
largest absolute value of the eigenvalues i.e., largest of $|\lambda_1(M)|, |\lambda_2(M)|, \cdots, |\lambda_r(M)|$ where $\lambda_i(M)\in \sigma(M)$.
To denote the spectrum of $A_G$ we use $\sigma(G)$.
Next proposition collects several results about the eigenvalues of a graph. Most of these statements are well-known.
\begin{proposition}{\cite[Chapter 6]{Ba}}\label{propiedades_basicas}
Let $G$ be a graph with $n$ vertices and $m$ edges. Then,
\begin{enumerate}
\item $|\lambda_i(G)|\leq \lambda_1(G)$ for all $i\geq 2.$
\item $\lambda_1(G) \geq \frac{2m}{n}.$ Equality holds if and only if $G$ is $\frac{2m}{n}$-regular.
\item If $G$ is connected, the equality $\lambda_r(G)=-\lambda_1(G)$ holds if and only if $G$ is bipartite.
\item If $G$ is bipartite, then $\sigma(G)$ is symmetric with respect to zero.
\end{enumerate}
\end{proposition}
The inertia of a graph $G$ is the triple $In(G) = (p(G), z(G), n(G)),$ in which $p(G), z(G), n(G)$ stand for the number of positive, zero,
and negative eigenvalues of $G$, respectively. The energy of $G$,
denoted by $\mathcal{E} = \mathcal{E}(G)$, is the sum of the absolute values of the eigenvalues of $G$.
Let $G$ and $H$ be two graphs with vertex sets $V(G)$ and $V(H)$, respectively. The \emph{strong product of $G$ and $H$} is the graph $G\boxtimes H$ such that the vertex set is $V(G)\times V(H)$ and in which two vertices $(g_1, h_1)$ and $(g_2, h_2)$ are adjacent if and only if one of the following conditions holds: $g_1=g_2$ and $h_1$ is adjacent to $h_2$, $h_1=h_2$ and $g_1$ is adjacent to $g_2$, or $g_1$ is adjacent to $g_2$ and $h_1$ is adjacent to $h_2$. The \emph{cartesian product of $G$ and $H$} is the graph $G\square H$ such that the vertex set is $V(G)\times V(H)$ and in which two vertices $(g_1, h_1)$ and $(g_2, h_2)$ are adjacent if and only if either $g_1 = g_2$ and $h_1$ is adjacent to $h_2$ in $H$ or $g_1$ is adjacent to $g_2$ in $G$ and $h_1 = h_2.$
We denote by $C_n$, $P_n$ and $K_n$ to the cycle, the path and the complete graph on $n$ vertices. By $K_{r,s}$ we denote the complete bipartite graphs with a bipartition on $r$ and $s$ vertices respectively. We use $P_G(x)$ to denote the characteristic polynomial of $A_G$; i.e., $P_G(x)=\det(xI_n-A_G)$. It is easy to prove that $P_{K_n}(x)=(x-n+1)(x+1)^{n-1}$ and in consequence $\sigma(K_n)=\{[n-1]^1,[-1]^{n-1}\}$.
\subsection*{Schatten norms}
Consider a matrix $M \in \mathbb{R}^{n\times n}$.
Recall that the \textit{singular values} of a matrix $M$ are the square roots of the eigenvalues of $M^*M$, where $M^*$ is the conjugate transpose of $M$. We denote by $s_1(M), s_2(M),\cdots, s_r(M)$ the singular values of $M$ arranged in descending order. If $r={\rm rank}(M)$, then $s_{r+1}(M) =\cdots=s_n(M) = 0.$
Define for $p>0$,
$$
\|M\|_p=\left(\sum_{i=1}^n s_i(M)^p \right)^{\frac 1p}.
$$
For $p\geq 1$, it is a norm over ${\mathbb R}^{n\times n}$ called the \textit{$p$-Schatten norm}. When $p=1$, it is also called the trace norm or nuclear norm. When $p= 2$, it is exactly the Frobenius norm $\|M\|_2.$
\begin{remark}
As $M^*M$ is a positive semi-definite matrix, the eigenvalues of $(M^*M)^{1/2}$ coincide with the singular values of $M$. It holds that
\begin{equation}\label{limitenormap}
\lim\limits_{p\to \infty} \|M\|_p=s_1(M).\end{equation}
\end{remark}
\section{Characterizing singularly cospectral graphs}~\label{sec: main result}
Given a graph $G$, following Nikiforov's notation~\cite{Nikiforov2016}, consider the function $f_G (p)$ for any $p\geq 1$ as:
$$
f_G(p):=\|G\|_p=\left(\sum_{i=1}^n s_i(G)^p \right)^{\frac 1p},
$$
where $\|G\|_p$ and $s_i(G)$ stands for $\|A_G\|_p$ and $s_i(A_G)$, respectively. The following statement collects some of the most important known properties of $f_G$.
\begin{lemma}\label{propbasicas}~\cite{Nikiforov2016} \label{Nikiforov}
The following statements hold for a graph $G$.
\begin{enumerate}
\item $f_G(p)$ is differentiable in $p$.
\item $f_G(p)$ is decreasing in $p.$
\item
If $G$ is a graph with $m$ edges, then $f_G(2)=\sqrt{2m}.$ Furthermore, for any $k > 1$, the number of closed walks of length $2k$ of a graph $G$ is equal to
$$\frac{\left(f_G (2k)\right)^{2k}}{4k}.$$
\end{enumerate}
\end{lemma}
Following \cite{Nikiforov2016} we recall the next definition.
\begin{definition}Two graphs $G$ and $H$ are called {\bf singularly cospectral} if they have the same nonzero
singular values with the same multiplicities. We denote a pair of noncospectral singularly cospectral graphs by \textbf{NCSC}.
\end{definition}
We use ${\rm rank}(G)$ and ${\rm nullity}(G)$ to denote ${\rm rank}(A_G)$ and ${\rm nullity}(A_G)$ for a graph $G$, respectively. Let us start stating some relevant properties satisfied by singularly cospectral graphs.
\begin{lemma}\label{prop_nec}
Let $G$ and $H$ be singularly cospectral graphs. Then, the following conditions hold.
\begin{enumerate}
\item $G$ and $H$ have the same number of edges.
\item ${\rm rank}(G)={\rm rank}(H),$ $\big |{\rm nullity}(G)-{\rm nullity}(H)\big|=\big ||V(G)|-|V(H)|\big |$ and $p(G)-p(H)=n(H)-n(G)$.
\item $\mathcal{E}(G)=\mathcal{E}(H)$, i.e, $G$ and $H$ are equienergetic.
\end{enumerate}
\end{lemma}
\begin{proof}
We denote by $m_G$ and $m_H$ the number of edges of $G$ and $H$, respectively. By item (3) in Lemma \ref{propbasicas} we have that $\sqrt{2m_G}=f_G(2)=f_H(2)=\sqrt{2m_H}.$ Then, $m_G=m_H.$
Recall that if $A \in \mathbb{R}^{n\times n}$, the number of nonzero singular values of $A$ is equal to the
${\rm rank}(A)$. Then ${\rm rank}(G)={\rm rank}(H)$. The rank of any square matrix equals the number of nonzero eigenvalues (with repetitions), so the number of nonzero singular values of $A$ equals the rank of $A^TA$ (notice that $A^T=A^*$). As $A^TA$ and $A$ have the same kernel then it follows from the Rank-Nullity Theorem that $A^TA$ and $A$ have the same rank. On the other hand, from the Rank-Nullity Theorem we have
\begin{eqnarray}
\big |{\rm nullity}(G)-{\rm nullity}(H)\big|&=&\big |\left(|V(G)|-{\rm rank}(G)\right)-\left(|V(H)|-{\rm rank}(H)\right)\big |\nonumber\\
&=&\big ||V(G)|-|V(H)|\big |.\nonumber\
\end{eqnarray}
Finally, since $p(G)+n(G)={\rm rank}(G)={\rm rank}(H)=p(H)+n(H)$ then $p(G)-p(H)=n(H)-n(G).$
Finally, item (3) follows from the fact that the singular values of a graph $G$ coincide with the absolute values of the eigenvalues of $G$.
\end{proof}
\begin{example}{\bf Equienergetic does not imply singularly cospectral}
We present a pair of equienergetic nonsingularly cospectral graphs. For this
purpose, we consider the cartesian product and the direct product of two complete graphs; recall that the \emph{direct product} $G\times H$ has vertex set $V(G)\times V(H)$, two vertices $(g_1,h_1)$ and $(g_2,h_2)$ being adjacent if and only if $g_1$ is adjacent to $g_2$ and $h_1$ is adjacent to $h_2$. Let $r,s$ be two integers with $r>3$ and $s>3$. Let $G=K_r \square K_s$ and $H=K_r\times K_s$. Then $\mathcal{E}(G)=\mathcal{E}(H)=4(r-1)(s-1)$; i.e., $G$ and $H$ are equienergetic~\cite{BonifacioVA08}, but such graphs are not singularly cospectral since
$|E(G)|=\frac{rs}{2}(s+r-2)$ and $|E(H)|=\frac{rs}{2}(r-1)(s-1).$
\end{example}
In the sequel, the following lemma is useful to obtain a characterization for a pair of singularly cospectral graphs.
\begin{lemma}\label{normakn}
Let $x_1, x_2,\cdots, x_r$ and $y_1, y_2,\cdots, y_s$ be non-increasing sequences of
positive real numbers. If
\begin{equation}\label{igualdadkn}
\sum_{i=1}^r x_i^{k_n}=\sum_{j=1}^s y_j^{k_n} \qquad {\rm for\: all}\: n\in\mathbb{N},
\end{equation}
where $\{k_n\}_{n\in \mathbb{N}}$ is a sequence of real numbers such that $k_n\geq 1$ for all $n\in \mathbb{N}$ and $k_n\to \infty$,
then $r=s$ and $x_i=y_i$ for $i=1, \cdots, r.$
\end{lemma}
\begin{proof}
We denote by $x=(x_1, x_2,\cdots, x_r, 0, 0, \cdots)$ and $y=(y_1, y_2,\cdots, y_s, 0, 0, \cdots)$. We have, by \eqref{igualdadkn}, that $\|x\|_{k_n}=\|y\|_{k_n}$ for all $k_n$, then from \eqref{limitenormap} we have that
$$x_1=\|x\|_{\infty}=\lim\limits_{n\to \infty} \|x\|_{k_n}=\lim\limits_{n\to \infty} \|y\|_{k_n}=\|y\|_{\infty}=y_1.$$
By repeating this procedure we can prove that
\begin{equation}\label{igualdadi}
x_i=y_i \qquad \forall i=1, 2, \cdots, \min\{r, s\}.
\end{equation}
Now we assume that $r<s.$ Then by \eqref{igualdadkn} and \eqref{igualdadi} we obtain that $\sum_{j=r+1}^s y_j^{k_n}=0$, as $y_j$ is a positive real number for any $j=r+1, \cdots, s$ we conclude that $y_j=0$ for all $j\geq r+1.$ This shows that $r=s$ and $x_i=y_i$ for all $i=1, \cdots, r.$
\end{proof}
We can then prove the following theorem for characterizing when two graphs $G$ and $H$ are singularly cospectral graphs in terms of their functions $f_G(x)$ and $f_H(x)$.
\begin{theorem} \label{equivalencia}
The following conditions are equivalent:
\begin{enumerate}
\item $G$ and $H$ are singularly cospectral.
\item $f_G(p)=f_H(p)$ for all $p\geq 1.$
\item $f_G(x_n)=f_H(x_n)$ for any sequence $\{x_n\}_{n\in \mathbb{N}}$ such that $x_n\geq 1$ for all $n\in \mathbb{N}$ and $x_n\to \infty$.
\end{enumerate}
\end{theorem}
\begin{proof}
The implications $1\Rightarrow 2$ and $2\Rightarrow 3$ are trivial. Now, suppose that 3 holds. Then
$$
\sum_{i=1}^r s_i(G)^{x_n}=\sum_{j=1}^s s_j(H)^{x_n} \qquad {\rm for\: all}\: n\in\mathbb{N},$$
hence by Lemma \ref{normakn} we have that $G$ and $H$ are singularly cospectral.
\end{proof}
Now, we are ready to prove the main result of this section, giving an answer to Problem~\ref{Niki}.
\begin{theorem}\label{singcospectral}
Two graphs are singularly cospectral if and only if, for each $k\in \mathbb{N}$,
they have the same number of closed walks of length $2k.$
\end{theorem}
\begin{proof}
Assume that $G$ and $H$ are singularly cospectral, then $s_i(G)=s_i(H)$ for all $i=1, \cdots, {\rm rank}(G)={\rm rank}(H)$ and $f_G(2k)=f_H(2k)$ for all $k\in \mathbb{N}.$ This implies, by Lemma \ref{propbasicas}, that the number of closed walks of length $2k$ of $G$ and $H$ coincides.
Conversely, assume that $G$ and $H$ have the same number of closed walks of length $2k$ for all $k\in \mathbb{N}.$ This implies, by Lemma \ref{Nikiforov}, that $f_G(2k)=f_H(2k)$ for any $k\in \mathbb{N}$. Then by Theorem \ref{equivalencia} we conclude that $G$ and $H$ are singularly cospectral.
\end{proof}
The following example is an immediate consequence of the previous theorem. We exhibit a pair of singularly cospectral graphs. Let $G=C_{2j}$ and let $F$ be the disjoint union of two copies of the cycle $C_j$ with $j\ge 3$. We use $V(F)=\{v_1, v_2, \cdots, v_{2j}\}=V(G)$ to denote the set of vertices of $F$ and $G$. For each $k\in \mathbb{N}$, we denote by $W^{G}_{2k}=\{v_{n_i}\}_{i=1,\cdots, 2k+1}$ a closed walk of length $2k$ with its endpoint equal to $v_{n_1}=v_{n_{2k+1}}$ in $G$. We define the following function between the closed walks of even length in $G$ ($\mathcal{F}_G$) and the closed walks of even length in $F$ ($\mathcal{F}_F$) as follows:
$$
\phi: \mathcal{F}_G\to \mathcal{F}_F, \phi(W^{G}_{2k})=W^{F}_{2k}=\{v_{m_i}\}_{i=1,\cdots, 2k+1},
$$
where $m_i = r_j(n_i)$ if $n_1\in \{1, 2, \cdots, j\}$ or $m_i = r_j(n_i) + j$ if $n_1\in \{j+1,\ldots, 2j\}$ for each $1\le i\le 2k+1$; here $r_j(n_i)$ represents the remainder when the integer $n_i$ is divided by $j$.
Clearly this is a bijection between $\mathcal{F}_G$ and $\mathcal{F}_F$ and by Theorem \ref{singcospectral} we conclude that this pair of graphs are singularly cospectral.
Even though Theorem~\ref{singcospectral} gives a characterization that may be useful from a theoretic perspective, it does not seem to be helpful from a practical point of view when we try to construct pairs of singularly cospectral graphs, unless these graphs are well-structured as in the last example.
\section{Constructing pairs of \textbf{NCSC}}~\label{sec: construction sc noncospectral}
Recall that two graphs are called cospectral if they have the same spectrum. Obviously, cospectral graphs are singularly cospectral, but the converse may not be true. See Fig.~\ref{fig: SCNC uno conexo y el otro disconexo} for an example.
As the adjacency matrix of a graph is a symmetric matrix with real entries, it has a spectral decomposition $A=\sum_{i=1}^m \mu_i P_i$, where $\mu_1, \mu_2, \cdots, \mu_m$ are the distinct
eigenvalues of $G$ and $P_i$ represents the orthogonal projection onto the eigenspace $E_{\mu_i}$. In the next result we present how to construct a pair of singularly cospectral graphs but not cospectral from a graph using its spectral decomposition.
\begin{figure}
\caption{The graphs $G_F$ and $H_F$ are \textbf{NCSC}}
\label{fig: SCNC uno conexo y el otro disconexo}
\end{figure}
\begin{theorem}\label{GFHF}
Let $F$ be a nonbipartite graph with $|V(F)|=n$ and
$n\geq 3$. Then, the bipartite graph $G_F=F\times K_2$ and $H_F=2F$ (the disjoint union of two copies of $F$) are \textbf{NCSC}.
\end{theorem}
\begin{proof}
Assume that $\sigma(F)=\{[\lambda_1]^{m_1},\cdots, [\lambda_{k}]^{m_{k}}\}$ with spectral decomposition $A_F=\sum_{i=1}^k \lambda_i P_i$. Notice that $\sigma(F)$ is not symmetric respect to zero, because $F$ is a nonbipartite graph (see Proposition~\ref{propiedades_basicas}). Then, we obtain that
\begin{eqnarray}
A_{H_F}&=&\left(
\begin{matrix}
A_F& 0 \\
\\
0 & A_F
\end{matrix}
\right)=\sum_{i=1}^k\lambda_i \underbrace{\left(
\begin{matrix}
P_i& 0 \\
\\
0 & P_i
\end{matrix}
\right)}_{\hat{P}_i}=\sum_{i=1}^k \lambda_i \hat{P}_i,
\end{eqnarray}
and
\begin{eqnarray}
A_{G_F}&=&\left(
\begin{matrix}
0& A_F \\
\\
A_F & 0
\end{matrix}
\right)=\sum_{i=1}^k\lambda_i \left(
\begin{matrix}
0& P_i \\
\\
P_i & 0
\end{matrix}
\right)\nonumber \\&=&\sum_{i=1}^k \lambda_i \left[\frac12\left(
\begin{matrix}
P_i& P_i \\
\\
P_i & P_i
\end{matrix}
\right) -\frac12 \left(
\begin{matrix}
P_i& -P_i \\
\\
-P_i & P_i
\end{matrix}
\right) \right] \nonumber\\&=&
\sum_{i=1}^k \lambda_i \underbrace{\frac12\left(
\begin{matrix}
P_i& P_i \\
\\
P_i & P_i
\end{matrix}
\right)}_{\widetilde{P}_i^+} -\sum_{i=1}^k \lambda_i \underbrace{\frac12 \left(
\begin{matrix}
P_i& -P_i \\
\\
-P_i & P_i
\end{matrix}
\right)}_{\widetilde{P}_i^-}\nonumber\\&=&\sum_{i=1}^k \lambda_i \widetilde{P}_i^+ + \sum_{i=1}^k (-\lambda_i) \widetilde{P}_i^-.
\end{eqnarray}
Then, we conclude that
$$\sigma(G_F)=\{[-\lambda_1]^{m_1},\cdots, [-\lambda_{k}]^{m_{k}}, [\lambda_1]^{m_1},\cdots, [\lambda_{k}]^{m_{k}}\}$$
and $\sigma(H_F)=\{[\lambda_1]^{2m_1},\cdots, [\lambda_{k}]^{2m_{k}}\}$. This completes the proof.
\end{proof}
\begin{remark}\label{scnocoe}
If $F=K_n$ with $n\ge 3$, then the $(n-1)$-regular bipartite graph $G_F=F\times K_2$ and the disconnected $(n-1)$-regular graph $2K_n$ are \textbf{NCSC}.
\end{remark}
In \cite{Row}, Rowlinson obtained
the relationship between the characteristic polynomials of a graph $G$ and the graph $G^*$ constructed by adding a new vertex to $G$, from its spectral decomposition. More precisely, the graph is modified by the addition of a vertex with any prescribed set of neighbours in $V(G)$. In order to prove our result we need to recall that statement.
\begin{theorem} [\cite{Row}, Theorem 2.1]\label{addvertex}
Let $G$ be a finite graph whose adjacency matrix $A$ has spectral decomposition $A=\sum_{i=1}^m \mu_i P_i$. Let $\emptyset \neq S \subseteq V(G)=\{1, 2, \cdots, n\}$ and let $G^*$ be the
graph obtained from G by adding one new vertex whose neighbours are the vertices
in S. Then
\begin{equation}
P_{G^*}(x)=P_{G}(x)\left( x- \sum_{i=1}^m \frac{\rho_i^2}{x-\mu_i} \right),
\end{equation}
where $\rho_i=\|\sum_{k\in S} P_i e_k\|$ and $\{e_1,\cdots, e_n\}$ is the standard orthonormal basis of $\mathbb{R}^n$.
\end{theorem}
Combining theorems~\ref{GFHF} and~\ref{addvertex}, we obtain a construction that leads to a pair of~\textbf{NCSC} graphs by properly adding a vertex to the original pair of graphs.
We will use the following notation for the theorem below. Let $F, G_F$ and $H_F$ be as in Theorem \ref{GFHF}, where $V(F)=\{x_1,\ldots,x_n\}$ and $V(G_F)=V(H_F)=\{x_1,\ldots,x_n\}\cup \{y_1,\ldots,y_n\}$, where both sets correspond to the same copies of the vertices of $F$. For any $1\leq j\leq n,$ let $G_{F, j}^v$ be the graph obtained from $G_F$ by adding one new vertex $v$ whose neighbours are the vertices in $S=\{x_1,\cdots, x_j\}\cup \{y_1,\cdots, y_j\}$. Analogously, we define $H_{F, j}^w$ by adding a vertex $w$ and connecting it to the same set $S$ as $G_{F, j}^v$.
\begin{theorem}\label{polHF}
There exist $\lambda_1,\ldots,\lambda_m$, $\rho_1,\ldots,\rho_m$, $m_1,\ldots,m_m$ such that
\begin{equation*}
P_{G_{F, j}^v}(x)=\prod_{i=1}^{m}(x-\lambda_i)^{2m_i-1}\left( x\prod_{i=1}^{m}(x-\lambda_i)- \sum_{i=1}^m \rho_i^2\prod_{j=1, j\neq i}^{m}(x-\lambda_j)\right)
\end{equation*}
and
\begin{equation*}
P_{H_{F, j}^w}(x)=\prod_{i=1}^{m}(x-\lambda_i)^{m_i-1}\prod_{i=1}^{m}(x+\lambda_i)^{m_i}\left( x\prod_{i=1}^{m}(x-\lambda_i)- \sum_{i=1}^m \rho_i^2\prod_{j=1, j\neq i}^{m}(x-\lambda_j) \right).
\end{equation*}
In particular, $G_{F,j}^v$ and $H_{F, j}^w$ are \textbf{NCSC}.
\end{theorem}
\begin{proof}
First of all, we label the vertices of both graphs as follows: $V(G_F)=\{z_1,\cdots, z_{2n}\}$ with $z_i=x_i$ and $z_{n+i}=y_i$, for $i\in \{1, \cdots, n\}$ and $V(H_F)=\{w_1,\cdots, w_{2n}\}$ where the first $n$ vertices belong to $F$. Then we denote by $V_j=\{1,\cdots,j, n+1,\cdots, n+j\},$ $S_{j}^{G_F}=\{z_k\}_{k\in V_j}$ and $S_{j}^{H_F}=\{w_k\}_{k\in V_j}$ (e.g. Fig.~\ref{fig: added vertex}). By Theorem \ref{GFHF} and \ref{addvertex}, we have that
\begin{equation}
P_{G_{F, j}^v}(x)=P_{G_F}(x)\left( x- \sum_{i=1}^m \frac{\rho_i^2}{x-\lambda_i} \right),
\end{equation}
and
\begin{equation}
P_{H_{F, j}^w}(x)=P_{H_F}(x)\left( x- \sum_{i=1}^m \frac{\sigma_i^2}{x-\lambda_i} - \sum_{i=1}^m \frac{\tau_i^2}{x+\lambda_i}\right),
\end{equation}
where $\rho_i=\|\sum_{k\in V_j} \hat{P}_i e_k\|, \sigma_i=\|\sum_{k\in V_j} \widetilde{P}_i^+ e_k\|$ and $\tau_i=\|\sum_{k\in V_j} \widetilde{P}_i^- e_k\|.$ It follows that $\rho_i=\sigma_i$ and $\tau_i=0$ for any $1\leq i\leq m$. Since $P_{G_F}(x)=\prod_{i=1}^{m}(x-\lambda_i)^{2m_i}$ and $P_{H_F}(x)=\prod_{i=1}^{m}(x-\lambda_i)^{m_i}\prod_{i=1}^{m}(x+\lambda_i)^{m_i}$ we conclude that
\begin{equation*}
P_{G_{F, j}^v}(x)=\prod_{i=1}^{m}(x-\lambda_i)^{2m_i-1}\left( x\prod_{i=1}^{m}(x-\lambda_i)- \sum_{i=1}^m \rho_i^2\prod_{j=1, j\neq i}^{m}(x-\lambda_j)\right)
\end{equation*}
and
\begin{equation*}
P_{H_{F, j}^w}(x)=\prod_{i=1}^{m}(x-\lambda_i)^{m_i-1}\prod_{i=1}^{m}(x+\lambda_i)^{m_i}\left( x\prod_{i=1}^{m}(x-\lambda_i)- \sum_{i=1}^m \rho_i^2\prod_{j=1, j\neq i}^{m}(x-\lambda_j) \right).
\end{equation*}
\end{proof}
\begin{figure}
\caption{In this example $F=K_5$, the vertices of the graphs $G_F$ and $H_F$ are labeled from $1$ to $10$ consecutively in each connected component of $G_F$ and each set of the bipartition of $H_F$ respectively, and $S=\{3,4,5,8,9,10\}$.}
\label{fig: added vertex}
\end{figure}
\begin{corollary}\label{gnj}
It holds that
$$
\sigma(G_{K_n, j}^v)=\{[-(n-1)]^{1}, [-1]^{n-2}, [1]^{n-1}, \lambda_1, \lambda_2, \lambda_3\}
$$
and
$$
\sigma(H_{K_n, j}^w)=\{[-1]^{2n-3}, [n-1]^{1}, \lambda_1, \lambda_2, \lambda_3\},
$$
with $\lambda_1 >0, \lambda_3<0$ and
$$
\lambda_2= \left\{ \begin{array}{lcc}
>0 & {\rm if} & 1\leq j\leq n-2 \\
0 & {\rm if} & j=n-1 \\
<0 & {\rm if} & j=n
\end{array}
\right.
$$
In particular, $G_{K_n,j}^v$ and $H_{K_n, j}^w$ are \textbf{NCSC}.
\end{corollary}
\begin{proof}
Let us suppose that the eigenvalues of $G_{K_n}$ and $H_{K_n}$ are denoted by $\{\mu_1,\mu_2, \mu_3, \mu_4\}$ and $\{\nu_1, \nu_2\}$, respectively, in increasing order and
\begin{equation}
A_{G_{K_n}}=\sum_{r=1}^4 \mu_r P_r^{G_{K_n}}\qquad {\rm and} \qquad A_{H_{K_n}}=\sum_{s=1}^2 \nu_s P_s^{H_{K_n}},
\end{equation}
where $P_r^{G_{K_n}}$ and $P_s^{H_{K_n}}$ represent the orthogonal projection onto the eigenspace $E_{\mu_r}$ and $E_{\nu_s}$, respectively.
For sake of simplicity, we denote by $G_{K_n, j}$ and $H_{K_n, j}$ the graphs omitting in the notation the added vertex in each one of them. From Theorem \ref{polHF}, for each $j\in \{1, 2, \cdots, n\}$ we obtain the characteristic polynomials of $G_{K_n,j}$ and $H_{K_n,j}$, respectively:
\begin{equation}\label{polcarmod}
\begin{split}
P_{G_{K_n,j}}(x)=P_{G_{K_n}}(x)\left( x- \sum_{r=1}^4 \frac{\rho_r^2}{x-\mu_r} \right), \\ P_{H_{K_n,j}}(x)=P_{H_{K_n}}(x)\left( x- \sum_{s=1}^2 \frac{\sigma_s^2}{x-\nu_s} \right),
\end{split}
\end{equation}
where $\rho_r=\|\sum_{k\in S_j^{G_{K_n}}}P_r^{G_{K_n}}e_k\|$, $\sigma_s=\|\sum_{k\in S_j^{H_{K_n}}}P_s^{H_{K_n}}e_k\|$ and $S_j^{G_{K_n}}=S_j^{H_{K_n}}=\{1,\cdots, j, n+1, \cdots, n+j\}.$
For the sake of clarity, given a matrix $B\in \mathbb{R}^{n \times n}$ and $k\in \{1, \cdots, n\}$ we denote by $c_k(B)\in \mathbb{R}^n$ the $k^{\rm th}$ column of $B$. Then,
\begin{enumerate}
\item $\rho_1=\|\sum_{k\in S_j^{G_{K_n}}}P_1^{G_{K_n}}e_k\|=\|\sum_{k\in S_j^{G_{K_n}}}c_k(P_1^{G_{K_n}})\|=0.$
\item $\rho_2^2=\|\sum_{k\in S_j^{G_{K_n}}}P_2^{G_{K_n}}e_k\|^2=\|\sum_{k\in S_j^{G_{K_n}}}c_k(P_2^{G_{K_n}})\|^2=\frac{2j(n-j)}{n}.$
\item $\rho_3=\|\sum_{k\in S_j^{G_{K_n}}}P_3^{G_{K_n}}e_k\|=\|\sum_{k\in S_j^{G_{K_n}}}c_k(P_3^{G_{K_n}})\|=0.$
\item $\rho_4=\|\sum_{k\in S_j^{G_{K_n}}}P_4^{G_{K_n}}e_k\|=\|\sum_{k\in S_j^{G_{K_n}}}c_k(P_4^{G_{K_n}})\|=\frac{j}{n}\| c_1(J_{2n})\|=\frac{j}{n}(2n)^{1/2}.$
\item $\sigma_1^2=\|\sum_{k\in S_j^{H_{K_n}}}P_1^{H_{K_n}}e_k\|^2=\|\sum_{k\in S_j^{H_{K_n}}}c_k(P_1^{H_{K_n}})\|^2=\frac{2j(n-j)}{n}.$
\item $\sigma_2=\|\sum_{k\in S_j^{H_{K_n}}}P_2^{H_{K_n}}e_k\|=\|\sum_{k\in S_j^{H_{K_n}}}c_k(P_2^{H_{K_n}})\|=\frac{j}{n}\| c_1(J_{2n})\|=\frac{j}{n}(2n)^{1/2}.$
\end{enumerate}
Now we have to consider three possible cases.
\begin{enumerate}
\item {\bf Case 1:} $j=n.$
Then from \eqref{polcarmod} we can assert that:
\begin{eqnarray}
P_{G_{K_n,j}}(x)&=&P_{G_{K_n}}(x)\left( x- \frac{2n}{x-(n-1)} \right)\nonumber\\
&=&\frac{P_{G_{K_n}}(x)}{(x-(n-1))}(x^2-(n-1)x-2n),
\end{eqnarray}
and
\begin{eqnarray}
P_{H_{K_n,j}}(x)=\frac{P_{H_{K_n}}(x)}{(x-(n-1))}(x^2-(n-1)x-2n).
\end{eqnarray}
Let $Q_1(x)=(x^2-(n-1)x-2n)$ and let $\lambda_1, \lambda_3$ be its roots; it is easily seen that $\lambda_1\lambda_3<0$. Finally, letting $\lambda_2=-1<0$ we conclude this part of the proof.
\item {\bf Case 2:} $j=n-1.$
In this case from \eqref{polcarmod} we obtain that the characteristic polynomials have the following expressions:
\begin{eqnarray}
P_{G_{K_n,j}}(x)&=&\frac{P_{G_{K_n}}(x)}{(x-(n-1))(x+1)}x\left(nx^2+n(2-n) x- (3n^2-7n+4) \right)\nonumber\,
\end{eqnarray}
and
\begin{eqnarray}
P_{H_{K_n,j}}(x)&=&\frac{P_{H_{K_n}}(x)}{(x-(n-1))(x+1)}x\left(nx^2+n(2-n) x- (3n^2-7n+4) \right).\nonumber\
\end{eqnarray}
Let $Q_2(x)=x\left(nx^2+n(2-n) x- (3n^2-7n+4) \right)$ and $\lambda_1, \lambda_2, \lambda_3$ its roots with $\lambda_2=0$. It is easily seen that $\lambda_1\lambda_3<0$ and this concludes the proof in this case.
\item {\bf Case 3:} $1\leq j \leq n-2.$
We have that
\begin{eqnarray}
P_{G_{K_n,j}}(x)&=&\frac{P_{G_{K_n}}(x)}{(x-(n-1))(x+1)}Q_3(x),\nonumber\
\end{eqnarray}
and
\begin{eqnarray}
P_{H_{K_n,j}}(x)&=&\frac{P_{H_{K_n}}(x)}{(x-(n-1))(x+1)}Q_3(x), \nonumber\
\end{eqnarray}
where $Q_3(x)=x^3+(2-n) x^2- (n-1+2j)x+2j((n-1)-j).$
We denote by $\lambda_1, \lambda_2, \lambda_3$ the real roots of $Q_3$ such that $\lambda_3\leq \lambda_2\leq \lambda_1.$ We know that $\lambda_3<0$, since $\lim\limits_{x\to -\infty} Q_3(x)=-\infty$ and $Q_3(0)=2j((n-1)-j)>0.$ On the other hand, by Descartes' rule of signs, we conclude that $\lambda_2$ and $\lambda_1$ are positive real numbers and this finishes the proof.
\end{enumerate}
\end{proof}
\begin{remark}
From the previous proof we obtain that
\begin{equation}\label{polcargnj}
P_{G_{K_n,j}}(x)=(x-1)^{n-1}(x+1)^{n-2}(x+(n-1))Q_{n, j}(x)
\end{equation}
and
\begin{equation}\label{polcarhnj}
P_{H_{K_n,j}}(x)=(x-1)^{2n-3}(x-(n-1))Q_{n, j}(x),
\end{equation}
with $Q_{n, j}(x)=x(x+1)(x-(n-1))-2j(x-(n-1-j)).$
\end{remark}
Next, we present a generalization of the previous construction via coalescence operation between two graphs. If $G$ and $H$ are two graphs, $g\in V(G)$ and $h\in V(H)$, the \emph{coalescence} between $G$ and $H$ at $g$ and $h$, denoted $G\cdot H (g,h:v_{g,h})$ or $G\cdot_{g}^h H$, is the graph obtained from $G$ and $H$, by identifying vertices $g$ and $h$ (see Fig.~\ref{fig: coalescence}). We use $G\cdot H$ for short.
\begin{figure}
\caption{The coalescence of graphs $G$ and $H$ at vertices $g$ and $h$.}
\label{fig: coalescence}
\end{figure}
In the 70s Schwenk published an article containing useful formulas for the characteristic polynomial of a graph~\cite{Schwenk1974}. The main result of this research is based on the following Schwenk's formula, linking the characteristic polynomial of two graphs and the coalescence between them. More details on next result can be found in~\cite{CRS-1997}.
\begin{lemma}\cite{Schwenk1974}\label{lem: Schwenk}
Let $G$ and $H$ be two graphs. If $g\in V(G)$, $h\in V(H)$, and $F=G\cdot H$, then
\[P_F(x)=P_G(x)P_{H-h}(x)+P_{G-g}(x)P_H(x)-xP_{G-g}(x)P_{H-h}(x).\]
\end{lemma}
\begin{remark}
Combining Remark \ref{scnocoe} and Theorem \ref{addvertex}, we obtain a pair of graphs singularly cospectral but not (almost) cospectral via coalescence. More precisely, let $G_{K_n, j}^v, H_{K_n, j}^w$ with $n\geq 3$ and $1\leq j\leq n$ as above. For each $k\geq 1$, the graph $G_{K_n, j, k}^v$ is defined inductively as follows: $ G_{K_n, j, 1}^{v_1}:=G_{K_n, j}^{v}$ and $G_{K_n, j, k}^{v_{k}}= G_{K_n, j, k-1}^{v_{k-1}}\cdot G_{K_n, j}^{\widetilde{v}_{k-1}}(v_{k-1}, \widetilde{v}_{k-1}:v_k)$ for $k\geq 2.$ Analogously, we consider the graphs $H_{K_n, j, 1}^{w_1}:=H_{K_n, j}^{w}$ and $H_{K_n, j, k}^{w_k}= H_{K_n, j, k-1}^{w_{k-1}}\cdot H_{K_n, j}^{\widetilde{w}_{k-1}} (w_{k-1}, \widetilde{w}_{k-1}:w_k).$ Then $G_{K_n,j, k}^{v_k}$ and $H_{K_n, j, k}^{w_k}$ are \textbf{NCSC}.
\end{remark}
In Remark~\ref{scnocoe} we present families of pairs of regular graphs which are singularly cospectral. To finish the section we will show that if we have a pair of singularly cospectral graphs, on the same number of vertices, and one of them is regular, then the other one is also regular.
\begin{proposition}\label{igual_max_autovalor}
Let $G, H$ be two graphs such that $G$ is a $d$-regular graph, $\lambda_1(G)=\lambda_1(H)$ and $d=\frac{2|E(H)|}{|V(H)|}$. Then
$H$ is $d$-regular.
\end{proposition}
\begin{proof}
We denote by $n=|V(G)|$ and $m=|E(G)|.$ As $G$ is a regular graph, by Proposition \ref{propiedades_basicas} we have that $$d=\frac{2m}{n}=\lambda_1(G)=\lambda_1(H).$$
Then, by Proposition \ref{propiedades_basicas} we can conclude that $H$ is a $d$-regular graph.
\end{proof}
\begin{corollary}
Let $G, H$ be two graphs with the same numbers of vertices, such that $G$ is a $d$-regular graph and $H$ a non-regular one. Then $G$ cannot be singularly cospectral with $H$.
\end{corollary}
\begin{proof}
As in the previous statement we denote by $n=|V(G)|=|V(H)|$ and $m=|E(G)|.$ Suppose that $G$ and $H$ are singularly cospectral; then from Propositions \ref{prop_nec} and \ref{propiedades_basicas} we have $m=|E(H)|$ and
$$d=\frac{2|E(H)|}{|V(H)|}=\lambda_1(G)=s_1(G)=s_1(H)=\lambda_1(H),$$
thus $H$ is a $d$-regular graph as a consequence of Proposition \ref{igual_max_autovalor}. But this contradicts our hypothesis and concludes the proof.
\end{proof}
\section{Families of graphs where singularly cospectral implies almost cospectral}\label{sec: c implies sc}
Since for two graphs to be singularly cospectral it is not necessary that they have the same number of vertices, the notion of almost cospectral is nearer to singularly cospectral than cospectral. Two graphs are {\em almost cospectral} if their nonzero eigenvalues (and their multiplicities) coincide. The connected components of $P_3\times P_3$, $C_4$ and $K_{1,4}$ are almost cospectral, see \cite[Theorem 3.16]{CvetkovicDDGT88}. For more details and results in connection with almost cospectral graphs we refer the reader to~\cite{BeinekeLW04} and the references therein. Notice that if two almost cospectral graphs have the same number of vertices, then they are cospectral. Hence, in Section~\ref{sec: construction sc noncospectral}, we have presented constructions of singularly cospectral graphs which are not almost cospectral. This section is devoted to presenting families of graphs where the notions of singularly cospectral and cospectral are equivalent, namely, bipartite graphs (Theorem~\ref{scbipartito}), connected graphs having maximum singular value with multiplicity at least two (Theorem~\ref{mayoroiguala2}), and connected graphs having the same inertia (Theorem~\ref{thm: inertia}).
\begin{theorem}\label{scbipartito}
Let $G, H$ be two singularly cospectral bipartite graphs. Then they are almost cospectral.
\end{theorem}
\begin{proof}
Let $\lambda_1\geq \lambda_2\geq \cdots\geq\lambda_n$ and $\mu_1\geq \mu_2\geq \cdots\geq \mu_m$ be the non-zero eigenvalues of $G$ and $H$,
respectively. Since $G$ and $H$ are singularly cospectral, we have $n=m$, and since the spectrum of a bipartite graph is symmetric about $0$, $n$ is even. It follows that, for any $1\leq i\leq \frac{n}{2}$, it holds
$$\lambda_i=-\lambda_{n+1-i}\qquad {\rm and} \qquad \mu_i=-\mu_{n+1-i}.$$ Since the singular values of a symmetric matrix are the
absolute values of its nonzero eigenvalues, then
$$\lambda_1, \lambda_1, \lambda_2, \lambda_2, \cdots, \lambda_{\frac{n}{2}}, \lambda_{\frac{n}{2}}
$$
and
$$\mu_1, \mu_1, \mu_2, \mu_2, \cdots, \mu_{\frac{n}{2}}, \mu_{\frac{n}{2}},
$$
are the singular values of $G$ and $H$, respectively. This shows that the graphs are almost cospectral.
\end{proof}
\begin{theorem}\label{mayoroiguala2}
Let $G, H$ be two connected and singularly cospectral graphs such that their largest singular value has multiplicity greater than or equal to $2$. Then, $G$ and $H$ are almost cospectral.
\end{theorem}
\begin{proof}
Let $s_1> s_2>\cdots> s_k$ be the non-zero singular values of $G$ with multiplicity $m_1,m_2,\cdots, m_k$, respectively. If we denote by $\lambda_1(G)$ and $\mu_1(H)$ the largest eigenvalue of $G$ and $H$, respectively, then $s_1=\lambda_1(G)=\mu_1(H)$. On the other hand, by the connectivity of both graphs such eigenvalues are simple. As by hypothesis, $s_1$ has multiplicity greater or equal to 2, then the smallest eigenvalue of $G$ and $H$ is equal to $-\lambda_1(G).$ So by Proposition \ref{propiedades_basicas} and Theorem \ref{scbipartito}, we conclude that both graphs are almost cospectral.
\end{proof}
\begin{remark}
The connectivity condition cannot be dropped as a hypothesis from~Theorem~\ref{mayoroiguala2}. See Fig.~\ref{fig: connectivity theorem 5.2} for an example of two graphs, one of them disconnected, which are singularly cospectral but not almost cospectral.
\end{remark}
\begin{figure}
\caption{$\sigma(H_1)=\{[-1]^6, [3]^2\}$.}
\label{fig: connectivity theorem 5.2}
\end{figure}
\begin{proposition}\label{inertia_cond}
Let $G, H$ be two connected and singularly cospectral graphs with exactly three different singular values, without considering their multiplicities, such that $n(G)=n(H)$ or $p(G)=p(H).$ Then they are almost cospectral.
\end{proposition}
\begin{proof}
Let $s_1> s_2> s_3$ be the non-zero singular values of $G$ with multiplicity $m_1,m_2, m_3$, respectively. If we denote by $\lambda_1(G)$ and $\mu_1(H)$ the largest eigenvalue of $G$ and $H$, respectively, then $s_1=\lambda_1(G)=\mu_1(H)$. On the other hand, by the connectivity of both graphs such eigenvalues are simple.
The non-zero eigenvalues of $G$ and $H$ are included in the set
$$
\{\pm s_1, \pm s_2, \pm s_3\}.
$$
Moreover, for any $i\in \{1, 2, 3\}$, the non-zero eigenvalues of $G$ are exactly $s_i$ and $-s_i$ with multiplicity $m_i^+$ and $m_i^-$ respectively. Analogously, the non-zero eigenvalues of $H$ are $s_i$ and $-s_i$ with multiplicity $\hat{m}_i^+$ and $\hat{m}_i^-$, respectively. Obviously, it holds that
$m_i^++m_i^-=m_i=\hat{m}_i^++\hat{m}_i^-$ with
$m_i^+, m_i^-, \hat{m}_i^+, \hat{m}_i^- \geq 0.$
This gives two cases to consider:
\begin{enumerate}
\item {\bf Case 1:} $m_1\geq 2$.\\
From Proposition \ref{mayoroiguala2} we have that $G$ and $H$ are almost cospectral.
\item {\bf Case 2:} $m_1=1.$
As $m_1=1$, we have $m_1^+=\hat{m}_1^+=1$ and $m_1^-=\hat{m}_1^-=0.$ Using the well-known fact that the sum of all eigenvalues of a graph is always zero, we have the following equalities
\begin{equation}\label{sistema_s}
\left\{ \begin{array}{lcc}
s_1+(m_2^+-m_2^-)s_2+(m_3^+-m_3^-)s_3=0. \\
\\
s_1+(\hat{m}_2^+-\hat{m}_2^-)s_2+(\hat{m}_3^+-\hat{m}_3^-)s_3=0.
\end{array}
\right.
\end{equation}
From the hypothesis about the positive or negative inertia, one of the following identities holds
\begin{equation}\label{positivo}
1+m_2^++m_3^+=1+\hat{m}_2^++\hat{m}_3^+,
\end{equation}
or
\begin{equation}\label{negativo}
m_2^-+m_3^-=\hat{m}_2^-+\hat{m}_3^-.
\end{equation}
From now on, without loss of generality, we assume that $p(G)=p(H)$. Then, by \eqref{sistema_s} and \eqref{positivo}, we have that
$$
(m_2^+-\hat{m}_2^+)(s_2-s_3)=0.
$$
As $s_2-s_3>0$, we conclude that $m_2^+-\hat{m}_2^+=0=m_3^+-\hat{m}_3^+$. This shows that $G$ and $H$ are almost cospectral graphs and it concludes the proof.
\end{enumerate}
\end{proof}
\begin{remark}
The condition about the negative or positive inertia cannot be dropped from Proposition~\ref{inertia_cond}, as can
be seen in Fig.~\ref{fig: connectivity theorem 5.3}.
\end{remark}
\begin{figure}
\caption{$\sigma(H_1)=\{[-2]^{1},\dots\}$.}
\label{fig: connectivity theorem 5.3}
\end{figure}
As a consequence of Proposition~\ref{inertia_cond} the following result holds.
\begin{theorem}\label{thm: inertia}
Let $G, H$ be two connected and singularly cospectral graphs with exactly three different singular values, without counting their multiplicities, such that $In(G)=In(H)$. Then, $G$ and $H$ are almost cospectral.
\end{theorem}
In the above theorem, almost cospectral can be replaced by cospectral because $|V(G)|=|V(H)|$.
\end{document} |
\begin{document}
\title{Minimal invariant varieties and first integrals \\ for algebraic foliations}
\begin{center} { \small
Mathematisches Institut, Universit\"at Basel\\
Rheinsprung 21, Basel 4051, Switzerland\\
e-mail: [email protected]}
\end{center}
\begin{abstract}
Let $X$ be an irreducible algebraic variety over $\mathbb C$, endowed with an algebraic foliation ${\cal{F}}$.
In this paper, we introduce the notion of minimal invariant variety $V({\cal{F}},Y)$ with respect to $({\cal{F}},Y)$, where $Y$
is a subvariety of $X$. If $Y=\{x\}$ is a smooth point where the foliation is regular, its minimal invariant
variety is simply the Zariski closure of the leaf passing through $x$. First we prove that for very generic
$x$, the varieties $V({\cal{F}},x)$ have the same dimension $p$. Second we generalize a result due to X.Gomez-Mont (see \cite{G-M}). More
precisely, we prove the existence of a dominant rational map $F:X\rightarrow Z$, where $Z$ has dimension $(n-p)$, such that
for very generic $x$, the Zariski closure of $F^{-1}(F(x))$ is one and only one minimal invariant variety of a point.
We end up with an example illustrating both results.
\end{abstract}
\section{Introduction}
Let $X$ be an affine irreducible variety over $\mathbb C$, and ${\cal{O}}_{X}$ its ring of regular functions. Let
${\cal{F}}$ be an algebraic foliation, i.e. a collection of algebraic vector fields on $X$ stable by Lie
bracket. We consider the elements of ${\cal{F}}$ as $\mathbb C$-derivations on the ring ${\cal{O}}_{X}$. In this paper,
we are going to extend the notion of algebraic solution for ${\cal{F}}$: this will be the minimal tangent
varieties for ${\cal{F}}$. We will study some of their properties and relate them to the existence of rational
first integrals for ${\cal{F}}$.
Recall that a subvariety $Y$ of $X$ is an algebraic solution of ${\cal{F}}$ if $Y$ is the closure (for the metric topology)
of a leaf of ${\cal{F}}$. A non-constant rational function $f$ on $X$ is a first integral if $ \partial(f)=0$ for any $ \partial$ in ${\cal{F}}$. Since the works
of Darboux, the existence of such varieties has been extensively
studied in the case of codimension 1 foliations (see \cite{Jou},\cite{Gh},\cite{Bru}). In particular,
from these works, we know that only two cases may occur for codimension 1 foliations:
\begin{itemize}
\item{${\cal{F}}$ has finitely many algebraic solutions,}
\item{${\cal{F}}$ has infinitely many algebraic solutions, and a rational first integral.}
\end{itemize}
So rational first integrals appear if and only if all leaves of ${\cal{F}}$ are algebraic solutions. In this case, the fibres of any
rational first integral are finite unions of closures of leaves. This fact has been generalised by Gomez-Mont
(see \cite{G-M}) in the following way.
\begin{thh}
Let $X$ be a projective variety and ${\cal{F}}$ an algebraic foliation on $X$ such that all leaves are quasi-projective.
Then there exists a rational map $F:X\to Y$ such that, for every generic point $y$ of $Y$, the Zariski closure
of $F^{-1}(y)$ is the closure of a leaf of ${\cal{F}}$.
\end{thh}
We would like to find a version of this result that does not need all leaves to be algebraic. To that purpose, we need to give a correct
definition to the algebraic object closest to a leaf. A good candidate would be the Zariski closure of a leaf,
but this choice may raise difficulties due to the singularities of both $X$ and ${\cal{F}}$. We circumvent this problem by the following
algebraic approach.
Let $Y$ be an algebraic subvariety of $X$ and $I_Y$ the ideal of vanishing functions on $Y$. Let ${\cal{J}}$ be the set of ideals
$I$ in ${\cal{O}}_{X}$ satisfying the two conditions:
$$
(i) \quad (0) \; \subseteq I \; \subseteq I_Y \quad \quad {\rm{and}} \quad \quad (ii) \quad \forall \; \partial \; \in \; {\cal{F}}, \quad \partial(I)\; \subseteq\; I
$$
Since $(0)$ belongs to ${\cal{J}}$, ${\cal{J}}$ is non-empty and it is partially ordered by the inclusion. Since
it is obviously inductive, ${\cal{J}}$ admits a maximal element $I$. If $J$ is any other ideal of ${\cal{J}}$,
then $I+J$ enjoys the conditions $(i)$ and $(ii)$, hence it belongs to ${\cal{J}}$. By maximality, we have
$I=I+J$ and $J$ is contained in $I$. Therefore $I$ is the unique maximal element of ${\cal{J}}$, which
we denote by $I({\cal{F}},Y)$.
\begin{df}
The minimal invariant variety $V({\cal{F}},Y)$ is the zero set of $I({\cal{F}},Y)$ in $X$.
\end{df}
From a geometric viewpoint, $V({\cal{F}},Y)$ can be seen as the smallest subvariety containing $Y$ and invariant by the flows of
all elements of ${\cal{F}}$. In particular, if $x$ is a smooth point of $X$ where the foliation is regular, then $V({\cal{F}},x)$ is
the Zariski closure of the leaf passing through $x$. In section \ref{order}, we show that $V({\cal{F}},Y)$ is irreducible if $Y$
is itself irreducible. \\
In this paper, we would like to study the behaviour of these invariant varieties, and relate it to the
existence of first integrals. We analyze some properties of the function:
$$
n_{{\cal{F}}} : X \longrightarrow \mathbb{N}, \quad x \longmapsto dim\; V({\cal{F}},x)
$$
Let ${\cal{M}}$ be the $\sigma$-algebra generated by the Zariski topology on $X$. A function $f: X\rightarrow
\mathbb{N}$ is {\em measurable for the Zariski topology} if $f^{-1}(p)$ belongs to ${\cal{M}}$ for any $p$.
The space ${\cal{M}}$
contains in particular all countable intersections $\theta$ of Zariski open sets. A property ${\cal{P}}$ holds for {\em
every very generic point $x$ in $X$} if ${\cal{P}}(x)$ is true for any point $x$ in such an intersection $\theta$.
\begin{thh} \label{mesure}
Let $X$ be an affine irreducible variety over $\mathbb C$ and ${\cal{F}}$ an algebraic foliation on $X$. Then the function $n_{{\cal{F}}}$ is
measurable for the Zariski topology. Moreover there exists
an integer $p$ such that $(1)$
$n_{{\cal{F}}}(x)\leq p$ for any point $x$ in $X$ and $(2)$ $n_{{\cal{F}}}(x)=p$ for any very generic point $x$ in $X$.
\end{thh}
Set $p=max\; dim \; V({\cal{F}},x)$ and note that $p$ is achieved for every generic point of $X$. In the last section, we will
produce an example of a foliation on $\mathbb C^4$ where the function $n_{{\cal{F}}}$ is measurable but not constructible for the Zariski
topology. In this sense, theorem \ref{mesure} is the best result one can expect for any algebraic foliation.
Let $K_{{\cal{F}}}$ be the
field generated by $\mathbb C$ and the rational first integrals of ${\cal{F}}$. By construction,
the invariant varieties $V({\cal{F}},x)$ are defined set-theoretically, and they seem to appear randomly, i.e.
with no link within each other. In fact there does exist some order among them, and we are going to
see that they are "mostly" given as the fibres of a rational map. More precisely:
\begin{thh} \label{fibration}
Let $X$ be an affine irreducible variety over $\mathbb C$ of dimension $n$ and ${\cal{F}}$ an algebraic foliation on $X$.
Then there exists a dominant rational map $F: X\rightarrow Y$, where $Y$ is irreducible of dimension $(n-p)$,
such that for every very generic point $x$ of $X$, the Zariski closure of $F^{-1}(F(x))$ is equal to
$V({\cal{F}},x)$. In particular,
the transcendence degree of $K_{{\cal{F}}}$ over $\mathbb C$ is equal to $(n-p)$.
\end{thh}
The idea of the proof is to construct enough rational first integrals. These will
be the coordinate functions of the rational map $F$ given above. The construction
consists in choosing a codimension $d$ irreducible variety $H$ in $X$. We show
there exists an integer $r>0$ such that, for every very generic point $x$ of $X$,
$V({\cal{F}},x)$ intersects $H$ in $r$ distinct
points $y_1,...,y_r$.
We then obtain a correspondence:
$$
{\cal{H}}: x\longmapsto \{y_1,...,y_r\}
$$
We can modify ${\cal{H}}$ so as to get a rational map $F$ that represents every $r$-uple $\{y_1,...,y_r\}$
by a single point. Since the image of $x$ only depends on the intersection of $V({\cal{F}},x)$ with $H$, the
map $F$ will be invariant with respect to the elements of ${\cal{F}}$. \\
One question may arise after these two results. Does there exist an effective way of computing these
minimal invariant varieties and detect the presence of rational first integrals? For instance,
we may attempt to use the description of the ideals $I({\cal{F}},Y)$ given by lemma \ref{autre}. Unfortunately we cannot hope
to compute them in a finite number of steps bounded, for instance, by the degrees of the components
of the vector fields of ${\cal{F}}$. Indeed, consider the well-known derivation $ \partial$ on $\mathbb C^2$:
$$
\partial=px\frac{ \partial}{ \partial x} + qy\frac{ \partial}{ \partial y}
$$
For any couple of non-zero coprime integers $(p,q)$, this derivation will have $f(x,y)=x^qy^{-p}$ as a rational first integral,
and we cannot find another one of smaller degree. The minimal invariant varieties of points will be given in general by the fibres
of $f$. Therefore we cannot bound the degree of the generators of $I({\cal{F}},x)$ solely by the degree of $ \partial$.
However, we may find them by an inductive process. For one derivation, an approach is given in the paper of J.V.Pereira
via the notion of extatic curves (see \cite{Pe}). The idea is to compute a series of Wronskians attached to the
derivation. Then one of them vanishes identically if and only if the derivation has a rational first integral.
\\
Last thing to say is that the previous results carry over all algebraic irreducible varieties. Given an algebraic
variety $X$ with an algebraic foliation, we choose a covering of $X$ by open affine sets $U_i$ and work
on the $U_i$. For any algebraic subvariety $Y$ of $X$, we define the minimal invariant variety $V({\cal{F}},Y)$ by gluing
together the Zariski closure of the varieties $V({\cal{F}},Y\cap U_i)$ in $X$.
\section{The contact order with respect to ${\cal{F}}$} \label{order}
In this section, we are going to show that the minimal invariant variety $V({\cal{F}},Y)$ is irreducible if $Y$ is
irreducible. This result is already known when ${\cal{F}}$ consists of one derivation (see \cite{Ka}). We
could reproduce the proof given in \cite{Ka} for any set of derivations, but we prefer to adopt another
strategy. We will instead introduce a notion of contact order with respect to ${\cal{F}}$, and we will use it to
show that $I({\cal{F}},Y)$ is prime if $I_Y$ is prime. Denote by $M_{{\cal{F}}}$ the ${\cal{O}}_{X}$-module spanned by the
elements of ${\cal{F}}$. We start by giving the following characterisation of $I({\cal{F}},Y)$.
\begin{lem} \label{autre}
$\displaystyle I({\cal{F}},Y)=\left\{ f \in I_Y, \; \forall \partial_1,..., \partial_k \in M_{{\cal{F}}}, \; \partial_1\circ ...\circ
\partial_k(f)\in I_Y \right\}$
\end{lem}
{\em Proof: } Let $f$ be an element of $I_Y$ such that $ \partial_1\circ ...\circ \partial_k(f)$ belongs to $I_Y$ for any
$ \partial_1,..., \partial_k$ in $M_{{\cal{F}}}$. Then $ \partial_1\circ ...\circ \partial_k(f)$ belongs to $I_Y$ for any elements
$ \partial_1,..., \partial_k$ of ${\cal{F}}$. Let $I$ be the ideal generated by $f$ and all the elements of the form
$ \partial_1\circ ...\circ \partial_k(f)$, where every $ \partial_i$ lies in ${\cal{F}}$. By construction, this ideal is
contained in $I_Y$, and is stable
by every derivation of ${\cal{F}}$. Therefore $I$ is contained in $I({\cal{F}},Y)$, and a fortiori $f$ belongs
to $I({\cal{F}},Y)$. We then have the inclusion:
$$
\left\{ f \in I_Y, \; \forall \partial_1,..., \partial_k \in M_{{\cal{F}}}, \; \partial_1\circ ...\circ \partial_k(f)\in I_Y \right\}\subseteq
I({\cal{F}},Y)
$$
Conversely let $f$ be an element of $I({\cal{F}},Y)$. Since $I({\cal{F}},Y)$ is contained in $I_Y$ and is stable by
every derivation of ${\cal{F}}$, $ \partial_1\circ ...\circ \partial_k(f)$ belongs to $I_Y$ for any elements
$ \partial_1,..., \partial_k$ of ${\cal{F}}$. Since $M_{{\cal{F}}}$ is spanned by ${\cal{F}}$, $ \partial_1\circ ...\circ \partial_k(f)$
belongs to $I_Y$ for any $ \partial_1,..., \partial_k$ in $M_{{\cal{F}}}$
\begin{flushright} $\blacksquare$\end{flushright}
Since the space
of $\mathbb C$-derivations on ${\cal{O}}_{X}$ is an ${\cal{O}}_{X}$-module of finite type and ${\cal{O}}_{X}$ is noetherian,
$M_{{\cal{F}}}$ is finitely generated as an ${\cal{O}}_{X}$-module.
Let $\{ \partial_1,..., \partial_r\}$ be a system of generators of $M_{{\cal{F}}}$. If $I=(i_1,...,i_n)$ belongs to $\{1,...,r\}^n$,
we set $ \partial_I = \partial_{i_1}\circ ...\circ \partial_{i_n}$ and $|I|=n$. By convention $\{1,...,r\}^0=\{\emptyset\}$,
$|\emptyset|=0$ and
$ \partial_{\emptyset}$ is the identity on ${\cal{O}}_{X}$. We introduce the following map:
$$
ord_{{\cal{F}},Y}: {\cal{O}}_{X} \longrightarrow \mathbb{N} \cup \{+\infty\}, \quad f \longmapsto \inf\left\{|I|, \; \partial_I(f)\not\in
I_Y\right\}
$$
\begin{df}
The map $ord_{{\cal{F}},Y}$ is the contact order with respect to $({\cal{F}},Y)$.
\end{df}
By lemma \ref{autre}, $f$ belongs to $I({\cal{F}},Y)$ if and only if $ord_{{\cal{F}},Y}(f)=+\infty$, and $f$ does not belong to
$I_Y$ if and only if $ord_{{\cal{F}},Y}(f)=0$. A priori, the map $ord_{{\cal{F}},Y}$
depends on the set of generators chosen for $M_{{\cal{F}}}$. We are going to see that it only depends on ${\cal{F}}$. Let
$\{d_1,...,d_s\}$ be another set of generators for $M_{{\cal{F}}}$, and define in an analogous way the map $ord_{{\cal{F}},Y} '$
corresponding to this set. By assumption there exist some elements $a_{i,j}$ of ${\cal{O}}_{X}$ such that:
$$
\partial_i= \sum_{j=1} ^s a_{i,j} d_j
$$
By Leibniz rule, it is easy to check via an induction on $|I|$ that there exist some elements $a_{I,J}$ in ${\cal{O}}_{X}$ such
that:
$$
\partial_I = \sum_{|J|\leq |I|} a_{I,J} d_J
$$
Let $f$ be an element of ${\cal{O}}_{X}$ such that $ord_{{\cal{F}},Y}(f)=n$. Then there exists an index $I$ of length $n$ such that:
$$
\partial_I(f) = \sum_{|J|\leq n} a_{I,J} d_J(f) \; \not\in \; I_Y
$$
Since $I_Y$ is an ideal, this means there exists an index $J$ of length $\leq n$ such that $d_J(f)$ does not belong
to $I_Y$. By definition we get that $ord_{{\cal{F}},Y} '(f)\leq n=ord_{{\cal{F}},Y}(f)$ for any $f$. By symmetry we find that
$ord_{{\cal{F}},Y} '(f)=ord_{{\cal{F}},Y}(f)$ for any $f$, and the maps coincide.
\begin{prop} \label{contact}
If $Y$ is irreducible, the contact order enjoys the following properties:
\begin{itemize}
\item{$ord_{{\cal{F}},Y}(f+g)\geq \inf\{ord_{{\cal{F}},Y}(f),ord_{{\cal{F}},Y}(g)\}$ with equality if $ord_{{\cal{F}},Y}(f)\not=ord_{{\cal{F}},Y}(g)$,}
\item{$ord_{{\cal{F}},Y}(fg)=ord_{{\cal{F}},Y}(f)+ord_{{\cal{F}},Y}(g)$ for all $f,g$ in ${\cal{O}}_{X}$.}
\end{itemize}
\end{prop}
{\it Proof of the first assertion}: If $ord_{{\cal{F}},Y}(f)=ord_{{\cal{F}},Y}(g)=+\infty$, then $f,g$ both belong to $I({\cal{F}},Y)$,
$f+g$ belongs to $I({\cal{F}},Y)$ and the result follows. So assume that $ord_{{\cal{F}},Y}(f)$ is finite and for simplicity that
$n=ord_{{\cal{F}},Y}(f)\leq ord_{{\cal{F}},Y}(g)$. For any index $I$ of length $<n$, $ \partial_{I}(f)$ and $ \partial_{I}(g)$ both belong to
$I_Y$. So $ \partial_{I}(f+g)$ belongs to $I_Y$ for any $I$ with $|I|<n$, and $ord_{{\cal{F}},Y}(f+g)\geq n$. Therefore we have
for all $f,g$:
$$
ord_{{\cal{F}},Y}(f+g)\geq \inf\{ord_{{\cal{F}},Y}(f),ord_{{\cal{F}},Y}(g)\}
$$
Assume now that $ord_{{\cal{F}},Y}(f)<ord_{{\cal{F}},Y}(g)$. Then there exists an index $I$ of length $n$ such that $ \partial_{I}(f)$
does not belong to $I_Y$. Since $|I|< ord_{{\cal{F}},Y}(g)$, $ \partial_{I}(g)$ belongs to $I_Y$. Therefore $ \partial_{I}(f+g)$
does not belong to $I_Y$ and $ord_{{\cal{F}},Y}(f+g)\leq n$, so that $ord_{{\cal{F}},Y}(f+g)= n$.
\begin{flushright} $\blacksquare$\end{flushright}
For the second assertion, we will need the following lemmas. The first one is easy to get via Leibniz rule, by
an induction on the length of $I$.
\begin{lem} \label{calcul2}
Let $ \partial_1,..., \partial_r$ be a system of generators of $M_{{\cal{F}}}$.
Then there exist some elements $\alpha_{I_1,I_2}$ of $\mathbb C$, depending on $I$ and such that
for all $f,g$:
$$
\partial_I (fg)= \sum_{|I_1|+|I_2|=|I|} \alpha_{I_1,I_2} \partial_{I_1} (f) \partial_{I_2}(g)
$$
\end{lem}
\begin{lem} \label{calcul3}
Let $f$ be an element of ${\cal{O}}_{X}$ such that $ord_{{\cal{F}},Y}(f)\geq n$. Let $I=(i_1,...,i_n)$ be any index. For any
rearrangement $J=(j_1,...,j_n)$ of the $i_k$, $ \partial_J(f)- \partial_I(f)$ belongs to $I_Y$.
\end{lem}
{\em Proof: } Every rearrangement of the $i_k$ can be obtained after a composition of transpositions on two consecutive
terms. So we only need to check the lemma in the case $J=(i_1,...,i_{l+1},i_l,...,i_n)$. If we denote by
$I_1,I_2$ the indices $I_1=(i_1,...,i_{l-1})$ and $I_2=(i_{l+2},...,i_n)$, then we find:
$$
\partial_J - \partial_I = \partial_{I_1} \circ [ \partial_{i_l}, \partial_{i_{l+1}}] \circ \partial_{I_2}
$$
Since $M_{{\cal{F}}}$ is stable by Lie bracket, $d=[ \partial_{i_l}, \partial_{i_{l+1}}]$ belongs to $M_{{\cal{F}}}$. Then $ \partial_J - \partial_I$
is a composite of $(n-1)$ derivations that span $M_{{\cal{F}}}$. Since $ord_{{\cal{F}},Y}$ is independent of the set of generators
and $ord_{{\cal{F}},Y}(f)\geq n$, $ \partial_J (f) - \partial_I(f)$ belongs to $I_Y$.
\begin{flushright} $\blacksquare$\end{flushright}
{\it Proof of the second assertion of Proposition \ref{contact}}: Let $f,g$ be a couple of elements of ${\cal{O}}_{X}$. If either $f$ or $g$
has infinite contact order, then one of them belongs to $I({\cal{F}},Y)$ and $fg$ belongs to $I({\cal{F}},Y)$, so that
$ord_{{\cal{F}},Y}(fg)=+\infty = ord_{{\cal{F}},Y}(f) + ord_{{\cal{F}},Y}(g)$.
Assume now that $ord_{{\cal{F}},Y}(f)=n$ and $ord_{{\cal{F}},Y}(g)=m$ are finite. By lemma \ref{calcul2}, we have:
$$
\partial_I (fg)= \sum_{|I_1|+|I_2|=|I|} \alpha_{I_1,I_2} \partial_{I_1} (f) \partial_{I_2}(g)
$$
Since $|I_1|+|I_2|<n+m$, either $|I_1|<n$ or $|I_2|<m$, and $ \partial_{I_1} (f) \partial_{I_2}(g)$ belongs to $I_Y$. So
$ \partial_I (fg)$ belongs to $I_Y$ and we obtain:
$$ord_{{\cal{F}},Y}(fg)\geq n+m$$
Conversely, consider the following polynomials $P,Q$ in the indeterminates $x,t_1,...,t_r$:
$$
P(x,t_1,...,t_r)=(t_1 \partial_1 +...+ t_r \partial_r)^n(f)(x) \quad , \quad Q(x,t_1,...,t_r)=(t_1 \partial_1 +...+ t_r \partial_r)^m(g)(x)
$$
By lemma \ref{calcul3}, we get that $ \partial_I(f)\equiv \partial_J(f)\; [I_Y]$ for any rearrangement $J$ of $I$ if $I$
has length $n$. Idem for $ \partial_I(g)$ and $ \partial_J(g)$ if $I$ has length $m$. Therefore in the expressions of $P,Q$,
everything happens modulo $I_Y$ as if the derivations $ \partial_i$ commuted. We then obtain the following expansions
modulo $I_Y$:
$$
P\equiv \sum_{i_1+...+i_r=n} \frac{n!}{i_1 ! ...i_r!} t_1 ^{i_1}...t_r ^{i_r} \partial_1 ^{i_1} \circ ...\circ \partial_r ^{i_r}(f) \; [I_Y]
$$
$$
Q\equiv \sum_{i_1+...+i_r=m} \frac{m!}{i_1 ! ...i_r!} t_1 ^{i_1}...t_r ^{i_r} \partial_1 ^{i_1} \circ ...\circ \partial_r ^{i_r}(g)\; [I_Y]
$$
Since $ord_{{\cal{F}},Y}(f)=n$ and $ord_{{\cal{F}},Y}(g)=m$, both $P$ and $Q$ have at least one coefficient that does not belong to $I_Y$
by lemma \ref{calcul3}. So neither of them belongs to the ideal $I_Y[t_1,...,t_r]$, which is prime because $I_Y$ is prime. So
$PQ$ does not belong to $I_Y[t_1,...,t_r]$. If $ \partial=t_1 \partial_1+...+t_r \partial_r$, then we have by Leibniz rule:
$$
\partial^{n+m}(fg)= \sum_{k=0} ^{n+m} C_{n+m} ^k \partial^k(f) \partial^{n+m-k} (g)
$$
Since $ord_{{\cal{F}},Y}(f)=n$ and $ord_{{\cal{F}},Y}(g)=m$, $ \partial^k(f) \partial^{n+m-k} (g)$ belongs to $I_Y[t_1,...,t_r]$ except
for $k=n$. So $ \partial^{n+m}(fg)= C_{n+m} ^{n}PQ$ does not belong to $I_Y[t_1,...,t_r]$. Choose a point $(y,z_1,...,z_r)$
in $Y\times \mathbb C^r$ such that $PQ(y,z_1,...,z_r)\not=0$ and set $d=z_1 \partial_1+...+z_r \partial_r$. By construction we have:
$$
d^{n+m}(fg)(y)=C_{n+m} ^{n}PQ(y,z_1,...,z_r)\not=0
$$
So $d^{n+m}(fg)$ does not belong to $I_Y$ and $fg$ has contact order $\leq n+m$ with respect to the system of generators
$\{ \partial_1,..., \partial_r,d\}$. Since the contact order does not depend on the system of generators, we find:
$$
ord_{{\cal{F}},Y}(fg)=n+m=ord_{{\cal{F}},Y}(f) +ord_{{\cal{F}},Y}(g)
$$
\begin{flushright} $\blacksquare$\end{flushright}
\begin{cor} \label{irreductible}
Let $Y$ be an irreducible subvariety of $X$. Then the ideal $I({\cal{F}},Y)$ is prime. In particular, the minimal
invariant variety $V({\cal{F}},Y)$ is irreducible.
\end{cor}
{\em Proof: } Let $f,g$ be two elements of ${\cal{O}}_{X}$ such that $fg$ belongs to $I({\cal{F}},Y)$. Then $fg$ has infinite contact order.
By proposition \ref{contact}, either $f$ or $g$ has infinite contact order. So one of them belongs to $I({\cal{F}},Y)$,
and this ideal is prime.
\begin{flushright} $\blacksquare$\end{flushright}
\section{Behaviour of the function $n_{{\cal{F}}}$}
In this section we are going to establish theorem \ref{mesure} about the measurability of the function
$n_{{\cal{F}}}$ for the Zariski topology. Recall that a function $f: X \rightarrow \mathbb{N}$ is lower
semi-continuous for the Zariski topology if the set $f^{-1}([0,r])$
is closed for any $r$. Note that such a function is continuous for the constructible topology.
We begin with the following lemma.
\begin{lem} \label{etape}
Let $F$ be a finite dimensional vector subspace of ${\cal{O}}_{X}$. Then the map $\varphi_F: X \rightarrow
\mathbb{N}, \; x\mapsto dim_{\mathbb C} \;F -dim_{\mathbb C} \; I({\cal{F}},x)\cap F$ is lower semi-continuous for the Zariski topology.
\end{lem}
{\em Proof: } For any fixed finite-dimensional vector space $F$, consider the affine algebraic set:
$$
\Sigma_F=\left\{ (x,f) \in X\times F, \; \forall d_1,...,d_m \in M_{{\cal{F}}}, \; d_1\circ...\circ d_m(f)(x)=0\right\}
$$
together with the projection $\Pi: \Sigma_F \longrightarrow X, \; (x,f)\longmapsto x$. Since $\Sigma_F$ is affine,
there exists a finite collection of linear operators $\Delta_1,...,\Delta_r$, obtained by composition of elements of
$M_{{\cal{F}}}$,
such that:
$$
\Sigma_F=\left\{ (x,f) \in X\times F, \; \Delta_1(f)(x)=...=\Delta_r(f)(x)=0\right\}
$$
By lemma \ref{autre}, the fibre $\Pi^{-1}(x)$ is isomorphic to $I({\cal{F}},x)\cap F$ for any point $x$ of $X$. Since every
$\Delta_i$ is linear, $\Delta_i$ can be considered as a linear form on $F$ with coefficients in ${\cal{O}}_{X}$. So
the map $\Delta=(\Delta_1,...,\Delta_r)$ is represented by a matrix with entries in ${\cal{O}}_{X}$. We therefore have the
equivalence:
$$
f\in I({\cal{F}},x)\cap F \quad \Longleftrightarrow \quad f \in ker \; \Delta(x)
$$
By the rank theorem, we have $\varphi_F(x)=rk \; \Delta(x)$. But the rank of this matrix is a lower semi-continuous function
because it is given as the maximal size of the minors of $\Delta$ that do not vanish at $x$. Therefore $\varphi_F$
is lower semi-continuous for the Zariski topology.
\begin{flushright} $\blacksquare$\end{flushright}
{\it Proof of theorem \ref{mesure}}: Since $X$ is affine, we may assume that $X$ is embedded in $\mathbb C^k$ for some $k$. We provide
$\mathbb C[x_1,...,x_k]$ with the filtration $\{F_n\}$ given by the polynomials of homogeneous degree $\leq n$. By Hilbert-Samuel
theorem (see \cite{Ei}), for any ideal $I$ of $\mathbb C[x_1,...,x_k]$, the function:
$$
h_I(n)=dim_{\mathbb C} \; F_n - dim_{\mathbb C} \; I\cap F_n
$$
is equal to a polynomial for $n$ large enough, and the degree $p$ of this polynomial coincides with the dimension of the variety
$V(I)$. It is therefore easy to show that:
$$
p= \lim_{n\to +\infty} \frac{\log(h_I(n))}{\log n}
$$
Let $\Pi: \mathbb C[x_1,...,x_k]\rightarrow {\cal{O}}_{X}$ be the morphism induced by the inclusion $X\hookrightarrow \mathbb C^k$, and set
$\widetilde{F_n}=\Pi(F_n)$. For any ideal $I$ of ${\cal{O}}_{X}$, consider the function:
$$
\widetilde{h_I}(n)=dim_{\mathbb C} \; \widetilde{F_n} - dim_{\mathbb C} \; I\cap \widetilde{F_n}
$$
Since $\Pi$ is onto, we have $\widetilde{h_I}(n)=h_{\Pi^{-1}(I)}(n)$, so that $\widetilde{h_I}(n)$ coincides for $n$ large
enough with a polynomial of degree $p$ equal to the dimension of $V(I)$. With the notation of lemma \ref{etape}, we obtain
for $I=I({\cal{F}},x)$:
$$
p=n_{{\cal{F}}}(x)= \lim_{n\to +\infty} \frac{\log(\widetilde{h_I}(n))}{\log n}=\lim_{n\to +\infty} \frac{\log(\varphi_{\widetilde{F_n}}(x))}{\log n}
$$
By lemma \ref{etape}, every $\varphi_{\widetilde{F_n}}$ is lower semi-continuous for the Zariski topology, hence measurable.
Since a pointwise limit of measurable functions is measurable, the function $n_{{\cal{F}}}$ is measurable for the Zariski topology.
Moreover since $\varphi_{\widetilde{F_n}}$ is lower semi-continuous, there exist a real number $r_n$ and an open set $U_n$ on
$X$ such that:
\begin{itemize}
\item{$\displaystyle \frac{\log(\varphi_{\widetilde{F_n}}(x))}{\log n}\leq r_n$ for any $x$ in $X$,}
\item{$\displaystyle \frac{\log(\varphi_{\widetilde{F_n}}(x))}{\log n}=r_n$ for any $x$ in $U_n$.}
\end{itemize}
Denote by $U$ the intersection of all $U_n$. Since this intersection is not empty, there exists an $x$ in $X$ for which
$\log(\varphi_{\widetilde{F_n}}(x))/\log n=r_n$ for any $n$, so that $r_n$ converges to a limit $p$. By passing to the limit,
we obtain that:
\begin{itemize}
\item{$n_{{\cal{F}}}(x)\leq p$ for any $x$ in $X$,}
\item{$n_{{\cal{F}}}(x)= p$ for any $x$ in $U$.}
\end{itemize}
Note that $p$ has to be an integer. The theorem is proved.
\begin{flushright} $\blacksquare$\end{flushright}
\section{The family of minimal invariant varieties} \label{set}
In this section, we are going to study the set of minimal invariant varieties associated to the points of $X$. The
result we will get will be the first step towards the proof of theorem \ref{fibration}. Let $M$ be the following set:
$$
M=\left\{(x,y) \in X\times X, \; y \in V({\cal{F}},x)\right\}
$$
together with the projection $\Pi: M\longrightarrow X, (x,y)\longmapsto x$. Note that for any $x$, the preimage $\Pi^{-1}(x)$
is isomorphic to $V({\cal{F}},x)$, so that the couple $(M,\Pi)$ parametrizes the set of all minimal invariant varieties. Our purpose is
to show that:
\begin{prop} \label{ferm}
The Zariski closure $\overline{M}$ is an irreducible affine set of dimension $dim\; X +p$, where $p$ is the maximum of the function
$n_{{\cal{F}}}$. Moreover, for every very generic point $x$ in $X$, $\overline{M} \cap \Pi ^{-1}(x)$ is equal to $\{x\}\times V({\cal{F}},x)$.
\end{prop}
The proof of this proposition is a direct consequence of the following lemmas.
\begin{lem}
The Zariski closure $\overline{M}$ is irreducible.
\end{lem}
{\em Proof: } For any $ \partial_i$ in ${\cal{F}}$, consider the new $\mathbb C$-derivation $\Delta_i$ on ${\cal{O}}_{X\times X}={\cal{O}}_{X} \otimes_{\mathbb C} {\cal{O}}_{X}$
given by the following formula:
$$
\forall f,g \in {\cal{O}}_{X}, \quad \Delta_i(f(x)\otimes g(y))=f(x)\otimes \partial_i(g)(y)
$$
It is easy to check that $\Delta_i$ is a well-defined derivation. Denote by ${\cal{G}}$ the collection of the $\Delta_i$, by $D$
the diagonal $\{(x,x), \; x \in X\}$ in $X\times X$ and set $M_0= V({\cal{G}},D)$. By corollary \ref{irreductible}, $M_0$ is
irreducible.
We are going to prove that $\overline{M}=M_0$.
First let us check that $M_0 \subseteq \overline{M}$. Let $f$ be a regular function on $X\times X$ that vanishes on $\overline{M}$.
Then $f(x,y)=0$ for any couple $(x,y)$ where $y$ belongs to $V({\cal{F}},x)$. If $\varphi_t(y)$ is the flow of $ \partial_i$ at $y$, then
$\psi_t(x,y)=(x,\varphi_t(y))$ is the flow of $\Delta_i$ at $(x,y)$. Since $y$ lies in $V({\cal{F}},x)$, $\varphi_t(y)$ belongs to
$V({\cal{F}},x)$ for any small value of $t$, and we obtain:
$$
f(\psi_t(x,y))=f(x,\varphi_t(y))=0
$$
By derivation with respect to $t$, we get that $\Delta_i(f)(x,y)=0$ for any $(x,y)$ in $M$. So $\Delta_i(f)$ vanishes along
$\overline{M}$, and the ideal $I(\overline{M})$ is stable by the family ${\cal{G}}$. Since it is contained in $I(D)$, we have the
inclusion:
$$
I(\overline{M})\subseteq I({\cal{G}},D)
$$
which implies that $M_0 \subseteq \overline{M}$.
Second let us show that $\overline{M}\subseteq M_0$. Let $f$ be a regular function that vanishes along $M_0$. Fix $x$ in $X$
and consider the function $f_x(y)=f(x,y)$ on $X$. Then for any $\Delta_1,...,\Delta_n$ in ${\cal{G}}$, we have:
$$
\Delta_1 \circ ...\circ \Delta_n(f)(x,y)= \partial_1\circ ...\circ \partial_n(f_x)(y)
$$
Since $M_0=V({\cal{G}},D)$, $D$ is contained in $M_0$ and $f_x(x)=0$. So $f(x,x)=0$ and for any $ \partial_1,..., \partial_n$ in ${\cal{F}}$ and
any $x$ in $X$, we get that:
$$
\partial_1\circ ...\circ \partial_n(f_x)(x)=0
$$
In particular, $f_x$ belongs to $I({\cal{F}},x)$ and $f_x$ vanishes along $V({\cal{F}},x)$. Thus $f$ vanishes on $\{x\}\times
V({\cal{F}},x)=\Pi^{-1}(x)$ for any $x$ in $X$. This implies that $f$ is equal to zero on $M$ and on $\overline{M}$, so that
$I({\cal{G}},D)\subseteq I(\overline{M})$. As a consequence, we find $\overline{M}\subseteq M_0$ and the result follows.
\begin{flushright} $\blacksquare$\end{flushright}
\begin{lem}
The variety $\overline{M}$ has dimension $\geq dim \; X+p$.
\end{lem}
{\em Proof: } Consider the projection $\Pi: \overline{M} \rightarrow X, \; (x,y)\mapsto x$. Since $M$ contains the diagonal $D$,
the map $\Pi$ is onto. By the theorem on the dimension of fibres, there exists a non-empty Zariski open set $U$ in $X$
such that:
$$
\forall x \in U, \quad dim \; \overline{M} = dim\; X + dim \; \Pi^{-1}(x) \cap \overline{M}
$$
By theorem \ref{mesure}, there exists a countable intersection $\theta$ of Zariski open sets in $X$ such that
$n_{{\cal{F}}}(x)=p$ for all $x$ in $\theta$. In particular, $U \cap \theta$ is non-empty. For any $x$ in
$U \cap \theta$, $\Pi^{-1}(x) \cap \overline{M}$ contains the variety $V({\cal{F}},x)$ whose dimension
is $p$, and this yields:
$$
dim \; \overline{M} \geq dim\; X + p
$$
\begin{flushright} $\blacksquare$\end{flushright}
\begin{lem} \label{note}
The variety $\overline{M}$ has dimension $\leq dim \; X+p$.
\end{lem}
{\em Proof: } Let $\{F_n\}$ be a filtration of ${\cal{O}}_{X}$ by finite-dimensional $\mathbb C$-vector spaces, and set:
$$
M_n =\left\{ (x,y) \in X\times X, \; \forall f \in I({\cal{F}},x)\cap F_n, \; f(y)=0 \right\}
$$
The sequence $\{M_n\}$ is decreasing for the inclusion, and $M=\cap_{n \in \mathbb{N}}\; M_n$. Moreover
every $M_n$ is constructible for the Zariski topology by Chevalley's theorem (see \cite{Ei}).
Indeed its complement in $X\times X$ is the
image of the constructible set:
$$
\Sigma_n =\left\{ (x,y,f) \in X\times X \times F_n , \; \forall \partial_1,..., \partial_k \in {\cal{F}}, \; \partial_1 \circ ...\circ
\partial_k(f)(x)=0 \; {\rm{and}} \; f(y)\not=0\right\}
$$
under the projection $(x,y,f)\mapsto (x,y)$. Since $D$ is contained in every $M_n$, the projection $\Pi: M_n \rightarrow
X$ is onto. By the theorem on the dimension of fibres applied to the irreducible components of $\overline{M_n}$, there
exists a non-empty Zariski open set $U_n$ in $X$ such that:
$$
\forall x \in U_n, \quad dim \; M_n \leq dim\; X + dim \; \Pi^{-1}(x) \cap M_n
$$
Since $\overline{M}\subseteq \overline{M_n}$ for any $n$, and $\Pi^{-1}(x) \cap M_n\simeq V(I({\cal{F}},x)\cap F_n)$, we obtain:
$$
\forall x \in U_n, \quad dim \; \overline{M} \leq dim\; X + dim \; V(I({\cal{F}},x)\cap F_n)
$$
Since every $U_n$ is open, the intersection $\theta'=\cap_{n\in \mathbb{N}} \; U_n$ is non-empty. Let $\theta$ be an intersection
of Zariski open sets of $X$ such that $n_{{\cal{F}}}(x)=p$ for any $x$ of $\theta$. For any fixed $x$ in
$\theta \cap \theta'$, we have:
$$
\forall n\in \mathbb{N}, \quad dim \; \overline{M} \leq dim\; X + dim \; V(I({\cal{F}},x)\cap F_n)
$$
Since ${\cal{O}}_{X}$ is noetherian, there exists an order $n_0$ such that $I({\cal{F}},x)$ is generated by $I({\cal{F}},x)\cap F_n$ for any
$n\geq n_0$. In this context, $V({\cal{F}},x)=V(I({\cal{F}},x)\cap F_n)$ for all $n\geq n_0$, and $V({\cal{F}},x)$ has dimension $p$,
which implies that:
$$
dim \; \overline{M} \leq dim\; X + p
$$
\begin{flushright} $\blacksquare$\end{flushright}
\begin{lem}
For every very generic point $x$ in $X$, $\overline{M} \cap \Pi ^{-1}(x)$ is equal to $\{x\}\times V({\cal{F}},x)$.
\end{lem}
{\em Proof: } Consider the constructible sets $M_n$ introduced in lemma \ref{note}. By construction their intersection is equal to $M$.
The $\{\overline{M_n}\}$ form a decreasing sequence which converges to $\overline{M}$. Since these are algebraic sets, there exists
an index $n_0$ such that for any $n\geq n_0$, we have $\overline{M_n}=\overline{M}$. We consider the sequence $\{M_n\}_{n\geq n_0}$
and denote by $G_n$ the Zariski closure of $\overline{M} - M_n$. By the theorem on the dimension of fibres, there exists a Zariski
open set $V_n$ on $X$ such that for any $x$ in $V_n$, either
$\Pi^{-1}(x)\cap G_n$ is empty or has dimension $<p$. Since $\Pi^{-1}(x)\cap M=\{x\}\times V({\cal{F}},x)$ for any $x$ in $X$, we have the following
decomposition:
$$
\Pi^{-1}(x)\cap \overline{M}= \{x\}\times V({\cal{F}},x) \cup \bigcup_{n\geq n_0} \Pi^{-1}(x)\cap G_n
$$
For all $x$ in $\theta=\cap V_n$, the set $\Pi^{-1}(x)\cap G_n$ has dimension $<p$ for any $n\geq n_0$, hence its Hausdorff dimension
is no greater than $(2p-2)$ (see \cite{Ch}). Consequently the countable union $\cup_{n\geq n_0} \Pi^{-1}(x)\cap G_n$ has a Hausdorff dimension
$<2p$. Let $H_{i,x}$ be the irreducible components of $\Pi^{-1}(x)\cap \overline{M}$ distinct from $\{x\}\times V({\cal{F}},x)$. These $H_{i,x}$
are covered
by the union $\cup_{n\geq n_0} \Pi^{-1}(x)\cap G_n$, hence their Hausdorff dimension does not exceed $(2p-2)$. Therefore the Krull
dimension of $H_{i,x}$ is strictly less than $p$ for any $i$ and any $x$ in $\theta$. If $H_x$ denotes the union of the $H_{i,x}$, then
we have for any $x$ in $\theta$:
$$
\Pi^{-1}(x)\cap \overline{M}= \{x\}\times V({\cal{F}},x) \cup H_x \quad \mbox{and} \quad dim\; H_x<p
$$
Now by Stein factorization theorem (see \cite{Ha}), the map $\Pi: \overline{M}\rightarrow X$ is a composite of a quasi-finite map
with a map whose generic fibres are irreducible. In particular $\Pi^{-1}(x)\cap \overline{M}$ is equidimensional of dimension $p$
for generic $x$ in $X$. Therefore the variety $H_x$ should be contained in $\{x\}\times V({\cal{F}},x)$, and we have for any $x$ in $\theta$:
$$
\Pi^{-1}(x)\cap \overline{M}= \{x\}\times V({\cal{F}},x)
$$
\begin{flushright} $\blacksquare$\end{flushright}
\section{Proof of theorem \ref{fibration}}
Let $X$ be an irreducible affine variety over $\mathbb C$ of dimension $n$, endowed with an algebraic foliation ${\cal{F}}$. Let $p$ be the integer
given by theorem \ref{mesure}. In this section we will establish theorem \ref{fibration}. We begin with
a few lemmas.
\begin{lem}
Let $F:X\rightarrow Y$ be a dominant morphism of irreducible affine varieties. Then for any Zariski open set $U$ in $X$,
$F(U)$ is dense in $Y$.
\end{lem}
{\em Proof: } Suppose on the contrary that $F(U)$ is not dense in $Y$. Then there exists a non-zero regular function $f$ on $Y$
that vanishes along $\overline{F(U)}$. The function $f\circ F$ vanishes on $U$, hence on $X$ by density. So $F(X)$ is
contained in $f^{-1}(0)$, which is impossible since this set is dense in $Y$.
\begin{flushright} $\blacksquare$\end{flushright}
\begin{lem} \label{prep}
Let $\overline{M}$ be the variety defined in section \ref{set}. Then there exists an irreducible variety $H$
in $X$ such that $\overline{M} \cap X\times H$ has dimension $n$ and the morphism $\Pi: \overline{M} \cap X\times H
\rightarrow X$ induced by the projection is dominant.
\end{lem}
{\em Proof: } Let $(x,y)$ be a smooth point of $\overline{M}$ such that $x$ is a smooth point of $X$. By the generic smoothness
theorem, we may assume that $d\Pi_{(x,y)}$ is onto. Consider the second projection $\Psi(x,y)=y$. Since the map $(\Pi,\Psi)$
defines an embedding of $\overline{M}$ into $X\times X$, and $d\Pi_{(x,y)}$ is onto, there exist some regular functions
$g_1,...,g_p$ on $X$ such that $(d\Pi_{(x,y)},{dg_1}_{(y)},...,{dg_p}_{(y)})$ is an isomorphism from $T_{(x,y)} \overline{M}$ to
$T_x X \oplus \mathbb C ^p$.
Let $G: \overline{M}\rightarrow \mathbb C^p$ be the map $(g_1,...,g_p)$, and denote by $E$ the set of points $(x,y)$ in
$\overline{M}$
where either $\overline{M}$ is singular
or $(\Pi,G)$ is not submersive. By construction $E$ is a closed set distinct from $\overline{M}$.
Since $dG_{(y)}$ has rank $p$ on $T_{(x,y)} \overline{M}$, the map $G:\overline{M}\rightarrow \mathbb C^p$
is dominant. So its generic fibres have dimension $n$. Fix a fibre $G^{-1}(z)$ of dimension $n$
that is not contained in $E$. Then there exists a smooth point $(x,y)$ in $G^{-1}(z)$ such that
$d(\Pi,G)_{(x,y)}$ is onto. The morphism $\Pi:G^{-1}(z)\rightarrow X$ is a submersion at $(x,y)$,
hence it is dominant. Moreover $G^{-1}(z)$ is of the form $X\times F^{-1}(z)\cap \overline{M}$, where $F:X
\rightarrow \mathbb C^p$ is the map $(g_1,...,g_p)$.
Choose an irreducible component $H$ of $F^{-1}(z)$ such that $\Pi: X\times H \cap \overline{M}\rightarrow X$ is dominant.
By construction $X\times H \cap \overline{M}$ has dimension $\leq n$. Since the latter map is dominant,
its dimension is exactly equal to $n$.
\begin{flushright} $\blacksquare$\end{flushright}
{\it Proof of theorem \ref{fibration}}: Let $H$ be an irreducible variety of codimension $p$ in $X$ satisfying the
conditions of lemma \ref{prep}. Denote by
$N$ the union of irreducible components of $\overline{M} \cap X\times H$ that are mapped dominantly on $X$ by $\Pi$.
By construction $N$ has dimension $dim\; X$ and the morphism $\Pi: N\rightarrow X$ is quasi-finite.
So there exists an open set $U$ in $X$ such that:
$$
\widetilde{\Pi}: \Pi^{-1}(U)\cap N \longrightarrow U
$$
is a finite unramified morphism. Let $r$ be the degree of this map. For any point $x$ in $U$, there exist $r$ points
$y_1,...,y_r$ in $H$ such that ${\widetilde{\Pi}}^{-1}(x)=\{y_1,...,y_r\}$. Let $\mathfrak{S}_r$ act on $H^r$ by
permutation
of the coordinates, i.e $\sigma.(y_1,...,y_r)=(y_{\sigma(1)},...,y_{\sigma(r)})$. Since this action is algebraic
and $\mathfrak{S}_r$ is finite, the algebraic quotient $H^r //\mathfrak{S}_r$ exists and is an irreducible affine
variety (see \cite{Mu}). Let $Q: H^r\rightarrow H^r //\mathfrak{S}_r$ be the corresponding quotient morphism.
Consider the mapping:
$$
\varphi: U \longrightarrow H^r //\mathfrak{S}_r , \quad x \longmapsto Q(y_1,...,y_r)
$$
Note that its graph is constructible in $U\times H^r //\mathfrak{S}_r$. Indeed it is given by the set:
$$
\Sigma=\left\{ (x,y'), \; \exists (y_1,...,y_r) \in H^r, \;\forall i\not=j, \; y_i\not=y_j, \; (x,y_i) \in \overline{M} \;
{\rm{and}} \; Q(y_1,...,y_r)=y' \right\}
$$
By Serre's theorem (see \cite{Lo}), $\varphi$ is a rational map on $U$. Since $\widetilde{\Pi}$ is unramified, $\varphi$ is also
holomorphic on $U$, hence it is regular on $U$. Denote by $Y$ the Zariski closure of $\varphi(U)$ in
$H^r //\mathfrak{S}_r$. Since $U$ is irreducible, $Y$ is itself irreducible.
By construction, for any $x$ in $U$, $\{x\}\times \varphi^{-1}(\varphi(x))$ is equal to $\Pi^{-1}(x)\cap \overline{M}$.
For every very generic point $x$ in $X$, $\Pi^{-1}(x)\cap \overline{M}$ corresponds to $\{x\}\times V({\cal{F}},x)$
by proposition \ref{ferm}.
So $\varphi^{-1}(\varphi(x))=V({\cal{F}},x)$ for every very generic point $x$ in $X$, hence it has dimension $p$. By the
theorem on the dimension of fibres, $Y$ has dimension $(n - p)$.
Since $\varphi^{-1}(\varphi(x))=V({\cal{F}},x)$ for every very generic point $x$ in $X$, this fibre is tangent to the
foliation ${\cal{F}}$. Since tangency is a closed condition, all the fibres of $\varphi$ are tangent to ${\cal{F}}$. Let
$f$ be a rational function on $Y$. In the neighborhood of any smooth point $x$ where ${\cal{F}}$ is regular and $f\circ \varphi$ is
well-defined, the function $f\circ \varphi$ is constant on the leaves of ${\cal{F}}$. So $f\circ \varphi$ is a rational first
integral of ${\cal{F}}$. Via the morphism $\varphi^*$ induced by $\varphi$, $K_{{\cal{F}}}$ is clearly isomorphic to $\mathbb C(Y)$
which has transcendence degree $(n-p)$ over $\mathbb C$.
\begin{flushright} $\blacksquare$\end{flushright}
\section{An example}
In this last section, we introduce an example that illustrates both theorems \ref{mesure} and \ref{fibration}. Consider the
affine space $\mathbb C^4$ with coordinates $(u,v,x,y)$, and the algebraic foliation ${\cal{F}}$ induced by the vector field:
$$
\partial=ux\frac{ \partial}{ \partial x} + vy \frac{ \partial}{ \partial y}
$$
For any $(\lambda,\mu)$ in $\mathbb C^2$, the plane $V(u-\lambda,v-\mu)$ is tangent to ${\cal{F}}$. Denote by $ \partial_{\lambda,\mu}$ the
restriction of $ \partial$ to that plane parametrized by $(x,y)$. Then two cases may occur:
\begin{itemize}
\item{If $[\lambda;\mu]$ does not belong to $\mathbb{P}^1(\mathbb{Q})$, then $ \partial_{\lambda,\mu}$ has no rational first integrals.
The only algebraic curves tangent to $ \partial_{\lambda,\mu}$ are the lines $x=0$ and $y=0$. There is only one singular point, namely
$(0,0)$.}
\item{If $[\lambda;\mu]$ belongs to $\mathbb{P}^1(\mathbb{Q})$, choose a couple of coprime integers $(p,q)\not=(0,0)$ such that
$p\lambda+q\mu=0$. The function $f(x,y)=x^py^q$ is a rational first integral for $ \partial_{\lambda,\mu}$. The algebraic curves tangent
to $ \partial_{\lambda,\mu}$ are the lines $x=0$, $y=0$ and the fibres $f^{-1}(z)$ for $z\not=0$. There is only one singular point, namely
$(0,0)$.}
\end{itemize}
From those two cases, we can get the following values for the function $n_{{\cal{F}}}$:
\begin{itemize}
\item{$n_{{\cal{F}}}(u,v,x,y)=2$ if $[u;v] \not\in\mathbb{P}^1(\mathbb{Q})$ and $xy\not=0$,}
\item{$n_{{\cal{F}}}(u,v,x,y)=0$ if $x=y=0$,}
\item{$n_{{\cal{F}}}(u,v,x,y)=1$ otherwise.}
\end{itemize}
In particular, this function is measurable but not constructible for the Zariski topology, as can be easily seen
from its fibre $n_{{\cal{F}}} ^{-1}(2)$. Moreover since $p=2$, its
field $K_{{\cal{F}}}$ has transcendence degree 2 over $\mathbb C$. In fact it is easy to check that $K_{{\cal{F}}}=\mathbb C(u,v)$.
\end{document} |
\begin{document}
\begin{abstract}
Given a triple cover $\pi : X \longrightarrow Y$ of varieties,
we produce a new variety $\mathfrak S_X$ and a birational morphism
$\rho_X : \mathfrak S_X \longrightarrow X$ which is an isomorphism away from
the fat-point ramification locus of $\pi$.
The variety $\mathfrak S_X$ has a natural interpretation in terms of the data
describing the triple cover, and the morphism $\rho_X$ has an elegant
geometric description.
\end{abstract}
\title{A small resolution for triple covers in algebraic geometry}
\section{Introduction}
The basic fact regarding a triple cover $\pi : X \longrightarrow Y$, proven in
\cite{miranda-3}, is that any such cover is determined by a rank~2
locally free sheaf $E$ on $Y$ and a global section $\sigma$ of
$S^3(E)^{*} \otimes \Lambda^2(E)$. Furthermore, $X$ can be realized
as a subvariety of the geometric vector bundle $\mathbb V(E)$ equipped with
the natural projection to $Y$.
In this article we give a necessary and sufficient criterion for $X$ to
be realized as a subvariety of a $\mathbb P^1$-bundle equipped with its
natural projection to $Y$: we show that $X$ can be so realized if and
only if $\pi$ has no fat triple ramification (a fat triple ramification
point of $\pi$ is a point $x \in X$ whose Zariski tangent space in the fibre of
$\pi$ has dimension~2).
Along the way, we show that to any triple cover $\pi : X \longrightarrow Y$,
one can associate a subvariety $\mathfrak S_X$ of $\mathbb P(E^{*})$ defined in terms
of the global section $\sigma$. This variety $\mathfrak S_X$ is equipped with
a birational morphism $\rho_X : \mathfrak S_X \longrightarrow X$, which has a
nice geometric interpretation. In fact $\rho_X$ is a sort of small
resolution: it is the blow-up of a Weil divisor in $X$, and its fibre over any
fat ramification point of $\pi$ is a $\mathbb P^1$,
but its exceptional set is in general of codimension
larger than 1 in $\mathfrak S_X$. We construct this resolution first in a
local case, and then we show that the construction globalizes. Throughout
this article we make extensive use of Miranda's analyses in \cite{miranda-3}.
We note that our main result is not new: a more general statement (with a
correspondingly more technical proof) can be found as Theorem~1.3 in the
beautiful paper \cite{casnati-ekedahl}. However, it is our hope that our
simple geometric description in the case of triple covers can provide some
insight into the more general case.
The authors wish to thank Ciro Ciliberto, Alberto Calabri, Flaminio Flamini,
Alfio Ragusa, and especially Rick Miranda for their extraordinary efforts
during the PRAGMATIC~2001 summer school in Catania. The authors also wish
to thank Mike Roth for teaching us about small resolutions, which turned out
to be precisely the right objects for describing the results of our
research.
\section{Some examples of small resolutions}
Let $\mathbb A^5$ have coordinates $x,y,z,w,t$, and let $\mathbb P^4$ be its
projectivization. Let $X \subset \mathbb P^4$ be the hypersurface
defined by the equation $xw - yz = 0$. If we restrict our attention
to the $\mathbb P^3 \subset \mathbb P^4$ where $t=0$, this same equation defines
a smooth quadric $Q \subset \mathbb P^3$; thus $X$ is just the projective
cone over $Q$ with vertex $[0:0:0:0:1]$.
It is clear that $X$ is smooth away from its vertex.
If $\epsilon : \widetilde{X} \longrightarrow X$ is the blow-up of the vertex,
then it is easy to see that $\widetilde{X}$ is a smooth variety, and
that the exceptional divisor is isomorphic to $Q = \mathbb P^1 \times \mathbb P^1$.
We are going to describe a method of resolving the singular point
of $X$ with a morphism $\rho : \Gamma \longrightarrow X$, where
$\Gamma$ is
a smooth variety, but where the exceptional set is a $\mathbb P^1$.
In particular, the exceptional set is ``too small'' to be a divisor;
thus $\rho$ will be an example of a {\em small resolution}.
We begin by choosing a line $L$ belonging to one of the two rulings of
the quadric $Q$; for ease in computation, we take $L$ to be the line
$x = y = 0$. Clearly $L$ is a Weil divisor on $Q$. It is easy to
check that $L$ is also a Cartier divisor: since there is no point on $Q$
at which $x,y,z,w$ all vanish, the two open sets of $Q$ where $z \neq 0$
and where $w \neq 0$ cover $L$. On the first set $L$ is defined by
$x=0$, and on the second set $L$ is defined by $y=0$.
Now let $D \subset X$ be the cone over $L$. Then $D$ is a Weil divisor
in $X$, but it is not Cartier: $D$ cannot be defined by only one
equation in any open neighborhood of the origin.
Our small resolution $\rho : \Gamma \longrightarrow X$ is the blow-up of
$X$ along $D$. (Note that once we show that $\Gamma$ is not isomorphic to
$X$, we will have proven indirectly that $D$ is not Cartier: the blow-up
of a Cartier divisor is always an isomorphism.)
Since $D$ can be defined by the two equations $x = y = 0$ in $X$, the
blow-up of $X$ along $D$ can be defined as the (closed) graph
of the rational map $\phi : X \dashrightarrow \mathbb P^1$, where
$\phi ([x:y:z:w:t]) = [x:y]$. This graph $\Gamma$ is a closed
subvariety of the
product $X \times \mathbb P^1$, and the morphism
$\rho : \Gamma \longrightarrow X$ is the restriction
of the first projection map.
Note that there is an open set of $X$ on which
the map $\phi$ agrees with the map $\psi$
sending $[x:y:z:w:t]$ to $[z:w]$; that these two maps agree is a
consequence of the defining equation for $X$. Also note that at
any point of $X$ other than its vertex $[0:0:0:0:1]$, at least one
of $\phi$ and $\psi$ is defined. This observation enables us to
write down the defining equations for $\Gamma \subset X \times \mathbb P^1$:
if $u,v$ are coordinates on the $\mathbb P^1$ factor, then $\Gamma$ is
defined by $uy - vx = 0$ and $uw - vz = 0$. It follows that
$\rho$ is an isomorphism away from the vertex of $X$, and the
exceptional set over the vertex is $\mathbb P^1$. It is also easy to
check that $\Gamma$ is smooth.
We now present an alternative way of describing the variety
$\Gamma$. We may regard $\Gamma$ as a subvariety of
$\mathbb P^4 \times \mathbb P^1$, defined by the three equations
$xw - yz = 0$, $uy - vx = 0$, and $uw - vz = 0$.
These three equations may be expressed in a single matrix
equation:
\[
\Gamma = \left\{ [x:y:z:w:t] \times [u:v] \in \mathbb P^4 \times \mathbb P^1
\mbox{ such that }
\left[
\begin{array}{cc} x & y \\ z & w \end{array}
\right]
\left[
\begin{array}{c} -v \\ u \end{array}
\right] = 0
\right\}.
\]
The first of the three equations is now seen to express the
vanishing of a determinant,
which is necessary and sufficient for the second and third
equations to have
a nonzero solution.
Note that in this description of $\Gamma$, the morphism
$\rho$ is the restriction of the natural projection from
$\mathbb P^4 \times \mathbb P^1$ to $\mathbb P^4$.
As a second example, we take $X \subset \mathbb P^6$ to be the projective
cone over $\mathbb P^2 \times \mathbb P^1$, with vertex $[\vec{0}:1]$. By
analogy with the previous example, we consider a divisor
$D \subset X$ which is the cone over one of the $\mathbb P^1 \times \mathbb P^1$
``rulings''
of $\mathbb P^2 \times \mathbb P^1$; for example, we can take $D$ to be
defined by $x_0=x_1=0$. As before, blowing up this divisor gives
a small resolution $\rho : \Gamma \longrightarrow X$ which is an
isomorphism away from the vertex of $X$, and whose fibre
over the vertex is a $\mathbb P^1$. A
computation similar to the previous one gives that $\Gamma$ may
be realized as a subvariety of $\mathbb P^6 \times \mathbb P^1$, using a
matrix condition:
\[
\Gamma = \left\{ [\vec{x}:t] \times [u:v] \in \mathbb P^6 \times \mathbb P^1
\mbox{ such that }
\left[
\begin{array}{cc} x_0 & x_1 \\
x_2 & x_3 \\
x_4 & x_5 \end{array}
\right]
\left[
\begin{array}{c} -v \\ u \end{array}
\right] = 0
\right\}.
\]
This single matrix equation expresses six quadratic conditions:
the vanishing of the three $2 \times 2$ minors, which are the
defining equations for $X \subset \mathbb P^6$, and the three row
equations, which come from the blow-up computation.
As before, the morphism $\rho$ is just the restriction
of the natural projection from $\mathbb P^6 \times \mathbb P^1$ to $\mathbb P^6$.
Note that by simply eliminating the variable $t$ in the
above matrix description,
we can define $\Gamma$ as a subvariety of $\mathbb A^6 \times \mathbb P^1$,
where $\mathbb A^6$ is the finite ($t \neq 0$) part of $\mathbb P^6$.
This construction is the one that will provide us with a sort of
universal local picture of our resolution for triple covers.
\section{The local picture of the resolution}
Consider the affine space $\mathbb A^4$ with coordinates $A,B,C,D$,
and let $F$ be the free sheaf of rank~2 on this affine space. Then
$\mathbb V(F)$ is nothing more than the affine space $\mathbb A^6$ with
coordinates $A,B,C,D,z,w$; here $z,w$ are global sections that
generate $F$. Let $\mathfrak X$ be the subvariety of $\mathbb V(F)$ defined by
the three quadrics
\begin{eqnarray*}
z^2 & = & Az + Bw + 2(A^2 - BD) \\
zw & = & -Dz - Aw + (BC-AD) \\
w^2 & = & Cz + Dw + 2(D^2 - AC).
\end{eqnarray*}
By the results in Miranda's paper \cite{miranda-3}, we know that
the projection of $\mathbb V(F)$ to $\mathbb A^4$ sending
$(A,B,C,D,z,w)$ to $(A,B,C,D)$ restricts to a triple cover
$\Pi : \mathfrak X \longrightarrow \mathbb A^4$.
Now, as pointed out in \cite{miranda-3}, the variety $\mathfrak X$ is
determinantal: it is the locus in $\mathbb V(F)$ where
the matrix
\[
\left[
\begin{array}{cc} z+A & B \\
C & w+D \\
w-2D & z-2A \end{array}
\right]
\]
has rank at most one. By a result in \cite{miranda-3}, the rank of this
matrix is zero if and only if the map $\Pi$ has fat triple
ramification over the point $(A,B,C,D)$; it is clear from the
matrix description that this happens only over the point
$(0,0,0,0)$.
This determinantal representation is familiar: up to a change
of coordinates on $\mathbb A^6$, we see that $\mathfrak X$ is just the affine cone
over $\mathbb P^2 \times \mathbb P^1$. Furthermore, we see that the vertex
of this cone -- its only singular point -- is exactly the fat
triple point where $A=B=C=D=z=w=0$. The temptation to compute
its small resolution is overwhelming, and so we define
$\Gamma \subset \mathfrak X \times \mathbb P^1$
to be the subvariety of $\mathbb V(F) \times \mathbb P^1$
defined by the matrix condition
\[
\left[
\begin{array}{cc} z+A & B \\
C & w+D \\
w-2D & z-2A \end{array}
\right]
\left[
\begin{array}{c} -v \\ u \end{array}
\right] = 0.
\]
We know from our previous computation that the natural projection
$\rho : \Gamma \longrightarrow \mathfrak X$ is an isomorphism away from
the fat point, and that the fibre over this point
is all of $\mathbb P^1$. We will refer to the morphism
$\rho : \Gamma \longrightarrow \mathfrak X$ as the {\em resolution of the
triple cover $\Pi$}.
We note that $\Gamma$ comes equipped with a morphism $\phi$ to
$\mathbb A^4 \times \mathbb P^1$: $\phi$ is just the product of
$\Pi \circ \rho$ with the second projection of $\Gamma$ to $\mathbb P^1$.
We are going to compute the image of $\phi$. To do
this, we first note that if $(A,B,C,D,z,w) \times [u:v]$ is a point
of $\Gamma$, then we can solve for $z$ and $w$ in terms of the
other coordinates, using the first two rows of the matrix:
\begin{eqnarray}
\label{eq:phi-inverse} z &=& B\left(\frac{u}{v}\right) - A \\
\nonumber w &=& C\left(\frac{v}{u}\right) - D.
\end{eqnarray}
Here we assume that both $u$ and $v$ are nonzero; in the case that
either vanishes, the third row of the matrix can be used instead
of one of the first two. Continuing under the assumption that both
$u$ and $v$ are nonzero, we use the third row of the matrix to
compute that
\[
-v\left(C\left(\frac{v}{u}\right) - D - 2D\right)
+u\left(B\left(\frac{u}{v}\right) - A - 2A\right) = 0,
\]
and since $uv \neq 0$, we conclude that:
\begin{equation}
Bu^{3} - 3Au^{2}v + 3Duv^{2} - Cv^{3} = 0. \label{eq:local-cubic}
\end{equation}
We note that the same equation results from the computations in the
cases where $u=0$ or $v=0$.
Let $\mathfrak S \subset \mathbb A^4 \times \mathbb P^1$ be the subvariety defined by
equation~(\ref{eq:local-cubic}).
Let $\Pi' : \mathfrak S \longrightarrow \mathbb A^4$ be the obvious projection;
this projection is compatible via $\phi$ with the composite map
$\Pi \circ \rho : \Gamma \longrightarrow \mathbb A^4$. In fact, we have
the following result:
\begin{prop}
The morphism $\phi : \Gamma \longrightarrow \mathfrak S$ is an isomorphism
of varieties over $\mathbb A^4$.
\end{prop}
\begin{proof}
The fact that $\Pi \circ \rho = \Pi' \circ \phi$ is clear from
the definition of $\phi$, so we only need to show that $\phi$ is
an isomorphism. To do this, note that the equations~(\ref{eq:phi-inverse})
for $z$ and $w$ in terms
of $A,B,C,D$ define regular functions on all of $\mathfrak S$; this is
easily checked using equation~(\ref{eq:local-cubic}).
From the definition
\[
\phi((A,B,C,D,z,w) \times [u:v]) = (A,B,C,D) \times [u:v] ,
\]
we see that $\phi$ is surjective, and also that
the regular functions for $z$ and $w$ are sufficient to define
the inverse morphism. Thus $\phi$ is an isomorphism, as needed.
\end{proof}
\begin{cor}
Away from the point $(0,0,0,0) \in \mathbb A^4$, the three morphisms
$\Pi : \mathfrak X \longrightarrow \mathbb A^4$,
$\Pi \circ \rho : \Gamma \longrightarrow \mathbb A^4$, and
$\Pi' : \mathfrak S \longrightarrow \mathbb A^4$ are isomorphic triple cover maps.
\end{cor}
Now we are in a position to construct a triple cover resolution for
any sufficiently local triple cover $\pi : X \longrightarrow Y$. By
``sufficiently local'' we mean that $Y$ is affine, $E$ is a free
sheaf of rank~2 on $Y$, and $X$ is the
subvariety of $\mathbb V(E)$ defined by the three quadrics
\begin{eqnarray}
\label{eq:local-quadrics} z^2 & = & az + bw + 2(a^2 - bd) \\
\nonumber zw & = & -dz - aw + (bc-ad) \\
\nonumber w^2 & = & cz + dw + 2(d^2 - ac);
\end{eqnarray}
here the coefficients $a,b,c,d$ are regular functions on $Y$, and
$z,w$ are global sections that generate $E$. It follows from Miranda's
analysis in \cite{miranda-3} that this is in fact the local situation
for any triple cover.
Given such a sufficiently local triple cover, we
define a morphism $f : Y \longrightarrow \mathbb A^4$ by the formula
$f(y) = (a(y), b(y), c(y), d(y))$. This is equivalent to
requiring that $f^{*}(A) = a$, and so on; thus we have the
following commutative diagram:
\[
\begin{CD}
f^{*}\Gamma @>>> \Gamma \\
@V{f^{*}\rho}VV @VV{\rho}V \\
X=f^{*}\mathfrak X @>>> \mathfrak X \\
@V{\pi}VV @VV{\Pi}V \\
Y @>f>> \mathbb A^4
\end{CD}
\]
It is proven in \cite{miranda-3} that the fat points of
$X \subset \mathbb V(E)$ are precisely the points where $a=b=c=d=0$;
it follows that the morphism $f^{*}\rho$ is an isomorphism away
from the fat-point ramification locus of $\pi$,
and has a $\mathbb P^1$ fibre over any fat point in $X$.
We will refer to
$f^{*}\rho$ as the {\em resolution of the triple cover $\pi$}.
In this way we may view the right-hand side of the above diagram
as a sort of universal local picture of our triple cover resolution.
The reader with some skill in visualizing three-dimensional
commutative diagrams\footnote{At least, with more skill than the
authors have in drawing them.} will see that the isomorphism
$\phi : \Gamma \longrightarrow \mathfrak S$ pulls back via $f$ to an
isomorphism $f^{*}\phi : f^{*}\Gamma \longrightarrow f^{*}\mathfrak S$;
here $f^{*}\mathfrak S$ is the subvariety of $Y \times \mathbb P^1$ defined
by the equation
\[
bu^{3} -3au^2v + 3duv^2 - cv^{3} = 0.
\]
This is in fact a variety over $Y$, whose structure morphism
$\pi' : f^{*}\mathfrak S \longrightarrow Y$ is none other than $f^{*}\Pi'$.
We can similarly ``pull back'' our other result:
\begin{cor}
Let $B \subset Y$ be the (set-theoretic) image under $\pi$ of the
fat-point ramification locus in $X$. Away from $B$, the three
morphisms $\pi : X \longrightarrow Y$,
$\pi \circ f^{*}\rho : f^{*}\Gamma \longrightarrow Y$, and
$\pi' : f^{*}\mathfrak S \longrightarrow Y$ are isomorphic triple cover maps.
\end{cor}
Thus we reach the following interesting conclusion:
\begin{prop}\label{prop:local-result}
Let $\pi : X \longrightarrow Y$ be a sufficiently local triple cover;
as before, this means that $X$ is defined as a subvariety of a free rank~2
vector bundle on $Y$. If $\pi$ has no fat-point
ramification, then in fact $X$ is isomorphic as a triple cover to a
subvariety of a (trivial) $\mathbb P^1$-bundle over $Y$ equipped with the
natural projection.
\end{prop}
\begin{proof}
This is just a restatement of the isomorphism between $X$ and $f^{*}\mathfrak S$
from the previous corollary.
\end{proof}
\section{Geometric description of the resolution}
In this section we are going to describe geometrically the isomorphism
appearing in the preceding proposition. Along the way, we will also
describe the geometric meaning of the $\mathbb P^1$-bundle appearing there.
To begin, let $\pi : X \longrightarrow Y$ be any triple cover.
For convenience of notation, we set $F = \pi_*(\mathcal{O}_X)$. Following
\cite{miranda-3}, we have that
$F = \mathcal{O}_Y \oplus E$,
where $E$ is a rank~2 locally free sheaf on $Y$. If $U \subset Y$
is any open set over which $E$ is generated freely by two sections
$z,w$, then over $U$ we have that $X$ is defined as a subvariety
of $\mathbb V(E)$ by the quadrics~(\ref{eq:local-quadrics}); this makes
sense, because local sections of $E$ correspond to local coordinates
on $\mathbb V(E)$. Over a fixed $y \in Y$, the fibre of $\mathbb V(E)$ is an
affine plane, and the quadrics~(\ref{eq:local-quadrics}) cut out one,
two, or three points in this plane.
Our idea is to consider the function on $X$ that
is defined by sending a point in a fibre of $\pi$ to the line through the
other two points in the fibre; clearly we need to work a bit to understand
this idea. For one thing, this definition only makes sense for
fibres containing three distinct points of $X$. Still, we may hope
to define a rational map on $X$ whose locus of indeterminacy is
contained in the ramification locus of $\pi$. The greater difficulty
is understanding what the range of this function should be: we need to map
to a bundle whose fibre over a fixed point in $Y$ is the $\mathbb P^1$ of
lines in the fibre of $\mathbb V(E)$.
It turns out that we can make this idea work by considering the
inclusion $E \hookrightarrow \mathcal{O}_Y \oplus E = F$. This inclusion
allows us to identify the fibre
of $\mathbb V(E)$ with the ``finite part'' of the fibre of $\mathbb P(F)$,
which is a projective plane. The ``line at infinity'' in the fibre
of $\mathbb P(F)$ is identified with the fibre of $\mathbb P(E)$. (These
identifications follow from applying the functors $\mathbf{Spec}$ and
$\mathbf{Proj}$ to the stated inclusion.) As sets, we have that
$\mathbb P(F) = \mathbb V(E) \cup \mathbb P(E)$, and so we may view $X \subset \mathbb V(E)$
as a subvariety of $\mathbb P(F)$ that does not intersect $\mathbb P(E)$.
Now we are in a position to describe our putative rational map on $X$.
Over a point $y$ not in the branch locus of $\pi$, the fibre of $\pi$
consists of three distinct points $x_1,x_2,x_3$. The line through
any two of these points, say $x_2$ and $x_3$, is a line in the fibre
of $\mathbb P(F)$ over $y$. Such a line corresponds to a point $p_{2,3}$ in the
fibre of $\mathbb P(F^{*})$ over $y$. There is a natural projection
$\mathbb P(F^{*}) \dashrightarrow \mathbb P(E^{*})$ which dualizes the inclusion
$\mathbb P(E) \hookrightarrow \mathbb P(F)$; over the point $y$, this map is
the projection whose center is the point corresponding to the line
at infinity in the fibre of $\mathbb P(F)$ over $y$. We have that $p_{2,3}$ is
never equal to the center of this projection, because $X$ does not meet
$\mathbb P(E)$; thus we can project $p_{2,3}$ to a point $q_{2,3}$ in
the fibre of $\mathbb P(E^{*})$ over $y$. We define a rational map
$\psi : X \dashrightarrow \mathbb P(E^{*})$ by setting $\psi(x_1) = q_{2,3}$,
and similarly for $x_2,x_3$.
We have the following result:
\begin{prop}
If $\pi : X \longrightarrow Y$ is sufficiently local, then the map
$\psi$ is in fact rational, and its image is contained in
$f^{*}\mathfrak S \subset \mathbb P(E^{*}) = Y \times \mathbb P^1$. The restricted map
\[
\psi : X \dashrightarrow f^{*}\mathfrak S
\]
is birational, and the
isomorphism $f^{*}\phi : f^{*}\Gamma \longrightarrow f^{*}\mathfrak S$ is
the resolution of indeterminacy of this birational map.
\end{prop}
All of these claims can be checked easily (by the reader!)
once we establish the
expression for $\psi$ in terms of local coordinates on
$X \subset \mathbb V(E)$:
\begin{lemma}
The local expression for $\psi$ over a point $y \in Y$ is
\begin{eqnarray*}
\psi(y \times (z,w)) &=& y \times [z + a(y) : b(y)] \\
&=& y \times [c(y) : w + d(y)] \\
&=& y \times [w-2d(y) : z-2a(y)],
\end{eqnarray*}
where $z,w$ are coordinates on the fibre of $\pi$ over $y$, and $a,b,c,d$
are the sections of $\mathcal{O}_Y$ appearing as coefficients in the
equations~(\ref{eq:local-quadrics}).
\end{lemma}
Note that
the equivalence of the three expressions for $\psi$ is a consequence
of the equations~(\ref{eq:local-quadrics}) which define $X$ as a
subvariety of $\mathbb V(E)$.
\begin{proof}
In order to proceed, we need to recall a fact from \cite{miranda-3}
regarding the sheaf $E$: the $\mathcal{O}_Y$-algebra $\pi_{*}(\mathcal{O}_X)$ is in
fact a rank~3 $\mathcal{O}_Y$-module, and $E$ is the rank~2 submodule
consisting of sections that have zero trace over $\mathcal{O}_Y$. This means
that if we take local generators $z,w$ of $E$ as local coordinates
on $\mathbb V(E)$, then the vector sum of the points $(z_i,w_i)$
in any fibre of $\pi$ must be zero.
Now we can prove the lemma. Let $y \in Y$ be any point not in the
branch locus of $\pi$. Then the fibre of $\pi$ over $y$ consists of
three distinct points, which we denote $(z_1,w_1),(z_2,w_2),(z_3,w_3)$.
The fibre of $\mathbb V(E)$ over $y$ is an affine plane with coordinates
$z,w$; inside this plane, the line containing $(z_2,w_2)$ and $(z_3,w_3)$ is
given by
\[
-(w_3 - w_2)z + (z_3 - z_2) w + (z_2 w_3 - z_3 w_2) = 0.
\]
In local coordinates, then, we have
\[
\psi(z_1 , w_1) = [ -(w_3 - w_2) : (z_3 - z_2) ];
\]
this is understood to be a point in the fibre of $\mathbb P(E^{*})$ over $y$.
We claim that this expression agrees with the first one given in the
statement of the lemma. To see this, we use the
equations~(\ref{eq:local-quadrics}) and the zero trace observation above
to compute that
\begin{eqnarray*}
\left( z_1 + a(y) \right) (z_3 - z_2) &=& z_1 z_3 - z_1 z_2 + a(y)(z_3 - z_2) \\
&=& z_1 z_3 - z_1 z_2 + \left( z_{3}^2 - z_{2}^2 - b(y)(w_3 - w_2) \right) \\
&=& (z_1 + z_2 + z_3)(z_3 - z_2) - b(y)(w_3 - w_2) \\
&=& 0 - b(y)(w_3 - w_2) .
\end{eqnarray*}
This proves that $\psi(z_1,w_1) = [ z_1 + a(y) : b(y) ]$ on the open set where
$\pi$ is unramified and where this expression is defined.
Since we only require $\psi$ to be a rational map, the lemma is proved.
\end{proof}
This result shows that the locus of indeterminacy of $\psi$
is precisely the fat-point ramification locus of $\pi$, which in general
is a proper subset of the ramification locus of $\pi$. This is
consistent with the fact that reasonable definitions of the rational
map $\psi$ can be made for double ramification points and for
curvilinear triple ramification points; in these cases the Zariski
tangent spaces to the ramification points determine lines in the
fibres of $\pi$. At a fat point, the dimension of the Zariski tangent
space in the fibre is equal to 2, so there is no reasonable way
to define $\psi$ at such a point.
\section{Globalization}
Now we are going to define our resolution for an arbitrary triple cover
$\pi : X \longrightarrow Y$. The idea is straightforward: we know
from \cite{miranda-3} that $Y$ is covered by open affine sets $Y_i$ for
which the restricted triple covers $\pi : X_i \longrightarrow Y_i$
are sufficiently local, and we
have already defined the resolution for sufficiently local triple
covers. It remains to check that these local definitions patch
together compatibly to define a global resolution.
Recall that in defining
the universal local resolution $\rho : \Gamma \longrightarrow \mathfrak X$,
we constructed $\Gamma$ as a subvariety of $\mathbb V(F) \times \mathbb P^1$.
The unidentified factor of $\mathbb P^1$ is an obstruction to globalization:
if each sufficiently local resolution variety is defined as a
subvariety of $\mathbb V(E)_{|Y_i} \times \mathbb P^1$, then it is not clear how to interpret
the second factor as the restriction of a globally defined object.
Fortunately, we have seen how to remedy this: using the isomorphism
$\phi : \Gamma \longrightarrow \mathfrak S$, we will take $\mathfrak S$ to be our
resolution variety instead of $\Gamma$. Then we take the resolution
variety for a sufficiently local triple cover to be $\mathfrak S_i = f_{i}^{*}\mathfrak S$
instead of $f_{i}^{*}\Gamma$. In the previous section we showed that
each variety $\mathfrak S$ is naturally a subvariety of
$\mathbb P(E^{*})_{|Y_i}$. Thus we may hope to patch together the varieties
$\mathfrak S_i$ to construct a subvariety $\mathfrak S_X$ of $\mathbb P(E^{*})$.
Now we will invoke a beautiful result of Miranda from \cite{miranda-3}
to finish our construction. Miranda shows that any triple cover
$\pi : X \longrightarrow Y$ is determined by a rank~2 locally free sheaf
$E$ on $Y$ and a global section $\sigma$ of $S^3(E)^{*} \otimes \Lambda^2(E)$.
In fact, Miranda shows that if $a,b,c,d$ are the coefficients appearing
in the quadrics~(\ref{eq:local-quadrics}) that define $X_i$ as a
subvariety of $\mathbb V(E)_{|Y_i}$, then the local expression for $\sigma$ over
$Y_i$ is
\[
-b(z^3)^{*} + a(z^2)^{*}w^{*} - dz^{*}(w^2)^{*} + c(w^3)^{*}.
\]
Using the natural isomorphism $S^3(E)^{*} \cong S^3(E^{*})$, we get the
following local expression for $\sigma$:
\[
-\frac{1}{6}b(z^{*})^3 + \frac{1}{2}a(z^{*})^{2}w^{*} - \frac{1}{2}dz^{*}(w^{*})^{2} + \frac{1}{6}c(w^{*})^{3}.
\]
Up to a constant factor, this is just the cubic defining $\mathfrak S_i$ as a
subvariety of $\mathbb P(E^{*})_{|Y_i}$. Since $\sigma$ is a global section,
we conclude that the varieties $\mathfrak S_i$ must patch together to form a
variety $\mathfrak S_X \subset \mathbb P(E^{*})$. It is clear that the structure
morphisms patch together compatibly, and so we have the following result:
\begin{prop}
Let $\pi : X \longrightarrow Y$ be any triple cover, and
let $E$ be a rank~2 locally free sheaf on $Y$ such that $X \subset \mathbb V(E)$.
Then there is a variety $\mathfrak S_X \subset \mathbb P(E^{*})$
and a birational morphism $\rho_X : \mathfrak S_X \longrightarrow X$
which is an isomorphism away from the fat-point ramification locus of
$\pi$, and whose fibre over every fat point is a $\mathbb P^1$.
\end{prop}
We refer to the morphism $\rho_X : \mathfrak S_X \longrightarrow X$ as the {\em
resolution of the triple cover $\pi$}. Now we get the following
global result:
\begin{prop}
Let $\pi : X \longrightarrow Y$ be a triple cover. Then $X$ is isomorphic
as a triple cover to a subvariety of a $\mathbb P^1$-bundle on $Y$ equipped
with the natural projection if and
only if $\pi$ has no fat-point ramification.
\end{prop}
\begin{proof}
One implication is the global version of Proposition~\ref{prop:local-result};
the other implication
follows from the fact that the fibre of a subvariety of a $\mathbb P^1$-bundle
cannot have a two-dimensional Zariski tangent space.
\end{proof}
\end{document} |
\begin{document}
\title{Symplectic Matroids, Circuits, and Signed Graphs}
\author{Zhexiu Tu}
\address{University of the South\\ Department of Math and Computer Science\\
Sewanee, TN37355}
\email{[email protected]}
\subjclass[2000]{Primary 05B35; Secondary 05E15; 20F55; 05C25}
\keywords{Symplectic Matroids; Circuit Axiomatization, Signed Graphs}
\begin{abstract} One generalization of ordinary matroids is symplectic matroids. While symplectic matroids were initially defined by their collections of bases, there has been no cryptomorphic definition of symplectic matroids in terms of circuits. We give a definition of symplectic matroids by collections of circuits. As an application, we construct a class of examples of symplectic matroids from graphs in terms of circuits.
\end{abstract}
\maketitle
\section{Introduction}
A matroid is a combinatorial structure that generalizes the notion of linear independence in vector spaces. There are many textbooks on this subject. We refer the readers to \cite{oxley} for more background on matroids. There are different cryptomorphic characterizations of matroids, for example, in terms of bases, circuits, flats, etc. Below we list a matroid definition in terms of circuits.
\begin{Def}
A finite \textit{matroid} $M$ is a pair $(E,\mathcal{C})$, where $E$ is a finite set (called the ground set) and $\mathcal {C}$ is a family of subsets of $E$ (called the circuits) with the following properties:
\begin{enumerate}
\item[(C1)] $\emptyset \notin \mathcal{C}$.
\item[(C2)] $C_1 , C_2 \in \mathcal{C}$ and $C_2 \subseteq C_1$ implies $C_2 = C_1$.
\item[(C3)] $C_1, C_2 \in \mathcal{C}$ with $C_1 \neq C_2$ and $e \in C_1 \cap C_2$ implies there exists some $C_3 \in \mathcal{C}$ such that $C_3 \subset (C_1 \cup C_2 ) - \{ e \}$.
\end{enumerate}
\end{Def}
We refer to matroids as \textit{ordinary matroids}, to distinguish them from different generalizations of matroids, such as \textit{symplectic matroids}. Symplectic matroids are obtained when we replace the symmetric group with the \textit{hyperoctahedral group}, a group of symmetries of the $n$-cube $[-1, 1]^n$. Geometrically, symplectic matroids are related to the vector spaces endowed with bilinear forms, although in a way different from the way ordinary matroids are related to vector spaces. Symplectic matroids are a generalization of the following matroids that are all equivalent, $\Delta$-matroids \cite{bouch}, metroids \cite{bouch}, or 2-matroids \cite{bouch}.
Symplectic matroids were defined in \cite{borov} by Borovik, Gelfand and White using the maximality property of bases. In 2003, T. Chow defined symplectic matroids in terms of independent sets, and proved the equivalence between the two definitions in \cite{chow}. In \cite{chow}, Chow posed a complicated exchange property on independent sets, and proposed a conjectural exchange property on the collection of bases. We know of no progress toward defining symplectic matroids using any other axiomatizations similar to those of ordinary matroids.
In \cite{borov}, a special type of symplectic matroids, called \textit{Lagrangian matroids}, which turn out to be equivalent to $\Delta$-matroids, was studied. Borovik, Gelfand and White provided the circuit axiomatizations of Lagrangian matroids and proved the equivalence between the definitions. However, Lagrangian matroids are just a special case of all symplectic matroids. At present there are no circuit axiomatizations of symplectic matroids.
In this paper, we define symplectic matroids in terms of circuits. Some of these axioms resemble circuit axioms for ordinary matroids, including the circuit elimination axiom. We prove the equivalence between our definition and the definition by Borovik et al in \cite{borov}. As an application of this result, we show how every finite undirected multigraph gives rise to a symplectic matroid in terms of circuits.
It is worth mentioning that in \cite{borov} a special type of symplectic matroids, called the \textit{Lagrangian matroids}, was studied. Borovik, Gelfand and White provided and proved the equivalence between the definitions of Lagrangian matroids in terms of bases and circuits. Lagrangian matroids are a class of Coxeter matroids where we let $W = BC_n \cong S_2 \wr S_n$ and $P = S_n$. Hence they are the symplectic matroids where the size of each basis matches the size of the symplectic ground set. In other words, they are the full-rank symplectic matroids. Lagrangian matroids are also known as symmetric matroids \cite{bouch30}, $\Delta$-matroids \cite{bouch30}, metroids \cite{bouch35}, or 2-matroids \cite{bouch34}.
The structure of this paper is as follows. In Section~\ref{sec2}, we give basic definitions and terms that we will use in our proofs. In Section~\ref{sec3}, we give an alternative definition or axiomatization of a symplectic matroid in terms of circuits (Theorem~\ref{thm:main}), which is our main theorem. In Section~\ref{sec4}, we show that symplectic matroids always satisfy the circuit axioms that we have defined in Section~\ref{sec3}. In Section~\ref{sec5}, we go backwards and show that our circuit axioms guarantee symplectic matroids. Together these suffice to prove Theorem~\ref{thm:main}. In Section~\ref{sec6}, we apply Theorem~\ref{thm:main} and construct a class of examples of symplectic matroids from graphs in terms of circuits.
\section{Background and definitions}
\label{sec2}
In this section we give the basic definitions of symplectic matroids. Let
\[ [n] = \{1,2,\ldots,n \} \textrm{ and } [n]^* = \{1^*,2^*,\ldots,n^* \} \]
where the map $*: [n] \to [n]^*$ is defined by $i \mapsto i^*$ and $*: [n]^* \to [n]$ is defined by $i^* \mapsto i$. We apply $*$ to sets and collections of sets, for example $C^*$ and $\mathcal{C}^*$. Let
\[ E_{\pm n} : = [n] \cup [n]^*\]
be the new ground set. Thus $i^{**} = i$, which signifies that $*$ is an involutive permutation of $E_{\pm n}$. That is why sometimes we write $i^*$ as $-i$ and $E_{\pm n}$ can be thought of as a set equivalent to $\{ -n, -(n-1) , \ldots, -1 , 1, 2, \ldots , n \}$. We say a set $S$ is \textit{admissible} if $S \cap S^* = \emptyset$. A permutation $\omega$ of $E_{\pm n}$ is \textit{admissible} if $\omega (x^*) = \omega (x)^*$ for all $x \in E_{\pm n}$. An ordering $<$ on $E_{\pm n}$ is \textit{admissible} if and only if $<$ is a linear ordering and from $i < j$ it follows that $j^* < i^*$. Denote by $E_k$ the collection of all admissible $k$-subsets in $E_{\pm n}$, for $k < 2n$. If $<$ is an arbitrary linear ordering on $E_{\pm n}$, it induces the partial ordering (which we also denote by the same symbol $<$) on $E_k$: if $A, B \in E_k$ and
\[ A := \{ a_1 < a_2 < \ldots < a_k \} \textrm{ and } B := \{b_1 < b_2 < \ldots < b_k \},\]
we set $A \leq B$ if
\[ a_1 \leq b_1 , \,\, a_2 \leq b_2, \,\, \ldots , \,\, a_k \leq b_k. \]
We can visualize an admissible ordering as a signed permutation $\sigma$ of $[n]$ followed by the negative of the reversal of $\sigma$. For example, when $n = 3$
\[ 1 < 3 < 2^* < 2 < 3^* < 1^* \]
is one admissible ordering.
\begin{Def}
If $\mathcal{B}$ is a non-empty family
of equi-numerous admissible subsets of $E_{\pm n}$ with the property that for every admissible ordering $<$ of $E_{\pm n}$, the collection $\mathcal{B}$ always contains a unique maximal element, then $M = (E_{\pm n}; \mathcal{B})$ is a symplectic matroid, and $\mathcal{B}$ is called the collection of \textit{bases} of $M$.
\end{Def}
Below is an example of a non-symplectic matroid.
\begin{Ex}
Let $n = 3$ and $k = 2$, and let $\mathcal{B} = \{12, 2^*3, 13 \}$, where we use our abbreviated notation by listing $\{a, b\}$ as $ab$. Consider the admissible ordering $1 < 3 < 2^* < 2 < 3^* < 1^*$. Then $12$ and $2^* 3$ are incomparable in the induced ordering on $E_2$, and both are larger than $13$, hence $\mathcal{B}$ cannot be a symplectic matroid.
\end{Ex}
\section{Circuits}
\label{sec3}
Let $M = (E_{\pm n}; \mathcal{B})$ be a symplectic matroid, where $\mathcal{B}$ is the collection of bases of $M$. Let $\mathcal{C}$ be the collection of minimal admissible subsets of $E_{\pm n}$ not contained in any member of $\mathcal{B}$. That collection of subsets $\mathcal{C}$ is called the collection of \textit{circuits} of $M$. An admissible set containing no circuits as its subset is called an \textit{independent set}. Otherwise, it is \textit{dependent}.
We let $A \Delta B$ be the \textit{symmetric difference} between two sets $A$ and $B$ defined by $A \Delta B = A \cup B - A \cap B$. We give an important definition of the term \textit{span}.
\begin{Def}
Let $\mathcal{C}$ be a collection of admissible subsets of $E_{\pm n}$. Then an admissible set $P$ spans $x \in E_{\pm n}$ if there exist some $J \in \mathcal{C}$ such that $J - P = \{x\}$.
\end{Def}
A characterization of $\mathcal{C}$ could be used as an alternative definition or axiomatization of
a symplectic matroid. This is precisely what the following theorem provides, followed by an example.
\begin{restatable}{thm}{main}
\label{thm:main}
Let $\mathcal{B}$ be the collection of bases of a symplectic matroid. Let $\mathcal{C}$ be the collection of minimal admissible subsets of $E_{\pm n}$ not contained in any member of $\mathcal{B}$. Then $\mathcal{C}$ satisfies the following four properties.
\begin{enumerate}
\item[(SC1)] $\emptyset \notin \mathcal{C}$.
\item[(SC2)] If $C_1,C_2 \in \mathcal{C}$ with $C_1 \subseteq C_2$, then $C_1 = C_2$.
\item[(SC3)] If $C_1,C_2 \in \mathcal{C}$ with $C_1 \neq C_2$, $x \in C_1 \cap C_2$ and $C_1 \cup C_2$ is admissible, then there exists some $C \in \mathcal{C}$ with $C \subseteq (C_1 \cup C_2)-\{x\}$.
\item[(SC4)] Let $P$ be an admissible subset of $E_{\pm n}$ and $B \in \mathcal{B}$. If $|P| < |B|$, $P$ does not span $E_{\pm n}-P \cup P^*$.
\end{enumerate}
Conversely, let $\mathcal{C}$ be a collection of admissible subsets of $E_{\pm n}$, and let $\mathcal{B}$ be the collection of maximal admissible subsets of $E_{\pm n}$ not containing members of $\mathcal{C}$. If $\mathcal{C}$ satisfies (SC1) - (SC4), then $\mathcal{B}$ is the collection of bases of a symplectic matroid.
\end{restatable}
\begin{Rem}
(SC1), (SC2) and (SC3) resemble the circuit axioms of ordinary matroids. However, they don't suffice to guarantee the equi-cardinality of bases of symplectic matroids. (SC4) guarantees the equi-cardinality of bases.
\end{Rem}
\begin{Ex}
Let $\mathcal{B} = \{\{1, 2, 3\}, \{1^*, 2^*, 3\}, \{1, 3, 4\}, \{2^*, 3, 4\} \}$. Then
\[ \mathcal{C} = \{ \{3^*\}, \{4^*\}, \{1^*, 2\}, \{1, 2^*\}, \{1^*, 4\}, \{2, 4\} \}\]
is the collection of minimal admissible subsets not contained in any member of $\mathcal{B}$. Meanwhile, $\mathcal{B}$ is the collection of maximal admissible subsets not containing members of $\mathcal{C}$.
We can check that $\mathcal{C}$ satisfies (SC1), (SC2) and (SC3) without much obstacle. For any admissible set $P$ with $|P| = 4$, it contains some $C \in \mathcal{C}$. For any admissible set $Q = \{a , b \}$ where $a,b \in [4] \cup [4]^*$, $Q$ doesn't span $E_{\pm 4} - Q \cup Q^*$.
\end{Ex}
\section{Symplectic matroids satisfying circuit axioms}
\label{sec4}
Throughout this section, $\mathcal{B}$ is the collection of bases of a symplectic matroid $M$, and $\mathcal{C}$ is the collection of minimal admissible subsets of $E_{\pm n}$ not contained in any member of $\mathcal{B}$.
\begin{Lemma}
\label{lemma1}
Let $B \in \mathcal{B}$, and some $x \notin B$ such that $B \cup \{ x \}$ is admissible. Then there exists a unique circuit $C \subseteq B \cup \{x\}$ where $C$ is given by
\[C=\{x\} \cup \{b \in B \mid B \cup \{x\} - \{b\} \in \mathcal{B}\}.\]
\end{Lemma}
\begin{proof}
Let $B \in \mathcal{B}$ and $x \notin B$ such that $B \cup \{x\}$ is admissible. Then $|B \cup \{x\}| > |B|$. Therefore, $B \cup \{x\}$ is dependent, which means $B \cup \{x\}$ contains a circuit. Since $\{b \in B \mid B \cup \{x\} - \{b\} \in \mathcal{B}\} \subseteq B$ and $B \cup \{x\}$ is admissible, so $C$ is definitely admissible.
The expression of the unique circuit $C$ and its proof for symplectic matroids is the same as those for ordinary matroids, which can be found in various papers or textbooks, for example, in \cite{minieka}.
\end{proof}
\begin{Lemma}
\label{lemma2}
Let $C_1$ and $C_2$ be two distinct circuits of M, $C_1 \cup C_2$ be admissible and $x \in C_1 \cap C_2$. Then for every $c\in C_1 \Delta C_2$, there exists some $C_c \in \mathcal{C}$ such that $c \in C_c \subseteq C_1 \cup C_2 - \{x\}$.
\end{Lemma}
\begin{proof}
Suppose $C_1 \cup C_2 - \{x\}$ is independent. Then $C_1 \cup C_2 - \{x\} \subseteq B$. We know $x \notin B$, otherwise $C_1 \subseteq B$. Hence, $C_1,C_2 \subseteq C_1 \cup C_2 \subseteq B \cup \{x\}$. $B \cup \{x\}$ is dependent because B is a basis, and $B \cup \{x\}$ is admissible. Thus $B \cup \{x\}$ contains a unique circuit by Lemma~\ref{lemma1}. That contradicts $C_1$ and $C_2$ being distinct. Thus $C_1 \cup C_2 - \{x\}$ is dependent.
Since we suppose $C_1 \cup C_2$ is admissible, we show the existence of such a circuit $C_c$. This proof resembles that in \cite{borov}. We proceed by induction on $|C_1 \cup C_2|$. For the base step of induction, consider $C_1 = \{c_1,x\}$ and $C_2 = \{c_2,x\}$. Then $C=\{c_1,c_2\}=C_1 \Delta C_2$ must be a circuit. For the inductive step, let $c \in C_2 - C_1$ without loss of generality. We have shown that there exists a circuit $C \subseteq (C_1 \cup C_2) - \{x\}$. Suppose $c \notin C$. Since $C \not \subseteq C_2$, there exists some $y \in (C \cap C_1) - C_2$. We notice $x \in C_1 - C$, but $c \notin C \cup C_1$. Thus, $C \cup C_1 \subset C_1 \cup C_2$ and we can apply the induction hypothesis to $C$, $C_1$, and $x, y$ to find a circuit $C_3$ with $x \in C_3 \subseteq (C \cup C_1) - \{y\}$. Since $y \notin C_2$ and $y \notin C_3$, we have $C_3 \cup C_2 \subset C_1 \cup C_2$. However, $x \in C_2 \cap C_3$ and $c \in C_2 - C_3$. Thus, by applying the induction hypothesis again, we get a circuit $C_c$ with $c \in C_c \subseteq (C_3 \cup C_2) - \{x\} \subseteq (C_1 \cup C_2) - \{x\}$.
\end{proof}
\begin{Th}
\label{theorem1}
Let $P$ be an admissible subset of $E_{\pm n}$ and $B \in \mathcal{B}$. If $|P| < |B|$, $P$ does not span $E_{\pm n}-P \cup P^*$.
\end{Th}
\begin{proof}
Suppose there exists some $P$ such that $|P| < |B| = k$ and $P$ is the minimal set that spans $E_{\pm n}-P \cup P^*$, which means no subset $P_0$ of $P$ spans $E_{\pm n}-P_0 \cup P_0^*$. Without the loss of generality, suppose $P = \{ 1 , 2, \ldots, k-1 \}$. Hence $P$ spans every element in $\{ k, k+1,\ldots,n \} \cup \{ k, k+1,\ldots,n \}^*$. Thus there exist some $J_{k+j} \in \mathcal{C}$ such that
\[ J_{k+j} - P = \{ k+j \} \]
for all $j = 0 , \ldots, n-k$, and $J_{(k+j)^*} \in \mathcal{C}$ such that
\[ J_{(k+j)^*} - P = \{ (k+j)^* \} \]
for all $j = 0 , \ldots, n-k$. However, $P$ cannot be independent because $P \cup \{x \}$ is always dependent for any $x \in E_{\pm n}-P \cup P^*$, which makes $P$ a basis of size $k-1$, a contradiction. So $P$ is dependent. Thus $P \not \subseteq J_{k+j}$ and $P \not \subseteq J_{(k+j)^*}$ for all $j$.
Suppose $P$ is a circuit. (The proof when $P$ contains a circuit is similar.) There exists some $z \in P$ such that $z \notin J_{n^*}$. Let $S : = P - \{ z \}$. Then $J_{n^*} - \{ n^* \} \subseteq S$. Thus $S \cup \{ n^* \}$ is dependent because $J_{n^*} \subseteq S \cup \{ n^* \}$. For any $x \in E_{\pm n}-P \cup P^*$ and $x \neq n$, if $z \in J_x$, then by Lemma~\ref{lemma2}, there exists some $C \subseteq J_x \cup P - \{ z \}$, which means $S \cup \{ x \}$ is dependent; if $z \notin J_x$, then $J_x \subseteq S \cup \{ x \}$, which means $S \cup \{ x \}$ is dependent. Hence $S \cup \{ x \}$ is always dependent for all $x \in E_{\pm n}-P \cup P^*$. Moreover, $S \cup \{ z \}$ is dependent.
We are left with $S \cup \{ z^* \}$. Suppose $S \cup \{ z^* \}$ is dependent. Then $S$ is maximally independent, and hence a basis. However, we have $|S| = k-2$, which contradicts $|B| = k$. Suppose $S \cup \{ z^* \}$ is independent. Then $S \cup \{ z^* \}$ is maximally independent, and hence a basis. However, we have $|S \cup \{ z^* \}| = k-1$, which contradicts $|B| = k$. Therefore, there exists no $P$ such that $|P| < |B|$ and $P$ spans $E_{\pm n}-P \cup P^*$.
\end{proof}
Below we state the \textit{Symmetric Exchange Axiom}.
For every $X,Y \in \mathcal{B}$, if $i \in Y - X$, then there exists a $j \in X-Y$ such that $X \cup \{i\} - \{j\} \in \mathcal{B}$.
We show this Symmetric Exchange Axiom leads to the Maximality Property of symplectic matroids.
\begin{Th}
\label{theorem2}
If $\mathcal{B}$ is a collection of admissible sets of cardinality $k$ in $[n] \cup [n]^\ast$ where $k \leq n$, then the Symmetric Exchange Axiom guarantees the Maximality Property.
\end{Th}
\begin{proof}
This proof resembles that in \cite{borov}. Assume $\mathcal{B}$ satisfies the Symmetric Exchange Axiom. $X$ and $X \cup \{i\} - \{j\}$ must be comparable because the ordering of $E_{\pm n}$ is total. Suppose $X, Y$ are two distinct maximal bases. Let $i$ be the maximal element of $X \Delta Y$. Without loss of generality, suppose $i \in Y$. Then there exists some $j \in X$ such that $X \cup \{i\}-\{j\} \in \mathcal{B}$. We know $X$ and $X \cup \{i\} - \{j\}$ are comparable and distinct. Since $X$ is maximal and $i$ is the maximal element in $X \Delta Y$, then $X \cup \{i\} - \{j\}$ is greater than $X$. This causes a contradiction. Therefore, the Symmetric Exchange Axiom induces the Maximality Property.
\end{proof}
\section{Circuit Axioms leading to symplectic matroids}
\label{sec5}
Now we prove the other direction of the main theorem. Lemma~\ref{lemma1}, Lemma~\ref{lemma2}, and Theorem~\ref{theorem1} already told us that when $\mathcal{B}$ is the collection of bases of a symplectic matroid, then (SC1) - (SC4) hold. Now suppose $\mathcal{C}$ is a collection satisfying axioms (SC1) - (SC4) and $\mathcal{B}$ the collection of maximal admissible subsets of $E_{\pm n}$ not containing members of $\mathcal{C}$. We prove the following claims.
\noindent \textbf{Claim 1}\\
The bases in $\mathcal{B}$ are equi-numerous.
Suppose $B_1 , B_2 \in \mathcal{B}$ such that $|B_1| < |B_2|$. By Axiom (SC4), there exists an $x \in E_{\pm n} - B_1 \cup B_1^*$ that $B_1$ doesn't span. Then $B_1 \cup \{ x \}$ is admissible and contains no circuit, which contradicts the maximality of the basis $B_1$.
\noindent \textbf{Claim 2}\\
Let $B \in \mathcal{B}$, and some $x \notin B$ such that $B \cup \{ x \}$ is admissible. Then there exists a unique circuit $C \subseteq B \cup \{x\}$ where $C$ is given by
\[C=\{x\} \cup \{b \in B \mid B \cup \{x\} - \{b\} \in \mathcal{B}\}. \]
To prove Claim 2, we let $x \notin B$ such that $B \cup \{ x \}$ is admissible. Then there exists some $D \in \mathcal{C}$ such that $D \subseteq B \cup \{ x \}$. If $\{ x \} \in \mathcal{C}$, then we are done. Otherwise let
\[C=\{x\} \cup \{b \in B \mid B \cup \{x\} - \{b\} \in \mathcal{B}\}. \]
We want to show $C = D$.
Since $D \not \subset B$, we know $x \in D$. Now let $y \in D - \{ x \}$. Then $y \in B$. Let $A := B \cup \{ x \} - \{ y \}$. Suppose, for contradiction, that $A$ contains some circuit $E \in \mathcal{C}$. Certainly $x \in E$. If $E$ and $D$ are distinct, then by Axiom (SC3), there exists some circuit $F$ such that $F \subseteq E \cup D - \{ x \}$. But then $F \subseteq B$, a contradiction. Hence $E = D$. However $y \notin E$, $y \in D$. Thus we reach a contradiction.
Hence $A \in \mathcal{B}$. So $y \in C$ and $D \subseteq C$. To show $C \in \mathcal{C}$, we must show $C - \{ z\}$ is independent for all $z \in C$. If $z = x$, then $C - \{z \} \subseteq B \in \mathcal{B}$. Otherwise $C - \{ z \} \subseteq B \cup \{x \} - \{z \}$, which is a member of $\mathcal{B}$ by the definition of $C$. Therefore $C,D \in \mathcal{C}$ and by Axiom (SC2), we have $D = C$. Claim 2 is proved.
Let $A, B \in \mathcal{B}$ with $a \in A - B$. We show that there exists $b \in B-A$ such that $B \cup \{ a\} - \{ b\} \in \mathcal{B}$. Claim 2 says that there exists a circuit $C \subseteq B \cup \{a\}$ such that
\[C - \{a\} = \{b \in B \mid B \cup \{a\} - \{b\} \in \mathcal{B}\}. \]
However, $C - \{a\}$ is never empty because otherwise $C \subseteq A$ and is thus independent, which leads to a contradiction. So the Symmetric Exchange Property is satisfied here, which leads to the Maximality of symplectic matroids by Theorem~\ref{theorem2}.
\section{From graphs to symplectic matroids}
\label{sec6}
In this section, a \textit{graph} refers to a finite undirected multigraph. Inspired by Theorem 2 in \cite{chow}, we apply Theorem~\ref{thm:main} to see how every graph gives rise to a symplectic matroid.
Let $G$ be a graph with $n$ edges $e_1, e_2, \ldots, e_n$. We define a family $\mathcal{C}(G)$ of admissible
subsets of $E_{\pm n}$ as follows. If $S \subseteq E_{\pm n}$ is admissible, let
\[ G(S) : = \{ e_i \mid i \in S \textrm{ or } i^* \in S \}.
\]
We let an admissible set $S$ be a member of $\mathcal{C}(G)$ if and only if
\begin{enumerate}
\item either $G(S)$ is a (single) cycle and there is an even number of edges $e_i$ in $G(S)$ such that $i^* \in S$ (The parity of $G(S)$ is the product of the signs of these edges and is thus positive);
\item or $G(S)$ is a union of (single) cycles, there is an even number of edges $e_i$ in $G(S)$ such that $i^* \in S$, and in each cycle there is an odd number of edges with negative signs.
\end{enumerate}
We use some notions and terms from \cite{Zas}, which we review now. A \textit{signed graph} is a graph with each edge given either a plus sign or a minus sign. A
cycle in a signed graph is \textit{balanced} if the product of the signs of the corresponding edges is positive
and is \textit{unbalanced} otherwise. Every signed graph $\Gamma$ gives rise to an ordinary matroid
$M(\Gamma)$ in the following manner. The ground set of $M(\Gamma)$ is the signed edge
set of $\Gamma$, and a set of edges is independent if every connected
component is either a tree or a unicyclic graph whose unique cycle is unbalanced. \cite[Theorem~5.1]{Zas} shows that
$M(\Gamma)$ is a matroid. Notice that a basis of $M(\Gamma)$ can have as many elements as $G$ has vertices, but not more.
To phrase this another way, our construction of $\mathcal{C}(G)$ is the union of all $M(\Gamma)$ as $\Gamma$ ranges over all $2^n$ signed graphs with underlying graph $G$.
\begin{Th}
For every graph $G$, $\mathcal{C}(G)$ is the collection of circuits of a symplectic matroid.
\end{Th}
\begin{Rem}
The symplectic matroid we construct from graph $G$ in terms of circuits is the same matroid constructed differently in terms of independent sets by Theorem 2 in \cite{chow}.
\end{Rem}
\begin{proof}
It is easy to check that members of $\mathcal{C}(G)$ satisfy (SC1) and (SC2). Let $C_1 , C_2 \in \mathcal{C}(G)$ and suppose $C_1 , C_2$ are single cycles. If $C_1 \cap C_2 \neq \emptyset$ and suppose $e_1 \in C_1 \cap C_2$, there definitely exists a cycle $C_3 = C_1 \cup C_2 - C_1 \cap C_2 \subseteq C_1 \cup C_2 - \{ e_1 \}$. For any $e \in C_1 \cap C_2$, the deletion of such an edge doesn't change the parity of $C_3$ because we delete it twice from $C_1$ and $C_2$. Thus there is an even number of negative edges in $C_3$. If either or both of $C_1$ and $C_2$ are unions of (single) cycles, the proof would be analogous.
\cite[Theorem~5.1]{Zas} shows that
$M(\Gamma)$ is a matroid whose set of edges is independent if every connected
component is either a tree or a unicyclic graph. Notice that a basis of $M(\Gamma)$ cannot have more elements than $G$ has vertices. Therefore if an admissible subset $P$ of $E_{\pm n}$ satisfies $|P| < |B|$, then $|P| < \# V(G) - 1$. In other words, $G(P)$ is a subset of a spanning tree in $G$. Therefore if $P$ spans $x \in E_{\pm n} - P \cup P^*$, there exists a unique $J \in \mathcal{C}(G)$ such that $J - P = \{ x \}$. Considering the parity of $J$, $P$ is not able to span $x^*$ at the same time. Thus $P$ does not span $E_{\pm n} - P \cup P^*$.
Therefore $\mathcal{C}(G)$ is the collection of circuits of a symplectic matroid.
\end{proof}
\end{document} |
\begin{document}
\title{The 1--2--3 Conjecture almost holds for regular graphs}
\author[agh]{Jakub Przyby{\l}o\fnref{MNiSW}}
\ead{[email protected]}
\fntext[MNiSW]{This work was partially supported by the Faculty of Applied Mathematics AGH UST statutory tasks within subsidy of Ministry of Science and Higher Education.}
\address[agh]{AGH University of Science and Technology, al. A. Mickiewicza 30, 30-059 Krakow, Poland}
\begin{abstract}
The well-known 1--2--3 Conjecture asserts that the edges of every graph without isolated edges can be weighted with $1$, $2$ and $3$ so that adjacent vertices receive distinct weighted degrees. This is open in general, while it is known to be possible from the weight set $\{1,2,3,4,5\}$. We show that for regular graphs it is sufficient to use weights $1$, $2$, $3$, $4$. Moreover, we prove the conjecture to hold for every $d$-regular graph with $d\geq 10^8$.
\end{abstract}
\begin{keyword}
1--2--3 Conjecture \sep weighted degree of a vertex \sep regular graph
\end{keyword}
\maketitle
\section{Introduction}
One of the most basic observations in graph theory implies that there are no antonyms of regular graphs, understood as graphs whose all vertices have pairwise distinct degrees, except the trivial one vertex case.
Potential alternative definitions of an irregular graph were studied in the paper of Chartrand, Erd\H{o}s and Oellermann~\cite{ChartrandErdosOellermann}.
Chartrand et al.~\cite{Chartrand} on the other hand turned towards measuring the level of irregularity of graphs, rather than defining their irregular representatives. Suppose we admit multiplying edges of a given simple graph $G$, then what is the minimum $k$ such that we may obtain an irregular multigraph (a multigraph with pairwise distinct all degrees) of $G$ via replacing its every edge $e$ by at most $k$ parallel copies of $e$? Such value was called the \emph{irregularity strength} of $G$, see details and exemplary results concerning this graph invariant in~\cite{Aigner,Lazebnik,Faudree,Frieze,KalKarPf,Lehel,MajerskiPrzybylo2,Nierhoff}.
It was investigated in numerous further papers and gave rise to a wide list of related problems.
Perhaps the most closely associated with the irregularity strength itself is its local variant, oriented towards
differentiating degrees of exclusively adjacent vertices. Note that rather than multiplying edges of a given
graph $G=(V,E)$, we may consider its \emph{edge $k$-weighting}, i.e. an assignment $\omega:E\to\{1,2,\ldots,k\}$, and instead of focusing on a degree of a vertex $v$ in the corresponding multigraph, consider its so-called \emph{weighted degree} in $G$, defined as $\sigma_\omega(v):=\sum_{u\in N(v)}\omega(uv)$.
If this causes no ambiguities, we also write $\sigma(v)$ instead of $\sigma_\omega(v)$ and call it simply the
\emph{sum at $v$}.
We say $\omega$ is \emph{vertex-colouring} if $\sigma(u)\neq\sigma(v)$ for every edge $uv\in E$ --
we shall write that $u$ and $v$ are \emph{sum-distinguished} then or that there is \emph{no sum conflict} between $u$ and $v$.
This concept gained equally considerable attention in the combinatorial community as its precursor largely due to the following intriguing conjecture.
\begin{conjecture}[1--2--3 Conjecture]\label{Conjecture123Conjecture}
Every graph without isolated edges admits a ver\-tex-co\-louring edge $3$-weighting.
\end{conjecture}
This remarkable presumption originates in the paper~\cite{123KLT} of Karo\'nski, {\L}uczak and Thomason, who confirmed it in particular for $3$-colourable graphs.
First general constant upper bound was however showed by Addario-Berry, Dalal, McDiarmid, Reed and Thomason~\cite{Louigi30}, who designed strong and vastly applicative theorems on so-called degree constrained subgraphs (cf. e.g.~\cite{Louigi2}) to prove that every graph without isolated edges admits a vertex-colouring edge $30$-weighting. The same technique was further developed by Addario-Berry, Dalal and Reed~\cite{Louigi} to decrease $30$ to $16$, and by Wang and Yu~\cite{123with13}, who pushed it further down to $13$. A big break-through was later achieved due to research devoted to a total variant of the same concept, introduced in~\cite{12Conjecture}, and especially thanks to the result of Kalkowski~\cite{Kalkowski12}, generalized later through algebraic approach towards list setting by Wong and Zhu~\cite{WongZhu23Choos}. See also e.g.~\cite{BarGrNiw,PrzybyloWozniakChoos,WongZhuChoos} for other results, concerning in particular list versions of the both problems.
A modification and development of a surprisingly simple algorithm designed by Kalkowski in~\cite{Kalkowski12} allowed
Kalkowski, Karo\'nski and Pfender~\cite{KalKarPf_123} to
achieve the best general bound thus far in view of Conjecture~\ref{Conjecture123Conjecture},
implying that weights $1,2,3,4,5$ are always sufficient.
It is moreover known that the 1--2--3 Conjecture holds for very dense and large enough graphs, i.e. that there exists a constant $n'$ such that every graph with $n\geq n'$ vertices and minimum degree $\delta(G)>0.99985n$ admits a vertex-colouring edge $3$-weighting, as proved recently by Zhong in~\cite{123dense-Zhong}, and that even just weights 1,2 are asymptotically almost surely sufficient for a random graph (chosen from $G_{n,p}$ for a constant $p\in(0,1)$), see~\cite{Louigi}. On the other hand, it was proved by Dudek and Wajc~\cite{DudekWajc123complexity} that determining whether a particular graph admits a vertex-colouring edge $2$-weighting is NP-complete, while Thomassen, Wu and Zhang~\cite{ThoWuZha} showed that the same problem is polynomial in the family of bipartite graphs.
In this paper we provide two results drawing us very close to a complete solution of the 1--2--3 Conjecture in the case of regular graphs, which apparently might seem most obstinate in its context in view of exactly equal degrees of all their vertices (though obviously regularities within them might be and are an asset while analysing them).
\section{Main Results}
\begin{theorem}\label{1234regTh}
Every $d$-regular graph with $d\geq 2$ admits a vertex-colouring edge $4$-weighting.
\end{theorem}
This was earlier known for $d\leq 3$ by~\cite{123KLT} and possibly for $d=4$ -- see e.g.~\cite{Seamon123survey}.
Recently this was also confirmed for $d=5$ by Bensmail~\cite{Julien5regular123}.
Though our proof of Theorem~\ref{1234regTh} was obtained independently, its generic idea partly resembles
the one from~\cite{Julien5regular123}, but extends to all regular graphs. Apart from this, we moreover prove that the 1--2--3 Conjecture holds for regular graphs with sufficiently large degree by showing the following.
\begin{theorem}\label{123largeregTh}
Every $d$-regular graph with $d\geq 10^8$ admits a vertex-colouring edge $3$-weighting.
\end{theorem}
The proof of this result is completely different and based on the probabilistic method.
Both approaches do have two factors in common, though.
Firstly, both exploit at some point modifications of Kalkowski's algorithm from~\cite{Kalkowski12} in order to get rid of a part of possible sum conflicts, but in a different manner -- this is used as one of the main tools in the first proof, and only as a kind of a final cleaning device in the second one. Secondly, in both approaches we single out a special, usually small subset of vertices, and use the edges between this set and the rest of the vertices to adjust the sums in the graph (in the first proof such a set $I$ is stable, while in the second one -- such set $V_0$ is chosen randomly, and thus usually not stable); all details follow.
We shall apply the following rather standard notation for any given graph $G=(V,E)$, $v\in V$, $E'\subseteq E$ and $V',V''\subseteq V$ where $V'\cap V''=\emptyset$. By $G[V']$ we understand the graph induced by $V'$ in $G$, by $N_{V'}(v)$ -- the set of edges $uv\in E$ with $u\in V'$, by $d_{V'}(v)$ -- the number of edges $uv\in E$ with $u\in V'$ (i.e., $d_{V'}(v):=|N_{V'}(v)|$), by $d_{E'}(v)$ -- the number of edges in $E'$ incident with $v$, by $E(V')$ -- the set of edges from $E$ with both ends in $V'$, by $E(V',V'')$ -- the set of edges from $E$ with one end in $V'$ and the other in $V''$, and finally, by $G-v$ we mean the graph obtained from $G$ by removing $v$ and all its incident edges.
Moreover, the sum of graphs $G_1=(V_1,E_1), G_2=(V_2,E_2)$ is understood as $G_1\cup G_2:=(V_1\cup V_2, E_1\cup E_2)$.
\section{Proof of Theorem~\ref{1234regTh}}
Let $G=(V,E)$ be a $d$-regular graph, $d\geq 2$, and let $I\subset V$ be an arbitrary maximal independent set in $G$. Denote $R:=V\smallsetminus I$. Let $R_1\subseteq R$ be the set of isolated vertices in $G[R]$, and set $R_2:=R\smallsetminus R_1$. Denote by $G_1,\ldots,G_p$ the components of $G[R_2]$ (each of which contains at least one edge). For every $i=1,\ldots,p$, we order the vertices of $G_i$ into a sequence $v_1,\ldots,v_{n}$ so that each $v_j$ with $j<n$ has a \emph{forward neighbour} in $G_i$, that is a neighbour $v_k$ of $v_j$ in $G_i$ with $k>j$ (this can be achieved by denoting any vertex of $G_i$ as $v_n$ and using e.g. BFS algorithm to find a spanning tree of $G_i$ rooted at $v_n$, denoting consecutive vertices encountered within the algorithm: $v_{n-1},v_{n-2},\ldots$); we denote the edge joining $v_j$ with such $v_k$ with the least index $k$ ($k>j$) the \emph{first forward edge of} $v_j$.
Analogously we define \emph{backward neighbours} of a given vertex in $G_i$.
The vertex $v_n$ shall moreover be called the \emph{last vertex} of $G_i$.
By definition, every vertex in $R$ is incident with an edge joining it with $I$; for every $v\in R_2$ which is not the last vertex (in some component of $G[R_2]$) choose arbitrarily one such edge and denote it $e_v$ -- we shall call $e_v$ the \emph{supporting edge} of $v$.
We shall first assign initial weights $\omega(e)$ to all the edges $e$ of $G$. These shall be modified so that at the end of our construction:
\begin{itemize}
\item[(a)] $\sigma(v)<3d$ for every $v\in R_2$;
\item[(b)] $\sigma(v)\geq 3d$ for every $v\in I$;
\item[(b')] $\sigma(v)<4d$ for every $v\in I$ with a neighbour in $R_1$;
\item[(b'')] $\sigma(v)\in\{3d-1,4d\}$ for every $v\in R_1$,
\end{itemize}
where by $\sigma(v)$ we mean the sum at a given vertex $v$ in $G$.
Note that since $I$ and $R_1$ are stable sets and there are no edges between $R_1$ and $R_2$ in $G$, by (a), (b), (b') and (b''), potential sum conflicts shall only be possible between adjacent vertices in $R_2$ then.
We shall also require that throughout the whole construction:
\begin{itemize}
\item[(c)]
$\omega(e)\in\{1,2,3\}$ for $e\in E(R_2)$,\\
$\omega(e)\in\{3,4\}$ for $e\in E(I,R_2)$,\\
$\omega(e)\in\{2,3,4\}$ for $e\in E(I,R_1)$.
\end{itemize}
The major concern of our weight modifying algorithm shall be devoted to distinguishing adjacent vertices in $R_2$.
Only in its final stage shall we adjust the sums in $R_1$ (still consistently with (a), (b), (b'), (b'') and (c)).
Initially we assign the weight:
\begin{itemize}
\item[(i)] $\omega(e)=1$, if $e$ is the first forward edge of some vertex;
\item[(ii)] $\omega(e)=2$, if $e$ is an edge of $G[R_2]$ which is not the first forward edge of any vertex;
\item[(iii)] $\omega(e)=3$, if $e$ is incident with a vertex in $I$ and is not a supporting edge;
\item[(iv)] $\omega(e)=4$, if $e$ is a supporting edge.
\end{itemize}
Note that these weights are consistent with (c).
In the following main part of our modifying procedure we analyse and alter the sums at consecutive vertices in all the components of $G[R_2]$. Thus suppose we have already analysed all vertices in $G_1,\ldots,G_{i-1}$, and within $G_i$ -- the vertices $v_1,\ldots,v_{j-1}$ (following the rules $(1^\circ)$ -- $(3^\circ)$ specified below), hence we are about to consider the vertex $v_j$ (consistently with the vertex ordering fixed in $G_i$). While analysing this vertex:
\begin{itemize}
\item[$(1^\circ)$] we are not allowed to modify the sums at already analysed vertices (which are fixed and shall not change till the end of the construction).
\end{itemize}
On the other hand we wish to make some weight alterations so that:
\begin{itemize}
\item[$(2^\circ)$] the obtained sum at $v_j$ is distinct from the sums at all the already analysed neighbours of $v_j$ in $G[R_2]$ (i.e. those in $\{v_1,\ldots,v_{j-1}\}$);
\end{itemize}
while for this aim:
\begin{itemize}
\item[$(3^\circ)$] we are allowed to modify by $1$ the weights of the edges joining $v_j$ with its backward neighbours in $G_i$ and the weights of their supporting edges so that (c) still holds.
\end{itemize}
Before we show that we can indeed perform our modifying procedure in accordance with $(1^\circ)$ -- $(3^\circ)$,
let us observe the following.
\begin{observation}\label{a_b_Observation}
After analysing all vertices of $R_2$ consistently with requirements $(1^\circ)$ -- $(3^\circ)$,
the conditions (a), (b) and (b') shall hold.
\end{observation}
\begin{pf}
By (iii) and (iv) all edges incident with a vertex $v\in I$ are initially weighted $3$ or $4$,
while by $(3^\circ)$ the weight of an edge $e$ incident with $v$ can only be altered if $e$ is a supporting edge -- by $(3^\circ)$ and (c) we however must still have $\omega(e)\in\{3,4\}$ afterwards, thus (b) follows.
If $v$ has moreover a neighbour $u\in R_1$, then by (iii) we must have $\omega(uv)=3$, and this weight
is not modified within our procedure (cf. $(3^\circ)$), and thus (b') is fulfilled as well.
To see that (a) must also hold, note first that each edge $e$ of $G[R_2]$ can be modified at most once (consistently with $(3^\circ)$) within the algorithm, when it joins the currently analysed vertex with its backward neighbour. Therefore, for every vertex $v\in R_2$ which is not the last vertex of some component of $G[R_2]$, immediately after analysing $v$, the first forward edge of $v$ still has unchanged weight $1$ (cf. (i)). By (i) -- (iv) and $(3^\circ)$, all its remaining incident edges have in turn weights at most $3$, except for $e_v$, which has weight $4$. Therefore, $\sigma(v)\leq 3d-1$, and by $(1^\circ)$ this does not change till the end of the construction. In order to prove the same holds also in the case when $v\in R_2$ is the last vertex of some component of $G[R_2]$, it is sufficient to note that then, by our construction:
$\omega(e)\leq 3$ for every edge incident with $v$, as only supporting edges can be at this point weighted $4$.
Thus (a) follows, as by (i) and $(3^\circ)$ the edge joining $v$ with the vertex $u$ directly preceding it in the corresponding ordering cannot have weight greater than $2$ (as according to the main feature of the previously fixed orderings, this has to be the first forward edge of $u$).
\qed
\end{pf}
Now we explain how we can perform every consecutive step of our modifying procedure, associated with a currently analysed vertex $v_j$ from component $G_i$, so that $(1^\circ)$ -- $(3^\circ)$ hold (provided that the previous steps were consistent with these rules). For this aim note first that while analysing $v_j$, the weight of every \emph{backward edge} of $v_j$ (i.e. an edge joining it with its backward neighbour in $G_i$) \textbf{can} be modified by $1$ if necessary. Indeed, suppose $e=v_kv_j$ is such an edge (i.e. $k<j$). If $e$ is not the first forward edge of $v_k$, then by (ii), $\omega(e)=2$ and by (c), $\omega(e_{v_k})\in\{3,4\}$. Thus, so that $(1^\circ)$ is obeyed, according to $(3^\circ)$, if $\omega(e_{v_k})=3$, we may change the weights of $e$ and $e_{v_k}$ to $1$ and $4$, resp., while if $\omega(e_{v_k})=4$, we may change the weights of $e$ and $e_{v_k}$ to $3$ and $3$, respectively. On the other hand, if $e$ is the first forward edge of $v_k$, then neither $\omega(e)$ nor $\omega(e_{v_k})$ have been modified thus far, hence we may modify their respective current values $1$ and $4$ to $2$ and $3$ respectively. Suppose now that $v_j$ has $b$ backward neighbours, hence also $b$ backward edges, then as each of these provides one more possible alteration of the sum at $v_j$, we altogether have $b+1$ available options for this sum (which do not influence the sums at the backward neighbours of $v_j$). Thus we may choose among these admissible alterations
one that results in $\sigma(v_j)$ being distinct from the sums fixed for all $b$ backward neighbours of $v_j$ in $R_2$, i.e. consistent with $(2^\circ)$.
After analysing in this manner all vertices in $R_2$ we obtain a weighting of $G$ for which, by $(1^\circ)$, $(2^\circ)$ and Observation~\ref{a_b_Observation} (which guarantees (a) and (b)), $\sigma(u)\neq \sigma(v)$ for every $uv\in E(R_2\cup I)$.
Now we modify the sums in $R_1$ so that (b'') holds. Recall that by definition, each vertex $v\in R_1$ is only adjacent with vertices in $I$, and thus all edges incident with such $v$ are weighted $3$ by (iii) and (c). One after another, for every $v\in R_1$ we proceed as follows.
If $\sigma(u')\geq 3d+1$ for some neighbour $u'$ of $v$ ($u'\in I$), then we change the weight of exactly one edge,
namely $u'v$, from $3$ to $2$.
Otherwise, i.e. when due to (b) we have $\sigma(u)=3d$ for every neighbour $u$ of $v$ ($u\in I$), we change the weight of $uv$ from $3$ to $4$ for all $u\in N(v)\subseteq I$. Note that in both cases none of (a), (b) and (b') shall be violated, while we shall attain:
$\sigma(v)\in\{3d-1,4d\}$. After processing in this manner consecutively all vertices in $R_1$, all neighbours in $G$ shall finally be sum-distinguished, as vertices in $R_1$ are only adjacent with those in $I$, cf. (b), (b') and (b'').
\qed
\section{Proof of Theorem~\ref{123largeregTh}\label{SectionProofLargeD}}
\subsection{Tools}
The proof of Theorem~\ref{123largeregTh} relies heavily on a random distribution of vertices and edges of a given graph
to subsets with carefully predefined proportions. For this aim we shall however also make use of Corollary~\ref{QuarterDecompositionLemma} below implied by the following straightforward deterministic observation from~\cite{PrzybyloStandard22}, and possibly many other sources.
\begin{observation}
\label{EvenDecomposition}
Every graph $G=(V,E)$ can be edge-decomposed into two subgraphs $G_1, G_2$ so that for each $v\in V$ and $i\in\{1,2\}$:
\begin{equation}\label{EQ_EulerianDecomposition}
d_{G_i}(v)\in \left[\frac{d_G(v)}{2}-1,\frac{d_G(v)}{2}+1\right].
\end{equation}
\end{observation}
\begin{corollary}\label{QuarterDecompositionLemma}
Every graph $G_1=(V_1,E_1)$ has a subgraph $G'_1$ such that for each $v\in V_1$:
\begin{equation}\label{dG'1}
d_{G'_1}(v)\in\left[\frac{9}{16}d_{G_1}(v)-3,\frac{9}{16}d_{G_1}(v)+3\right].
\end{equation}
\end{corollary}
\begin{pf}
Such a subgraph can be constructed via four times repeated application of Observation~\ref{EvenDecomposition},
first to $G_1$ to obtain say $G^{(1)}_2$ and $G^{(2)}_2$, then to $G^{(2)}_2$ to obtain $G^{(1)}_3$ and $G^{(2)}_3$, next to $G^{(2)}_3$ to obtain $G^{(1)}_4$ and $G^{(2)}_4$, and finally to $G^{(2)}_4$ to get $G^{(1)}_5$ and $G^{(2)}_5$. It is then straightforward to verify that~(\ref{EQ_EulerianDecomposition}) implies that $G'_1:=G^{(1)}_2\cup G^{(2)}_5$ complies with our requirements.
\qed
\end{pf}
For random arguments we shall mostly use the symmetric variant of the Lov\'asz Local Lemma, see e.g.~\cite{AlonSpencer} and the Chernoff Bound, see e.g.~\cite{JansonLuczakRucinski}.
\begin{theorem}[\textbf{The Local Lemma}]
\label{LLL-symmetric}
Let $A_1,A_2,\ldots,A_n$ be events in an arbitrary pro\-ba\-bi\-li\-ty space.
Suppose that each event $A_i$ is mutually independent of a set of all the other
events $A_j$ but at most $D$, and that $\mathbf{Pr}(A_i)\leq p$ for all $1\leq i \leq n$. If
$$ p \leq \frac{1}{e(D+1)},$$
then $ \mathbf{Pr}\left(\bigcap_{i=1}^n\overline{A_i}\right)>0$.
\end{theorem}
\begin{theorem}[\textbf{Chernoff Bound}]\label{ChernofBoundTh}
For any $0\leq t\leq np$,
$$\mathbf{Pr}\left(\left|{\rm BIN}(n,p)-np\right|>t\right)<2e^{-\frac{t^2}{3np}}$$
where ${\rm BIN}(n,p)$ is the sum of $n$ independent Bernoulli variables, each equal to $1$ with probability $p$ and $0$ otherwise.
\end{theorem}
Finally, the following technical observation shall be useful repeatedly throughout the proof of Theorem~\ref{123largeregTh} while applying the local lemma.
\begin{observation}
For every $x\geq 10^8$,
\begin{eqnarray}
&&2e^{-\frac{x}{2.45\cdot 10^6}} <\frac{1}{2ex^2}; \label{TechIneq1}\\
&&2e^{-\frac{x}{4.9\cdot 10^6}} < \frac{1}{ex}. \label{TechIneq2}
\end{eqnarray}
\end{observation}
\begin{pf}
Note first that (\ref{TechIneq1}) is directly implied by inequality (\ref{TechIneq2}) -- it is sufficient to square both sides of (\ref{TechIneq2}).
To prove inequality~(\ref{TechIneq2}) it is however equivalently sufficient to show that $f(x)>0$ for $x\geq 10^8$ where
$$f(x):=\frac{x}{4.9\cdot 10^6}-\ln(2ex).$$
This in turn holds since $f'(x) = \frac{1}{4.9\cdot 10^6}-\frac{1}{x}>0$ for $x > 4.9\cdot 10^6$ and
$f(10^8) = \frac{100}{4.9}-\ln\left(2e\cdot10^8\right) = \frac{20\cdot4.9+2}{4.9}-\ln2-1-8\ln10
> 20.4-0.7-1-8\cdot 2.31> 0$.
\qed
\end{pf}
\subsection{Random vertex and edge partitions}
Let $G=(V,E)$ be a $d$-regular graph with $d\geq 10^8$.
\begin{claim}\label{ClaimV0}
We can choose a subset $V_0\subseteq V$ such that for every $v\in V$:
\begin{equation}\label{dV0}
\left|d_{V_0}(v)-0.05d\right|\leq 3\cdot10^{-4}d.
\end{equation}
\end{claim}
\begin{pf}
Randomly and independently we place every vertex from $V$ in $V_0$ with probability $0.05$.
Denote by $A_{1,v}$ the event that (\ref{dV0}) does not hold for a given $v\in V$.
By the Chernoff Bound, i.e. Theorem~\ref{ChernofBoundTh}, and inequality~(\ref{TechIneq1}):
$$\mathbf{Pr}\left(A_{1,v}\right) < 2e^{-\frac{(3\cdot10^{-4}d)^2}{3\cdot0.05d}}
= 2e^{-\frac{d}{\frac53\cdot 10^6}} <\frac{1}{ed^2}.$$
As every event $A_{1,v}$ is mutually independent of all other events $A_{1,u}$ except those where $u$ shares a common neighbour with $v$, i.e. all except at most $d(d-1)<d^2-1$ events, by the Lov\'asz Local Lemma, i.e. Theorem~\ref{LLL-symmetric}, with positive probability none of the events $A_{1,v}$ holds, and thus $V_0$ as desired must exist.
\qed
\end{pf}
Fix any $V_0$ consistent with Claim~\ref{ClaimV0}, and denote:
$$V_1:=V\smallsetminus V_0,~~~~
G_1:=G[V_1] {\rm~~~~and~~~~} G_0:=G[V_0],$$
hence by~(\ref{dV0}), for every $v\in V$:
\begin{equation}\label{dV1}
\left|d_{V_1}(v) - 0.95d\right|\leq 3\cdot10^{-4}d.
\end{equation}
We shall first fix sums for all vertices in $V_1$ (containing great majority of all the vertices), keeping these relatively small, and using weights of some of the edges between $V_1$ and $V_0$ for some necessary adjustments.
By Corollary~\ref{QuarterDecompositionLemma} there is a subgraph $G'_1$ of $G_1$ such that~(\ref{dG'1}) holds for every $v\in V_1$.
By~(\ref{dG'1}) and~(\ref{dV1}) we thus obtain that for every $v\in V_1$:
\begin{equation}\label{dG'1_2}
0.5d < \frac{9}{16}(0.95-0.0003)d-3\leq d_{G'_1}(v) \leq \frac{9}{16}(0.95+0.0003)d+3 < 0.54 d.
\end{equation}
Let
$$c_1:V_1\to\{1,2,\ldots,10^4\}$$
be an auxiliary assignment of integers to the vertices of $G_1$. Denote:
\begin{eqnarray}
E': &=& \left\{uv\in E(G'_1): c_1(u)+c_1(v)\geq 10^4+2\right\}, \label{DefinitionOfE'}\\
E'': &=& \left\{uv\in E(G'_1): c_1(u)+c_1(v)\leq 10^4+1\right\} \label{DefinitionOfE''}
\end{eqnarray}
(the edges in $E'$ shall be the only ones in $G_1$ with weight $3$ assigned, while the remaining ones shall be weighted $1$ -- this, combined with Claim 2 below shall assure convenient sums distribution in neighbourhoods of all vertices in $V_1$), and for each $i=1,2,\ldots,10^4$:
$$V_{1,i}: = \left\{v\in V_1: c_1(v)=i\right\}.$$
\begin{claim}\label{ClaimV1iE'}
We may choose $c_1$ so that for every $i\in \{1,2,\ldots,10^4\}$ and each $v\in V_{1,i}$:
\begin{eqnarray}
&& \left| d_{V_{1,i}}(v) - 10^{-4}d_{V_1}(v) \right| \leq 11\cdot 10^{-6}d; \label{dV1i}\\
&& \left| d_{E'}(v) - (i-1)10^{-4}d_{G'_1}(v) \right| \leq 6\cdot 10^{-4}d. \label{dE'}
\end{eqnarray}
\end{claim}
\begin{pf}
We choose $c_1:V_1\to\{1,2,\ldots,10^4\}$ randomly by independently assigning every vertex $v\in V_1$ its value $c_1(v)$ from the set $\{1,2,\ldots,10^4\}$, each with equal probability.
For every $v\in V_1$ we denote
by $A_{2,v}$ and $A_{3,v}$ the events that (\ref{dV1i}) and that (\ref{dE'}) does not hold, respectively.
Let $v\in V_1$. As by~(\ref{dV1}) we have $11\cdot 10^{-6}d\leq 10^{-4}d_{V_1}(v)$, by the Chernoff Bound, (\ref{dV1}) and (\ref{TechIneq1}) we obtain that:
\begin{eqnarray}
\mathbf{Pr}\left(A_{2,v}\right)<2e^{-\frac{(11\cdot 10^{-6}d)^2}{3\cdot 10^{-4}d_{V_1}(v)}} \leq 2e^{-\frac{121\cdot 10^{-12}d^2}{3\cdot 10^{-4}(0.95+3\cdot10^{-4})d}} =
2e^{-\frac{d}{\frac{285.09}{121}\cdot 10^{6}}}
< \frac{1}{2ed^2}. \label{PrA2v}
\end{eqnarray}
Note further that for every $i\geq 2$, $i\leq 10^4$, by~(\ref{DefinitionOfE'}) and~(\ref{DefinitionOfE''}):
\begin{eqnarray}
&&\mathbf{Pr}\left(A_{3,v}~|~c_1(v)=i\right) \nonumber\\
&=& \mathbf{Pr}\left(\left|{\rm BIN}\left(d_{G'_1}(v),(i-1)10^{-4}\right) - (i-1)10^{-4}d_{G'_1}(v)\right|>6\cdot 10^{-4}d\right) \nonumber\\
&=&\mathbf{Pr}\left(\left|d_{E'}(v)-(i-1)10^{-4}d_{G'_1}(v)\right| > 6\cdot 10^{-4}d ~|~ c_1(v)=i\right) \nonumber\\
&=&\mathbf{Pr}\left(\left|d_{G'_1}(v)-d_{E''}(v)-(i-1)10^{-4}d_{G'_1}(v)\right| > 6\cdot 10^{-4}d ~|~ c_1(v)=i\right) \nonumber\\
&=& \mathbf{Pr}\left(\left|(10^4-i+1)10^{-4}d_{G'_1}(v)-d_{E''}(v)\right| > 6\cdot 10^{-4}d ~|~ c_1(v)=i\right) \nonumber\\
&=& \mathbf{Pr}\left(\left|{\rm BIN}\left(d_{G'_1}(v),(10^4-i+1)10^{-4}\right) -
(10^4-i+1)10^{-4}d_{G'_1}(v)\right|>6\cdot 10^{-4}d\right) \nonumber\\
&=& \mathbf{Pr}\left(A_{3,v}~|~c_1(v)=10^4-i+2\right). \label{A3vSymmetry}
\end{eqnarray}
Now for every fixed $13\leq i\leq 0.5\cdot 10^4+1$
(as then by~(\ref{dG'1_2}),
$(i-1)10^{-4}d_{G'_1}(v) > 12\cdot 10^{-4} \cdot 0.5d = 6\cdot 10^{-4}d$), by the Chernoff Bound, (\ref{dG'1_2}) and (\ref{TechIneq1}) we obtain:
\begin{equation}
\mathbf{Pr}\left(A_{3,v}~|~c_1(v)=i\right) < 2e^{-\frac{(6\cdot 10^{-4}d)^2}{3\cdot (i-1)10^{-4}d_{G'_1}(v)}}
< 2e^{-\frac{(6\cdot 10^{-4}d)^2}{3\cdot 0.5 \cdot 0.54d}}
= 2e^{-\frac{d}{2.25 \cdot 10^6}}
< \frac{1}{2ed^2}. \label{A3vMostCases}
\end{equation}
For $i=1$, by the definition of $E'$ and $c_1$, we trivially have:
\begin{equation}\label{A3vCase0}
\mathbf{Pr}\left(A_{3,v}~|~c_1(v)=1\right) = 0.
\end{equation}
For $i\in\{2,3,\ldots,12\}$ in turn (as then by~(\ref{dG'1_2}), $(i-1)10^{-4}d_{G'_1}(v) > 0.5\cdot 10^{-4}d$), by the Chernoff Bound and (\ref{TechIneq1}):
\begin{eqnarray}
\mathbf{Pr}\left(A_{3,v}~|~c_1(v)=i\right)
&\leq& \mathbf{Pr}\left(\left|d_{E'}(v)-(i-1)10^{-4}d_{G'_1}(v)\right| > 0.5\cdot 10^{-4}d ~|~ c_1(v)=i\right) \nonumber\\
& <& 2e^{-\frac{( 0.5\cdot 10^{-4}d)^2}{3\cdot (i-1)10^{-4}d_{G'_1}(v)}}
< 2e^{-\frac{( 0.5\cdot 10^{-4}d)^2}{3\cdot 11\cdot 10^{-4}d}}
= 2e^{-\frac{d}{\frac{33}{25}\cdot 10^{6}}}
< \frac{1}{2ed^2}. \label{A3vSmallCases}
\end{eqnarray}
By (\ref{A3vMostCases}), (\ref{A3vSmallCases}), (\ref{A3vSymmetry}), (\ref{A3vCase0}) and the law of total probability,
\begin{equation}\label{PrA3v}
\mathbf{Pr}\left(A_{3,v}\right) < \frac{1}{2ed^2}.
\end{equation}
Let $\Delta_1$ be the maximum degree of $G_1$.
As every event $A_{2,v}$ and every event $A_{3,v}$ is mutually independent of all other events $A_{2,u}$ and $A_{3,u}$ except possibly those where $u$ is at distance at most $2$ from $v$ in $G_1$, i.e. all except at most $2\Delta_1^2+1<2d^2-1$, by~(\ref{PrA2v}), (\ref{PrA3v}) and the Lov\'asz Local Lemma, with positive probability none of the events $A_{2,v}$, $A_{3,v}$ holds, and thus $c_1$ as desired must exist.
\qed
\end{pf}
\begin{claim}\label{ClaimE1inV0andV1}
We may choose a set of edges $E_1\subseteq E(V_1,V_0)$ such that:
\begin{eqnarray}
&& \left| d_{E_1}(u) - 0.08d_{V_0}(u) \right| \leq 5\cdot10^{-5}d {\rm ~~~~for~~~~} u\in V_1; \label{dE1inV1}\\
&& \left| d_{E_1}(v) - 0.08d_{V_1}(v) \right| \leq 10^{-3}d {\rm ~~~~for~~~~} v\in V_0. \label{dE1inV0}
\end{eqnarray}
\end{claim}
\begin{pf}
Suppose we randomly and independently place every edge from $E(V_0,V_1)$ in $E_1$ with probability $0.08$.
For every $u\in V_1$ and $v\in V_0$, denote
by $A_{4,u}$ and $A_{5,v}$ the events that (\ref{dE1inV1}) and that (\ref{dE1inV0}) does not hold, respectively.
Then by the Chernoff Bound, (\ref{dV0}), (\ref{dV1}) and (\ref{TechIneq2}) we obtain that for every $u\in V_1$:
\begin{equation}\label{PrA4v}
\mathbf{Pr}\left(A_{4,u}\right) < 2e^{-\frac{(5\cdot 10^{-5}d)^2}{3\cdot 0.08d_{V_0}(u)}}
\leq 2e^{-\frac{(5\cdot 10^{-5}d)^2}{3\cdot0.08\cdot0.0503d}}
= 2e^{-\frac{d}{4.8288 \cdot 10^{6}}}
< \frac{1}{ed},
\end{equation}
while for each $v\in V_0$:
\begin{equation}\label{PrA5v}
\mathbf{Pr}\left(A_{5,v}\right) < 2e^{-\frac{(10^{-3}d)^2}{3\cdot0.08d_{V_1}(v)}} \leq
2e^{-\frac{(10^{-3}d)^2}{3\cdot0.08\cdot0.9503d}} =
2e^{-\frac{d}{24\cdot 9503}} <
\frac{1}{ed}.
\end{equation}
As every event $A_{4,v}$ and every event $A_{5,v}$ is mutually independent of all other events $A_{4,u}$ and $A_{5,u}$ except possibly those where $u$ is at distance at most $1$ from $v$ in the graph induced by the edges of $E(V_0,V_1)$, i.e. (by (\ref{dV0}) and (\ref{dV1})) all except at most $\max_{v\in V} d_{E(V_0,V_1)}(v) < d-1$, by~(\ref{PrA4v}), (\ref{PrA5v}) and the Lov\'asz Local Lemma, with positive probability none of the events $A_{4,v}$, $A_{5,v}$ holds, and thus $E_1$ as desired must exist.
\qed
\end{pf}
Carefully designed weight adjustments of the edges in $E_1$ shall be used to supplement roughly even distribution of sums assured by Claim~\ref{ClaimV1iE'}, and consequently to provide sum distinction of the neighbours in $V_1$.
Out of the remaining edges in $E(V_0,V_1)$ we shall next choose a set $E_0$ of edges with special features, which
shall partition $V_0$ into 5 subsets (with decreasing sums, all however larger than the ones in $V_1$), and shall facilitate the use of a modification of Kalkowski's algorithm in $G_0$ to distinguish the remaining neighbours in $G$.
Denote:
\begin{equation}\label{E*definition}
E^*:= E(V_0,V_1)\smallsetminus E_1.
\end{equation}
Consider an assignment:
$$c_0:V_0\to\{0,1,2,3,4\}$$
and the following partition of $V_0$ induced by it:
$$V_{0,j}:=\left\{v\in V_0: c_0(v)=j\right\}, ~~~~j=0,1,2,3,4.$$
\begin{claim}\label{ClaimE0inV0andV1anddV0i}
We may choose $E_0\subseteq E^*$ and $c_0:V_0 \to \{0,1,2,3,4\}$ such that:
\begin{eqnarray}
&& \left| d_{E_0}(v) - 0.2c_0(v)d_{E^*}(v) \right| \leq 10^{-3}d {\rm ~~~~for~~~~} v\in V_0; \label{dE0inV0}\\
&& \left| d_{V_{0,c_0(v)}}(v)- 0.2d_{V_0}(v)\right| \leq 10^{-3}d {\rm ~~~~for~~~~} v\in V_0; \label{dV0i}\\
&& \left| d_{E_0}(u) - 0.4d_{E^*}(u) \right| \leq 2\cdot10^{-4}d {\rm ~~~~for~~~~} u\in V_1. \label{dE0inV1}
\end{eqnarray}
\end{claim}
\begin{pf} First for every $v\in V_0$ choose randomly and independently an integer in $\{0,1,2,3,4\}$, each with equal probability, and denote it by $c_0(v)$. Then include every edge $uv\in E^*$ with $v\in V_0$ in $E_0$ randomly and independently with probability $0.2c_0(v)$. For every $v\in V_0$ and $u\in V_1$, denote by
$A_{6,v}$, $A_{7,v}$ and $A_{8,u}$ the events that (\ref{dE0inV0}), (\ref{dV0i}) and (\ref{dE0inV1}) does not hold, respectively.
Let $v\in V_0$.
First note that by the Chernoff Bound, (\ref{dE1inV0}), (\ref{dV1}) and (\ref{TechIneq1}), for every fixed $j\in\{1,2,3,4\}$:
\begin{eqnarray}
\mathbf{Pr}\left(A_{6,v} ~|~ c_0(v)=j\right) &<& 2e^{-\frac{(10^{-3}d)^2}{3\cdot 0.2j\cdot d_{E^*}(v)}} \leq
2e^{-\frac{(10^{-3}d)^2}{0.6 j (0.92d_{V_1}(v)+ 10^{-3}d)}} \nonumber\\
&\leq& 2e^{-\frac{(10^{-3}d)^2}{2.4\cdot (0.92\cdot 0.9503d+ 10^{-3}d)}}
= 2e^{-\frac{d}{2100662.4}} < \frac{1}{2ed^2}. \label{PrA6v1234}
\end{eqnarray}
Moreover, if $c_0(v)=0$, then we trivially have: $d_{E_0}(v)=0$, and hence:
\begin{equation}
\mathbf{Pr}\left(A_{6,v} ~|~ c_0(v)=0\right) = 0. \label{PrA6v0}
\end{equation}
By (\ref{PrA6v1234}), (\ref{PrA6v0}) and the law of total probability we thus obtain that:
\begin{equation}
\mathbf{Pr}\left(A_{6,v}\right) < \frac{1}{2ed^2}. \label{PrA6v}
\end{equation}
Further, by the Chernoff Bound, (\ref{dV0}) and (\ref{TechIneq1}),
\begin{eqnarray}
\mathbf{Pr}\left(A_{7,v} \right) &<& 2e^{-\frac{(10^{-3}d)^2}{3\cdot 0.2d_{V_0}(v)}} \leq
2e^{-\frac{(10^{-3}d)^2}{ 0.6\cdot 0.0503 d}} =
2e^{-\frac{ d}{30180}} <
\frac{1}{2ed^2}. \label{PrA7v}
\end{eqnarray}
Let now $v\in V_1$.
Note that as our choices are independent, then every edge from $E^*$ which is incident with $v$ is in fact independently chosen to $E_0$ with probability $$\frac{1}{5}\cdot(0+0.2+0.4+0.6+0.8)=0.4,$$ and hence, by the Chernoff Bound,
(\ref{dE1inV1}), (\ref{dV0}) and (\ref{TechIneq1}),
\begin{eqnarray}
\mathbf{Pr}\left(A_{8,v}\right) &<& 2e^{-\frac{(2\cdot10^{-4}d)^2}{3\cdot 0.4d_{E^*}(v)}} \leq
2e^{-\frac{(2\cdot10^{-4}d)^2}{1.2\cdot (0.92d_{V_0}(v)+5\cdot 10^{-5}d)}} \leq
2e^{-\frac{(2\cdot10^{-4}d)^2}{1.2\cdot (0.92\cdot 0.0503d+5\cdot 10^{-5}d)}} \nonumber\\
& = &2e^{-\frac{d}{1389780}} < \frac{1}{2ed^2}. \label{PrA8v}
\end{eqnarray}
It is easy to notice that each of the events $A_{6,v}$, $A_{7,v}$, $A_{8,v}$ is mutually independent of all but (much) less than $2d^2$ other events of such types, and thus by (\ref{PrA6v}), (\ref{PrA7v}), (\ref{PrA8v}) and the Lov\'asz Local Lemma, with positive probability none of the events $A_{6,v}$, $A_{7,v}$, $A_{8,v}$ holds, and hence there must exist $c_0$ and $E_0$ as required.
\qed
\end{pf}
\subsection{Initial weighting}
We define an initial edge 3-weighting $\omega_0$ of $G$ as follows:
$$\omega_0(e):=\left\{ \begin{array}{lll}
1,&{\rm if} &e\in E(V_1)\smallsetminus E';\\
2,&{\rm if}& e\in E_1\cup E_0\cup E(V_0);\\
3,&{\rm if}& e\in E'\cup E(V_0,V_1)\smallsetminus(E_0\cup E_1).
\end{array}\right.$$
We shall further modify only the weights of the edges in $E_1$ (increasing some of these to $3$, to adjust the sums in $V_1$) and of the edges in $E(V_0)$ (possibly changing some of them by $1$ in order to distinguish sums within $V_0$ in the final part of our construction).
Note that at this point for every $v\in V_1$:
\begin{eqnarray}
\sigma(v)
&=& 3\cdot d_{E'}(v)+1\cdot \left(d_{V_1}(v)-d_{E'}(v)\right)+2\cdot \left(d_{E_0}(v)+d_{E_1}(v)\right)+3\cdot \left(d_{V_0}(v)-d_{E_0}(v)-d_{E_1}(v)\right)\nonumber\\
&=& d+2d_{E'}(v)+1\cdot \left(d_{E_0}(v)+d_{E_1}(v)\right)+2\cdot \left(d_{V_0}(v)-d_{E_0}(v)-d_{E_1}(v)\right) \nonumber\\
&=& d+2d_{E'}(v)+2d_{V_0}(v)-d_{E_0}(v)-d_{E_1}(v), \nonumber
\end{eqnarray}
and hence, if $v\in V_{1,i}$ for some $i\in\{1,2,\ldots,10^4\}$, then by (\ref{dE'}), (\ref{dE0inV1}), (\ref{E*definition}), (\ref{dG'1}), (\ref{dE1inV1}), (\ref{dV0}):
\begin{eqnarray}
&&\sigma(v) \in\nonumber\\
&&\Big[d+2\left((i-1)10^{-4}d_{G'_1}(v)-6\cdot 10^{-4}d\right)+2d_{V_0}(v)
-\left(0.4\left(d_{V_0}(v)-d_{E_1}(v)\right)+2\cdot10^{-4}d\right)-d_{E_1}(v), \nonumber\\
&&d+2\left((i-1)10^{-4}d_{G'_1}(v)+6\cdot 10^{-4}d\right)+2d_{V_0}(v)
-\left(0.4\left(d_{V_0}(v)-d_{E_1}(v)\right)-2\cdot10^{-4}d\right)-d_{E_1}(v)\Big]\nonumber\\
&\subseteq& \bigg[d+2(i-1)10^{-4}\left(\frac{9}{16}(d-d_{V_0}(v))-3\right)+1.6d_{V_0}(v)-0.6d_{E_1}(v) -0.0014d, \nonumber\\
&&d+2(i-1)10^{-4}\left(\frac{9}{16}(d-d_{V_0}(v))+3\right)+1.6d_{V_0}(v)-0.6d_{E_1}(v) +0.0014d\bigg] \nonumber\\
&\subseteq& \bigg[d+2(i-1)10^{-4}\left(\frac{9}{16}(d-d_{V_0}(v))-3\right)+1.6d_{V_0}(v)
-0.6\left(0.08d_{V_0}(v)+5\cdot10^{-5}d\right) -0.0014d, \nonumber\\
&&d+2(i-1)10^{-4}\left(\frac{9}{16}(d-d_{V_0}(v))+3\right)+1.6d_{V_0}(v)
-0.6\left(0.08d_{V_0}(v)-5\cdot10^{-5}d\right) +0.0014d\bigg] \nonumber\\
&=& \bigg[\left(1+\frac{9}{8}(i-1)10^{-4}\right)d+\left(1.552-\frac{9}{8}(i-1)10^{-4}\right)d_{V_0}(v) -0.00143d - 6(i-1)10^{-4}, \nonumber\\
&&\left(1+\frac{9}{8}(i-1)10^{-4}\right)d+\left(1.552-\frac{9}{8}(i-1)10^{-4}\right)d_{V_0}(v) + 0.00143d + 6(i-1)10^{-4}\bigg]\nonumber\\
&\subseteq& \bigg[\left(1+\frac{9}{8}(i-1)10^{-4}\right)d+\left(1.552-\frac{9}{8}(i-1)10^{-4}\right)\left(0.05d-0.0003d\right)
-0.00143d - 6(i-1)10^{-4}, \nonumber\\
&& \left(1+\frac{9}{8}(i-1)10^{-4}\right)d+\left(1.552-\frac{9}{8}(i-1)10^{-4}\right)\left(0.05d+0.0003d\right)
+ 0.00143d + 6(i-1)10^{-4}\bigg] \nonumber\\
&=& \big[\left(1.0776+1.06875(i-1)10^{-4}\right)d - 0.0018956d
+ 0.0003375(i-1)10^{-4}d - 6(i-1)10^{-4}, \nonumber \\
&& \left(1.0776+1.06875(i-1)10^{-4}\right)d + 0.0018956d
- 0.0003375(i-1)10^{-4}d + 6(i-1)10^{-4}\big] \nonumber\\
&\subseteq& \big[\left(1.0776+1.06875(i-1)10^{-4}\right)d - 0.0018956d,
\left(1.0776+1.06875(i-1)10^{-4}\right)d + 0.0018956d\big]. \nonumber\\\label{FirstSv}
\end{eqnarray}
Let
$\Delta_2:=\max_{1\leq i \leq 10^4} \Delta(G[V_{1,i}])$.
By~(\ref{dV1i}) and~(\ref{dV1}),
\begin{eqnarray}
\Delta_2 &\leq& 10^{-4}\cdot 0.9503d+11\cdot 10^{-6}d
= 10^{-4}\cdot 1.0603 d. \label{Delta2}
\end{eqnarray}
For every $i=1,2,\ldots,10^4$ we arbitrarily choose a proper vertex colouring of $G[V_{1,i}]$:
\begin{equation}\label{DefinitionOfc1i}
c_{1,i}: V_{1,i}\to\{0,1,\ldots,\Delta_2\}.
\end{equation}
Now we modify weights of some of the edges in $E_1$ by adding $1$ to them (hence switching their weights from $2$ to $3$) so that for every $i\in \{1,2,\ldots,10^4\}$ and each $v\in V_{1,i}$:
\begin{equation}
\sigma(v)=\left\lfloor \left(1.0776+1.06875(i-1)10^{-4}\right)d + 0.0018956d\right\rfloor+c_{1,i}(v). \label{SecondSv}
\end{equation}
This is feasible by (\ref{FirstSv}), as by~(\ref{dE1inV1}), (\ref{dV0}) and~(\ref{Delta2}):
\begin{eqnarray}
d_{E_1}(v) &\geq& 0.08d_{V_0}(v) -5\cdot 10^{-5}d \geq 0.08 \cdot \left(0.05d - 3\cdot 10^{-4}d\right)
-5\cdot 10^{-5}d \nonumber\\
&=& 0.003926 d> 0.00389723 d \geq 2\cdot 0.0018956d + \Delta_2. \nonumber
\end{eqnarray}
We denote the obtained weighting of the edges of $G$ by $\omega_1$. As a result, by (\ref{SecondSv}) and the definitions of $c_{1,i}$, neighbours are sum-distinguished within every $V_{1,i}$ in $G$, i.e. for each $i=1,2,\ldots,10^4$
and every edge $uv\in E(V_{1,i})$ we have $\sigma(u)\neq \sigma(v)$. In fact however, all neighbours in $V_1$ are at this point sum-distinguished, as no conflicts are possible between distinct sets $V_{1,i}$. To see this it is sufficient to observe that for each $1\leq i < 10^4$ and any $u\in V_{1,i}$ and $v\in V_{1,i+1}$, by (\ref{SecondSv}) and~(\ref{Delta2}) we now have:
\begin{eqnarray}
\sigma(u) &\leq& \left(1.0776+1.06875(i-1)10^{-4}\right)d + 0.0018956d + 10^{-4}\cdot 1.0603 d \nonumber\\
&<& \left(1.0776+1.06875(i-1)10^{-4}\right)d + 0.0018956d + 1.06875\cdot 10^{-4}d - 1 \nonumber\\
&<& \left\lfloor\left(1.0776+1.06875\cdot i\cdot 10^{-4}\right)d + 0.0018956d\right\rfloor\leq \sigma(v). \label{V1iSmallerV1i+1}
\end{eqnarray}
\subsection{Setting sums in $V_0$}
We shall now modify the sums in $V_0$ by altering weights of some of the edges in $E(V_0)$ so that there are no sum conflicts within the sets $V_{0,i}$.
First however we choose for every vertex $v\in V_0$ a set $E_v\subset E(V_0)$ of its \emph{personal} incident edges, whose weights' modifications shall settle the final sum at $v$ (up to an additive factor of $1$). These sets shall satisfy the following two features:
\begin{eqnarray}
E_u\cap E_v = \emptyset &{\rm ~~for~~}& u,v\in V_0, u\neq v; \label{emptyintersectionFeature}\\
|E_v| \geq 0.5d_{V_0}(v)-1 &{\rm ~~for~~}& v\in V_0. \label{halfFeature}
\end{eqnarray}
We define these for each component of $G_0$ separately.
Suppose $H$ is any such component. We add a new vertex $u$ (if necessary) and join it with single edges with all vertices of odd degrees in $H$, thus obtaining an Eulerian graph $F$ of $H$. Then we traverse the edges of $F$ along any of its Eulerian tours, starting at $u$ (or at any other vertex if $H$ was itself Eulerian), and temporarily direct these edges
consistently with our direction of movement along the Eulerian tour. We then remove the vertex $u$ (if we previously had to add it) and for every vertex $v$ we define $E_v$ as the set of edges in $H$ outgoing from $v$. It is straightforward to verify then that such sets $E_v$ meet our requirements~(\ref{emptyintersectionFeature}) and~(\ref{halfFeature}).
We now arbitrarily arrange the vertices of $V_0$ into a sequence $v_1,v_2,\ldots, v_k$
and analyse them one after another. Once we reach a given vertex $v$ we associate to it a set
$$S_v\in \mathbb{S}:=\left\{\left\{2i,2i+1\right\}:i\in\mathbb{Z}\right\}$$
distinct from all sets $S_u$ already assigned to neighbours $u$ of $v$ from $V_{0,c_0(v)}$ (hence also disjoint from
them due to the definition of $\mathbb{S}$) and guarantee that ever since this moment the sum at $v$ belongs to this set
(note this shall guarantee that neighbours within every $V_{0,j}$ shall be sum-distinguished for $j=0,1,2,3,4$ at the end of our algorithm). To achieve such a goal for the currently analysed vertex $v$, we admit, if necessary, one of the following two modifications of the weight of every edge $uv\in E_v$:
\begin{itemize}
\item increasing the weight of $uv$ to $3$ (from $2$) if $u$ was not yet analysed, i.e. $v$ precedes $u$ in the chosen ordering;
\item otherwise, changing the weight of $uv$ to $1$ or $3$ so that as a result we still have $\sigma(u)\in S_u$.
\end{itemize}
Note that exactly one of the two options of changing the weight of $uv$ (either to $1$ or to $3$) from the second point
above is always available. Note moreover that in order to achieve our goal we cannot set the sum at $v$ to be equal to
a number from any two-element set $S_u$ already associated to a neighbour $u$ of $v$ from $V_{0,c_0(v)}$, i.e. we must avoid at most $2d_{V_{0,c_0(v)}}(v)$ integers, while the admitted modifications of weights of the edges in $E_v$ yield
\begin{eqnarray}
|E_v|+1 \geq 0.5d_{V_0}(v) &=& 2\left(0.2 d_{V_0}(v) + 10^{-3}d\right) + \left(0.1 d_{V_0}(v) - 2\cdot 10^{-3}d\right) \nonumber\\
&\geq&2d_{V_{0,c_0(v)}}(v) +\left(0.1\cdot \left(0.05d-3\cdot 10^{-4}d\right) - 2\cdot 10^{-3}d\right)
>
2d_{V_{0,c_0(v)}}(v) \nonumber
\end{eqnarray}
potential options for the sum at $v$ (cf. (\ref{halfFeature}), (\ref{dV0i}) and (\ref{dV0})).
We then choose any of these options, say $s^*$ which does not belong to any (already fixed) $S_u$ with
$u\in N_{V_{0,c_0(v)}}(v)$ and which requires modification of at most $2d_{V_{0,c_0(v)}}(v)$ weights of the edges incident with $v$.
We then perform these at most $2d_{V_{0,c_0(v)}}(v)$ admitted weight modifications in $E_v$ so that $\sigma(v)=s^*$
(note
the weights modified in this step are the only ones incident with $v$ with a possible value $1$) and choose as $S_v$ the only element of $\mathbb{S}$ containing $s^*$.
After analysing all vertices in $V_0$ we obtain our final edge $3$-weighting $\omega_2$ of $G$, such that there are no sum conflicts between neighbours within any $V_{0,i}$, $i=0,1,2,3,4$. Moreover, as within the algorithm above, for every $v\in V_0$ we might have had at most $2d_{V_{0,c_0(v)}}(v)$ edges weighted $1$ immediately after fixing $S_v$ and assuring that the sum at $v$ belongs to this set, and this sum at $v$ could change only by $1$ throughout the rest of the algorithm (so that $\sigma(v)\in S_v$), hence at the end of its execution we still have for every $v\in V_{0,i}$:
\begin{equation}\label{FirstSvEstimation}
\sigma(v)\geq 1 \cdot 2d_{V_{0,i}}(v) + 2 \cdot\left(d_{V_0}(v)-2d_{V_{0,i}}(v)\right) -1 + \sum_{u\in N_{V_1}(v)}\omega_2(uv).
\end{equation}
\subsection{Final calculations}
It remains to show that no sum conflicts are possible between neighbours from different sets $V_{0,i}$, nor between neighbours from $V_0$ and $V_1$.
Note first that for every $i=0,1,2,3,4$ and each $v\in V_{0,i}$, by (\ref{FirstSvEstimation}), (\ref{dE0inV0}), (\ref{dV0i}), (\ref{dE1inV0}) and (\ref{dV0}):
\begin{eqnarray}
\sigma(v)
&\geq& 2 d_{V_0}(v)-2d_{V_{0,i}}(v) -1+ 2 \left(d_{E_1}(v)+d_{E_0}(v)\right)+ 3\left(d_{V_1}(v) - d_{E_1}(v) - d_{E_0}(v)\right) \nonumber\\
&=& 2d+d_{V_1}(v) - d_{E_1}(v) - d_{E_0}(v) -2d_{V_{0,i}}(v) -1 \nonumber\\
&\geq& 2d +d_{V_1}(v) - d_{E_1}(v) - 0.2i\left(d_{V_1}(v) - d_{E_1}(v)\right) - 10^{-3}d - 2\left(0.2d_{V_0}(v)+10^{-3}d\right) -1 \nonumber\\
&=& 2d - 0.4 d_{V_0}(v) + \left(1-0.2i\right) d_{V_1}(v) - \left(1-0.2i\right)d_{E_1}(v) - 0.003d - 1 \nonumber\\
&\geq& 2d - 0.4 d_{V_0}(v) + \left(1-0.2i\right)d_{V_1}(v) - \left(1-0.2i\right)\left(0.08d_{V_1}(v)+ 10^{-3}d\right) - 0.003d - 1 \nonumber\\
&=& 2d - 0.4 d_{V_0}(v) + \left(1-0.2i\right)\cdot 0.92d_{V_1}(v) - 0.004d + 2i\cdot 10^{-4}d - 1 \nonumber\\
&=& 2d - 0.4 d_{V_0}(v) + \left(1-0.2i\right)\cdot 0.92\left(d-d_{V_0}(v)\right) - 0.004d + 2i\cdot 10^{-4}d - 1 \nonumber\\
&=& \left(2.916 - 0.184i\right)d - \left(1.32 - 0.184i\right)d_{V_0}(v)+ 2i\cdot 10^{-4}d - 1 \nonumber\\
&\geq& \left(2.916 - 0.184i\right)d - \left(1.32 - 0.184i\right) \cdot 0.0503 d + 2i\cdot 10^{-4}d - 1 \nonumber\\
&=& \left(2.849604 - 0.1745448i\right)d - 1, \label{LowerSvBoundinV0i}
\end{eqnarray}
while by~(\ref{dE0inV0}), (\ref{dE1inV0}) and (\ref{dV1}), also for every $i=0,1,2,3,4$ and each $v\in V_{0,i}$:
\begin{eqnarray}
\sigma(v) &\leq& 2d_{E_0}(v)+3\left(d-d_{E_0}(v)\right)
=
3d-d_{E_0}(v)
\leq
3d - 0.2i\left(d_{V_1}(v)-d_{E_1}(v)\right) + 10^{-3}d \nonumber\\
&\leq& 3d - 0.2i\left(d_{V_1}(v)-0.08d_{V_1}(v) - 10^{-3}d\right) + 10^{-3}d
=
\left(3.001 +2\cdot 10^{-4}i\right)d - 0.184i \cdot d_{V_1}(v) \nonumber\\
&\leq& \left(3.001 +2\cdot 10^{-4}i\right)d - 0.184i \left(0.95d - 0.0003d \right)
=
\left(3.001 - 0.1745448 i\right)d. \label{UpperSvBoundinV0i}
\end{eqnarray}
Thus by~(\ref{UpperSvBoundinV0i}) and~(\ref{LowerSvBoundinV0i}), for every $i=0,1,2,3$ and $u\in V_{0,i}$, $v\in V_{0,i+1}$:
\begin{equation}\label{V0i+1SmallerV0i}
\sigma(v) \leq \left[3.001 - 0.1745448 \left(i+1\right)\right]d = \left(2.8264552 - 0.1745448i\right)d < \sigma(u).
\end{equation}
Finally, to justify that there are no sum conflicts between vertices from $V_0$ and $V_1$, by~(\ref{V1iSmallerV1i+1}) and~(\ref{V0i+1SmallerV0i}) it is sufficient to show that sums in $V_{1,10^4}$ are smaller than sums in $V_{0,4}$.
To see that this is actually true, note that by~(\ref{SecondSv}), (\ref{DefinitionOfc1i}), (\ref{Delta2}) and~(\ref{LowerSvBoundinV0i}), for any $u\in V_{1,10^4}$ and $v\in V_{0,4}$:
\begin{eqnarray}
\sigma(u) &\leq& \left(1.0776+1.06875\cdot \left(10^4-1\right)\cdot 10^{-4}\right)d + 0.0018956d + 10^{-4}\cdot 1.0603 d \nonumber\\
&=& 2.148244755 d
< 2.1514248 d - 1
= \left(2.849604 - 0.1745448\cdot 4\right)d - 1
\leq \sigma(v). \nonumber
\end{eqnarray}
This finishes the proof of Theorem~\ref{123largeregTh} as the obtained weighting $\omega_2$ is thus indeed a $3$-weighting of the edges of $G$ such that there are no sum conflicts between neighbours in $G$.
\qed
\section{Concluding Remarks}
The constant $10^8$ above could still be improved, but at the cost of clarity of presentation of the proof of Theorem~\ref{123largeregTh}. Nevertheless, we were far from being able to push it down to $10^7$.
Actually, introducing the special subgraph $G'_1$ of $G_1$, based on Corollary~\ref{QuarterDecompositionLemma}, served merely optimization purposes. These required also using only $1$'s and $3$'s as weights in $G_1$.
However, setting aside this direction of optimizing the lower bound for $d$ might be otherwise beneficial. Namely, using mostly $2$'s and $1$'s in $G_1$ we might assure via a similar argument as in Section~\ref{SectionProofLargeD} an arbitrarily small fraction of all edges weighted $3$ in a vertex-colouring $3$-weighting
of any $d$-regular graph with $d$ large enough.
Apart from this, our approach can also be relatively easily extended to graphs which are not regular, but whose minimum degree $\delta$ is slightly larger (by an arbitrary $\epsilon>0$) than half of the maximum degree $\Delta$ (and $\Delta$ is large enough) -- this greatly improves the mentioned result from~\cite{123dense-Zhong} that the 1--2--3 Conjecture holds if $\delta>0.99985n$, where $n$ is sufficiently large order of a graph.
We omit details here, as we believe that in fact this can still be improved towards a stronger result for general graphs for which
it is sufficient that $\delta$ is at least a very small function of $\Delta$, of order much less than $\Delta$.
This shall however require a few extra ideas, as our approach does not directly transfer
at this point to such a case.
\end{document} |
\begin{document}
\markboth{P.~Skrzypacz and D.~Wei}{On the solvability of the Brinkman-Forchheimer-extended Darcy equation}
\title{ON THE SOLVABILITY OF THE BRINKMAN-FORCHHEIMER-EXTENDED DARCY EQUATION}
\author{
Piotr Skrzypacz\footnote{
Dr. Piotr Skrzypacz, School of Science and Technology,
Nazarbayev University,
53 Kabanbay Batyr Ave., Astana 010000 Kazakhstan,
Email: {\it [email protected]}
} ~and~ Dongming Wei\footnote{
Dr. Dongming Wei, School of Science and Technology,
Nazarbayev University,
53 Kabanbay Batyr Ave., Astana 010000 Kazakhstan,
Email: {\it [email protected]}
}~
}
\maketitle
\begin{abstract}
The nonlinear Brinkman-Forchheimer-extended Darcy equation is used
to model some porous medium flow in chemical reactors of packed bed type.
The results concerning the existence and uniqueness of a weak solution
are presented for nonlinear convective flows in medium with nonconstant porosity and for small data.
Furthermore, the finite element approximations to the flow profiles in the fixed bed reactor are presented for several Reynolds numbers
at the non-Darcy's range.
\end{abstract}
\textbf{2010 Mathematics Subject Classification (MSC):}~
76D03,
35Q35
\textbf{Keywords:}~
Brinkman-Forchheimer Equation, Packed Bed Reactors, Existence and Uniqueness of Solution\\
\section{{I}ntroduction}
In this section we introduce the mathematical model describing incompressible isothermal flow in porous medium without reaction. The
considered equations for the velocity and pressure fields are for flows in fluid saturated porous media. Most of
research results for flows in porous media are based on the Darcy equation which is considered to be a suitable model at a small range of Reynolds numbers. However, there are restrictions of Darcy equation for modeling some porous medium flows, e.g. in closely packed
medium, saturated fluid flows at slow velocity but with relatively large Reynolds numbers. The flows in such closely packed medium behave nonlinearly and cannot be modelled accurately by the Darcy equation, which is linear. The deficiency can be circumvented with the Brinkman--Forchheimer-extended Darcy law for flows in closely packed media, which leads to the following model:
Let $\Omega\subset{\mathbb R}^n$, $n=2, 3$, represent the reactor channel. We denote
its boundary by $\Gamma=\partial\Omega$. The conservation of volume-averaged
values of momentum and mass in the packed reactor reads as follows
\begin{equation}\label{s2eq1}
\begin{array}{rcr}
\displaystyle
-\textrm{div}\,\left(\varepsilon \nu \nabla \boldsymbol u-\varepsilon\boldsymbol
u\otimes\boldsymbol u \right)+\frac{\varepsilon}{\varrho}\nabla p + \sigma(\boldsymbol u)=\boldsymbol f&\textrm{in}&\Omega\,,\\
\textrm{div}\,(\varepsilon\boldsymbol u)=0&\textrm{in}&\Omega\,,
\end{array}
\end{equation}
where $\boldsymbol u\,:\Omega\to{\mathbb R}^n$,~ $p\,:\Omega\to{\mathbb R}$ denote the unknown velocity and pressure, respectively. The
positive quantity $\varepsilon=\varepsilon(\boldsymbol x)$ stands for porosity which describes the proportion of the
non-solid volume to the total volume of material and varies spatially in general. The expression
$\sigma(\boldsymbol{u})$ represents the friction forces caused by the packing and will be specified later on. The
right-hand side $\boldsymbol f$ represents an outer force (e.g. gravitation), $\varrho$ the constant fluid density and
$\nu$ the constant kinematic viscosity of the fluid, respectively. The expression $\boldsymbol u\otimes\boldsymbol u$ symbolizes the dyadic product of $\boldsymbol u$ with itself.
The formula given by Ergun \cite{Ergun_1} will be used to model the influence
of the packing on the flow inertia effects
\begin{align}\label{s2eq2}
\sigma(\boldsymbol u)=150\nu\frac{(1-\varepsilon)^2}{\varepsilon^2d_p^2}\boldsymbol u
+1.75\frac{1-\varepsilon}{\varepsilon d_p}\boldsymbol u|\boldsymbol u|\;.
\end{align}
Thereby $d_p$ stands for the diameter of pellets and $|\cdot|$ denotes the Euclidean vector norm. The linear term in (\ref{s2eq2}) accounts for the head loss according to Darcy and the quadratic term according to Forchheimer law, respectively. For the derivation of the equations, modelling and homogenization questions in porous media we refer to e.g. \cite{Bey_1, Hornung_1}.
To close the system (\ref{s2eq1}) we prescribe Dirichlet boundary condition
\begin{equation}\label{s2eq3}
\boldsymbol u\arrowvert_{\Gamma}=\boldsymbol g\,,
\end{equation}
whereby
\begin{equation}\label{zerocomp}
\int\limits_{\Gamma_i}\varepsilon\boldsymbol
g\cdot\boldsymbol n\,ds=0
\end{equation}
has to be fulfilled on each connected component $\Gamma_i$ of the
boundary $\Gamma$.
We remark
that in the case of polygonally bounded domain the outer normal
vector $\boldsymbol n$ has jumps and thus the above integral
should be replaced by a sum of integrals over each side of
$\Gamma$.
The distribution of porosity $\varepsilon$ is assumed to satisfy the following bounds
\begin{equation}\label{A1}
0<\varepsilon_0\le \varepsilon(\boldsymbol x)\le
\varepsilon_1\le 1\quad\forall\,\boldsymbol x\in\Omega\tag{\textrm{A1}}\,,
\end{equation}
with some constants $0<\varepsilon_0,\;\varepsilon_1\le 1$.
A comprehensive account of fluid flows through porous media beyond the Darcy law's valid regimes, classified by the Reynolds number, can be found in, e.g., \cite{Zhao_1}. Also, see \cite{Upton_1} for simulating pumped water levels in abstraction boreholes using such a nonlinear Darcy-Forchheimer law, and \cite{Grillo_1}, \cite{Sobieski_1}, and \cite{Lal_1} for recent references on this model.
In the next section we use the porosity distribution which is estimated for packed beds consisting of spherical particles and takes the near wall channelling effect into account. This kind of porosity distribution obeys assumption \eqref{A1}.
Let us introduce dimensionless quantities
\begin{align*}
\boldsymbol u^*=\frac{\boldsymbol u}{U_0}\,,\quad
p^*=\frac{p}{\varrho U_0^2}\,,\quad
\boldsymbol{x}^*=\frac{\boldsymbol x}{d_p}\,,\quad
\boldsymbol{g}^*=\frac{\boldsymbol g}{U_0}\,,
\end{align*}
whereby $U_0$ denotes the magnitude of some reference velocity. For simplicity of notation we omit the asterisks. Then, the reactor flow problem reads in dimensionless form as follows
\begin{equation}\label{s2eq4}
\left\{\begin{array}{rclrl}
\displaystyle
-\textrm{div}\,\left(\frac{\varepsilon}{Re}\nabla \boldsymbol u-
\varepsilon\boldsymbol u\otimes\boldsymbol u \right)+\varepsilon\nabla p +
\frac{\alpha}{Re}\boldsymbol u+\beta\boldsymbol u|\boldsymbol u|&=&\boldsymbol f &\textrm{in} &\Omega\,,\\
\textrm{div}\,(\varepsilon\boldsymbol u)&=&0 &\textrm{in} &\Omega\,,\\
\boldsymbol u&=&\boldsymbol g &\textrm{on} &\Gamma\,,
\end{array}\right.
\end{equation}
where
\begin{equation}\label{alpha_beta_model}
\begin{split}
\alpha(\boldsymbol x)&=150\kappa^2(\boldsymbol x)\,,\qquad \beta(\boldsymbol x)=1.75\kappa(\boldsymbol x)
\end{split}
\end{equation}
with
\begin{equation}\label{kappa_model}
\kappa(\boldsymbol x)=\frac{1-\varepsilon(\boldsymbol x)}{\varepsilon(\boldsymbol x)}\,,
\end{equation}
and the Reynolds number is defined by
$$Re=\frac{U_{0}\,d_p}{\nu}\,.$$
The existence and uniqueness of a solution of the nonlinear model (\ref{s2eq4}) with constant porosity and without the convective term has been established in \cite{Kaloni}. In this work we extend that result to the case of spatially varying porosity with the convective term included.
\begin{remark}\label{remarkNSE_1}
\eqref{s2eq4} becomes a Navier-Stokes problem if $\varepsilon\equiv 1$.
\end{remark}
{\bf Notation}~~Throughout the work we use the following notations for function
spaces. For $m\in{\mathbb N}_0$, $p\ge 1$ and bounded subdomain $G\subset\Omega$ let $W^{m,p}(G)$ be the
usual Sobolev space equipped with norm $\|\cdot\|_{m,p,G}$. If $p=2$, we denote the Sobolev space
by $H^m(G)$ and use the standard abbreviations $\|\cdot\|_{m,G}$ and $|\cdot|_{m,G}$ for the norm
and seminorm, respectively. We denote by $D(G)$ the space of $C^\infty(G)$ functions with compact
support contained in $G$. Furthermore, $H_0^m(G)$ stands for the closure of $D(G)$ with respect
to the norm $\|\cdot\|_{m,G}$. The counterparts spaces consisting of vector valued functions
will be denoted by bold faced symbols like $\boldsymbol{H}^m(G):=[H^m(G)]^n$ or
$\boldsymbol{D}(G):=[D(G)]^n$. The $L^2$ inner product over $G\subset\Omega$ and
$\partial G\subset\partial\Omega$ will be denoted by $(\cdot,\cdot)_G$ and
$\langle\cdot,\cdot\rangle_{\partial G}$, respectively. In the case $G=\Omega$ the
domain index will be omitted. In the following we denote by $C$ the generic
constant which is usually independent of the model parameters, otherwise
dependences will be indicated.
\section{Existence and uniqueness results}\label{s3}
In the following the porosity $\varepsilon$ is assumed to belong to
$W^{1,3}(\Omega)\cap L^\infty(\Omega)$. We start with the weak formulation of problem \eqref{s2eq4} and look for its solution in suitable Sobolev spaces.
\subsection{Variational formulation}
Let $$L^2_0(\Omega):=\{ v\in L^2(\Omega): (v,1)=0\}$$ be the space
consisting of $L^2$ functions with zero mean value. We define the spaces
\begin{equation*}
\boldsymbol{X}:=\boldsymbol{H}^1(\Omega)\,,\quad
\boldsymbol{X}_0:=\boldsymbol{H}^1_0(\Omega)\,,\quad
Q:=L^2(\Omega)\,,\quad M:=L^2_0(\Omega)\,,
\end{equation*}
and
\begin{equation*}
\boldsymbol{V}:=\boldsymbol{X}_0\times M\,.
\end{equation*}
Let us introduce the following bilinear forms
\begin{equation*}
\begin{alignedat}{3}
&a:\,\boldsymbol{X}\times\boldsymbol{X}&\to{\mathbb R}\,,&&\qquad a(\boldsymbol u, \boldsymbol v)&=\frac{1}{Re}\bigl(\varepsilon\nabla\boldsymbol u,\nabla\boldsymbol v\bigr)\,,\\[1.5ex]
&b:\,\boldsymbol{X}\times Q&\to{\mathbb R}\,,&&\qquad b(\boldsymbol u,q)&=\bigl(\textrm{div}(\varepsilon\boldsymbol{u}),q\bigr)\,,\\[1.5ex]
&c:\,\boldsymbol{X}\times \boldsymbol{X}&\to{\mathbb R}\,,&&\qquad c(\boldsymbol u,\boldsymbol v)&=\frac{1}{Re}\bigl(\alpha\boldsymbol{u},\boldsymbol{v}\bigr)\,.\\
\end{alignedat}
\end{equation*}
Furthermore, we define the semilinear form
\begin{equation*}
d:\,\boldsymbol{X}\times\boldsymbol{X}\times\boldsymbol{X}\rightarrow{\mathbb R}\,,\qquad d(\boldsymbol w;\boldsymbol u,\boldsymbol v)=\bigl(\beta |\boldsymbol{w}|\boldsymbol{u},\boldsymbol{v}\bigr)\,,
\end{equation*}
and trilinear form
\begin{equation*}
n:\,\boldsymbol{X}\times\boldsymbol{X}\times\boldsymbol{X}\rightarrow{\mathbb R}\,,\qquad
n(\boldsymbol w,\boldsymbol u,\boldsymbol v)=\bigl((\varepsilon \boldsymbol w\cdot\nabla)\boldsymbol u,\boldsymbol v\bigr)\,.
\end{equation*}
We set
\begin{equation*}
A(\boldsymbol{w};\boldsymbol{u},\boldsymbol{v}):=a(\boldsymbol{u},\boldsymbol{v})+c(\boldsymbol{u},\boldsymbol{v})+n(\boldsymbol{w},\boldsymbol{u},\boldsymbol{v})+d(\boldsymbol{w};\boldsymbol{u},\boldsymbol{v})\,.
\end{equation*}
Multiplying momentum and mass balances in (\ref{s2eq4}) by test functions $\boldsymbol v\in\boldsymbol{X}_0$ and $q\in M$, respectively, and integrating by parts implies the weak formulation:\\[2ex]
\hspace*{1cm}Find $\displaystyle (\boldsymbol u,p)\in \boldsymbol{X}\times M$ with $\boldsymbol u\arrowvert_{\Gamma}=\boldsymbol g$\quad such that
\begin{equation}\label{s3eq5}
A(\boldsymbol{u};\boldsymbol{u},\boldsymbol{v})-b(\boldsymbol{v},p)+b(\boldsymbol{u},q)=(\boldsymbol{f},\boldsymbol{v})\quad\forall\;(\boldsymbol v,q)\in \boldsymbol{V}\,.
\end{equation}
First, we recall the following result from \cite{BernardiLaval}:
\begin{theorem}\label{s3thm1}
The mapping $u\mapsto \varepsilon u$ is an isomorphism from
$H^1(\Omega)$ onto itself and from $H^1_0(\Omega)$ onto
itself. It holds for all $u\in H^1(\Omega)$
\begin{equation*}
\|\varepsilon u\|_1\le
C\{\varepsilon_1+|\varepsilon|_{1,3}\}\,\|u\|_1\qquad\text{and}\qquad
\left\|\frac{u}{\varepsilon}\right\|_1\le
C\left\{\varepsilon_0^{-1}+\varepsilon_0^{-2}\,|\varepsilon|_{1,3}\right\}\|u\|_1\,.
\end{equation*}
\end{theorem}
In the following the closed subspace of $\boldsymbol H^1_0(\Omega)$ defined by
\begin{equation*}
\boldsymbol{W}=\{\boldsymbol w\in\boldsymbol H^1_0(\Omega):\quad b(\boldsymbol w,q)=0\quad\forall\; q\in L^2_0(\Omega)\}
\end{equation*}
will be employed. Next, we establish and prove some properties of trilinear form $n(\cdot,\cdot,\cdot)$ and nonlinear form $d(\cdot;\cdot,\cdot)$.
\begin{lemma}\label{s3lem2}
Let $\boldsymbol u, \boldsymbol v\in\boldsymbol H^1(\Omega)$ and $\boldsymbol w\in\boldsymbol H^1(\Omega)$ with $\text{div}\,(\varepsilon\boldsymbol w)=0$ and $\boldsymbol w\cdot\boldsymbol n\arrowvert_\Gamma=0$. Then we have
\begin{equation}\label{s3eq6}
n(\boldsymbol w,\boldsymbol u,\boldsymbol v)=-n(\boldsymbol w,\boldsymbol v,\boldsymbol u)\,.
\end{equation}
Furthermore, the trilinear form $n(\cdot,\cdot,\cdot)$ and the nonlinear form
$d(\cdot;\cdot,\cdot)$ are continuous, i.e.
\begin{equation}\label{s3eq7}
|n(\boldsymbol u,\boldsymbol v,\boldsymbol w)|\le C_\varepsilon\,\|\boldsymbol u\|_1 \|\boldsymbol v\|_1 \|\boldsymbol w\|_1\quad\forall\;\boldsymbol u,\boldsymbol v,\boldsymbol w\in \boldsymbol H^1(\Omega)\,,
\end{equation}
\begin{equation}\label{s3eq7b}
|d(\boldsymbol u;\boldsymbol v,\boldsymbol w)|\le C_\varepsilon\, \|\boldsymbol u\|_1 \|\boldsymbol v\|_1 \|\boldsymbol w\|_1\quad\forall\;\boldsymbol u,\boldsymbol v,\boldsymbol w\in \boldsymbol H^1(\Omega)\,,
\end{equation}
and for $\boldsymbol{u}\in\boldsymbol{W}$ and for a sequence $\boldsymbol u^k\in\boldsymbol{W}$ with $\lim\limits_{k\to\infty}\|\boldsymbol u^k-\boldsymbol{u}\|_0=0$, we have also
\begin{equation}\label{s3eq8}
\lim\limits_{k\to\infty}n(\boldsymbol u^k,\boldsymbol u^k,\boldsymbol v)=n(\boldsymbol u,\boldsymbol u,\boldsymbol v)\quad\forall\;\boldsymbol v\in\boldsymbol{W}.
\end{equation}
\end{lemma}
\begin{proof}
We follow the proof of \cite[Lemma 2.1, \S 2, Chapter IV]{giro} and adapt it to the trilinear form
$$n(\boldsymbol w,\boldsymbol u,\boldsymbol v)=\bigl((\varepsilon \boldsymbol
w\cdot\nabla)\boldsymbol u,\boldsymbol v\bigr)=\sum\limits_{i,j=1}^n\bigl(\varepsilon
w_j\partial_ju_i,v_i\bigr)\,,$$ which has the weighting factor $\varepsilon$. Hereby, symbols with subscripts denote components of bold faced vectors, e.g. $\boldsymbol{u}=(u_i)_{i=1,\ldots,n}$. Let $\boldsymbol{u}\in\boldsymbol{H}^1$, $\boldsymbol{v}\in\boldsymbol{D}(\Omega)$ and $\boldsymbol w\in\boldsymbol{W}$.
Integrating by parts and employing density argument, we obtain immediately \eqref{s3eq6}
\begin{equation*}
\begin{split}
&\sum\limits_{i,j=1}^n\bigl(\varepsilon w_j\partial_j u_i,v_i\bigr)
=-\sum\limits_{i,j=1}^n\bigl(\partial_j\left(\varepsilon w_jv_i\right),u_i \bigr)+\sum\limits_{i,j=1}^n\langle\varepsilon w_j n_j u_i,v_i\rangle\\
&=-\sum\limits_{i,j=1}^n\bigl(\varepsilon w_j\partial_jv_i,u_i\bigr)-\bigl(\text{div}\,(\varepsilon\boldsymbol w)\boldsymbol u,\boldsymbol v\bigr)
+\bigl\langle (\varepsilon\boldsymbol{w}\cdot\boldsymbol{n})\boldsymbol{u},\boldsymbol{v}\bigr\rangle\\
&=-n(\boldsymbol w,\boldsymbol v,\boldsymbol u).
\end{split}
\end{equation*}
From Sobolev embedding $H^1(\Omega)\hookrightarrow L^4(\Omega)$ (see \cite{Adams}) and H\"older inequality follows
\begin{equation*}
\left|\bigl(\varepsilon w_j\partial_ju_i,v_i\bigr)\right|\le |\varepsilon|_{0,\infty}\,\|w_j\|_{0,4}\,\|\partial_ju_i\|_0\,\|v_i\|_{0,4}
\le C\,|\varepsilon|_{0,\infty}\,\| w_j\|_1\, |u_i|_1\,\|v_i\|_1\,,
\end{equation*}
and consequently the proof of \eqref{s3eq7} is completed.
Since $\lim\limits_{k\rightarrow\infty}\|u_{i}^ku_{j}^k-u_iu_j\|_{0,1}=0$ and $\displaystyle\varepsilon\partial_jv_i\in L^\infty(\Omega)$, the continuity estimate \eqref{s3eq7} implies
\begin{equation*}
\begin{split}
\lim\limits_{k\rightarrow\infty} n(\boldsymbol u^k,\boldsymbol u^k,\boldsymbol v)&=-\lim\limits_{k\rightarrow\infty} n(\boldsymbol u^k,\boldsymbol v,\boldsymbol u^k)=-\lim\limits_{k\to\infty}\sum\limits_{i,j=1}^n\bigl(\varepsilon u_j^k\,\partial_jv_i,u_i^k\bigr)\\
&=-\sum\limits_{i,j=1}^n\bigl(\varepsilon u_j\partial_j v_i,u_i\bigr)=-n(\boldsymbol u,\boldsymbol v,\boldsymbol u)=n(\boldsymbol u,\boldsymbol u,\boldsymbol v)\,.
\end{split}
\end{equation*}
The continuity of $d(\cdot;\cdot,\cdot)$ follows from H\"older inequality and Sobolev embedding $H^1(\Omega)\hookrightarrow L^4(\Omega)$ (see \cite{Adams})
\begin{equation*}
|d(\boldsymbol u;\boldsymbol v,\boldsymbol w)|\le |\beta|_\infty\, \|\boldsymbol u\|_{0,4}\, \|\boldsymbol v\|_{0,4}\, \|\boldsymbol w\|_0\le C_\varepsilon \|\boldsymbol u\|_1\, \|\boldsymbol v\|_1\, \|\boldsymbol w\|_1\,.
\end{equation*}
\end{proof}
In the next stage we consider the difficulties caused by prescribing the inhomogeneous Dirichlet
boundary condition. Analogous difficulties are already encountered in the analysis of
Navier--Stokes problem. We will carry out the study of three dimensional case. The extension in
two dimensions can be constructed analogously. Since $\boldsymbol g\in\boldsymbol{H}^{1/2}(\Gamma)$, we can extend $\boldsymbol g$ inside of $\Omega$ in the form of
$$\boldsymbol g=\varepsilon^{-1}\,\textrm{curl}\,\boldsymbol h$$
with some $\boldsymbol h\in \boldsymbol{H}^2(\Omega)$. The operator $\textrm{curl}$ is
defined then as
$$
\textrm{curl}\, \boldsymbol{h}=(\partial_2 h_3-\partial_3 h_2,\, \partial_3
h_1-\partial_1 h_3,\, \partial_1 h_2-\partial_2 h_1)\,.
$$
We note that in the two dimensional
case the vector potential $\boldsymbol{h}\in \boldsymbol{H}^2(\Omega)$ can be replaced
by a scalar function $h\in
H^2(\Omega)$ and the operator $\textrm{curl}$ is then redefined as $\textrm{curl}\, h=(\partial_2
h, -\partial_1 h)$. Our aim is to adapt the extension of Hopf (see \cite{Hopf}) to our
model. We recall that for any parameter $\mu>0$ there exists a scalar function $\varphi_\mu\in C^2(\bar{\Omega})$ such that
\begin{equation}\label{Ex}
\left.\begin{split}
&\hspace{-0.5cm}\bullet\quad\varphi_\mu=1~\text{in some neighborhood of
$\Gamma$ (depending on $\mu$)}\,,\\[2ex]
&\hspace{-0.5cm}\bullet\quad\varphi_\mu(\boldsymbol{x})=0~\text{if
$d_\Gamma(\boldsymbol{x})\ge 2\exp{(-1/\mu)}$\,, where $d_\Gamma(\boldsymbol
x):=\inf\limits_{\boldsymbol
y\in\Gamma}|\boldsymbol{x}-\boldsymbol{y}|$}\\[-0.5ex]
&\hspace{0.5cm}\text{denotes the distance of $\boldsymbol x$ to
$\Gamma$}\,,\\[2ex]
&\hspace{-0.5cm}\bullet\quad
|\partial_j\varphi_\mu(\boldsymbol{x})|\le\mu/d_\Gamma(\boldsymbol{x})~~
\text{if~~$d_\Gamma(\boldsymbol x)< 2\exp{(-1/\mu)}$\,, $j=1,\ldots, n\,.$}
\end{split}\;\right\}\tag{\text{Ex}}
\end{equation}
For the construction of $\varphi_\mu$ see also \cite[Lemma 2.4,
\S 2, Chapter IV]{giro}.\\[1.5ex]
Let us define
\begin{equation}\label{s3extension}
\boldsymbol g_\mu:=\varepsilon^{-1}\,\textrm{curl}\,(\varphi_\mu\boldsymbol h)\,.
\end{equation}
In the following lemma we establish bounds which are crucial for proving existence of velocity.
\begin{lemma}\label{s3lem3}
The function $\boldsymbol g_\mu$ satisfies the following conditions
\begin{equation}\label{s3eq9}
\textrm{div}\,(\varepsilon\boldsymbol g_\mu) =0,\quad \boldsymbol
g_\mu\arrowvert_{\Gamma}=\boldsymbol g\qquad\forall\,\mu>0\,,
\end{equation}
and for any $\delta>0$ there exists sufficiently small $\mu>0$ such that
\begin{align}
\label{s3eq10} |d(\boldsymbol u+\boldsymbol g_\mu;\boldsymbol
g_\mu,\boldsymbol u)| & \le \delta\,\|\beta\|_{0,\infty}\,|\boldsymbol u|_1\bigl(|\boldsymbol u|_1+\|\boldsymbol g_\mu\|_0\bigr)\qquad\forall\;\boldsymbol u\in\boldsymbol{X}_0\,,\\
\label{s3eq11} |n(\boldsymbol u,\boldsymbol g_\mu,\boldsymbol u)| &\le \delta\,|\boldsymbol u|_1^2\qquad\forall\;\boldsymbol u\in\boldsymbol{W}\,.
\end{align}
\end{lemma}
\begin{proof}
The relations in \eqref{s3eq9} are obvious. We follow \cite{Kaloni} in order to show \eqref{s3eq10}. Since $\boldsymbol h\in\boldsymbol{H}^2(\Omega)$ Sobolev's embedding theorem implies $\boldsymbol h\in \boldsymbol{L}^\infty(\Omega)$, so we get according to the properties of $\varphi_\mu$ in \eqref{Ex} the following bound
\begin{equation*}
|\boldsymbol g_\mu|\le C\,\varepsilon_0^{-1}\,\left\{|\nabla\boldsymbol
h|+\frac{\mu}{d_\Gamma(\boldsymbol{x})} |\boldsymbol h|\right\}
\le C\,\left\{\frac{\mu}{d_\Gamma(\boldsymbol{x})}+|\nabla\boldsymbol h|\right\}.\end{equation*}
Defining
$$
\Omega_\mu:=\{\boldsymbol x\in\Omega:\;d_\Gamma(\boldsymbol{x})<2\exp(-1/\mu)\}
$$
we obtain from Cauchy-Schwarz and triangle inequalities
\begin{equation}\label{s3eq12}
\begin{split}
|\bigl(\beta |\boldsymbol u+\boldsymbol g_\mu|, \boldsymbol g_\mu\cdot\boldsymbol u\bigr)|
&\le\,\|\beta\|_{0,\infty}\,\|\boldsymbol u\|_0\,\|\boldsymbol
u\cdot\boldsymbol g_\mu\|_{0,\Omega_\mu}\\
&\qquad+\|\beta\|_{0,\infty}\,\|\boldsymbol g_\mu\|_0\,\|\boldsymbol u\cdot\boldsymbol g_\mu\|_{0,\Omega_\mu}\,,
\end{split}
\end{equation}
\begin{equation*}
\begin{split}
&\|\boldsymbol u\cdot\boldsymbol g_\mu\|_{0,\Omega_\mu}^2
\le \int\limits_{\Omega_\mu}|\boldsymbol u|^2|\boldsymbol g_\mu|^2 d\boldsymbol x\\
&\le C\int\limits_{\Omega_\mu}|\boldsymbol
u|^2\biggl\{\bigl(\mu/d_\Gamma(\boldsymbol{x})\bigr)^2+2\mu/d_\Gamma(\boldsymbol{x})\,|\nabla\boldsymbol h|+|\nabla\boldsymbol h|^2\biggr\}d\boldsymbol x\\
&\le C\left \{\mu^2 \|\boldsymbol u/d_\Gamma\|_{0,\Omega_\mu}^2+2\mu
\|\boldsymbol u/d_\Gamma\|_{0,\Omega_\mu}\,\|\boldsymbol{u}\|_{0,4,\Omega_\mu}\,\bigl\||\nabla\boldsymbol h|\bigr\|_{0,4,\Omega_\mu}
+\|\boldsymbol u\|_{0,4,\Omega_\mu}^2\bigl\||\nabla\boldsymbol h|\bigr\|_{0,4,\Omega_\mu}^2\right\}\\
&\le C\left \{\mu \|\boldsymbol u/d_\Gamma\|_{0,\Omega_\mu}+\|\boldsymbol u\|_{0,4}\bigl\||\nabla\boldsymbol h|\bigr\|_{0,4,\Omega_\mu}\right\}^2,
\end{split}
\end{equation*}
and consequently
\begin{equation}\label{s3eq13}
\|\boldsymbol u\cdot\boldsymbol g_\mu\|_{0,\Omega_\mu}\le C\left\{\mu
\|\boldsymbol u/d_\Gamma\|_{0,\Omega_\mu}+\|\boldsymbol u\|_{0,4}\bigl\||\nabla\boldsymbol h|\bigr\|_{0,4,\Omega_\mu}\right\}\,.
\end{equation}
Applying Hardy inequality (see \cite{Adams})
$$\|v/d_\Gamma\|_0\le C|v|_1\quad\forall\;v\in H_0^1(\Omega)$$
and using Sobolev embedding $H^1(\Omega)\hookrightarrow L^4(\Omega)$, estimate (\ref{s3eq13}) becomes
\begin{equation}\label{s3eq14}
\|\boldsymbol u\cdot\boldsymbol g_\mu\|_{0,\Omega_\mu}\le C\lambda(\mu)\|\boldsymbol u\|_1,
\end{equation}
where
$$\lambda(\mu):=\max\bigl\{\mu,\bigl\||\nabla\boldsymbol
h|\bigr\|_{0,4,\Omega_\mu}\bigr\}\,.$$
From \eqref{s3eq12}, \eqref{s3eq14}, Poincar\'e inequality and from the fact that $\lim\limits_{\mu\to 0}\lambda(\mu) = 0$ we conclude that for any $\delta>0$ we can choose sufficiently small $\mu>0$ such that
\begin{equation*}
|(\beta\,|\boldsymbol u+\boldsymbol g_\mu|\boldsymbol g_\mu,\boldsymbol u)|
\le\delta\,\|\beta\|_{0,\infty}\,|\boldsymbol{u}|_1\bigl(|\boldsymbol{u}|_1+\|\boldsymbol{g}_\mu\|_0\bigr)
\end{equation*}
holds. Therefore the proof of estimate \eqref{s3eq10} is completed. Now, we take a look at the trilinear convective term
\begin{equation*}
\begin{split}
n(\boldsymbol{u},\boldsymbol{g}_\mu,\boldsymbol{u})&=
\bigl((\varepsilon\boldsymbol{u}\cdot\nabla)\boldsymbol{g}_\mu, \boldsymbol{u}\bigr)_{\Omega_\mu}
=\biggl((\varepsilon\boldsymbol{u}\cdot\nabla)\left\{\varepsilon^{-1}\,\text{curl}\,(\varphi_\mu\boldsymbol{h})\right\}, \boldsymbol{u}\biggr)_{\Omega_\mu}\\
&=\biggl((\boldsymbol{u}\cdot\nabla)\left\{\text{curl}\,(\varphi_\mu\boldsymbol{h})\right\}, \boldsymbol{u}\biggr)_{\Omega_\mu}-\bigl((\boldsymbol{u}\cdot\nabla\varepsilon)\,\boldsymbol{g}_\mu,\boldsymbol{u}\bigr)_{\Omega_\mu}\,.
\end{split}
\end{equation*}
The first term of the above difference becomes small due to
\cite[Lemma 2.3, \S 2, Chapter IV]{giro}, and it satisfies
\begin{equation}\label{s3eq15}
\left|\bigl((\boldsymbol{u}\cdot\nabla)\left\{\text{curl}\,(\varphi_\mu\boldsymbol{h})\right\}, \boldsymbol{u}\bigr)_{\Omega_\mu}\right|=\left|\bigl((\boldsymbol{u}\cdot\nabla)(\varepsilon\boldsymbol{g}_\mu), \boldsymbol{u}\bigr)_{\Omega_\mu}\right|\le \delta |\boldsymbol{u}|_1^2
\end{equation}
as long as $\mu>0$ is chosen sufficiently small. Using H\"older inequality, Sobolev embedding $H^1(\Omega)\hookrightarrow L^6(\Omega)$ yields
\begin{equation*}
\left|\bigl((\boldsymbol{u}\cdot\nabla\varepsilon)\,\boldsymbol{g}_\mu,\boldsymbol{u}\bigr)_{\Omega_\mu}\right|\le C\|\varepsilon\|_{1,3}\,\|\boldsymbol{g}_\mu\cdot\boldsymbol{u}\|_0\,\|\boldsymbol{u}\|_1\,,
\end{equation*}
which together with \eqref{s3eq14} implies for sufficiently small $\mu>0$ the bound
\begin{equation}\label{s3eq16}
\left|\bigl((\boldsymbol{u}\cdot\nabla\varepsilon)\,\boldsymbol{g}_\mu,\boldsymbol{u}\bigr)_{\Omega_\mu}\right|\le \delta |\boldsymbol{u}|_1^2\,.
\end{equation}
From \eqref{s3eq15} and \eqref{s3eq16} follows the desired estimate \eqref{s3eq11}.
\end{proof}
While the general framework for linear and non-symmetric saddle point problems can be found in \cite{BernardiLaval},
our problem requires more attention due to its nonlinear character. Setting $\boldsymbol{w}:=\boldsymbol u-\boldsymbol g_\mu$, the weak formulation \eqref{s3eq5} is equivalent to the following problem\\[2ex]
\hspace*{0.4cm}Find $\displaystyle (\boldsymbol w,p)\in \boldsymbol{V}$ such that
\begin{gather}\label{s3eq17}
\begin{split}
A(\boldsymbol{w}+\boldsymbol{g}_\mu;\boldsymbol{w}+\boldsymbol{g}_\mu,\boldsymbol{v})-b(\boldsymbol{v},p)+b(\boldsymbol{w}+\boldsymbol{g}_\mu,q)=(\boldsymbol{f},\boldsymbol{v})\quad\forall\; (\boldsymbol{v},q)\in\boldsymbol{V}\,.
\end{split}
\end{gather}
Let us define the nonlinear mapping $G:\; \boldsymbol{W}\rightarrow \boldsymbol{W}$ with
\begin{equation}\label{s3eq18}
\begin{split}
\bigl[G(\boldsymbol w),\boldsymbol v\bigr]:=&a(\boldsymbol w+\boldsymbol g_\mu,\boldsymbol v)+c(\boldsymbol w+\boldsymbol g_\mu,\boldsymbol v)-(\boldsymbol f,\boldsymbol v)\\
&\;+n(\boldsymbol w+\boldsymbol g_\mu,\boldsymbol w+\boldsymbol g_\mu,\boldsymbol v)+d(\boldsymbol w+\boldsymbol g_\mu;\boldsymbol w+\boldsymbol g_\mu,\boldsymbol v)\,,
\end{split}
\end{equation}
whereby $[\cdot,\cdot]$ defines the inner product in $\boldsymbol{W}$ via
$[u,v]:=(\nabla u,\nabla v)$.
Then, the variational problem (\ref{s3eq17}) reads in the space of $\varepsilon$-weighted divergence free functions $\boldsymbol{W}$ as follows\\[2ex]
\hspace*{0.4cm}Find $\displaystyle \boldsymbol w\in \boldsymbol{W}$ such that
\begin{equation}\label{s3eq19}
\bigl[G(\boldsymbol w),\boldsymbol v\bigr]=0\quad\forall\;\boldsymbol v\in\boldsymbol{W}.
\end{equation}
\subsection{Solvability of nonlinear saddle point problem}
We start our study of the nonlinear operator problem \eqref{s3eq19} with the following lemma.
\begin{lemma}\label{s3lem4}
The mapping $G$ defined in (\ref{s3eq18}) is continuous and there exists $r>0$ such that
\begin{equation}\label{s3eq20}
\bigl[G(\boldsymbol u),\boldsymbol u\bigr] >0\quad\forall\;\boldsymbol u\in\boldsymbol{W}\quad\textrm{with}\quad |\boldsymbol u|_1=r.
\end{equation}
\end{lemma}
\begin{proof}
Let $(\boldsymbol u^k)_{k\in{\mathbb N}}$ be a sequence in $\boldsymbol{W}$ with $\lim\limits_{k\to\infty}\|\boldsymbol u^k-\boldsymbol u\|_1=0$. Then, applying Cauchy--Schwarz inequality and \eqref{s3eq11}, we obtain for any $\boldsymbol v\in\boldsymbol{W}$
\begin{equation*}
\begin{split}
&\left|\bigl[G(\boldsymbol u^k)-G(\boldsymbol u),\boldsymbol v\bigr]\right|\le \frac{1}{Re}\left|\bigl(\varepsilon\nabla(\boldsymbol u^k-\boldsymbol u),\nabla\boldsymbol v\bigr)\right|
+\frac{1}{Re}\left|\bigl(\alpha(\boldsymbol u^k-\boldsymbol u),\boldsymbol
v\bigr)\right|\\
&\quad +\left|\bigl(\beta|\boldsymbol u^k+\boldsymbol g_\mu|(\boldsymbol
u^k-\boldsymbol u),\boldsymbol v\bigr)\right|+\left|\bigl(\beta (|\boldsymbol
u^k+\boldsymbol g_\mu|-|\boldsymbol u+\boldsymbol g_\mu|)(\boldsymbol
u+\boldsymbol g_\mu),\boldsymbol v\bigr)\right|\\
&\quad+\left|n(\boldsymbol u^k,\boldsymbol u^k, \boldsymbol v)-n(\boldsymbol
u,\boldsymbol u,\boldsymbol v)\right|
+\left|n(\boldsymbol u^k-\boldsymbol u,\boldsymbol g_\mu,\boldsymbol
v)\right|+\left|n(\boldsymbol g_\mu, \boldsymbol u^k-\boldsymbol
u,\boldsymbol v)\right|\\
&\le\frac{\varepsilon_1}{Re}|\boldsymbol u^k-\boldsymbol u|_1|\boldsymbol v|_1
+\frac{1}{Re}\|\alpha\|_{0,\infty}\|\boldsymbol u^k-\boldsymbol u\|_0\|\boldsymbol v\|_0\\
&\quad+\|\beta\|_{0,\infty}\|\boldsymbol u^k+\boldsymbol g_\mu\|_{0,4}\|\boldsymbol u^k-\boldsymbol u\|_0\|\boldsymbol v\|_{0,4}
+\|\beta\|_{0,\infty}\|\boldsymbol u+\boldsymbol g_\mu\|_{0,4}\|\boldsymbol u^k-\boldsymbol u\|_0\|\boldsymbol v\|_{0,4}\\
&\quad+\left|n(\boldsymbol u^k,\boldsymbol u^k, \boldsymbol v)-n(\boldsymbol
u,\boldsymbol u,\boldsymbol v)\right|
+C\|\boldsymbol u^k-\boldsymbol u\|_1\|\boldsymbol g_\mu\|_1\|\boldsymbol
v\|_1\,.
\end{split}
\end{equation*}
The boundedness of $\boldsymbol u^k$ in $\boldsymbol{W}$, \eqref{s3eq8}, the Poincar\'e inequality, and the above inequality imply that
$$
\left|\bigl[G(\boldsymbol u^k)-G(\boldsymbol u),\boldsymbol v\bigr]\right|\to
0\quad\text{as}\quad k\to\infty\qquad\forall\,
\boldsymbol{v}\in\boldsymbol{W}\,.
$$
Thus, employing
$$
|G(\boldsymbol u^k)-G(\boldsymbol
u)|_1=\sup\limits_{\overset{\boldsymbol
v\in\boldsymbol{W}}{\boldsymbol v\neq \boldsymbol
0}}\frac{\bigl[G(\boldsymbol u^k)-G(\boldsymbol u),\boldsymbol
v\bigr]}{|\boldsymbol v|_1}\,,
$$
we state that $G$ is continuous. Now, we note that for any $\boldsymbol u\in\boldsymbol{W}$ we have
\begin{equation}\label{s3eq21}
\begin{split}
&\bigl[G(\boldsymbol u),\boldsymbol u\bigr]
=\frac{1}{Re}\bigl(\varepsilon\nabla(\boldsymbol u+\boldsymbol g_\mu),\nabla\boldsymbol u\bigr)+\frac{1}{Re}\bigl(\alpha(\boldsymbol u+\boldsymbol g_\mu),\boldsymbol u\bigr)\\
&\quad +\bigl(\beta|\boldsymbol u+\boldsymbol g_\mu|(\boldsymbol u+\boldsymbol g_\mu),\boldsymbol u\bigr)
+n(\boldsymbol u+\boldsymbol g_\mu,\boldsymbol u+\boldsymbol g_\mu,\boldsymbol u)-(\boldsymbol f,\boldsymbol u)\\
&\ge\frac{\varepsilon_0}{Re}|\boldsymbol u|_1^2-\frac{\varepsilon_1}{Re}|(\nabla\boldsymbol g_\mu,\nabla\boldsymbol u)|
+\frac{1}{Re}(\alpha\boldsymbol u,\boldsymbol u)-\frac{1}{Re}|(\alpha\boldsymbol g_\mu,\boldsymbol u)|\\
&\quad +(\beta|\boldsymbol u+\boldsymbol g_\mu|,|\boldsymbol
u|^2)-\left|(\beta |\boldsymbol u+\boldsymbol g_\mu|\boldsymbol
g_\mu,\boldsymbol u)\right|\\
&\quad +n(\boldsymbol u,\boldsymbol g_\mu,\boldsymbol u)+n(\boldsymbol g_\mu,\boldsymbol g_\mu,\boldsymbol u)-\|\boldsymbol f\|_0\|\boldsymbol u\|_0\\
&\ge\frac{\varepsilon_0}{Re}|\boldsymbol u|_1^2-\frac{\varepsilon_1}{Re}|\boldsymbol g_\mu|_1|\boldsymbol u|_1\\
&\quad -\frac{1}{Re}\|\alpha\|_{0,\infty}\|\boldsymbol g_\mu\|_0\|\boldsymbol u\|_0
-\left|(\beta |\boldsymbol u+\boldsymbol g_\mu|\boldsymbol g_\mu,\boldsymbol
u)\right|\\
&\quad -\left|n(\boldsymbol u,\boldsymbol g_\mu,\boldsymbol u)\right|
-C\|\boldsymbol g_\mu\|_1^2\|\boldsymbol u\|_1
-\|\boldsymbol f\|_0\|\boldsymbol u\|_0\,.
\end{split}
\end{equation}
From the Poincar\'e inequality, we infer the estimate
\begin{equation*}
\|v\|_1\le C|v|_1\quad\forall\; v\in H_0^1(\Omega),
\end{equation*}
which together with \eqref{s3eq10}, \eqref{s3eq11} and \eqref{s3eq21} results in
\begin{equation*}
\begin{split}
&\bigl[G(\boldsymbol u),\boldsymbol
u\bigr]\ge\left\{\frac{\varepsilon_0}{Re}-\delta(1+\|\beta\|_{0,\infty})\right\}|\boldsymbol u|_1^2\\
&\quad -\Bigl\{\frac{\varepsilon_1}{Re}|\boldsymbol
g_\mu|_1+C_1\frac{1}{Re}\|\alpha\|_{0,\infty}\|\boldsymbol g_\mu\|_0
+\delta \|\beta\|_{0,\infty}\|\boldsymbol g_\mu\|_0
+C_2\|\boldsymbol g_\mu\|_1^2+C_3\|\boldsymbol f\|_0\Bigr\}|\boldsymbol u|_1.
\end{split}
\end{equation*}
Choosing $\delta$ such that
\begin{equation*}
0<\delta<\delta_0:=\frac{\varepsilon_0}{Re}\bigl(1+\|\beta\|_{0,\infty}\bigr)^{-1}\,,
\end{equation*}
and $r>r_0$ with
\begin{equation}\label{s3eq22}
\begin{split}
r_0:=\frac{\displaystyle\frac{\varepsilon_1}{Re}|\boldsymbol
g_\mu|_1+\frac{1}{Re}C_1\|\alpha\|_{0,\infty}\|\boldsymbol
g_\mu\|_0+\delta \|\beta\|_{0,\infty}\|\boldsymbol g_\mu\|_0+C_2\|\boldsymbol g_\mu\|_1^2+C_3\|\boldsymbol f\|_0}
{\displaystyle\frac{\varepsilon_0}{Re}-\delta(1+\|\beta\|_{0,\infty})}\,,
\end{split}
\end{equation}
leads to the desired assertion (\ref{s3eq20}).
\end{proof}
The following lemma plays a key role in the existence proof.
\begin{lemma}\label{s3lem5}
Let $Y$ be a finite-dimensional Hilbert space with inner product $[\cdot,\cdot]$ inducing a norm $\|\cdot\|$, and let $T:\,Y\rightarrow Y$ be a continuous mapping such that
$$\bigl[T(x),x\bigr]>0\quad\textrm{for}\quad\|x\|=r_0>0.$$
Then there exists $x\in Y$, with $\|x\|\le r_0$, such that
$$T(x)=0.$$
\end{lemma}
\begin{proof}
See \cite{Lions_1}.
\end{proof}
Now we are able to prove the main result concerning the existence of the velocity.
\begin{theorem}\label{s3thm6}
The problem (\ref{s3eq19}) has at least one solution $\boldsymbol u\in\boldsymbol{W}$.
\end{theorem}
\begin{proof}
We construct the approximate sequence of Galerkin solutions. Since the space $\boldsymbol{W}$ is separable, there exists a sequence of linearly independent elements $\left(\boldsymbol w^i\right)_{i\in{\mathbb N}}\subset\boldsymbol{W}$. Let $\boldsymbol{X}_m$ be the finite dimensional subspace of $\boldsymbol{W}$ with
$$\boldsymbol{X}_m:=\text{span}\{\boldsymbol{w}^i\,,~ i=1,\ldots ,m\}$$
and endowed with the scalar product of $\boldsymbol{W}$. Let $\boldsymbol u^m=\sum\limits_{j=1}^ma_j\boldsymbol w^j,\;
a_j\in{\mathbb R}$\,, be a Galerkin solution of (\ref{s3eq19}) defined by
\begin{align}
\label{s3eq23} & \bigl[ G(\boldsymbol u^m),\boldsymbol w^j\bigr]=0,\quad\forall\; j=1,\ldots ,m\,.
\end{align}
From Lemma \ref{s3lem4} and Lemma \ref{s3lem5} we conclude that
\begin{equation}\label{s3eq24}
\bigl[G(\boldsymbol u^m),\boldsymbol w\bigr]=0\quad\forall\;\boldsymbol w\in\boldsymbol X_m
\end{equation}
has a solution $\boldsymbol u^m\in\boldsymbol X_m$. The unknown coefficients $a_j$ can be obtained from the algebraic system (\ref{s3eq23}). On the other hand, multiplying (\ref{s3eq23}) by $a_j$, and adding the equations for $j=1,\ldots, m$ we have
\begin{equation*}
\begin{split}
0&=\bigl[G(\boldsymbol u^m),\boldsymbol u^m\bigr]\\
&\ge \left\{\frac{\varepsilon_0}{Re}-\delta(1+\|\beta\|_{0,\infty})\right\}|\boldsymbol u^m|_1^2\\
&\quad -\Bigl\{\frac{\varepsilon_1}{Re}|\boldsymbol
g_\mu|_1+C_1\frac{1}{Re}\|\alpha\|_{0,\infty}\|\boldsymbol g_\mu\|_0
+\delta \|\beta\|_{0,\infty}\|\boldsymbol g_\mu\|_0
+C_2\|\boldsymbol g_\mu\|_1^2+C_3\|\boldsymbol f\|_0\Bigr\}|\boldsymbol u^m|_1.
\end{split}
\end{equation*}
This gives together with (\ref{s3eq22}) the uniform boundedness in $\boldsymbol{W}$
\begin{equation*}
|\boldsymbol u^m|_1\le r_0,
\end{equation*}
therefore there exists $\boldsymbol u\in\boldsymbol{W}$ and a subsequence $m_k\rightarrow\infty$ (for convenience we write $m$ instead of $m_k$) such that
\begin{equation*}
\boldsymbol u^m\rightharpoonup\boldsymbol u\quad\textrm{in}\quad\boldsymbol{W}.
\end{equation*}
Furthermore, the compactness of embedding $H^1(\Omega)\hookrightarrow L^4(\Omega)$ implies
\begin{equation*}
\boldsymbol u^m\rightarrow\boldsymbol u\quad\textrm{in}\quad\boldsymbol{L}^4(\Omega).
\end{equation*}
Taking the limit in (\ref{s3eq24}) with $m\rightarrow\infty$ we get
\begin{equation}\label{s3eq25}
\bigl[G(\boldsymbol u),\boldsymbol w\bigr]=0\quad\forall\;\boldsymbol w\in\boldsymbol X_m.
\end{equation}
Finally, we apply the continuity argument and state that (\ref{s3eq25}) is preserved for any $\boldsymbol w\in\boldsymbol{W}$, therefore $\boldsymbol u$ is the solution of (\ref{s3eq19}).
\end{proof}
For the reconstruction of the pressure we need the following inf-sup theorem.
\begin{theorem}\label{s3thm7}
Assume that the bilinear form $b(\cdot,\cdot)$ satisfies the inf-sup condition
\begin{equation}\label{s3eq26}
\inf\limits_{q\in M}\sup\limits_{\boldsymbol v\in\boldsymbol{X}_0}\frac{b(\boldsymbol v,q)}{|\boldsymbol v|_1\,\|q\|_0}\ge\gamma>0.
\end{equation}
Then, for each solution $\boldsymbol u$ of the nonlinear problem (\ref{s3eq19}) there exists a unique pressure $p\in M$ such that the pair $(\boldsymbol u,p)\in\boldsymbol{V}$ is a solution of the homogeneous problem (\ref{s3eq17}).
\end{theorem}
\begin{proof}
See \cite[Theorem 1.4, \S 1, Chapter IV]{giro}.
\end{proof}
We end up this subsection by proving the existence of the pressure.
\begin{theorem}\label{s3thm8}
Let $\boldsymbol w$ be a solution of problem \eqref{s3eq19}. Then, there exists a unique pressure $p\in M$.
\end{theorem}
\begin{proof}
We verify the inf-sup condition (\ref{s3eq26}) of Theorem
\ref{s3thm7} by employing the isomorphism of Theorem \ref{s3thm1}.
From \cite[Corollary 2.4, \S 2, Chapter I]{giro} follows that for any $q$ in $L_0^2(\Omega)$ there exists $\boldsymbol v$ in $\boldsymbol H_0^1(\Omega)$ such that
$$(\textrm{div}\,\boldsymbol v,q)\ge \gamma^*\|\boldsymbol v\|_1\|q\|_0$$
with a positive constant $\gamma^*$. Setting $\boldsymbol u=\boldsymbol v/\varepsilon$ and applying the isomorphism in Theorem \ref{s3thm1}, we obtain the estimate
\begin{equation*}
b(\boldsymbol u,q)=(\textrm{div}\,\boldsymbol v,q)\ge
\gamma^*\|\boldsymbol v\|_1\|q\|_0\ge \gamma_\varepsilon
\|\boldsymbol u\|_1\|q\|_0
\end{equation*}
where
$\displaystyle\gamma_\varepsilon=\frac{\gamma^*}{C\left\{\varepsilon_0^{-1}+\varepsilon_0^{-2}\,|\varepsilon|_{1,3}\right\}}$. From the above estimate we conclude the inf-sup condition \eqref{s3eq26}.
\end{proof}
\subsection{Uniqueness of weak solution}
We exploit a priori estimates in order to prove uniqueness of weak velocity and pressure.
\begin{theorem}\label{s3thm9}
If $\|\boldsymbol g_\mu\|_1$, $\displaystyle\|\boldsymbol f\|_{-1}:=\sup\limits_{\boldsymbol 0\neq \boldsymbol v\in \boldsymbol H^1(\Omega)}\frac{(\boldsymbol f,\boldsymbol v)}{\|\boldsymbol{v}\|_1}$ are sufficiently small, then the solution of (\ref{s3eq19}) is unique.
\end{theorem}
\begin{proof}
Assume that $(\boldsymbol u_1, p_1)$ and $(\boldsymbol u_2, p_2)$ are two different solutions of (\ref{s3eq17}). From \eqref{s3eq6} in Lemma \ref{s3lem2} we obtain $n(\boldsymbol w,\boldsymbol u,\boldsymbol u)=0~~\forall\;\boldsymbol w,\boldsymbol u\in\boldsymbol{W}$. Then, we obtain
\begin{equation}\label{s3eq27}
\begin{split}
0&=\bigl[G(\boldsymbol u_1)-G(\boldsymbol u_2),\boldsymbol
u_1-\boldsymbol u_2\bigr]\\
&=a(\boldsymbol u_1-\boldsymbol u_2,\boldsymbol u_1-\boldsymbol u_2)+c(\boldsymbol u_1-\boldsymbol u_2,\boldsymbol u_1-\boldsymbol u_2)-(\boldsymbol f,\boldsymbol u_1-\boldsymbol u_2)\\
&\quad + n(\boldsymbol u_1+\boldsymbol g_\mu,\boldsymbol u_1+\boldsymbol g_\mu,\boldsymbol u_1-\boldsymbol u_2)-n(\boldsymbol u_2+\boldsymbol g_\mu,\boldsymbol u_2+\boldsymbol g_\mu,\boldsymbol u_1-\boldsymbol u_2)\\
&\quad + (\beta |\boldsymbol u_1+\boldsymbol g_\mu|(\boldsymbol u_1+\boldsymbol g_\mu),\boldsymbol u_1-\boldsymbol u_2)\\
&\quad-(\beta |\boldsymbol u_2+\boldsymbol g_\mu|(\boldsymbol u_2+\boldsymbol g_\mu),\boldsymbol u_1-\boldsymbol u_2)\\
&\ge \frac{\varepsilon_0}{Re}|\boldsymbol u_1-\boldsymbol u_2|_1^2-\|\boldsymbol f\|_{-1}\|\boldsymbol u_1-\boldsymbol u_2\|_1\\
&\quad +n(\boldsymbol u_1-\boldsymbol u_2,\boldsymbol u_2+\boldsymbol g_\mu,\boldsymbol u_1-\boldsymbol u_2)\\
&\quad +\bigl(\beta|\boldsymbol u_1+\boldsymbol g_\mu|(\boldsymbol u_1-\boldsymbol u_2),\boldsymbol u_1-\boldsymbol u_2\bigr)\\
&\quad +\bigl(\beta(|\boldsymbol u_1+\boldsymbol g_\mu|-|\boldsymbol u_2+\boldsymbol g_\mu|)(\boldsymbol{u}_2+\boldsymbol{g}_\mu),\boldsymbol u_1-\boldsymbol u_2\bigr)\\
&\ge \frac{\varepsilon_0}{Re}|\boldsymbol u_1-\boldsymbol u_2|_1^2-\|\boldsymbol f\|_{-1}\|\boldsymbol u_1-\boldsymbol u_2\|_1\\
&\quad -\left|n(\boldsymbol u_1-\boldsymbol u_2,\boldsymbol
u_2,\boldsymbol u_1-\boldsymbol u_2)\right|
-\left|n(\boldsymbol
u_1-\boldsymbol u_2,\boldsymbol g_\mu,\boldsymbol u_1-\boldsymbol
u_2)\right|\\
&\quad -\|\beta\|_{0,\infty}\left|\bigl(|\boldsymbol u_1+\boldsymbol
g_\mu|\cdot |\boldsymbol u_1-\boldsymbol u_2|,|\boldsymbol u_1-\boldsymbol
u_2|\bigr)\right|\\
&\quad -\|\beta\|_{0,\infty}\left|\bigl(\bigl||\boldsymbol u_1+\boldsymbol
g_\mu|-|\boldsymbol u_2+\boldsymbol
g_\mu|\bigr|\cdot |\boldsymbol{u}_2+\boldsymbol{g}_\mu|,|\boldsymbol
u_1-\boldsymbol u_2|\bigr)\right|\,.
\end{split}
\end{equation}
From Cauchy-Schwarz inequality and Sobolev embedding $H^1(\Omega)\hookrightarrow
L^4(\Omega)$ we deduce
\begin{equation}\label{s3eq28}
\bigl|\bigl(|\boldsymbol u_1+\boldsymbol g_\mu|\cdot |\boldsymbol u_1-\boldsymbol
u_2|,|\boldsymbol u_1-\boldsymbol u_2|\bigr)\bigr|\le C\left\{\|\boldsymbol u_1\|_0+\|\boldsymbol g_\mu\|_0\right\}\|\boldsymbol u_1-\boldsymbol u_2\|_1^2\,,
\end{equation}
\begin{equation}\label{s3eq29}
\begin{split}
&\bigl|\bigl(\bigl||\boldsymbol u_1+\boldsymbol g_\mu|-|\boldsymbol
u_2+\boldsymbol
g_\mu|\bigr|\cdot |\boldsymbol{u}_2+\boldsymbol{g}_\mu|,|\boldsymbol
u_1-\boldsymbol u_2|\bigr)\bigr|\\
&\le C\left\{\|\boldsymbol u_2\|_0+\|\boldsymbol g_\mu\|_0\right\}\|\boldsymbol u_1-\boldsymbol u_2\|_1^2,
\end{split}
\end{equation}
and according to (\ref{s3eq7}) we have
\begin{equation}\label{s3eq30}
|n(\boldsymbol u_1-\boldsymbol u_2,\boldsymbol u_2,\boldsymbol u_1-\boldsymbol u_2)|\le C\|\boldsymbol u_2\|_1\|\boldsymbol u_1-\boldsymbol u_2\|_1^2,
\end{equation}
and by (\ref{s3eq9}) we can find $\mu$ such that
\begin{equation}\label{s3eq31}
|n(\boldsymbol u_1-\boldsymbol u_2,\boldsymbol g_\mu,\boldsymbol u_1-\boldsymbol u_2)|\le \frac{\varepsilon_0}{4 Re}\|\boldsymbol u_1-\boldsymbol u_2\|_1^2.
\end{equation}
Now, we find upper bounds for $\boldsymbol u_1$ and $\boldsymbol u_2$. Testing the equation (\ref{s3eq17}) with $\boldsymbol u$ results in
\begin{equation*}
\begin{split}
\frac{\varepsilon_0}{Re}\|\boldsymbol u\|_1^2 &\le \|\boldsymbol f\|_{-1}\|\boldsymbol u\|_1+\frac{\varepsilon_0}{Re}\|\boldsymbol g_\mu\|_1\|\boldsymbol u\|_1+C\|\boldsymbol g_\mu\|_0\|\boldsymbol u\|_0\\
&\quad +C\|\boldsymbol g_\mu\|_1^2\|\boldsymbol u\|_1
+C\|\beta\|_{0,\infty}\|\boldsymbol g_\mu\|_0\|\boldsymbol
u\|_1^2+C\|\beta\|_{0,\infty}\|\boldsymbol g_\mu\|_{0,4}^2\|\boldsymbol u\|_1\,.
\end{split}
\end{equation*}
From Sobolev embedding $H^1(\Omega)\hookrightarrow L^4(\Omega)$ we deduce for sufficiently small $\|\boldsymbol g_\mu\|_1$
\begin{equation}\label{s3eq32}
\|\boldsymbol u\|_1\le\frac{\|\boldsymbol f\|_{-1}+C_1\|\boldsymbol
g_\mu\|_1+C_2\|\boldsymbol
g_\mu\|_1^2}{\displaystyle\frac{\varepsilon_0}{Re}-C_3\|\beta\|_{0,\infty}\|\boldsymbol g_\mu\|_1}=:C\bigl(\|\boldsymbol g_\mu\|_1, \|\boldsymbol f\|_{-1}\bigr).
\end{equation}
Putting (\ref{s3eq28})-(\ref{s3eq32}) into (\ref{s3eq27}) and using the inequality
$$\|\boldsymbol f\|_{-1}\|\boldsymbol u_1-\boldsymbol u_2\|_1\le \frac{\varepsilon_0}{4Re}\|\boldsymbol u_1-\boldsymbol u_2\|_1^2+\frac{2 Re}{\varepsilon_0}\|\boldsymbol f\|_{-1}^2$$
we obtain
\begin{equation}\label{s3eq33}
\begin{split}
0&\ge \frac{\varepsilon_0}{2 Re}\|\boldsymbol u_1-\boldsymbol
u_2\|_1^2-\frac{2Re}{\varepsilon_0}\|\boldsymbol
f\|_{-1}^2-C\bigl(\|\boldsymbol g_\mu\|_1,\|\boldsymbol
f\|_{-1}\bigr)\,\|\beta\|_{0,\infty}\|\boldsymbol u_1-\boldsymbol u_2\|_1^2\\
&\quad -\frac{\varepsilon_0}{4 Re}\|\boldsymbol u_1-\boldsymbol u_2\|_1^2-C\bigl(\|\boldsymbol g_\mu\|_1,\|\boldsymbol f\|_{-1}\bigr)\|\boldsymbol u_1-\boldsymbol u_2\|_1^2\,.
\end{split}
\end{equation}
For sufficiently small $\|\boldsymbol g_\mu\|_1$, $ \|\boldsymbol f\|_{-1}$ the constant $C(\|\boldsymbol g_\mu\|_1,\|\boldsymbol f\|_{-1})$ in \eqref{s3eq32} gets small and consequently the right hand side of (\ref{s3eq33}) is nonnegative. This implies
$\boldsymbol u_1=\boldsymbol u_2$ and, according to Theorem \ref{s3thm8}, $p_1-p_2=0$.
\end{proof}
\section{A Channel Flow Problem in Packed Bed Reactors}
In this section, we provide an example of the flow problem in packed bed reactors with numerical solutions at small and relatively large Reynolds numbers to show the nonlinear behavior of the velocity solutions.
Let the reactor channel be represented by $\Omega=(0,L)\times (-R,R)$ where
$R=5$ and $L=60$.
\begin{figure}
\caption{Varying porosity.}
\label{fig_poro}
\end{figure}
In all computations we use the porosity distribution which is determined experimentally and takes into account the effect of wall channelling in packed bed reactors
\begin{equation}
\varepsilon(x,y)=\varepsilon(y)=\varepsilon_\infty\left\{1+\frac{1-\varepsilon_\infty}{\varepsilon_\infty}\,e^{-6(R-|y|)}\right\}\,,
\end{equation}
where $\varepsilon_\infty=0.45$. The distribution of the porosity is presented in Figure~\ref{fig_poro}.
We distinguish between the inlet, outlet and membrane parts of domain boundary $\Gamma$, and denote them by $\Gamma_{in}$, $\Gamma_{out}$ and $\Gamma_w$, respectively. Let
\begin{equation*}
\begin{array}{lcl}
\Gamma_{in}&=&\{(x,y)\in\Gamma:\; x=0\}\,,\\
\Gamma_{out}&=&\{(x,y)\in\Gamma:\; x=L\}\,,\\
\Gamma_{w}&=&\{(x,y)\in\Gamma:\; y=-R,\; y=R\}\,.
\end{array}
\end{equation*}
At the inlet $\Gamma_{in}$ and at the membrane wall we prescribe Dirichlet boundary conditions, namely the plug flow conditions
\begin{equation*}
\boldsymbol{u}\arrowvert_{\Gamma_{in}}=\boldsymbol{u}_{in}=(u_{in},0)^T\,,
\end{equation*}
and
\begin{equation*}
\boldsymbol{u}\arrowvert_{\Gamma_{w}}=\boldsymbol{u}_{w}=
\begin{cases}
(0,u_w)^T & \quad\text{for}\quad y=-R\,,\\
(0,-u_w)^T & \quad\text{for}\quad y=R\,,
\end{cases}
\end{equation*}
whereby $u_{in}>0$, $u_w>0$. At the outlet $\Gamma_{out}$ we set the following outflow boundary condition
\begin{equation*}
-\frac{1}{Re}\,\frac{\partial\boldsymbol{u}}{\partial\boldsymbol{n}}+p\boldsymbol{n}=\boldsymbol{0}
\end{equation*}
where $\boldsymbol{n}$ denotes the outer normal. In order to avoid
discontinuity between the inflow and wall conditions we replace constant
profile by trapezoidal one with zero value at the corners. Our
computations are carried out on the Cartesian mesh using biquadratic conforming and discontinuous piecewise linear finite elements for the approximation of the velocity and pressure, respectively. The finite element analysis of the Brinkman-Forchheimer-extended Darcy equation will be conducted in the forthcoming work. The plots of velocity magnitude in fixed bed reactor ($u_w=0$) are
presented along the vertical axis $x=50$. In the investigated reactor the
inlet velocity is assumed to be normalized ($u_{in}=1$). Due to the
variation of porosity we might expect higher velocity at the reactor
walls $\Gamma_w$. This tunnelling effect can be well observed in Figure~\ref{fig4} which shows the velocity profiles
for different Reynolds numbers. We remark that the maximum of velocity magnitude decreases with increasing Reynolds numbers.
\begin{figure}
\caption{Flow profiles in fixed bed reactor at $x=50$.}
\label{fig4}
\end{figure}
\section{Conclusion}
In this work, we have extended the existence and uniqueness results available in the literature for the porous medium flow problem based on the nonlinear Brinkman-Forchheimer-extended Darcy law. The existing results are valid only for constant porosity and without the convection effects considered here, whereas our result holds for variable porosity and includes convective effects. We also provided a numerical solution to demonstrate the nonlinear velocity behavior at moderately large Reynolds numbers, for which the Brinkman-Forchheimer-extended Darcy law applies.
\end{document} |
\begin{document}
\begin{abstract}
We discuss in this note the stickiness phenomena for nonlocal minimal surfaces. Classical minimal surfaces in convex domains do not stick to the boundary of the domain, hence examples of stickiness can be obtained only by removing the assumption of convexity. On the other hand, in the nonlocal framework, stickiness is ``generic''. We provide various examples from the literature, and focus on the case of complete stickiness in highly nonlocal regimes.\\
In questa nota ci occupiamo del fenomeno di attaccamento al bordo delle superfici minime nonlocali. Generalmente, le superfici minime classiche non presentano tale fenomeno in un dominio convesso, pertanto alcuni esempi di attaccamento al bordo si ottengono solamente in assenza della condizione di convessit\`{a}. Per contro, nel contesto nonlocale, l'attaccamento al bordo \`{e} un comportamento ``generico''. Proporremo diversi esempi dalla letteratura, per di pi\`{u} incentrati sul caso di attaccamento completo al bordo, nei cosiddetti regimi altamente nonlocali.
\end{abstract}
\maketitle
\tableofcontents
The problem regarding surfaces with least area among those enclosed by a given curve is one of the first questions that arose in the calculus of variations. Named after Plateau due to his experiments on soap films and bubbles, carried out by the Belgian physicist in the nineteenth century, the question on minimal surfaces actually dates back to Lagrange (1760). Plateau's problem received some first answers in the thirties in ${\mathbb R}^3$, by Douglas and Rad\'{o}. In its full generality, it was attacked by several outstanding mathematicians, who tackled the problem from different, very ingenious perspectives, such as, to mention the most famous: Almgren and Allard, introducing the theory of varifolds, Federer and Fleming developing the theory of currents, Reifenberg applying methods from algebraic topology, De Giorgi working with the perimeter operator (see the beautiful Introduction of \cite{MinGiusti} for more details). The achievements and the history on Plateau's and closely related problems are inscribed in many branches of mathematics, such as geometric measure theory (actually born to study this problem), differential geometry, calculus of variations, potential theory, complex analysis and mathematical physics. The story is far from being over, since the various fields of study are nowadays very active; they present a variety of new accomplishments and still pose many open problems. The reader can consult the following books, surveys and papers \cite{ColdMin,Perez} for classical minimal surfaces, \cite{NevMar,Nevmarr} for the Willmore conjecture and min-max theory approach, \cite{DeLellis1,DeLellis2,ambtt} for recent achievements in geometric measure theory, and can find further references of their interest therein.
This note will just ``scratch the surface'' in the attempt to give an introduction to the argument. We will focus on the case of co-dimension one, following the approach of the Italian mathematician Ennio De Giorgi, who defines minimal surfaces as boundaries of sets which minimize a perimeter operator inside a domain, among sets with given boundary data. In this context, the main argument on which we focus is the so-called stickiness phenomenon: in some occasions, minimal surfaces are forced by the minimization problem and the boundary constraints to ``attach'' to the boundary of the given domain.
For classical minimal surfaces, this phenomenon is rare and happens only in ``extreme'' conditions. In convex domains, minimal surfaces reach transversally the boundary of the domain, so stickiness is not contemplated. Furthermore, minimal graphs (i.e., minimal surfaces which are also graphs) always attain in convex domains their (continuous) boundary data in a continuous way. We will present in Example \ref{msstick1} a situation in which stickiness may happen if the domain is not convex.
On the other hand, nonlocal minimal surfaces, introduced as the nonlocal (fractional) counterpart of the classical ones, typically stick. Even taking the ``best'' domain (i.e. a ball) and a very nice exterior data, surprisingly the stickiness phenomenon is not only possible, but it appears in many circumstances. In this note, we gather several examples from the literature and we discuss in more detail the case of complete stickiness (that is, when the nonlocal minimal surface attaches completely to the boundary of the domain), in highly nonlocal regimes (that is, for small values of the fractional parameter).
In the rest of the paper, we set the following notations:
\begin{itemize}
\item points in ${\mathbb R}^n$ as $x=(x_1,\dots,x_n)$ and points in ${\mathbb R}^{n+1}$ as $X=(x,x_{n+1})$,
\item the $(n-1)$-Hausdorff measure as ${\mathcal H}^{n-1}$,
\item the complement of a set $\Omega \subset {\mathbb R}^n$ by ${\mathcal C} \Omega={\mathbb R}^n \setminus \Omega$,
\item the ball of radius $r>0$ and center $x\in {\mathbb R}^n$ as
\[ B_r(x)=\big\{ y\in {\mathbb R}^n \; \big| \; |y-x|<r\big\}, \qquad B_r:= B_r(0),\]
\item the area of the unit sphere as
\[ \omega_n:={\mathcal H}^{n-1}(\partial B_1).\]
\end{itemize}
\section{An introduction to classical minimal surfaces}
Just to give a basic idea, the approach of De Giorgi to minimal surfaces can be summarized as follows.
Consider an open set $\Omega \subset {\mathbb R}^n$ and a measurable set $E \subset {\mathbb R}^n$. If the set $E$ has $C^2$ boundary inside $\Omega$, the area of the boundary of $E$ in $\Omega$ is given by
\eqlab{\label{smo} \mbox{Area}(\partial E \cap \Omega) = {\mathcal H}^{n-1}(\partial E\cap \Omega).}
On the other hand, in case $E$ does not have a smooth boundary, one can introduce a weak version of the perimeter.
\begin{definition} Let $\Omega\subset {\mathbb R}^n$ be an open set and $E\subset{\mathbb R}^n $ be a measurable set. The perimeter of $E$ in $\Omega$ is given by
\eqlab{ \label{bv} P(E,\Omega):= \sup_{g \in C_c^1(\Omega,{\mathbb R}^n), |g|\leq 1} \int_{E} {\rm div} \, g \, dx.}
\end{definition}
Notice that when $E$ has $C^2$ boundary, the expected \eqref{smo} is recovered. Indeed, taking any $g \in C_c^1(\Omega, {\mathbb R}^n)$, we have that
\bgs{ \int_{E} \mbox{div} g \, dx = \int_{\partial E} g \cdot \nu_E \, d {\mathcal H}^{n-1} ,}
using the divergence theorem and denoting by $\nu_E$ the exterior unit normal to $E$. Then
\bgs{ P(E,\Omega) = &\; \sup_{g \in C_c^1(\Omega,{\mathbb R}n), |g|\leq 1}\int_{E} \mbox{div} g \, dx \\
=&\; \sup_{g \in C_c^1(\Omega,{\mathbb R}n),|g|\leq 1} \int_{\partial E} g \cdot \nu_E \, d {\mathcal H}^{n-1}
\\
\leq &\; \int_{\partial E\cap \Omega} d {\mathcal H}^{n-1} ={\mathcal H}^{n-1}(\partial E \cap \Omega) .}
A particular choice of $g$ leads to the opposite inequality and proves the statement. Since $E$ has smooth boundary, $\nu_E$ is a $C^1$ vector valued function, so it can be extended to a vector field $N \in C^1({\mathbb R}n, {\mathbb R}n)$, with $\|N\|\leq 1$. Consider a cut-off function $\eta \in C^\infty_c(\Omega)$ with $|\eta|\leq 1$ and use $g =\eta N$. Then
\bgs{ P(E,\Omega)=&\; \sup_{g \in C_c^1(\Omega,{\mathbb R}n),|g|\leq 1} \int_{\partial E} g \cdot \nu_E \, d {\mathcal H}^{n-1}
\\
\geq &\; \sup_{\eta \in C^\infty_c(\Omega), |\eta|\leq 1} \int_{\partial E} \eta \, d {\mathcal H}^{n-1}
\\
=&\;{\mathcal H}^{n-1}(\partial E \cap \Omega).}
We recall that
the space of functions of bounded variation $BV(\Omega)$ is defined
as
\[ BV(\Omega):=\big\{ u\in L^1(\Omega) \; \big| \; [u]_{BV(\Omega)}<\infty\big\},
\]
where
\[ [u]_{BV(\Omega)}= \sup_{g \in C_c^1(\Omega,{\mathbb R}n), |g|\leq 1} \int_{{\mathbb R}n} u \, {\rm div} g\, dx,\]
and that $BV(\Omega)$ is a Banach space with the norm
\[ \|u\|_{BV(\Omega)} =\|u\|_{L^1(\Omega)} + [u]_{BV(\Omega)}.\]
It is evident then that the perimeter of a set $E\subset {\mathbb R}n$ is the total variation of its characteristic function, i.e. the $BV$ semi-norm
of the characteristic function of $E$
\sys[\chi_E(x)=]{ &1, && x\in E
\\
&0, && x \in {\mathcal C} E,}
so we can write that
\eqlab{\label{bvv} P(E,\Omega)= [\chi_E]_{BV(\Omega)}.}
Sets of (locally) finite perimeter, or of (local) finite total variation (i.e., sets with $P(E,\Omega)<\infty$) bear the name of the Italian mathematician Renato Caccioppoli, who introduced them in 1927.
Among sets of finite perimeter, minimal sets are the ones that minimize the perimeter with respect to some fixed ``boundary'' data. Of course, we work in the class of equivalence of sets, that is, we identify sets which coincide up to sets of measure zero. Maintaining the same perimeter, in principle sets could have completely different topological boundaries.
That is why in this note we assume measure theoretic notions (see for instance \cite[Chapter 3]{MinGiusti}, \cite[Section 1.2]{bucluk}).
In order to avoid any technical difficulties, a set is defined as minimal in $\Omega$ if it minimizes the perimeter among competitors with whom it coincides outside of $\Omega$. Precisely:
\begin{definition} \label{min}
Let $\Omega \subset {\mathbb R}n$ be an open, bounded set, $\mathcal B$ be an open ball such that $\bar \Omega \subset \mathcal B$ and $E\subset {\mathbb R}n$ be a measurable set. Given $E_0:= E \cap( {\mathcal B} \setminus \Omega)$, then $E$ is a minimal set in $\Omega$ with respect to $E_0$ if $P(E,{\mathcal B})<\infty$ and
\[P(E, {\mathcal B})\leq P(F,{\mathcal B}) \]
for any $F$ such that
\[ F\cap ({\mathcal B} \setminus \Omega) =E_0. \]
\end{definition}
Since the perimeter is a local operator, the ``boundary'' data considered is in the proximity of $\partial \Omega$.
That is why it is not necessary to require that $E=F$ in the whole complement of $\Omega$, and it suffices to consider the ball ${\mathcal B}$ (and hence, not to worry about what happens far away from $\Omega$). We make the choice of a ball ${\mathcal B}$ for simplicity; one could consider an open set $\mathcal O \supset \bar \Omega$, or for some $\rho>0$ the set $\Omega_\rho:=\{ x\in {\mathbb R}n \; | \; d(x, \Omega)<\rho\}.$
In the space $BV(\Omega)$, it is also quite natural to prove the existence of minimal sets. The lower semi-continuity of $BV(\Omega)$ functions and the fact that sequences of sets with uniformly bounded perimeters are precompact in the $L^1_{loc}$ topology, allow to employ direct methods in the calculus of variations (see, for instance, \cite[Theorem 1.20]{MinGiusti}, \cite[Theorem 3.1]{TeoAlessio}) and to prove the existence of a minimal set, for a given $E_0$ of finite perimeter.
\begin{theorem} Let $\Omega \subset {\mathbb R}n$ be a bounded open set and let $E_0\subset {\mathcal C} \Omega$ be a set of finite perimeter. Then there exists $E$ a minimal set in $\Omega$ with respect to $E_0$.
\end{theorem}
The arduous part is to prove {regularity}: are the boundaries of these sets actually smooth (almost everywhere)? This is indeed the case, and this entitles the theory to refer to boundaries of minimal sets as {minimal surfaces}.
The boundary regularity of minimal sets can be summed up in the following theorem.
\begin{theorem}\label{minreg}
Let $\Omega\subset {\mathbb R}n$ be a bounded open set and $E$ be a minimal set. Then $\partial E$ is smooth, up to a closed, singular set of Hausdorff dimension at most $n-8$.
\end{theorem}
In other words, minimal surfaces are smooth for $n\leq 7$ (and they are actually analytical). In ${\mathbb R}^8$, there exist minimal surfaces with singular points. A well known example is Simons cone, which is a minimal cone (with a singularity in the origin):
\[ \mathcal S=\big\{(x,y)\in {\mathbb R}^4\times {\mathbb R}^4 \; \big| \; |x|=|y| \big\}.\]
\subsection{Minimal graphs}
In the first part of this Section, we have introduced the perimeter operator and have discussed some essential properties of the following problem.
\begin{problem} \label{pb1} Given $\Omega \subset {\mathbb R}n$ a bounded open set, ${\mathcal B}$ an open ball such that $\bar \Omega \subset {\mathcal B}$ and $E_0 \subset {\mathcal B} \setminus \Omega$ a set of finite perimeter, find
\[ \min \big\{ P(E,{\mathcal B}) \; \big| \; P(E,{\mathcal B})<\infty, E=E_0 \mbox{ in } {\mathcal B} \setminus \Omega\big\} .\]
\end{problem}
A special case of minimal sets that we look for are minimal subgraphs, case in which the minimal surfaces are called minimal graphs. We recall the space of Lipschitz continuous functions, denoted by $C^{0,1}(\Omega)$, defined for some open set $\Omega \subset{\mathbb R}n$ by continuous functions with finite Lipschitz constant
\[ [u]_{C^{0,1}(\Omega)} =\sup_{x,y\in \Omega, x\neq y}\frac{|u(x)-u(y)|}{|x-y|}.\]
The problem of looking for minimal graphs in $C^{0,1}(\Omega)$ can be stated as follows.\\
\begin{problem} \label{pb2} Given $\Omega \subset {\mathbb R}n$ a bounded open set with Lipschitz continuous boundary, and fixing $\varphi$ smooth enough on $\partial \Omega$, find $u\in C^{0,1}(\Omega)$ that realizes
\[ \min_{u=\varphi \mbox{ on } \partial \Omega} {\mathcal A}(u,\Omega),\]
where ${\mathcal A}$ is the area operator, defined as
\eqlab{ \label{are} {\mathcal A}(u,\Omega)= \int_{\Omega} \sqrt{1+|Du|^2}\, dx.}
\end{problem}
Notice that the area operator is well defined for $u\in C^{0,1}(\Omega)$.
Existence and uniqueness (given that the area functional is convex) can be proved in the following context (see \cite[Theorem 12.10]{MinGiusti}).
\begin{theorem}
Let $\Omega$ be a bounded open set with $C^2$ boundary of non-negative mean curvature, and $\varphi \in C^2({\mathbb R}n)$. Then Problem \ref{pb2} is uniquely solvable in $C^{0,1}(\Omega)$.
\end{theorem}
Tools of regularity of nonlinear partial differential equations in divergence form allow then to go from Lipschitz to analyticity in the interior and, in the hypothesis of the above theorem, to $C^2(\bar \Omega)$, settling the question on regularity of minimizers of Problem \ref{pb2} (see \cite[Theorem 12.11, 12.12]{MinGiusti}).
\begin{theorem}
Let $u \in C^{0,1}(\Omega)$ a solution of Problem \ref{pb2}. Then $u$ is analytic in $\Omega$. If moreover, $\partial \Omega$ and $\varphi$ are of class $C^{k,\alpha}$, with $k\geq 2$, then $u\in C^{k,\alpha}(\bar \Omega)$.
\end{theorem}
We stress that in order to ensure existence of a solution of Problem \ref{pb2}, the condition that the mean curvature of $\partial \Omega$ is nowhere negative is necessary. We provide here \cite[Example 12.15]{MinGiusti} (see also \cite[Example 1.1]{GiustiDirect}, \cite[Section 2.3]{giaq}) showing that for a domain whose boundary has somewhere negative mean curvature, the solution may not exist, or may not be regular up to the boundary. The following example is depicted in Figure \ref{stt}.
\begin{example}\label{msstick}
Let $0<\rho<R$, $M>0$ be fixed, and let $A_R^\rho$ be the annulus
\[ A_R^\rho =\big\{ x\in {\mathbb R}^2 \; \big| \; \rho<|x|<R\big\}.\]
Define $\varphi $ on the boundary of $A_R^{\rho}$ as
\sys[\varphi(x)=]{& 0, && \mbox{ for } |x|=R
\\
&M, && \mbox{ for } |x|=\rho.}
If $u$ is a minimum for the area in $A_R^{\rho}$, then the spherical average of $u$
\[ v(r) := \frac{1}{2\pi} \int_0^{2\pi} u(r,\vartheta) \, d\vartheta \]
decreases the area. Indeed, given the strict convexity of the area functional, by Jensen's inequality one gets that
\[ {\mathcal A}(v,A_R^\rho)<{\mathcal A}(u,A_R^\rho).\]
This implies that the minimum must be radial, i.e. $u=u(r)$.
The area functional can then be written as
\[ F(u)= 2\pi \int_{\rho}^R r\sqrt{1+(u'(r))^2}\, dr,\]
with Euler-Lagrange equation implying that $ru'/\sqrt{1+u'^2}$ is a constant, hence
\[ \frac{r u'(r)}{ \sqrt{1+(u'(r))^2}} = -c,\]
with $c\in[0, \rho] $ (positive since $u$ is non-increasing in $r$) to be determined using the boundary conditions.
The ODE, combined with $u(R)=0$, has the unique solution
\[ u(r)= c \log \frac{\sqrt{R^2-c^2} +R }{\sqrt{r^2-c^2} +r}.\]
One notices that the map
\[ f(c):=c \log \frac{\sqrt{R^2-c^2} +R }{\sqrt{\rho^2-c^2} +\rho}\] is non-decreasing in $[0,\rho]$, thus
\[ \sup_{0\leq c\leq \rho} u(\rho) = \sup_{c\in [0,\rho]} f(c)= \rho \log \frac{\sqrt{R^2-\rho^2} + R }{\rho}:=M_0,\]
with $M_0=M_0(R,\rho)$.
However, by boundary conditions, one should have $u(\rho)=M$, thus a solution exists if and only if $M_0\geq M$. Furthermore, notice that
\begin{itemize}
\item if $M_0 < M$, Problem \ref{pb2} does not have a solution;
\item if $M_0=M$, thus when
\eqlab{\label{12} u(r)= \rho \log \frac{\sqrt{R^2-\rho^2} + R }{\sqrt{r^2-\rho^2} +r } }
we have that
\[ \lim_{r\searrow \rho} |u'(r)| =\infty,\]
implying that $u$ is not smooth up to the boundary.
\end{itemize}
\end{example}
Taking into account Example \ref{msstick}, we see that looking for a minimum in $C^{0,1}(\Omega)$ can lead to a problem without any classical solution. Another formulation can be considered for Problem \ref{pb2}, which for the existence does not require non-negative mean curvature of $\Omega$ and relaxes the condition on the boundary data. As with general sets, one works in the space of functions of bounded variation.
For $u \in BV(\Omega)$, the area functional is defined as
\eqlab{ \label{fff} {\mathcal A}(u,\Omega) = \sup_{g \in C_c^{\infty}(\Omega, {\mathbb R}^{n+1}) , |g|\leq 1} \int_{\Omega} g_{n+1} + u\, {\rm div} g \, dx,
}
with ${\rm div} g= \sum_{i=1}^n \partial_i g_i(x)$.
Notice that for $u\in C^{0,1}(\Omega)$, the definition in \eqref{are} is recovered.
With definition \eqref{fff}, the problem can be considered in this way (see \cite[14.4]{MinGiusti}).
\begin{problem}\label{pb3}
Let $\Omega\subset {\mathbb R}n$ be a bounded open set, $\mathcal B $ be an open ball containing $\bar \Omega$ and let $\varphi \in W^{1,1}({\mathcal B} \setminus \Omega)$. Find
\[ \min \big\{ {\mathcal A}(u,{\mathcal B}) \; \big| \; u\in BV(\mathcal B), u=\varphi \mbox{ in } {\mathcal B}\setminus \bar \Omega \big\} .\]
\end{problem}
Problem \ref{pb3} can be reformulated. Notice that
\eqlab{ \label{af} {\mathcal A}(u,{\mathcal B}) = &\; {\mathcal A}(u,\Omega) + {\mathcal A}(u,{\mathcal B} \setminus \bar \Omega) + \int_{\partial \Omega} |u-\varphi| \, d {\mathcal H}^{n-1}
\\
=&\; {\mathcal A} (u,\Omega) + {\mathcal A}( \varphi, {\mathcal B} \setminus \bar \Omega) + \int_{\partial \Omega} |u-\varphi| \, d{\mathcal H}^{n-1}.
}
Since $\varphi$ is fixed outside of $\Omega$, minimizing $u $ in ${\mathcal B}$ with exterior data $\varphi$ boils down to minimizing both the area of $u$ in $\Omega$ and the area along the vertical wall $\partial \Omega \times {\mathbb R}$, lying between the graph of $\varphi$ and $u$.
The existence for any smooth set $\Omega$ is settled in the next Theorem, see \cite[Theorem 14.5]{MinGiusti}.
\begin{theorem} For $\Omega$ with Lipschitz continuous boundary, there exists a solution of Problem \ref{pb3}.
\end{theorem}
\begin{remark} \label{nmg} Notice the resemblance of Problem \ref{pb3} with Problem \ref{pb1}. The similitude does not stop at the way the problem is defined: for sets that are graphs, the two formulations are actually equivalent. This follows after some considerations:
\begin{enumerate}
\item defining the subgraph of $u\in BV(\Omega)$ as
\[ Sg (u,\Omega)= \big\{ (x,x_{n+1}) \in \Omega\times {\mathbb R} \subset {\mathbb R}^{n+1}\; \big| \; x_{n+1}<u(x)\big\},\]
it holds that
\[ {\mathcal A}(u,\Omega)=P(Sg (u,\Omega), \Omega \times {\mathbb R}),\]
\item given a set $F$ in a cylinder, then the perimeter decreases by replacing $F$ by a suitable subgraph, obtained with a ``vertical rearrangement'' of the set $F$ (check \cite[Lemma 5.1]{TeoAlessio}, \cite[Lemma 14.7, Theorem 14.8]{MinGiusti}).
\item observe that the domain in which we minimize the perimeter in the class of subgraphs is unbounded, so additional care is needed to deal with local minimizers (we say that $u$ is a local minimizer in $\Omega$ if it minimizes the functional in any set compactly contained in $\Omega$).
\end{enumerate}
\end{remark}
In particular, finding a minimal graph is equivalent to finding a local minimizer of the perimeter in the class of subgraphs (\cite[Theorem 14.9]{MinGiusti}).
Precisely:
\begin{theorem}
Let $u\in BV_{{\rm loc}}(\Omega)$ be a local minimum for the area functional. Then $Sg(u,\Omega)$ minimizes locally the perimeter in $\Omega\times {\mathbb R}$.
\end{theorem}
Since for graphs Problem \ref{pb1} and Problem \ref{pb3} are equivalent, regularity of general minimal surfaces applies to minimal graphs. Actually, purely functional techniques are used to prove that minimal graphs are smooth in any dimension \cite[Theorem 14.13]{MinGiusti}.
\begin{theorem}
Let $u\in BV_{{\rm loc}}(\Omega)$ locally minimize the area functional. Then $u$ is analytical inside $\Omega$.
\end{theorem}
On the other hand, looking at boundary regularity, \cite[Theorem 15.9]{MinGiusti} states that:
\begin{theorem}\label{regmg}
Let $\Omega\subset {\mathbb R}n$ be a bounded open set with $C^2$ boundary, and let $u$ solve Problem \ref{pb3}. Suppose that $\partial \Omega$ has non-negative mean curvature near $x_0\in \partial \Omega$ and that $\varphi$ is continuous at $x_0$. Then
\[ \lim_{x\to x_0} u(x)= \varphi(x_0) .\]
\end{theorem}
The above theorem can actually be stated for domains $\Omega$ with Lipschitz boundary, by using a suitable notion of mean curvature. Also, notice that asking for non-negative mean curvature is more general than asking $\Omega$ to be convex.
A more attentive look at Theorem \ref{regmg} allows us to conclude that
in general, for continuous boundary data $\varphi$ and for convex domains, the stickiness phenomenon does not happen for minimal graphs. We will see that the situation dramatically changes for nonlocal minimal graphs.
On the other hand, looking at Example \ref{msstick}, one can provide an example of stickiness in non-convex domains.
\begin{example}\label{msstick1}
Let $0<\rho<R$, $M>0$ be fixed, and let $A_R^\rho$ be the annulus
\[ A_R^\rho =\big\{ x\in {\mathbb R}^2 \; \big| \; \rho<|x|<R\big\}.\]
Define $\varphi $ as
\sys[\varphi(x)=]{& 0, && \mbox{ for } x\in {\mathcal C} B_R
\\
&M, && \mbox{ for } x\in \bar B_\rho,}
and let $u(x)$ be the minimum of the area functional, defined by \eqref{12} as
\[ u(x)= \rho \log \frac{\sqrt{R^2-\rho^2} + R }{\sqrt{|x|^2-\rho^2} +|x| } .\]
Consider
\sys[v(x):=]{&u(x) , && \rho \leq |x|\leq R\\
& \varphi(x), && x \in B_{\rho}\cup {\mathcal C} \bar B_R.}
Notice that according to \eqref{af} we have that
\bgs{ {\mathcal A}(v,B_{R+2})=&\; {\mathcal A}(v,A_R^\rho) + {\mathcal A}(v, B_{R+2} \setminus \bar {A_R^\rho} ) + \int_{\partial A_R^\rho} |v-\varphi| \, d{\mathcal H}^{n-1}
\\
=&\; {\mathcal A}(u,A_R^\rho) + {\mathcal A}(\varphi, B_{R+2} \setminus \bar {A_R^\rho} ) + |M-M_0| \omega_n \rho^{n-1}.
}
Now, $u$ is a minimum for the area in $A_R^\rho$ (as shown in Example \ref{msstick}), the contribution of $\varphi$ is fixed, and $M_0$ is the highest possible value that $u$ can reach. This implies that $v$ is a solution of Problem \ref{pb3}. In this case, we notice that on $\partial B_\rho \times {\mathbb R}$ the solution $v$ sticks at the boundary, that $v$ is not continuous across the boundary, and the subgraph of $v$ has a vertical wall along the boundary of the cylinder in which we minimize. See Figure \ref{stt}.
\end{example}
\begin{figure}
\caption{The geometric construction in Examples \ref{msstick} and \ref{msstick1}.}
\label{stt}
\end{figure}
\section{An introduction to nonlocal minimal surfaces}
Justified by nonlocal phase transition problems and by imaging processing, one is led to introduce a nonlocal (and fractional version) of the perimeter. This was admirably accomplished in the seminal paper \cite{nms} by Caffarelli, Roquejoffre and Savin in 2010. The readers can check also the beautiful and useful review \cite{senonlocal}.\\
Roughly speaking, one would like to have a definition of the nonlocal perimeter that takes into account long-range interactions between points in the set and in its complement, in the whole space, weighted by their mutual distance. The goal is then to minimize such a perimeter in a domain $\Omega \subset {\mathbb R}n$ among all competitors coinciding outside of $\Omega$, in a similar way to Definition \ref{min}. Notice now that in the nonlocal framework the data coming from far away plays a role, so the ``boundary'' data $E_0$ is given in the whole of ${\mathbb R}n\setminus \Omega$ and the data even very distant from $\Omega$ gives a contribution.
To arrive at the definition of fractional perimeter introduced in \cite{nms}, one could start from \eqref{bvv} and make use of a ``fractional counterpart'' of the $BV$ semi-norm. Notice that $ W^{1,1}(\Omega) \subset BV(\Omega)$, hence a good candidate turns out to be the {Gagliardo} $W^{s,1}$ semi-norm.
For some given $s\in (0,1)$, we recall that for a measurable function $u\colon {\mathbb R}n \to {\mathbb R}$
\[ [u]_{W^{s,1}(\Omega) }=\int_{\Omega}\int_{\Omega}
\frac{ |u(x)-u(y)|}{|x-y|^{n+s}} \, dx \,dy.\]
Informally thus (because these quantities may well be infinite), the fractional perimeter is given by the $W^{s,1}$ semi-norm of the characteristic function of the set $E$
\[ P_s (E,\Omega)= \frac12 \left([\chi_E]_{W^{s,1}({\mathbb R}n)} - [\chi_E]_{W^{s,1} ({\mathcal C} \Omega)}\right).\]
Of course, it would not be enough to take the $W^{s,1}$ semi-norm only in $\Omega$, because all far away information would be lost. Nonetheless, one excludes the interactions of ${\mathcal C} \Omega$ with itself. This is due to the fact that in the minimization problem the data outside of the domain $\Omega$ is fixed, and so is that contribution. All in all, the fractional perimeter is defined as follows.
\begin{definition}\label{pssss}
Let $s\in (0,1)$ be fixed, $\Omega\subset {\mathbb R}n$ be an open set and $E \subset {\mathbb R}n$ be a measurable set. Then
\[ P_s(E,\Omega)= \frac12\iint_{{\mathbb R}^{2n} \setminus ({\mathcal C} \Omega)^2} \frac{|\chi_{E}(x) -\chi_E(y)|}{|x-y|^{n+s}} \, dx \, dy.\]
\end{definition}
In the above definition, notice that only the interactions between $E$ and its complement survive. Thus, denoting for two disjoint sets $A, B \subset {\mathbb R}n$
\[ {\mathcal L}_s(A,B) =\int_A \int_B\frac{ dx \, dy }{|x-y|^{n+s}} \]
we can write
\eqlab{\label{pss} P_s(E,\Omega)= P_s^{L} (E,\Omega) + P_s^{NL} (E,\Omega),}
where we separate the ``local'' and the ``nonlocal'' contributions to the perimeter (see Figure \ref{sp})
\[ P_s^{L} (E,\Omega) := {\mathcal L}_s(E\cap \Omega,{\mathcal C} E \cap \Omega),\]
\[ P_s^{NL} (E, \Omega) := {\mathcal L}_s({\mathcal C} E\cap \Omega,E \cap {\mathcal C} \Omega) +{\mathcal L}_s (E\cap \Omega, {\mathcal C} E \cap {\mathcal C} \Omega).\]
\begin{figure}
\caption{The contributions to the fractional perimeter}
\label{sp}
\end{figure}
As a remark, it holds that
\[ W^{1,1}(\Omega) \subset BV(\Omega)\subset \bigcap_{s\in (0,1)} W^{s,1}(\Omega), \]
in particular if $E$ has finite perimeter, then it has finite fractional perimeter, for every $s\in(0,1)$ (on the other hand, the converse is not true).
One notices that sending $s\nearrow 1$, the local perimeter comes up. This further justifies the fractional perimeter as a good generalization, in this sense, of the classical perimeter. As a matter of fact, in \cite{uniform} the authors prove, under local regularity assumptions on $\partial E$, that for $s \nearrow 1$, the limit of
$(1-s)P_s(E,B_1)$ goes to the classical $P(E,B_1)$ (the result in the $\Gamma$-convergence sense is reached in \cite{DePhil}). The optimal result (in the pointwise sense) can be found in \cite[Theorem 1.6]{fractalLuk} (which is based on the previous \cite[Theorem 2]{BBM} and \cite[Theorem 1]{Davila}). One has that for a set $E$ with finite perimeter in a neighborhood of $\Omega$, the local component of the fractional perimeter recovers, in the renormalized limit, the local perimeter of the set inside the domain $\Omega$,
\[ \lim_{s\nearrow 1} (1-s)P_s^L(E,\Omega)= \frac{\omega_{n-1}}{n-1} P(E,\Omega),\]
while we have that
\[ \lim_{s\nearrow 1} (1-s)P_s^{NL}(E,\Omega)=\frac{\omega_{n-1}}{n-1} P(E,\partial \Omega),\]
concluding that
\eqlab{ \label{sto1} \lim_{s\nearrow 1} (1-s)P_s(E,\Omega)= \frac{\omega_{n-1}}{n-1} P(E,\bar \Omega).}
Basically, in the limit, the far away data vanishes and the nonlocal component concentrates on the boundary of the domain.
The minimization problem is the following.
\begin{definition} \label{smin}
Let $\Omega \subset {\mathbb R}n$ be a bounded open set. Given $E_0:= E \setminus \Omega$, then $E$ is an $s$-{minimal set} in $\Omega$ with respect to $E_0$ if $P_s(E,\Omega)<\infty$ and
\[P_s(E, \Omega)\leq P_s(F,\Omega) \]
for any $F$ such that
\[ F\setminus \Omega =E_0. \]
\end{definition}
As in the classical case, one obtains existence in the nonlocal framework by direct methods (check \cite[Theorem 3.2]{nms}, \cite[Theorem 1.8]{approxLuk}).
\begin{theorem}Let $\Omega \subset {\mathbb R}n$ be an open set and let $E_0 \subset {\mathcal C} \Omega$. There exists an $s$-minimal set in $\Omega$ with respect to $E_0$ if and only if there exists $F\subset {\mathbb R}n$ with $F\setminus \Omega=E_0$ such that $P_s(F,\Omega)<\infty$.
\end{theorem}
In particular, asking $P_s(\Omega,{\mathbb R}n)<\infty$ is enough to guarantee existence. Furthermore, interestingly, as a corollary of the previous theorem, local minimizers always exist (see \cite[Corollary 1.9]{approxLuk}).
As in the classical case again, it is much more involved to study the regularity of $s$-minimal sets.
According to \eqref{sto1}, for $s$ close to $1$, it is natural to expect properties similar to those of classical minimal surfaces (and this is proved in \cite{regularity}). For any $s\in (0,1)$, however, it is known that $s$-minimal surfaces are smooth up to dimension $2$ (thanks to \cite{SavV}). As a matter of fact, the best result to this day, following from \cite{regularity}, \cite{SavV} and \cite{bootstrap}, is the following.
\begin{theorem} Let $s\in (0,1)$ be the fractional parameter, $\Omega\subset {\mathbb R}n$ be a bounded open set and $E$ be an $s$-minimal set. Then
\begin{enumerate}
\item $\partial E$ is smooth, up to a closed, singular set, of Hausdorff dimension at most $n-3$,
\item there exists $\varepsilon_0 \in (0,1/2)$ such that for all $s\in (0,1-\varepsilon_0)$, $\partial E$ is smooth, up to a closed, singular set of Hausdorff dimension at most $n-8$.
\end{enumerate}
\end{theorem}
\subsection{Nonlocal minimal graphs}
The problem we look at in this subsection can be thought as the fractional version of Problem \ref{pb3}.
\begin{problem}\label{pb4}
Let $\Omega\subset {\mathbb R}n$ be a bounded open set, and let $\varphi $ have integrable ``local tail''. Find
\[ \min \big\{ \mathcal F_s(u,\Omega) \; \big| \; u\in W^{s,1}(\Omega), u=\varphi \mbox{ in } {\mathcal C} \Omega \big\} .\]
\end{problem}
Consider $F\subset {\mathbb R}^{n+1}$, that is the subgraph of some function $u$, that is
\[F:= Sg (u,\Omega)= \big\{ (x,x_{n+1}) \in \Omega\times {\mathbb R}\subset {\mathbb R}^{n+1}\; \big| \; x_{n+1}<u(x)\big\}.\]
In order to deal with nonlocal minimal graphs, one could take into consideration Remark \ref{nmg} and work in the geometric setting, thus trying to
find the $s$-minimal graph which locally minimizes the $s$-perimeter in the class of subgraphs. This approach is motivated by a couple of observations:
\begin{itemize}
\item according to \cite[Theorem 1.1]{graph}, if one considers $\Omega$ a bounded open set with $C^{1,1}$ boundary and the exterior data as a continuous subgraph in ${\mathcal C} \Omega \times {\mathbb R}$, then the (local) minimizer of the $s$-perimeter is indeed a subgraph in $\Omega\times {\mathbb R}$ (and a local minimizer always exists according to \cite[Corollary 1.9]{approxLuk}),
\item an analogue of Point 2) of Remark \ref{nmg} is proved in \cite[Theorem 4.1.10]{Lucaphd} (and in the upcoming paper \cite{LucaTeo}). If $F\setminus ({\mathcal C} \Omega \times {\mathbb R})$ is a subgraph, and $F\cap (\Omega \times {\mathbb R})$ is contained in a cylinder, then the perimeter decreases if $F$ is replaced by a subgraph, built with a ``vertical rearrangement '' of the set $F$.
\end{itemize}
In this setting, analogously to Point 3) in Remark \ref{nmg}, it is necessary to work with local minimizers, since the nonlocal part of the perimeter could give infinite contribution.
However, remarkably in \cite{Lucaphd} (and \cite{LucaTeo}), a very nice functional setting is introduced for the area of a graph, which is equivalent to the perimeter framework in the following sense.
\begin{proposition} Let $\Omega\subset {\mathbb R}n$ be a bounded open set and $u\colon {\mathbb R}n \to {\mathbb R}$ be a measurable function such that $u\in W^{s,1}(\Omega)$. If $u$ is a minimizer for $\mathcal F_s$, then its subgraph locally minimizes $P_s(\cdot, \Omega\times {\mathbb R})$ among sets with given exterior data $Sg(u,{\mathcal C} \Omega)$.
\end{proposition}
This $s$-fractional area functional is introduced in the next definition.
\begin{definition}\label{ars}
Let $\Omega \subset {\mathbb R}n$ be a bounded open set, and let $u\colon {\mathbb R}n \to {\mathbb R}$ be a measurable function. Then
\[ \mathcal F_s(u,\Omega):= \iint_{{\mathbb R}^{2n}\setminus ({\mathcal C}\Omega)^2} \mathcal G_s \left( \frac{u(x)-u(y)}{|x-y|} \right) \frac{dx \, dy}{|x-y|^{n-1+s}},\]
where
\[ \mathcal G_s(t)= \int_0^t \left( \int_0^\tau \frac{d\rho} {(1+\rho^2)^{\frac{n+1+s}2} }\right) \, d\tau.\]
\end{definition}
The formula for the area functional is motivated on the one hand, by the Euler-Lagrange equation for nonlocal minimal graphs. Namely, critical points of $\mathcal F_s$ are weak solutions of the Euler-Lagrange equation (see also Section \ref{el}). On the other hand, as mentioned previously, minimizing the area functional is equivalent to minimizing the perimeter. It actually holds that
the local part of the area functional (that is, the interactions of $\Omega$ with itself) equals the perimeter of the subgraph of the function $u \in W^{s,1}(\Omega)$ (plus a constant term), and roughly speaking, the same relation holds between the nonlocal part of the area and that of the perimeter (see \cite[Lemma 4.2.7, 4.2.8]{Lucaphd}, \cite{LucaTeo}).
In order to have existence of Problem \ref{pb4} in $W^{s,1}(\Omega)$, one needs to ask a quite strong condition on the tail. This difficulty is surmounted by the authors of \cite{LucaTeo} by choosing a good notion of minimizer. We leave further explanations to the previously cited paper, mentioning that the existence result is obtained in the following setting.
Let $\mathcal O\subset {\mathbb R}n$ be a given open set such that $\Omega $ is compactly contained in $\mathcal O$. Defining the ``local tail'' of a measurable function $\varphi \colon {\mathcal C} \Omega\to {\mathbb R}$ as
\[ \mbox{Tail}_s(\varphi, \mathcal O \setminus \Omega; x):= \int_{\mathcal O \setminus \Omega} \frac{|\varphi(y)|}{|x-y|^{n+s}} \, dy, \]
we can state
the existence of solutions of Problem \ref{pb4} (see \cite[Theorem 4.1.3]{Lucaphd} and \cite{LucaTeo}).
\begin{theorem}
Suppose that $\mbox{Tail}_s(\varphi, \mathcal O \setminus \Omega; \cdot)\in L^1(\Omega)$ for $\mathcal O$ big enough depending on $\Omega$. Then there exists a unique minimizer of Problem \ref{pb4}.
\end{theorem}
As for regularity, combining results from \cite{Lucaphd,LucaTeo,Teocab} one has the following interior regularity theorem.
\begin{theorem}If $u\in W^{s,1}(\Omega)$ is a minimizer of $\mathcal F_s(\cdot, \Omega)$, then $u\in C^\infty(\Omega)$.
\end{theorem}
Boundary regularity of nonlocal minimal surfaces is a much more complicated and surprising story, and it gives a quite exhaustive answer to questions about the stickiness phenomenon. A very recent result of \cite{sticks} establishes, at least in the plane, a dichotomy: either nonlocal minimal graphs are continuous across the boundary (and in that case, their derivatives are H\"{o}lder continuous), or they are not continuous, which amounts to stickiness.
This result is contained in \cite[Corollary 1.3]{sticks}. More precisely:
\begin{theorem}\label{dich}
Let $u\colon{\mathbb R} \to {\mathbb R}$, with $u\in C^{1,\frac{1+s}2}([-h,0])$ for some $h\in (0,1)$, be such that
$u$ is locally $s$-minimal for $\mathcal F_s(\cdot, (0,1))$. Then
\[ \overline{ \partial Sg(u) \cap \left( (0,1)\times {\mathbb R}\right) } \mbox{ is a closed, } C^{1,\frac{1+s}2} \mbox{ curve}.\]
Moreover, the following alternative holds:
\begin{enumerate}
\item either
\[\lim_{x_1 \searrow 0} u(x_1)= \lim_{x_1\nearrow 0} u(x_1)\]
and
\[u\in C^{1,\frac{1+s}2}([0,1/2]),\]
\item or
\[l= \lim_{x_1 \searrow 0} u(x_1)\neq \lim_{x_1\nearrow 0} u(x_1)\]
and there exists $\mu>0$ such that
\[u^{-1} \in C^{1,\frac{1+s}2} ([l-\mu, l+\mu]).\]
\end{enumerate}
\end{theorem}
Notice that this theorem says that geometrically, the $s$-minimal graph is a $C^{1,\frac{1+s }2}$ curve in the interior of the cylinder, and up to the boundary. We further discuss Point 2) of this theorem in Section \ref{stick}.
\subsection{The fractional Euler-Lagrange equation}\label{el}
Classical minimal surfaces are characterized by the fact that at regular points, the mean curvature vanishes. This holds also in the fractional case, so we begin by introducing the fractional mean curvature (see \cite{abaty,nms}). Let $E\subset {\mathbb R}n$ and $q\in \partial E$. Then
\[ {\mathcal I}_s[E](q) := P.V. \int_{{\mathbb R}n} \frac{\chi_{{\mathcal C} E}(x)-\chi_E(x)}{|x-q|^{n+s} } \, dx.\]
We will, for the sake of simplicity, omit the $P.V.$ in our computations.
Just like for the $s$-perimeter, it holds that sending $s$ to $1$, the classical mean curvature appears. More precisely, let $E$ have $C^2$ boundary, then for any $q\in \partial E$ it holds that
\[ \lim_{s\nearrow 1} (1-s) {\mathcal I}_s[E](q)= \omega_{n-1} H[E](q),\]
where $H[E](q)$ denotes the classical mean curvature at $q\in \partial E$, with the convention that balls have positive mean curvature.
In the case of nonlocal minimal subgraphs $Sg(u)\subset {\mathbb R}^{n+1}$, one can give an explicit formula for the mean curvature, in dependence of the function $u$. Suppose for simplicity that we have a global minimal graph of $u\in C^{1,\alpha}({\mathbb R}n)$, which up to translations and rotations satisfies $u(0)=0, \nabla u (0)=0$. Then for $Q\in \partial Sg(u)$, (i.e. $u(q)=q_{n+1}$) one can write
\bgs{ {\mathcal I}_s[Sg(u)](Q) =&\; \int_{{\mathbb R}n} \frac{\chi_{{\mathcal C} Sg(u)}(X)-\chi_{Sg(u)}(X)}{|X-Q|^{n+1+s} } \, dX \\
= &\; \int_{{\mathbb R}^{n}} dx\int_{u(x)}^\infty \frac{dx_{n+1}}{(|x-q|^2 +|x_{n+1}-q_{n+1}|^2)^{\frac{n+1+s}2} }
\\
&\; -\int_{{\mathbb R}^{n}} dx\int_{-\infty}^{u(x)} \frac{dx_{n+1}}{(|x-q|^2 +|x_{n+1}-q_{n+1}|^2)^{\frac{n+1+s}2} }
\\
=&\; \int_{{\mathbb R}n} \frac{dx}{ |x-q|^{n+s} } \int_{ \frac{u(x)-q_{n+1} }{ |x-q| } }^\infty
\frac{d\rho }{
(1+\rho^2)^{
\frac{n+1+s}2
}
}
\\
&\; - \int_{{\mathbb R}n} \frac{dx}{|x-q|^{n+s}} \int_{-\infty}^{\frac{u(x)-q_{n+1}}{|x-q|}} \frac{d\rho }{
(1+\rho^2)^{
\frac{n+1+s}2
}
}
\\
=&\; 2\int_{{\mathbb R}n} \frac{dx}{ |x-q|^{n+s} } \int_0^{\frac{u(x)-q_{n+1}}{|x-q|}} \frac{d\rho }{
(1+\rho^2)^{
\frac{n+1+s}2
}
}, }
where we have changed variables and have used symmetry.
Denoting
\[ G_s(\tau)= \int_0^\tau \frac{d\rho }{
(1+\rho^2)^{
\frac{n+1+s}2
}
},\]
recalling Definition \ref{ars} we notice that
\[ \mathcal G'_s(t)=G_s(t)\]
which allows to prove, at least formally, that
\[ \frac{d}{d\varepsilon}\Bigg|_{\varepsilon=0} \mathcal F_s(u+\varepsilon v)=0,\]
implies that, in a weak sense,
\[ {\mathcal I}_s[Sg(u)] =0. \]
This explains the connection between the fractional mean curvature operator and the functional formulation for the area operator in Definition \ref{ars}, introduced in \cite{LucaTeo}.
\\
The formula for the mean curvature operator can be written also ``locally'', having $F\subset {\mathbb R}^{n+1}$ a set that is locally the graph of a function $u\in C^{1,\alpha}(B_r(q))$. Up to rotations and translations, and denoting for $r,h>0$
\[ K_r^h(Q):= B_r(q)\times (q_{n+1}-h,q_{n+1}+h),\]
one has that
\eqlab{\label{af23}
{\mathcal I}_s[F](q)= 2\int_{B_r(q)} G_s\left(\frac{u(x)-u(q)}{|x-q|}\right) \frac{dx}{|x-q|^{n+s}}
+ \int_{{\mathbb R}^{n+1} \setminus K_r^h(Q)} \frac{\chi_{{\mathcal C} Sg (u)}(X) -\chi_{Sg(u)}(X)}{|X-Q|^{n+1+s}} \, dX.
}
The reader can check \cite{regularity} where formula \eqref{af23} was first introduced, \cite{bootstrap} where the formula for the non-zero gradient is given, \cite{abaty,lukes} for further discussion on the mean curvature.
We give the Euler-Lagrange equation mentioned here above in the strong form, both in the interior and at the boundary of the domain. The following result, stated in a condensed form in \cite[Appendix B]{bucluk}, is a consequence of \cite[Theorem 5.1]{nms}, where the equation is given in the viscosity sense, \cite{obss,bootstrap} where regularity is settled, and \cite{graph}, where the authors go from the viscosity to the strong formulation.
\begin{theorem}\label{el} Let $\Omega\subset {\mathbb R}n$ be an open set and let $E$ be locally $s$-minimal in $\Omega$.
\begin{enumerate}
\item If $q\in \partial E$ and $E$ has either an interior or an exterior tangent ball at $q$, then there exists $r>0$ such that $\partial E \cap B_r(q)$ is $C^\infty$ and
\[ {\mathcal I}_s[E](x)=0 \quad \mbox{ for any } x\in \partial E\cap B_r(q).\]
In particular,
\[ {\mathcal I}_s[E](x) =0 \quad {\mathcal H}^{n-1}-\mbox{a.e. for } x\in \partial E \cap \Omega.\]
\item If $q \in \partial E \cap \partial \Omega$ and $\partial \Omega$ is $C^{1,1}$ in $B_{R_0}(q)$ for some $R_0>0$, and $B_{R_0}(q)\setminus \Omega\subset {\mathcal C} E$, then
\[ {\mathcal I}_s[E](q) \leq 0.\]
Moreover, if there exists $R<R_0$ such that
\[ \partial E \cap \left(\Omega \cap B_r(q)\right) \neq \emptyset \qquad \mbox{ for any } r<R\]
then
\[ {\mathcal I}_s[E](q) = 0.\]
\end{enumerate}
\end{theorem}
This theorem provides the Euler-Lagrange equation almost anywhere in the interior of the domain $\Omega$ (at all regular points), and at the boundary of $\Omega$ with smooth boundary, as long as, roughly speaking, $E$ detaches from the boundary of $\Omega$ towards the interior, or $\partial E$ coincides with $\partial \Omega$ near the point $q$.
\section{The stickiness phenomena for nonlocal minimal surfaces}\label{stick}
In the nonlocal setting, the stickiness phenomenon is typical. The situation drastically changes with respect to the classical objects since even in convex domains and with smooth exterior data, the $s$-minimal surface may attach to the boundary of the domain.
A first example is given in \cite[Theorem 1.1]{boundary} showing stickiness to half-balls. We look for a nonlocal minimal set in a ball, having as exterior data a half-ring around that ball. A small enough radius of the ring will lead to stickiness. Precisely:
\begin{theorem}\label{halfball}
For any $\delta>0$, denote
\[ K_\delta := \left(B_{1+\delta}\setminus B_1\right) \cap\{ x_n<0\},\]
and let $E_\delta$ be $s$-minimal for $P_s(\cdot, B_1)$ with $E_\delta\setminus B_1=K_\delta$.
There exists $\delta_0:=\delta_0(n,s)>0$ such that for any $\delta \in (0,\delta_0]$ we have that
\[ E_\delta=K_\delta.\]
\end{theorem}
Not only does stickiness happen in unexpected situations; what is more, small perturbations of the exterior data may cause stickiness. We describe this phenomenon with the example given in \cite[Theorem 1.4]{boundary}. It is well known that the only $s$-minimal set with exterior data given by the half-plane is the half-plane itself. But surprisingly, flat lines are ``unstable'' $s$-minimal surfaces in the following sense. Changing slightly the exterior data by adding two compactly contained ``bumps'', the $s$-minimal surface in the cylinder sticks to the walls of the cylinder, for a portion which is comparable to the height of the bumps. The exact statement is the following.
\begin{theorem}
Fix $\varepsilon_0>0$ arbitrarily small. Then there exists $\delta_0:=\delta_0(\varepsilon_0)>0$ such that for any $\delta\in(0,\delta_0]$ the following holds true.
Consider
\[ H= {\mathbb R} \times (-\infty,0) \qquad F_- =(-3,-2)\times[0,\delta), \qquad F_+=(2,3)\times[0,\delta),\]
and
\[ F\supset H\cup F_-\cup F_+.\]
Let $E$ be the $s$-minimal set in $(-1,1)\times {\mathbb R}$ among all sets such that $E=F$ outside of $(-1,1)\times {\mathbb R}$. Then
\[ E\supseteq (-1,1) \times (-\infty, \delta^{\frac{2+\varepsilon_0}{1-s}}).\]
\end{theorem}
The proof of this theorem is very interesting in itself, carried out by building a suitable barrier from below.
As a matter of fact, taking into account the dichotomy in Theorem \ref{dich}, it is clear that this unstable behavior appears to be typical. This is the case: even in the plane, if we start with a $s$-minimal surface which is continuous across the boundary, it is enough to perturb slightly the exterior data in order to get stickiness. Indeed, consider $v\colon {\mathbb R} \to {\mathbb R}$ smooth enough, fixed outside of the interval $(0,1)$, which plays the role of the exterior data, and let $u\colon {\mathbb R} \to {\mathbb R}$, $s$-minimal with respect to $v$, be continuous across the boundary. Then smoothly perturbing $v$ outside of the cylinder will produce a $s$-minimal graph which sticks to the cylinder.
This generic behavior is better explained in \cite[Theorem 1.1]{sticks}.
\begin{theorem}
Let $\alpha \in (s,1)$, let $v\in C^{1,\alpha}({\mathbb R})$,
and let
$ \varphi \in C^{1,\alpha}({\mathbb R})$ be non-negative and not identically zero, such that $ \varphi=0 \mbox{ in } (-d,d+1) \mbox{ for some } d>0.$ Consider then $u\colon {\mathbb R} \times [0,\infty) \to {\mathbb R}$ such that
\[ u(x_1,t)= v(x_1)+t\varphi(x_1), \qquad \, t\geq 0,\, x_1\in {\mathbb R}\setminus (0,1)\]
and suppose that
the set
\[ E_t= \big\{ (x_1,x_2)\in {\mathbb R}^2 \, \big| \, x_2<u(x_1,t)\big\} \]
is locally $s$-minimal in $(0,1)\times {\mathbb R}$.
Assume that
\[ \lim_{x_1\searrow 0} u(x_1,0)= v(0).\]
Then for any $t>0$
\[ \limsup_{x_1\searrow 0} u(x_1,t)>v(0).\]
\end{theorem}
\section{Complete stickiness in highly nonlocal regimes}
A very nice example of complete stickiness, that is when the minimal surface attaches completely to the boundary of the domain, was recalled in Theorem \ref{halfball}.
On the one hand, complete stickiness depends on how ``large'' the exterior data is. On the other hand, fixing the exterior data, we obtain complete stickiness for $s$ small enough.
Indeed, as $s$ gets smaller, the nonlocal contribution prevails and the effects are quite surprising. In this section, we sum up some results from the literature related to highly nonlocal regimes, and provide examples of complete stickiness both for nonlocal minimal sets and graphs.
To describe the ``purely nonlocal contribution'', one makes use of the set function introduced in \cite{asympt1}
\eqlab{ \label{alpha}
\alpha(E)= \lim_{s \searrow 0} s \int_{{\mathcal C} B_1}\frac{ \chi_E(x)} {|x|^{n+s}} \, dx.}
As \cite[Examples 2.8, 2.9]{asympt1} show, it is possible to have smooth sets (hence with finite $s$-perimeter for any $s$) for which the limit in \eqref{alpha} does not exist. In this case, $\lim_{s\searrow 0} s P_s(E,\Omega)$ does not exist either, since the two limits are intrinsically connected. Whenever this happens, one can use $\limsup$ and $\liminf$ as in \cite{bucluk}. For simplicity, we use however $\alpha$ as defined in \eqref{alpha}, and notice that the results in this section hold for $\limsup (\liminf)$ instead of the limit, whenever the limit does not exist.
The fact that this set function well describes the behavior of the perimeter as $s$ goes to $0$ is given in \cite[Theorem 2.5]{asympt1}.
\begin{theorem}
Let $\Omega\subset {\mathbb R}n$ be a bounded open set with $C^{1,\gamma}$ boundary for some $\gamma \in (0,1)$.
Suppose that $P_{s_0}(E,\Omega)$ is finite for some $s_0\in (0,1)$. Then
\eqlab{ \label{pers0} \lim_{s\searrow 0} s P_s(E,\Omega)= \alpha({\mathcal C} E) |E \cap \Omega| + \alpha (E) |{\mathcal C} E \cap \Omega|.}
\end{theorem}
If one goes back to \eqref{pss}, one gets that the local contribution completely vanishes in the limit
\[ \lim_{s\searrow 0} s P_s^{L}(E,\Omega)=0.\]
On the other hand, in the limit, the nonlocal part gives a combination of the purely nonlocal contribution, expressed in terms of the function $\alpha$, and the Lebesgue measure of the set (or its complement) in $\Omega$. Recalling also the limit as $s\nearrow 1$ in \eqref{sto1}, one could say that in some sense, the fractional perimeter interpolates between the perimeter of the set and its volume. It is even clearer if we take, for example, a set $E$ bounded, with finite perimeter, contained in $\Omega$. Then \eqref{pers0} and \eqref{sto1} give that
\[ \lim_{s\searrow 0} s P_s(E,\Omega) =\omega_n |E| \]
and
\[ \lim_{s \nearrow 1} (1-s)P_s(E,\Omega)= \frac{\omega_{n-1}}{n-1} P(E,\Omega).\]
A second element describing purely nonlocal regimes comes from the mean curvature operator. What we discover is that, as $s$ decreases towards zero, in the limit the mean curvature operator forgets any local information it had retained on the local geometry of the set, and measures only the nonlocal contribution of the set. More precisely
\eqlab{ \label{mcs0}\lim_{s\searrow 0}s {\mathcal I}_s[E](p)= \omega_n -2 \alpha (E),}
for any $p\in \partial E$ and whenever $\partial E$ is $C^{1,\gamma}$ around $p$, for some $\gamma \in (0,1]$.
We provide a few more details on the set function $\alpha(E)$, which are useful in the sequel.
Denote for $q\in {\mathbb R}n$ and $R>0$
\eqlab{ \label{alphaeq}
\alpha_s (E,R,q) = \int_{{\mathcal C} B_R(q) } \frac{\chi_E(x)}{|x-q|^{n+s} }\, dx.}
Then it holds that
\[ \lim_{s \searrow 0} s\alpha_s(E,R,q)= \alpha(E).\]
In particular, this says that $\alpha$ represents indeed the contribution from infinity, as it does not depend neither on the fixed point $q\in {\mathbb R}n$, nor on the radius we pick. So, to compute the contribution from infinity of a set it is enough to compute its weighted measure outside of a ball of any radius, centered at any point. For more details and examples, check \cite[Section 4]{bucluk}. We just recall here a couple of examples, which are therein explained: the contribution from infinity
\begin{itemize}
\item of a bounded set is zero,
\item of a cone is given by the opening of the cone,
\item of a slab is zero,
\item of the supergraph of a parabola is zero,
\item of the supergraph of $x^3$ in ${\mathbb R}^2$ is $\pi$,
\item of the supergraph of a bounded function is $\omega_n/2$.
\end{itemize}
\subsection{Complete stickiness}
We start this subsection with an example. As we have already mentioned, the only $s$-minimal set having the half-space as exterior data is the half-space itself, for any value of $s$. On the other hand, let us try to understand what happens if we minimize the perimeter in $B_1\subset {\mathbb R}^2$, using the first quadrant of the plane as exterior data. As \cite[Theorem 1.3]{boundary} shows, there exists some small $s_0$ such that for all $s\in (0,s_0)$ the $s$-minimal surface sticks to $\partial B_1$, and the $s$-minimal set is exactly the first quadrant of the plane, deprived of its intersection with $B_1$.
This example still holds if, instead of the ball, one picks a domain $\Omega$, bounded, with smooth boundary and takes as the exterior data the whole half-plane, deprived of some small cone, at some distance from $\Omega$. For simplicity, we give an example that one can keep in mind, before we introduce the main theorem of the section.
\begin{example}\label{exxx}
Let for any given $h\geq 1$ and $\vartheta \in (0,\pi/2)$
\bgs{ \Sigma := \Big\{ (x_1,x_2)\in {\mathbb R}^2 \; \Big| \; x_2\geq \Big((x_1-h)\tan \vartheta \Big)_+ \Big\}}
and let $E_0:= \Sigma \setminus B_1$.
Then there exists $s_0 >0$ such that for any $s \in (0,s_0)$, the set $E_s$ that minimizes $P_s(\cdot, B_1)$ with respect to $E_0$, is empty inside $B_1$,
or in other words
\[ E_s =\Sigma\setminus B_1.\]
\end{example}
\begin{proof}[Sketch of proof]
We argue by contradiction and suppose that there is some boundary of $E$ inside $B_1$. We follow the next steps.
\begin{enumerate}
\item Step 1. We prove that, if there exists an exterior tangent ball at a point on the boundary of $E\cap \bar B_1$, of some suitable (uniform) radius, the fractional mean curvature of $E$ at that point is strictly positive.
\item Step 2. We prove that there exists some ball, compactly contained in $B_1$, which is exteriorly tangent to the boundary of $E$.
\item Step 3. We obtain a contradiction by comparing Step 1 with the Euler-Lagrange equation (that holds, thanks to Step 2, check Theorem \ref{el}).
\end{enumerate}
\noindent \textbf{Step 1}.
We have set out to prove that, if there exists an exterior tangent ball at $q\in \partial E\cap \bar B_1$, there exists $\tilde C>0$ such that
\[ {\mathcal I}_s[E](q) = \int_{{\mathbb R}^n} \frac{ \chi_{{\mathcal C} E}(x) -\chi_E(x)}{|x-q|^{n+s}} \, dx \geq \tilde C.\]
Let $\delta$ be a radius (that will be chosen as small as we want in the sequel), and $p\in B_1$ such that $B_\delta(p)$ is compactly contained in $B_1$, exterior tangent to $\partial E$ at $q$, that is
\[ B_\delta(p) \subset{\mathcal C} E\cap B_1, \quad q \in \partial E \cap \partial B_\delta(p).\]
Denote $p'$ as the point symmetric to $p$ with respect to $q$,
\[ D_\delta :=B_\delta(p) \cup B_\delta(p'), \]
$K_\delta$ as the convex hull of $D_\delta$ and
\[ P_\delta :=K_\delta \setminus D_\delta.\]
Let $R>4$ be as large as we want, to be specified later on.
We split the integral into four different parts and estimate each one.
\begin{enumerate}
\item The contribution in $D_\delta$ is non-negative, since $E$ covers ``less'' of $D_\delta$ than of its complement, i.e.
\[ \chi_{{\mathcal C} E\cap D_\delta}\geq \chi_{E \cap D_\delta},\]
hence
\[ \int_{D_\delta} \frac{ \chi_{{\mathcal C} E} (x) -\chi_{E}(x)}{|x-q|^{n+s} } \geq 0.\]
\item The contribution on $P_\delta$ is bounded from below thanks to \cite[Lemma 3.1]{graph},
\[ \int_{P_\delta} \frac{ \chi_{{\mathcal C} E} (x) -\chi_{E}(x)}{|x-q|^{n+s} } \geq -C_1\delta^{-s}.\]
\item As for the contribution in $B_R(q) \setminus K_\delta$, we have that
\bgs{
&\bigg|\int_{B_R(q) \setminus K_\delta} \frac{ \chi_{{\mathcal C} E} (x) -\chi_{E}(x)}{|x-q|^{n+s} }\bigg|
\leq
\bigg |\int_{B_R(q) \setminus B_\delta(q)} \frac{ \chi_{{\mathcal C} E} (x) -\chi_{E}(x)}{|x-q|^{n+s} } \bigg|
\\
\leq &\; \omega_n \int_{\delta}^R \rho^{-1-s} \, d\rho =\omega_n \frac{ \delta^{-s}-R^{-s}}s .}
\item We prove that the contribution of ${\mathcal C} B_R(q)$ is bounded from below by
\[ \int_{{\mathcal C} B_R(q)} \frac{ \chi_{{\mathcal C} E} (x) -\chi_{E}(x)}{|x-q|^{n+s} } \geq \frac{C (\vartheta)R^{-s}}{s},\]
for some constant $C(\vartheta)\in (0,\omega_n/2)$, in particular independent of $q$.
\end{enumerate}
Of course, $\omega_n$ is actually $\omega_2$, but we keep the above formulas in this general form since the estimates hold in any dimension.
Putting the four contributions together, our goal is to obtain that
\[ s{\mathcal I}_s[E](q) \geq \left(C(\vartheta) +\omega_n\right) R^{-s}- \delta^{-s}( C_1s +\omega_n) \geq \frac{C(\vartheta)}{8}>0.\]
Since $R^{-s} \nearrow 1$ as $s\searrow 0$, there exists $s$ small enough such that
\[ C(\vartheta) R^{-s} \geq \frac{C(\vartheta)} 2, \quad \omega_n R^{-s} \geq \omega_n -\frac{C(\vartheta)}4 , \quad C_1 s \leq \frac{C(\vartheta)}{16} \]
thus
\[ s{\mathcal I}_s[E](q) \geq \frac{C(\vartheta)}4 + \omega_n -\delta^{-s}\left(\omega_n +\frac{C(\vartheta)}{16} \right)\geq \frac{C(\vartheta)}8,\]
if and only if
\eqlab{ \label{dss}\delta \geq e^{\frac{-1}{s } \log \frac{8\omega_n+C(\vartheta)}{8\omega_n +C(\vartheta)/2} }:=\delta_s.}
Notice that $\delta_s <1$, hence for any $s\in(0,\sigma)$ taking $ \delta>\delta_\sigma,$
\[ \qquad \delta^{-s} <\delta^{-\sigma}<\delta_{\sigma}^{-\sigma},\]
hence for any radius greater than $\delta_\sigma$ the $s$-curvature will remain strictly positive for any $s<\sigma$.
We can conclude that
there exists $\sigma$ such that, having at $q$ an exterior tangent ball of radius (at least) $\delta_\sigma$, implies that
\[ s{\mathcal I}_s[E](q) \geq \frac{C(\vartheta)}8 >0 \quad \mbox{ for all } \; s\leq \sigma.\]
\noindent \textbf{Step 2}. To carry out Step 2, we prove that there exists an exterior tangent ball to $\partial E$, compactly contained in a ball slightly smaller than $B_1$.
We denote
\[ B_1^+ = B_1\cap \{x_2>0\},
\quad B_1^- =B_1 \cap \{ x_2<0\}.
\]
First of all, we notice by comparison with the plane, that
\[ B_1^- \subset {\mathcal C} E.\]
Otherwise, we start moving upwards the semi-plane $\{ x_2 \leq -2\}$ until we first encounter $\partial E \cap \bar B_1^-$ at $p=(p_1,p_2)$. Since
\[ {\mathcal C} E \supset {\mathcal C} \{x_2>p_2\}, \qquad E\subset \{x_2>p_2\}\] it holds that
\[ {\mathcal I}_s[E](p) = {\mathcal I}_s[E](p)-{\mathcal I}_s[\{x_2>p_2\}] (p)\geq 0,\]
and since $E$ is minimal, it holds in the strong sense that
\[ {\mathcal I}_s[E](p) \leq 0. \]
This would imply that $E=\{x_2>p_2\}$ by the maximum principle (see \cite[Appendix B]{bucluk}), which is false.\\
For some $r_0>0$ and $s$ small enough (notice that $\delta_s \searrow 0$ as $s\searrow 0$, see \eqref{dss}), and $x\in B_1^{-}$, consider $\delta_s<\delta<r_0/4$ such that
\[ B_\delta(x) \subset B_{1-r_0/2}^- \subset {\mathcal C} E .\]
We remark that for a domain $\Omega$ with $C^2$ boundary, $r_0$ is chosen to be such that
\eqlab{ \label{r0} \mbox{ the set } \; \big\{ x\in \Omega \; \big| \; d(x,\partial \Omega)\leq r_0 \big\} \mbox{ still has } C^2 \mbox{ boundary}
}
(check \cite[Appendix A.2]{bucluk}, \cite[Appendix B]{MinGiusti} for instance).
Suppose now by contradiction that $E$ is not empty inside $B_{1-r_0/2}$, hence
\[|E \cap B^+_{1-r_0/2}|>0, \quad \mbox{ in particular} \quad \exists\; y\in E\cap B^+_{1-r_0/2} .\]
We consider the segment connecting $x$ and $y$ inside $B_{1-r_0/2}$, and we move the ball of radius $\delta$ along this segment starting from $x$, until we first hit the boundary of $E$. We denote by $q$ the first contact point (for a more detailed discussion, see \cite[Lemma A.1]{bucluk}), i.e. for $p\in B_{1-r_0/2}^+$
\[ q\in \partial E \cap \partial B_\delta(p), \qquad B_\delta(p)\subset {\mathcal C} E.\]
\noindent \textbf{Step 3}.
Since at $q$ there exists an exterior tangent ball of radius $\delta$, we use the Euler-Lagrange equation in the strong form and have that
\[ {\mathcal I}_s[E](q)=0.\]
This provides a contradiction with Step 1, and it follows that
\[ |E\cap B_{1-r_0/2} | =0.\]
Now it is enough to ``expand'' $B_{1-r_0/2}$ towards $B_1$. If there is some of $E$ in the annulus $B_1 \setminus B_{1-r_0/2}$, one can find an exterior tangent ball at $\partial B_{1-\rho} \cap \partial E$ for some $\rho \in (0,r_0/2)$ and use again the fact that the curvature is both strictly positive and equal to zero to obtain a contradiction. This would conclude the proof.
It remains to prove that for $q\in \partial E \cap \bar B_1$
\[ \int_{{\mathcal C} B_R(q) } \frac{ \chi_{{\mathcal C} E} (x) -\chi_{E}(x)}{|x-q|^{2+s} } \geq \frac{C (\vartheta)R^{-s}}{s},\]
for some constant $C(\vartheta)$ not depending on $q$.
We do this with a geometric argument. We want to build a parallelogram of center $q$, and take $R$ as large as we need, such as to have the parallelogram in the interior of $B_R(q)$. Then we use symmetry arguments to obtain the conclusion.
We build our parallelogram in the following way, check Figure \ref{par}. We denote
\[ l_1= (x_1-h) \tan \vartheta\]
and draw through $q$ the parallel to the bisecting line of the angle complementary to $\vartheta$. We call $p$ the intersection between this parallel line and $l_1$, and $p'$ the point symmetric to $p$ with respect to $q$, that sits on this parallel line. We draw through $p,p'$ two lines parallel to the axis $Ox$. The parallelogram we need is formed by the intersections of these last drawn parallels to $Ox$, $l_1$ and the parallel to $l_1$ through $p'$. We choose $R$ such that this parallelogram stays in the interior of $B_R(q)$, remarking that $R$ depends only on $\vartheta,h $, and we can make this choice independent on $q\in \bar B_1$. In particular, one can take
\[ R:=\max\{ \max_{x\in \bar B_1}d (x,l_1) \cot \frac{\vartheta}4, 4\}. \]
\begin{center}
\begin{figure}
\caption{The geometric construction in Example \ref{exxx}}
\label{par}
\end{figure}
\end{center}
This ensures that both $B_1$ and the parallelogram we built are in $B_R(q)$. We identify six ``corresponding'' regions, which by symmetry produce some nice cancellations. Not to introduce heavy notations, the reader can check directly Figure \ref{par1}.
\begin{center}
\begin{figure}
\caption{The geometric construction in Example \ref{exxx}}
\label{par1}
\end{figure}
\end{center}
Notice that
\bgs{ & A\subset {\mathcal C} E, \quad A' \subset E,
\\
& B \subset {\mathcal C} E, \quad B' \subset E \cup {\mathcal C} E,
\\
& C\cup C' \subset {\mathcal C} E}
and accordingly we have that
\bgs{\int_{{\mathcal C} B_R(q) } \frac{ \chi_{{\mathcal C} E} (x) -\chi_{E}(x)}{|x-q|^{2+s} } \, dx=&\;
\left(\int_{ A\cup A'} + \int_{B\cup B'} + \int_{C \cup C'} \right) \frac{ \chi_{{\mathcal C} E} (x) -\chi_{E}(x)}{|x-q|^{2+s} } \, dx
\\ \geq &\; 2 \int_C \frac{dx}{|x-q|^{2+s} }.
}
Now $C$ contains a cone $\mathcal C_{\vartheta}(q)$ centered at $q$, of opening $\gamma:=\gamma(\vartheta)$, independent of $q$.
In particular (see Figure \ref{gg}) we have that
\[ \frac{\gamma}2 =\frac{\pi}2 -\alpha-\frac{\pi-\vartheta}2 \geq \frac{\vartheta}2 -\frac{\vartheta}4 =\frac{\vartheta}4,\]
given that
\[ \cot \alpha = \frac{R}{d(q,l_1)} \geq \frac{\max_{x\in \bar B_1} d(x,l_1) \cot \frac{\vartheta}4}{d(q,l_1)}\geq \cot \frac{\vartheta}4. \]
\begin{center}
\begin{figure}
\caption{The small cone $\mathcal C_\vartheta(q)$ in Example \ref{exxx}}
\label{gg}
\end{figure}
\end{center}
Passing to polar coordinates, it follows that
\[ \int_C \frac{dx}{|x-q|^{2+s} } \geq \int_{\mathcal C_{\vartheta}(q)} \frac{dx}{|x-q|^{2+s} } =\gamma \frac{R^{-s}}{s}\geq \frac{\vartheta}2 \frac{R^{-s}}s.\]
This concludes the sketch of the proof.
\end{proof}
The reader may wonder if this behavior depends on the particular geometry of the sets involved. The answer is no, and actually it only matters that the exterior data occupies, at infinity, less than half the space, or mathematically written
\[ \alpha(E_0) <\frac{\omega_n}{2}.\]
Intuitively, one can try to understand why this is to be expected. Let us first check
\eqref{pers0}, and re-write it as
\[ \lim_{s\searrow 0} s P_s(E,\Omega) = \alpha(E_0)|\Omega| + \left(\omega_n -2\alpha(E_0)\right) |E \cap \Omega|.\]
In broad terms, minimizing the perimeter for $s$ small reduces to minimizing
$(\omega_n -2\alpha(E_0))|E\cap \Omega|$. Hence if
\[ \alpha(E_0)<\omega_n/2\]
the best choice to select the minimal set is to take $E\cap \Omega =\emptyset$ (whereas, for $\alpha(E_0)>\omega_n/2$, $E\cap \Omega=\Omega$ would be the right choice).
We notice also that if $\alpha(E_0)=\omega_n/2$, we do not get any information at this point.
Another element that can help, and that further strengthens the intuition, is the asymptotic behavior of the fractional mean curvature \eqref{mcs0}.
Suppose now that $\alpha(E_0)<\omega_n/2$. Then, given the continuity of the fractional mean curvature in $s$ (see \cite[Section 5]{bucluk}), from \eqref{mcs0} for $s$ small enough it follows that
\[ {\mathcal I}_s[E](x) >0,\]
(and this holds for any set $E$ such that $E\setminus \Omega=E_0$, not only for $s$-minimal sets).
This strict positivity of the mean curvature comes very handy when one compares it with the Euler-Lagrange equation recalled in Theorem \ref{el}. If there exists an exterior (or interior) tangent ball to the minimal surface $\partial E$, then
\[ {\mathcal I}_s[E](x) =0.\]
This would provide a contradiction at all (smooth) points on the boundary of the minimal set, inside the domain $\Omega$, and would show that there cannot be any boundary of $E$ inside $\Omega$.
This informal discussion can be set in the following theorem (see \cite[Theorem 1.7]{bucluk}).
\begin{theorem}\label{thm}
Let $\Omega \subset {\mathbb R}n$ be a bounded and connected open set with $C^2$ boundary and let $E_0 \subset {\mathcal C} \Omega$ be given such that
\[ \alpha(E_0)<\frac{\omega_n}2.\]
Suppose that $E_0$ does not completely surround $\Omega$, i.e., there exists $M>0$ and $x_0\in \partial \Omega$ such that
\eqlab { \label{bmm} B_M(x_0)\cap {\mathcal C} \Omega \subset {\mathcal C} E_0.}
Then there exists $s_0\in (0,1/2)$ such that for all $s<s_0$, the corresponding $s$-minimal surface sticks completely to the boundary of $\Omega$, that is
\[ E\cap \Omega=\emptyset.\]
\end{theorem}
\begin{proof}[Sketch of the proof]
We follow the proof of Example \ref{exxx}, with some additional difficulties.
\noindent \textbf{Step 1}. In order to carry out Step 1, we split the integral into the four components, exactly as we did in Example \ref{exxx}. Let $\delta$ be a radius (that will be chosen as small as we want in the sequel), and $p\in \Omega$ such that $B_\delta(p)$ is compactly contained in $\Omega$, exterior tangent to $\partial E$, that is
\[ B_\delta(p) \subset{\mathcal C} E\cap \Omega, \quad q\in \partial E \cap \partial B_\delta(p).\]
Let $R>4$ be as large as we wish. We observe that the estimates in 1), 2) and 3) stay exactly the same. It only remains to prove 4), and actually we notice that
\bgs{ \int_{{\mathcal C} B_R(q)} \frac{\chi_{{\mathcal C} E} (x)- \chi_E(x)}{|x-q|^{n+s}} \, dx = &\;
\int_{{\mathcal C} B_R(q)} \frac{1- 2 \chi_E(x)}{|x-q|^{n+s}} \, dx
\\
=&\;\frac{\omega_n R^{-s}}s - \alpha_s(E,R,q),}
recalling \eqref{alphaeq}.
Then it follows that
\bgs{
s{\mathcal I}_s[E](q) \geq \omega_n R^{-s}- \delta^{-s}( C_1 s +\omega_n) + \omega_n R^{-s}- 2s\alpha_s(E,R,q) .}
Now
\[ \lim_{s\searrow 0}\left( \omega_n R^{-s} -2s\alpha_s(E,R,q) \right)= \omega_n-2\alpha(E):=C(E).\]
The computations follow exactly as in the proof of Example \ref{exxx}, with $C(E)$ instead of $C(\vartheta)$.
Notice also that, in case $E$ is a cone, $\alpha(E)$ is exactly the opening of the cone (hence, $\alpha(\Sigma)=2\vartheta$).
Therefore there exists $\sigma$ such that, for all $s\leq \sigma$, having at $q$ an exterior tangent ball of radius (at least) $\delta_\sigma$, implies that
\eqlab{ \label{mmma} s{\mathcal I}_s[E](q) \geq \frac{C(E)}4 >0.}
\noindent \textbf{Step 2}.
In order to prove Step 2, we need to fit a ball of suitable small radius inside $\Omega \cap {\mathcal C} E$.
We define $r_0$ as in \eqref{r0}, and $\sigma$ small enough such that
\[ \delta_\sigma<\delta \leq \frac14 \min\{M,r_0\}. \]
Since $\delta>\delta_{\sigma}$, \eqref{mmma} holds.
Denote by $\nu_\Omega(x_0)$ the exterior normal to $\partial \Omega$ at $x_0\in \partial \Omega$. ``Taking a step'' of length $\delta$ away from the boundary of $\Omega$ inside the ball $B_M(x_0)$, in the direction of the normal, reaching $x_1$, we have that $B_\delta(x_1) \subset B_M(x_0)\cap {\mathcal C} \Omega \subset {\mathcal C} E$. We want to ``move'' this ball along the normal towards the interior of $\Omega$, until we reach $x_2$, the point on the normal at distance $r_0$ from the boundary of $\Omega$. We can exclude an encounter with $\partial E$, both on the boundary of $\Omega$ and inside of $\Omega$, since in both cases we have the Euler-Lagrange equation and Step 1, which provide a contradiction. Thus, denoting
\[ \Omega_{-r_0/2}:= \Big\{ x\in \Omega \; \Big| \; d(x,\partial \Omega)\geq \frac{r_0}2\Big\},
\]
we have that
\[B_\delta(x_2) \subset \Omega_{-r_0/2} \cap {\mathcal C} E.\]
Now, if the boundary of $E$ lies inside $\Omega_{-r_0/2}$, we pick $p\in E\cap \Omega_{-r_0/2}$ and slide the ball $B_\delta(x_2)$ along a continuous path connecting $x_2$ with $p$. At the first contact point on $\partial E \cap \partial B_\delta(\bar x)$, with $\bar x$ lying on the continuous path between $x_2, p$, we obtain a contradiction from Step 1 and the Euler-Lagrange equation. We obtain the same contradiction by ``enlarging'' $\Omega_{-r_0/2}$, since, at the first contact point the ball $B_{\frac{r_0}4}$ provides a tangent exterior ball to $\partial E \cap \Omega_{-\rho}$, for some $\rho \in (0,r_0/2)$. We obtain that $E\cap \Omega =\emptyset$, concluding the sketch of the proof.
\end{proof}
Of course, the analogue holds for the data that occupies, at infinity, more than half the space. In that case, the result is as follows.
\begin{theorem}\label{thm2}
Let $\Omega \subset {\mathbb R}n$ be a bounded and connected open set with $C^2$ boundary and let $E_0 \subset {\mathcal C} \Omega$ be given such that
\[ \alpha(E_0)>\frac{\omega_n}2.\]
Suppose that ${\mathcal C} E_0$ does not completely surround $\Omega$, i.e., there exists $M>0$ and $x_0\in \partial \Omega$ such that
\eqlab { \label{bmm2} B_M(x_0)\cap {\mathcal C} \Omega \subset E_0.}
Then there exists $s_0\in (0,1/2)$ such that for all $s<s_0$, the corresponding $s$-minimal surface sticks completely to the boundary of $\Omega$, that is
\[ E\cap \Omega=\Omega.\]
\end{theorem}
On the other hand, if
\[\alpha(E)=\frac{\omega_n}2,\]
neither \eqref{pers0} nor \eqref{mcs0} provide any additional information,
since we get that
\[ \lim_{s\searrow 0} sP_s(E,\Omega)= \frac{\omega_n}2|\Omega|\]
and that for any $q\in \partial E$
\[ \lim_{s\searrow 0} s{\mathcal I}_s[E](q) =0.\]
This is actually not strange at all, since in this case, actually everything could happen, depending on $\Omega, E_0$ and their respective positions. Take as an example the ``simplest'' minimal set, the half-plane.
If $\Omega \subset \{ x_2<0\}$, then $E\cap \Omega = \Omega$, if $\Omega \subset \{ x_2>0\}$ then $E\cap \Omega= \emptyset$, while if $\Omega$ sits ``in the middle'', $E$ covers $\Omega \cap \{ x_2<0\}$, and it is empty in $\Omega \cap \{ x_2>0\}$.
Naturally, one may wonder what happens if \eqref{bmm} does not hold, hence if the exterior data completely surrounds $\Omega$. At least with the geometrical type of reasoning we used, in absence of \eqref{bmm} we are unable to obtain the conclusion of complete stickiness. However, only two alternatives hold: either for $s$ small enough all $s$-minimal surfaces stick or they develop a wildly oscillating behavior. Indeed, as precisely stated in \cite[Theorem 1.4 B]{bucluk}, either there exists $\sigma >0$ such that for any $s<\sigma$, all corresponding $s$-minimal sets with exterior data $E_0$ are empty inside $\Omega$, or there exist decreasing sequences of radii $\delta_k\searrow 0$ and of parameters $s_k \searrow 0$ such that for every corresponding $s_k$-minimal set with exterior data $E_0$, it happens that $\partial E_{s_k}$ intersects every ball $B_{\delta_k}(x) $ compactly contained in $\Omega$.
For further details and a thorough discussion, refer to \cite{bucluk}.
To conclude this note, we reason on Example \ref{msstick1} in the nonlocal framework for $s$ small enough. The question is what happens in an unbounded domain $\Omega$ and what does complete stickiness mean in this case.
\begin{example}\label{msstick2}
Let $0<\rho<R$, $M>0$ be fixed, and let $A_R^\rho$ be the annulus
\[ A_R^\rho =\big\{ x\in {\mathbb R}^2 \; \big| \; \rho<|x|<R\big\}.\]
Let
$\varphi\colon {\mathbb R}^n \to {\mathbb R} $ be such that
\bgs{
&\varphi(x)=M ,&& \mbox{ for } x\in \bar B_\rho,\\
& \varphi(x)=0, && \mbox{ in } A^R_{R+2} }
and such that at infinity, it satisfies
\[ \alpha(Sg(\varphi)) <\frac{\omega_{n+1}}2, \]
for instance, as depicted in Figure \ref{stt1}.
\begin{figure}
\caption{Example \ref{msstick2}}
\label{stt1}
\end{figure}
We want to minimize the $s$-perimeter in $A_R^\rho\times {\mathbb R}$, in the class of subgraphs with exterior data given by $\varphi$.
What happens is that for any $K$ large enough, there exists some $s:=s(K)>0$ small enough such that
\[ u_s \leq -K.\]
This means that for small values of the fractional perimeter, the stickiness occurs on both walls of the cylinder, with the height of the stickiness being as large as we want.
The idea of the proof starts from Theorem \ref{thm}. The exterior data does not surround the domain, thus we may start moving a ball from the outside towards the inside. There is however the challenge of the unbounded domain $ A_R^\rho\times {\mathbb R}$. We could solve this issue by cutting the cylinder at some height, solving the problem in the cut cylinder and then making that height as large as we want. Doing this, one should also take into account that, in principle, the data in the infinite cylinder minus the cut cylinder will contribute to $\alpha$ (this is actually negligible, since the slab has zero contribution from infinity). However, this cutting procedure provides a non smooth domain, thus Theorem \ref{thm} cannot be applied directly. One could try to ``smoothen'' the domain by building ``domes'' on top of cylinders, or find a new approach to the proof that does not require a smooth domain.
\end{example}
This discussion is developed in \cite{lukclaud}, where the authors prove a general theorem related to Example \ref{msstick2}, more precisely on the Plateau problem for nonlocal minimal graphs, with obstacles. We propose here a sketch of the theorem, referring to the original work for the complete statement, proof and further details.
\begin{theorem}
Let $\Omega\subset{\mathbb R}^n$ be a bounded and connected open set with $C^2$ boundary and let $\varphi:{\mathbb R}^n\to {\mathbb R}$ be such that
$$\varphi\in L^\infty_{loc}({\mathbb R}^n)\quad\mbox{ and }\quad\overline{\alpha}\big(Sg(\varphi)\big)
<\frac{\omega_{n+1}}{2}.$$
Let $A\subset\subset\Omega$ be a bounded open set (possibly empty) with $C^2$ boundary. Let also
\begin{itemize}
\item [a)] $\psi\in C^2(\overline A)$.
\end{itemize}
or
\begin{itemize}
\item [b)] $\psi\in C(\overline A)\cap C^2(A)$ be such that the supergraph of $\varphi$ has $C^2$ boundary, i.e.
\[\left(\Omega\times {\mathbb R} \right) \setminus Sg(\varphi, \bar A)= \big\{(x,t)\in{\mathbb R}^{n+1}\,\big|\,x\in\overline A,\,t> \psi(x)\big\}\]
has $C^2$ boundary.
\end{itemize}
For every $s\in(0,1)$ we denote by $u_s$
the unique $s$-minimal function that satisfies
\sys[]
{u_s=\varphi\quad\mbox{a.e. in }{\mathcal C}\Omega\\
u_s\ge\psi\quad\mbox{a.e. in }A.
}
Then for every $k$ there exists $s_k\in(0,1)$ decreasing towards $0$, such that
\bgs{
u_s\le-k\quad\mbox{ a.e. in }\Omega\setminus A\quad\mbox{ and }
\quad u_s=\psi\quad\mbox{ a.e. in }A,
}
for every $s\in(0,s_k)$.
In particular
$$\lim_{s\to0}u_s(x)=-\infty,\quad\mbox{ uniformly in }x\in\Omega\setminus A.$$
\end{theorem}
In this theorem, $\varphi$ plays the role of the boundary data, whereas $\psi$ is the obstacle.
We conclude by remarking that the $s$-minimal set asymptotically ``empties'' the unbounded domain $\Omega$, whereas if we pick a large enough $K$, the $s$-minimal surface will stick to both walls of the cylinder, from $-K$ until respectively reaching the boundary data $\varphi$ and the obstacle $\psi$.
\end{document} |
\begin{document}
\maketitle\thispagestyle{empty}
\begin{abstract}
In the work \cite{Onno-Levy} stochastic integrals are
regarded as $L^2$-curves. In \cite{Filipovic-Tappe} we have shown
the connection to the usual It\^o-integral for
c\`adl\`ag-integrands. The goal of this note is to complete this
result and to provide the full connection to the It\^o-integral. We
also sketch an application to stochastic partial differential
equations.
\textbf{Key Words:} Stochastic integrals, $L^2$-curves, connection
to the It\^o-integral, stochastic partial differential equations.
\end{abstract}
\keywords{60H05, 60H15}
\section{Introduction}
In the paper \cite{Filipovic-Tappe} we have established an existence
and uniqueness result for stochastic partial differential equations,
driven by L\'evy processes, by applying a result from
\cite[Thm. 4.1]{Onno-Levy}. In \cite{Onno-Levy} stochastic integrals
are regarded as $L^2$-curves. It was therefore necessary to
establish the connection to the usual It\^o-integral (developed,
e.g., in \cite{Jacod-Shiryaev} or \cite{Protter}) for
c\`adl\`ag-integrands, which we have provided in \cite[Appendix
B]{Filipovic-Tappe}.
The goal of the present note is to complete this result and to
provide the full connection to the It\^o-integral. More precisely,
we will show that the space of adapted $L^2$-curves is embedded into
the space of It\^o-integrable processes (see Proposition
\ref{prop-embedding} below), and that the corresponding
It\^o-integral is a c\`adl\`ag-version of the stochastic integral in
the sense of \cite{Onno-Levy} (see Proposition \ref{prop-coin}
below).
This is the content of Section \ref{sec-curves}. Afterwards, we
outline an application to stochastic partial differential equations
in Section \ref{sec-SDE}.
\section{Stochastic integrals as $L^2$-curves}\label{sec-curves}
Throughout this text, let $(\Omega,\mathcal{F},(\mathcal{F}_t)_{t
\geq 0},\mathbb{P})$ be a filtered probability space satisfying the
usual conditions. Furthermore, let $(H,\| \cdot \|)$ denote a
separable Hilbert space.
For any $T \in \mathbb{R}_+$ the space $C[0,T] :=
C([0,T];L^2(\Omega;H))$ of all continuous curves from $[0,T]$ into
$L^2(\Omega;H)$ is a Banach space with respect to the norm
\begin{align*}
\| r \|_{T} := \sup_{t \in [0,T]} \| r_t \|_{L^2(\Omega;H)} = \sqrt{
\sup_{t \in [0,T]} \mathbb{E} [ \| r_t \|^2 ] }.
\end{align*}
The subspace $C_{\rm ad}[0,T]$ consisting of all adapted processes
from $C[0,T]$ is closed with respect to this norm. Note that, by the
completeness of the filtration $(\mathcal{F}_t)_{t \geq 0}$,
adaptedness is independent of the choice of the representative.
\subsection{Stochastic integral with respect to a L\'evy martingale}
Let $M$ be a real-valued, square-integrable L\'evy martingale. We
recall how in this case the stochastic integral $\text{{\rm
(G-)}}(\Phi \cdot M)$ in the sense of \cite[Sec.
3]{Onno-Levy} is defined for $\Phi \in C_{\rm ad}[0,T]$.
\begin{lemma}\label{lemma-ex-Levy-integral}
\cite[Prop. 3.2.1]{Onno-Levy} Let $\Phi \in C_{\rm ad}[0,T]$ be
arbitrary. For each $t \in [0,T]$ there exists a unique random
variable $Y_t \in L^2(\Omega)$ such that for every $\varepsilon > 0$
there exists $\delta > 0$ such that
\begin{align}\label{partition-vG-int}
\mathbb{E} \left[ \bigg\| Y_t - \sum_{i=0}^{n-1} \Phi_{t_i}
(M_{t_{i+1}} - M_{t_i}) \bigg\|^2 \right] < \varepsilon
\end{align}
for every partition $0 = t_0 < t_1 < \ldots < t_n = t$ with $\sup_{i
= 0,\ldots,n-1} |t_{i+1} - t_i| < \delta$.
\end{lemma}
\begin{definition}\label{def-Levy-integral}
(\cite{Onno-Levy}) Let $\Phi \in C_{\rm ad}[0,T]$ be arbitrary. Then
the stochastic integral $Y = \text{{\rm (G-)}}(\Phi \cdot M)$ is the
stochastic process $Y = (Y_t)_{t \in [0,T]}$ where every $Y_t$ is
the unique element from $L^2(\Omega)$ such that
(\ref{partition-vG-int}) is valid.
\end{definition}
We observe that the integrand $\Phi$ as well as the stochastic
integral $\text{{\rm (G-)}} (\Phi \cdot M)$ are only determined up
to a version. In particular, it is not clear if the integral process
has a c\`adl\`ag-version.
\begin{lemma}
\cite[Thm. 3.3.2]{Onno-Levy} For each $\Phi \in C_{\rm ad}[0,T]$ we
have
\begin{align*}
\text{{\rm (G-)}}(\Phi \cdot M) \in C_{\rm ad}[0,T].
\end{align*}
\end{lemma}
We are now interested in finding the connection between the
stochastic integral $\text{{\rm (G-)}}(\Phi \cdot M)$ and the usual
It\^o-integral (developed, e.g., in \cite{Jacod-Shiryaev}, \cite{Protter}, \cite{Applebaum}
for the finite dimensional case and in \cite{Da_Prato},
\cite{P-Z-book} for the infinite dimensional case). We use the
abbreviation
\begin{align*}
L^2(\mathcal{P}_T) := L^2(\Omega \times
[0,T],\mathcal{P}_T,\mathbb{P} \otimes \lambda;H),
\end{align*}
where $\mathcal{P}_T$ denotes the predictable $\sigma$-algebra on
$\Omega \times [0,T]$ and $\lambda$ the Lebesgue measure. Since for
any square-integrable L\'evy martingale $M$ the predictable
quadratic covariation $\langle M,M \rangle$ is linear,
$L^2(\mathcal{P}_T)$ is the space of all $L^2$-processes $\Phi$, for
which the It\^o-integral $\Phi \cdot M$ exists, independent of the
choice of $M$.
\begin{lemma}
For each $\Phi \in C_{\rm ad}[0,T]$ there exists a predictable
version ${}^p \Phi \in L^2(\mathcal{P}_T)$ of $\Phi$.
\end{lemma}
\begin{proof}
By \cite[Prop. 3.6.ii]{Da_Prato} there exists a predictable version
${}^p \Phi$ of $\Phi$. Since $\Phi \in C_{\rm ad}[0,T]$, we also
have
\begin{align*}
\int_0^T \mathbb{E} [ \| {}^p \Phi_t \|^2 ] dt = \int_0^T \mathbb{E}
[ \| \Phi_t \|^2 ] dt \leq T \sup_{t \in [0,T]} \mathbb{E}[\| \Phi_t
\|^2] < \infty,
\end{align*}
that is ${}^p \Phi \in L^2(\mathcal{P}_T)$.
\end{proof}
\begin{proposition}\label{prop-embedding}
The map $\Phi \mapsto {}^p \Phi$ defines an embedding from $C_{\rm
ad}[0,T]$ into $L^2(\mathcal{P}_T)$.
\end{proposition}
\begin{proof}
For two predictable versions $\Phi^1,\Phi^2 \in L^2(\mathcal{P}_T)$
of $\Phi$ we have
\begin{align*}
\int_0^T \mathbb{E} [\| \Phi_t^1 - \Phi_t ^2 \|^2] dt = 0,
\end{align*}
whence $\Phi^1 = \Phi^2$ in $L^2(\mathcal{P}_T)$. Therefore, the map
$\Phi \mapsto {}^p \Phi$ is well-defined. The linearity of $\Phi
\mapsto {}^p \Phi$ is immediately checked, and the estimate
\begin{align*}
\int_0^T \mathbb{E}[\| {}^p \Phi_t \|^2] dt = \int_0^T \mathbb{E}[\|
\Phi_t \|^2] dt \leq T \sup_{t \in [0,T]} \mathbb{E}[\| \Phi_t
\|^2], \quad \Phi \in C_{\rm ad}[0,T]
\end{align*}
proves the continuity of $\Phi \mapsto {}^p \Phi$. For $\Phi \in
C_{\rm ad}[0,T]$ with ${}^p \Phi = 0$ in $L^2(\mathcal{P}_T)$ we
have
\begin{align*}
\int_0^T \mathbb{E}[\| \Phi_t \|^2] dt = \int_0^T \mathbb{E}[\| {}^p
\Phi_t \|^2] dt = 0.
\end{align*}
Since $\Phi \in C_{\rm ad}[0,T]$, the map $t \mapsto \mathbb{E}[\|
\Phi_t \|^2]$ is continuous, which implies $\Phi = 0$ in $C_{\rm
ad}[0,T]$, showing that $\Phi \mapsto {}^p \Phi$ is injective.
\end{proof}
The notation ${}^p \Phi$ reminds of the {\em predictable projection}
of a process $\Phi$, which we shall briefly recall. In the
real-valued case one defines, for every
$\overline{\mathbb{R}}$-valued and $\mathcal{F}_T \otimes
\mathcal{B}[0,T]$-measurable process $\Phi$ the predictable
projection ${}^{\pi} \Phi$ of $\Phi$, according to \cite[Thm.
I.2.28]{Jacod-Shiryaev}, as the (up to an evanescent set) unique
$(-\infty,\infty]$-valued process satisfying the following two
conditions:
\begin{enumerate}
\item It is predictable;
\item $({^{\pi}} \Phi)_{\tau} = \mathbb{E}[\Phi_{\tau} \, | \, \mathcal{F}_{\tau
-}]$ on $\{ \tau \leq T \}$ for all predictable times $\tau$.
\end{enumerate}
Note that for every predictable process $\Phi$ we have ${}^{\pi}
\Phi = \Phi$.
We transfer this definition to any $H$-valued process
$\tilde{\Phi}$, which is an $\mathcal{F}_T \otimes
\mathcal{B}[0,T]$-measurable version of a process $\Phi \in
C_{\rm ad}[0,T]$ by using the notion of conditional expectation from
\cite[Sec. 1.3]{Da_Prato}. Then, the second property of the
predictable projection ensures that ${}^{\pi} \tilde{\Phi}$ is
finite, i.e. $H$-valued.
We obtain the following relation between the embedding ${}^p \Phi$
and the predictable projection ${}^{\pi} \tilde{\Phi}$:
\begin{lemma}\label{lemma-pred-proj}
For each $\Phi \in C_{\rm ad}[0,T]$ and every $\mathcal{F}_T \otimes
\mathcal{B}[0,T]$-measurable version $\tilde{\Phi}$ we have
\begin{align*}
{}^{\pi} \tilde{\Phi} = {}^p \Phi \quad \text{in
$L^2(\mathcal{P}_T)$.}
\end{align*}
\end{lemma}
\begin{proof}
For each $t \in [0,T]$ the identities
\begin{align*}
{}^{\pi} \tilde{\Phi}_t = \mathbb{E}[ \tilde{\Phi}_t \, | \,
\mathcal{F}_{t-}] = \mathbb{E}[ \Phi_t \, | \, \mathcal{F}_{t-} ] =
\mathbb{E}[ {}^p \Phi_t \, | \, \mathcal{F}_{t-} ] = {}^p \Phi_t
\quad \text{$\mathbb{P}$--a.s.}
\end{align*}
are valid, which gives us
\begin{align*}
\int_0^T \mathbb{E}[\|{}^{\pi} \tilde{\Phi}_t - {}^p \Phi_t\|^2]dt =
0,
\end{align*}
proving the claimed result.
\end{proof}
\begin{lemma}\label{lemma-cadlag}
If $\Phi \in C_{\rm ad}[0,T]$ has a c\`adl\`ag-version, then we have
\begin{align*}
{}^p \Phi = \Phi_- \quad \text{in $L^2(\mathcal{P}_T)$.}
\end{align*}
\end{lemma}
\begin{proof}
The process $\Phi_-$ is predictable and we have
\begin{align*}
\mathbb{E} \bigg[ \int_0^T \| {}^p \Phi_t - \Phi_{t-} \|^2 dt \bigg] =
\mathbb{E} \bigg[ \int_0^T \| \Phi_t - \Phi_{t-} \|^2 dt \bigg] =
\mathbb{E} \bigg[ \int_0^T \| \Delta \Phi_t \|^2 dt \bigg] = 0,
\end{align*}
because $\mathcal{N}_{\omega} = \{ t \in [0,T] : \Delta
\Phi_t(\omega) \neq 0 \}$ is countable for all $\omega \in \Omega$.
\end{proof}
\begin{proposition}
For each $\Phi \in C_{\rm ad}[0,T]$ we have
\begin{align*}
\text{{\rm (G-)}}(\Phi \cdot M) = {}^p \Phi \cdot M \quad \text{in
$C_{\rm ad}[0,T]$.}
\end{align*}
In particular, $\text{{\rm (G-)}}(\Phi \cdot M)$ has a
c\`adl\`ag-version.
\end{proposition}
\begin{proof}
Let $t \in [0,T]$ and $\epsilon > 0$ be arbitrary. Since $\Phi \in
C_{\rm ad}[0,T]$, it is uniformly continuous on the compact interval
$[0,t]$, and thus there exists $\delta > 0$ such that
\begin{align}\label{continuity-e-d}
\mathbb{E}[\| \Phi_u - \Phi_v \|^2] < \frac{\epsilon}{\langle M,M
\rangle_t}
\end{align}
for all $u,v \in [0,t]$ with $|u - v| < \delta$. Let $\mathcal{Z} =
\{ 0 = t_0 < t_1 < \ldots < t_n = t\}$ be an arbitrary decomposition
with $\sup_{i = 0,\ldots,n-1} |t_{i+1} - t_i| < \delta$. Defining
\begin{align*}
\Phi^{\mathcal{Z}} := \Phi_0 \mathbbm{1}_{[0]} + \sum_{i = 0}^{n-1}
\Phi_{t_i} \mathbbm{1}_{(t_i,t_{i+1}]},
\end{align*}
we obtain, by using the It\^o-isometry and (\ref{continuity-e-d}),
\begin{align*}
&\mathbb{E} \left[ \bigg\| ({}^p \Phi \cdot M)_t - \sum_{i=0}^{n-1}
\Phi_{t_i} (M_{t_{i+1}} - M_{t_i}) \bigg\|^2 \right] = \mathbb{E}
\left[ \bigg\| \int_0^t ({}^p \Phi_s - \Phi_s^{\mathcal{Z}}) dM_s
\bigg\|^2 \right]
\\ &= \mathbb{E} \bigg[ \int_0^t \| {}^p \Phi_s - \Phi_s^{\mathcal{Z}} \|^2 d \langle
M,M \rangle_s \bigg] = \sum_{i=0}^{n - 1} \int_{t_i}^{t_{i+1}}
\mathbb{E} [ \| \Phi_s - \Phi_{t_i}\|^2 ] d\langle M,M \rangle_s <
\epsilon,
\end{align*}
establishing that ${}^p \Phi \cdot M$ is a version of $\text{{\rm
(G-)}}(\Phi \cdot M)$.
\end{proof}
\subsection{Stochastic integral with respect to Lebesgue measure}
In an analogous fashion, we introduce the stochastic integral ${\rm
\text{(G-)}}(\Phi \cdot \lambda)$ with respect to the Lebesgue
measure $\lambda$, cf. \cite[Lemma 3.6]{Onno-Levy}. By similar
arguments as in the previous subsection, we obtain the same relation
between this stochastic integral ${\rm \text{(G-)}}(\Phi \cdot
\lambda)$ and the usual Bochner integral $\Phi \cdot \lambda$.
\subsection{Stochastic integral with respect to a L\'evy process}\label{subsec-Levy}
Now let $X$ be a square-integrable L\'evy process with
semimartingale decomposition $X_t = M_t + bt$, where $M$ is a
square-integrable L\'evy martingale and $b \in \mathbb{R}$.
According to \cite[Def. 3.7]{Onno-Levy} we set
\begin{align*}
{\rm \text{(G-)}} (\Phi \cdot X) := \text{{\rm (G-)}} (\Phi \cdot M)
+ b \text{{\rm (G-)}} (\Phi \cdot \lambda).
\end{align*}
As a direct consequence of our previous results, we obtain:
\begin{proposition}\label{prop-coin}
For each $\Phi \in C_{\rm ad}[0,T]$ we have
\begin{align*}
\text{{\rm (G-)}}(\Phi \cdot X) = {}^p \Phi \cdot X \quad \text{in
$C_{\rm ad}[0,T]$.}
\end{align*}
In particular, $\text{{\rm (G-)}}(\Phi \cdot X)$ has a
c\`adl\`ag-version.
\end{proposition}
Summing up, we have seen that the space $C_{\rm ad}[0,T]$ of all
adapted continuous curves from $[0,T]$ into $L^2(\Omega;H)$ is embedded into
$L^2(\mathcal{P}_T)$ via $\Phi \mapsto {}^p \Phi$, see Proposition
\ref{prop-embedding}, and that the It\^o-integral ${}^p \Phi \cdot
X$ is a c\`adl\`ag-version of $\text{{\rm (G-)}}(\Phi \cdot X)$, see
Proposition \ref{prop-coin}. Moreover, we have seen the relation to
the predictable projection in Lemma \ref{lemma-pred-proj}.
We close this section with an example, which seems surprising at a
first view. Let $X$ be a standard Poisson process with values in
$\mathbb{R}$. In Ex. 3.9 in \cite{Onno-Levy} it is derived that
\begin{align*}
{\rm \text{(G-)}}\int_0^t X_s dX_s = \frac{1}{2} ( X_t^2 - X_t ).
\end{align*}
Apparently, this does not coincide with the pathwise
Lebesgue-Stieltjes integral
\begin{align*}
\int_0^t X_s dX_s = \frac{1}{2} ( X_t^2 + X_t ).
\end{align*}
The explanation for this seeming inconsistency is easily provided.
The process $X$ is not predictable, whence it is not
It\^o-integrable, and a straightforward calculation shows that
\begin{align*}
{\rm \text{(G-)}}\int_0^t X_s dX_s = \int_0^t X_{s-} dX_s.
\end{align*}
This, however, is exactly what an application of Proposition
\ref{prop-coin} and Lemma \ref{lemma-cadlag} yields.
\section{Solutions of stochastic partial differential
equations as $L^2$-curves}\label{sec-SDE}
Regarding stochastic integrals as $L^2$-curves provides an existence
and uniqueness proof for stochastic partial differential equations.
Of course, this result is well-known in the literature (see, e.g.,
\cite{Ruediger-mild}, \cite{Da_Prato}, \cite{SPDE}, \cite{Marinelli-Prevot-Roeckner}, \cite{P-Z-book}), whence we only give an outline.
Consider the stochastic partial differential equation
\begin{align}\label{SPDE}
\left\{
\begin{array}{rcl}
dr_t & = & (A r_t + \alpha(t,r_t))dt + \sum_{i=1}^n
\sigma_i(t,r_{t-})dX_t^i
\\ r_0 & = & h_0,
\end{array}
\right.
\end{align}
where $A : \mathcal{D}(A) \subset H \rightarrow H$ denotes the
infinitesimal generator of a $C_0$-semigroup $(S_t)_{t \geq 0}$ on
$H$, and where $X^1,\ldots,X^n$ are real-valued, square-integrable
L\'evy processes as in Section \ref{subsec-Levy}. We assume that the standard Lipschitz conditions
are satisfied.
Then, there exists a unique solution $r \in C_{\rm ad}[0,T]$ of the equation
\begin{align*}
r_t := S_t h_0 + \text{{\rm (G-)}} \int_0^t S_{t-s} \alpha(s,r_s)ds +
\sum_{i=1}^n \text{{\rm (G-)}} \int_0^t S_{t-s}
\sigma_i(s,r_s)dX_s^i,
\end{align*}
see \cite{Onno-Wiener} for the Wiener case and \cite{Onno-Levy} for
the L\'evy case. It is remarkable that the proof is established by
means of precisely the same arguments as in the classical
Picard-Lindel\"of iteration scheme for ordinary differential
equations, where one works on the Banach space $C([0,T];H)$ instead
of $C_{\rm ad}[0,T]$.
Applying Proposition \ref{prop-coin} for any fixed $t \in [0,T]$, we
obtain the existence of a (up to a version) unique, predictable mild
solution for the SPDE
\begin{align*}
\left\{
\begin{array}{rcl}
dr_t & = & (A r_t + \alpha(t,r_t))dt + \sum_{i=1}^n
\sigma_i(t,r_{t})dX_t^i
\\ r_0 & = & h_0,
\end{array}
\right.
\end{align*}
which, in addition, is mean-square continuous.
Observe that we have no statement on path properties of the
solution. If, however, the semigroup is pseudo-contractive, i.e.,
there exists $\omega \in \mathbb{R}$ such that
\begin{align*}
\| S_t \| \leq e^{\omega t}, \quad t \geq 0
\end{align*}
then the stochastic convolution (It\^o-)integrals have a
c\`adl\`ag-version. This can be shown by using the Kotelenez
inequality (see \cite{Kotelenez}) or by using the
Sz\H{o}kefalvi-Nagy theorem on unitary dilations (see, e.g.,
\cite[Thm. I.8.1]{Nagy}, or \cite[Sec. 7.2]{Davies}). We refer to
\cite[Sec. 9.4]{P-Z-book} for an overview. In this case, we conclude
that there even exists a (up to indistinguishability) unique
c\`adl\`ag, adapted mild solution $(r_t)_{t \geq 0}$ for
(\ref{SPDE}), which, in addition, is mean-square continuous.
\end{document} |
\begin{document}
\title{\textbf{Integrated Continuous-time\\ Hidden Markov Models}}
\author{Paul G. Blackwell}
\date{\textit{School of Mathematics and Statistics,\\ University of Sheffield, Sheffield S3 7RH, U.K.\\ [email protected]}\\[2ex]\today}
\maketitle
\begin{abstract}
\noindent
Motivated by applications in movement ecology, in this paper I propose a new class of \textit{integrated continuous-time hidden Markov models} in which each observation depends on the underlying state of the process over the whole interval since the previous observation, not only on its current state. This class gives a new representation of a range of existing models, including some widely applied switching diffusion models. I show that under appropriate conditioning, a model in this class can be regarded as a conventional hidden Markov model, enabling use of the Forward Algorithm for efficient evaluation of its likelihood without sampling of its state sequence. This leads to an algorithm for inference which is more efficient, and scales better with the amount of data, than existing methods. This is demonstrated and quantified in some applications to animal movement data and some related simulation experiments.
\end{abstract}
\section{Introduction}
The motivation for this paper comes from the analysis of animal movement data, arising for example from GPS tagging. This type of application has received a great deal of attention recently; see for example the review by \citet{Toby}. Typically the animal's location $X(t)$ is observed at discrete, sometimes regular, instants in time $t_1,t_2,\ldots$. Conceptualisation of the process often involves an underlying behavioural state $S(t)$, with the movement process switching between different forms depending on that behaviour.
Hidden Markov models\ (defined in \S\ref{standardHMM}) have thus been widely used to model movement, with the `hidden' state representing the behaviour \citep{Morales2004}. However, the application of this approach in continuous-time modelling \cite[e.g.][]{Blackwell1997,Exact} has been limited by computational complexity, as the existing algorithms for hidden Markov models\ do not immediately apply; see \citet{Toby} for discussion. The aim of this paper is to show how to carry out fast, exact computation, with an algorithm closely related to the Forward Algorithm of a conventional hidden Markov model\ (see \S\ref{standardHMM}), for a broad class of continuous-time models, including many suitable for representing animal movement. Its use is illustrated in some real examples using telemetry data, and its performance compared with existing methods.
\section{Existing classes of model}
\subsection{Hidden Markov Models}
\label{standardHMM}
A hidden Markov model\ is an unobserved discrete-time Markov chain $\{S_k\}$ accompanied by observations $Y_k$, with the distribution of each observation determined by the corresponding value of the chain, so that
\[
Y_j \sim f_{S_j}(\cdot).
\]
In the simplest case, the observations are conditionally independent given the chain. In a movement context, the `observation' in this sense is some function of the sequence of observed locations, for example
the displacement $Y_j = X(t_{j})-X(t_{j-1})$.
Hidden Markov models\ are very widely studied and applied, and it is beyond the scope of the current paper to fully review modelling with hidden Markov models\ and methods for inference. However, one key factor in their wide adoption is the existence of a highly efficient algorithm, the Forward Algorithm, allowing the calculation of the probability of a sequence of observations by indirectly summing over all possible state sequences, a calculation that would be prohibitively expensive if carried out na\"ively. See for example \cite{ZucchiniMacDonald} for general background, and \cite{moveHMM} and \cite{Toby} for specific applications to discrete-time models of animal movement.
\subsection{Continuous-time Hidden Markov Models}
\label{sec:cthmm}
A continuous-time Hidden Markov Model is an unobserved continuous-time Markov chain $\{S(t)\}$ accompanied by conditionally independent observations $Y_{t_1}, Y_{t_2}, \ldots$ with distributions determined by the values of the chain at a countable set of times, so that
\[
Y_{t_i} \sim f_{S(t_i)}(\cdot).
\]
For examples in medical contexts, see \citet{JacksonSharples2002, Liu2017}.
This case can be handled using broadly the same inferential methods as a standard hidden Markov model, with the form of the transition matrices dependent on the time intervals $t_i-t_{i-1}$, and subject to some constraints even if the observations are equally spaced (since not every discrete-time Markov chain can be expressed as a restriction of a time-homogeneous continuous-time Markov chain to equally spaced times). This model has what is known as the `snapshot' property: the distribution of the observation $Y_{t_i}$ depends only on the state at the same instant, $S(t_i)$, and conditional on $S(t_i)$ is independent of $S(t), t \not=t_i$.
Thinking about movement in continuous time, an animal's location $X(t_i)$ naturally depends on its behaviour between $t_{i-1}$ and $t_i$; that is, $X(t_i)$ depends both on $X(t_{i-1})$ and on the whole of $\{S(t), t_{i-1}<t<t_i \}$, as discussed by \citet[\S4.4]{Toby}. The process does not have the `snapshot' property and as a consequence, cannot be represented as a continuous-time Hidden Markov Model in the sense defined above. The computational approach, in particular the Forward Algorithm, that gives such power to the usual hidden Markov model\ does not immediately apply.
\section{Integrated continuous-time Hidden Markov Models}
For more flexible modelling, it is useful to consider a Markov process $Z(t)=(X(t),S(t))$ on $\mathbb{X}\times\mathbb{S}$, where $\mathbb{S}$ is discrete. If either (a) $\mathbb{X}$ is continuous and the regularity conditions given by \cite{Berman} for such processes are satisfied, or (b) $\mathbb{X}$ is discrete, then $S(t)$ is piecewise constant over time, with transition rates $\lambda_{ij}(t, x),~i,j\in \mathbb{S}$ say.
I define an \textit{integrated continuous-time hidden Markov model} (InCH) to be a Markov process $Z(t)$ as above, satisfying one of the conditions (a) or (b), with rates $\lambda_{ij}(t, x)$ that are bounded.
In general, it lacks the snapshot property defined in \S\ref{sec:cthmm}, since the way $X(t)$ is changing depends on $S(t)$. This class includes a wide range of existing models; the reason for formulating them in this particular way is the potential improvement in computational efficiency permitted if these conditions are met, as described in \S\ref{InCH}.
In a movement context, usually $\mathbb{X}$ will be continuous. In particular, the separable switching diffusion models of \cite{Exact} can be thought of as InCH models on $\mathbb{R}^d\times\{1,\ldots,n\}$. Usually we will be interested in $d=2$, but cases with $d=1$ and $d=3$ arise naturally. Related applications involving multiple animals lead to higher values of $d$; see \citet{Mu}.
It is useful to distinguish some particular cases. An InCH is \textit{spatially homogeneous} if $\lambda_{ij}(t, x)$ is independent of $x$; otherwise it is \textit{spatially heterogeneous}. Of course, $X(t)$ need not represent geographical space, but the terminology is appropriate to many applications, and makes the necessary distinction from time-homogeneity.
In the next section, I show how to carry out computation using ideas closely related to the conventional hidden Markov model\ or the `snapshot' case, for both spatially heterogeneous and spatially homogeneous InCH models. While the applications are certainly not limited to animal movement, it is convenient to use the terms `location' and `behaviour' to refer to the components of a process $Z$. Similarly, while models with discrete $\mathbb{X}$ are certainly possible, the particular interest here is in continuous $\mathbb{X}$, and I will refer to the density of $X$, for simplicity.
\section{Representation and Algorithms}
\subsection{Uniformization}
\label{uniform}
Consider an InCH process with transition rates $\lambda_{ij}(t, x)$. Let $\lambda_{i}(t, x) = \sum_{j\not=i}\lambda_{ij}(t, x)$ represent the rate of switching out of behaviour $i$, at time $t$, when at location $x$, and
let $\kappa$ be an upper bound so that $\kappa \geq \lambda_{i}(t, x) ~\forall i,t,x$.
Then, following \cite{Exact}, the occurrences of changes in behaviour can be represented as a dynamic thinning of a Poisson process of potential switches of uniform rate $\kappa$, with retention probability
$\lambda_{S(t)}(t, X(t))/\kappa$.
The unthinned Poisson process of potential switches does not depend on $t$ and $x$, enabling us to partially separate location and behaviour in a way that turns out to be crucial for inference.
If the $\lambda_{ij}(t, x)$ are known, for example if we are interested purely in simulating a known process, then we can simply take $\kappa = \sup_{i,t,x}\{\lambda_{i}(t, x)\}$.
In the more general inference context, the most straightforward case, for both exposition and implementation, is when the prior support of $\lambda_{ij}(t, x)$ is bounded above by $u_{ij}(t, x)$, with the function $u_{ij}(t, x)$ also bounded above. We can then define $u_{i}(t, x) = \sum_{j\neq i}u_{ij}(t, x)$
and take $\kappa = \sup_{i,t,x}\{u_{i}(t, x)\}$. This is the approach taken in \S\ref{sec:ibex} and \S\ref{sec:kinkajou}. If the priors are not all bounded above, then $\kappa$ is not fixed, and must be sampled in the inference process; see the discussion in \S\ref{discuss}.
In the spatially homogeneous case, the behaviour process can be thought of as a Markov chain on $\mathbb{S}$ subordinated to a $\text{Poisson}(\kappa)$ process; that is, a continuous-time Markov chain in which some `events' do not change the state of the process.
\citet{RaoTeh} make use of the idea of uniformization, in the context of inference for a continuous-time Markov chain, and give additional background on the concept, including a proof (in the time-homogeneous case) of the representation described above.
\subsection{Existing inference methods}
For discrete-time hidden Markov models, `snapshot' continuous-time hidden Markov models\ and continuous-time Markov chains, a range of efficient algorithms for inference are available, as already indicated. Here, I focus on existing methods specific to switching diffusions and similar models, for comparison with the new methods in \S\ref{InCH} below.
\citet{Exact} make use of the uniformization representation in \S\ref{uniform}, with a Markov chain Monte Carlo
algorithm that relies on forward simulation of the model between potential switches, sampling not only the times of the potential switches but also the locations and states at those times. This enables exact inference, in the sense that there is no time discretisation or approximation, and so the limiting distribution for the chain is exactly the joint posterior distribution of trajectories and parameters. It is widely applicable because of the flexibility in specifying the transition rates. However, because the state space for the Markov chain Monte Carlo\ algorithm includes the behaviour not only at the observation times but also at the unknown collection of potential switching times, the algorithm is computationally demanding and mixes relatively slowly.
In the spatially heterogeneous case, more efficient updates that do not rely purely on forward simulation are possible, by proposing locations and states in a more general way. For example, it is possible to propose a reconstruction of part of the behaviour sequence without reference to the locations, from a spatially homogeneous continuous-time Markov chain, conditioning only on the behaviour at the start and end of the interval being updated, and then propose corresponding locations, given the behaviour, from the movement process conditioned to give the appropriate time-inhomogeneous bridge; the acceptance probability then accounts for the difference between the true behaviour process and the proposal distribution. Alternatively, locations can be proposed at potential switching times using some time-homogeneous bridge process, and then behaviours proposed from the true behaviour process given the locations; again, the acceptance probability can account for the difference between the true and proposal movement processes.
Experimentation suggests that these algorithms can be worthwhile in particular cases; however, for most purposes they are likely to be superseded by the algorithms introduced in \S\ref{InCH} below.
In the spatially homogeneous case of \cite{Blackwell1997}, where transition rates do not depend on location, behavioural trajectories $S(\cdot)$ can be sampled within a Markov chain Monte Carlo\ algorithm without sampling the locations $X(\cdot)$ associated with the transitions. This kind of algorithm, as detailed in \cite{Blackwell2003}, does not use uniformization, nor would it particularly benefit from it. However, the algorithms of the next section offer great benefits in efficiency in this homogeneous case too, and do rely on uniformization, combined with some additional simplification that exploits the homogeneity.
\subsection{Inference for integrated continuous-time hidden Markov models}
\label{InCH}
A much more efficient inferential approach can be developed by exploiting the fact that, conditional on the times and locations corresponding to potential switches, an InCH process is effectively a time-inhomogeneous version of a conventional discrete-time hidden Markov model, defined at the potential switching times, in which the transition probabilities are
\begin{align*}
p_{ij}(t,x) &= \lambda_{ij}(t,x)/\kappa, ~ i\neq j\\
p_{ii}(t,x) &= 1-\lambda_{i}(t,x)/\kappa,
\end{align*}
and the observations are given by the changes in location, with
\[
f(X(t_{k+1})\mid t_k, X(t_{k}), S(t_{k}))
\]
given by the density of the movement process corresponding to behaviour $S(t_{k})$,
\[
f_{S(t_{k})} (X(t_{k+1})\mid t_k, X(t_{k})).
\]
This differs from a typical hidden Markov model\ in that the transition probabilities and observation densities are highly variable between time points. Nevertheless, the standard Forward Algorithm that enables evaluation of the likelihood for a hidden Markov model\ without the need for explicit sampling of the states still applies here, and will enable the calculation of the likelihood for this model very efficiently.
Of course, the times of potential switches are \emph{not} known, and the corresponding locations are \emph{not} observed. Instead, we observe the locations, and typically not the behavioural states, at a set of known times which may or may not be regularly spaced. Thus in practice we need to embed the evaluation of the likelihood using the Forward Algorithm within a Markov chain Monte Carlo\ algorithm; but that algorithm now has a state space of \emph{much} lower dimension, not involving the behavioural states which are now integrated out.
The method of \citet{RaoTeh} has some similarities. They are primarily interested in the continuous-time Markov chain (or Markov Jump Process, in their terminology) itself, and do not integrate it out; instead they use the Forward Algorithm to permit fast updating of a part of the realisation of the chain.
The details of the InCH approach are given in the two following sections, which deal separately with the spatially heterogeneous and homogeneous cases. The former is easier to describe, and so is given first;
it covers a rich class of models, allowing general spatio-temporal covariates in movement modelling, for example. The latter represents an important special case in applications, and it permits the integration out of locations, for further improvement in efficiency where applicable.
\subsection{Spatially heterogeneous case}
\label{sec:hetero}
Here we need to consider a hidden Markov model\ defined at a set of times which is the union of the potential switching times, at which the transition matrix is defined as above in \S\ref{InCH}, and the observation times, at which (with probability 1) no change in state occurs and so the transition matrix is just the identity matrix $I_n$. Spatial locations at the potential switching times need to be sampled within a Markov chain Monte Carlo\ algorithm, but the states do not.
Write $t_c$ for the $c$th observation time, and $t_{c,k}$ for the $k$th potential switching time between $t_c$ and $t_{c+1}$, for $k=1,\ldots,M_c$. Of course, for any given $c$, $M_c$ may be zero.
An outline of the key step in the new Markov chain Monte Carlo\ algorithm is as follows.
Choose $a,b$ such that $1\leq a<b \leq n_\text{obs}$.
Define ${\cal{T}} = \{t_{c,k}| c=a,\ldots,b-1, k=1,\ldots,M_c\}$ and
${\cal{X}} = \{X(t_{c,k}) | c=a,\ldots,b-1, k=1,\ldots,M_c\}$.
Propose new counts $M'_c, c=a,\ldots,b-1$, times ${\cal{T}'} = \{t'_{c,k} | c=a,\ldots,b-1, k=1,\ldots,M'_c\}$
and locations ${\cal{X}'} = \{X'(t'_{c,k}) | c=a,\ldots,b-1, k=1,\ldots,M'_c\}$.
Evaluate the Hastings ratio based on likelihoods that integrate over all state sequences, replacing
${\cal{T}},{\cal{X}}$ with ${\cal{T}'},{\cal{X}'}$,
and accept or reject accordingly.
We could choose new values ${\cal{X}'}$ independently of ${\cal{X}}$, for simplicity, or close to ${\cal{X}}$ to allow `small' steps that retain the information in ${\cal{X}}$. That is, we could take either an independence sampling or a random walk approach. For maximum flexibility, we formulate the proposals as a mixture of these two extremes.
In more detail, we propose $t'_{c,k}$ and $X'(t'_{c,k})$ for a given $c$ as follows. $M'_c$ is proposed independently of $M_c$, with $M'_c \sim \text{Poisson}((t_{c+1}-t_c)\kappa)$, and $t'_{c,k}$ are defined as the order statistics of $M'_c$ independent uniform random variables on $(t_{c},t_{c+1})$.
We define $\mu_\text{I}, \Sigma_\text{I}$ as the mean and covariance respectively of a Brownian bridge with diffusion parameter $\omega$ from $X(t_c)$ to $X(t_{c+1})$, evaluated at times $t'_{c,k}, k=1,\ldots,M'_c$, corresponding to the idea of an independent proposal. Similarly we write $\mu_\text{D}, \Sigma_\text{D}$ for the mean and covariance of a series of Brownian bridges with diffusion parameter $\omega$ passing through $X(t_c), X(t_{c,1}),\ldots,X(t_{c,{M_c}}), X(t_{c+1})$, again evaluated at $t'_{c,k}, k=1,\ldots,M'_c$. We propose new locations for a given $c$ from a weighted mixture of these bridges,
\begin{align}
\label{mvn}
X'(t'_{c,1}),\ldots,X'(t'_{c,{M'_c}}) \sim \text{Normal}(p\mu_\text{I} + (1-p)\mu_\text{D}, p^2\Sigma_\text{I}+(1-p)^2\Sigma_\text{D}).
\end{align}
The proposal for ${\cal{X}'}$ is just the collection of these proposals for $c=a,\ldots,b-1$.
Both $\omega$ and $p$ are effectively tuning parameters.
To calculate the Hastings ratio, define $\Theta$ to be all model and tuning parameters plus the initial distribution of the state, $\overline{\cal{T}}$ to be all the observation times plus the potential switches outside $(t_a, t_b)$, and $\overline{\cal{X}}$ to be the corresponding locations.
Then
\begin{align*}
p({\cal{X}},{\cal{T}}\mid \overline{\cal{X}}, \overline{\cal{T}}, \Theta) &= p({\cal{X}},{\cal{T}},\overline{\cal{X}} \mid \overline{\cal{T}}, \Theta)/p(\overline{\cal{X}}\mid \overline{\cal{T}}, \Theta)\\
&\propto p(\overline{\cal{X}},{\cal{X}}\mid \overline{\cal{T}},{\cal{T}},\Theta)p({\cal{T}}\mid \Theta),
\end{align*}
where the first term is exactly the probability given by running the Forward Algorithm for $\overline{\cal{X}},{\cal{X}}$, and the second just defines a Poisson process of rate $\kappa$ on $(t_a,t_b)$.
Writing $q(\cdot)$ for a generic proposal density, we also have
\[
q({\cal{X}'},{\cal{T}'}\mid{\cal{X}},{\cal{T}},\overline{\cal{X}},\overline{\cal{T}},\Theta) = q({\cal{T}'}\mid\Theta)q({\cal{X}'}\mid{\cal{T}},\overline{\cal{T}},{\cal{X}},\overline{\cal{X}},\Theta),
\]
where the first term again defines a Poisson$(\kappa)$ process on $(t_a,t_b)$, and the second is the product over $c$ of densities of multivariate normal distributions of the form in equation (\ref{mvn}).
The Hastings ratio $\text{HR}({\cal{T}'},{\cal{X}'}\mid{\cal{T}},{\cal{X}})$ is then
\begin{align*}
& \frac{q({\cal{X}},{\cal{T}}\mid{\cal{X}'},{\cal{T}'},\overline{\cal{X}},\overline{\cal{T}},\Theta)}{q({\cal{X}'},{\cal{T}'}\mid{\cal{X}},{\cal{T}},\overline{\cal{X}},\overline{\cal{T}},\Theta)} \times
\frac{p({\cal{X}'},{\cal{T}'}\mid \overline{\cal{X}}, \overline{\cal{T}}, \Theta)}{p({\cal{X}},{\cal{T}}\mid \overline{\cal{X}}, \overline{\cal{T}}, \Theta)}\\
=~&
\frac{q({\cal{X}}\mid{\cal{T}'},\overline{\cal{T}},{\cal{X}'},\overline{\cal{X}},\Theta)}{q({\cal{X}'}\mid{\cal{T}},\overline{\cal{T}},{\cal{X}},\overline{\cal{X}},\Theta)} \times
\frac{p(\overline{\cal{X}},{\cal{X}'}\mid \overline{\cal{T}},{\cal{T}'},\Theta)}{p(\overline{\cal{X}},{\cal{X}}\mid \overline{\cal{T}},{\cal{T}},\Theta)},
\end{align*}
since the terms from the Poisson process cancel, and is easily calculated using the Forward Algorithm and equation~(\ref{mvn}).
The other steps in the overall Markov chain Monte Carlo\ algorithm are straightforward, sampling the model parameters by using random walk proposals and evaluating the likelihood using the Forward Algorithm.
\subsection{Spatially homogeneous case}
\label{SpatHom}
In the special case where the InCH process is spatially homogeneous \emph{and} the movement processes are solutions to a linear stochastic differential equation, we can completely avoid the need to sample the locations $x(t_{c,k})$, integrating them out using a matrix calculation which can be thought of as a special case of Kalman Filtering.
As discussed at length elsewhere (\cite{Blackwell1997,Blackwell2003,Exact}), models where the movement process for each state is defined by a linear stochastic differential equation can lead to surprisingly rich behaviour, so this case is of practical importance. In particular, even the case where movement simply switches between different speeds of Brownian motion is important in data analysis; see \citet{Kranstauber2012} and the example in \S\ref{BM}.
For spatially homogeneous but non-linear models, one straightforward option would be to apply the `heterogeneous' methods above, but the example from \citet{AlisonJABES} of continuous-time step-and-turn models suggests that a more efficient compromise ought to be possible.
Such cases are not explicitly considered in this paper; for the remainder of this section, I concentrate on the homogeneous linear case.
Spatial homogeneity means that the transition probabilities of the uniform chain do not depend on the locations at the potential switching times, and linearity implies that for any given sequence of behaviours, movement densities can be calculated explicitly even over time intervals that incorporate changes in behaviour.
For particular states $i$ and $j$ at times $t_c$ and $t_{c+1}$, with potential switching times
\[
{\cal{T}}_c = t_{c,1},\ldots,t_{c,M_c}
\]
we have
\begin{equation}
f_{ij}(x(t_{c+1})\mid x(t_{c}), {\cal{T}}_c)
= \sum_{\mathbf{s}} \pi_{ij}(\mathbf{s}) \phi_{ij}(x(t_{c+1})\mid x(t_{c}),{\cal{T}}_c,\mathbf{s})
\label{SpatHomMeth}
\end{equation}
where
\[
\mathbf{s} = (s_1,\ldots,s_{M_c-1})
\]
is a possible sequence of states entered at times $t_{c,1},\ldots,t_{c,M_c-1}$,
\[
\pi_{ij}(\mathbf{s}) = p_{i,s_1}\times\cdots\times p_{s_{M_c-1},j},
\]
each $p_{i,j}$ is a transition probability as derived in \S\ref{InCH}, and $\phi_{ij}(\cdot\mid\cdot)$ is the transition density conditional on the sequence and timing of states. In general, $\phi_{ij}(\cdot\mid\cdot)$ can be calculated as a density from a $d$-dimensional normal distribution with parameters calculated recursively as in \S3.3 of \cite{Blackwell2003}. See \S\ref{BM} below for a special case.
This summation over sequences of states is, of course, exactly the kind of calculation that the hidden Markov model\ Forward Algorithm is designed to avoid, because its computational cost increases rapidly with the number of time points. As a brute-force way of calculating the likelihood globally, it would be impractical because it scales so badly with the size of the data-set. As used here however, for calculation of the likelihood locally between successive observations, it is feasible provided $\kappa$ is not too large, which will be true in cases where the data are reasonably informative about the model.
Given this explicit calculation of $f_{ij}(x(t_{c+1})\mid x(t_{c}), {\cal{T}}_c) $, the calculation of the overall likelihood via the Forward Algorithm can be simplified from the version in \S\ref{InCH} and \S\ref{sec:hetero}. We can regard the process as a heterogeneous hidden Markov model\ just at the actual observation times $t_c$, but with observation probabilities given by equation (\ref{SpatHomMeth}) and transition probabilities
\[
\pi_{ij}(c) = \sum_{\mathbf{s}} \pi_{ij}(\mathbf{s}).
\]
The sampling of the potential switching times on $(t_a,t_b)$ uses a Poisson process of rate $\kappa$.
Since no additional locations need to be sampled, we have only the actual locations ${\cal{X}_\text{obs}}$, and we have
\begin{align*}
p({\cal{T}'}\mid\overline{\cal{T}},{\cal{X}_\text{obs}},\Theta) &\propto p({\cal{X}_\text{obs}}|{\cal{T}'},\overline{\cal{T}},\Theta)p({\cal{T}'}|\Theta),\\
q({\cal{T}'}\mid{\cal{T}},\overline{\cal{T}},{\cal{X}_\text{obs}},\Theta) &= q({\cal{T}'}\mid\Theta)
\end{align*}
and hence Hastings ratio
\[
\frac{p({\cal{X}_\text{obs}}\mid{\cal{T}'},\overline{\cal{T}},\Theta)}{p({\cal{X}_\text{obs}}\mid{\cal{T}},\overline{\cal{T}},\Theta)},
\]
calculated simply from two passes of the Forward Algorithm.
As in the heterogeneous case, the other steps which sample the model parameters are straightforward, using the Forward Algorithm to calculate the likelihood.
\subsection{Sampling the states}
\label{FFBS}
The key strength of the InCH approach is that likelihood evaluation, and therefore parameter estimation, does not need sampling of the behavioural states but instead integrates them out. However, we are often interested in reconstructing the states too, as part of the detailed interpretation of the data and of the states themselves. In a standard hidden Markov model\ setting, the Viterbi algorithm is typically used to calculate the single most likely state sequence, after the Forward Algorithm has been run to calculate the overall likelihood of the observations, and the same approach could be taken here. However, within the fully Bayesian approach, it seems more natural to \emph{sample} the hidden state sequence, if it is of interest. This can be achieved using the Forward-Filtering Backward-Sampling algorithm of \citet{Fruwirth}, originally designed for discrete-time models, adapted to the continuous-time context by making use of uniformization and by allowing the model to be time-heterogeneous. \citet{RaoTeh} use a similar idea as a key part of their Markov chain Monte Carlo\ algorithm for a continuous-time Markov chain, giving a Gibbs sampler for the state sequence over some sub-interval within their data. See their Appendix A for details; adaptation for the current context is straightforward.
This is of most interest in a more applied setting, but a simple example is given in \S \ref{backsample}.
\section{Implementation}
\label{sec:implement}
In each of the examples below, I chose to fix $\kappa$ so that $\kappa\delta t=1$ for the typical interval between observations. This ensures that for such an interval, the probabilities of 0, 1 or 2 potential switches are not too small (approximately 0.368, 0.368, 0.184 respectively), permitting visits to a behaviour to have a chance of being represented even if they do not span an observation.
All runs were carried out on the same low-specification desk-top PC (2.90 GHz, 8.0GB). Runs were 100,000 iterations for the homogeneous model, or 200,000 iterations for the heterogeneous model, with burn-in of 10,000 iterations (except where noted) and thinning by a factor of 100. The various tuning parameters, such as proposal variances for the Metropolis-Hastings steps, were optimized after Latin hypercube sampling, with 5 replicates at each sampled point.
Coding is in R \citep{R3.4}, for ease of development, and there is in all cases scope for substantial improvement by re-writing in a fully compiled language. Relative speeds are therefore much more informative than absolute speeds.
Effective sample size was calculated using the package Coda \citep{CODA}, minimizing over the key unknown quantities. These include the model
parameters and, where applicable, over the sampled behavioural states at the times of observations. They also include the counts of potential switching times in each interval between observations, as a proxy for the switching times themselves which would be less straightforward to assess. This minimum effective sample size was compared with the running time required, to give an effective sample size per second.
\section{Example: heterogeneous case}
\label{sec:ibex}
\subsection{Data}
As a small-scale example, I use irregular data consisting of 71 GPS relocations at approximately 4-hour intervals of an ibex in the Belledonne mountain in the French Alps, originating with the French
\textit{Office national de la chasse et de la faune sauvage}
and made available in the ADEhabitatLT package \citep{ADEhabitat} for R. The majority of the intervals between observations were around 4 hours, plus or minus 90 seconds, but there were some `missing values' leading to eight intervals of around 8 hours, one of 12 hours, and one of 16 hours.
\subsection{Model}
The model fitted was a two-state switching diffusion based on a division of the space into two regions, inside and outside a circular boundary. The boundary is intended as a simple representation of the animal's home range; its behaviour switches at some finite rate to `match' its location, inside or outside the boundary. The movement processes for the two states are Ornstein-Uhlenbeck processes \citep{Dunn1977,Blackwell1997,Exact} with a common centre of attraction. The boundary is taken to be known and fixed; this gives an adaptive model, in the sense of \citet{Exact}. Estimation of the boundary is possible within the Markov chain Monte Carlo\ part of the algorithm; it is omitted here for simplicity, but is addressed
in an unpublished report by S.\ Tishkovskaya and P.\ G.\ Blackwell, available from the author, and also in a 2019 University of Sheffield School of Mathematics and Statistics PhD thesis by H.\ Alkhezi.
\subsection{Comparison of methods}
This adaptive model can be fitted exactly using the algorithm of \citet{Exact}, sampling potential switching times, corresponding locations, and the full behavioural trajectory. It can also be fitted using the InCH approach introduced in \S\ref{sec:hetero}, obviating the need to sample the behaviours. In each case I took $\kappa=0.25$.
The algorithm of \citet{Exact} mixes rather poorly, because the space to be explored by the Markov chain Monte Carlo\ algorithm includes the complete state trajectory at the potential switching times, behaviours as well as locations. The runs reported here, five replicates of $200,000$ iterations as described in \S\ref{sec:implement}, give effective samples sizes in some cases too small for definitive analysis, but sufficient for comparison with the new approach.
\subsection{Results: real data}
\label{sec:het-real}
Fitting the two-state adaptive model with fixed boundary, using the state-sampling algorithm of \citet{Exact},
five replicates of $2\times10^5$ iterations with the optimal tuning parameters took
2240s. A burn-in of 50,000 iterations was required, and gave an effective sample size of 35.0, equivalent to 0.0156 samples per second.
Using the InCH approach, the corresponding running time was 6240s, for an effective sample size of 274, giving 0.0439/s.
\begin{figure}
\caption{Posterior distributions of the movement parameters for the two states in the heterogeneous model}
\label{fig_new_het_real}
\end{figure}
Posterior distributions of the movement parameters are given in Figure \ref{fig_new_het_real}.
In this small heterogeneous example, the InCH approach is around 2.8 times as efficient as the original `exact' algorithm, and of course remains exact in the same sense.
\subsection{Results: simulated data}
\label{sec:het-sim}
Using rounded versions of the point estimates from the analysis in \S\ref{sec:het-real}, I simulated a larger data-set of 201 observations at 4 hour intervals (so approximately three times the size of the real data). I analysed these simulated data in the same way as before, re-running the Latin hypercube tuning since a larger data-set changes the trade-off between running time, acceptance rate and mixing.
The method of \cite{Exact} took 2240s for an effective sample size of only 11.1, giving a sampling rate of 0.00493 per second.
The InCH approach took 6780s for an effective sample size of 443, giving a sampling rate of 0.0653 per second.
\begin{figure}
\caption{Posterior distributions of the movement parameters for the two states in the heterogeneous model. True values are shown as solid dots.}
\label{fig_new_het_sim}
\end{figure}
Posterior distributions of the movement parameters are given in Figure \ref{fig_new_het_sim}.
These results are better than might be expected in the light of the previous example, particularly with the InCH method where the time taken per effective sample increases noticeably more slowly than the amount of data. Initial investigations suggest that this is due to the increased regularity in the simulated data, which for simplicity were simulated at regular intervals with no missing data. The few larger intervals in the real data impose a disproportionate computational cost, in both algorithms.
For this case, the InCH approach is around 13 times as efficient as the original algorithm.
\section{Example: homogeneous case}
\label{sec:kinkajou}
\subsection{Data}
\label{kink_intro}
As an example, I consider a small data-set of two-dimensional GPS locations for a kinkajou (\textit{Potos flavus}), taken from the Movebank Repository \citep{kinkajou_data}.
The data-set consists of 61 fixes, mostly of intervals of 9 and 11 minutes but with a few missing values leading to intervals of 20 to 30 minutes.
\subsection{Model}
The model fitted is a simple InCH process, with $n$ different states each involving isotropic Brownian motion on $\mathbb{R}^2$ with a different speed (i.e.\ diffusion parameter) $v_l, l=1,\ldots,n$, in increasing order to avoid label switching.
\label{BM}
In this case, the calculation of transition densities between observations, given in \S\ref{SpatHom}, has a particularly simple form. For particular states $i$ and $j$ at times $t_c$ and $t_{c+1}$,
$\phi_{ij}(X(t_{c+1})\mid X(t_{c}),{\cal{T}}_c,\mathbf{s})$ in the notation of equation~(\ref{SpatHomMeth}) is specified by
\[
X(t_{c+1})\mid X(t_{c}),{\cal{T}}_c,\mathbf{s} \sim \text{N}(X(t_{c}), v_{ij}(X(t_{c}),{\cal{T}}_c,\mathbf{s}) I_2)
\]
where
\[
v_{ij}(X(t_{c}),{\cal{T}}_c,\mathbf{s})
= (t_{c,1}-t_c)v_i + \sum_{k=1}^{M_c-1} (t_{c,k+1}-t_{c,k})v_{s_k} + (t_{c+1}-t_{c,M_c})v_j,
\]
that is, the appropriately time-weighted average of the diffusion parameters in different states,
and $I_2$ is the $2\times2$ identity matrix.
The runs reported here have $n=3$, except the run showing the sampling of the states which has $n=2$ for ease of visualization.
\subsection{Comparison of methods}
The model being fitted is spatially homogeneous, so the method of \S\ref{SpatHom} is appropriate here. For comparison, the same model could be fitted using the forward simulation method of \citet{Exact}, which is the origin of the thinned Poisson representation. The method there does not exploit hidden Markov model\ computational methods, but instead tracks the whole state trajectory as part of its Markov chain Monte Carlo\ algorithm, re-sampling a part of it at each iteration. Since the `full data' likelihood conditional on a complete trajectory for the states can be calculated more easily than the `integrated' likelihood of \S\ref{SpatHom}, the algorithm of \citet{Exact} runs more quickly.
However, it mixes much less well, because of the high dimension of the space to be explored by the Markov chain Monte Carlo\ algorithm.
This comparison is arguably unfair, however, as the existing algorithm does not exploit the spatial homogeneity of the model or the simplification described in \S\ref{BM}. Instead, therefore, performance is shown for a version in which the behavioural sequence is sampled, and the simplification of \S\ref{BM} is applied. This is essentially the algorithm applied by \citet{Blackwell2003}, in the particular case where all movement processes are versions of Brownian motion, representing the state of the art for an exact analysis in the spatially homogeneous case, and therefore a fairer comparison.
\subsection{Results: real data}
\label{sec:real}
Firstly, results are shown for the analysis of the small data-set of 61 observations. The results support a 3-state model as being plausible, but the key point of interest here is computational performance, compared for the existing and proposed algorithms.
Using the homogeneous version of the existing algorithm, as described above,
five replicates of $10^5$ iterations took 682s. The effective sample size was 406, giving
0.595 samples per second.
Regarding the model as an InCH and using the Forward Algorithm to calculate the likelihood, five replicates of $10^5$ iterations took 1130s and produced an effective sample size of 569, giving a rate of 0.504/s.
Posterior distributions of the movement parameters are given in Figure \ref{fig_new_hom_real}.
\begin{figure}
\caption{Posterior distributions of the diffusion parameters for the three states in the homogeneous model.}
\label{fig_new_hom_real}
\end{figure}
For this small sample, with a homogeneous model, the InCH approach is marginally less efficient than the existing approach.
\subsection{Results: simulated data}
The real strength of the InCH approach is that its efficiency scales better with the size of the data-set than existing methods. To illustrate this, I simulated data from the estimated parameters in \S\ref{sec:real} to obtain
301 observations (i.e.\ simulating an observation period 5 times longer than the data used in \S\ref{sec:real}) and then analysed them in the same way as before.
The values of the tuning parameters differ between the two data-sets, based on separate Latin hypercube optimization.
With the existing algorithm, five replicates of $10^5$ iterations took 1219s, so the running time is only around 1.8 times as long as for the smaller data-set. This is because much of the computational effort goes on localized updates to the behavioural sequence, for which only part of the likelihood needs to be evaluated.
However, the effective sample size is only 26.6,
giving a sampling rate of 0.0219/s.
The InCH approach ran five replicates of $10^5$ iterations in 4340s, about 3.8 times as long as for the small data-set. It gave an effective sample size of 345, decreasing much more slowly than was the case for the existing method. This is because the dimension of the space over which the Markov chain Monte Carlo\ algorithm is sampling is not increasing with size of the data-set, so mixing does not degrade so quickly. It does still decrease to some extent, because the optimal proportion of the Poisson $\kappa$ process to resample at each iteration (estimated through the Latin hypercube experiments) is decreasing. The net rate of generating independent samples is 0.0796/s, so for this data-set the InCH approach is around 3.6 times as efficient as the existing method.
Posterior distributions of the movement parameters are given in Figure \ref{fig_new_hom_sim}.
\begin{figure}
\caption{Posterior distributions of the diffusion parameters for the three states in the homogeneous model.
True values are shown as vertical dashed lines.}
\label{fig_new_hom_sim}
\end{figure}
\subsection{State reconstruction}
\label{backsample}
As described in \S \ref{FFBS}, reconstruction of the actual states is not necessary for parameter estimation, but may be of interest. For clarity, I have fitted a simpler model to the data of \S \ref{kink_intro}, of the same form as above but with only two states, and the results of sampling the behavioural states are summarized in Figure \ref{fig:decode}.
\begin{figure}
\caption{Posterior probability that the kinkajou is in the `fast' state in the 2-state model (solid line). Times of observations (relative to an arbitrary origin) are indicated by the vertical dashed lines.}
\label{fig:decode}
\end{figure}
Based on a sample of size 5000 from the posterior distribution of states, obtained using the Forward-Filtering Backward-Sampling algorithm, posterior probabilities of being in the faster of the two states are calculated every minute during the time covered by the data. (Calculating the probability at every potential switching time, over all 5000 trajectories, would be prohibitively expensive, and would add essentially no further information.) Figure \ref{fig:decode} shows these posterior probabilities, and also indicates the times of the observations. The probabilities are often more extreme (closer to 0 or 1) just at the times of observations, and often close to 0.5 when there are missing data, as might be expected.
\section{Discussion}
\label{discuss}
\subsection{Summary of results}
I have shown that, while standard hidden Markov model\ techniques do not apply directly to continuous-time movement models, a very broad class of such models can be seen as hidden Markov models\ after conditioning on the Poisson process of potential times of behavioural change. This can be exploited within a Markov chain Monte Carlo\ algorithm as a highly efficient way of evaluating likelihood for these models without sampling the behavioural states, resulting in much improved mixing.
In an example, the scaling of computational performance with the size of data-set is shown to be much better in the new approach than in existing methods. Thus, it is possible to extend the key benefit of the hidden Markov model\ approach to realistic continuous-time models.
\subsection{Extensions}
For definiteness of exposition, the models and algorithms above make a number of assumptions that are not in fact essential.
I have assumed that behaviour is not observed at all, which is the most common case, though \citet{Blackwell2003} addresses the opposite case. Increasingly, partial information about behaviour is available, either through direct observation or through other kinds of telemetry such as accelerometry. The methods above can incorporate this extra information readily, by adding an extra term in the calculation of the likelihood at the time of the observation.
As is widespread in movement analysis, including discrete-time hidden Markov models, I have neglected GPS measurement error above. However, it can be readily incorporated by including extra variables in the state of the Markov chain used for inference, representing the true, rather than observed, location at the time of each GPS fix. See \citet{AlisonBAYSM} for an illustration of this in a similar context. Depending on the model, a more sophisticated Kalman filtering approach may also be possible; cf.\ \citet{TheoVelocity}.
As mentioned above, it is conceptually simplest to keep the rate $\kappa$ of potential switches as a constant. However, that requires the prior distributions for the rates $\lambda_i(\cdot)$ to be bounded above. An alternative is to allow $\kappa$ to be data-driven, via the $\lambda_{ij}(\cdot)$s. Some care is needed, since $\kappa$ is not really a parameter but rather a computational device (for example, increasing $\kappa$ does not change the model at all, though it slows the calculation), but
one successful approach is explored in a 2019 University of Sheffield School of Mathematics and Statistics PhD thesis by H.\ Alkhezi.
Finally, it may be desirable to allow behavioural switching to depend on the length of time already spent in the current state, as well as the absolute time and other covariates, as a kind of semi-Markov extension. This can be done readily either by simply incorporating this elapsed time as an argument to $\lambda_{ij}(\cdot)$, which complicates the computation somewhat, or by extending the state space; again, see the thesis by Alkhezi
for details.
\end{document} |
\begin{document}
\title{Equilibration of Isolated Systems: investigating the role of coarse-graining on the initial state magnetization}
\author{Gabriel Dias Carvalho}
\email{[email protected]}
\affiliation{Instituto de Física, Universidade Federal Fluminense, Niterói, Rio de Janeiro 24210-346, Brazil}
\author{Luis Fernando dos Prazeres}
\affiliation{Instituto de Física, Universidade Federal Fluminense, Niterói, Rio de Janeiro 24210-346, Brazil}
\author{Pedro Silva Correia}
\email{[email protected]}
\affiliation{Departamento de Ciências Exatas, Universidade Estadual de Santa Cruz, Ilhéus, Bahia 45662-900, Brazil}
\author{Thiago R de Oliveira}
\affiliation{Instituto de Física, Universidade Federal Fluminense, Niterói, Rio de Janeiro 24210-346, Brazil}
\date{\today}
\begin{abstract}
Many theoretical and experimental results show that even isolated quantum systems evolving unitarily may equilibrate, since the evolution of some observables may be around an equilibrium value with negligible fluctuations most of the time. There are rigorous theorems giving the conditions for such equilibration to happen. In particular, initial states prepared with a lack of resolution in the energy will equilibrate.
We investigate how equilibration may be affected by a lack of resolution, or coarse-graining, in the magnetization of the initial state. In particular, for a chaotic spin chain and using exact diagonalization, we show that an initial state with well-defined magnetization almost does not equilibrate. On the contrary, a coarse, not well-defined, magnetization induces equilibration in a way that will depend on the degree of coarse-graining.
We also analyze the time for the system to reach equilibrium, showing good agreement with theoretical estimates and with some evidence that less resolution leads to faster equilibration. Our study highlights the crucial role of initial state preparation in the equilibration dynamics of quantum systems and provides new insights into the fundamental nature of equilibration in closed systems.
\end{abstract}
\pacs{}
\maketitle
\section{Introduction}
One of the basic assumptions of thermodynamics is that systems will reach thermal equilibrium if left alone after some perturbation. It is also one of the main foundational problems of statistical physics; to show that equilibration occurs from microscopic principles. One main approach is to consider that the system is not isolated and that interaction with the external environment will lead to thermal equilibrium.
A different approach, that has gained a lot of attention, is to show that even isolated systems described by pure states may reach thermal equilibrium when one is monitoring only a few observables \cite{ReviewNatureEisert} \footnote{Actually, this approach was proposed
already by Boltzmann \cite{Lebowitz93, Lebowitz07} and extended to the quantum scenario by von Neumann \cite{Goldstein10}. There is also some discussion if this equilibration process is, in fact, equivalent to the decoherence of open systems; see \cite{Argentinos} for
a positive view and \cite{Schlosshauer05} for a negative one.}. Since the dynamics is unitary, the expectation value of an observable never stops oscillating, but such oscillations may be very small most of the time, so that the system appears to be in equilibrium. Rigorous theorems show that, to equilibrate, initial states must be a superposition containing a large number of energy eigenvectors; equivalently, states prepared with an apparatus that lacks resolution in energy will equilibrate \cite{Reimann2008, Popescu2009}.
It can be argued that since for local Hamiltonians, the distance between energy levels decays exponentially with the number of particles, it is almost impossible to prepare an initial state with just a few eigenstates. Thus, we should expect that most states prepared in the lab, or nature, will equilibrate if left isolated. From a more mathematical point of view, it is possible
to show that when randomly chosen using a Haar measure, most of the states will equilibrate
(see theorem 2 in \cite{Popescu2009}).
While in terms of energy it is clear when an initial state will equilibrate, there are no general or rigorous results about other physical properties, such as magnetization,
correlations, or entropy. The few exceptions are \cite{Brandao}, where the authors show that states with exponentially decaying correlations equilibrate, and \cite{WilmingEisert}, where it is shown that product states equilibrate when the system's energy eigenstates at finite energy density have a sufficient amount of entanglement. There are also many numerical works, for particular spins chains with an initial product state and well-defined local magnetization, that explores how the changes in the parameters of $H$ affect equilibration and thermalization \cite{Polkovniov11, Gogolin16}, but few works look at the role of the properties of the initial states \cite{Banuls11}.
In this work, we analyze how the lack of resolution in the magnetization of the initial state affects equilibration.
In particular, we study two different situations: (a) we do not have the complete spatial resolution to prepare the initial state, namely, we know the number of spins pointing up but do not know their positions on the lattice; (b) we do not know how many spins are pointing up and also where they are. In case (a) we have a well-defined global but not local magnetization. In case (b) neither a global nor a local well-defined magnetization. This context of lack of resolution is what justifies our use of the term coarse-graining \cite{CrisCG}. Thus, we will consider as our initial state a coherent superposition in the local basis of magnetization, which is not the same as the energy basis. We show that this addition of coherences will be relevant in the process of equilibration. Note that the effects of lack of resolution are already present in the experimental literature. In \cite{PinheiroExp} the authors analyze an experiment with p-bosons in an XYZ quantum Heisenberg model and discuss experimental imperfections in the system's preparation, detection, and manipulation, emphasizing the rich physics involved. In \cite{atomlattice} a spin-lattice is emulated in a lattice where individual atoms are trapped in each potential minimum. The measuring detector is such that it cannot resolve light coming from each atom individually.
We find a relation between the degree of coarse-graining and the degree of equilibration of the initial state: more coarse-graining improves the equilibration. As expected, this is because coarse-grained states are not a superposition of a few energy eigenstates. We also briefly study the time scale for the equilibration to happen and compare with results in \cite{ThiagoNJ}. There, the authors describe the equilibration of an observable as the dephasing of the complex amplitudes of the energy gaps, estimating theoretically from heuristic arguments the equilibration time in terms of the dispersion of the distribution of energy gaps.
Our article is organized as follows: In section \ref{sec:isosyseq} we discuss some general features of equilibration of isolated systems. In section \ref{sec:isi} we discuss our system of interest, including the Hamiltonian, the measured observable, and the initial state; we also explain the construction of our initial states and in what sense our knowledge of them is incomplete. In section \ref{sec:decg} we present our main result, regarding the effective dimension of the system and the coarse-graining. In section \ref{sec:eqcg} we compare our equilibration times with values obtained via the procedure in \cite{ThiagoNJ}.
\section{Equilibration of isolated systems}
\label{sec:isosyseq}
Consider an initial state $|\psi_0\rangle = \sum_{k} c_k |E_k \rangle $ evolving under $H= \sum_{k} E_k |E_k\rangle\langle E_k|$ and let us monitor an observable $\hat{A}$ by measuring its expectation value $A(t)=\langle \hat{A}(t)\rangle$. As the system is isolated and evolving unitarily, we expect $A(t)$ to oscillate and never reach a stationary state. However, it may oscillate very close to an equilibrium value most of the time, such that one is not able to distinguish it from an equilibrium; we have probabilistic equilibration. One way to quantify this is to look at the time average of the fluctuation; if it is small, the system is very close to equilibrium most of the time. It is possible to show that \footnote{There are many variations of this result. The original one is \cite{Reimann2008}, and the one we use with the operator norm was given in \cite{Short11}. It is also possible to consider reduced density operators instead of observables as in \cite{Popescu2009}. For a review of these results see \cite{Gogolin16}.}
\begin{equation}
\overline{(A(t)-\overline{A})^2} \leq ||A|| / d_{eff},
\label{equilibr}
\end{equation}
with $\overline{A}$ the infinite-time average of $A(t)$, $||A||$ the operator norm of $\hat{A}$ and
\begin{equation}
d_{eff}=\frac{1}{\sum |c_k|^4},
\label{deff}
\end{equation}
known as the effective dimension. $d_{eff}$ is a measure of how many energy eigenstates participate in the initial state $\ket{\psi_0}$ or how much of the Hilbert space is explored during the time evolution; hence the name effective dimension ($d_{eff}$ is also known as the participation ratio, a measure of the localization of a state in the energy basis). The result above is valid for Hamiltonians without degeneracies and ``degenerate gaps'': $E_k-E_l=E_m-E_n$ only if $E_k=E_m; E_l=E_n$ or $E_k=E_l; E_m=E_n$. While it is generally expected and numerically found that such conditions are true for systems of interacting particles, the results may be generalized for systems with some, not exponentially large, degeneracies and some ``degenerate gaps'' \cite{Short12} \cite{ReimannNJ2012}.
These analytical and general results give the mathematical ingredients needed for a system to equilibrate: a $H$ without many degenerated gaps and an initial state prepared with a lack of resolution in the energy. It is expected that, for systems with local interactions, it is almost impossible to prepare in the lab a $|\psi_0\rangle$ with low $d_{eff}$, since the distance between the energy levels becomes much smaller than any actual experimental resolution (for more details see sec. 2.1 of \cite{ReimannNJ2012} and \cite{Gallego} for quantification of the resources needed to prepare an initial state that does not equilibrate). However, it is still unclear which other physical properties of a system and state are important for equilibration. There are a few analytical results for particular cases that equilibrate: (1) states with exponentially decaying correlations \cite{Brandao}, and (2) product states when the system's energy eigenstates at finite energy density have a sufficient amount of entanglement \cite{WilmingEisert}. A few negative results also show that some simple initial product states never equilibrate \cite{Wilming22}. On the other hand, many numerical results study how the Hamiltonian parameters affect equilibration and thermalization,
mostly considering initial product states \cite{Polkovniov11, Gogolin16}, with only a few analyzing the effects of different initial states
\cite{Banuls11}.
Instead of considering the resolution in the preparation of the system in terms of its energy, we will consider in terms of the system's magnetization.
\section{Initial States and Coarse-Graining}
\label{sec:isi}
We will consider a one-dimensional spin lattice with $n = 16$ spins that evolves with a Hamiltonian based on the usual quantum Heisenberg XYZ model with an external magnetic field in $z$ and open boundary conditions. We also add a next-nearest-neighbor interaction to break integrability and avoid a large number of degenerate gaps. The Hamiltonian is thus given by:
\begin{equation}
\label{Hamiltonian}
H = \sum_{i=1}^{n} J \, ( S_{i}^{x} S_{i + 1}^{x} + J_{y} \, S_{i}^{y} S_{i + 1}^{y} + J_{z} \, S_{i}^{z} S_{i + 1}^{z} + S_{i}^{z} S_{i + 2}^{z} + h_{z} \, S_{i}^{z}) .
\end{equation}
We set $J = 1$ and $\hbar = 1$, and $S_{i}^{x,y,z}$ are spin-$1/2$ operators acting at site $i$. All the coupling parameters $J_{y}, J_{z}$ and $h_{z}$ are assumed to be positive. The measured observable is the magnetization in the $z$ direction, given by
\begin{equation}
M_{z}(n) = \sum_{i=1}^{n} \mathbb{1}^{i-1} \otimes \sigma_{z} \otimes \mathbb{1}^{n-i},
\label{observable}
\end{equation}
with $\mathbb{1}^{i-1}$ the identity operator acting on the first $i-1$ spins and $\sigma_{z}$ a Pauli matrix.
We want to study how the resolution in the preparation of a magnetic state affects the equilibration. As mentioned, most studies consider a full magnetic product state (or anti-ferro):
\begin{equation}
\ket{\psi_{0}} = \ket{ \uparrow . . . \uparrow \uparrow \uparrow }.
\label{psi0nocg}
\end{equation}
To prepare such a state, for large systems where equilibration may occur, we need a very high resolution in the measure of the total spin, or be able to measure each spin locally.
The concept of coarse-graining is incorporated into our framework through the composition of our initial state. We consider a superposition where the number of states involved depends on our ability to resolve such a state. Due to our limited ability to resolve the initial state, we make the following assumptions: (a) we can resolve the total magnetization of the initial state, but not the direction of individual spins; and (b) we can only resolve that the total magnetization is larger than a given value. Therefore, we consider our initial state as a superposition of spin configurations that can not be distinguished within the apparatus resolution.
To illustrate case (a), consider a system of 3 spins with total magnetization corresponding to two spins pointing up. We can not distinguish between the states $|\downarrow \uparrow \uparrow\rangle $, $|\uparrow \downarrow \uparrow\rangle$ and $|\uparrow \uparrow \downarrow\rangle$. Then, our initial state is $|\downarrow \uparrow \uparrow\rangle + |\uparrow \downarrow \uparrow\rangle + |\uparrow \uparrow \downarrow\rangle$. Note that in case (a) we have a well-defined total magnetization. The states that fall into this category are analogous to Dicke states \cite{Dicke}. To illustrate case (b), consider a system of 3 spins where we can not distinguish between state $|\uparrow \uparrow \uparrow\rangle $ and the states with one spin flipped, as $|\downarrow \uparrow \uparrow\rangle $. Then, our initial state is $|\uparrow \uparrow \uparrow\rangle + |\downarrow \uparrow \uparrow\rangle + |\uparrow \downarrow \uparrow\rangle + |\uparrow \uparrow \downarrow\rangle$. This is equivalent to a coarse-graining measure of the total magnetization in the $\hat{z}$ direction.
In both cases, we utilize coherent sums in the form of pure states to describe our initial states. This choice is driven by two main considerations: the first is our assumption that the system is completely isolated, which means there is no leak of information for the environment or the system isn't part of a larger entangled system. The second is our desire to avoid introducing probabilities by hand. Had we chosen statistical mixtures as initial states, probabilities would have been present from the start.
To quantify the coarse-graining, we can define a parameter $k$, which is related to the number of flipped-down spins. In the $k = 0$ situation we have, in both cases, full resolution in the measure of the magnetization in the $\hat{z}$ direction (no coarse-graining at all) and can ensure that all spins are pointing in the up-direction (Eq. \ref{psi0nocg}). In the situation in which $k \ne 0$, all the ket-states with the number of pointing down spins equal (case (a)) and equal or less (case (b)) than $k$ participate in the sum. For example, let us consider the case where $n = 6$ and $k = 2$. Our initial states would be given by
\begin{equation}
\ket{\psi_{0}} = \ket{\uparrow \uparrow \uparrow \uparrow \downarrow \downarrow} + p.p.,
\label{psi0cg}
\end{equation}
for case (a), and
\begin{equation}
\ket{\psi_{0}} = \ket{\uparrow \uparrow \uparrow \uparrow \uparrow \uparrow } + \ket{ \uparrow \uparrow \uparrow \uparrow \uparrow \downarrow } + \ket{ \uparrow \uparrow \uparrow \uparrow \downarrow \downarrow} + p.p.,
\label{psi0cgc}
\end{equation}
for case (b), with "p.p." meaning "possible permutations". For simplicity, we choose to omit the normalization factors in the discussion above. The parameter $k$ controls the resolution, or the amount of coarse-graining, with greater $k$ meaning less resolution and thus more states participating in the superposition of the initial state. From now on, in the context of case (a), the initial state with $k$-flipped-down spins and their permutations will be called the $k$-flipped initial state. In the context of case (b), the superposition will also include states with until $k$-flipped-down spins and their permutations.
The main message of this section: due to our lack of resolution, we have to add terms in $\ket{\psi_0}$, transforming it into a sum of possible states. The states that will compose the sum will be determined by the two types of coarse-graining: (a) the spatial one, where we know the global magnetization but do not know where the flipped-down spins are located. In this case, all possible permutations with $k$-flipped-down spins must compose the superposition; (b) the magnetization spectrum coarse-graining together with the spatial coarse-graining. In this case, we only know a magnetization lower bound, and all possible permutations with a $k$-flipped-down spins, or less, must compose the superposition.
\section{Effective Dimension and Coarse-graining}
\label{sec:decg}
One should not expect to have equilibration for small systems, since thermodynamics and statistical
physics only predict it for macroscopic systems. This is consistent with Eq. \ref{equilibr} since the upper
bound can only be small for large systems. In fact, one
should study the behavior of the average fluctuations with the number $N$ of particles; as in
textbook statistical mechanics, where one shows that the relative fluctuation decays with $\sqrt{N}$.
Here we will analyze the behavior of $d_{eff}$ with $N$, using exact diagonalization, as it is
an upper bound on the average fluctuation: a large effective dimension guarantees equilibration. Thus, we will look at how $d_{eff}$ scales with the system size $N$. For non-integrable systems, it is expected that $d_{eff}$ scales exponentially with $n$ \footnote{One may argue that a polynomial scaling may also be enough for equilibration of local observables and this is expected for some models that can be mapped in free fermions \cite{Venuti}.}.
\begin{figure}
\caption{\small Effective dimension for case (a) and (b) as a function of the dimension $D = 2^n$ of the Hilbert space for different $k$-flipped initial states (intensity of the coarse-graining description) for Hamiltonian \ref{Hamiltonian}.}
\label{EdxD}
\end{figure}
To analyze the scaling of $d_{eff}$ with $n$, in Fig.~\ref{EdxD} we show, in a log-log scale, the effective dimension of the coarse-grained initial state as a function of the total Hilbert space dimension $D=2^{n}$ for spin chains with $n$ from 2 to 16 spins. Fig.~\ref{EdxD}(a) is for case (a) and Fig.~\ref{EdxD}(b) is for case (b). In both figures, the different curves are for initial states with different levels of coarse-graining, given by $k$. In a log-log graph, if equilibration is present, the effective dimension is expected to be linear in the system's Hilbert space dimension. The larger the slope of the line, the stronger the equilibration behavior. Notice that for the $0$-flipped initial state, the effective dimension is almost constant in both (a) and (b) graphs, which means no equilibration. The behavior of the graph then suggests that the coarse-graining induces equilibration: the greater the lack of resolution, the greater the trend towards equilibration. Note that case (b) has more coarse-graining since it also includes a lack of knowledge of the total magnetization. Thus, its initial states contain more states in the superposition and it is expected to have a larger $d_{eff}$; this can be seen in the larger values for $d_{eff}$ in Fig.~\ref{EdxD}(b) when compared to Fig.~\ref{EdxD}(a). The curves in Fig.~\ref{EdxD} start at values of $D$ for which $n > 2k$. The reason is that the set of states is symmetric with respect to $k = \frac{n}{2}$. For example, a state of three spins with two spins up and one down is equivalent (same magnetization modulus) to a state with one spin down and two spins up.
To have a more quantitative analysis, we fit the curves in Fig. \ref{EdxD} with the function $b \, D^a$, except the $7$-flipped curve, since it only has two points. In the log-log scale, the corresponding function represents linear fits with $a$ the angular coefficient, or the slope, of the lines. Fig. \ref{kxa} shows the slope coefficient $a$ versus $k$. The important point here is the fact that the greater the number of flipped-down spins (in other words, the coarse-graining), the greater the slope of the linear fit in Fig. \ref{EdxD} and the tendency towards equilibration. This confirms the qualitative behavior of Fig. \ref{EdxD}.
\begin{figure}
\caption{\small Slope of linear fits of the points in the graph of effective dimension versus $D = 2^n$ as a function of $k$ for case (a) (red points) and case (b) (blue points). The greater the coarseness of our description of the initial state, the greater the slope and the tendency to equilibrate.}
\label{kxa}
\end{figure}
As mentioned before, $d_{eff}$ directly measures how many energy eigenstates contribute to the initial pure state. To see how our coarse-grained states behave in terms of the energy eigenstates, we look at the weighted energy distribution of the states \cite{Lea1}. This distribution is often called the Local Density of States (LDOS) or strength function, given by
\begin{equation}
P(E)= \sum_k |c_k|^2 \delta(E-E_k).
\end{equation}
Note that $d_{eff}$ is related to $P(E)$; in fact, it is the "purity" of the distribution $P(E)$. As $P(E)$ is more distributed in the spectrum, larger is $d_{eff}$. The LDOS has been studied in many models and realistic systems with two-body interactions. Usually, the density of states is Gaussian and the initial state can only reach a high level of delocalization if its average energy is in the middle of the spectrum (see \cite{Lea1} for a review of some models and random Hamiltonians).
In Fig.~\ref{espectro} we show the LDOS for cases (a) and (b) as a function of the energy for $n=15$ and four different sets of $k$-flipped spins. For other values of $n$, the behavior is similar. Each bar corresponds to an interval of two units of energy, and the numbers in the frame ticks are the median values. The height of the bars is calculated by adding the quantities $|c_{k}|^2$ present in each energy interval. The energy spectrum for Hamiltonian \ref{Hamiltonian} is depicted in Fig.~\ref{Heigen}, showing, as expected, a Gaussian shape centered at zero. From Fig.~\ref{espectro}, we can see that an initial state with larger coarse-graining has a broader LDOS, which allows for larger $d_{eff}$ and better equilibration, as expected from the upper bound in Eq.~\ref{equilibr}. We can also see that the initial state without any coarse-graining
is concentrated at the border of the spectrum, with a very peaked LDOS and small $d_{eff}$.
\begin{figure*}
\caption{\small Sum of square modulus of coefficients $c_{k}$ in each energy interval (LDOS) for cases (a) and (b), for $n=15$ and different sets of $k$-flipped spins.}
\label{espectro}
\end{figure*}
\begin{figure}
\caption{\small Histogram of energy eigenvalues for Hamiltonian \ref{Hamiltonian}.}
\label{Heigen}
\end{figure}
We should mention that we found the same behavior of the effective dimension in Figure \ref{EdxD} for some other values of the parameters of $H$ in the non-integrable regime. It is also worth noting that while coarse graining is incorporated in the preparation and description of the initial state, it is not applied to the observable. Thus, while the initial state may have a coarse resolution, we can precisely measure the magnetization observable \ref{observable} in the situation under consideration.
\section{Equilibration time and Coarse-graining}
\label{sec:eqcg}
The upper bound in Eq.~\ref{equilibr} gives sufficient conditions for a system to equilibrate, but, as infinite time averages are used, there is no information about the time scales
for the equilibration to happen. And, if the time scales involved are very large, the results are not relevant to explain the rapid equilibration we observe in nature. Most
of the numerical simulations show reasonable equilibration times, but there is still no consensus about general proofs. If one considers finite time averages, for example,
the equilibration time obtained increases with the smallest energy gap, and, therefore, exponentially with the system size for local Hamiltonians \cite{Short12}. Some works are trying to obtain an equilibration time: see \cite{Wilming18}, supplementary information section in \cite{Reimann16} and introduction in \cite{ThiagoNJ}
for a brief survey of the literature. Here we will use the one proposed by one of us and collaborators in \cite{ThiagoNJ}. Let's consider the time evolution of
the relative fluctuation, given by
\begin{equation}
g(t) = \frac{M_z(t)-\overline{M_z}}{\delta_{M_{z}}} = \sum_{i \ne j} (c_{j}^{*} A_{ij} c_{i}) e^{-i(E_{i}-E_{j})t} = \sum_{\alpha} \nu_{\alpha} e^{i G_{\alpha} t},
\label{timesignal}
\end{equation}
with $\delta_{M_{z}}$ the difference between the highest and lowest eigenvalues, $G_{\alpha} = G_{(i,j)} = E_{j} - E_{i}$ the energy gaps, $\nu_{\alpha} = \nu_{(i,j)} = c_{j}^{*} A_{ji} c_{i}/2$ and $A_{ij}=\langle E_i|\hat{M}_z|E_j\rangle$.
The main idea to obtain an estimate of the equilibration time is to note that the size of the fluctuation is the modulo of a sum of complex numbers. If one considers each complex number as a vector in the complex plane, it is clear that an initial out-of-equilibrium $\psi_0$ has
similar complex number phases, such that the sum does not cancel out. But, as time evolves, each number gets a different phase and the terms in the sum start to cancel each other. This is the usual dephasing mechanism, and an estimate for the dephasing time is the time for the initial phases to spread around $2\pi$. Based on these arguments, an estimative for the equilibration time is given by
\begin{equation}
T_{eq} \sim \pi / \sqrt{\sum_{\alpha} q_{\alpha} G_{\alpha}^2},
\label{Teq}
\end{equation}
with $q_{\alpha}:= |\nu_{\alpha}|^2/\sum_{\beta} |\nu_{\beta}|^2 $. The denominator in Eq.~\ref{Teq} is the dispersion of the distribution of the energy gaps weighted by the $c_k$ and $A_{ij}$ (to take into account the role of the initial state and observable).
In Fig.~\ref{TimeSignal} we show the square modulus of the time evolution of the relative fluctuation $g(t)$ for both cases (a) and (b) for $n=13$ and different values of the parameter $k$. The information about the equilibration time may be extracted as the first time the curve becomes close to zero. We can see that most curves seem to equilibrate between $Jt=1$ and $Jt=5$. In Fig.~\ref{eqtime} we show the estimates for equilibration times computed via Eq.~\ref{Teq} for cases (a) and (b) as a function of $n$, the number of particles. We can see that the estimate $T_{eq}$ seems to slowly decrease with $k$. As mentioned, this is an estimate and not a very precise prediction. In fact, the value for $k=0$ seems to be a bit larger than the one we would obtain from Fig.~\ref{TimeSignal}, but all values are of the same order of magnitude. These results suggest that coarse-graining may not only increase the degree of equilibration but also dictate how fast it happens.
\begin{figure}
\caption{\small Time evolution of the relative fluctuation \ref{timesignal}.}
\label{TimeSignal}
\end{figure}
\begin{figure}
\caption{\small Equilibration time versus $n$ for case (a) and case (b) for Hamiltonian \eqref{Hamiltonian}.}\label{eqtime}
\end{figure}
\section{Conclusions and Perspectives}
There is a renewed interest in the study of equilibration and thermalization of isolated quantum systems. In this scenario, the system may appear to be at equilibrium due to the fact that most of the time the fluctuation of some observables is too small to be measured. There are rigorous theorems showing the conditions for a Hamiltonian, observable, and initial state to equilibrate: the observable should be local, $H$ should not contain many degenerated gaps and the initial state has to be in a superposition of a large number of energy eigenstates. Therefore, to prepare an initial state that does not equilibrate one needs an apparatus with an incredibly good resolution for energy measures.
Thus, one can interpret the lack of resolution in the energy measurement/preparation, or a coarse-grained measurement/preparation, as the origin of equilibration. On the other hand, many numerical results show that typical spin-chain Hamiltonians with initial states that are products in the local bases equilibrate.
In this work, we studied how a lack of resolution in the magnetization of the initial state affects equilibration.
We studied two kinds of coarseness: (a) spatial coarse-graining, in which we do not have the spatial resolution to measure the magnetization of particular spins; (b) a magnetization coarse-graining, in which we have a lower bound in the initial state's magnetization spectrum. We found, for a chaotic spin chain, that more coarse-grained initial states have a higher level of equilibration; with smaller fluctuation around the equilibrium. In particular, the initial state without any coarse graining does not seem to equilibrate at all. We also studied how these magnetic coarse-grained states are decomposed in the energy bases,
showing that more coarse-grained states have a more delocalized distribution in the energy bases. This is in agreement with rigorous bounds that connect equilibration with the level of coarse-graining in the energy. Finally, we studied how fast equilibration happens, showing that the estimates proposed in \cite{ThiagoNJ} do predict the order of magnitude of the equilibration time. We also found some evidence that the equilibration time may decrease with the level of coarse-graining.
As perspectives, it would be interesting to study the effects of coarse-graining in other models and to consider other kinds of coherent superpositions besides the "uniform" one used here. We considered a coarse-grained initial state which is a uniform superposition of all possible states with a well-defined magnetization. A natural possible next step is to put a different probability distribution in the weights of the superposition to see if it is possible to identify changes in the results. Or even treat the initial states as statistical mixtures instead of coherent sums.
\begin{acknowledgments}
This work is supported by the Instituto Nacional de Ciência e Tecnologia de Informação Quântica (465469/2014-0), and by the Air Force Office of Scientific Research under award number FA9550-19-1-0361.
\end{acknowledgments}
\end{document} |
\begin{document}
\title{{\bf Microlocal analysis of generalized functions:
pseudodifferential techniques and propagation of singularities}}
\author{Claudia Garetto \\
Dipartimento di Matematica\\ Universit\`a di Torino, Italia\\
\texttt{[email protected]}\\
\ \\
G\"{u}nther H\"{o}rmann\footnote{Supported by FWF grant P14576-MAT,
current address: Institut f\"ur Technische Mathematik, Geometrie und Bauinformatik,
Universit\"at Innsbruck}\\
Institut f\"ur Mathematik\\
Universit\"at Wien, Austria\\
\texttt{[email protected]}
}
\date{February 2004}
\maketitle
\begin{abstract} We characterize microlocal regularity, in the
$\mathcal{G}^\infty$-sense,
of Colombeau generalized functions by an appropriate extension of the classical
notion of micro-ellipticity to pseudodifferential operators with slow scale
generalized symbols. Thus we obtain an alternative, yet equivalent, way to
determine generalized wave front sets, which is analogous to the original
definition of the wave front set of distributions via intersections over
characteristic sets. The new methods are then applied to regularity theory of
generalized solutions of (pseudo-)differential equations, where we extend the
general noncharacteristic regularity result for distributional solutions and
consider propagation of $\mathcal{G}^\infty$-singularities for homogeneous first-order
hyperbolic equations.\\
\emph{AMS 2000 subject classification:} primary 46F30; 35S99, 35D10.
\end{abstract}
\setcounter{section}{-1}
\section{Introduction}
Microlocal analysis in Colombeau algebras of generalized functions, as it has
been initiated (in published form) in~\cite{DPS:98,NPS:98}, is a compatible
extension of its distribution theoretic analogue to the realm of an
unrestricted differential-algebraic context. The main emphasis in recent
research on the subject is on microlocal properties of basic nonlinear
operations as well as on regularity theory for generalized solutions to partial
(and pseudo-) differential equations (cf.\
\cite{NP:98,HK:01,HdH:01,HOP:03,GH:03,Garetto:04,GGO:03}).
For Schwartz distributions the so-called elementary characterization of
microlocal regularity is a corollary to its original definition via
characteristic sets under pseudodifferential actions (cf.
\breakite{Hoermander:71}),
whereas the intuitively appealing function-like aspect of Colombeau generalized
functions seems to have fostered a ``generalized elementary'' approach as being
a natural definition there. This may have two reasons: first, the new
microlocal regularity notion is based on $\ensuremath{{
\breakal G}}inf$-regularity, which coincides
with $\ensuremath{\mathcal{C}^\infty}$-regularity in case of embedded distributions, and was introduced
in
\breakite{O:92} in direct analytical terms in form of asymptotic estimates of the
derivatives; second, as soon as one puts oneself into the much wider setting of
Colombeau spaces -- with the possibility to allow for highly singular symbols
of (pseudo-)differential operators -- the question of good choices for
appropriate generalized notions of the characteristic set or
(micro-)ellipticity turns into a considerable and crucial part of the research
issue (cf.\
\breakite{HdH:01,HO:03,HOP:03,GGO:03}).
In the present paper, we succeed to prove characterizations of the generalized
wave front set of a Colombeau generalized functions in terms of intersections
over certain non-ellipticity domains corresponding to pseudodifferential
operators yielding $\ensuremath{{
\breakal G}}inf$-regular images. Thus we obtain direct analogues of
H\"ormander's definition of the distributional wave front set given in
\breakite{Hoermander:71}. Moreover, as first test applications of the new results
we discuss a generalization of the noncharacteristic regularity theorem for
pseudodifferential equations and propagation of $\ensuremath{{
\breakal G}}inf$-singularities (or
rather, generalized wave front sets) for generalized solutions of first-order
hyperbolic differential equations with $\ensuremath{\mathcal{C}^\infty}$-coefficients.
In the remainder of the introductory section we fix some notation and review
basic notions from Colombeau theory. Section 1 provides the technical
background on the generalized symbol classes used later on and introduces an
appropriate micro-ellipticity notion. The theoretical core of the paper is
Section 2 where the main results on micro-locality, microsupport, and the wave
front set characterizations are proven. Section 3 discusses applications to
regularity theory of generalized solutions of (pseudo-)differential equations.
Since by now several variants of pseudodifferential operator approaches in
Colombeau algebras and generalized symbol calculi occur in the literature
(
\breakite{NPS:98,Garetto:04,GGO:03}), and we employ yet a slightly different one
here, we decided to sketch the basics of such a rather general scheme of
calculus in the Appendix, the skeleton of which is structurally close to the
comprehensive treatment in
\breakite{GGO:03}.
We point out that~\cite{NPS:98} already includes a result on micro-locality
(similar to our Theorem~\ref{theorem_micro_support}) of actions of generalized
pseudodifferential operators, whose definition is based solely on regularizing
nets of symbols, but not Colombeau classes, and uses Fourier integral
representations with additional asymptotic cut-offs. The definitions of the
operator actions can thus be compared in a weak sense only.
\subsection{Notation and basic notions from Colombeau theory}
We use
\breakite{Colombeau:84,Colombeau:85,O:92,GKOS:01} as standard references for
the foundations and various applications of Colombeau theory. We will work with
the so-called special Colombeau algebras, denoted by $\ensuremath{{
\breakal G}}^s$ in
\breakite{GKOS:01},
although here we will consistently drop the superscript $s$ to avoid notational
overload.
We briefly recall the basic construction. Throughout the paper $\Omega$ will
denote an open subset of $\mb{R}^n$. \emph{Colombeau generalized functions} on
$\Omega$ are defined as equivalence classes $u =
\breakl{(u_\varepsilon)_\varepsilon}$ of nets of
smooth functions $u_\varepsilon\in\ensuremath{\mathcal{C}^\infty}(\Omega)$ (\emph{regularizations}) subjected to
asymptotic norm conditions with respect to $\varepsilon\in (0,1]$ for their
derivatives on compact sets. More precisely, we have
\betagin{itemize}
\item moderate nets $\ensuremath{{
\breakal E}}M(\Omega)$: $(u_\varepsilon)_\varepsilon\in\ensuremath{\mathcal{C}^\infty}(\Omega)^{(0,1]}$ such that
for all $K \mathscr{S}ubset \Omega$ and $\alpha\in\mb{N}^n$ there exists $p \in \mb{R}$ such that
\betagin{equation}\lambdabel{basic_estimate}
\norm{\ensuremath{\partial}^\alpha u_\varepsilon}{L^\infty(K)} = O(\varepsilon^{-p}) \qquad (\varepsilon \to 0);
\end{equation}
\item negligible nets $\mb{N}N(\Omega)$: $(u_\varepsilon)_\varepsilon\in \ensuremath{{
\breakal E}}M(\Omega)$ such that for all
$K \mathscr{S}ubset \Omega$ and for all $q\in\mb{R}$ an
estimate $\norm{u_\varepsilon}{L^\infty(K)} = O(\varepsilon^{q})$ ($\varepsilon \to 0$) holds;
\item $\ensuremath{{
\breakal E}}M(\Omega)$ is a differential algebra with operations defined at fixed $\varepsilon$,
$\mb{N}N(\Omega)$ is an ideal, and $\ensuremath{{
\breakal G}}(\Omega) := \ensuremath{{
\breakal E}}M(\Omega) / \mb{N}N(\Omega)$ is the (special)
\emph{Colombeau algebra};
\item there are embeddings, $\ensuremath{\mathcal{C}^\infty}(\Omega)\hookrightarrow \ensuremath{{
\breakal G}}(\Omega)$ as subalgebra and
$\ensuremath{{
\breakal D}}'(\Omega) \hookrightarrow \ensuremath{{
\breakal G}}(\Omega)$ as linear space, commuting with partial derivatives;
\item $\Omega \to \ensuremath{{
\breakal G}}(\Omega)$ is a fine sheaf and $\ensuremath{{
\breakal G}}c(\Omega)$ denotes
the subalgebra of elements with compact support; by a
cut-off in a neighborhood of the support one can always obtain
representing nets with supports contained in a joint compact set.
\end{itemize}
The subalgebra $\ensuremath{{
\breakal G}}inf(\Omega)$ of \emph{regular Colombeau}, or
\emph{$\ensuremath{{
\breakal G}}inf$-regular}, generalized functions consists of those elements in
$\ensuremath{{
\breakal G}}(\Omega)$ possessing representatives such that estimate (\ref{basic_estimate})
holds for a certain $m$ uniformly over all $\alpha\in\mb{N}^n$. We will occasionally
use the notation $\ensuremath{{
\breakal E}}Minf(\Omega)$ for the set of such nets of regularizations. In
a similar sense, we will denote by $\ensuremath{{
\breakal E}}Sinf(\mb{R}^n)$ the set of regularizations
$(v_\varepsilon)_\varepsilon\in\mathscr{S}(\mb{R}^n)^{(0,1]}$ with a uniform asymptotic power of $\varepsilon$-growth for
all $\mathscr{S}$-seminorms of $v_\varepsilon$.
A Colombeau generalized function $u = [(u_\varepsilon)_\varepsilon]\in\ensuremath{{
\breakal G}}(\Omega)$ is said to be
\emph{generalized microlocally regular}, or \emph{$\ensuremath{{
\breakal G}}inf$-microlocally
regular}, at $(x_0,\xi_0)\in \mb{C}O{\Omega} = \Omega\times (\mb{R}^n\setminus\{0\})$
(cotangent bundle with the zero section removed) if there is
$\phi\in\ensuremath{\mathcal{C}^\infty}c(\Omega)$
with $\phi(x_0) = 1$ and a conic neighborhood $\ensuremath{{
\breakal G}}a\subseteq\mb{R}^n\setminus\{0\}$
of $\xi_0$ such that $\ensuremath{{
\breakal F}}(\phi u)$ is \emph{(Colombeau) rapidly
decreasing} in $\ensuremath{{
\breakal G}}a$ (cf.\
\breakite{Hoermann:99}), i.e., there exists $N$ such that for all $l$ we have
\betagin{equation}\lambdabel{rap_dec_Ga}
\sup\limits_{\xi\in\ensuremath{{
\breakal G}}a} \lambdara{\xi}^l |(\phi u_\varepsilon)\ensuremath{{
\breakal F}}T{\ }(\xi)| = O(\varepsilon^{-N})
\qquad (\varepsilon \to 0),
\end{equation} where we have used the standard notation $\lambdara{\xi} = (1 +
|\xi|^2)^{1/2}$. Note that, instead of specifying the test function $\phi$ as
above, one may equivalently require the existence of an open neighborhood $U$
of $x_0$ such that for all $\phi\in\ensuremath{\mathcal{C}^\infty}c(U)$ the estimate (\ref{rap_dec_Ga})
holds.
Finally, we will use the term \emph{proper cut-off function} for any
$
\breakhi\in\ensuremath{\mathcal{C}^\infty}(\Omega\times\Omega)$ such that $\mathrm{supp}(
\breakhi)$ is a proper subset of
$\Omega\times\Omega$ (i.e., both projections are proper maps) and $
\breakhi =1$ in a
neighborhood of the diagonal $\{(x,x) : x\in\Omega\}\subset\Omega\times\Omega$.
\section{Slow scale micro-ellipticity}
The pseudodifferential operator techniques which we employ are based on a generalization of the classical symbols spaces $S^m(\Omega\times\mb{R}^n)$ (cf.\
\breakite{Hoermander:71}). These spaces are Fr\'{e}chet spaces endowed with the seminorms
\betagin{equation}\lambdabel{seminorm}
|a|^{(m)}_{K,\alphapha,\betata} = \sup_{x\in K,\xi\in\mb{R}^n}\lambdangle\xi\rangle^{-m+|\alphapha|}|\partial^\alphapha_\xi\partial^\betata_x a(x,\xi)|,
\end{equation}
where $K$ ranges over the compact subsets of $\Omega$. Several types of Colombeau generalized symbols are studied in
\breakite{NPS:98, Garetto:04, GGO:03, GH:03} providing a pseudodifferential calculus and regularity theory on the level of operators. In the current paper, we focus on the microlocal aspects. To this end we introduce a particularly flexible class of symbols with good stability properties with respect to lower order perturbations. In the spirit of the earlier Colombeau approaches, our symbols are defined via families $(a_\varepsilon)_{\varepsilon}\in S^m(\Omega\times\mb{R}^n)^{(0,1]} =: {\mathcal{S}}^m[\Omega\times\mb{R}^n]$ of regularizations, subjected to asymptotic estimates of the above seminorms in terms of $\varepsilon$. The particular new feature is a slow scale growth, which proved to be essential in regularity theory (cf.
\breakite{HO:03, HOP:03, GGO:03}). This property is measured by the elements of the following set of \emph{strongly positive slow scale nets}
\betagin{equation}\lambdabel{slow_scale}\betagin{split}
{\rm{P}}i_\mathrm{sc} := \{ (\omegaega_\varepsilon)_\varepsilon\in\mb{R}^{(0,1]}\, :\quad &\exists c > 0\, \forall \varepsilon : c \le \omegaega_\varepsilon,\\
&\forall p \mathrm{g}e 0\, \exists c_p > 0\, \forall\varepsilon: \omegaega_\varepsilon^p \le c_p\, \varepsilon^{-1} \}.
\end{split}\end{equation}
\betagin{definition}\lambdabel{def_ssc_symbols}
Let $m$ be a real number.
The set of slow scale nets of symbols of order $m$ is defined by
\betagin{equation}\lambdabel{def_ssc_symbols_mod}\betagin{split}
\underline{\mathcal{S}}_{\,\ssc}^m(\Omega\times\mb{R}^n) := \{ (a_\varepsilon)_\varepsilon\in {\mathcal{S}}^m[\Omega\times\mb{R}^n]:\, &\forall K \mathscr{S}ubset \Omegaega\ \exists (\omegaega_\varepsilon)_\varepsilon \in {\rm{P}}i_\mathrm{sc}\\
& \forall \alphapha, \betata \in \mb{N}^n\, \exists c>0\, \forall \varepsilon:\, |a_\varepsilon|^{(m)}_{K,\alphapha,\betata} \le c\, \omegaega_\varepsilon \},
\end{split}\end{equation}
the negligible nets of symbols of order $m$ are the elements of
\betagin{equation}\lambdabel{def_negligible}\betagin{split}
\mb{N}u^m(\Omega\times\mb{R}^n) := \{ (a_\varepsilon)_\varepsilon\in {\mathcal{S}}^m[\Omega\times\mb{R}^n]:\, &\forall K \mathscr{S}ubset \Omegaega\, \forall \alphapha, \betata \in \mb{N}^n\\
&\forall q\in\mb{N}, \exists c>0\, \forall \varepsilon:\, |a_\varepsilon|^{(m)}_{K,\alphapha,\betata} \le c\, \varepsilon^q \}.
\end{split}
\end{equation}
The classes of the factor space
\[
{\wt{\underline{\mathcal{S}}}}_{\,\ssc}^m(\Omega\times\mb{R}^n) := \underline{\mathcal{S}}_{\,\ssc}^m(\Omega\times\mb{R}^n) / \mb{N}u^m(\Omega\times\mb{R}^n)
\]
are called \emph{slow scale generalized symbols of order $m$}.\\
Furthermore, let $\mb{N}uinf(\Omega\times\mb{R}^n):= \bigcap_m \mb{N}u^m(\Omega\times\mb{R}^n)$ be the negligible nets of order $-\infty$. The \emph{slow scale generalized symbols of refined order $m$} are given by
\[
{\wt{\underline{\mathcal{S}}}}_{\,\ssc}^{m/-\infty}(\Omega\times\mb{R}^n) := \underline{\mathcal{S}}_{\,\ssc}^m(\Omega\times\mb{R}^n) / \mb{N}uinf(\Omega\times\mb{R}^n).
\]
\end{definition}
Note that ${\wt{\underline{\mathcal{S}}}}_{\,\ssc}^{m/-\infty}(\Omega\times\mb{R}^n)$ can be viewed as a finer partitioning of the classes in ${\wt{\underline{\mathcal{S}}}}_{\,\ssc}^m(\Omega\times\mb{R}^n)$, in other words, if $a$ is a slow scale generalized symbol of order $m$ then $\forall (b_\varepsilon)_\varepsilon\in a$:
\betagin{equation}
\lambdabel{kappa}
\kappa((b_\varepsilon)_\varepsilon) := (b_\varepsilon)_\varepsilon +\mb{N}uinf(\Omega\times\mb{R}^n)\subseteq (b_\varepsilon)_\varepsilon + \mb{N}u^{m}(\Omega\times\mb{R}^n) = a.
\end{equation}
Slow scale generalized symbols enable us to design a particularly simple, yet sufficiently strong, notion of micro-ellipticity.
\betagin{definition}
\lambdabel{def_micro_ellipticity}
Let $a \in {\wt{\underline{\mathcal{S}}}}_{\,\ssc}^m(\Omega\times\mb{R}^n)$ and $(x_0,\xi_0) \in \mb{C}O{\Omega}$. We say that $a$ is slow scale micro-elliptic at $(x_0,\xi_0)$ if it has a representative $(a_\varepsilon)_\varepsilon$ satisfying the following: there is an relatively compact open neighborhood $U$ of $x_0$, a conic neighborhood $\ensuremath{{
\breakal G}}amma$ of $\xi_0$, and $(r_\varepsilon)_\varepsilon , (s_\varepsilon)_\varepsilon$ in ${\rm{P}}i_\mathrm{sc}$ such that
\betagin{equation}
\lambdabel{estimate_below}
| a_\varepsilon(x,\xi)| \mathrm{g}e \frac{1}{s_\varepsilon} \lambdara{\xi}^m\qquad\qquad (x,\xi)\in U\times\ensuremath{{
\breakal G}}amma,\, |\xi| \mathrm{g}e r_\varepsilon,\, \varepsilon \in (0,1].
\end{equation}
We denote by $\ensuremath{{
\breakal E}}llsc(a)$ the set of all $(x_0,\xi_0) \in \mb{C}O{\Omega}$ where $a$ is slow scale micro-elliptic.\\
If there exists $(a_\varepsilon)_\varepsilon \in\ a$ such that \eqref{estimate_below} holds at all points in $\mb{C}O{\Omega}$ then the symbol $a$ is called \emph{slow scale elliptic}.
\end{definition}
Note that here the use of the attribute \emph{slow scale} refers to the appearance of the slow scale lower bound in \eqref{estimate_below}. This is a crucial difference to more general definitions of ellipticity given in
\breakite{Garetto:04, GGO:03, HOP:03}, whereas a similar condition was already used in
\breakite[Section 6]{HO:03} in a special case. In fact, due to the overall slow scale conditions in Definition \ref{def_ssc_symbols}, any symbol which is slow scale micro-elliptic at $(x_0,\xi_0)$ fulfills the stronger hypoellipticity estimates
\breakite[Definition 6.1]{GGO:03}; furthermore, \eqref{estimate_below} is stable under lower order (slow scale) perturbations.
\betagin{proposition}
\lambdabel{proposition_lower_order}
Let $(a_\varepsilon)_\varepsilon \in \underline{\mathcal{S}}_{\,\ssc}^m(\Omega\times\mb{R}^n)$ satisfy \eqref{estimate_below} in $U \times \ensuremath{{
\breakal G}}amma \ni (x_0,\xi_0)$. Then
\betagin{enumerate}
\item for all $\alphapha,\betata \in \mb{N}^n$ there exists $(\lambdambda_\varepsilon)_\varepsilon \in {\rm{P}}i_\mathrm{sc}$ such that
\[
|\partial^\alphapha_\xi \partial^\betata_x a_\varepsilon(x,\xi)| \le \lambdambda_\varepsilon |a_\varepsilon(x,\xi)| \lambdara{\xi}^{-|\alphapha|}\qquad (x,\xi) \in U\times\ensuremath{{
\breakal G}}amma,\, |\xi|\mathrm{g}e r_\varepsilon,\, \varepsilon\in(0,1];
\]
\item for all $(b_\varepsilon)_\varepsilon \in \underline{\mathcal{S}}_{\,\ssc}^{m'}(\Omega\times\mb{R}^n)$, $m'<m$, there exist $(r'_\varepsilon)_\varepsilon$, $(s'_\varepsilon)_\varepsilon \in {\rm{P}}i_\mathrm{sc}$ such that
\[
|a_\varepsilon(x,\xi)+b_\varepsilon(x,\xi)| \mathrm{g}e \frac{1}{s'_\varepsilon}\lambdara{\xi}^m\qquad (x,\xi) \in U\times\ensuremath{{
\breakal G}}amma,\, |\xi|\mathrm{g}e r'_\varepsilon,\, \varepsilon\in(0,1].
\]
\end{enumerate}
\end{proposition}
\betagin{proof}
Combining \eqref{estimate_below} with the seminorm estimates of $(a_\varepsilon)_\varepsilon \in \underline{\mathcal{S}}_{\,\ssc}^m(\Omega\times\mb{R}^n)$, we have that for $(x,\xi) \in U\times\ensuremath{{
\breakal G}}amma$, $|\xi|\mathrm{g}e r_\varepsilon$, $\varepsilon\in(0,1]$
\[
|\partial^\alphapha_\xi \partial^\betata_x a_\varepsilon(x,\xi)|\le c\, \omegaega_\varepsilon \lambdara{\xi}^{m-|\alphapha|}\le c\, \omegaega_\varepsilon s_\varepsilon |a_\varepsilon(x,\xi)| \lambdara{\xi}^{-|\alphapha|},
\]
so that assertion (i) holds with $\lambdambda_\varepsilon = c\, \omegaega_\varepsilon s_\varepsilon$. To prove (ii), again by \eqref{estimate_below} for $(a_\varepsilon)_\varepsilon$ and the seminorm estimates for $(b_\varepsilon)_\varepsilon$ we obtain
\[
|a_\varepsilon(x,\xi)+b_\varepsilon(x,\xi)|\mathrm{g}e \frac{1}{s_\varepsilon}\lambdara{\xi}^m - c\, \omegaega_\varepsilon \lambdara{\xi}^{m'}
= \lambdara{\xi}^m \big( \frac{1}{s_\varepsilon} - c\, \omegaega_\varepsilon \lambdara{\xi}^{m'-m}),
\]
which is bounded from below by $\lambdara{\xi}^m / {2s_\varepsilon}$ whenever $(x,\xi)\in U\times\ensuremath{{
\breakal G}}amma$ with $|\xi| \mathrm{g}e \max( r_\varepsilon , (2c\, \omegaega_\varepsilon s_\varepsilon)^{1/(m-m')})$.
\end{proof}
\betagin{remark}\leavevmode
\lambdabel{remark_ellipticity}
\betagin{trivlist}
\item[(i)] In case of classical symbols the notion of slow scale
micro-ellipticity coincides with the classical one, which equivalently is
defined as the set of noncharacteristic points. Indeed, if
$a\in\mathcal{S}^m(\Omega\times\mb{R}^n)$ and $(a_\varepsilon)_\varepsilon$ is a representative of
the class of $a$ in ${\wt{\underline{\mathcal{S}}}}_{\,\ssc}^m(\Omega\times\mb{R}^n)$ satisfying
\eqref{estimate_below} then for any $q\in\mb{N}$
\[
|a(x,\xi)| \mathrm{g}e |a_\varepsilon(x,\xi)|-|(a-a_\varepsilon)(x,\xi)|\mathrm{g}e \lambdara{\xi}^m(\frac{1}{s_\varepsilon}-c\varepsilon^q),
\]
where we are free to fix $\varepsilon$ small enough such that the last factor is
bounded away from $0$. In particular, we have that $
\breakompl{\ensuremath{{
\breakal E}}llsc(a)} =
\mb{C}har(a(x,D))$.
\item[(ii)] The same Definition \ref{def_micro_ellipticity} can be applied to
symbols of refined order. In that case, by Proposition
\ref{proposition_lower_order}, \eqref{estimate_below} will hold for any
representative once it is known to hold for one. Moreover, if
$a\in{\wt{\underline{\mathcal{S}}}}_{\,\ssc}^{m/-\infty}(\Omega\times\mb{R}^n)$ and $
\breakompl{\ensuremath{{
\breakal E}}llsc(a)} = \emptyset$
then $a$ is slow scale elliptic.
\end{trivlist}
\end{remark}
Thanks to the previous proposition the simple slow scale ellipticity condition in Definition \ref{def_micro_ellipticity} already guarantees the existence of a parametrix. For the proof we refer to
\breakite[Section 6]{GGO:03}; note that an inspection of the construction shows that the symbol of the parametrix has uniform growth $\varepsilon^{-1}$ over all compact sets. Regular symbols are introduced in Remark \ref{rem_special}.
\betagin{theorem}
\lambdabel{theorem_parametrix}
Let $a$ be a slow scale elliptic symbol of order $m$. Then there exists a properly supported pseudodifferential operator with regular symbol $p \in {\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{-m}(\Omega\times\mb{R}^n)$ such that for all $u\in\ensuremath{{
\breakal G}}c(\Omega)$
\[
\betagin{split}
a(x,D)
\breakirc p(x,D)u &= u + Ru,\\
p(x,D)
\breakirc a(x,D)u &= u + Su,
\end{split}
\]
where $R$ and $S$ are operators with regular kernel.
\end{theorem}
Note that if $a(x,D)$ is properly supported then the operators $R$ and $S$ are properly supported too and the previous equalities are valid for all $u$ in $\ensuremath{{
\breakal G}}(\Omega)$. In this situation, combining the construction of a parametrix with the pseudolocality property (see the Appendix for details), we obtain that $\mathrm{sing supp}_g(a(x,D)u) = \mathrm{sing supp}_g(u)$ for all $u\in\ensuremath{{
\breakal G}}(\Omega)$.\\
In the sequel $\Oprop{m}(\Omega)$ denotes the set of all properly supported operators $a(x,D)$ where $a$ belongs to ${\wt{\underline{\mathcal{S}}}}_{\,\ssc}^m(\Omega\times\mb{R}^n)$. We are now in a position to introduce a way to measure regularity of Colombeau generalized functions mimicking the original definition of the distributional wave front set in
\breakite{Hoermander:71} based on characteristic sets. As a matter of fact, the set constructed below as the complement of the slow scale micro-ellipticity regions will turn out to be the generalized wave front set in the sense of
\breakite{DPS:98, Hoermann:99}.
\betagin{definition}
Let $u\in\ensuremath{{
\breakal G}}(\Omega)$. We define
\betagin{equation}
\lambdabel{def_W_ssc}
\mathrm{W}_{\ssc}(u) :=\hskip-5pt \bigcap_{\substack{a(x,D)\in\,\Oprop{0}(\Omega)\\[0.1cm] a(x,D)u\, \in\, \ensuremath{{
\breakal G}}inf(\Omega)}}\hskip-5pt
\breakompl{\ensuremath{{
\breakal E}}llsc(a)}.
\end{equation}
\end{definition}
\betagin{remark}
\lambdabel{remark_any_order}
Note that the standard procedure of lifting symbol orders with $(1-\ensuremath{{
\breakal D}}elta)^{m/2}$ easily shows that we may as well take the intersection over operators $a(x,D)\in\Oprop{m}(\Omega)$ in \eqref{def_W_ssc}. The same holds for similar constructions introduced throughout the paper.
\end{remark}
Since $
\breakompl{\ensuremath{{
\breakal E}}llsc(a)}$ is a closed conic set, $\mathrm{W}_{\ssc}(u)$ is a closed conic subset of $\mb{C}O{\Omega}$ as well. Moreover, recalling that given $v\in\ensuremath{{
\breakal G}}inf(\Omega)$ and $a(x,D)$ properly supported, $a(x,D)(u+v)\in\ensuremath{{
\breakal G}}inf(\Omega)$ iff $a(x,D)u\in\ensuremath{{
\breakal G}}inf(\Omega)$, we have $\mathrm{W}_{\ssc}(u+v)=\mathrm{W}_{\ssc}(u)$.\\
We present now a first alternative way to define $\mathrm{W}_{\ssc}(u)$, which will be useful in course of our exposition. Denote by $\Oprop{m/-\infty}(\Omega)$ the set of all properly supported operators $a(x,D)$ where $a\in{\wt{\underline{\mathcal{S}}}}_{\,\ssc}^{m/-\infty}(\Omega\times\mb{R}^n)$; one can prove that
\betagin{equation}
\lambdabel{alt_def_W_ssc}
\mathrm{W}_{\ssc}(u) =\hskip-5pt \bigcap_{\substack{a(x,D)\in\,\Oprop{0/-\infty}(\Omega)\\[0.1cm] a(x,D)u\, \in\, \ensuremath{{
\breakal G}}inf(\Omega)}}\hskip-5pt
\breakompl{\ensuremath{{
\breakal E}}llsc(a)}.
\end{equation}
In fact, the crucial point is to observe that we do not change a pseudodifferential operator with generalized symbol $a$ by adding negligible nets of symbols of the same order or of order $-\infty$. To be more precise, if $a(x,D)\in\Oprop{0}(\Omega)$, $(x_0,\xi_0)\in\ensuremath{{
\breakal E}}llsc(a)$ with $(a_\varepsilon)_\varepsilon$ satisfying \eqref{estimate_below}, then $b(x,\xi):=(a_\varepsilon)_\varepsilon +\mb{N}u^{-\infty}(\Omega\times\mb{R}^n)$ is slow scale micro-elliptic at $(x_0,\xi_0)$, $b(x,D)\in\Oprop{0/-\infty}(\Omega)$ since $b(x,D)\equiv a(x,D)$ and consequently we have the inclusion $\supseteq$ in \eqref{alt_def_W_ssc}. For the reverse inclusion if $a(x,D)\in\Oprop{0/-\infty}(\Omega)$ with $(x_0,\xi_0)\in{\ensuremath{{
\breakal E}}llsc(a)}$, it is clear that $b=(a_\varepsilon)_\varepsilon +\mb{N}u^0(\Omega\times\mb{R}^n)$ is a well-defined element of ${\wt{\underline{\mathcal{S}}}}_{\,\ssc}^0(\Omega\times\mb{R}^n)$ and slow scale elliptic at $(x_0,\xi_0)$. Arguing as before we obtain \eqref{alt_def_W_ssc}.
From a technical point of view, the most interesting aspect of \eqref{alt_def_W_ssc} is the stability of micro-ellipticity estimates under variations of the representatives of $a$, valid for symbols of refined order due to Proposition \ref{proposition_lower_order} .
\betagin{proposition}
\lambdabel{prop_sing_supp}
Let $\pi:\mb{C}O{\Omega}\to\Omega:(x,\xi)\to x$. For any $u\in\ensuremath{{
\breakal G}}(\Omega)$,
\[
\pi(\mathrm{W}_{\ssc}(u)) = \mathrm{sing supp}_\mathrm{g}(u).
\]
\end{proposition}
\betagin{proof}
We first prove that $\Omega\setminus\mathrm{sing supp}_\mathrm{g}(u) \subseteq \Omega\setminus
\pi(\mathrm{W}_{\ssc}(u))$. Let $x_0\in\Omega\setminus\mathrm{sing supp}_\mathrm{g}(u)$. There exists
$\phi\in\ensuremath{\mathcal{C}^\infty}_{\rm{c}}(\Omega)$ such that $\phi(x_0)=1$ and $\phi u\in\ensuremath{{
\breakal G}}inf(\Omega)$. The multiplication operator $\phi(x,D):\ensuremath{{
\breakal G}}c(\Omega)\to\ensuremath{{
\breakal G}}c(\Omega):u\to\phi u$ is properly supported with symbol $\phi\in S^0(\Omega\times\mb{R}^n)\subseteq{\wt{\underline{\mathcal{S}}}}_{\,\ssc}^0(\Omega\times\mb{R}^n)$, which is micro-elliptic (slow scale micro-elliptic) at $(x_0,\xi_0)$ for each $\xi_0\neq 0$. Therefore, for all $\xi_0\neq 0$ we have that $(x_0,\xi_0)\in
\breakompl{\mathrm{W}_{\ssc}(u)}$ i.e. $x_0\in\Omega\setminus \pi(\mathrm{W}_{\ssc}(u))$.\\
To show the opposite inclusion, let $x_0\in\Omega\setminus\pi(\mathrm{W}_{\ssc}(u))$. Then for all $\xi\neq 0$ there exists $a\in{\wt{\underline{\mathcal{S}}}}_{\,\ssc}^{0/-\infty}(\Omega\times\mb{R}^n)$ slow scale micro-elliptic at $(x_0,\xi)$ such that $a(x,D)$ is properly supported and $a(x,D)u\in\mathcal{G}^\infty(\Omega)$. Since $S_{x_0} := \{x_0\}\times \{\xi: |\xi|=1\}$ is a compact subset of $\Omega\times\mb{R}^n$, there exist $a_1,\ldots,a_N\in{\wt{\underline{\mathcal{S}}}}_{\,\ssc}^{0/-\infty}(\Omega\times\mb{R}^n)$, $U$ a relatively compact open neighborhood of $x_0$ and $\Gamma_i$ conic neighborhoods of $\xi_i$ with $|\xi_i|=1$ ($i=1,\ldots,N$), with the following properties: $a_i$ is slow scale micro-elliptic in $U\times\Gamma_i$, $S_{x_0}\subseteq U\times\bigcup_{i=1}^N\Gamma_i$, $a_i(x,D)$ is properly supported, and $a_i(x,D)u\in\mathcal{G}^\infty(\Omega)$. Consider the properly supported pseudodifferential operator $A:=\sum_{i=1}^N a_i(x,D)^\ast a_i(x,D)$. By Theorem \ref{theorem_calculus} in the Appendix we may write $A=\sigma(x,D)\in\Oprop{0/-\infty}(\Omega)$, and combining assertions $(ii)$ and $(iii)$ of the same theorem, we have that
\begin{equation}
\label{differ_sigma}
\sigma-\sum_{i=1}^N|a_i|^2 \in {\wt{\underline{\mathcal{S}}}}_{\,\ssc}^{-1/-\infty}(\Omega\times\mb{R}^n).
\end{equation}
Since $a_i(x,D)u\in\mathcal{G}^\infty(\Omega)$ and each $a_i(x,D)^\ast$ maps $\mathcal{G}^\infty(\Omega)$ into $\mathcal{G}^\infty(\Omega)$, we conclude that $\sigma(x,D)u\in\mathcal{G}^\infty(\Omega)$.\\
It is clear that $\sum_{i=1}^N|a_i|^2$ is slow scale elliptic in $U$. In fact, every $\xi\neq 0$ belongs to some $\Gamma_i$ and, given $(s_{i,\varepsilon})_\varepsilon, (r_{i,\varepsilon})_\varepsilon$ satisfying \eqref{estimate_below} for $(a_{i,\varepsilon})_\varepsilon$, $s_\varepsilon:=\max_{i}(s^2_{i,\varepsilon})$, $r_\varepsilon:=\max_{i}(r_{i,\varepsilon})$, we get
\[
\sum_{i=1}^N|a_{i,\varepsilon}(x,\xi)|^2\ge \frac{1}{s_\varepsilon},\qquad x\in U,\ |\xi|\ge r_\varepsilon,\ \varepsilon\in(0,1].
\]
Let $U'\subset U''\subset U$ be open neighborhoods of $x_0$ with $\overline{U'}\subset U''$ and $\overline{U''}\subset U$, and let $\phi\in\ensuremath{\mathcal{C}^\infty}(\Omega)$, $0\le\phi\le 1$, be such that $\phi=0$ on $U'$ and $\phi=1$ on $\Omega\setminus U''$. By construction $b(x,\xi):=\phi(x)+\sigma(x,\xi)\in{\wt{\underline{\mathcal{S}}}}_{\,\ssc}^{0/-\infty}(\Omega\times\mb{R}^n)$, $b(x,D)\in\Oprop{0/-\infty}(\Omega)$ and $b(x,D)u_{\vert_{U'}} = \phi u_{\vert_{U'}} + \sigma(x,D)u_{\vert_{U'}} = \sigma(x,D)u_{\vert_{U'}} \in \mathcal{G}^\infty(U')$. Since $\sum_{i=1}^N|a_i|^2$ is slow scale elliptic in $U$, with a positive real valued representative, and $\phi$ is identically $1$ outside $U''$, we have that $\phi+\sum_{i=1}^N|a_i|^2$ is slow scale elliptic in $\Omega$. By \eqref{differ_sigma}, and an application of Proposition \ref{proposition_lower_order}(ii), $b$ itself is slow scale elliptic in $\Omega$. Then, by using a parametrix for $b(x,D)$ we conclude that $\operatorname{sing\,supp}_g(b(x,D)u) = \operatorname{sing\,supp}_g(u)$ and consequently $U'\cap\operatorname{sing\,supp}_g(u) =\emptyset$, which completes the proof.
\end{proof}
\section{Pseudodifferential characterization of the\\ generalized wave front set}
This section is devoted to the proof that $\mathrm{W}_{\ssc}(u)$ coincides with the generalized wave front set of $u$. Our approach will follow the lines of reasoning in \cite[Chapter 8]{Folland:95} and \cite[Section 18.1]{Hoermander:V3}. The main tool will be a generalization of the micro-support of a regular generalized symbol of order $m$ or refined order $m$.
\begin{definition}
\label{def_micro_support}
Let $a\in{\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m}(\Omega\times\mb{R}^n)$ and $(x_0,\xi_0)\in T^*(\Omega)\setminus 0$. The symbol $a$ is $\mathcal{G}^\infty$-smoothing at $(x_0,\xi_0)$ if there exist a representative $(a_\varepsilon)_\varepsilon$ of $a$, a relatively compact open neighborhood $U$ of $x_0$, a conic neighborhood $\Gamma$ of $\xi_0$, and a natural number $N$ such that
\begin{equation}
\label{estimate_micro}
\begin{split}
\forall m\in \mb{R}\ \forall\alpha ,\beta\in\mathbb{N}^n\ &\exists c>0\ \forall(x,\xi)\in U\times\Gamma\ \forall\varepsilon\in(0,1]:\\
&|\partial^\alpha_\xi\partial^\beta_x a_\varepsilon(x,\xi)|\le c\langle\xi\rangle^m \varepsilon^{-N}.
\end{split}
\end{equation}
We define the \emph{generalized microsupport} of $a$, denoted by $\mu\mathrm{supp}_g(a)$, as the complement of the set of points $(x_0,\xi_0)$ where $a$ is $\mathcal{G}^\infty$-smoothing.\\
If $a\in{\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m/-\infty}(\Omega\times\mb{R}^n)$ then we denote by $\mu_\g(a)$ the complement of the set of points $(x_0,\xi_0)\in T^*(\Omega)\setminus 0$ where \eqref{estimate_micro} holds for some representative of $a$.
\end{definition}
\begin{remark}\leavevmode
\label{remark_micro_support}
\begin{trivlist}
\item[(i)] Any $a\in{\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{-\infty}(\Omega\times\mb{R}^n)$, i.e., a regular generalized symbol of order $-\infty$, has empty generalized microsupport.
\item[(ii)] In the case of $a\in{\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m/-\infty}(\Omega\times\mb{R}^n)$, every representative is of the form $(a_\varepsilon)_\varepsilon +(n_\varepsilon)_\varepsilon$, where $(a_\varepsilon)_\varepsilon\in{{\underline{\mathcal{S}}}}_{\mathrm{rg}}^m(\Omega\times\mb{R}^n)$ and $(n_\varepsilon)_\varepsilon\in\mathcal{N}^{-\infty}(\Omega\times\mb{R}^n)$, and \eqref{estimate_micro} holds for any representative once it is known to hold for one. As a consequence, if $a$ is a classical symbol of order $m$ considered as an element of ${\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m/-\infty}(\Omega\times\mb{R}^n)$, then its generalized microsupport $\mu_\g(a)$ equals the classical one.
\item[(iii)] If $a\in{\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m/-\infty}(\Omega\times\mb{R}^n)$ and $\mu\mathrm{supp}_\mathrm{g}(a)=\emptyset$ then $a\in{\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{-\infty}(\Omega\times\mb{R}^n)$. In fact, having chosen any $(a_\varepsilon)_\varepsilon\in a$, for all $x_0\in\Omega$ the compact set $S_{x_0}$ can be covered by $U\times\bigcup_{i=1}^N\Gamma_i$, with $U$ and $\Gamma_i$ such that \eqref{estimate_micro} is valid. Hence there exists $N\in\mathbb{N}$ such that for all orders $m\in\mathbb{R}$ and for all $\alpha,\beta$
\[
|\partial^\alpha_\xi\partial^\beta_x a_\varepsilon(x,\xi)|\le c\langle\xi\rangle^{m-|\alpha|}\varepsilon^{-N}\qquad (x,\xi)\in U\times\mb{R}^n,\ \varepsilon\in(0,1].
\]
\item[(iv)] If $a\in{\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^m(\Omega\times\mb{R}^n)$ and $\kappa$ is the quotient map from ${{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m}(\Omega\times\mb{R}^n)$ onto ${\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m/-\infty}(\Omega\times\mb{R}^n)$ then
\begin{equation}
\label{intmusupp}
\mu\mathrm{supp}_\mathrm{g}(a) = \bigcap_{(a_\varepsilon)_\varepsilon \in a} \mu_\g\big(\,\kappa\big((a_\varepsilon)_\varepsilon\big)\,\big).
\end{equation}
Observe first that for all $(a_\varepsilon)_\varepsilon\in a$ we have $\mu\mathrm{supp}_g(a) \subseteq \mu_\g(\kappa((a_\varepsilon)_\varepsilon))$.
On the other hand, if $(x_0,\xi_0)\not\in \mu\mathrm{supp}_g(a)$ then $(x_0,\xi_0)\not\in \mu_\g(\kappa((a_\varepsilon)_\varepsilon))$ for some $(a_\varepsilon)_\varepsilon\in a$.
\end{trivlist}
\end{remark}
Similarly, as in the previous section, we introduce the notations $\Opropr{m}(\Omega)$ and $\Opropr{m/-\infty}(\Omega)$ for the sets of all properly supported operators $a(x,D)$ with symbol in ${\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m}(\Omega\times\mb{R}^n)$ and ${\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m/-\infty}(\Omega\times\mb{R}^n)$ respectively.
\begin{proposition}
\label{micro_support_product}
Let $a(x,D)\in\Opropr{m/-\infty}(\Omega)$ and $b(x,D)\in\Opropr{m'/-\infty}(\Omega)$. Then, there exists a symbol $a\sharp b\in{\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m+m'/-\infty}(\Omega\times\mb{R}^n)$ such that $a(x,D)\circ b(x,D) = a\sharp b(x,D)\in\Opropr{m+m'/-\infty}(\Omega)$ and
\begin{equation}
\label{micro_product}
\mu_\g(a\sharp b) \subseteq \mu_\g(a)\cap \mu_\g(b).
\end{equation}
In the same situation, without regarding refined orders we have that
\begin{equation}
\label{micro_product_2}
\mu\mathrm{supp}_g(a\sharp b) \subseteq \mu\mathrm{supp}_g(a)\cap \mu\mathrm{supp}_g(b).
\end{equation}
\end{proposition}
\begin{proof}
Theorem \ref{theorem_calculus} provides the existence of $a\sharp b\in{\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m+m'/-\infty}(\Omega\times\mb{R}^n)$ such that $a(x,D)\circ b(x,D) = a\sharp b(x,D)\in\Opropr{m+m'/-\infty}(\Omega)$ with asymptotic expansion $a\sharp b \sim \sum_\gamma \partial^\gamma_\xi a\, D^\gamma_x b/\gamma!$. Assume that $(x_0,\xi_0)\not\in{\mu_\g(a)}$. By \eqref{estimate_micro} and the symbol properties of $(b_\varepsilon)_\varepsilon\in b$, we obtain the following estimate valid on some neighborhood $U\times\Gamma$ of $(x_0,\xi_0)$:
\begin{equation}
\label{estimate1}
\exists N\in\mb{N}\ \forall m\in\mb{R}\ \forall\alpha,\beta\in\mb{N}^n\qquad |\partial^\alpha_\xi\partial^\beta_x(\partial^\gamma_\xi a_\varepsilon D^\gamma_x b_\varepsilon)(x,\xi)|\le c\langle\xi\rangle^m\varepsilon^{-N}.
\end{equation}
Since $a\sharp b\sim\sum_\gamma\partial^\gamma_\xi a\, D^\gamma_x b/\gamma !$ we have that for any $(d_\varepsilon)_\varepsilon\in a\sharp b$ and $h\ge 1$, the difference $((d_\varepsilon)_\varepsilon -\sum_{|\gamma|\le h-1}\frac{1}{\gamma !}\partial^\gamma_\xi a_\varepsilon D^\gamma_x b_\varepsilon)_\varepsilon$ is an element of ${{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m+m'-h}(\Omega\times\mb{R}^n)$ and of growth type $\varepsilon^{-M}$ on $\overline{U}$, for some $M\in\mb{N}$ independent of $h$. This together with \eqref{estimate1} implies that $(x_0,\xi_0)$ does not belong to ${\mu_\g(a\sharp b)}$. The proof of relation \eqref{micro_product_2} is similar.
\end{proof}
We recall a technical lemma proved in \cite[Proposition (8.52)]{Folland:95} which will be useful in the sequel.
\begin{lemma}
\label{lemma_folland}
Suppose $(x_0,\xi_0)\in T^*(\Omega)\setminus 0$, $U$ is a relatively compact open neighborhood of $x_0$, and $\Gamma$ is a conic neighborhood of $\xi_0$. There exists $p\in{S}^0(\Omega\times\mb{R}^n)$ such that $0\le p\le 1$, $\mathrm{supp}(p)\subseteq U\times\Gamma$ and $p(x,\xi)=1$ if $(x,\xi)\in U'\times\Gamma'$ and $|\xi|\ge 1$, where $U'\times\Gamma'$ is a smaller conic neighborhood of $(x_0,\xi_0)$. In particular, $p$ is micro-elliptic at $(x_0,\xi_0)$ and $\mu\mathrm{supp}(p)\subseteq U\times\Gamma$.
\end{lemma}
\begin{remark}
\label{remark_folland}
The proof of the above lemma actually shows that for each conic neighborhood $\Gamma$ of $\xi_0$ there exists $\tau(\xi)\in{S}^0(\Omega\times\mb{R}^n)$ such that $0\le \tau\le 1$, $\mathrm{supp}(\tau)\subseteq\Gamma$ and $\tau(\xi)=1$ in some conic neighborhood $\Gamma'$ of $\xi_0$ when $|\xi|\ge 1$.\\
Note that after multiplying $p(x,\xi)$ in Lemma \ref{lemma_folland} with a proper cut-off function, we obtain a properly supported operator $q(x,D)\in\Psi^{0}(\Omega)$ whose symbol is micro-elliptic at $(x_0,\xi_0)$ and $\mu\mathrm{supp}(q)=\mu\mathrm{supp}(p)$.
\end{remark}
\begin{theorem}
\label{theorem_micro_support}
For any $a(x,D)\in\Opropr{m}(\Omega)$ and $u\in\mathcal{G}(\Omega)$
\begin{equation}
\label{inclusion}
\mathrm{W}_{\ssc}(a(x,D)u) \subseteq \mathrm{W}_{\ssc}(u)\cap \mu\mathrm{supp}_\mathrm{g}(a).
\end{equation}
Similarly, if $a(x,D)\in\Opropr{m/-\infty}(\Omega)$ then
\begin{equation}
\label{inclusion_1}
\mathrm{W}_{\ssc}(a(x,D)u) \subseteq \mathrm{W}_{\ssc}(u)\cap \mu_\g(a).
\end{equation}
\end{theorem}
\begin{proof}
We first prove the assertion \eqref{inclusion_1} in two steps.

\emph{Step 1:}\enspace ${\mathrm{W}_{\ssc}(a(x,D)u)}\subseteq \mu_\g(a)$.
If $(x_0,\xi_0)\not\in\mu_\g(a)$ then \eqref{estimate_micro} holds on some $U\times\Gamma$, and by Lemma \ref{lemma_folland} we find $q\in{S}^{0}(\Omega\times\mb{R}^n)\subseteq{\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{0/-\infty}(\Omega\times\mb{R}^n)$, which is micro-elliptic at $(x_0,\xi_0)$ with $\mu\mathrm{supp}(q)\subseteq U\times\Gamma$. Apply Proposition \ref{micro_support_product} to obtain $q(x,D)a(x,D)= q\sharp a(x,D)\in\Opropr{m/-\infty}(\Omega)$ and $\mu_\g(q\sharp a)\subseteq \mu_\g(q)\cap \mu_\g(a)\subseteq (U\times\Gamma)\cap \mu_\g(a) = \emptyset$. Remark \ref{remark_micro_support}(iii) shows that $q\sharp a \in{\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{-\infty}(\Omega\times\mb{R}^n)$ and therefore $q(x,D)a(x,D)$ is a properly supported pseudodifferential operator with regular kernel. This implies $q(x,D)(a(x,D)u)\in\mathcal{G}^\infty(\Omega)$ and hence $(x_0,\xi_0)\not\in{\mathrm{W}_{\ssc}(a(x,D)u)}$.

\emph{Step 2:}\enspace ${\mathrm{W}_{\ssc}(a(x,D)u)}\subseteq \mathrm{W}_{\ssc}(u)$.
Let $(x_0,\xi_0)\not\in{\mathrm{W}_{\ssc}(u)}$; then by \eqref{alt_def_W_ssc} there exists $p(x,D)\in\Oprop{0/-\infty}(\Omega)$ such that $p$ is slow scale micro-elliptic at $(x_0,\xi_0)$ and $p(x,D)u\in\mathcal{G}^\infty(\Omega)$.

\emph{Claim}: There exists $r(x,D)\in\Oprop{0/-\infty}(\Omega)$ such that $r$ is micro-elliptic at $(x_0,\xi_0)$ and there exists $s(x,D)\in\Opropr{m/-\infty}(\Omega)$ such that $r(x,D)a(x,D)u-s(x,D)p(x,D)^\ast p(x,D)u$ belongs to $\mathcal{G}^\infty(\Omega)$.

Assuming for the moment that the claim is proved, we show that it completes the proof of the theorem. The operators $s(x,D)$ and $p(x,D)^\ast$ map $\mathcal{G}^\infty(\Omega)$ into itself, hence we obtain that $s(x,D)p(x,D)^\ast p(x,D)u\in\mathcal{G}^\infty(\Omega)$. The claim implies that $r(x,D)a(x,D)u\in\mathcal{G}^\infty(\Omega)$ and $(x_0,\xi_0)\not\in{\mathrm{W}_{\ssc}(a(x,D)u)}$, since $r$ is micro-elliptic at $(x_0,\xi_0)$.\\
To prove the claim we construct a slow scale elliptic symbol based on $p$. Let $(p_\varepsilon)_\varepsilon$ be a representative of $p$ satisfying \eqref{estimate_below} in a conic neighborhood $U\times\Gamma$ of $(x_0,\xi_0)$. By Lemma \ref{lemma_folland} there is $\psi\in{S}^0(\Omega\times\mb{R}^n)$, $0\le\psi\le 1$, with $\mathrm{supp}(\psi)\subseteq U\times\Gamma$ and identically $1$ in a smaller conic neighborhood $U'\times\Gamma'$ of $(x_0,\xi_0)$ if $|\xi|\ge 1$. The net $(1+|p_\varepsilon|^2-\psi)_\varepsilon$ belongs to $\underline{\mathcal{S}}_{\,\ssc}^0(\Omega\times\mb{R}^n)$, and by construction of $\psi$, satisfies \eqref{estimate_below} at all points in $T^*(\Omega)\setminus 0$. The pseudodifferential calculus for slow scale generalized symbols of refined order guarantees the existence of $\sigma\in{\wt{\underline{\mathcal{S}}}}_{\,\ssc}^{0/-\infty}(\Omega\times\mb{R}^n)$ such that $p(x,D)^\ast p(x,D) = \sigma(x,D)\in\Oprop{0/-\infty}(\Omega)$ and $\sigma-|p|^2\in{\wt{\underline{\mathcal{S}}}}_{\,\ssc}^{-1/-\infty}(\Omega\times\mb{R}^n)$. Application of Proposition \ref{proposition_lower_order}(ii) to this situation yields that the symbol $1+\sigma-\psi\in{\wt{\underline{\mathcal{S}}}}_{\,\ssc}^{0/-\infty}(\Omega\times\mb{R}^n)$ is slow scale elliptic and coincides with $\sigma$ in a conic neighborhood $U'\times\Gamma'$ of $(x_0,\xi_0)$ for $|\xi|\ge 1$.\\
Take a proper cut-off function $\chi$ and define a pseudodifferential operator via the slow scale amplitude $\chi(x,y)(1+\sigma(x,\xi)-\psi(x,\xi))$. By Theorem \ref{theorem_calculus} it can be written in the form $b(x,D)\in\Oprop{0/-\infty}(\Omega)$, where $b-(1+\sigma-\psi)\in{\wt{\underline{\mathcal{S}}}}_{\,\ssc}^{-\infty}(\Omega\times\mb{R}^n)$. In other words, $b$ itself is slow scale elliptic and $\mu_\g(b) = \mu_\g(1+\sigma-\psi)$. In particular, since $\sigma$ and $1+\sigma-\psi$ coincide on $U'\times\Gamma'$, $|\xi|\ge 1$, we have that $\mu_\g(b-\sigma)\cap ( U'\times\Gamma') = \emptyset$. Theorem \ref{theorem_parametrix} gives a parametrix $t(x,D)\in\Opropr{0/-\infty}(\Omega)$ for $b(x,D)$, i.e., the operators $b(x,D)t(x,D)-I$ and $t(x,D)b(x,D)-I$ have regular kernel. Let $r(x,D)\in\Oprop{0/-\infty}(\Omega)$ be an operator constructed as in Lemma \ref{lemma_folland}, with classical symbol $r$, micro-elliptic at $(x_0,\xi_0)$, and $\mu\mathrm{supp}(r)\subseteq U'\times\Gamma'$.\\
We show that $r(x,D)$ and $s(x,D):=r\sharp(a\sharp t)(x,D)=r(x,D)a(x,D)t(x,D)\in\Opropr{m/-\infty}(\Omega)$ satisfy the assertions of the claim. We rewrite the difference $r(x,D)a(x,D)u-s(x,D)p(x,D)^\ast p(x,D)u$ as
\begin{multline*}
r(x,D)a(x,D)\big(u-t(x,D)b(x,D)u\big)\\
+ r(x,D)a(x,D)t(x,D)\big(b(x,D)-\sigma(x,D)\big)u.
\end{multline*}
Here, the first summand is in $\mathcal{G}^\infty(\Omega)$ due to the fact that $t(x,D)$ is a parametrix for $b(x,D)$ and the mapping properties of $r(x,D)a(x,D)$. An iterated application of Proposition \ref{micro_support_product} to the second summand shows that it can be written with a regular symbol of refined order $m$, having generalized micro-support contained in the region $\mu\mathrm{supp}(r)\cap\mu_\g(b-\sigma)\subseteq (U'\times\Gamma')\cap \mu_\g(b-\sigma) =\emptyset$. Hence it has a smoothing generalized symbol and therefore the claim is proven.\\
Finally we prove the assertion \eqref{inclusion}. Let $(a_\varepsilon)_\varepsilon\in a \in {\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m}(\Omega\times\mb{R}^n)$ and consider the corresponding symbol $\kappa((a_\varepsilon)_\varepsilon)=(a_\varepsilon)_\varepsilon +\mathcal{N}^{-\infty}(\Omega\times\mb{R}^n)\in{\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^{m/-\infty}(\Omega\times\mb{R}^n)$. Then $\kappa((a_\varepsilon)_\varepsilon)(x,D)\in\Opropr{m/-\infty}(\Omega)$ and $\kappa((a_\varepsilon)_\varepsilon)(x,D)=a(x,D)$. The already established inclusion \eqref{inclusion_1}, applied to all $(a_\varepsilon)_\varepsilon \in a$, yields
\[
\bigcap_{(a_\varepsilon)_\varepsilon \in a}\hskip-4pt \mathrm{W}_{\ssc}(\kappa((a_\varepsilon)_\varepsilon)(x,D)u)\ \subseteq\ \mathrm{W}_{\ssc}(u)\ \cap\ \bigcap_{(a_\varepsilon)_\varepsilon \in a}\hskip-4pt \mu_\g(\kappa((a_\varepsilon)_\varepsilon)),
\]
i.e.,
\[
\mathrm{W}_{\ssc}(a(x,D)u)\ \subseteq\ \mathrm{W}_{\ssc}(u)\ \cap\ \bigcap_{(a_\varepsilon)_\varepsilon \in a}\hskip-4pt \mu_\g(\kappa((a_\varepsilon)_\varepsilon)),
\]
which completes the proof by Remark \ref{remark_micro_support}(iv).
\end{proof}
\begin{corollary}
\label{corollary_2}
Let $a(x,D)\in\Oprop{m}(\Omega)$ where $a$ is a slow scale elliptic symbol. Then for any $u\in\mathcal{G}(\Omega)$
\begin{equation}
\label{equality_corollary_ell}
\mathrm{W}_{\ssc}(a(x,D)u) = \mathrm{W}_{\ssc}(u).
\end{equation}
\end{corollary}
\begin{proof}
Since $a\in{\wt{\underline{\mathcal{S}}}}_{\,\ssc}^m(\Omega\times\mb{R}^n)\subseteq{\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^m(\Omega\times\mb{R}^n)$, Theorem \ref{theorem_micro_support} implies that $\mathrm{W}_{\ssc}(a(x,D)u)\subseteq \mathrm{W}_{\ssc}(u)$. Let $p(x,D)$ be a parametrix for $a(x,D)$ as in Theorem \ref{theorem_parametrix}; then $u = p(x,D)a(x,D)u+v$, where $v\in\mathcal{G}^\infty(\Omega)$. Therefore $\mathrm{W}_{\ssc}(u)=\mathrm{W}_{\ssc}(p(x,D)a(x,D)u)$ and Theorem \ref{theorem_micro_support} applied to $p(x,D)\in\Opropr{-m}(\Omega)$ gives $\mathrm{W}_{\ssc}(u)\subseteq\mathrm{W}_{\ssc}(a(x,D)u)$.
\end{proof}
The statements of the above theorem and corollary are valid for pseudodifferential operators which are not necessarily properly supported, if we consider instead compactly supported generalized functions.\\
As in the classical theory (cf.\ \cite{Folland:95}), we introduce a notion of microsupport for operators. However, in the case of generalized pseudodifferential operators we are careful to distinguish the corresponding notions for symbols and operators by a slight change in notation, thereby taking into account the non-injectivity when mapping symbols to operators (cf.\ \cite{GGO:03}).
\begin{definition}
\label{def_micro_supp_op}
Let $A$ be any properly supported pseudodifferential operator with regular symbol. We define the \emph{generalized microsupport} of $A$ by
\begin{equation}
\label{micro_supp_op}
\mu\mathrm{supp}_g(A)\ :=\ \bigcap_{\substack{ a(x,D)\in\Opropr{m}(\Omega),\\ a(x,D)=A}}\mu\mathrm{supp}_g(a).
\end{equation}
\end{definition}
Now we have all the technical tools at hand which enable us to identify $\mathrm{W}_{\ssc}(u)$ with the generalized wave front set $\mathrm{WF}_g(u)$.
\begin{theorem}
\label{theorem_wave_front}
For all $u\in\mathcal{G}(\Omega)$
\[
\mathrm{W}_{\ssc}(u)=\mathrm{WF}_g(u).
\]
\end{theorem}
\begin{proof}
By definition of $\mathrm{WF}_g(u)$ the assertion which we are going to prove is the following: $(x_0,\xi_0)\not\in{\mathrm{W}_{\ssc}(u)}$ iff there exist a representative $(u_\varepsilon)_\varepsilon$ of $u$, a cut-off function $\phi\in\mathcal{C}^\infty_{\rm{c}}(\Omega)$ with $\phi(x_0)=1$, a conic neighborhood $\Gamma$ of $\xi_0$ and a number $N$ such that for all $l\in\mathbb{R}$
\begin{equation}
\label{estimate_wave_front}
\sup_{\xi\in\Gamma}\,\langle\xi\rangle^l |\widehat{\phi u_\varepsilon}(\xi)| = O(\varepsilon^{-N})\qquad \text{as}\ \varepsilon\to 0.
\end{equation}
We first show sufficiency, that is if $(x_0,\xi_0)\in T^*(\Omega)\setminus 0$ satisfies \eqref{estimate_wave_front} then it does not belong to $\mathrm{W}_{\ssc}(u)$. As noted in Remark \ref{remark_folland}, there exists $p(\xi)\in{S}^0(\Omega\times\mb{R}^n)$ with $\mathrm{supp}(p)\subseteq\Gamma$, which is identically $1$ in a conic neighborhood $\Gamma'$ of $\xi_0$ when $|\xi|\ge 1$. We recall that, taking a typical proper cut-off $\chi$, by Theorem \ref{theorem_pre_calculus} we can write the properly supported pseudodifferential operator with amplitude $\chi(x,y)p(\xi)\phi(y)$ in the form $\sigma(x,D)\in\Oprop{0/-\infty}(\Omega)$, where $\sigma(x,\xi)-p(\xi)\phi(x)\in{S}^{-1}(\Omega\times\mb{R}^n)$; in particular, $\sigma(x,D)v-p(D)(\phi v)\in\mathcal{G}^\infty(\Omega)$ for all $v\in\mathcal{G}(\Omega)$. By assumption, $p(\xi)\phi(x)$ is micro-elliptic at $(x_0,\xi_0)$, so the symbol $\sigma$ is micro-elliptic there, and from \eqref{estimate_wave_front} we have that
\[
p(D)(\phi u) = \biggl[\biggl(\int_{\mb{R}^n}e^{ix\xi}p(\xi)\widehat{\phi u_\varepsilon}(\xi)\, {\mathchar'26\mkern-12mu d}\xi\biggr)_\varepsilon\biggr] \in \mathcal{G}^\infty(\Omega).
\]
Since $\sigma(x,D)u-p(D)(\phi u)\in\mathcal{G}^\infty(\Omega)$ we obtain that $\sigma(x,D)u\in\mathcal{G}^\infty(\Omega)$ and hence $(x_0,\xi_0)\not\in{\mathrm{W}_{\ssc}(u)}$.\\
Conversely, suppose $(x_0,\xi_0)\not\in{\mathrm{W}_{\ssc}(u)}$. There is an open neighborhood $U$ of $x_0$ such that $(x,\xi_0)\notin\mathrm{W}_{\ssc}(u)$ for all $x\in U$. Choose $\phi\in\ensuremath{\mathcal{C}^\infty}_{\rm{c}}(U)$ with $\phi(x_0)=1$ and define
\[
\Sigma :=\{ \xi\in\mb{R}^n\setminus 0:\ \exists x\in\Omega\ (x,\xi)\in\mathrm{W}_{\ssc}(\phi u)\}.
\]
From Theorem \ref{theorem_micro_support} we have that $\mathrm{W}_{\ssc}(\phi u)\subseteq \mathrm{W}_{\ssc}(u)\cap (\mathrm{supp}(\phi)\times\mb{R}^n\setminus 0)$ and therefore $\xi_0\notin\Sigma$. Moreover, since $\mathrm{W}_{\ssc}(\phi u)$ is closed and conic, $\Sigma$ itself is a closed conic subset of $\mb{R}^n\setminus 0$. Again, by Remark \ref{remark_folland} there is a symbol $p(\xi)\in{S}^0(\Omega\times\mb{R}^n)$ such that $0\le p\le 1$, $p(\xi)=1$ in a conic neighborhood $\Gamma$ of $\xi_0$ when $|\xi|\ge 1$ and $p(\xi)=0$ in a conic neighborhood $\Sigma_0$ of $\Sigma$. By construction $\mu\mathrm{supp}(p)\cap (\Omega \times {\Sigma_0}) = \emptyset$ and $\mathrm{W}_{\ssc}(\phi u)\subseteq\Omega\times\Sigma$. Therefore, $\mathrm{W}_{\ssc}(p(D)\phi u)\subseteq \mathrm{W}_{\ssc}(\phi u)\cap \mu\mathrm{supp}(p)=\emptyset$ and by Proposition \ref{prop_sing_supp} we conclude that $p(D)\phi u\in\mathcal{G}^\infty(\Omega)$. In terms of representatives $(u_\varepsilon)_\varepsilon \in u$, this means that
\begin{equation}
\label{representative}
\biggl(\int_{\mb{R}^n}e^{ix\xi}p(\xi)\widehat{\phi u_\varepsilon}(\xi)\, {\mathchar'26\mkern-12mu d}\xi\biggr)_\varepsilon = \bigl(\phi u_\varepsilon \ast {p}^{\vee}\bigr)_\varepsilon \in\mathcal{E}_M^\infty.
\end{equation}
Note that ${p}^\vee$ is a Schwartz function outside the origin, i.e., we have for all $\delta>0$ and $\alpha,\beta\in\mb{N}^n$ that $\sup_{|x|>\delta}|x^\alpha\partial^\beta {p}^\vee(x)|<\infty$ (\cite[Theorem (8.8a)]{Folland:95}). If ${\rm{dist}}(x,\mathrm{supp}(\phi))>\delta$ this yields $\partial^\alpha(\phi u_\varepsilon\ast{p}^\vee)(x)=\int_{|y|>\delta}\phi u_\varepsilon(x-y)\partial^\alpha{p}^\vee(y)\, dy$ and for all $l>0$ the estimate
\begin{equation}
\label{estimate_charac}
\begin{split}
&\langle x\rangle^l|\partial^\alpha(\phi u_\varepsilon\ast p^\vee)(x)|\\
&\le c_1\int_{|y|>\delta}|\phi u_\varepsilon(x-y)|\langle x-y\rangle^l\langle y\rangle^l|\partial^\alpha{p}^\vee(y)|\, dy\\
&\le c_2 \sup_{z\in\mathrm{supp}(\phi)}|u_\varepsilon(z)|,
\end{split}
\end{equation}
uniformly for such $x$. When $\delta$ is chosen small enough, the set of points $x$ with ${\rm{dist}}(x,\mathrm{supp}(\phi))\le\delta$ is compact in $\Omega$, and from \eqref{representative} we have that there exists $M\in\mb{N}$ such that for all $l\in\mb{R}$, $\sup \langle x\rangle^l|\partial^\alpha(\phi u_\varepsilon\ast{p}^\vee)(x)| = O(\varepsilon^{-M})$ as $\varepsilon\to 0$, where the supremum is taken over $\{ x: {\rm{dist}}(x,\mathrm{supp}(\phi))\le\delta \}$. Hence we have shown that $(\phi u_\varepsilon\ast{p}^\vee)_\varepsilon\in\mathcal{E}_{\mathcal{S}}^\infty(\mb{R}^n)$. The Fourier transform maps $\mathcal{E}_{\mathcal{S}}^\infty(\mb{R}^n)$ into $\mathcal{E}_{\mathcal{S}}^\infty(\mb{R}^n)$, which implies that $(p(\xi)\widehat{\phi u_\varepsilon}(\xi))_\varepsilon\in\mathcal{E}_{\mathcal{S}}^\infty(\mb{R}^n)$. Since $p(\xi)=1$ in a conic neighborhood of $\xi_0$, the proof is complete.
\end{proof}
Combining Theorem \ref{theorem_micro_support} with Theorem \ref{theorem_wave_front}, the following corollary is immediate from Definition \ref{def_micro_supp_op}.
\begin{corollary}
\label{corollary_wave_front}
For any properly supported operator $A$ with generalized regular symbol and $u\in\mathcal{G}(\Omega)$ we have
\begin{equation}
\label{wave_front_op}
\mathrm{WF}_g(Au) \subseteq \mathrm{WF}_g(u)\cap \mu\mathrm{supp}_\mathrm{g}(A).
\end{equation}
\end{corollary}
While the purpose of the foregoing discussion was to prepare for applications to $\mathcal{G}^\infty$-regularity theory for pseudodifferential equations with generalized symbols, one may, as an intermediate step, investigate the propagation of $\mathcal{G}^\infty$-singularities in the case of differential equations with smooth coefficients. Having this special situation in mind, it is natural to consider the following set, defined for any $u\in\mathcal{G}(\Omega)$ by
\begin{equation}
\label{characteristic_inter}
\mathrm{W}_{\rm{cl}}(u) := \bigcap\mathrm{Char}(A),
\end{equation}
where the intersection is taken over all classical properly supported operators $A\in\Psi^0(\Omega)$ such that $Au\in\mathcal{G}^\infty(\Omega)$. A careful inspection of the proof of Theorem \ref{theorem_wave_front} suggests that $\mathrm{W}_{\rm{cl}}(u)$ can be used in place of $\mathrm{W}_{\ssc}(u)$. Indeed, it gives an alternative characterization of $\mathrm{WF}_g(u)$.
\begin{theorem}
\label{theorem_Wcl}
For all $u\in\mathcal{G}(\Omega)$
\begin{equation}
\label{Wcl=Wsc=WFg}
\mathrm{W}_{\rm{cl}}(u) = \mathrm{W}_{\ssc}(u) = \mathrm{WF}_g(u).
\end{equation}
\end{theorem}
\begin{proof}
The inclusion $\mathrm{W}_{\ssc}(u)\subseteq\mathrm{W}_{\rm{cl}}(u)$ is obvious. Conversely, if $(x_0,\xi_0)\not\in{\mathrm{WF}_g(u)}$, as in the proof of Theorem \ref{theorem_wave_front} one can find a properly supported operator $P\in\Psi^0(\Omega)$ such that $(x_0,\xi_0)\notin\mathrm{Char}(P)$ and $Pu\in\mathcal{G}^\infty(\Omega)$.
\end{proof}
\section{Noncharacteristic $\mathcal{G}^\infty$-regularity and\\
propagation of singularities}
As a first application of Theorem \ref{theorem_wave_front} we prove an extension of the classical result on noncharacteristic regularity for distributional solutions of arbitrary pseudodifferential equations (with smooth symbols). A generalization of this result for partial differential operators with Colombeau coefficients was achieved in \cite{HOP:03}; here we present a version for pseudodifferential operators with slow scale generalized symbols.
\begin{theorem} If $P = p(x,D)$ is a properly supported pseudodifferential operator with slow scale symbol and $u\in\mathcal{G}(\Omega)$ then
\begin{equation}\label{nonchar_theorem}
\mathrm{WF}_g(P u) \subseteq \mathrm{WF}_g(u) \subseteq \mathrm{WF}_g(P u)\cup \bigl(T^*(\Omega)\setminus 0\bigr)\setminus\mathrm{Ell}_{\ssc}(p).
\end{equation}
\end{theorem}
\begin{proof} The first inclusion relation is obvious from Theorem \ref{theorem_micro_support}.\\
Assume that $(x_0,\xi_0) \not\in \mathrm{WF}_g(P u)$ but $p$ is micro-elliptic there. Thanks to Theorem \ref{theorem_wave_front} we can find $a(x,D) \in \Oprop{0}(\Omega)$ such that $a(x,D) P u \in \mathcal{G}^\infty(\Omega)$ and $(x_0,\xi_0) \in \mathrm{Ell}_{\ssc}(a)$. By the (slow scale) symbol calculus and Proposition \ref{proposition_lower_order}(ii), $a(x,D) p(x,D)$ has a slow scale symbol which is micro-elliptic at $(x_0,\xi_0)$. Hence another application of Theorem \ref{theorem_wave_front} yields that $(x_0,\xi_0) \not\in \mathrm{WF}_g(u)$.
\end{proof}
\begin{remark} As can be seen from various examples in \cite{HO:03}, relation (\ref{nonchar_theorem}) does not hold in general for regular symbols $p$ which satisfy estimate (\ref{estimate_below}). In this sense, the overall slow scale properties of the symbol are crucial in the above statement and not just a technical convenience. In fact, adapting the reasoning in \cite[Example 4.6]{HO:03} to the symbol $p_\varepsilon(x,\xi) = 1 + c_\varepsilon x^2$, $c_\varepsilon \geq 0$, we obtain the following: $p_\varepsilon(x,\xi) \geq 1$, whereas the unique solution $u$ of $p u = 1$ is $\mathcal{G}^\infty$ if and only if $(c_\varepsilon)_\varepsilon$ is a slow scale net.
\end{remark}
In the remainder of this section we show how Theorem \ref{theorem_Wcl} enables us to extend a basic result on propagation of singularities presented in \cite[Section 23.1]{Hoermander:V3}, where we now allow for Colombeau generalized functions as solutions and initial values in first-order strictly hyperbolic partial differential equations with smooth coefficients. Hyperbolicity will be assumed with respect to the time direction and we will occasionally employ pseudodifferential operators whose symbols depend smoothly on the real parameter $t$. This means that we have symbols from the space $\ensuremath{\mathcal{C}^\infty}(\mb{R},S^m(\Omega\times\mb{R}^n))$: these are of the form $a(t,x,\xi)$ with $a\in\ensuremath{\mathcal{C}^\infty}(\mb{R}\times\Omega\times\mb{R}^n)$ such that for each $t$ and $h\in\mb{N}$ one has $\frac{d^{h}}{dt^h}a(t,.,.)\in S^m(\Omega\times\mb{R}^n)$, where all symbol seminorm estimates are uniform when $t$ varies in a compact subset of $\mb{R}$. Defined on representatives in the obvious way, a properly supported operator $a(t,x,D_x)$ maps $\mathcal{G}(\Omega)$ into $\mathcal{G}(\Omega)$. Moreover, if we assume that $a(t,x,D_x)$ is uniformly properly supported with respect to $t$, that is, there exists a proper closed set $L$ such that $\mathrm{supp}\,k_{a(t,x,D_x)}\subseteq L$ for all $t$, and if $u\in\mathcal{G}(\Omega\times\mb{R})$, then $a(t,x,D_x)(u(t,\cdot))\in\mathcal{G}(\Omega\times\mb{R})$.
\begin{proposition}\label{prop_first_order}
Let $P(t,x,D_x)$ be a first-order partial differential operator with
real principal symbol $P_1$ and coefficients in $\ensuremath{\mathcal{C}^\infty}(\mb{R}\times\Omega)$, which
are constant outside some compact subset of $\Omega$.
Assume that $u\in\mathcal{G}(\Omega\times\mb{R})$ satisfies the homogeneous Cauchy problem
\begin{align}
\partial_t u + i P(t,x,D_x) u &= 0
\label{first_order_equ}\\
u(.,0) &= g \in\mathcal{G}(\Omega) \label{initial_cond}.
\end{align}
If $\Phi_t$ denotes the Hamilton flow corresponding to $P_1(t,.,.)$ on
$T^*(\Omega)$ then we have for all $t\in\mb{R}$
\begin{equation}\label{WFg_flow}
\mathrm{WF}_g(u(.,t)) = \Phi_t\big( \mathrm{WF}_g(g) \big).
\end{equation}
\end{proposition}
\begin{proof}
We adapt the symbol constructions presented in
\cite{Hoermander:V3}, pp.\
388--389. Observe that one can carry out all steps of that classical procedure
in $\Omega \subseteq \mb{R}^n$ and with all operators uniformly properly supported. To be
more precise, let $(x_0,\xi_0)\in\big(T^*(\Omega)\setminus 0\big)\setminus\mathrm{WF}_g(g)$ and
choose $q\in S^0(\Omega\times\mb{R}^n)$ polyhomogeneous, i.e., having homogeneous
terms in the asymptotic expansion, such that $q$ is micro-elliptic at
$(x_0,\xi_0)$, $\mu\mathrm{supp}(q)\cap\mathrm{WF}_g(g)= \emptyset$, and $q(x,D)$ is properly
supported. By Corollary \ref{corollary_wave_front} we deduce that
$\mathrm{WF}_g(q(x,D)g) \subseteq \mathrm{WF}_g(g)\cap \mu\mathrm{supp}(q) = \emptyset$, therefore
$q(x,D)g\in\mathcal{G}^\infty(\Omega)$.
We can find a symbol $Q(t,x,\xi)$, which is polyhomogeneous of order $0$,
smoothly depending on the parameter $t\in\mb{R}$, and with the following
properties:
\begin{trivlist}
\item the operators $Q(t,x,D_x)$ are uniformly properly supported for $t\in\mb{R}$,
\item $[\partial_t+iP(t,x,D_x),Q(t,x,D_x)]=R(t,x,D_x)$ is a $t$-parametrized
operator of order $-\infty$ and uniformly properly supported,
\item $Q_0(t,x,\xi)=q_0(\Phi_t^{-1}(x,\xi))$ for the principal symbols,
\item and $Q(0,x,\xi)-q(x,\xi)\in S^{-\infty}(\Omega\times\mb{R}^n)$.
\end{trivlist}
From the properties of $Q$ and \eqref{first_order_equ} we have
\begin{multline*}
(\partial_t+i P(t,x,D_x))Q(t,x,D_x)u \\
= Q(t,x,D_x)(\partial_tu+i P(t,x,D_x)u)+[\partial_t+iP,Q]u
= R(t,x,D_x)u,
\end{multline*}
and $Q(0,x,D_x)u(.,0)=q(x,D)g+R_0(x,D)g$, where $R_0$ is of order $-\infty$. Observe that $R(t,x,D_x)u(.,t) \in
\mathcal{G}^\infty(\Omega)$ and $R_0(x,D)g\in\mathcal{G}^\infty(\Omega)$, which implies
$Q(0,x,D_x)u(.,0)\in\mathcal{G}^\infty(\Omega)$. Setting $v=Qu\in\mathcal{G}(\Omega\times\mb{R})$ we obtain $\partial_t v + i P(t,x,D_x) v\in\mathcal{G}(\Omega\times\mb{R})$ and
\begin{align*}
\partial_t v(.,t) + i P(t,x,D_x) v(.,t) & \in\mathcal{G}^\infty(\Omega) \qquad \forall t\in\mb{R},\\
v(.,0) & \in \mathcal{G}^\infty(\Omega),
\end{align*}
which we interpret as a Cauchy problem with
$\mathcal{G}^\infty$-data with respect to the space variable $x$. From an inspection of the
energy estimates discussed in
\cite{LO:91} one directly infers that
$v(.,t) \in \mathcal{G}^\infty(\Omega)$ for all $t\in\mb{R}$ (note that the coefficients are
independent of
$\varepsilon$). Therefore at each fixed value of $t$ we have that
$Q(t,x,D_x)u(.,t)\in\mathcal{G}^\infty(\Omega)$ and the noncharacteristic regularity relation
\eqref{nonchar_theorem} yields
\begin{multline}\label{WF_u_Q}
\mathrm{WF}_g(u(.,t))\subseteq \mathrm{Char}(Q(t,x,D_x))\\
=\{ (x,\xi):\, Q_0(t,.,.)=q_0(\Phi_t^{-1}(.,.))
\text{ is not micro-elliptic at } (x,\xi) \}.
\end{multline}
But $q_0\circ\Phi_t^{-1}$ is micro-elliptic at $(x,\xi)=\Phi_t(x_0,\xi_0)$, which implies $(x,\xi)\not\in\mathrm{WF}_g(u(.,t))$ and therefore we have shown
\[
\mathrm{WF}_g(u(.,t)) \subseteq \Phi_t(\mathrm{WF}_g(g)).
\]
The opposite inclusion is proved by time reversal.
\end{proof}
\begin{remark} The same result can be proven when $P$ is a pseudodifferential
operator (with parameter $t$ and global symbol estimates), but with $g$ and
$u$ in the $\mathcal{G}_{2,2}$-spaces as introduced in
\cite{BO:92} and using a basic
regularity result from
\cite{GH:03}. However, this formally requires first to
transfer the notion of micro-ellipticity to that context, which will be left
to future presentations.
\end{remark}
While Proposition \ref{prop_first_order} determines the wave front set of
$u(.,t)$ for fixed $t$, we are aiming for a complete description of $\mathrm{WF}_g(u)$
in the cotangent bundle over $\Omega\times\mb{R}$. The crucial new ingredient needed
in extending the analogous discussion in
\cite{Hoermander:V3}, p.\ 390, to
Colombeau generalized functions is a result about microlocal regularity of the
restriction operator $\mathcal{G}(\Omega\times\mb{R}) \to \mathcal{G}(\Omega)$.
\begin{lemma}\label{restriction}
Let $I\subseteq\mb{R}$ be an open interval, $t_0\in I$, and $u\in\mathcal{G}(\Omega\times I)$
such that
\begin{equation}\label{inter_cond}
\mathrm{WF}_g(u)\cap \big( \Omega\times\{t_0\}\times \{(0,\tau) : \tau\in\mb{R} \}\big)
= \emptyset.
\end{equation}
Then the wave front set of the restriction of $u$ to $\Omega\times\{t_0\}$
satisfies the following relation
\begin{equation}\label{WFg_restriction}
\mathrm{WF}_g(u \mid_{t=t_0}) \subseteq \{ (x,\xi)\in T^*(\Omega)\setminus 0 : \exists\tau\in\mb{R}:
(x,t_0;\xi,\tau)\in\mathrm{WF}_g(u) \}.
\end{equation}
\end{lemma}
\begin{remark}
Note that, unlike with distributions, the restriction $u \mid_{t=t_0}$ is
always well-defined for Colombeau functions on $\Omega\times I$, regardless of the
microlocal intersection condition (\ref{inter_cond}) in the above lemma.
However, an obvious adaptation of the counterexample in
\cite[Example 5.1]{HK:01} shows that, in general, the latter cannot be dropped without
losing the relation (\ref{WFg_restriction}).
\end{remark}
\begin{proof}
We are proving a microlocal statement, so we may assume that $t_0 = 0$ and
$u$ has compact support near $\{ t = 0 \}$; in particular, we may then pick a
representative $(u_\varepsilon)_\varepsilon$ of $u$ with $\mathrm{supp}(u_\varepsilon)$ contained in a
fixed compact set uniformly for all $\varepsilon\in (0,1]$.
The aim of the proof is to show the following: if $(x_0,\xi_0)\in T^*(\Omega)\setminus 0$ is
in the complement of the right-hand side of
relation (\ref{WFg_restriction}) and $\phi\in\mathcal{C}^\infty_c(\Omega)$ is supported near $x_0$,
then $\mathcal{F}\big(\phi(.)\, u_\varepsilon(.,0)\big)(\xi)$ is (Colombeau-type) rapidly
decreasing, i.e., with uniform $\varepsilon$-asymptotics (cf.\
\cite[Definition 17]{Hoermann:99}), in some conic neighborhood $\Gamma(\xi_0)$ of $\xi_0$. As a
preliminary observation, note that if $\psi\in\mathcal{C}^\infty_c(I)$ with
$\psi(0) = 1$ then
we may write
\begin{align}\label{restr_integral}
\mathcal{F}\big(\phi(.)\, u_\varepsilon(.,0)\big)(\xi) & =
\mathcal{F}\big((\phi\otimes\psi)(.,0)\, u_\varepsilon(.,0)\big)(\xi) \\
& = \int \mathcal{F}_{n+1}\big((\phi\otimes\psi)\, u_\varepsilon \big)(\xi,\tau)
\,\dslash\tau, \nonumber
\end{align}
where $\mathcal{F}_{n+1}$ denotes the $(n+1)$-dimensional Fourier transform. We will find
rapid decrease estimates of the integrand upon an appropriate splitting of
the integral depending on the parameter $\xi$.
First, the hypothesis (\ref{inter_cond}) gives that for each $y\in\Omega$ we find
an open neighborhood $V(y,0)$ and open cones $\Gamma^\pm_y \ni (0,\pm1)$ such
that for all $f\in\mathcal{C}^\infty_c(V(y,0))$ the function $\mathcal{F}(f u_\varepsilon)(\xi,\tau)$ is
Colombeau-rapidly decreasing in $\Gamma_y := \Gamma^-_y \cup \Gamma^+_y$. By
compactness the set $\mathrm{supp}(u)\cap (\Omega\times\{0\})$ is covered by
finitely many neighborhoods $V(y_1,0),\ldots, V(y_M,0)$. Again by compactness,
we may assume that there is $U_1\subseteq \Omega$ open and $\eta_1 > 0$ such that
$\mathrm{supp}(u)\cap (\Omega\times\{0\})\subseteq U_1\times (-\eta_1,\eta_1)
\subseteq \bigcup_{j=1}^M V(y_j,0)$. Furthermore, $\bigcap_{j=1}^M \Gamma_{y_j}$
is an open cone around $(0,-1)$ and $(0,1)$ and there is $c_1 > 0$ such
that it still contains the conic neighborhood $\Gamma_1 := \{ (\xi,\tau) :
|\tau| \geq c_1 |\xi| \}$. Using a finite partition of unity subordinate to
$(V(y_j,0))_{1 \leq j \leq M}$, we obtain the following statement: for any
$\phi\in\mathcal{C}^\infty_c(U_1)$ and
$\psi\in\mathcal{C}^\infty_c((-\eta_1,\eta_1))$ there exists $N \geq 0$ with the
property that $\forall l\in\mb{N}\, \exists C_l>0\, \exists \varepsilon_0 > 0$
which guarantee the rapid decrease estimate
\begin{equation}\label{rap_dec_1}
|\mathcal{F}_{n+1} \big((\phi\otimes\psi)\, u_\varepsilon\big)(\xi,\tau)|
\leq C_l\, \varepsilon^{-N} \langle(\xi,\tau)\rangle^{-l} \qquad
(\xi,\tau)\in\Gamma_1,\ 0 < \varepsilon < \varepsilon_0.
\end{equation}
This will provide corresponding upper bounds for the integrand in
(\ref{restr_integral}) whenever $|\tau| \geq c_1 |\xi|$, $\xi$ arbitrary.
Second, it follows from the assumption on $(x_0,\xi_0)$ that
$\big(\{(x_0,0)\}\times \{\xi_0\} \times\mb{R}\big)\cap \mathrm{WF}_g(u) = \emptyset$.
Hence for all $\sigma\in\mb{R}$ there is an open set $V_{\sigma}\ni(x_0,0)$
and an open conic neighborhood
$\Gamma(\xi_0,\sigma)$ such that for all $f\in\mathcal{C}^\infty_c(V_{\sigma})$ the function
$\mathcal{F}(f u_\varepsilon)(\xi,\tau)$ is Colombeau-rapidly decreasing in $\Gamma_{\sigma}$.
By a conic compactness argument (via projections to the
unit sphere), we deduce that finitely many cones $\Gamma_{\sigma_j}$
($j=1,\ldots,M$) suffice to cover the two-dimensional sector
$\{(\lambda \xi_0,\tau) : \lambda > 0,\ |\tau| \leq c_1 \lambda |\xi_0| \}$.
Let $\pi_n\colon\mb{R}^{n+1} \to \mb{R}^n$ be the projection $\pi_n(\xi,\tau) = \xi$
and define
\[
\Gamma(\xi_0) := \bigcap_{j=1,\ldots,M} \pi_n(\Gamma_{\sigma_j}),
\]
which is an open conic neighborhood of $\xi_0$ in $\mb{R}^n\setminus 0$. Furthermore,
$\bigcap_{j=1}^M V_{\sigma_j} \ni (x_0,0)$ is open and still contains some
neighborhood of product form, say $U_0\times (-\eta_0,\eta_0)$.
Therefore, for any $\phi\in\mathcal{C}^\infty_c(U_0)$ and
$\psi\in\mathcal{C}^\infty_c((-\eta_0,\eta_0))$ there exists $N \geq 0$ with the following
property:
$\forall l\in\mb{N}\, \exists C_l>0\, \exists \varepsilon_0 > 0$ we have an
estimate
\begin{equation}\label{rap_dec_0}
|\mathcal{F}_{n+1}((\phi\otimes\psi)\, u_\varepsilon)(\xi,\tau)|
\leq C_l\, \varepsilon^{-N} \langle(\xi,\tau)\rangle^{-l} \qquad
\xi\in\Gamma(\xi_0),\ |\tau| \leq c_1 |\xi|,
\end{equation}
when $0 < \varepsilon < \varepsilon_0$.
So if $\xi\in\Gamma(\xi_0)$, we also obtain
corresponding upper bounds in (\ref{restr_integral})
over the remaining integration domain $|\tau| \leq c_1 |\xi|$.
To summarize, when $\xi\in \Gamma(\xi_0)$ we may combine
(\ref{rap_dec_1})--(\ref{rap_dec_0}) by taking $U := U_0\cap U_1$,
$\phi\in\mathcal{C}^\infty_c(U)$, and $\eta := \min(\eta_0,\eta_1)$, $\psi\in\mathcal{C}^\infty_c((-\eta,\eta))$,
$\psi(0)=1$. Upon applying this to (\ref{restr_integral}) we arrive at
\[
|\mathcal{F}\big(\phi(.)\, u_\varepsilon(.,0)\big)(\xi)| \leq \varepsilon^{-N} C_l
\int \!\!\! \frac{\dslash\tau}{(1 + |\xi|^2 + \tau^2)^{l/2}}
= \varepsilon^{-N} C_l \langle\xi\rangle^{1-l}\!\!
\int\limits_{\mb{R}} \!\langle r\rangle^{-l} \,\dslash r,
\]
for some $N$ independent of $l \geq 2$ and $\varepsilon$ sufficiently small.
\end{proof}
\begin{theorem}\label{thm_first_order}
Let $u$ be the (unique) solution of the homogeneous Cauchy problem
(\ref{first_order_equ})--(\ref{initial_cond}) and denote by
$\gamma(x_0,\xi_0)$ the maximal bicharacteristic curve passing through
the characteristic point
$(x_0,0;\xi_0,-P_1(0,x_0,\xi_0))\in T^*(\Omega\times\mb{R})\setminus 0$. Then
the generalized wave front set of $u$ is given by
\begin{equation}
\mathrm{WF}_g(u) = \bigcup_{(x_0,\xi_0)\in\mathrm{WF}_g(g)} \gamma(x_0,\xi_0).
\end{equation}
\end{theorem}
\begin{proof} We refer to the discussion in
\cite{Hoermander:V3}, p.\ 390,
which we can adapt to our case with only little changes required.
First, $\partial_t + i P(t,x,D_x)$ is already a partial differential operator and
hence we
obtain $\mathrm{WF}_g(u) \subseteq \mathrm{Char}(\partial_t + i P_1) \not\ni (x,t;0,\tau)$ due to
(\ref{nonchar_theorem}); therefore, combined with Lemma \ref{restriction} we
immediately obtain the inclusion
\[
\mathrm{WF}_g(u(.,t)) \subseteq \{ (x,\xi)\in T^*(\Omega)\setminus 0 :
(x,t;\xi,-P_1(t,x,\xi)) \in \mathrm{WF}_g(u) \}.
\]
On the other hand, based on the inclusion relation (\ref{WF_u_Q}) we can carry out the following construction: let $(x_1,t_1,\xi_1,\tau_1)\in T^*(\Omega\times\mb{R})\setminus 0$ be such that $\xi_1\neq 0$ and $(x_1,\xi_1)\not\in\mathrm{WF}_g(u(.,t_1))$, and let $Q_0$ be as in the proof of Proposition \ref{prop_first_order}, micro-elliptic at $(x_1,\xi_1)$. We claim that $(x_1,t_1,\xi_1,\tau_1)\not\in\mathrm{WF}_g(u)$.
Indeed, one may use cut-off functions of product form $\phi(x) \psi(t)$ with
small supports near $x_1$, $t_1$ and write
$\mathcal{F}\big((\phi\otimes\psi)\, u_\varepsilon\big)(\xi,\tau) = \int e^{-it\tau}\psi(t)\,\mathcal{F}\big(\phi\, u_\varepsilon(.,t)\big)(\xi)\, dt$. We have Colombeau rapid decrease estimates in $(\xi,\tau)$ for those
$\xi$-directions where $Q(t,x,\xi)$ stays micro-elliptic for all $(x,t) \in
\mathrm{supp}(\phi\otimes\psi)$.
Together with the observation at the
beginning of the proof and \eqref{WFg_flow} this implies the equality
\begin{multline*}
\Phi_t(\mathrm{WF}_g(g)) = \mathrm{WF}_g(u(.,t)) \\= \{ (x,\xi)\in T^*(\Omega)\setminus 0 :
(x,t;\xi,-P_1(t,x,\xi)) \in \mathrm{WF}_g(u) \},
\end{multline*}
which yields the asserted statement.
\end{proof}
\section*{Appendix: Pseudodifferential calculus with\\
\hphantom{Appendix:}
general scales}
\setcounter{section}{1}
\renewcommand{\thesection}{\Alph{section}}
\renewcommand{\theequation}{A.\arabic{equation}}
\setcounter{equation}{0}
We provide some background on the required pseudodifferential tools for a sufficiently large class of generalized symbols.
\begin{definition}\label{def_general}
Let $\mathfrak{A}$ be the set of all nets $(\omega_\varepsilon)_\varepsilon\in\mb{R}^{(0,1]}$ such that $c_0\le\omega_\varepsilon\le c_1\varepsilon^{-p}$ for some $c_0,c_1,p > 0$ and for all $\varepsilon$. Let $\mathfrak{B}$ be any subset of $\mathfrak{A}$ closed with respect to pointwise product and maximum. For $m\in\mb{R}$ and $\Omega$ an open subset of $\mb{R}^N$, we define the spaces of $\mathfrak{B}$-nets of symbols
\[
\begin{split}
\underline{\mathcal{S}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n) := \{
(a_\varepsilon)_\varepsilon\in \mathcal{S}^m[\Omega\times\mb{R}^n]:\, &\forall K \Subset \Omega\ \exists (\omega_\varepsilon)_\varepsilon \in \mathfrak{B}\\
& \forall \alpha, \beta \in \mb{N}^n\, \exists c>0\, \forall \varepsilon:\, |a_\varepsilon|^{(m)}_{K,\alpha,\beta} \le c\, \omega_\varepsilon\},
\end{split}
\]
\[
\begin{split}
\underline{\mathcal{S}}_{\mathfrak{B}}^{-\infty}(\Omega\times\mb{R}^n) := \{
(a_\varepsilon)_\varepsilon\in \mathcal{S}^m[\Omega\times\mb{R}^n]&:\, \forall K \Subset \Omega\ \exists (\omega_\varepsilon)_\varepsilon \in \mathfrak{B}\ \forall m \in \mb{R}\\
& \forall \alpha, \beta \in \mb{N}^n\, \exists c>0\, \forall \varepsilon:\, |a_\varepsilon|^{(m)}_{K,\alpha,\beta} \le c\, \omega_\varepsilon\}
\end{split}
\]
and the factor spaces
\[
{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n) := \underline{\mathcal{S}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n) / \mathcal{N}^m(\Omega\times\mb{R}^n),
\]
\[
{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{m,-\infty}(\Omega\times\mb{R}^n) := \underline{\mathcal{S}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n) / \mathcal{N}^{-\infty}(\Omega\times\mb{R}^n)
\]
and
\[
{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{-\infty}(\Omega\times\mb{R}^n) := \underline{\mathcal{S}}_{\mathfrak{B}}^{-\infty}(\Omega\times\mb{R}^n) / \mathcal{N}^{-\infty}(\Omega\times\mb{R}^n).
\]
If\/ $\Omega$ is an open subset of $\mb{R}^n$ then the elements of $\underline{\mathcal{S}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n)$, ${\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n)$, and ${\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{m,-\infty}(\Omega\times\mb{R}^n)$ are called \emph{$\mathfrak{B}$-nets of symbols of order $m$}, \emph{$\mathfrak{B}$-generalized symbols of order $m$}, and \emph{$\mathfrak{B}$-generalized symbols of refined order $m$}, respectively. The sets $\underline{\mathcal{S}}_{\mathfrak{B}}^{-\infty}(\Omega\times\mb{R}^n)$ and ${\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{-\infty}(\Omega\times\mb{R}^n)$ constitute the \emph{$\mathfrak{B}$-nets of smoothing symbols} and the \emph{$\mathfrak{B}$-generalized smoothing symbols}, respectively. Finally, if instead of $\Omega$ we have $\Omega\times\Omega$ then we use the notion of amplitude rather than symbol.
\end{definition}
As already mentioned in
\cite{GGO:03}, if we were to require the above symbol estimates
to hold only for small values of $\varepsilon$,
this would result in larger spaces of nets of symbols as well as somewhat
larger quotient spaces. Even though it would then still be possible to define
pseudodifferential operators with $\mathfrak{B}$-generalized symbols equally
well,
we prefer to consider here the spaces ${\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n)$ defined above,
since a complete pseudodifferential calculus can be developed for them.
\begin{remark}
\label{rem_special}
Note that as special cases of $\mathfrak{B}$-generalized
symbols we obtain for $\mathfrak{B}=\Pi_\mathrm{sc}$ the slow scale generalized
symbols, and for $\mathfrak{B}=\{(\varepsilon^{-N})_\varepsilon : N\in\mb{N}\}$ the regular
generalized symbols ${\wt{\underline{\mathcal{S}}}}_{\mathrm{rg}}^m(\Omega\times\mb{R}^n)$ introduced in earlier work (cf.\
\cite{GGO:03}), to which
we refer for further details and notations concerning regular symbols and
amplitudes.
\end{remark}
In the sequel we say that $(a_\varepsilon)_\varepsilon\in\underline{\mathcal{S}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n)$ is of growth
type $(\omega_\varepsilon)_\varepsilon\in\mathfrak{B}$ on $K \Subset\Omega$ if
$(\omega_\varepsilon)_\varepsilon$ estimates each seminorm $|a_\varepsilon|^{(m)}_{K,\alpha,\beta}$.
We list the basic steps in establishing a calculus with asymptotic
expansions.
\begin{definition}
\label{def_asym_expan_represen}
Let $\{m_j\}_j$ be a decreasing sequence of real numbers tending to $-\infty$ and let $\{(a_{j,\varepsilon})_\varepsilon\}_j$ be a sequence of $\mathfrak{B}$-nets of symbols $(a_{j,\varepsilon})_\varepsilon\in\underline{\mathcal{S}}_{\mathfrak{B}}^{m_j}(\Omega\times\mb{R}^n)$ such that
\begin{equation}\label{growth_type}
\forall K \Subset \Omega\ \exists (\omega_\varepsilon)_\varepsilon \in \mathfrak{B}\ \forall j\in\mb{N}:\ (a_{j,\varepsilon})_\varepsilon\ \text{is of growth type $(\omega_\varepsilon)_\varepsilon$ on $K$}.
\end{equation}
We say that $\sum_j(a_{j,\varepsilon})_\varepsilon$ is the asymptotic expansion of $(a_\varepsilon)_\varepsilon\in\mathcal{E}[\Omega\times\mb{R}^n]$, written $(a_\varepsilon)_\varepsilon\sim\sum_j(a_{j,\varepsilon})_\varepsilon$ for short, iff for all $K\Subset\Omega$ there exists $(\omega_\varepsilon)_\varepsilon\in\mathfrak{B}$ such that for all $r\ge 1$ the difference $(a_\varepsilon-\sum_{j=0}^{r-1}a_{j,\varepsilon})_\varepsilon$ belongs to $\underline{\mathcal{S}}_{\mathfrak{B}}^{m_r}(\Omega\times\mb{R}^n)$ and is of growth type $(\omega_\varepsilon)_\varepsilon$ on $K$.
\end{definition}
\begin{theorem}
\label{theorem_asym_expan_represen}
For any sequence of $\mathfrak{B}$-nets of symbols $(a_{j,\varepsilon})_\varepsilon\in\underline{\mathcal{S}}_{\mathfrak{B}}^{m_j}(\Omega\times\mb{R}^n)$ as in Definition \ref{def_asym_expan_represen} there exists $(a_\varepsilon)_\varepsilon\in\underline{\mathcal{S}}_{\mathfrak{B}}^{m_0}(\Omega\times\mb{R}^n)$ such that $(a_\varepsilon)_\varepsilon\sim\sum_j(a_{j,\varepsilon})_\varepsilon$. Moreover, if $(a'_\varepsilon)_\varepsilon\sim\sum_j(a_{j,\varepsilon})_\varepsilon$ then $(a_\varepsilon-a'_\varepsilon)_\varepsilon\in\underline{\mathcal{S}}_{\mathfrak{B}}^{-\infty}(\Omega\times\mb{R}^n)$.
\end{theorem}
This result is easily obtained from the proof of Theorem 5.3 in
\cite{GGO:03}, noting that we do not have powers of $\varepsilon$ depending on $x$-derivatives and replacing $\varepsilon^{-N}$ with $(\omega_\varepsilon)_\varepsilon\in\mathfrak{B}$. The following proposition concerning negligible nets of symbols is a consequence of Theorem 5.4 in
\cite{GGO:03}.
\begin{proposition}
\label{proposition_asym_expan_negligible}
Let $\{m_j\}_j$ be a decreasing sequence of real numbers tending to $-\infty$ and let $(a_{j,\varepsilon})_\varepsilon\in\mathcal{N}^{m_j}(\Omega\times\mb{R}^n)$ for all $j$. Then there exists $(a_\varepsilon)_\varepsilon\in\mathcal{N}^{m_0}(\Omega\times\mb{R}^n)$ such that for all $r\ge 1$ we have $(a_\varepsilon-\sum_{j=0}^{r-1}a_{j,\varepsilon})_\varepsilon\in\mathcal{N}^{m_r}(\Omega\times\mb{R}^n)$.
\end{proposition}
\begin{definition}
\label{def_asym_expan_symbol}
Let $\{m_j\}_j$ be a decreasing sequence of real numbers tending to $-\infty$. Let $\{a_j\}_j$ be a sequence of $\mathfrak{B}$-generalized symbols $a_j\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{m_j}(\Omega\times\mb{R}^n)$ such that there exists a choice of representatives $(a_{j,\varepsilon})_\varepsilon$ of $a_j$ satisfying \eqref{growth_type}. We say that $\sum_j a_j$ is the asymptotic expansion of $a\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{m_0}(\Omega\times\mb{R}^n)$, written $a\sim\sum_j a_j$ for short, iff there exists a representative $(a_\varepsilon)_\varepsilon$ of $a$ and for all $j$ representatives $(a_{j,\varepsilon})_\varepsilon$ of $a_j$, such that $(a_\varepsilon)_\varepsilon\sim\sum_j(a_{j,\varepsilon})_\varepsilon$.
\end{definition}
Proposition \ref{proposition_asym_expan_negligible} allows us to claim that $a\sim\sum_j a_j$ iff for any choice of representatives $(a_{j,\varepsilon})_\varepsilon$ of $a_j$ there exists a representative $(a_\varepsilon)_\varepsilon$ of $a$ such that $(a_\varepsilon)_\varepsilon\sim\sum_j(a_{j,\varepsilon})_\varepsilon$. This observation combined with Theorem \ref{theorem_asym_expan_represen} is crucial in the proof of Theorem \ref{theorem_asym_expan_symbol}.
\begin{theorem}
\label{theorem_asym_expan_symbol}
For any sequence of $\mathfrak{B}$-generalized symbols $a_j\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{m_j}(\Omega\times\mb{R}^n)$ as in Definition \ref{def_asym_expan_symbol} there exists $a\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{m_0}(\Omega\times\mb{R}^n)$ such that $a\sim\sum_j a_j$. Moreover, if $b\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{m_0}(\Omega\times\mb{R}^n)$ has asymptotic expansion $\sum_j a_j$ then there exists a representative $(a_\varepsilon)_\varepsilon$ of $a$ and a representative $(b_\varepsilon)_\varepsilon$ of $b$ such that $(a_\varepsilon-b_\varepsilon)_\varepsilon\in\underline{\mathcal{S}}_{\mathfrak{B}}^{-\infty}(\Omega\times\mb{R}^n)$.
\end{theorem}
\begin{remark}
\label{remark_negligible_infty}
Definition \ref{def_asym_expan_symbol} can be stated for $\mathfrak{B}$-generalized symbols of refined order. More precisely, if $\{m_j\}_j$ is a sequence as above, $a\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{m_0,-\infty}(\Omega\times\mb{R}^n)$, $a_j\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{m_j,-\infty}(\Omega\times\mb{R}^n)$ for all $j$, and $(a_\varepsilon)_\varepsilon$ and $(a_{j,\varepsilon})_\varepsilon$ denote representatives of $a$ and $a_j$ respectively, then the following assertions are equivalent:
\begin{itemize}
\item[(1)] $a \sim \sum_j a_j$,
\item[(2)] $\exists (a_\varepsilon)_\varepsilon$\, $\exists \{(a_{j,\varepsilon})_\varepsilon\}_j\, :$ $(a_\varepsilon)_\varepsilon \sim \sum_j(a_{j,\varepsilon})_\varepsilon$,
\item[(3)] $\forall \{(a_{j,\varepsilon})_\varepsilon\}_j$\, $\exists (a_\varepsilon)_\varepsilon\, :$ $(a_\varepsilon)_\varepsilon \sim \sum_j(a_{j,\varepsilon})_\varepsilon$,
\item[(4)] $\forall \{(a_{j,\varepsilon})_\varepsilon\}_j$\, $\forall (a_\varepsilon)_\varepsilon\, :$ $(a_\varepsilon)_\varepsilon \sim \sum_j(a_{j,\varepsilon})_\varepsilon$.
\end{itemize}
\end{remark}
We briefly recall the main definitions and results concerning pseudodifferential operators with $\mathfrak{B}$-generalized symbols. As already observed after Theorem \ref{theorem_asym_expan_represen}, the proofs are obtained from the corresponding ones in
\cite{GGO:03}, with the slight difference of having a net $(\omega_\varepsilon)_\varepsilon$ in $\mathfrak{B}$ instead of a power $\varepsilon^{-N}$.
\begin{definition}
\label{def_pseudo}
Let $a\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{m}(\Omega\times\mb{R}^n)$. The pseudodifferential operator with $\mathfrak{B}$-generalized symbol $a$ is the map $a(x,D):\mathcal{G}_c(\Omega)\to\mathcal{G}(\Omega)$ given by the formula
\[
a(x,D)u := \int_{\mb{R}^n}e^{ix\xi}a(x,\xi)\widehat{u}(\xi)\, \dslash\xi := \biggl[ \biggl( \int_{\mb{R}^n} e^{ix\xi} a_\varepsilon(x,\xi) \widehat{u}_\varepsilon(\xi)\, \dslash\xi \biggr)_\varepsilon \biggr] .
\]
\end{definition}
Proposition 4.7 in
\cite{GGO:03} guarantees the well-definedness of $a(x,D)$ as well as the additional mapping property $a(x,D):\mathcal{G}^\infty_c(\Omega)\to\mathcal{G}^\infty(\Omega)$.
In the following, we make
occasional use of some basic properties of the
space $L(\mathcal{G}_c(\Omega),\wt{\mb{C}})$ of $\wt{\mb{C}}$-linear maps from $\mathcal{G}_c(\Omega)$ to $\wt{\mb{C}}$.
In particular, we recall that $\mathcal{G}(\Omega)$ is linearly embedded into
$L(\mathcal{G}_c(\Omega),\wt{\mb{C}})$ via generalized integration and that $L(\mathcal{G}_c(\Omega),\wt{\mb{C}})$ is a sheaf with respect to $\Omega$. This and further results
are discussed in detail in
\cite[Section 2]{GGO:03}.
\begin{definition}
\label{def_kernel}
Let $a\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n)$. The kernel of $a(x,D)$ is the
$\wt{\mb{C}}$-linear map $k\colon \mathcal{G}_c(\Omega\times\Omega) \to \wt{\mb{C}}$
defined by
\begin{equation}
\label{kernel}
k(u):=\int_\Omega a(x,D)(u(x,\cdot))\, dx .
\end{equation}
\end{definition}
To see that formula \eqref{kernel} makes sense for $k$ as an element of
$L(\mathcal{G}_c(\Omega\times\Omega),\wt{\mb{C}})$, we may reason as in
\cite[Proposition 3.10 and Remark 3.11]{GGO:03} that we have
$a(x,D)(u(x,\cdot)) \in \mathcal{G}_c(\Omega)$. Moreover, for all $u,v\in\mathcal{G}_c(\Omega)$
\[
k(v\otimes u) = \int_\Omega a(x,D)u\, v(x)\, dx = \int_\Omega u(x)\,{}^ta(x,D)v\, dx ,
\]
where $v\otimes u := [(v_\varepsilon(x)u_\varepsilon(y))_\varepsilon] \in \mathcal{G}_c(\Omega\times\Omega)$; as a
consequence, since $\mathcal{G}(\Omega)$ is embedded into $L(\mathcal{G}_c(\Omega),\wt{\mb{C}})$, pseudodifferential operators having the same kernel are identical.
We say that a pseudodifferential operator with $\mathfrak{B}$-generalized symbol is \emph{properly supported} if the support of its kernel is a proper set in $\Omega\times\Omega$. As shown in Proposition 4.17 of
\cite{GGO:03}, any properly supported pseudodifferential operator $a(x,D)$ maps $\mathcal{G}_c(\Omega)$ into $\mathcal{G}_c(\Omega)$ and $\mathcal{G}^\infty_c(\Omega)$ into $\mathcal{G}^\infty_c(\Omega)$, and it can be extended uniquely to a linear map from $\mathcal{G}(\Omega)$ into $\mathcal{G}(\Omega)$ such that for all $u\in\mathcal{G}(\Omega)$ and $v\in\mathcal{G}_c(\Omega)$
\[
\int_\Omega a(x,D)u\, v(x)\, dx = \int_\Omega u(x)\,{}^ta(x,D)v\, dx .
\]
This extension maps $\mathcal{G}^\infty(\Omega)$ into $\mathcal{G}^\infty(\Omega)$. By the same reasoning as in Proposition 4.11 of
\cite{GGO:03}, one proves that each pseudodifferential operator $a(x,D)$ with $\mathfrak{B}$-generalized symbol has the pseudolocality property, i.e.,
\[
\mathrm{sing\,supp}_g(a(x,D)u) \subseteq \mathrm{sing\,supp}_g(u)
\]
for all $u\in\mathcal{G}_c(\Omega)$, and that this result remains valid for all $u$ in $\mathcal{G}(\Omega)$ if $a(x,D)$ is properly supported.
Pseudodifferential operators can also be defined by $\mathfrak{B}$-generalized amplitudes. This means, for $b\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^m(\Omega\times\Omega\times\mb{R}^n)$, to define the action of the corresponding operator on $u\in\mathcal{G}_c(\Omega)$ via the oscillatory integral
\[
Bu(x) := \int_{\Omega\times\mb{R}^n} e^{i(x-y)\xi}b(x,y,\xi)u(y)\, dy\, \dslash\xi ,
\]
which gives a Colombeau function in $\mathcal{G}(\Omega)$ (cf.\ Section 3 in
\cite{GGO:03}). It is clear that the same constructions concerning the kernel and properly supported pseudodifferential operators are still valid. For the sake of completeness we recall that any \emph{integral operator $R$ with regular kernel}, i.e., any operator of the form
\[
Ru(x) = \int_\Omega k(x,y)u(y)\, dy,\qquad u\in\mathcal{G}_c(\Omega),
\]
where $k\in\mathcal{G}^\infty(\Omega\times\Omega)$, can be written as a pseudodifferential operator with regular amplitude in ${\wt{{\mathcal{S}}}}_{\mathrm{rg}}^{-\infty}(\Omega\times\Omega\times\mb{R}^n)$; vice versa, if $B$ is a pseudodifferential operator with amplitude in ${\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{-\infty}(\Omega\times\Omega\times\mb{R}^n)$, then its kernel is a regular generalized function. Finally, an operator with regular kernel is regularizing, i.e., it maps $\mathcal{G}_c(\Omega)$ into $\mathcal{G}^\infty(\Omega)$.
Consider $a\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n)$, let $k$ be the kernel of $a(x,D)$ and
let $\chi\in\ensuremath{\mathcal{C}^\infty}(\Omega\times\Omega)$ be a proper function identically $1$ in a
neighborhood of $\mathrm{supp}\, k$. We may write $a(x,D)=a_0(x,D)+a_1(x,D)$, where
\begin{equation}
\label{equation1}
a_0(x,D)u:=\int_{\Omega\times\mb{R}^n} e^{i(x-y)\xi}a(x,\xi)\chi(x,y)u(y)\, dy\, \dslash\xi
\end{equation}
is a properly supported pseudodifferential operator with generalized amplitude $a(x,\xi)\chi(x,y)\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^m(\Omega\times\Omega\times\mb{R}^n)$ and
\begin{equation}
\label{equation2}
a_1(x,D)u:=\int_{\Omega\times\mb{R}^n} e^{i(x-y)\xi}a(x,\xi)(1-\chi(x,y))u(y)\, dy\, \dslash\xi
\end{equation}
is an operator with regular kernel in $\mathcal{G}^\infty(\Omega\times\Omega)$. The following theorem shows that every properly supported pseudodifferential operator defined via an amplitude can be written in the form of Definition \ref{def_pseudo}. This is the main tool in the proof of Theorem \ref{theorem_calculus}.
\begin{theorem}
\label{theorem_pre_calculus}
For any properly supported pseudodifferential operator $A$ with amplitude $a\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^m(\Omega\times\Omega\times\mb{R}^n)$ there exists $\sigma\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n)$ such that $A\equiv \sigma(x,D)$ on $\mathcal{G}_c(\Omega)$ and $\sigma \sim \sum_\gamma \frac{1}{\gamma !}\partial^\gamma_\xi D^\gamma_y a(x,y,\xi)_{\vert_{x=y}}$.
\end{theorem}
\begin{theorem}
\label{theorem_calculus}
Let $a\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n)$ and $b\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{m'}(\Omega\times\mb{R}^n)$ be $\mathfrak{B}$-generalized symbols. If the corresponding pseudodifferential operators are properly supported then
\begin{itemize}
\item[(i)] there exists $a'\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n)$ such that ${\ }^ta(x,D)\equiv a'(x,D)$ on $\mathcal{G}_c(\Omega)$ and $a'\sim\sum_\gamma \frac{(-1)^{|\gamma|}}{\gamma !}\partial^\gamma_\xi D^\gamma_x a(x,-\xi)$;
\item[(ii)] there exists $a^\ast\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n)$ such that $a(x,D)^\ast\equiv a^\ast(x,D)$ on $\mathcal{G}_c(\Omega)$ and $a^\ast\sim\sum_\gamma\frac{1}{\gamma !}\partial^\gamma_\xi D^\gamma_x \overline{a}$;
\item[(iii)] there exists $a\sharp b\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{m+m'}(\Omega\times\mb{R}^n)$ such that $a(x,D)\circ b(x,D)\equiv a\sharp b(x,D)$ on $\mathcal{G}_c(\Omega)$ and $a\sharp b\sim\sum_\gamma \frac{1}{\gamma!}\partial^\gamma_\xi a\, D^\gamma_x b$.
\end{itemize}
\end{theorem}
\begin{proof}
We briefly sketch the proof of assertion $(ii)$. For the details concerning the transposed operator and the product we refer to \cite{GGO:03}. By definition of the formal adjoint, $\langle a(x,D)v,\overline{u}\rangle = \langle v,\overline{a(x,D)^\ast u}\rangle$ for all $u,v\in\mathcal{G}_c(\Omega)$. This means
\[
\langle v,\overline{a(x,D)^\ast u} \rangle = \int_\Omega \overline{\int_{\Omega\times\mb{R}^n}e^{i(x-y)\xi}\overline{a}(y,\xi)u(y)\, dy\, \dslash\xi}\, v(x)\, dx,
\]
which, from the embedding of $\mathcal{G}(\Omega)$ into $L(\mathcal{G}_c(\Omega),\wt{\mb{C}})$, leads us to $\displaystyle a(x,D)^\ast u = \int_{\Omega\times\mb{R}^n} e^{i(x-y)\xi}\overline{a}(y,\xi)u(y)\, dy\, \dslash\xi.$ Now, $a(x,D)^\ast$ is a properly supported pseudodifferential operator with amplitude $\overline{a}(y,\xi)\in{\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^m(\Omega\times\Omega\times\mb{R}^n)$ and an application of Theorem \ref{theorem_pre_calculus} completes the proof.
\end{proof}
Along the lines of Proposition 5.17 in \cite{GGO:03} we easily prove that the composition of a properly supported pseudodifferential operator with $\mathfrak{B}$-generalized symbol and an operator with regular kernel is an operator with regular kernel. Therefore, combining \eqref{equation1} and \eqref{equation2} with Theorem \ref{theorem_calculus}, we have that for arbitrary pseudodifferential operators with $\mathfrak{B}$-generalized symbol the equalities $(i)$ and $(ii)$ on $\mathcal{G}_c(\Omega)$ are valid modulo some operator with regular kernel. Furthermore the composition $a(x,D)\circ b(x,D)$, where at least one of the operators is properly supported, is a pseudodifferential operator $a\sharp b(x,D)$ modulo some operator with regular kernel.
\begin{remark}\leavevmode
\label{remark_rho_delta}
\begin{trivlist}
\item[(i)] It is clear from the structure of ${\wt{\underline{\mathcal{S}}}}_{\mathfrak{B}}^{m,-\infty}(\Omega\times\mb{R}^n)$ that all the definitions and results of this appendix can be stated for symbols of refined order.
\item[(ii)] Let now ${\mathcal{S}}^m_{\rho,\delta}[\Omega\times\mb{R}^n]$ be the set of all nets $(a_\varepsilon)_\varepsilon\in{S}^m_{\rho,\delta}(\Omega\times\mb{R}^n)^{(0,1]}$ with
\[
|a_\varepsilon|^{(m)}_{K,\alpha,\beta,\rho,\delta} := \sup_{x\in K,\xi\in\mb{R}^n}|\partial^\alpha_\xi\partial^\beta_x a_\varepsilon(x,\xi)|\langle\xi\rangle^{-m+\rho|\alpha|-\delta|\beta|}
\]
seminorm in ${S}^m_{\rho,\delta}(\Omega\times\mb{R}^n)$. We define $\underline{\mathcal{S}}^m_{\mathfrak{B},\rho,\delta}(\Omega\times\mb{R}^n)$ and $\underline{\mathcal{N}}^m_{\rho,\delta}(\Omega\times\mb{R}^n)$ as the subspaces of ${\mathcal{S}}^m_{\rho,\delta}[\Omega\times\mb{R}^n]$ obtained by requiring the same estimate of $|a_\varepsilon|^{(m)}_{K,\alpha,\beta,\rho,\delta}$ as in $\underline{\mathcal{S}}_{\mathfrak{B}}^m(\Omega\times\mb{R}^n)$ and $\mathcal{N}^m(\Omega\times\mb{R}^n)$ respectively. In conclusion, under the assumption $0 \le \delta < \rho \le 1$ it is possible to develop a pseudodifferential calculus for generalized symbols in ${{\wt{\underline{\mathcal{S}}}}^m_{\mathfrak{B},\rho,\delta}}(\Omega\times\mb{R}^n) := \underline{\mathcal{S}}^m_{\mathfrak{B},\rho,\delta}(\Omega\times\mb{R}^n) / \underline{\mathcal{N}}^m_{\rho,\delta}(\Omega\times\mb{R}^n)$ and ${{\wt{\underline{\mathcal{S}}}}^{m,-\infty}_{\mathfrak{B},\rho,\delta}}(\Omega\times\mb{R}^n) := \underline{\mathcal{S}}^m_{\mathfrak{B},\rho,\delta}(\Omega\times\mb{R}^n) / \underline{\mathcal{N}}^{-\infty}(\Omega\times\mb{R}^n)$, as in the classical theory.
\end{trivlist}
\end{remark}
\paragraph{Acknowledgement:} The authors are grateful to {\sc DIANA}'s
members (cf.\\ {\tt http://www.mat.univie.ac.at/\~{ }diana}) for several
inspiring discussions on the subject. Our special thanks go to Roland
Steinbauer for having posed the striking question leading us to the additional
characterization Theorem \ref{theorem_Wcl}.
\end{document} |
\begin{document}
\title{Weighted Parsing for Grammar-Based Language Models over Multioperator Monoids}
\begin{abstract}
We develop a general framework for weighted parsing which is built on top of grammar-based language models and employs multioperator monoids as weight algebras.
It generalizes previous work in that area (semiring parsing, weighted deductive parsing) and also covers applications outside the classical scope of parsing, e.g., algebraic dynamic programming.
We show an algorithm for weighted parsing and, for a large class of weighted grammar-based language models, we prove formally that it terminates and is correct.
\end{abstract}
\tableofcontents
\section{Introduction}
\label{sec:intro}
In natural language processing (NLP), parsing is the syntactic analysis of sentences.
Given a sentence $a$ from some natural language $\alg L$, e.g.,
\[
a = \terminal{fruit} \ \terminal{flies} \ \terminal{like} \ \terminal{bananas} \enspace,
\]
the goal is to produce some syntactic description of $a$.
This syntactic description can reflect three different kinds of relationships between words occurring in $a$: sequence, dependency, and constituency~\cite{hutsom92}.
Parsing is usually performed using some language model.
Here we will focus on that branch of constituency parsing in which the language models are provided by some kind of formal grammar, like context-free grammars (CFG)~\cite{Chomsky1963}, linear context-free rewriting systems (LCFRS)~\cite{vijweijos87}, multiple context-free grammars (MCFG)~\cite{Sekietal91}, or tree-adjoining grammars (TAG)~\cite{jossch97}.
Formally, each sentence $a$ from the natural language $\alg L$ is mapped to an element of the set $\mathscr H$ of constituent trees of some given grammar $G$:
\[
\fparse: \alg L \to \mathscr H \enspace. \tag{constituent parsing}
\]
In many NLP applications, it is also desirable to obtain information about a given sentence which is different from its constituent tree.
We refer to the quantity which is to be computed as \emph{parsing objective}, and we call the mapping from $\alg L$ to the set of parsing objectives a \emph{parsing problem}.
In the following, we give some examples of parsing problems (cf.~\cite{Goodman1999}).
First, it can be the case that some sentence $a$ is not grammatical according to $G$ and therefore no constituent tree exists for it.
We can answer the question whether a sentence is grammatical or not by mapping each sentence to an element of the set $\mathbb B = \{\mathsf{t\mkern-1mu t}, \mathsf{f\mkern-1mu f}\}$ of Boolean truth values:
\[
\fparse: \alg L \to \mathbb B \enspace. \tag{recognition}
\]
Second, natural languages are ambiguous and hence a sentence can have several constituent trees, each representing a different meaning (cf.\ Figure~\ref{fig:asts}).
We can tell how many constituent trees there are for some sentence by mapping it to a natural number:
\[
\fparse: \alg L \to \mathbb N \enspace. \tag*{\makebox[2cm][r]{(number of derivations)}}
\]
\begin{figure}
\caption{Two constituent trees for the sentence $a = \terminal{fruit} \ \terminal{flies} \ \terminal{like} \ \terminal{bananas}$.}
\label{fig:asts}
\end{figure}
Our original idea to parsing is addressed by assigning to each sentence $a$ a \emph{set} of constituent trees, i.e., an element of $\mathcal P(\mathscr H)$, the powerset of $\mathscr H$.
This set is empty if $a$ is not grammatical and otherwise contains all constituent trees for $a$:
\[
\fparse: \alg L \to \mathcal P(\mathscr H) \enspace. \tag{set of derivations}
\]
We note that, since $\mathscr H$ is usually infinite, a sentence can be mapped to an infinite set.
Third, it is usually unfeasible to work with all constituent trees of a sentence.
Instead, we want to restrict ourselves to the \enquote{most suitable} ones.
This is usually done by employing a probabilistic language model, e.g., a probabilistic context-free grammar (PCFG).
With a PCFG, we can assign to each constituent tree a \emph{probability}, i.e., a value in ${\mathbb R_0^1}$ (the interval of real numbers between $0$ and $1$).
Then we can map each sentence $a$ to the highest probability among every constituent tree for $a$:
\[
\fparse: \alg L \to \mathbb R_0^1 \enspace. \tag{best probability}
\]
The \emph{best parse} of a sentence $a$ is the combination of the constituent tree for $a$ having the highest probability and this probability value.
Since several constituent trees can have the same probability, it is necessary to return a set of constituent trees:
\[
\fparse: \alg L \to \mathbb R_0^1 \times \mathcal P(\mathscr H) \enspace. \tag{best derivation}
\]
The elements of $\fparse(a)$ are commonly called \emph{Viterbi parses of $a$}.
In an obvious way, one can also compute a set of best constituent trees (best $n$ derivations).
Instead of best probabilities, one can be interested in mapping a sentence $a$ to the sum of the probabilities of each constituent tree of $a$.
The resulting value is an element of $\mathbb R_+$, the set of non-negative real numbers.
\[
\fparse: \alg L \to \mathbb R_+ \enspace. \tag{string probability}
\]
The central idea of \citet{Goodman1999} is to abstract from the particular parsing problems from above by considering the parsing problem
\[
\fparse: \alg L \to \walg K
\]
for any complete semiring $\walg K$, which he called \emph{semiring parsing problem}.
By choosing appropriate semirings, Goodman showed that the particular parsing problems from above are instances of the semiring parsing problem~\cite[Figure 5]{Goodman1999}.
More precisely, a complete semiring is an algebra $(\walg{K},\oplus,\otimes,\walg{0},\walg{1},\infsumop)$, where $\oplus$ (\emph{addition}) and $\otimes$ (\emph{multiplication}) are binary operations on $\walg K$ and $\infsumop$ is an extension of $\oplus$ to infinitely many arguments (where $\oplus$, $\otimes$, and $\infsumop$ satisfy certain algebraic laws).
For the semiring parsing problem, we assume that each rule of the grammar $G$ (modeling $\alg L$) is assigned an element of $\walg K$, called its \emph{weight}.
Then, for each sentence $a$, the value $\fparse(a)$ is the addition of the weights of all abstract syntax trees of $a$ (using $\infsumop$ if $a$ has infinitely many abstract syntax trees); the weight of a single abstract syntax tree is the multiplication of the weights of all occurrences of rules in that tree.
In this paper, we introduce \emph{weighted RTG-based language models} (wRTG-LMs) as a new framework for weighted parsing (where RTG stands for regular tree grammar) and define the \emph{M-monoid parsing problem} (cf.~Section \ref{sec:weighted-RTG-based-grammars}).
In the following, we briefly explain these two concepts.
A wRTG-LM is as a tuple
\[
\overline G = \Big((G, (\alg L, \phi)), \ (\walg K, \oplus, \mathbb 0, \Omega, \infsumop), \ \wt\Big)
\]
where
\begin{itemize}
\item $G$ is an RTG~\cite{Brainerd1969} and $(\alg L,\phi)$ is a \emph{language algebra},
\item $(\walg K, \oplus, \mathbb 0, \Omega, \infsumop)$ is a complete M-monoid~\cite{Kuich1999}, called \emph{weight algebra}, and
\item $\wt$ maps each rule of $G$ to an operation from $\Omega$.
\end{itemize}
Let us explain these components.
We call the tuple $(G, (\alg L,\phi))$ \emph{RTG-based language model} (RTG-LM) and its meaning is based on the initial algebra semantics approach of \citet{Goguen1977} as follows.
The RTG $G$ generates a set of trees.
Each generated tree is evaluated in the language algebra $(\alg L,\phi)$ to a \emph{syntactic object} $a$, i.e., an element of the modeled language $\alg L$; in this sense, the tree describes the grammatical structure of $a$.
For instance, if $\alg L$ is a natural language, then its sentences (if viewed as sequences of words) are the yields of such trees.
As another example, $\alg L$ could be a set of trees or a set of graphs, and then each tree generated by $G$ represents the structure of such a syntactic object.
Moreover, each grammar of the above mentioned classes (e.g., CFG, LCFRS, MCFG, and TAG) can be formalized as an RTG-LM.
Complete M-monoids can be understood as a generalization of complete semirings in the sense that the multiplication $\otimes$ is replaced by a set $\Omega$ of finitary operations.
Then each rule $r$ of $G$ is assigned an operation $\wt(r) \in \Omega$, and the weight of an abstract syntax tree of $G$ is obtained by evaluating the corresponding term over operations in the M-monoid (similar to the evaluation of arithmetic expressions).
For example, each complete semiring can be viewed as a complete M-monoid by embedding the multiplication into $\Omega$.
Moreover, complete M-monoids can be used for parsing objectives beyond the above mentioned ones.
For instance, the intersection of a fixed CFG and a sentence $a$ from a corpus is used in EM training~\cite{demlairub77} of PCFGs~\cites{Bak79}{LarYou90}{NedSat08}, and this intersection may be viewed as a parsing objective.
Thus, the set $\walg K$ of parsing objectives is a set of CFGs and the set $\Omega$ contains operations which combine a number of CFGs into a single CFG (according to the construction of \citet{BarPerSha61}).
As another example, the objectives of an algebraic dynamic programming (ADP)~\cite{GieMeySte04} problem can be viewed as parsing objectives. Then the operations of the corresponding complete M-monoid combine partial solutions to solutions of larger subproblems.
Examples of ADP problems are computing the minimum edit distance or optimal matrix chain multiplication. Hence complete M-monoids form a very flexible class of weight algebras.
Now we turn to the second concept: the \emph{M-monoid parsing problem}.
It is defined as follows. \\[4mm]
\textbf{Given:}
\begin{enumerate}
\item a wRTG-LM $\big((G,(\alg L,\phi)),(\walg{K},\oplus,\welem{0},\Omega,\psi,\infsum),\wt\big)$ and
\item an $a \in \alg L$,
\end{enumerate}
\textbf{Compute:}
\(
\displaystyle\fparse(a) = \infsum_{d \in \mathrm{AST}(G, a)} \sem[\walg K]{\wt(d)} \enspace.
\)\\[4mm]
where
\begin{itemize}
\item $\mathrm{AST}(G, a)$ is the set of all abstract syntax trees (AST) generated by $G$ that evaluate in the language algebra $(\alg L,\phi)$ to $a$,
\item $\wt(d)$ is the tree over operations obtained from $d$ by replacing each occurrence of a rule by $\wt(r)$,
\item $\sem[\walg K]{\wt(d)}$ is the evaluation of $\wt(d)$ in the weight algebra $(\walg{K},\oplus,\welem{0},\Omega,\psi,\infsum)$.
\end{itemize}
By our considerations from above, the semiring parsing problem is an instance of the M-monoid parsing problem (cf.\ Section~\ref{sec:mmonoids-associated-with-semirings}). This holds also true for the computation of the intersection of a grammar and a sentence (cf.\ Section~\ref{sec:intersection}) and each ADP problem (cf.\ Section~\ref{sec:adp}).
We also propose an algorithm to solve the M-monoid parsing problem under certain conditions, and we call our algorithm the \emph{M-monoid parsing algorithm} (cf.~Section \ref{sec:algorithm}). Here we are faced with the difficulty that the sum
\[
\infsum_{d \in \mathrm{AST}(G, a)} \sem[\walg K]{\wt(d)}
\]
can have infinitely many summands (infinite sum). Clearly, this cannot be done by a naive terminating algorithm.
Hence the applicability of our M-monoid parsing problem is restricted to cases in which the infinite sum coincides with some finite sum (cf. Theorem~\ref{thm:tr-trc}).
In the literature on weighted parsing, a few algorithms which are limited to specific weighted parsing problems have been investigated.
\begin{itemize}
\item The semiring parsing algorithm has been proposed by \citet{Goodman1999} to solve the semiring parsing problem.
It is a pipeline with two phases.
The first phase takes as input a context-free grammar, a deduction system~\cite{shischper95}, and a syntactic object $a$ and computes a context-free grammar $G'$ (using a construction idea of~\citealp{BarPerSha61}).
The second phase takes $G'$ as input and attempts to calculate $\fparse(a)$ (see above).
Since, in general, $\fparse(a)$ is an infinite sum, this only succeeds if $G'$ is acyclic.
Goodman states that in applications, this computation needs to be replaced by instructions specific to the used semiring.
\item The weighted deductive parsing algorithm by \citet{ned03} addresses this problem by restricting itself to weighted parsing problems where the weight algebra is superior (cf.\ Section~\ref{sec:superior-mmonoids}).
Nederhof's algorithm is a two-phase pipeline, too, where the first phase is the same as in Goodman's approach (but allowing more flexible deduction systems).
In the second phase, he employs the algorithm of \citet{Knuth1977}, which is a generalization of Dijkstra's shortest path algorithm~\cite{dij59}.
This also works in cases where $G'$ is cyclic.
\item The single source shortest distance algorithm by \citet{Mohri2002} is applicable to graphs of which the edges are weighted with elements of some semiring that is closed for the graph (cf.\ Section~\ref{sec:closed-definition}).
This is a much weaker restriction than acyclicity or superiority.
While Mohri's algorithm is not a parsing algorithm, it can be used in the second phase as an alternative to Knuth's algorithm if the CFG $G'$ is non-branching, i.e., a linear grammar~\cite[Def.~1]{kha74}.
\end{itemize}
In the same way as the algorithms of Goodman and Nederhof, our M-monoid parsing algorithm is also a two-phase pipeline (cf.\ Figure~\ref{fig:alg}). The inputs are a wRTG-LM $\overline G$ and a syntactic object $a$.
In the first phase, we use the \emph{canonical weighted deduction system} to compute a new wRTG-LM $\overline G{}'$ (cf. Section~\ref{sec:weighted-deduction-systems}).
This is similar to the first phases of Goodman and Nederhof, but our deduction system always reflects the CYK algorithm instead of being an additional input.
In the second phase, we employ our \emph{value computation algorithm} (cf. Section~\ref{sec:value-computation-algorithm}).
It is a generalization of Mohri's algorithm, which is in spirit of Knuth's generalization of Dijkstra's algorithm.
Thus the M-monoid parsing algorithm is applicable to every closed wRTG-LM, which includes the cases in which the algorithms of Goodman and Nederhof are applicable.
\begingroup\setlength\textfloatsep{10.0pt plus 2.0pt minus 10.0pt}
\begin{figure}
\caption{Two-phase pipeline for solving the M-monoid parsing problem ($A_0'$ is the initial nonterminal of~$G'$).}
\label{fig:alg}
\end{figure}
\endgroup
In this paper, we formally prove the following results concerning the M-monoid parsing algorithm.
\begin{itemize}
\item The value computation algorithm terminates for every closed wRTG-LM as input (Theorem~\ref{thm:vca-terminating}).
\item The value computation algorithm is correct for every closed wRTG-LM as input (Corollary~\ref{cor:vca-correct}).
\item The M-monoid parsing algorithm terminates and is correct for every closed wRTG-LM with finitely decomposable language algebra and for every nonlooping wRTG-LM with finitely decomposable language algebra as input (Theorem~\ref{thm:terminating-correct}).
\end{itemize}
These proofs are based on two fundamental results on closed wRTG-LMs (Theorem~\ref{thm:outside-trees-subsumed} and Theorem~\ref{thm:tr-trc}).
Moreover, we prove that several classes of wRTG-LMs are closed (Theorems~\ref{thm:applications} and~\ref{thm:applications2}).
We show that the two advanced parsing problems from above are indeed instances of the M-monoid parsing problem:
\begin{itemize}
\item Computing the intersection of a grammar and a syntactic object is an M-monoid parsing problem (Theorem~\ref{thm:intersection}).
\item Every ADP problem is an M-monoid parsing problem (Theorem~\ref{thm:ADP-M-monoid}).
\end{itemize}
Finally, we prove that the M-monoid parsing algorithm is applicable to six particular classes of wRTG-LMs (Corollary~\ref{cor:applicability}).
The key ideas and main results of this article were presented at FSMNLP~2019~\cite{moevog19}.
This paper is self-contained in the sense that we recall all necessary definitions and we provide full proofs of each result. These characteristics are the reason for the length of the paper. Those readers who are familiar with universal algebra and regular tree grammars may skip the preliminaries on first reading and consult them on demand. With a few exceptions, we have placed the proofs of the statements into appendices, so that readers who are not so much interested in them can read more smoothly through the text.
\section{Preliminaries}
\subsection{Basic mathematical notions}
\paragraph{Number sets.}
We denote the set of \emph{natural numbers} including $0$ by~$\mathbb N$ and the set of \emph{real numbers} by~$\mathbb R$.
The usual addition and multiplication on~$\mathbb N$ and~$\mathbb R$ are denoted by~$+$ and~$\cdot$,
respectively.
Furthermore, we define the following sets:
\begin{itemize}
\item $\mathbb N_+ = \mathbb N \setminus \{ 0\}$ (the set of \emph{natural numbers without~$0$}),
\item $\mathbb R_+ = \{ x \in \mathbb R \mid x \ge 0 \}$ (the set of \emph{non-negative real numbers}),
\item $\mathbb R_0^1 = \{ x \mid x \in \mathbb R \text{ and } 0 \le x \le 1 \}$ (the set of \emph{real numbers between $0$ and $1$}),
and
\item $\mathbb R_0^\infty = \{ x \mid x \in \mathbb R \text{ and } x \ge 0 \} \cup \{ \infty \}$ (the set of \emph{non-negative real numbers with infinity}),
where we extend the usual addition and multiplication in the following way to operate with $\infty$:
\begin{align*}
r + \infty &= \infty \tag{for every $r \in \mathbb R_0^\infty$} \\
r \cdot \infty &= \infty \tag{for every $r \in \mathbb R_0^\infty \setminus \{ 0 \}$} \\
\infty \cdot 0 &= 0 \enspace.
\end{align*}
\end{itemize}
For every $j, k \in \mathbb N$, we denote the set $\{ j, \dots, k \} \subseteq \mathbb N$ by $[j, k]$.
Furthermore, we write $[k]$ rather than~$[1, k]$.
\paragraph{Boolean set.}
Let~$\mathsf{t\mkern-1mu t}$ denote true and~$\mathsf{f\mkern-1mu f}$ denote false.
We define $\mathbb B = \{ \mathsf{t\mkern-1mu t}, \mathsf{f\mkern-1mu f} \}$ (the \emph{Boolean set}).
\paragraph{Sets, binary relations, orders, and families.}
In the following let $A$, $B$, and $C$ be sets.
We denote the cardinality of $A$ by $|A|$.
The power set of $A$ is denoted by $\mathcal P(A)$.
If $A$ contains exactly one element, then we identify $A$ with its element.
\index{u@$\mathbin{\dot{\cup}}$}
\index{disjoint union}
We say that~$C$ is the \emph{disjoint union of~$A$ and~$B$}, denoted by $C = A \mathbin{\dot{\cup}} B$, if $C = A \cup B$ and $A \cap B = \emptyset$.
\index{relation}
\index{relation!binary}
A \emph{binary relation on~$A$ and~$B$} is a subset $R$ of $A \times B$.
Let~$a \in A$ and $b\in B$.
We write $a R b$ instead of $(a,b) \in R$. For each $A' \subseteq A$ we define $R(A')= \{b \in B \mid \exists a \in A'\colon aRb\}$.
\index{relation!reverse}
The \emph{inverse relation of~$R$} is the relation $R^{-1} \; = \{ (b,a) \mid a R b\}$
on $B$ and $A$.
\index{relation!right-unique}
\index{relation!functional}
We call~$R$ \emph{right-unique} (and \emph{functional}), if $|\{ b \mid a R b \}| \le 1$ (resp., $|\{ b \mid a R b \}| = 1$) for every $a \in A$.
\index{partial function}
\index{mapping}
Usually, a right-unique relation (and a functional relation) on $A$ and $B$ is called \emph{partial function} (resp., \emph{mapping}) and it is denoted by $f: A \to\hspace{-2mm}\shortmid \hspace{2mm} B$ (resp., $f: A \rightarrow B$).
Since we identify a set with one element with this element, we write $f(a) = b$ rather than $f(\{a\}) = \{b\}$ for a mapping $f$.
A mapping~$f: A \to B$ is
\index{mapping!injective}
\index{mapping!surjective}
\index{mapping!bijective}
\begin{itemize}
\item \emph{injective}, if $|f^{-1}(b)| \le 1$ for every $b \in B$,
\item \emph{surjective}, if $|f^{-1}(b)| \ge 1$ for every $b \in B$,
and
\item \emph{bijective}, if it is both injective and surjective.
\end{itemize}
Let $k \in \mathbb N$, $A_1,\dots,A_k$ be sets and $g: A_1 \times \dots \times A_k \rightarrow A$ be a mapping.
\index{mapping!extension to sets}
The \emph{extension of~$g$ to sets} is the mapping $\widehat{g}: \mathcal P(A_1) \times \dots \times \mathcal P(A_k) \rightarrow \mathcal P(A)$, which is defined for every $F_1 \subseteq A_1,\dots,F_k \subseteq A_k$ as
\[ \widehat{g}(F_1,\dots,F_k) = \{ g(a_1,\dots,a_k) \mid a_1 \in F_1,\dots,a_k \in F_k \} \enspace. \]
In the sequel, we will denote the extension also by $g$.
\index{relation!endorelation}
An \emph{endorelation on~$A$} is a binary relation on~$A$ and~$A$.
\index{relation!identity relation}
The \emph{identity relation on~$A$}, denoted by $\id(A)$, is the endorelation on~$A$ which is defined as $\id(A) = \{ (a,a) \mid a \in A \}$.
Let $a, b \in A$ and $k \in \mathbb N$.
We write $a R^k b$ if there are $a_0, \dots, a_k \in A$ such that $a_0 = a$, $a_k = b$, and $a_{i-1} R a_i$ for every $i \in [k]$.
In particular, $a R^0 a$ for every $a \in A$.
\index{relation!reflexive}
\index{relation!antisymmetric}
\index{relation!transitive}
\index{relation!total}
\index{relation!well-founded}
In the following let $R \subseteq A \times A$ be an endorelation on~$A$.
We call~$R$
\begin{itemize}
\item \emph{reflexive}, if $\id(A) \subseteq R$,
\item \emph{transitive}, if $a_1 R a_2$ and $a_2 R a_3$ implies $a_1 R a_3$ for every $a_1,a_2,a_3 \in A$,
\item \emph{antisymmetric}, if $a_1 R a_2$ and $a_2 R a_1$ implies $a_1 = a_2$ for every $a_1,a_2 \in A$,
\item \emph{total}, if $a_1 R a_2$ or $a_2 R a_1$ for every $a_1,a_2 \in A$,
and
\item \emph{well-founded}, if for each non-empty subset $B \subseteq A$ there is an element $b \in B$ such that for each element $b' \in B$ it is not true that $b' R b$ holds.
\end{itemize}
\index{transitive closure}
\index{reflexive and transitive closure}
The \emph{transitive closure of~$R$},
denoted by~$R^+$,
is the smallest transitive endorelation~$R'$ on~$A$ such that $R \; \subseteq \; R'$.
The \emph{reflexive and transitive closure of~$R$},
denoted by~$R^*$,
is the smallest reflexive and transitive endorelation~$R'$ on~$A$ such that $R \; \subseteq \; R'$.
\index{order!partial order}
\index{order!total order}
We call $(A,R)$
\begin{itemize}
\item a \emph{partial order}, if~$R$ is reflexive, antisymmetric, and transitive.
\item a \emph{total order}, if $(A,R)$ is a partial order and~$R$ is total.
\end{itemize}
\begin{quote}
\em In the following, if we deal with a partial order, then we will use the symbol $\preceq$ rather than~$R$. Moreover, as a convention, we denote~$\preceq^{-1}$ by~$\succeq$.
\end{quote}
\index{order!strict ordering induced by}
In the following let $(A,\preceq)$ be a partial order.
The \emph{strict ordering relation induced by~$\preceq$} is the binary relation $\prec\; = \; \preceq \setminus \id(A)$.
\index{order!well-partial order}
\index{order!well-order}
We say that $(A,\preceq)$ is
\begin{itemize}
\item a \emph{well-partial order}, if $(A,\preceq)$ is a partial order and the strict ordering relation induced by~$\preceq$ is well-founded.
\item a \emph{well-order}, if $(A,\preceq)$ is a total and well-partial order.
\end{itemize}
\begin{example}
The \emph{natural order on pairs of natural numbers}, $\leq \; \subseteq \mathbb N^2$, is defined as follows:
for every $(a_1,b_1), (a_2,b_2) \in \mathbb N^2$, $(a_1,b_1) \leq (a_2,b_2)$ if one of the following holds:
\begin{enumerate}
\item $a_1 < a_2$, or
\item $a_1 = a_2$ and $b_1 \leq b_2$.
\end{enumerate}
We point out that $(\mathbb N^2, \leq)$ is a well-order.
This statement is proved in Appendix~\ref{sec:proofs-preliminaries}.
\end{example}
\begin{lemma}[restate={[name={}]lempochains}]\label{lem:po-chains}
For every partial order $(A, \preceq)$, $n \in \mathbb N$, and $a_1, \dots, a_n \in A$ the following holds:
if $a_1 \preceq \dots \preceq a_n$ and $a_1 = a_n$, then $a_1 = \dots = a_n$.
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:po-chains}, we refer to Appendix~\ref{sec:proofs-preliminaries}.
\end{proof}
\index{lower bound}
\index{infimum}
\index{minimum}
In the following let $(A,\preceq)$ be a partial order and let $X \subseteq A$.
If there is an $a \in A$ such that for every $b \in X$ we have $a \preceq b$,
then~$a$ is a \emph{lower bound of~$X$}.
If, additionally, for every lower bound~$a'$ of~$X$ it holds that $a' \preceq a$,
then~$a$ is the \emph{infimum of~$X$}, denoted by $\inford X$.
If $\inford X \in X$, then~$\inford X$ is the \emph{minimum of~$X$}, denoted by $\minord X$.
\index{upper bound}
\index{supremum}
\index{maximum}
Dually, if there is an $a \in A$ such that for every $b \in X$ we have $b \preceq a$,
then~$a$ is an \emph{upper bound of~$X$}.
If, additionally, for every upper bound~$a'$ of~$X$ it holds that $a \preceq a'$,
then~$a$ is the \emph{supremum of~$X$}, denoted by $\supord X$.
If $\supord X \in X$, then~$\supord X$ is the \emph{maximum of~$X$}, denoted by $\maxord X$.
\index{$\argminord$}
\index{$\argmaxord$}
Let $B$ be a set, $X \subseteq B$, and $f: B \rightarrow A$ be a mapping.
We define
\begin{align*}
\argminord_{a \in X} f(a) &= \{ a \in X \mid f(a) \preceq f(a') \text{\ for every $a' \in X$} \} \\
\argmaxord_{a \in X} f(a) &= \{ a \in X \mid f(a') \preceq f(a) \text{\ for every $a' \in X$} \} \enspace.
\end{align*}
If the partial order $(A,\preceq)$ is clear from the context,
then we will drop $\preceq$ from $\inford$,
$\supord$, $\minord$, $\maxord$, $\argminord$,
and $\argmaxord$ and we will simply write $\inf$, $\sup$,
$\min$, $\max$, $\argmin$, and $\argmax$, respectively.
For any two elements $\iota_1$ and $\iota_2$, we write $\min (\iota_1, \iota_2)$ and $\max (\iota_1, \iota_2)$ rather than $\min \{ \iota_1, \iota_2 \}$ and $\max \{ \iota_1, \iota_2 \}$, respectively.
\index{index set}
In the following let $I$ be a countable set (\emph{index set}) and~$A$ be a set.
\index{family}
An \emph{$I$-indexed family over~$A$} (or: \emph{family over $A$}) is a mapping $f: I \rightarrow A$.
As usual, we represent each~$I$-indexed family~$f$ over~$A$ by $(f(i) \mid i \in I)$ and abbreviate $f(i)$ by $f_i$.
The set of all $I$-indexed families over~$A$ is denoted by $A^I$. Let~$J$ be a countable index set.
\index{partition}
A \emph{$J$-partition of~$I$} is a $J$-indexed family $(I_j \mid j \in J)$ over~$\mathcal P(I)$, where
\begin{enumerate*}
\item $\bigcup_{j \in J} I_j = I$, and
\item $I_j \cap I_{j'} = \emptyset$ for every $j,j' \in J$ with $j \not= j'$.
\end{enumerate*}
\paragraph{Strings and formal languages.}
\index{string}
In the following let $A$ be a set and $k \in \mathbb N$.
The set of \emph{strings of length~$k$ over~$A$} is the set $A^k = \{ a_1 \dots a_k \mid a_1,\dots,a_k \in A \}$.
\index{empty string}
In particular, $A^0 = \{ \varepsilon \}$,
where~$\varepsilon$ denotes the \emph{empty string}.
\index{string!set of strings}
The set of \emph{strings over~$A$} is the set $A^* = \bigcup_{i \in \mathbb N} A^i$.
Note that in our notation, we make no difference between the set of strings of length~$k$ over~$A$ and the $k$-fold Cartesian product $\underbrace{A \times \dots \times A}_{\text{$k$ times}}$, which is also denoted by~$A^k$. Thus, for $k=0$, we identify $\varepsilon$ and $()$.
\index{string!length}
\index{string!slice}
Let $w \in A^*$ with $w = w_1 \dots w_k$,
for some $k \in \mathbb N$ and $w_i \in A$ for every $i \in [k]$.
The \emph{length of~$w$}, denoted by $|w|$, is~$k$, i.e., $|w| = k$.
For every $i, j \in [k]$ we denote the $(i,j)$-slice of~$w$ by $w_{i \isep j} = w_i \dots w_j$.
\index{string!concatenation}
Let $w' \in A^*$ be another string such that $w' = w_1' \dots w_{k'}'$ for some $k' \in \mathbb N$ and $w_1',\dots,w_{k'}' \in A$.
The \emph{concatenation of~$w$ and~$w'$},
denoted by $w \circ w'$, is the string $w_1 \dots w_k w_1' \dots w_{k'}'$.
We usually leave out the operation symbol and just write $w w'$ rather than $w \circ w'$.
\index{string!substring}
\index{string!prefix}
\index{string!suffix}
Let $w,w' \in A^*$.
If there are $u,v \in A^*$ such that $w = u w' v$,
then~$w'$ is a \emph{substring of~$w$}, $u$ is a \emph{prefix of~$w$}, which we denote by $u \prefof w$, and $v$ is a \emph{suffix of~$w$}.
\index{alphabet}
\index{formal language}
\index{formal language!concatenation}
If the set $A$ is nonempty and finite, then we call it \emph{alphabet}.
If $A$ is an alphabet, then each subset of~$A^*$ is called \emph{formal language over~$A$}.
Let $L,L'\subseteq A^*$.
The \emph{concatenation of~$L$ and~$L'$} is the formal language
\[ L \circ L' = \{ w w' \mid w \in L \text{ and } w' \in L' \} \enspace. \]
\subsection{Universal algebra}
\paragraph{Sorts and signatures.}
\index{sort}
\index{S@$S$-sorted set}
Let $S$ be a set (\emph{sorts}).
An \emph{$S$-sorted set} is a tuple $(A,\sort)$,
where $A$ is a set and $\sort: A \rightarrow S$ is a mapping.
For every $s \in S$, we denote the set $\sort^{-1}(s)$ by~$A_s$.
We call $(A,\sort)$ \emph{empty} (respectively, \emph{nonempty}, \emph{finite},
and \emph{infinite}) if~$A$ is so.
\index{S@$S$-sorted alphabet}
An \emph{$S$-sorted alphabet} is a nonempty and finite $S$-sorted set.
An $S$-sorted set $(B,\sort')$ is a \emph{subset} of $(A,\sort)$,
if $B \subseteq A$ and $\sort'(b) = \sort(b)$ for every $b \in B$.
Let $(A,\sort)$ and $(B,\sort')$ be two $S$-sorted sets.
\index{sort-preserving}
A mapping $f\colon A \rightarrow B$ is called \emph{sort-preserving} if $f(A_s) \subseteq B_s$ for each $s \in S$.
Moreover, let $(A_1,\sort_1)$ be a subset of $(A,\sort)$.
\index{sort-preserving!restriction}
The \emph{restriction of $f$ to $A_1$} is the mapping $f|_{A_1}\colon A_1 \rightarrow B$ such that $f|_{A_1}(a)=f(a)$ for each $a \in A_1$.
\index{ranked set}
A \emph{ranked set} is an $\mathbb N$-sorted set.
By convention, we call its mapping $\rk$ rather than $\sort$, i.e., $\rk: A \rightarrow \mathbb{N}$.
Each $(S^*\times S)$-sorted set $(\Sigma,\sort)$ \emph{can be viewed as the ranked set} $(\Sigma,\rk)$ where for every $k \in \mathbb N$, $s,s_1,\dots,s_k \in S$, and $\sigma \in \Sigma_{(s_1 \dots s_k,s)}$ we define $\rk(\sigma) = k$.
On the other hand, every ranked set $(A,\rk)$ can be considered as an $(S^*\times S)$-sorted set for some set $S$ of sorts with $|S|=1$.
\index{ranked alphabet}
A \emph{ranked alphabet} is a nonempty and finite ranked set.
\index{trg}
For each $(S^* \times S)$-sorted set~$\Sigma$ we define a mapping $\trg: \Sigma \to S$ such that $\trg(\sigma) = s$ if $\sigma \in \Sigma_{(s_1 \dots s_k,s)}$.
Moreover, for each $s \in S$, we denote the set $\trg^{-1}(s)$ by~$\Sigma_s$.
We often denote an $S$-sorted set $(A,\sort)$ and a ranked set $(A,\rk)$ only by~$A$; then the mappings will be denoted by $\sort_A$ and $\rk_A$, respectively.
\begin{quote}
\em In the rest of this paper, if $S$ and $\Sigma$ are unspecified, then they stand for an arbitrary set of sorts and an arbitrary $(S^* \times S)$-sorted set, respectively.
Moreover, for the sake of brevity, whenever we write ``$\sigma \in \Sigma_{(s_1\ldots s_k,s)}$'', then we mean that $k \in \mathbb{N}$ and $s,s_1,\dots,s_k \in S$.
\end{quote}
\paragraph{$S$-sorted algebras and $S$-sorted $\Sigma$-homomorphisms.}
Sorted algebras have been introduced by~\cite{Higgins1963}.
We use the notation of~\cite{Goguen1985} and also refer to~\cite{Goguen1977}.
\index{algebra}
\index{S@$S$-sorted $\Sigma$-algebra}
\index{algebra!carrier set}
\index{algebra!interpretation mapping}
An \emph{$S$-sorted $\Sigma$-algebra} (or: algebra) is a pair $(\alg{A},\phi)$,
where~$\alg{A}$ is an $S$-sorted set (\emph{carrier set}) and~$\phi$ is a mapping from $\Sigma$ to operations on~$\alg{A}$ (\emph{interpretation mapping}) such that the following condition holds:
for every $\sigma \in \Sigma_{(s_1 \dots s_k,s)}$ we have $\phi(\sigma)\colon \alg{A}_{s_1} \times \dots \times \alg{A}_{s_k} \rightarrow \alg{A}_s$. If $|S|=1$, then we call $(\alg{A},\phi)$ simply \emph{$\Sigma$-algebra}.
\index{factor@$\factors$}
For every $a \in \alg A$ we let
\[
\factors(a)= \{b \in \alg{A} \mid b (<_{\mathrm{factor}})^* a\}
\]
where for every $a,b \in \alg{A}$, $b <_{\mathrm{factor}} a$ (\emph{$b$ is a factor of $a$}) if there are a $k \in \mathbb N$ and a $\sigma \in \Sigma_k$ such that $b$ occurs in some tuple $(b_1, \dots, b_k)$ with $\phi(\sigma)(b_1, \dots, b_k) = a$.
\index{finitely decomposable}
We call $(\alg A, \phi)$ \emph{finitely decomposable}~\cite[Def.~1.15]{kla84} if $\factors(a)$ is finite for every $a \in \alg A$.
We note that, in particular, for every finitely decomposable $S$-sorted $\Sigma$-algebra $(\alg A,\phi)$, $\sigma \in \Sigma$, and $a \in \alg A$, the set $\phi(\sigma)^{-1}(a)$ is finite.
\index{homomorphism}
\index{S@$S$-sorted $\Sigma$-homomorphism}
Let $(\alg{A},\phi)$ and $(\alg B,\psi)$ be $S$-sorted $\Sigma$-algebras.
Moreover, let $h\colon \alg{A} \rightarrow \alg B$ be a sort-preserving mapping. We call $h$ an \emph{$S$-sorted $\Sigma$-homomorphism (from $(\alg{A},\phi)$ to $(\alg B,\psi)$)},
if for every $\sigma \in \Sigma_{(s_1 \dots s_k,s)}$
and $a_1 \in \alg{A}_{s_1},\dots,a_k \in \alg{A}_{s_k}$ it holds that
\[ h(\phi(\sigma)(a_1,\dots,a_k)) = \psi(\sigma)(h(a_1),\dots,h(a_k)) \enspace. \]
If we write $h\colon (\alg{A},\phi) \rightarrow (\alg B,\psi)$, then we mean that $h$ is an $S$-sorted $\Sigma$-homomorphism from $(\alg{A},\phi)$ to $(\alg B,\psi)$.
\paragraph{$S$-sorted $\Sigma$-term algebra.}
\index{tree}
In the following let $X$ be a countable $S$-sorted set.
The set of \emph{trees over~$\Sigma$ and~$X$},
denoted by~$\T_\Sigma(X)$, is the smallest $S$-sorted set~$T$ such that
\begin{enumerate}
\item $X_s \subseteq T_s$ for every $s \in S$, and
\item for every $\sigma \in \Sigma_{(s_1 \dots s_k, s)}$ and $t_1 \in T_{s_1},\dots,t_k \in T_{s_k}$ we have $\sigma(t_1,\dots,t_k) \in T_s$.
\end{enumerate}
If~$X = \emptyset$, we write~$\T_\Sigma$ instead of~$\T_\Sigma(X)$. If $\sigma \in \Sigma$ with $\rk(\sigma)=0$, then we abbreviate the tree $\sigma()$ by~$\sigma$.
\begin{quote}
\em In the rest of this paper, if we write ``$t$ has the form $\sigma(t_1,\ldots,t_k)$'', then we mean that there are $\sigma \in \Sigma_{(s_1 \dots s_k, s)}$ and $t_1 \in \T_\Sigma(X)_{s_1},\dots,t_k \in \T_\Sigma(X)_{s_k}$ such that $t=\sigma(t_1,\ldots,t_k)$.
\end{quote}
We note that for each $t \in \T_\Sigma(X) \setminus X$ the choices of $k \in \mathbb{N}$, $s,s_1,\ldots,s_k \in S$, $\sigma \in \Sigma_{(s_1 \dots s_k, s)}$, and $t_1 \in \T_\Sigma(X)_{s_1},\dots,t_k \in \T_\Sigma(X)_{s_k}$ such that $t=\sigma(t_1,\ldots,t_k)$ are unique.
\index{S@$S$-sorted $\Sigma$-term algebra}
The \emph{$S$-sorted $\Sigma$-term algebra over $X$} is the $S$-sorted $\Sigma$-algebra $(\T_\Sigma(X),\phi_\Sigma)$, where for every $\sigma \in \Sigma_{(s_1 \dots s_k,s)}$ and $t_i \in (\T_\Sigma(X))_{s_i}$ with $i \in [k]$ we define $\phi_\Sigma(\sigma)(t_1,\dots,t_k) = \sigma(t_1,\dots,t_k)$.
\begin{theorem}[cf.~{\cite[Prop.~2.6]{Goguen1977}}]
Let $(\alg{A},\phi)$ be an $S$-sorted $\Sigma$-algebra. If $h\colon X \rightarrow \alg{A}$ is a sort-preserving mapping, then there exists a unique $S$-sorted $\Sigma$-homomorphism $\widetilde{h}\colon (\T_\Sigma(X),\phi_\Sigma) \rightarrow (\alg{A},\phi)$ extending $h$, i.e., such that $\widetilde{h}|_X=h$. Thus, in particular, there exists a unique $S$-sorted $\Sigma$-homomorphism $g\colon (\T_\Sigma,\phi_\Sigma) \rightarrow (\alg{A},\phi)$.
\end{theorem}
\index{derived operation}
For a string $u=s_1\ldots s_n$ with $n \in \mathbb{N}$ and $s_i \in S$ we let $X_u = \{x_{1,s_1},\ldots,x_{n,s_n}\}$ be a set of variables.
The set $X_u$ can be viewed as an $S$-sorted set with $(X_u)_s = \{x_{i,s_i} \mid s_i=s\}$.
Let $(\alg A,\phi)$ be an $S$-sorted $\Sigma$-algebra and $t \in \T_\Sigma(X_u)_s$ for some $s \in S$.
The \emph{$t$-derived operation on $\alg{A}$}, denoted by $\sem{t}$, is the operation $\sem{t}: \alg{A}_{s_1} \times \ldots \times \alg{A}_{s_n} \rightarrow \alg{A}_s$ defined by $\sem{t}(a_1,\ldots,a_n) = \widetilde{h}(t)$ where $h: X_u \rightarrow \alg{A}$ with $h(x_{i,s_i}) = a_i$.
Obviously, for each $t \in \T_\Sigma(X_\varepsilon)_s$ (i.e., $t \in (\T_\Sigma)_s$), $\sem{t}: \{()\} \to \alg{A}_s$.
We abbreviate $\sem{t}()$ by $\sem{t}$.
Then $\sem{t} = g(t)$ where $g\colon (\T_\Sigma,\phi_\Sigma) \rightarrow (\alg{A},\phi)$ is the unique $\Sigma$-homomorphism.
\begin{observation}[cf.~{\cite[Prop.~2.5]{Goguen1977}}]\label{obs:tree-derived-operations}
Let $(\alg{A},\phi)$ be a $\Sigma$-algebra.
Then for every $k \in \mathbb N$, $t \in \T_\Sigma(X_k)$, and $t_1,\dots,t_k \in \T_\Sigma$ it holds that
\[ (t_{\T_\Sigma}(t_1,\dots,t_k))_{\alg{A}} = \sem{t}\big((t_1)_{\alg{A}},\dots,(t_k)_{\alg{A}}\big) \enspace. \]
\end{observation}
We remark that an extension of this result to $S$-sorted $\Sigma$-algebras is straightforward.
\index{tree homomorphism}
\index{S@$S$-sorted tree homomorphism}
Now let $\Delta$ be another $(S^* \times S)$-sorted set. Moreover, we let $h: \Sigma \rightarrow \T_\Delta(X)$ such that, for each $\sigma \in \Sigma_{(s_1\ldots s_k,s)}$, we have $h(\sigma) \in \T_\Delta(X_u)_s$ with $u = s_1\ldots s_k$.
Then we define the $\Sigma$-algebra $(\T_\Delta,\phi_h)$ such that for every $\sigma \in \Sigma_{(s_1\ldots s_k,s)}$ we let
$\phi_h(\sigma) = h(\sigma)_{(\T_\Delta,\phi_\Delta)}$, i.e., the $h(\sigma)$-derived operation on the $\Delta$-term algebra $(\T_\Delta,\phi_\Delta)$.
Then we call the unique $\Sigma$-homomorphism from $(\T_\Sigma,\phi_\Sigma)$ to $(\T_\Delta,\phi_h)$ the \emph{$S$-sorted tree homomorphism induced by $h$}.
\index{tree relabeling}
\index{$S$-sorted tree relabeling}
In the particular case that for every $\sigma \in \Sigma_{(s_1\ldots s_k,s)}$ there is a $\delta \in \Delta_{(s_1\ldots s_k,s)}$ such that $h(\sigma)=\delta(x_{1,s_1},\ldots,x_{k,s_k})$, we call the $S$-sorted tree homomorphism induced by $h$ the \emph{$S$-sorted tree relabeling induced by $h$}.
If $|S|=1$, then we call an $S$-sorted tree homomorphism ($S$-sorted tree relabeling) simply \emph{tree homomorphism} (\emph{tree relabeling}, respectively).
\paragraph{Useful functions on trees.}
\index{tree!position}
\index{tree!leaf}
Let $t \in \T_\Sigma(X)$.
The set $\pos(t) \subseteq \mathbb N^*$ is inductively defined as follows:
\[ \pos(t) = \begin{cases}
\{ \varepsilon \} &\text{if } t \in X \\
\{ \varepsilon \} \cup \bigcup_{i=1}^k \{ i \} \circ \pos(t_i) &\text{if $t$ has the form $\sigma(t_1,\dots,t_k)$.}
\end{cases} \]
Let $p \in \pos(t)$.
We call~$p$ a \emph{leaf} if there is no $k \in \mathbb N$ with $pk \in \pos(t)$.
\index{tree!subtree}
\index{tree!replacement}
\index{tree!label}
\index{tree!label sequence}
In order to formalize manipulations (e.g.\ rewriting) of trees, we introduce the following functions.
Let~$t \in \T_\Sigma(X)$ and $p \in \pos(t)$.
Moreover, let $s \in \T_\Sigma(X)$.
We define
\begin{itemize}
\item the \emph{subtree of~$t$ at position~$p$}, denoted by $t|_p$,
\item the \emph{tree obtained by replacing the subtree of~$t$ at position~$p$ by~$s$}, denoted by $t[s]_p$,
\item the \emph{label of~$t$ at position~$p$}, denoted by $t(p)$, and
\item the \emph{label sequence of~$t$ from root to position~$p$}, denoted by $\seq(t, p)$,
\end{itemize}
by structural induction as follows:
\begin{itemize}
\item For every $h \in X$, $h|_\varepsilon = h$, $h[s]_\varepsilon = s$, $h(\varepsilon) = h$, and $\seq(h, \varepsilon) = h$.
\item If $t = \sigma(t_1,\ldots, t_k)$, then $t|_\varepsilon = t$, $t[s]_\varepsilon = s$, $\sigma(t_1,\ldots,t_k)(\varepsilon) = \sigma$, and $\seq(\sigma(t_1,\ldots,t_k), \varepsilon) = \sigma$.
Moreover, for every $1 \leq i \leq k$ and $p' \in \pos(t_i)$, we define $t|_{ip'} = t_i|_{p'}$,
\[ t[s]_{ip'} = \sigma(t_1,\ldots,t_{i-1},t_i[s]_{p'}, t_{i+1},\ldots,t_k)\enspace, \]
$\sigma(t_1,\ldots,t_k)(ip') = t_i(p')$, and $\seq(\sigma(t_1,\ldots,t_k), ip') = \sigma \seq(t_i, p')$.
\end{itemize}
\index{tree!slice}
Let $t \in \T_\Sigma(X)$ and $p, p' \in \pos(t)$ such that $p \prefof p'$.
We define the \emph{label sequence of~$t$ from position~$p$ to position~$p'$}, denoted by $\seq(t, p, p')$, and the \emph{slice of~$t$ from position~$p$ to position~$p'$}, denoted by $t[p, p']$, as follows:
\begin{align*}
\seq(t, p, p') &= \seq(t|_p, p'') \\
t[p, p'] &= (t|_p)[\sigma(x_{1,s_1}, \dots, x_{k,s_k})]_{p''} \enspace,
\end{align*}
where $p'' \in \pos(t)$ such that $p p'' = p'$ and $t(p') = \sigma$ with $\sigma \in \Sigma_{(s_1 \dots s_k,s)}$.
Let $t_1 \in (\T_\Sigma)_{s_1}, \dots, t_k \in (\T_\Sigma)_{s_k}$.
We write $t[p, p'](t_1, \dots, t_k)$ rather than $(t[p, p'])_{\T_\Sigma}(t_1, \dots, t_k)$.
We define the mapping $\height: \T_\Sigma(X) \to \mathbb N$ inductively as follows:
\[ \height(t) = \begin{cases}
0 &\text{if $t \in X \cup \Sigma$} \\
1 + \max \{ \height(t_1),\dots,\height(t_k) \} &\text{if $t$ has the form $\sigma(t_1,\dots,t_k)$ and $k > 0$.}
\end{cases} \]
\begin{lemma}[restate={[name={}]lemheightfinite}]\label{lem:fixed-height-finite-trees'}
Let~$\Sigma$ be a ranked set and $k = \max \{ \rk(\sigma) \mid \sigma \in \Sigma \}$ such that $k > 0$.
Then for each $h \in \mathbb N$ it holds that $|\{ t \in \T_\Sigma \mid \height(t) \leq h \}| \leq |\Sigma|^{(\sum_{i=0}^h k^i)}$.
In particular, $\{ t \in \T_\Sigma \mid \height(t) \leq h \}$ is finite.
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:fixed-height-finite-trees'}, we refer to Appendix~\ref{sec:proofs-preliminaries}.
\end{proof}
\index{tree!yield}
For each $\Delta \subseteq \Sigma \cup X$, we define the mapping $\yield_\Delta: \T_\Sigma(X) \rightarrow \Delta^*$ for each $t \in \T_\Sigma(X)$ as follows:
\[ \yield_\Delta(t) = \begin{cases}
t &\text{if } t \in X \text{\ and } t \in \Delta\\
\varepsilon &\text{if } t \in X \text{\ and } t \not\in \Delta\\
\sigma &\text{if $t$ has the form $\sigma(t_1,\dots,t_k)$, } k=0, \text{\ and } \sigma \in \Delta\\
\varepsilon &\text{if $t$ has the form $\sigma(t_1,\dots,t_k)$, } k=0, \text{\ and } \sigma \not\in \Delta\\
\yield_\Delta(t_1) \dots \yield_\Delta(t_k) &\text{if $t$ has the form $\sigma(t_1,\dots,t_k)$ and $k > 0$}.
\end{cases} \]
If $\Delta = \Sigma \cup X$, then we simply write $\yield$ rather than $\yield_\Delta$.
\paragraph{Cycles in trees.}
\index{cyclic}
\index{acyclic}
\index{cycle}
\index{cycle!elementary}
Let $R$ be a ranked set and $\rho \in R^*$.
We call~$\rho$
\begin{itemize}
\item \emph{cyclic}, if there are $i, j \in [|\rho|]$ such that $i < j$ and $\rho_i = \rho_j$,
\item \emph{acyclic}, if~$\rho$ is not cyclic,
\item \emph{a cycle}, if $|\rho| > 1$ and $\rho_1 = \rho_{|\rho|}$,
\item \emph{an elementary cycle}, if~$\rho$ is a cycle and both $\rho_1 \dots \rho_{|\rho| - 1}$ and $\rho_2 \dots \rho_{|\rho|}$ are acyclic.
\end{itemize}
\index{cyclic!c@$(c,w)$-cyclic}
Let $c \in \mathbb N$ and $\rho, w \in R^*$ such that~$w$ is an elementary cycle.
We say that~$\rho$ is \emph{$(c,w)$-cyclic} if there are $v_0, \dots, v_c \in R^*$ such that $\rho = v_0 w v_1 \dots w v_c$ and for every $i \in [0, c]$, $w$ is not a substring of~$v_i$.
Thus, intuitively, $\rho$ is $(c,w)$-cyclic if~$w$ repeats exactly~$c$ times in~$\rho$.
\index{cyclic!c@$c$-cyclic}
We say that~$\rho$ is \emph{$c$-cyclic} if there is a $w \in R^*$ such that~$\rho$ is $(c,w)$-cyclic and for every $w' \in R^*$ and $c' \in \mathbb N$ with $c' > c$, $\rho$ is not $(c',w')$-cyclic.
Let $c \in \mathbb N$, $d \in \T_R$, and $p_1, p_2, p \in \pos(d)$ such that $p_1 \prefof p_2 \prefof p$ and~$p$ is a leaf.
We let $\seq(d, p_1, p_2) = w$.
We say that~$p$ is \emph{cyclic} (\emph{acyclic}, \emph{$(c,w)$-cyclic}, \emph{$c$-cyclic}), if $\seq(d, p)$ is cyclic (resp.\ acyclic, $(c,w)$-cyclic, $c$-cyclic).
We say that~$d$ is \emph{acyclic}, if every leaf $p \in \pos(d)$ is acyclic.
Furthermore, we say that~$d$ is \emph{$c$-cyclic}, if there is a leaf $p \in \pos(d)$ such that~$p$ is $c$-cyclic and for every leaf $p' \in \pos(d)$ and $c' \in \mathbb N$ with $c' > c$, $p'$ is not $c'$-cyclic.
For every $c \in \mathbb N$, we denote the set $\{ d \in \T_R \mid \text{$d$ is $c'$-cyclic for some $c' \in \mathbb N$ with $c' \leq c$} \}$ by~$\T_R^{(c)}$.
\begin{observation}
For every $c \in \mathbb N$ it holds that $\T_R^{(c)} \subseteq \T_R^{(c+1)}$.
Furthermore, $\T_R = \bigcup_{i \in \mathbb N} \T_R^{(i)}$.
\end{observation}
\subsection{Monoids, semirings, and M-monoids}
\index{monoid}
A \emph{monoid} is a tuple $(\walg{K},\oplus,\welem{0})$, where
\begin{itemize}
\item $\walg{K}$ is a set (\emph{carrier set}),
\item $\oplus: \walg{K} \times \walg{K} \rightarrow \walg{K}$ is a mapping such that for every $\welem{k}_1,\welem{k}_2,\welem{k}_3 \in \walg{K}$ it holds that $(\welem{k}_1 \oplus \welem{k}_2) \oplus \welem{k}_3 = \welem{k}_1 \oplus (\welem{k}_2 \oplus \welem{k}_3)$ (\emph{associativity}), and
\item $\welem{0} \in \walg{K}$ such that for every $\welem{k} \in \walg{K}$ it holds that $\welem{0} \oplus \welem{k} = \welem{k} = \welem{k} \oplus \welem{0}$ (\emph{identity element}).
\end{itemize}
\index{monoid!commutative}
We call $(\walg{K},\oplus,\welem{0})$ \emph{commutative} if for every $\welem{k}_1,\welem{k}_2 \in \walg{K}$ it holds that $\welem{k}_1 \oplus \welem{k}_2 = \welem{k}_2 \oplus \welem{k}_1$.
\index{idempotent}
We call $(\walg{K},\oplus,\welem{0})$ \emph{idempotent} if for every $\welem{k} \in \walg{K}$ it holds that $\welem{k} \oplus \welem{k} = \welem{k}$.
We define the binary relation $\preceq \, \subseteq \walg K \times \walg K$ for every $\welem k_1, \welem k_2 \in \walg K$ as follows:
$\welem k_1 \preceq \welem k_2$ if there is a $\welem k \in \walg K$ such that $\welem k_1 \oplus \welem k = \welem k_2$.
\begin{lemma}[restate={[name={}]lemnatordrt}]\label{lem:natord-refl-trans}
For every monoid $(\walg K, \oplus, \welem 0)$, the binary relation~$\preceq$ on~$\walg K$ is reflexive and transitive.
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:natord-refl-trans}, we refer to Appendix~\ref{sec:proofs-preliminaries}.
\end{proof}
\index{monoid!naturally ordered}
We call $(\walg K, \oplus, \welem 0)$ \emph{naturally ordered} if~$\preceq$ is a partial order.
\begin{lemma}[restate={[name={}]lemnatord},cf.~{\cites[Proposition~3.2]{Karner1992}[Theorem~1.8]{HebWei1998}[p.\,6]{DroKui2009}}]\label{lem:natord-subsume}
Let $(\walg K, \oplus, \welem 0)$ be a monoid.
Then~$\walg K$ is naturally ordered if and only if for every $\welem k_1, \welem k_2, \welem k_3 \in \walg K$ with $\welem k_1 = \welem k_1 \oplus \welem k_2 \oplus \welem k_3$ it holds that $\welem k_1 = \welem k_1 \oplus \welem k_2$.
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:natord-subsume}, we refer to Appendix~\ref{sec:proofs-preliminaries}.
\end{proof}
\index{infinitary sum operation}
\index{monoid!infinitary sum operation}
An \emph{infinitary sum operation on~$\walg{K}$} is a family $(\infsum_{\hspace{-1mm}I} \mid$ $I$ is a countable index set$)$ of mappings $\infsum_{\hspace{-1mm}I}: \walg{K}^I \rightarrow \walg{K}$.
Instead of $\infsum_I (\welem{k}_i \mid i \in I)$ we write $\infsum_{i \in I} \welem{k}_i$. If~$I$ is finite, then we denote $\infsum_{i \in I} \welem{k}_i$ by $\bigoplus_{i \in I} \welem{k}_i$.
\index{monoid!complete}
A \emph{complete monoid} (cf.~\cite[p.\,124--125]{Eilenberg1974};~\cite[p.\,247--248]{Golan1999};~\cite[p.\,5]{Fulop2018}) is a tuple $(\walg{K},\oplus,\welem 0,\infsum)$,
where $(\walg{K},\oplus,\welem 0)$ is a commutative monoid and $\infsum$ is an infinitary sum operation on $\walg{K}$
such that for each $I$-indexed family $(\welem{k}_i \mid i \in I)$ over $\walg{K}$ the following axioms hold:
\begin{itemize}
\item If $I=\emptyset$, then $\infsum_{i \in \emptyset} \welem{k}_i = \welem{0}$,
\item If $I= \{j\}$, then $\infsum_{i \in \{j\}} \welem{k}_i = \welem{k}_j$,
\item If $I= \{j,j'\}$ with $j \not= j'$, then $\infsum_{i \in \{j,j'\}} \welem{k}_i = \welem{k}_j \oplus \welem{k}_{j'}$,
\item $\infsum_{i \in I} \welem{k}_i = \infsum_{j \in J} \big( \infsum_{i \in I_j} \welem{k}_i \big)$ for every countable index set~$J$ and $J$-partition $(I_j \mid j \in J)$ of~$I$.
\end{itemize}
Intuitively, $\infsum$ extends~$\oplus$ while preserving the properties of associativity, commutativity, and the identity element~$\welem{0}$ of~$\oplus$.
However, using the above definition of complete, certain convergence properties of finite sums need not necessarily apply to infinite sums as well.
We solve this problem by requiring an additional property of $\infsum$ as follows.
Let $(\walg K, \oplus, \welem 0, \infsum)$ be a complete commutative monoid.
\index{monoid!d-complete}
We call~$\walg K$ \emph{d-complete} (cf.~\cite{Karner1992}), if for every $\welem k \in \walg K$ and family $(\welem k_i \mid i \in \mathbb N)$ of elements of~$\walg K$ the following holds:
if there is an $n_0 \in \mathbb N$ such that for every $n \in \mathbb N$ with $n \ge n_0$, $\infsum_{\substack{i \in \mathbb N: \\ i \le n}} \welem k_i = \welem k$, then $\infsum_{i \in \mathbb N} \welem k_i = \welem k$.
\begin{lemma}[cf.~{\cite[Proposition~3.1]{Karner1992}}]\label{lem:d-complete}
Let $(\walg K, \oplus, \welem 0, \infsum)$ be a complete commutative monoid.
Then the following statements are equivalent:
\begin{enumerate}
\item $\walg K$ is d-complete,
\item for every $\welem k \in \walg K$ and family $(\welem k_i \mid i \in \mathbb N)$, if $\welem k \oplus \welem k_i = \welem k$ for every $i \in \mathbb N$, then
\[ \welem k \oplus \infsum_{i \in \mathbb N} \welem k_i = \welem k \enspace, \]
and
\item for every countable set~$I$, family $(\welem k_i \mid i \in I)$ of elements of~$\walg K$, and finite subset~$E$ of~$I$ the following holds:
if for every finite set~$F$ with $E \subseteq F \subseteq I$ it holds that
\[ \infsum_{i \in E} \welem k_i = \infsum_{i \in F} \welem k_i \enspace, \]
then
\[ \infsum_{i \in E} \welem k_i = \infsum_{i \in I} \welem k_i \enspace. \]
\end{enumerate}
\end{lemma}
Instead of giving a proof here, we refer the reader to~\cite{Karner1992}.
Although he stated this lemma for complete semirings rather than monoids, only the properties of the semiring's underlying monoid were used.
Thus the same proof is applicable to our lemma.
\begin{lemma}[restate={[name={}]lemdcompnatord}]\label{lem:d-complete-natord}
Every d-complete monoid is naturally ordered.
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:d-complete-natord}, we refer to Appendix~\ref{sec:proofs-preliminaries}.
\end{proof}
\index{monoid!completely idempotent}
A complete monoid $(\walg{K},\oplus,\welem{0},\infsum)$ is \emph{completely idempotent}~\cite{drovog14} if for every $\welem{k} \in \walg{K}$ and nonempty countable index set $I$ we have $\infsum_{i \in I} \welem{k} = \welem{k}$.
\begin{lemma}[restate={[name={}]lemiidc}]\label{lem:inf-idp-d-complete}
Every completely idempotent monoid is d-complete.
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:inf-idp-d-complete}, we refer to Appendix~\ref{sec:proofs-preliminaries}.
\end{proof}
\index{semiring}
A \emph{semiring} is a tuple $(\walg{K},\oplus,\otimes,\welem{0},\welem{1})$ such that
\begin{itemize}
\item $(\walg{K},\oplus,\welem{0})$ is a commutative monoid and $(\walg{K},\otimes,\welem{1})$ is a monoid,
\item for every $\welem{k}_1,\welem{k}_2,\welem{k}_3 \in \walg{K}$ it holds that $\welem{k}_1 \otimes (\welem{k}_2 \oplus \welem{k}_3) = (\welem{k}_1 \otimes \welem{k}_2) \oplus (\welem{k}_1 \otimes \welem{k}_3)$ and $(\welem{k}_1 \oplus \welem{k}_2) \otimes \welem{k}_3 = (\welem{k}_1 \otimes \welem{k}_3) \oplus (\welem{k}_2 \otimes \welem{k}_3)$ (\emph{distributivity of~$\otimes$} over~$\oplus$), and
\item for every $\welem{k} \in \walg{K}$ it holds that $\welem{k} \otimes \welem{0} = \welem{0} = \welem{0} \otimes \welem{k}$ (\emph{absorbing element}).
\end{itemize}
\index{semiring!commutative}
We call $(\walg{K},\oplus,\otimes,\welem{0},\welem{1})$ \emph{commutative}, if $\otimes$ is commutative.
\index{semiring!naturally ordered}
\index{semiring!idempotent}
We call $(\walg{K},\oplus,\otimes,\welem{0},\welem{1})$ \emph{naturally ordered}, if $(\walg K, \oplus, \welem 0)$ is naturally ordered, and \emph{idempotent}, if $(\walg K, \oplus, \welem 0)$ is idempotent.
We call~$\oplus$ \emph{addition} and~$\otimes$ \emph{multiplication}.
\begin{example}\label{ex:boolean-semiring}
We consider the \emph{Boolean semiring} $(\mathbb B, \lor, \land, \mathsf{f\mkern-1mu f}, \mathsf{t\mkern-1mu t})$, where $\lor$ is logical disjunction and $\land$ is logical conjunction.
It is easy to see that $\mathbb B$ is commutative and idempotent.
Let $\infsum[\lor]$ be the infinitary sum operation on~$\mathbb B$ defined as follows:
for every countable set~$I$ and family $(\welem k_i \mid i \in I)$ of elements of~$\mathbb B$, if there is an $i \in I$ such that $\welem k_i = \mathsf{t\mkern-1mu t}$, then $\infsum[\lor]_{i \in I} \welem k_i = \mathsf{t\mkern-1mu t}$ and otherwise $\infsum[\lor]_{i \in I} \welem k_i = \mathsf{f\mkern-1mu f}$.
It is easy to see that $(\mathbb B, \lor, \mathsf{f\mkern-1mu f}, \infsum[\lor])$ is completely idempotent.
Thus, by Lemma~\ref{lem:inf-idp-d-complete}, $\mathbb B$ is d-complete.
Following~\cite[Example~3.1]{Karner1992}, we extend the Boolean semiring by $\infty$, i.e., we consider the semiring $(\mathbb B^{(\infty)}, \lor, \land, \mathsf{f\mkern-1mu f}, \mathsf{t\mkern-1mu t})$, where $\mathbb B^{(\infty)} = \mathbb B \cup \{ \infty \}$ and $\lor$ and $\land$ are extended as follows to operate with $\infty$:
\begin{align*}
\welem k \lor \infty &= \infty &&\text{for every $\welem k \in \mathbb B \cup \{ \infty \}$}
\\
\welem k \land \infty &= \infty &&\text{for every $\welem k \in \{ \mathsf{t\mkern-1mu t}, \infty \}$}
\\
\welem k \land \mathsf{f\mkern-1mu f} &= \mathsf{f\mkern-1mu f} \enspace.
\end{align*}
We define this semiring to be commutative as well, thus its operations are fully specified.
We define the infinitary sum operation $\infsum[\lor']$ such that for every family $(\welem k_i \mid i \in I)$ over $\mathbb B^{(\infty)}$
\[
\infsum[\lor']_{i \in I} \welem k_i = \begin{cases}
\bigoplus_{i \in I: \welem k_i \not= \mathsf{f\mkern-1mu f}} \welem k_i & \text{if $\{ i \in I \mid \welem k_i \not= \mathsf{f\mkern-1mu f} \}$ is finite} \\
\infty & \text{otherwise} \enspace.
\end{cases}
\]
Then $(\mathbb B^{(\infty)}, \lor, \mathsf{f\mkern-1mu f}, \infsum[\lor'])$ is complete, but not d-complete.
The latter can be seen using the family $(\mathsf{t\mkern-1mu t} \mid i \in I)$ over $\mathbb B^{(\infty)}$ for some infinite and countable set $I$.
While $\infsum[\lor']_{i \in E} \mathsf{t\mkern-1mu t} = \mathsf{t\mkern-1mu t}$ for every finite and nonempty subset $E$ of $I$, we have that $\infsum[\lor']_{i \in I} \mathsf{t\mkern-1mu t} = \infty$.
(In particular, $\mathbb B^{(\infty)}$ is not completely idempotent.)
\end{example}
\begin{sloppypar}
\index{semiring!complete}
A \emph{complete semiring} (cf.~{\cite[p.\,124--125]{Eilenberg1974};~\cite[p.\,247--248]{Golan1999};~\cite[p.\,5]{Fulop2018}}) is a tuple $(\walg{K},\oplus,\otimes,\welem{0},\welem{1},\infsum)$,
where $(\walg{K},\oplus,\otimes,\welem{0},\welem{1})$ is a semiring,
$(\walg{K},\oplus,\welem{0},\infsum)$ is a complete monoid,
and
\[\welem{k} \otimes \big( \infsum_{i \in I} \welem{k}_i \big) = \infsum_{i \in I} \big( \welem{k} \otimes \welem{k}_i \big) \ \text{ and } \ \big( \infsum_{i \in I} \welem{k}_i \big) \otimes \welem{k} = \infsum_{i \in I} \big( \welem{k}_i \otimes \welem{k} \big)
\]
hold for every $\welem{k} \in \walg{K}$,
countable index set~$I$, and $I$-indexed family $(\welem{k}_i \mid i \in I)$ over $\walg{K}$.
\end{sloppypar}
\index{M-monoid}
\index{null operation}
A \emph{multioperator monoid} (for short: M-monoid, cf.~\cite{Kuich1999}) is a tuple $(\walg{K},\oplus,\welem{0},\Omega,\psi)$ such that
\begin{itemize}
\item $(\walg{K},\oplus,\welem{0})$ is a commutative monoid,
\item $(\walg{K},\psi)$ is an $\Omega$-algebra for some ranked set $\Omega$,
and
\item $\welem{0}^k \in \Omega$ for every $k \in \mathbb N$, where $\psi(\welem{0}^k): \walg{K}^k \rightarrow \walg{K}$ such that $\psi(\welem{0}^k)(\welem{k}_1,\dots,\welem{k}_k) = \welem{0}$ for every $\welem{k}_1,\dots,\welem{k}_k \in \walg{K}$. We call the operation $\welem{0}^k$ a \emph{null operation}.
\end{itemize}
\index{M-monoid!distributive}
The M-monoid $(\walg{K},\oplus,\welem{0},\Omega,\psi)$ is \emph{distributive} if for each $\omega \in \Omega$, $k \in \mathbb N$, $i \in [k]$,
and $\welem{k},\welem{k}_1,\dots,\welem{k}_k \in \walg{K}$, the operation $\psi(\omega)$ \emph{distributes over $\oplus$}, i.e.,
\begin{align*}
&\psi(\omega)(\welem{k}_1,\dots,\welem{k}_{i-1},\welem{k}_i \oplus \welem{k},\welem{k}_{i+1},\dots,\welem{k}_k) \\
&= \psi(\omega)(\welem{k}_1,\dots,\welem{k}_{i-1},\welem{k}_i,\welem{k}_{i+1},\dots,\welem{k}_k) \oplus \psi(\omega)(\welem{k}_1,\dots,\welem{k}_{i-1},\welem{k},\welem{k}_{i+1},\dots,\welem{k}_k)
\end{align*}
\index{M-monoid!absorbing}
and $\welem{0}$ is \emph{absorbing}, i.e., $\psi(\omega)(\welem{k}_1,\dots,\welem{k}_k) = \welem{0}$ if $\welem{0} \in \{ \welem{k}_1,\dots,\welem{k}_k \}$.
\index{M-monoid!naturally ordered}
\index{M-monoid!idempotent}
We call $(\walg K, \oplus, \welem 0, \Omega, \psi)$ \emph{naturally ordered}, if $(\walg K, \oplus, \welem 0)$ is naturally ordered, and \emph{idempotent}, if $(\walg K, \oplus, \welem 0)$ is idempotent.
In the following, we will often identify $\omega \in \Omega$ with $\psi(\omega)$. Then we will omit the mapping~$\psi$ from the tuple $(\walg{K},\oplus,\welem{0},\Omega,\psi)$ and simply write $(\walg{K},\oplus,\welem{0},\Omega)$.
Also, for the sake of convenience, we will omit in examples and constructions the explicit specification of the null operations $\welem{0}^k$ in the definition of~$\Omega$.
\index{M-monoid!complete}
\begin{sloppypar}
A \emph{complete M-monoid} is a tuple $(\walg{K},\oplus,\welem{0},\Omega,\infsum)$,
where $(\walg{K},\oplus,\welem{0},\Omega)$ is an M-monoid and $(\walg{K},\oplus,\welem{0},\infsum)$ is a complete monoid.
\index{M-monoid!completely idempotent}
\index{M-monoid!d-complete}
A complete M-monoid $(\walg{K},\oplus,\welem{0},\Omega,\infsum)$ is \emph{d-complete} (\emph{completely idempotent}) if $(\walg{K},\oplus,\welem{0},\infsum)$ is d-complete (completely idempotent).
\end{sloppypar}
\begin{quote}
\em As usual, we will identify any algebra defined in this section with its carrier set~$\walg{K}$, whenever the type of the algebra is clear from the context.
\end{quote}
\subsection{Regular tree grammars}
\index{regular tree grammar}
\index{RTG}
\index{S@$S$-sorted regular tree grammar}
An \emph{$S$-sorted regular tree grammar} (for short: $S$-sorted RTG) is a tuple $G = (N,\Sigma,A_0,R)$,
where
\begin{itemize}
\item $N$ is an $S$-sorted alphabet (\emph{nonterminals})
\item $\Sigma$ is an $(S^* \times S)$-sorted alphabet such that $N \cap \Sigma =\emptyset$ (\emph{terminals}),
\item $A_0 \in N$ (\emph{initial nonterminal}), and
\item $R$ is a finite $(N^* \times N)$-sorted set (\emph{set of rules}) such that $R \subseteq (N \times \T_\Sigma(N))$ and for every $k \in \mathbb N$, $A,A_1,\ldots, A_k \in N$ the following holds:
if $(B,t) \in R_{(A_1\ldots A_k,A)}$,
then $B=A$, $\yield_N(t) = A_1 \ldots A_k$, and $\sort_S(A)=\sort_S(t)$.
\end{itemize}
For each rule $r = (A,t)$, we call~$A$ the \emph{left-hand side} and~$t$ the \emph{right-hand side} of that rule and denote them by $\lhs(r)$ and $\rhs(r)$, respectively.
\index{RTG!maximal rank}
\index{RTG!normal form}
The \emph{maximal rank of~$G$} is defined as $\maxrk(G) = \max \{ \rk_R(\rho) \mid \rho \in R \}$ where $R$ is viewed as a finite ranked set. If the right-hand side of each rule contains exactly one terminal, then $G$ is called \emph{in normal form}. If $|S|=1$, then an $S$-sorted RTG is a classical regular tree grammar (cf.~\cite{Brainerd1969}).
We usually denote an element $(A,t)$ of~$R$ as $A \rightarrow t$.
For better readability, we show a list $A \rightarrow t_1\ \dots\ A \rightarrow t_k$ of rules with the same left-hand side in the form $A \rightarrow t_1 + \dots + t_k$.
\index{abstract syntax tree}
\index{$\mathrm{AST}(G)$}
The set of \emph{abstract syntax trees (over $R$)} is the set $\mathrm{AST}(G) = (\T_R)_{A_0}$.
We can retrieve from each abstract syntax tree $d$ the tree in~$\T_\Sigma$ which is represented by~$d$. For this we view $R$ as an $(S^* \times S)$-sorted set by defining the mapping $\sort: R \to S^* \times S$ as follows: for every $r = (A \to t)$ in $R$ with $\yield_N(t) = A_1 \ldots A_n$, we let $\sort(r)= (\sort(A_1)\ldots \sort(A_n),\sort(A))$.
Moreover, we define the mapping $h: R \rightarrow \T_\Sigma(X)$ for each $r \in R$ as follows.
If $r =(A \rightarrow t)$ with $\yield_N(t) = A_1 \ldots A_n$, then $h(r)$ is obtained from $t$ by replacing the $i$-th occurrence of a nonterminal in $t$ (counted from left-to-right) by the variable $x_{i,\sort(A_i)}$ for every $i \in [n]$.
Clearly, $h(r) \in \T_\Sigma(X_u)$ with $u = \sort(A_1) \ldots \sort(A_n)$.
Then we denote the $S$-sorted tree homomorphism induced by $h$ by $\pi_\Sigma$.
We note that $\pi_\Sigma: \T_R \rightarrow \T_\Sigma$ and we can say that $\pi_\Sigma(d)$ retrieves from each $d \in \T_R$ the tree in~$\T_\Sigma$ which is represented by~$d$.
\index{P@$\pi_\Sigma$}
\index{derivation}
It is obvious that each abstract syntax tree corresponds to a left derivation of the RTG and vice versa.
\index{regular tree grammar!generated language}
\index{L@$L(G,A)$}
\index{L@$L(G)$}
For every $A \in N$, the \emph{(formal) tree language generated by~$G$ from~$A$} is the set
\[
L(G,A) = \{ \pi_\Sigma(d) \mid d \in (\T_R)_A\}\enspace.
\]
We note that, if $A \in N_s$ for some $s \in S$, then $L(G,A) \subseteq (\T_\Sigma)_s$.
The \emph{(formal) tree language generated by~$G$} is the set $L(G) = L(G,A_0)$.
\index{RTG!unambiguous}
We call $G$ \emph{unambiguous} if for each $t \in L(G)$ there is a unique $d \in (\T_R)_{A_0}$ such that $\pi_\Sigma(d)=t$.
It was proved in~\cite[Theorem~3.16]{Brainerd1969} (also cf.~\cite[Theorem~3.22]{Engelfriet2015}) that for each regular tree grammar $G$ there is a regular tree grammar $G'$ such that $G'$ is in normal form and $L(G)=L(G')$. In a straightforward way, this result can be lifted to $S$-sorted RTG.
\section{Weighted RTG-based language models and the M-monoid parsing problem}
\label{sec:weighted-RTG-based-grammars}
In this section we introduce our framework of weighted RTG-based language models and use it to define the M-monoid parsing problem.
We compare our approach to interpreted regular tree grammars~\cite{KolKuh2011}, a similar framework which makes use of the initial algebra semantics~\cite{Goguen1977}, too.
\subsection{Weighted RTG-based language models}
We approach an algebraic definition of weighted grammars in two steps.
First we define RTG-based language models as an expressive grammar formalism and then we extend this definition by a weight component.
The idea behind RTG-based language models is to specify both the syntax of a language and the language itself within one formalism.
This is based on the \emph{initial algebra semantics}~\cite[Sect.~3.1]{Goguen1977}.
Here we use RTGs to describe the syntax and we use language algebras to generate the modeled language from these syntactic descriptions.
\index{RTG-based language model}
\index{RTG-LM}
\index{RTG-LM!language algebra}
Formally, an \emph{RTG-based language model} (RTG-LM) is a tuple $(G,(\alg L,\phi))$ where
\begin{itemize}
\item $G=(N,\Sigma,A_0,R)$ is an $S$-sorted RTG for some set $S$ of sorts and
\item $(\alg L,\phi)$ is an $S$-sorted $\Gamma$-algebra (\emph{language algebra}) such that $\Sigma \subseteq \Gamma$ (as $(S^* \times S)$-sorted set).
\end{itemize}
\index{L@$L(G)_{\alg L}$}
\index{language generated by~$(G,(\alg L,\phi))$}
\index{RTG-LM!generated language}
The \emph{language generated by~$(G,(\alg L,\phi))$}, denoted by $L(G)_{\alg L}$, is defined as
\[
L(G)_{\alg L} = \{ \sem[\alg L]{\pi_\Sigma(d)} \mid d \in \mathrm{AST}(G) \}\enspace.
\]
We note that $L(G)_{\alg L} \subseteq \alg L_{\sort(A_0)}$.
\index{syntactic object}
We call the elements of~$\alg L$ \emph{syntactic objects}.
\index{$\mathrm{AST}(G, a)$}
For each $a \in \alg L$, we define the set of \emph{trees which evaluate to $a$} as $\mathrm{AST}(G, a) = \{ d \in \mathrm{AST}(G) \mid \pi_\Sigma(d)_{\alg L} = a \}$.
\index{RTG-LM!ambiguous}
We call $(G, (\alg L, \phi))$ \emph{ambiguous} if there are $d_1, d_2 \in \mathrm{AST}(G)$ such that $\sem[\alg L]{\pi_\Sigma(d_1)} = \sem[\alg L]{\pi_\Sigma(d_2)}$ and $d_1 \not= d_2$.
We note that there are two characteristics of ambiguity.
\begin{enumerate}
\item There are a syntactic object $a \in \alg L$ and two trees $t_1, t_2 \in \T_\Sigma$ such that $\sem[\alg L]{(t_1)} = \sem[\alg L]{(t_2)} = a$ and $t_1 \not= t_2$.
This mirrors semantic ambiguity in the modeled language.
For instance, if $\alg L$ is a string language and $a$ a sentence, then $t_1$ and $t_2$ represent different groupings of the words in $a$ into constituents (cf.\ Figure~\ref{fig:asts}).
\item There are $d_1, d_2 \in \mathrm{AST}(G)$ and a $t \in \T_\Sigma$ such that $\pi_\Sigma(d_1) = \pi_\Sigma(d_2) = t$ and $d_1 \not= d_2$.
Then $d_1$ and $d_2$ represent the same syntactic description of the syntactic object $\sem[\alg L]{t}$, but that description may be obtained using different rules of the RTG $G$.
This kind of ambiguity is called \emph{spurious ambiguity} and it is often not wanted.
\end{enumerate}
In the rest of this section, we will not differentiate between different kinds of ambiguity.
Methods for deciding or removing spurious ambiguity are beyond the scope of this paper.
Now we enrich RTG-LMs by a weight component.
This consists of an M-monoid (the weight algebra) for computing the weights of ASTs and a mapping that assigns to each rule of the RTG $G$ an M-monoid operation.
\index{weighted RTG-based language model}
\index{wRTG-LM}
\index{wt}
A \emph{weighted RTG-based language model} (wRTG-LM) is a tuple
\[
\overline{G} = \big((G,(\alg L,\phi)), \ (\walg{K},\oplus,\welem{0},\Omega,\psi,\infsumop), \ \wt\big)\enspace,
\]
where
\begin{itemize}
\item $(G,(\alg L,\phi))$ is an RTG-LM; we assume that $R$ is the set of rules of $G$,
\item $(\walg{K},\oplus,\welem{0},\Omega,\psi,\infsum)$ is a complete M-monoid,
and
\item $\wt: R \rightarrow \Omega$ is a mapping such that for each $r \in R$ the operation $\wt(r)$ has arity $\rk_R(r)$ (viewing $R$ as a ranked set).
The tree relabeling induced by $\wt$ is the mapping $\widetilde{\wt}: \T_R \rightarrow \T_\Omega$.
We denote $\widetilde{\wt}$ by $\wt$, too.
\end{itemize}
\index{language algebra}
\index{weight algebra}
We call
\begin{itemize}
\item $(\alg L,\phi)$ the \emph{language algebra of $\overline{G}$},
\item $(G,(\alg L,\phi))$ the \emph{RTG-LM of $\overline{G}$}, and \item $(\walg{K},\oplus,\welem{0},\Omega,\psi,\infsum)$ the \emph{weight algebra of~$\overline{G}$}.
\end{itemize}
If we abbreviate the two involved algebras by their respective carrier sets, then a wRTG-LM is specified by a tuple $((G,\alg L),\walg{K},\wt)$.
Intuitively, each wRTG-LM consists of two components:
\index{syntax component}
\index{weight component}
a \emph{syntax component} and a \emph{weight component}.
The syntax component is defined by the $\Sigma$-algebra $(\alg L, \phi)$ and the mapping $\pi_\Sigma: \T_R \to \T_\Sigma$.
The weight component is defined by the $\Omega$-algebra $(\walg K, \psi)$ and the mapping $\wt: \T_R \to \T_\Omega$.
\subsection{M-monoid parsing problem}
In the previous subsection we have introduced wRTG-LMs as the formal foundation of our approach to weighted parsing.
Now we will develop the weighted parsing problem that naturally emerges from wRTG-LMs.
We call this problem \emph{M-monoid parsing problem}.
Given a wRTG-LM \wrtglm\ and a syntactic object $a$, the relevant syntactic descriptions for parsing $a$ are the elements of the set $\mathrm{AST}(G,a)$, i.e., the set of ASTs of $G$ which evaluate to $a$.
We can map each tree from this set to a weight by first applying the tree relabeling $\wt$ to it and then evaluating the resulting tree over $\T_\Omega$ using the unique homomorphism $(.)_{\walg K}$.
Thus we obtain an $\mathrm{AST}(G,a)$-indexed family of elements of $\walg K$.
We note that since several ASTs can be evaluated to the same weight, it is not appropriate to use a set rather than a family here.
We accumulate this family of weights to a single element of $\walg K$ using the infinitary sum operation $\infsumop$.
\index{M-monoid parsing problem}
Formally, the \emph{M-monoid parsing problem} is the following problem. \\[3mm]
\textbf{Given:}
\begin{enumerate}
\item a wRTG-LM $\big((G,(\alg L,\phi)),(\walg{K},\oplus,\welem{0},\Omega,\psi,\infsum),\wt\big)$ and
\item an $a \in \alg L_{\sort(A_0)}$,
\end{enumerate}
\textbf{Compute:} $\displaystyle\fparse_{(G,\alg L)}(a) = \infsum_{d \in \mathrm{AST}(G, a)} \sem[\walg K]{\wt(d)}$ \enspace.
\refstepcounter{equation}(\theequation)\label{eq:parsing-problem}
\\[3ex]
We note that for finite index sets, $\infsum$ can be replaced by~$\bigoplus$.
Whenever $(G,\alg L)$ is clear from the context, we will just write $\fparse$ rather than $\fparse_{(G,\alg L)}$.
In Figure~\ref{fig:overview} we illustrate how the syntax component and the weight component of the input wRTG-LM of the M-monoid parsing problem play together.
\begin{figure}
\caption{Overview of the M-monoid parsing problem for a wRTG-LM $\big((G, \alg L), (\walg K, \oplus, \Omega, \infsum), \wt\big)$ and a syntactic object $a$.}
\label{fig:overview}
\end{figure}
\begin{figure}
\caption{Illustration of the M-monoid parsing problem for the wRTG-LM $\big((G,\lalg{CFG}^\Delta), \walg{BD}, \wt\big)$ of Example~\ref{ex:best-derivation-mmonoid} and the syntactic object $a = \terminal{fruit}\ \terminal{flies}\ \terminal{like}\ \terminal{bananas}$.}
\label{fig:overview-ex}
\end{figure}
\begin{example}\label{ex:best-derivation-mmonoid}
In the introduction we have mentioned the \emph{best parsing} problem.
Given a grammar $G$ and a sentence $a$, the goal was to compute the highest probability $p$ among all constituent trees of $a$ in $G$ and the set of all constituent trees with probability $p$.
Here we show that the best parsing problem is an instance of the M-monoid parsing problem.
For this, we slightly modify the best parsing problem: instead of constituent trees, we compute ASTs.
Due to our choice of the underlying RTG -- the nonterminals correspond to syntactic categories -- we can obtain from each AST one of the desired constituent trees.
We note that this approach is common in practical applications of parsing and furthermore, we could directly compute constituent trees by employing a different weight algebra.
Since ASTs correspond to derivations, our problem is called \emph{best derivation problem} (cf.~\cite[Figure~5]{Goodman1999}).
In this example we define a wRTG-LM $\overline G$ for computing the best derivations of a grammar whose language contains, among others, the sentence $\terminal{fruit}\ \terminal{flies}\ \terminal{like}\ \terminal{bananas}$.
We start by giving the syntax component which represents this particular grammar.
Later we introduce the general \emph{best derivation M-monoid} and use it in the weight component.
In the end, we compute the best derivation of $\terminal{fruit}\ \terminal{flies}\ \terminal{like}\ \terminal{bananas}$.
We consider the $S$-sorted RTG $G = (N, \Sigma, \nont{S}, R)$ with a singleton set of sorts (e.g., $S = \{ \iota \}$).
It is defined as follows.
\begin{itemize}
\item $N = N_\iota = \{\nont{S},\nont{NP},\nont{VP},\nont{PP},\nont{NN},\nont{NNS},\nont{VBZ},\nont{VBP},\nont{IN}\}$,
\item $\Sigma = \Sigma_{(\iota\iota,\iota)} \cup \Sigma_{(\iota,\iota)} \cup \Sigma_{(\varepsilon,\iota)}$ and
$\Sigma_{(\iota\iota,\iota)} = \{ \sigma \}$, $\Sigma_{(\iota,\iota)} = \{ \gamma \}$, and
$\Sigma_{(\varepsilon,\iota)} = \{ \alpha_{\terminal{fruit}}, \alpha_{\terminal{flies}}, \alpha_{\terminal{like}}, \alpha_{\terminal{bananas}} \}$, and
\item \(R\) contains the rules (ignoring the numbers above the arrows for the time being):
\end{itemize}
We define the language algebra $(\alg L, \phi)$ as a $\Sigma$-algebra with $\alg L = \{ \terminal{fruit}, \terminal{flies}, \terminal{like}, \terminal{bananas} \}^*$ and
\begin{align*}
\phi(\sigma)(a_1, a_2) &= a_1 a_2 && \text{for every $a_1, a_2 \in \alg L$} \\
\phi(\gamma)(a) &= a && \text{for every $a \in \alg L$} \\
\phi(\alpha_a) &= a && \text{for every $a \in \{ \terminal{fruit}, \terminal{flies}, \terminal{like}, \terminal{bananas} \}$.}
\end{align*}
\begin{sloppypar}
Intuitively, $\alg L$ is a string algebra with the following capabilities.
It can produce each of the syntactic objects $\terminal{fruit}$, $\terminal{flies}$, $\terminal{like}$, and $\terminal{bananas}$ using a constant operation (i.e., $\alpha_a$ for every $a \in \{ \terminal{fruit}, \terminal{flies}, \terminal{like}, \terminal{bananas} \}$).
Furthermore, it can concatenate two syntactic objects (using $\sigma$) and contains an identity mapping (cf.\ $\gamma$).
\end{sloppypar}
We proceed to the definition of the best derivation M-monoid.
We want to use this single M-monoid to describe the computation of the best derivation of every RTG-LM.
For this we choose as carrier set an artificially large set and assume that it contains every rule of every RTG.
Let $R_\infty$ be a ranked set such that $(R_\infty)_k$ is infinite for each $k \in \mathbb{N}$.
\index{best derivation M-monoid}
We define the \emph{best derivation M-monoid} to be the complete M-monoid
\[
\big(\gls{wclass:bd},\ \maxv,\ (0, \emptyset),\ \Omegav, \ \infsumop[\maxv]\big) \enspace,
\]
where
\begin{itemize}
\item $\mathbb{BD} = \mathbb R_0^1 \times \mathcal P(\T_{R_\infty})$,
\item for every $(p_1, D_1), (p_2, D_2) \in \mathbb{BD}$,
\[
\maxv\big((p_1, D_1), (p_2, D_2)\big) = \begin{cases}
(p_1, D_1) &\text{if $p_1 > p_2$} \\
(p_2, D_2) &\text{if $p_1 < p_2$} \\
(p_1, D_1 \cup D_2) &\text{otherwise,}
\end{cases}
\]
\item $\Omegav = \{ \tc{p}{r} \mid p \in {\mathbb R_0^1} \ \text{and} \ r \in R_\infty \}$, where for each $p \in {\mathbb R_0^1}$, $k \in \mathbb N$, and $r \in (R_\infty)_k$, we define \( \tc{p}{r}: \mathbb{BD}^{k} \to \mathbb{BD} \) (tc abbreviates top concatenation)
such that for every $(p_1, D_1), \dots, (p_{k}, D_{k}) \in \mathbb{BD}$,
\[
\tc{p}{r}\big((p_1, D_1), \dots, (p_{k}, D_{k})\big) = (p',D')
\]
where $p'= p \cdot p_1 \cdot \ldots \cdot p_{k}$ and $D'=\{ r(d_1, \dots, d_{k}) \mid d_i \in D_i, 1 \le i \le k\}$, and
\item for every family $((p_i, D_i) \mid i \in I)$ over $\mathbb{BD}$, we define $\infsum[\maxv]_{i \in I} (p_i, D_i) = (p, D)$, where $p = \sup \{ p_i \mid i \in I \}$ and $D = \bigcup_{i \in I: p_i = p} D_i$.
(We note that this supremum exists because~$1$ is an upper bound of every subset of~${\mathbb R_0^1}$ and every bounded subset of~$\mathbb R$ has a supremum.)
\end{itemize}
We finish the definition of the weight component of $\overline G$ by defining the mapping $\wt: R \to \Omegav$.
Since $R_\infty$ is infinite, we can assume that $R_k \subseteq (R_\infty)_k$ for every $k \in \mathbb N$.
We let $\wt(r) = \tc{p}{r}$ for every $r \in R$, where $p$ is shown above the arrow of~$r$.
Intuitively, $\wt$ associates with each rule a pair where the first component is a number in ${\mathbb R_0^1}$ and the second component is a singleton set which contains the rule itself.
We have shown an AST $d \in \mathrm{AST}(G, \terminal{fruit}\ \terminal{flies}\ \terminal{like}\ \terminal{bananas})$ in the center of the upper row of Figure~\ref{fig:overview-ex}.
To its left we have illustrated its evaluation to the syntactic object $a = \terminal{fruit}\ \terminal{flies}\ \terminal{like}\ \terminal{bananas}$ in the syntactic component.
We obtain $\pi_\Sigma(d)$ by dropping the non-highlighted parts of $d$.
The application of the homomorphism $(.)_{\alg L}: \T_\Sigma \to \alg L^*$ to $\pi_\Sigma(d)$ yields $a$.
To the right of $d$ it can be seen how it is evaluated to $\big(0.0216, \{ r_1(r_3(r_8,r_9),r_6(r_{12},r_4(r_{10}))) \}\big)$ in the weight component.
The probability of $d$ (i.e., the real number $0.0216$) is obtained as the product of the numbers which are associated to the rules occurring in $d$.
The set of ASTs of $a$ with this probability consists only of $d$.
This holds for every $d \in \mathrm{AST}(G,a)$.
In the lower row of Figure~\ref{fig:overview-ex} we have indicated that there is a second AST $d'$ which is evaluated to~$a$, too.
We obtain
\[
\wt(d')_{\walg{BD}} = (0.0144, \{ r_1(r_2(r_8),r_5(r_{11},r_7(r_{13},r_4(r_{10})))) \}) \enspace.
\]
Thus
\(
\maxv \big(\wt(d)_{\walg{BD}}, \wt(d')_{\walg{BD}}\big) = \wt(d)_{\walg{BD}}
\).
As one might expect, it is more likely that~$a$ refers to the preferences (to $\terminal{like}\ \terminal{bananas}$) of certain insects ($\terminal{fruit}\ \terminal{flies}$).
\end{example}
\subsection{Comparison with interpreted regular tree grammars (IRTG)}
\begin{table}
\centering\small
\begin{tabular}{lll}
\toprule
\multicolumn{2}{l}{wRTG-LM} & IRTG \\
\midrule
\multicolumn{2}{l}{$\overline G = \big((G, \alg L), (\walg K, \oplus, \mathbb 0, \Omega, \infsum), \wt\big)$} & $\overline G = (G, \mathcal I_1, \mathcal I_2)$ \\[1ex]
\multicolumn{2}{l}{RTG $G = (N, \Sigma, A_0, R)$} & RTG $G = (N, \Sigma, A_0, R)$ \\
\multicolumn{2}{l}{\textbullet abstract syntax trees $\mathrm{AST}(G)$} & \textbullet tree language $L(G)$ \\[1ex]
syntax component & weight component & interpretation $\mathcal I_i = (h_i, \alg A_i)$ ($i \in [2]$) \\
\textbullet tree relabeling $\pi_\Sigma: \T_R \to \T_\Sigma$ & \textbullet tree relabeling $\wt: \T_R \to \T_\Omega$ & \textbullet tree homomorphism $h_i: \T_\Sigma \to \T_{\Delta_i}$ \\
\textbullet $\Gamma$-algebra $\alg L$ ($\Sigma \subseteq \Gamma$\,) & \textbullet complete M-monoid $\walg K$ & \textbullet $\Delta_i$-algebra $\alg A_i$ \\
\textbullet evaluation $(.)_{\alg L}: \T_\Sigma \to \alg L$ & \textbullet evaluation $(.)_{\walg K}: \T_R \to \walg K$ & \textbullet evaluation $(.)_{\alg A_i}: \T_{\Delta_i} \to \alg A_i$ \\[1ex]
\multicolumn{2}{l}{$L(G)_{\alg L} = \{ \pi_\Sigma(d)_{\alg L} \mid d \in \mathrm{AST}(G) \}$} & $L(\overline G) = \{ (h_1(t)_{\alg A_1}, h_2(t)_{\alg A_2}) \mid t \in L(G) \}$ \\
\bottomrule
\end{tabular}
\caption{Comparison of a wRTG-LM to an IRTG with two interpretations.}
\label{tab:comp-irtg-wrtglm}
\end{table}
We compare our framework of wRTG-LMs with \emph{interpreted regular tree grammars} (IRTGs, \autocite{KolKuh2011}).
For this, we briefly recall the basic notions of IRTGs.
An IRTG $\overline G$ consists of an RTG $G = (N, \Sigma, A_0, R)$ and several \emph{interpretations}.
Each interpretation is a pair $(h, \alg A)$, where $h: \T_\Sigma \to \T_\Delta$ is a tree homomorphism and $\alg A$ is a $\Delta$-algebra.
The language generated by $\overline G$ is the set of all tuples which are obtained by interpreting trees of $L(G)$ in the several algebras.
Formally, if $\overline G$ consists of the interpretations $(h_1, \alg A_1), \dots, (h_n, \alg A_n)$ with $n \in \mathbb N$, then the language generated by $\overline G$ is the set
\[
L(\overline G) = \{ (h_1(t)_{\alg A_1}, \dots, h_n(t)_{\alg A_n}) \mid t \in L(G) \} \enspace.
\]
In the right column of Table~\ref{tab:comp-irtg-wrtglm}, we illustrate the concept of IRTGs for the special case of two interpretations (i.e., $n = 2$).
In our comparison of wRTG-LMs and IRTGs, we consider wRTG-LMs as IRTGs with two $\Sigma$\hyp{}interpretations.
We view each wRTG-LM $\big((G, \alg L), (\walg K, \oplus, \mathbb 0, \Omega, \infsum), \wt\big)$ as the IRTG $\big(G, (\pi_\Sigma, \alg L), (\wt, \walg K)\big)$.
This is done as shown in Table~\ref{tab:comp-irtg-wrtglm}:
\begin{itemize}
\item the wRTG-LM and the IRTG consist of the same RTG $G = (N, \Sigma, A_0, R)$,
\item the syntax component corresponds to the first interpretation $\mathcal I_1$, and
\item the weight component corresponds to the second interpretation $\mathcal I_2$.
\end{itemize}
We point out that this view of wRTG-LMs as IRTGs does not conform to the definition of IRTGs.
While the core component of a wRTG-LM is the set $\mathrm{AST}(G)$ of abstract syntax trees, the core component of an IRTG is the tree language $L(G)$.
A second, minor difference is that the language of an IRTG consists of tuples of interpreted trees, while the language of a wRTG-LM consists of syntactic objects (i.e., trees evaluated in the language algebra).
Finally, we compare the M-monoid parsing problem to the \emph{decoding} problem of IRTGs.
Decoding is motivated by modeling translation between natural languages using synchronous grammars.
It is defined as follows.
Given an IRTG $\overline G = \big(G, (h_1, \alg A_1), (h_2, \alg A_2)\big)$ and a syntactic object $a$, compute the set
\[
\decodes(a) = \{ h_2(t)_{\alg A_2} \mid t \in L(G) \land h_1(t)_{\alg A_1} = a \} \enspace.
\]
Compared to the M-monoid parsing problem, we consider the language algebra $\alg L$ as the input language and the weight algebra $\walg K$ as the output language of our translation.
We can derive the M-monoid parsing problem from the IRTG decoding problem by applying two changes.
First, we need to compute a family of elements of $\alg A_2$ rather than a set.
This is because in the M-monoid parsing problem, if several abstract syntax trees have the same weight, then this weight contributes to the value of $\fparse(a)$ multiple times.
Second, we map this family to a single element of $\alg A_2$ using the infinitary sum operation.
The application of these transformations yields Equation~\eqref{eq:parsing-problem}.
\section{Classes of weighted RTG-based language models}
In this section we define several subclasses of wRTG-LMs.
For this, we use two parameters:
\begin{enumerate}
\item a subclass $\gclass{}$ of the class of all RTG-LMs $\gls{gclass:all}$ and
\item a subclass $\wclass{}$ of the class of all complete M-monoids $\gls{wclass:all}$.
\end{enumerate}
\index{G@$(\gclass{}, \wclass{})$-LM}
Now let $\gclass{} \subseteq \gclass{all}$ and $\wclass{} \subseteq \wclass{all}$.
Then a \emph{$(\gclass{},\wclass{})$-LM} is a wRTG-LM $\big((G,(\alg L,\phi)), \ (\walg{K},\oplus,\welem{0},\Omega,\infsumop), \ \wt\big)$ such that
\begin{enumerate}
\item its RTG-LM $(G,(\alg L,\phi))$ is in $\gclass{}$ and
\item its weight algebra $(\walg{K},\oplus,\welem{0},\Omega,\infsumop)$ is in $\wclass{}$.
\end{enumerate}
\index{W@$\wlmclass{\gclass{},\wclass{}}$}
We denote the class of all $(\gclass{},\wclass{})$-LMs by $\wlmclass{\gclass{},\wclass{}}$.
Moreover, we will introduce the subclass $\wlmclass[\mathrm{closed}]{\gclass{}, \wclass{}}$ which imposes an additional restriction on wRTG-LMs.
This class is central to the termination and correctness of the M-monoid parsing algorithm.
\subsection{Classes of RTG-based language models}
\label{sec:classes-rtglms}
In this subsection we recall four particular classes of RTG-LMs:
context-free grammars, linear context-free rewriting systems, tree-adjoining grammars, and yield-grammars.
We mention that also context-free hypergraph grammars~\cite{baucou87,habkre87} can be viewed as RTG-LMs \cite{cou91} (also cf. \cite{dregebvog16}). Each of these classes is determined by a particular class of language algebras. Additionally, in Subsection \ref{subsect:general-classes}, we define three more classes of RTG-LMs which are determined by (a) particular subclasses of regular tree grammars and (b) by an interplay between the involved RTG and the language algebra.
\subsubsection{The CFG-algebras and context-free grammars}
It was suggested in \cite[Sect.~3.1]{Goguen1977} to consider context-free languages as initial many-sorted algebra semantics of context-free grammars.
The context-free grammars are here replaced by RTG.
Let $\Delta$ be a finite set and $S=\{\iota\}$ be a set of sorts (for some arbitrary but fixed $\iota$).
We let $X = \{x_1,x_2,\ldots\}$ be a set of variables.
These variables will be used to denote strings over $\Delta$. For each $k \in \mathbb{N}$, we let $X_k=\{x_1,\ldots,x_k\}$.
We define the $(\{\iota\}^*\times \{\iota\})$-sorted set $\Gamma^{\lalg{CFG},\Delta}$ such that for each $k \ge 0$:
\begin{align*}
(\Gamma^{\lalg{CFG},\Delta})_{(\iota^k,\iota)} = \{ \langle w \rangle \mid & \; w = v_0 x_1 v_1 \ldots x_k v_k \text{ for some } v_0, \ldots, v_k \in \Delta^*\}\enspace.
\end{align*}
We define the \emph{CFG-algebra over $\Delta$}
\index{CFG-algebra over $\Delta$}
to be the $\{\iota\}$-sorted $\Gamma^{\lalg{CFG},\Delta}$-algebra
\((\gls{alg:cfg},\phi)\)
with
\begin{itemize}
\item $\lalg{CFG}^\Delta = (\lalg{CFG}^\Delta)_\iota = \Delta^*$.
\item For every $k \in \mathbb N$, $\langle w\rangle \in (\Gamma^{\lalg{CFG},\Delta})_{(\iota^k,\iota)}$, and $u_1,\ldots,u_k \in \Delta^*$ we define
\[\phi(\langle w\rangle)(u_1,\ldots,u_k) = w'
\]
where $w'$ is obtained from $w$ by replacing each $x_i$ by $u_i$ for each $i \in [k]$.
\end{itemize}
A \emph{context-free grammar over $\Delta$}
\index{context-free grammar}
\index{CFG}
is an RTG-based LM
\[(G,(\lalg{CFG}^\Delta,\phi))\]
where the $S$-sorted RTG $G$ is in normal form.
\index{context-free language}
A \emph{context-free language} is the formal language generated by some context-free grammar.
We note that the language $L(G)_{\lalg{CFG}^\Delta}$ generated by this context-free grammar is a formal language over $\Delta$.
We also note that, by definition of RTG-LMs, the terminal set $\Sigma$ of $G$ is a $(\{\iota\}^*\times \{\iota\})$-sorted subset of $\Gamma^{\lalg{CFG},\Delta}$.
Thus, for the specification of a particular context-free grammar, we only have to specify the $\Delta$ and an RTG.
We denote the class of all context-free grammars by $\gls{gclass:cfg}$.
Indeed, classical context-free grammars and those which are defined here are in the following, easy one-to-one correspondence. Let $G= (N,\Delta,A_0,R)$ be a usual context-free grammar and let $(G',(\lalg{CFG}^\Delta,\phi))$ be a context-free grammar (as defined here) where $G'=(N,\Sigma,A_0,R')$. We say that $G$ and $G'$ \emph{correspond to each other} if the following two statements are equivalent for every $k \in \mathbb{N}$, $A, A_1,\ldots,A_k \in N$, and $v_0, \ldots, v_k \in \Delta^*$:
\begin{enumerate}
\item $A \rightarrow v_0 A_1 v_1 \ldots A_k v_k$ is in $R$.
\item $A \rightarrow \sigma(A_1,\ldots,A_k)$ is in $R'$ with $\sigma = \langle v_0 x_1 v_1 \ldots x_k v_k\rangle$.
\end{enumerate}
Then, clearly, the languages generated by $G$ and $(G',(\lalg{CFG}^\Delta,\phi))$ are the same.
\begin{example}
\label{ex:cfg}
We let~$\Delta = \{ \term{Fruit},\term{flies},\term{like},\term{bananas} \}$.
We consider the $\{\iota\}$-sorted RTG $G = (N, \Sigma, \nont{S}, R)$ and the language algebra $(\alg L, \phi)$ from Example~\ref{ex:best-derivation-mmonoid}.
We observe that $\big(G, (\alg L, \phi)\big)$ is a context-free grammar.
This can be seen by letting $\sigma = \langle x_1 x_2 \rangle$, $\gamma = \langle x_1 \rangle$, and $\alpha_a = \langle a \rangle$ for every $a \in \Delta$.
Then $(\alg L, \phi) = \lalg{CFG}^\Delta$.
\begin{figure}
\caption{Two abstract syntax trees for the syntactic object $a = \terminal{fruit}\ \terminal{flies}\ \terminal{like}\ \terminal{bananas}$ and their evaluation in the syntactic component.}
\label{fig:cfg-lm}
\end{figure}
In Figure~\ref{fig:cfg-lm} we have again illustrated the ASTs $d$ and $d'$ from Figure~\ref{fig:overview-ex} and their evaluation in the syntactic component.
This time we have used the notions of $\lalg{CFG}^\Delta$ and also shown $d'$ and $\pi_\Sigma(d')$ entirely.
The AST $d$ in the top row expresses that certain insects ($\terminal{fruit} \ \terminal{flies}$) like something ($\terminal{bananas}$).
The AST $d'$ in the bottom row expresses how $\terminal{fruit}$ performs a certain activity (to fly like bananas).
Hence this RTG-LM is ambiguous.
\end{example}
\subsubsection{The LCFRS-algebras and linear context-free rewriting systems}
The formalization of context-free grammars using the initial algebra semantics can be generalized to (string) linear context-free rewriting systems in a straightforward way.
A formal definition was given by \textcite[Def.~6.2+6.3]{kal10}.
Here we will embed it into our framework of wRTG-LM.
Let $\Delta$ be an alphabet and $S = \mathbb{N}$ be a set of sorts.
In this section it is convenient to use a doubly indexed set of variables.
Let \(k \in \mathbb{N}\) and $l_1,\ldots, l_k \in \mathbb{N}$.
We denote by $X_{l_1,\ldots, l_k}$ the set
\[
X_{l_1,\ldots, l_k} = \{x^{(i)}_j \mid i \in [k], j \in [l_i]\} \enspace.
\]
Intuitively, each $x^{(i)}_j$ denotes a string and each $x^{(i)}$ represents an $l_i$-tuple of strings.
We define the $(\mathbb{N}^*\times \mathbb{N})$-sorted set $\Gamma^{\lalg{LCFRS},\Delta}$ such that for each $k,n,l_1,\ldots, l_k \in \mathbb{N}$:
\begin{align*}
(\Gamma^{\lalg{LCFRS},\Delta})_{(l_1\ldots l_k,n)} = \{ \langle w_1,\ldots, w_n\rangle \mid{} &w_i \in (\Delta \cup X_{l_1,\ldots, l_k})^* \text{ and each variable}\\
& \text{$x^{(i)}_j\in X_{l_1,\ldots,l_k}$ occurs exactly once in $w_1\ldots w_n$}\} \enspace.
\end{align*}
\index{LCFRS-algebra over $\Delta$}
We define the \emph{LCFRS-algebra over $\Delta$} to be the $\mathbb{N}$-sorted $\Gamma^{\lalg{LCFRS},\Delta}$-algebra $(\gls{alg:lcfrs},\phi)$ with
\begin{itemize}
\item $\lalg{LCFRS}^\Delta = \bigcup_{n \in \mathbb{N}} (\lalg{LCFRS}^\Delta)_n$ where $(\lalg{LCFRS}^\Delta)_n=(\Delta^*)^n$.
\item For every $\langle w_1,\ldots, w_n\rangle \in (\Gamma^{\lalg{LCFRS},\Delta})_{(l_1\ldots l_k,n)}$ and $u^{(1)}_1,\ldots,u^{(1)}_{l_1}, \ldots, u^{(k)}_1,\ldots,u^{(k)}_{l_k} \in \Delta^*$ we define
\[\phi(\langle w_1,\ldots,w_n\rangle)((u^{(1)}_1,\ldots,u^{(1)}_{l_1}), \ldots, (u^{(k)}_1,\ldots,u^{(k)}_{l_k})) = (w_1',\ldots, w_n')
\]
where $w_\kappa'$ ($\kappa \in [n]$) is obtained from $w_\kappa$ by replacing each $x^{(i)}_j$ by $u^{(i)}_j$ ($i \in [k]$, $j \in [l_i]$).
\end{itemize}
\index{linear context-free rewriting system}
\index{LCFRS}
A \emph{linear context-free rewriting system over $\Delta$} is an RTG-LM
\[
(G,(\lalg{LCFRS}^\Delta,\phi))
\]
where the $\mathbb N$-sorted RTG $G=(N,\Sigma,A_0,R)$ is in normal form and $A_0 \in N_1$.
We note that the language $L(G)_{\lalg{LCFRS}^\Delta}$ generated by this linear context-free rewriting system is a formal language over $\Delta$.
\index{LCFRS!fan-out}
For each $l \in \mathbb N$ and $A \in N_l$ we call $l$ the \emph{fan-out of $A$};
the \emph{fan-out of $G$} is the maximal fan-out of all nonterminals in $N$.
We denote the class of all linear context-free rewriting systems by $\gls{gclass:lcfrs}$.
Intuitively it is clear that, for each context-free grammar $(G,(\lalg{CFG}^\Delta,\phi))$ over $\Delta$, there is a linear context-free rewriting system $(G',(\lalg{LCFRS}^\Delta,\phi))$ over $\Delta$ in which each nonterminal has fan-out 1, which generates the same language as $(G,(\lalg{CFG}^\Delta,\phi))$.
In fact, if we identify the sort $\iota$ of $G$ with the sort $1$ of $G'$, then $G=G'$.
This also holds the other way around if the variables in the $\Sigma$-symbol of each rule occur in the order $x^{(1)}_1,x^{(2)}_1,\ldots,x^{(k)}_1$.
\begin{example}\label{ex:lcfrs}
We consider the set $\Delta =\{ \terminal{zag}, \terminal{helpen}, \terminal{lezen}, \terminal{Jan}, \terminal{Piet}, \terminal{Marie}\}$ and the following $\mathbb{N}$-sorted RTG $G=(N,\Sigma,\mathrm{root},R)$
with
\begin{itemize}
\item $N = N_1 \cup N_2$ and $N_1 = \{\mathrm{root}, \mathrm{nsub}\}$ and $N_2 = \{\mathrm{dobj}\}$,
\item $\Sigma = \Sigma_{(12,1)} \cup \Sigma_{(12,2)} \cup \Sigma_{(1,2)} \cup \Sigma_{(\varepsilon,1)}$ where
\begin{align*}
\Sigma_{(12,1)} &= \{\langle x^{(1)}_1 x^{(2)}_1 \terminal{zag}\ x^{(2)}_2\rangle\}\\
\Sigma_{(12,2)} &= \{\langle x^{(1)}_1 x^{(2)}_1, \terminal{helpen}\ x^{(2)}_2\rangle\}\\
\Sigma_{(1,2)} &= \{\langle x^{(1)}_1, \terminal{lezen}\rangle\}\\
\Sigma_{(\varepsilon,1)} &= \{\langle \terminal{Jan}\rangle, \langle \terminal{Piet}\rangle, \langle \terminal{Marie}\rangle\}.
\end{align*}
(We note that $\Sigma$ is an $(\mathbb{N}^*\times \mathbb{N})$-sorted subset of $\Gamma^{\lalg{LCFRS},\Delta}$.)
\item $R$ is the following set of rules:
\begin{alignat*}{4}
r_1: \ & & \mathrm{root} & \to \langle x^{(1)}_1 x^{(2)}_1 \terminal{zag}\ x^{(2)}_2\rangle (\textrm{nsub}, \textrm{dobj}) \qquad &
r_2: \ & & \mathrm{nsub} & \to \langle \terminal{Jan}\rangle \\
r_3: \ & & \mathrm{dobj} & \to \langle x^{(1)}_1 x^{(2)}_1, \terminal{helpen}\ x^{(2)}_2\rangle (\textrm{nsub}, \textrm{dobj}) \qquad &
r_4: \ & & \mathrm{nsub} & \to \langle \terminal{Piet}\rangle \\
r_5: \ & & \mathrm{dobj} & \to \langle x^{(1)}_1, \terminal{lezen}\rangle (\textrm{nsub}) \qquad &
r_6: \ & & \mathrm{nsub} & \to \langle \terminal{Marie}\rangle
\end{alignat*}
\end{itemize}
Then \(d = r_1(r_4,r_2(r_5,r_3(r_6)))\) is an example of an abstract syntax tree in $\T_R$.
We have illustrated $d$ and $\pi_\Sigma(d)$ in Figure~\ref{fig:ex-lcfrs}.
Clearly, $\sem[\lalg{LCFRS}^\Delta]{\pi_\Sigma(d)} = \terminal{Jan} \ \terminal{Piet} \ \terminal{Marie} \ \terminal{zag} \ \terminal{helpen} \ \terminal{lezen}$. \qedhere
\begin{figure}
\caption{An abstract syntax tree $d$ for the syntactic object $a = \terminal{Jan} \ \terminal{Piet} \ \terminal{Marie} \ \terminal{zag} \ \terminal{helpen} \ \terminal{lezen}$.}
\label{fig:ex-lcfrs}
\end{figure}
\end{example}
\subsubsection{TAG-algebras and tree-adjoining grammars}
We consider a slight extension of tree-adjoining grammars \cite{jossch97} in which we allow nonterminals (or: states) as in \cite{buenedvog11,buenedvog12}.
Here we restrict ourselves to tree-adjoining grammars generating ranked trees.
Our presentation is essentially the one of \cite{buenedvog11,buenedvog12}.
We also refer to~\cite{tag-irtg} for a formalization of tree-adjoining grammars as interpreted regular tree grammars (IRTG).
Let $S = \{\iota\}$ be a set with one sort $\iota$.
Let $\Delta$ be a finite $(S^* \times S)$-sorted set (of \emph{terminal symbols}).
Let $X$ be an $S$-sorted set of variables and $Z$ be an $(S\times S)$-sorted set of variables (i.e., for each $z \in Z$, we have $\sort(z) = (\iota,\iota)$).
We assume that $\Delta$, $X$, and $Z$ are pairwise disjoint.
Then $\Delta \cup Z$ is an $(S^*\times S)$-sorted set.
Moreover, we let $*$ be a symbol (\emph{foot node}) not in $\Delta \cup X \cup Z$; we let $\sort(*)=\iota$.
For every $m,l \in \mathbb N$, we define the $S$-sorted set $X_m=\{x_{1,\iota},\ldots,x_{m,\iota}\}$ of variables where $X_m \subseteq X$ and $\sort(x_{i,\iota}) = \iota$ (\emph{substitution sites}) and the set $Z_l=\{z_{1,(\iota,\iota)},\ldots,z_{l,(\iota,\iota)}\}$ of variables where $Z_l \subseteq Z$ and $\sort(z_{j,(\iota,\iota)}) = (\iota,\iota)$ (\emph{adjoining sites});
with $i \in [m]$ and $j \in [l]$.
We say that $\zeta \in \T_{\Delta \cup Z_l}(X_m)$ is \emph{linear, nondeleting in $X_m \cup Z_l$} if each element in $X_m \cup Z_l$ occurs exactly once in $\zeta$.
We say that $\zeta \in \T_{\Delta \cup Z_l}(X_m \cup \{*\})$ is \emph{linear, nondeleting in $X_m \cup Z_l \cup \{*\}$} if each element in $X_m \cup Z_l \cup \{*\}$ occurs exactly once in $\zeta$.
We define the $(\{ \iota, (\iota, \iota) \}^* \times \{ \iota, (\iota, \iota ) \})$-sorted set $\Gamma^{\lalg{TAG},\Delta}$ such that $(\Gamma^{\lalg{TAG},\Delta})_{(w,y)} = \emptyset$ for every $w \not\in \{\iota\}^* \circ \{(\iota, \iota)\}^*$ and $y \in \{\iota, (\iota,\iota)\}$.
For every $m,l \in \mathbb N$ we define
\begin{align*}
(\Gamma^{\lalg{TAG},\Delta})_{(\iota^m (\iota,\iota)^l,\iota)} &= \{\langle \zeta \rangle \mid \zeta \in \T_{\Delta \cup Z_l}(X_m) \text{ linear, nondeleting in $ X_m \cup Z_l$}\}\\
(\Gamma^{\lalg{TAG},\Delta})_{(\iota^m (\iota,\iota)^l,(\iota,\iota))} &= \{\langle \zeta \rangle \mid \zeta \in \T_{\Delta \cup Z_l}(X_m \cup \{*\}) \text{ linear, nondeleting in $ X_m \cup Z_l \cup \{*\}$}\}
\end{align*}
\index{TAG-algebra over $\Delta$}
We define the \emph{TAG-algebra over $\Delta$} to be the $\{\iota,(\iota,\iota)\}$-sorted $\Gamma^{\lalg{TAG},\Delta}$-algebra $(\gls{alg:tag},\phi)$ with
\begin{itemize}
\item $\lalg{TAG}^\Delta = (\lalg{TAG}^\Delta)_\iota \cup (\lalg{TAG}^\Delta)_{(\iota,\iota)}$ with\\ $(\lalg{TAG}^\Delta)_\iota = \T_\Delta$ and $(\lalg{TAG}^\Delta)_{(\iota,\iota)} = \{t \in \T_{\Delta}(\{*\}) \mid * \text{ occurs exactly once in } t \}$ and
\item for every $\langle \zeta \rangle \in (\Gamma^{\lalg{TAG},\Delta})_{(\iota^m(\iota,\iota)^l,\iota)}$, $t_1,\ldots,t_m \in (\lalg{TAG}^\Delta)_\iota$, and $t_1',\ldots,t_l' \in (\lalg{TAG}^\Delta)_{(\iota,\iota)}$ we define
\[
\phi(\langle \zeta\rangle)(t_1,\ldots,t_m,t_1',\ldots,t_l') = \tilde{h}(\zeta)
\]
where $\tilde{h}$ is the $S$-sorted tree homomorphism induced by the mapping $h\colon \Delta \cup X_m \cup Z_l \rightarrow \T_\Delta(X)$ defined by
\begin{align*}
h(\delta) &= \delta(x_{1,\iota},\ldots,x_{k,\iota}) \hspace*{-1mm} && \text{ for each $\delta \in \Delta$ with $\rk_\Delta(\delta)=k$}\\
h(x_{i,\iota}) &= t_i && \text{ for each $i \in [m]$}\\
h(z_{j,(\iota,\iota)}) &= t_j'' && \text{ for each $j \in [l]$ and $t_j''$ is obtained from $t_j'$ by replacing $*$ by $x_{1,\iota}$};
\end{align*}
\item for every $\langle \zeta \rangle \in (\Gamma^{\lalg{TAG},\Delta})_{(\iota^m(\iota,\iota)^l,(\iota,\iota))}$, $t_1,\ldots,t_m \in (\lalg{TAG}^\Delta)_\iota$, and $t_1',\ldots,t_l' \in (\lalg{TAG}^\Delta)_{(\iota,\iota)}$ we define
$\phi(\langle \zeta\rangle)(t_1,\ldots,t_m,t_1',\ldots,t_l') = \tilde{h'}(\zeta)$
where $h'\colon \Delta \cup X_m \cup Z_l \cup \{*\} \rightarrow \T_{\Delta}(X)$ is defined in the same way as $h$ but additionally we let $h'(*) = *$.
\end{itemize}
\index{tree-adjoining grammar}
\index{TAG}
A \emph{tree-adjoining grammar over $\Delta$} is an RTG-LM
\[
(G,(\lalg{TAG}^{\Delta},\phi))
\]
where the $\{\iota, (\iota,\iota)\}$-sorted RTG $G=(N,\Sigma,A_0,R)$ is in normal form and $A_0 \in N_\iota$.
We note that the language $L(G)_{\lalg{TAG}^\Delta}$ generated by this tree-adjoining grammar is a formal tree language over $\Delta$.
We denote the class of all tree-adjoining grammars by $\gls{gclass:tag}$.
\begin{example}[cf.~\cite{jossch97} and {\cite[Fig.~1]{buenedvog12}}]\label{ex:tag}
We consider the set
\[
\Delta =\{\term{S}, \term{V}, \term{VP}, \term{N}, \term{NP}, \term{D}, \term{Adv}, \term{saw}, \term{Mary}, \term{a}, \term{man}, \term{yesterday}\}
\]
of terminal symbols.
In order to keep notations short, we assume that $\Delta$ is turned into a finite, non-empty ranked set by adding (implicitly) to each symbol of $\Delta$ a finite number of ranks (like $(\term{NP},2)$ and $(\term{NP},1)$);
however, in trees over $\Delta$ we drop again this rank information as in Figure~\ref{fig:ex-tag}.
In Figure~\ref{fig:ex-tag} we show a tree-adjoining grammar $(G,(\lalg{TAG}^{\Delta},\phi))$ where the RTG $G$ has the nonterminals $\nont{A_0}, \nont{A_1}, \nont{A_2}, \nont{F}$ and five rules.
One possible abstract syntax tree of the RTG $G$ is
\[
d = r_1(r_2,r_3(r_4),r_5)\enspace.
\]
In Figure~\ref{fig:ex-tag-semantics} we have shown $d$, $\pi_\Sigma(d)$, and its evaluation in the $\lalg{TAG}^\Delta$-algebra, i.e., the syntactic object $a$.
Unlike in the previous examples, $a$ is not a string, but a tree.
In particular, it results from evaluating
\[
\phi\biggl(\biggl\langle\tikz[inlinetree]{\graph{"$z_1$" -- "$\term{S}$" -- {"$x_1$", "$\term{VP}$" -- {"$\term{V}$" -- "$\term{saw}$", "$x_2$"}}};}\biggr\rangle\biggr)\Bigg(
\tikz[inlinetree]{\graph{"$\term{NP}$" -- "$\term{N}$" -- "$\term{Mary}$"};},
\tikz[inlinetree]{\graph{"$\term{NP}$" -- {"$\term{D}$" -- "$\term{a}$", "$\term{N}$" -- "$\term{man}$"}};},
\tikz[inlinetree]{\graph{"$\term{S}$" -- {"$\term{Adv}$" -- "$\term{yesterday}$", *}};}
\Bigg)
\]
in the $\lalg{TAG}^\Delta$-algebra.
\end{example}
\begin{figure}
\caption{Example of a TAG (following \cite{jossch97}).}
\label{fig:ex-tag}
\end{figure}
\begin{figure}
\caption{An abstract syntax tree $d$ in the RTG-LM $(G, (\lalg{TAG}^\Delta,\phi))$, the term $\pi_\Sigma(d)$, and its evaluation, i.e., the syntactic object $a$.}
\label{fig:ex-tag-semantics}
\end{figure}
By dropping from TAG-algebras everything which refers to adjoining (the sort $(\iota, \iota)$, the variables in $Z_l$), we might define RTG-algebras and define a regular tree grammar as an $S$-sorted RTG-LM $(G,(\lalg{RTG}^{\Delta},\phi))$.
This initial algebra presentation of RTG might seem technically exaggerated, because $G$ already is a regular tree grammar.
However, if we stay with the property that the RTG $G$ in the RTG-LM is in normal form, then we have to deal with right-hand sides that do not contain exactly one terminal symbol.
The initial algebra approach above offers one possibility for handling this.
\subsubsection{Yield-algebras and yield grammars}
\citet{GieMeySte04} use yield grammars as a main component of algebraic dynamic programming.
Here we show that yield grammars are a particular subclass of $\gclass{all}$.
Let~$S$ be a set (of sorts) and~$\Delta$ be an $(S^* \times S)$-sorted alphabet.
We denote by $\Delta_0$ the set $\bigcup_{s \in S} \Delta_{(\varepsilon, s)}$.
\index{yield-algebra over $\Delta$}
We define the \emph{$S$-yield-algebra over~$\Delta$} to be the $S$-sorted $\Delta$-algebra $(\gls{alg:yield},\phi)$ with
\begin{itemize}
\item $\lalg{YIELD}^\Delta = \bigcup_{s \in S} (\lalg{YIELD}^\Delta)_s$ where $(\lalg{YIELD}^\Delta)_s = \{ \langle w,s \rangle \mid w \in (\Delta_0)^* \}$, and
\item for every $k \in \mathbb N$, $\delta \in \Delta_{(s_1 \dots s_k,s)}$, and $\langle w_1,s_1 \rangle \in (\lalg{YIELD}^\Delta)_{s_1},\dots,\langle w_k,s_k \rangle \in (\lalg{YIELD}^\Delta)_{s_k}$ we define
\[ \phi(\delta)(\langle w_1,s_1 \rangle,\dots,\langle w_k,s_k \rangle) = \begin{cases}
\langle \delta,s \rangle &\text{if $k = 0$} \\
\langle w_1 \dots w_k,s \rangle &\text{otherwise.}
\end{cases} \]
\end{itemize}
We note that the carrier set of $\lalg{YIELD}^\Delta$ consists of tuples and the second component of each such tuple is a sort.
This is for technical reasons: otherwise there would be no mapping $\sort: \lalg{YIELD}^\Delta \rightarrow S$.
We can easily see this by letting $S = \{ a,b \}$ and $\Delta = \Delta_{(\varepsilon,a)} \cup \Delta_{(aa,a)} \cup \Delta_{(aaa,b)}$ with $\Delta_{(\varepsilon,a)} = \{ \text{a} \}$, $\Delta_{(aa,a)} = \{ \alpha \}$, and $\Delta_{(aaa,b)} = \{ \beta \}$.
Then $t_1 = \alpha(\text{a},\alpha(\text{a},\text{a}))$ is in $(\T_\Delta)_a$ and $t_2 = \beta(\text{a},\text{a},\text{a})$ is in $(\T_\Delta)_b$, but $\yield_{\Delta_0}(t_1) = \yield_{\Delta_0}(t_2) = \text{aaa}$.
We also observe that the unique $\Delta$-homomorphism from $(\T_\Delta,\phi_\Delta)$ to $(\lalg{YIELD}^\Delta,\phi)$ is the mapping $f: \T_\Delta \rightarrow (\Delta^* \times S)$, where $f(t) = (\yield_{\Delta_0}(t),\sort(t))$ for every $t \in \T_\Delta$.
\index{yield-grammar}
An \emph{$S$-sorted yield grammar over~$\Delta$ (yield-grammar)} is an RTG-LM
\[(G,(\lalg{YIELD}^\Delta,\phi))\]
where $G$ is an $S$-sorted RTG.
We denote the class of all yield-grammars by $\gls{gclass:yield}$.
\subsubsection{Further classes of RTG-based language models}
\label{subsect:general-classes}
We introduce three classes of RTG-LMs which are not defined by a particular language algebra (such as $\gclass{CFG}$), but rather by imposing restrictions on the involved RTGs.
An RTG-LM $\big((N, \Sigma, A_0, R), \alg L\big)$ is
\begin{itemize}
\item \emph{acyclic}, if every $d \in \T_R$ is acyclic,
\item \emph{monadic}, if for every $r \in R$ it holds that $|\rk(r)| \le 1$, and
\item \emph{nonlooping},
if for every $d \in \T_R$ and $p \in \pos(d) \setminus \{ \varepsilon \}$ the following holds:
if $\sem[\alg L]{\pi_\Sigma(d)} = \sem[\alg L]{\pi_\Sigma(d|_p)}$, then $d(\varepsilon) \not= d(p)$.
\end{itemize}
We denote the class of all acyclic RTG-LMs by $\gls{gclass:acyc}$,
the class of all monadic RTG-LMs by $\gls{gclass:mon}$,
the class of all nonlooping RTG-LMs by $\gls{gclass:nl}$,
and the class of all RTG-LMs with finitely decomposable language algebra by $\gls{gclass:findc}$.
The CFG-algebras, LCFRS-algebras, and TAG-algebras are finitely decomposable, i.e., $\gclass{CFG} \subseteq \gclass{\operatorname{fin-dc}}$, $\gclass{LCFRS} \subseteq \gclass{\operatorname{fin-dc}}$, and $\gclass{TAG} \subseteq \gclass{\operatorname{fin-dc}}$.
\subsubsection{Summary of considered classes of RTG-LMs}
We summarize all classes of RTG-LMs introduced in this subsection in Table~\ref{tab:classes-rtglms}.
\begin{table}[h]
\centering
\begin{tabular}{ll}
\toprule
Notation & Description: the class of all \dots \\
\midrule
$\gclass{CFG}$ & context-free grammars \\
$\gclass{LCFRS}$ & linear context-free rewriting systems \\
$\gclass{TAG}$ & tree-adjoining grammars \\
$\gclass{YIELD}$ & yield-grammars \\
$\gclass{acyc}$ & acyclic RTG-LMs \\
$\gclass{mon}$ & monadic RTG-LMs \\
$\gclass{nl}$ & nonlooping RTG-LMs \\
$\gclass{\operatorname{fin-dc}}$ & RTG-LMs with finitely decomposable language algebra \\
\bottomrule
\end{tabular}
\caption{Classes of RTG-LMs introduced in Section~\ref{sec:classes-rtglms}.}
\label{tab:classes-rtglms}
\end{table}
\subsection{Classes of weight algebras}
\label{sec:classes-mmonoids}
In this subsection we first show that the weight algebras used by \citet{Goodman1999} and \citet{ned03} are subclasses of $\wclass{all}$, i.e., the class of all complete M-monoids.
We then define additional subclasses of $\wclass{all}$ which will allow us to investigate particular M-monoid parsing problems of this paper with respect to their algorithmic solvability.
\subsubsection{M-monoids that are associated with semirings}
\label{sec:mmonoids-associated-with-semirings}
\index{M-monoid!associated with semiring}
Let $(\walg{K},\oplus,\otimes,\welem{0},\welem{1}, \infsum)$ be a complete semiring.
The \emph{M-monoid associated with~$\walg{K}$} (cf.~\cite[Definition~8.5]{Fulop2009}) is defined as the M-monoid $M(\walg{K}) = (\walg{K},\oplus,\welem{0},\Omega_\otimes,\infsum)$ where $\Omega_\otimes = \bigcup_{k \ge 0} (\Omega_\otimes)_k$ and $(\Omega_\otimes)_k = \{ \mul_{\welem{k}}^{(k)} \mid \welem{k} \in \walg{K} \}$ for every $k \in \mathbb N$.
For every $k \in \mathbb N$ and $\welem{k}, \welem{k}_1, \dots, \welem{k}_k \in \walg{K}$ we define
\[ \mul_{\welem{k}}^{(k)}(\welem{k}_1,\dots,\welem{k}_k) = \welem{k} \otimes \welem{k}_1 \otimes \dots \otimes \welem{k}_k \enspace. \]
In particular, $\mul_{\welem{k}}^{(0)}() = \welem{k}$ for every $\welem{k} \in \walg{K}$.
Note that $\welem 1 = \mul_{\welem 1}^{(0)}()$.
Clearly, the M-monoid $M(\walg K)$ is complete and distributive.
\citet{Goodman1999} modeled several classic parsing problems by specifying for each of these problems a complete semiring which encapsulates the computation of the problem's solution.
Using the approach from above, we can for each of these semirings define a weight algebra of a wRTG-LM which is associated with that semiring.
In the following, we will do this for some semirings which we find particularly interesting.
We denote the class of all M-monoids that are associated with complete semirings by $\gls{wclass:sr}$.
\index{tropical M-monoid}
\index{tropical semiring}
\begin{example}\label{ex:tropical-M-monoid}
The \emph{tropical semiring} is the complete semiring $(\mathbb{R}_0^\infty,\min,+,\infty,0,\inf)$ with the usual binary minimum operation on the reals.
The \emph{tropical M-monoid} is the M-monoid associated with the tropical semiring, i.e., the M-monoid $\gls{wclass:tropical} = (\mathbb{R}_0^\infty,\min,\infty,\Omega_+,\inf)$.
\end{example}
\index{Viterbi M-monoid}
\index{Viterbi semiring}
\begin{example}\label{ex:Viterbi-M-monoid}
The \emph{Viterbi semiring} is the complete semiring $(\mathbb{R}_0^1,\max,\cdot,0,1,\sup)$ with the usual binary maximum operation on reals.
The \emph{Viterbi M-monoid} is the M-monoid $\gls{wclass:viterbi} = (\mathbb{R}_0^1,\max,0,\Omega_\cdot,\sup)$ (with $\cdot$ as index of $\Omega$).
\end{example}
\begin{example}\label{ex:best-derivation-mmonoid-sr}
Recall the definition of the best derivation M-monoid $\walg{BD}$ from Example~\ref{ex:best-derivation-mmonoid}.
It is a d-complete and distributive M-monoid and furthermore, $(0, \emptyset)$ is absorptive.
The proof of this statement is given in Appendix~\ref{sec:proof-best-derivation-mmonoid}.
\end{example}
\begin{example}\label{ex:nbest-mmonoid}
We define an M-monoid which describes the computation of the probabilities of the $n$ best derivations of a syntactic object (where $n \in \mathbb N_+$).
This M-monoid is (up to notation) associated with the \emph{Viterbi-$n$-best semiring} \cite[Figure 5]{Goodman1999}.
Let $n \in \mathbb N_+$.
In the following, we will denote a family $f: [n] \to \mathbb R_0^1$ as $(f_1, \dots, f_n)$ and let
\[ \pref(\mathbb N, n) = \bigcup_{\substack{n' \in \mathbb N: \\ n' \ge n}} \{ [n'] \} \cup \{ \mathbb N \} \enspace. \]
We define the set $\gls{wclass:nbest} = \{ f: [n] \to {\mathbb R_0^1} \mid \text{for every $i \in [n-1]$: $f(i) \ge f(i+1)$} \}$.
Furthermore, we define the mapping $\takenbest: ({\mathbb R_0^1})^{\pref(\mathbb N, n)} \to \nbest$ such that for every $I \in \pref(\mathbb N, n)$ and family $(f_i \mid i \in I)$ of elements of ${\mathbb R_0^1}$
\[ \takenbest((f_i \mid i \in I)) = (g_1, \dots, g_n) \enspace, \]
where $g \in \nbest$ is defined as follows:
\begin{enumerate}
\item If~$I$ is finite, then for every $i \in [n]$, $g_i = f(v(i))$, where $v: [n] \to I$ is recursively defined such that for every $i \in [n]$
\[ v(i) = \text{an arbitrary} \ j \in \argmax\limits_{j' \in I \setminus v|_i} f_{j'} \enspace, \]
where for every $i \in [n]$, we let $v|_i = \{ v(j) \mid j \in [n] \ \text{and} \ j < i \}$.
\item Otherwise, we define the mapping $v: [n] \to I \cup \{ \bot \}$, where $\bot$ is a new element, recursively for every $i \in [n]$ such that
\[ v(i) = \begin{cases}
\bot &\text{if $i > 1$ and $v(i-1) = \bot$} \\
\text{an arbitrary} \ j \in \argmax\limits_{j' \in I \setminus v|_i} f_{j'} &\text{if there is such a $j$} \\
\bot &\text{otherwise,}
\end{cases} \]
where for every $i \in [n]$, we let $v|_i = \{ v(j) \mid j \in [n] \ \text{and} \ j < i \}$.
Then for every $i \in [n]$
\[ g_i = \begin{cases}
f(v(i)) &\text{if $v(i) \not= \bot$} \\
\sup \{ f_i \mid i \in I \setminus v|_i \} &\text{otherwise.}
\end{cases} \]
(We note, again, that this supremum exists because~$1$ is an upper bound of every subset of~${\mathbb R_0^1}$ and every bounded subset of~$\mathbb R$ has a supremum.)
\end{enumerate}
\begin{sloppypar}
Moreover, we define the mapping $\cdot_n: (\mathbb R_0^1)^{[n]} \times (\mathbb R_0^1)^{[n]} \to (\mathbb R_0^1)^{[n]}$ such that for every $(a_1, \dots, a_n), (b_1, \dots, b_n) \in \nbest$
\end{sloppypar}
\[ (a_1, \dots, a_n) \cdot_n (b_1, \dots, b_n) = \takenbest \left( \left( a(\lfloor i / n \rfloor + 1) \cdot b(i \bmod n + 1) \, \middle| \, i \in [0, n^2 - 1] \right) \right) \enspace. \]
\index{n@$n$-best M-monoid}
The \emph{$n$-best M-monoid} is the complete M-monoid $(\nbest, \maxn, (\underbrace{0, \dots, 0}_{\text{$n$ times}}), \Omega_n, \infsum[\maxn])$, where
\begin{itemize}
\item for every $(a_1, \dots, a_n), (b_1, \dots, b_n) \in \nbest$,
\[ \maxn \big( (a_1, \dots, a_n), (b_1, \dots, b_n) \big) = \takenbest \big( (a_1, \dots, a_n, b_1, \dots, b_n) \big) \enspace, \]
\item $\Omega_n = \{ \mulnkk \mid k \in \mathbb N \ \text{and} \ \welem k \in \nbest \}$, where for each $k \in \mathbb N$ and $\welem k \in \nbest$, $\mulnkk: \nbest^k \to \nbest$ such that for each $\welem k_1, \dots, \welem k_k \in \nbest$
\[ \mulnkk(\welem k_1, \dots, \welem k_k) = (\welem k, \underbrace{0, \dots, 0}_{n - 1 \ \text{times}}) \cdot_n \welem k_1 \cdot_n \ldots \cdot_n \welem k_k \enspace, \]
\item for every nonempty $I$-indexed family $(\welem k_i \mid i \in I)$ over $\nbest$,
\[ \infsum[\maxn]_{i \in I} \welem k_i = \takenbest((f_i \mid i \in \mathbb N)) \enspace, \]
where for every $i \in \mathbb N$, $f_i = \welem k_{\lfloor i / n \rfloor + 1}(i \bmod n + 1)$.
\end{itemize}
The $n$-best M-monoid is a d-complete and distributive M-monoid.
Furthermore, $(\underbrace{0, \dots, 0}_{\text{$n$ times}})$ is absorptive.
The proof of this statement is given in Appendix~\ref{sec:proof-nbest-mmonoid}.
\end{example}
\subsubsection{Superior M-monoids}\label{sec:superior-mmonoids}
The weight algebras of \citet{ned03} are \emph{superior}, a notion defined by \citet{Knuth1977}.
They are essentially complete and distributive M-monoids of the form $(\walg K, \min, \mathbb 0, \Omega, \inf)$, where $(\walg K, \preceq)$ is a total order, $\inf(\walg K) \in \walg K$, and $\Omega$ is a set of \emph{superior} functions, i.e., for each $k \in \mathbb N$, $\omega \in \Omega_k$, $i \in [k]$, and $\welem k_1, \dots, \welem k_k \in \walg K$ it holds that
\begin{enumerate}
\item if $\welem k \preceq \welem k_i$, then
\(\omega(\welem{k}_1,\dots,\welem{k}_{i-1},\welem{k},\welem{k}_{i+1},\dots,\welem{k}_k) \preceq \omega(\welem{k}_1,\dots,\welem{k}_{i-1},\welem{k}_i,\welem{k}_{i+1},\dots,\welem{k}_k)\),
and
\item $\max \{ \welem k_1, \dots, \welem k_k \} \preceq \omega(\welem k_1, \dots, \welem k_k)$.
\end{enumerate}
\index{M-monoid!superior}
We will call such M-monoids \emph{superior M-monoids} and denote the class of all superior M-monoids by $\gls{wclass:sup}$.
It is easy to see that every superior M-monoid is completely idempotent and thus, by Lemma~\ref{lem:inf-idp-d-complete}, also d-complete.
The tropical M-monoid and the Viterbi M-monoid from Examples~\ref{ex:tropical-M-monoid} and~\ref{ex:Viterbi-M-monoid}, respectively, are superior.
For the proofs of these statements, we refer to Appendix~\ref{sec:proof-superior-mmonoids}.
\subsubsection{Further classes of complete M-monoids}
We denote the class of all d-complete M-monoids by $\gls{wclass:dcomp}$ and the class of all complete and distributive M-monoids by~$\gls{wclass:dist}$.
We define $\gls{wclass:finidpo}$ to be the class of all M-monoids $(\walg K, \oplus, \mathbb 0, \Omega, \infsum)$ in $\wclass{dist}$ for which
\begin{enumerate*}
\item $\walg K$ is finite,
\item $(\walg K, \oplus, \mathbb 0, \infsum)$ is completely idempotent, and
\item there is a partial order $(\walg K, \preceq)$ such that for every $k \in \mathbb N$, $\omega \in \Omega_k$, and $\welem k_1, \dots, \welem k_k \in \walg K$, $\maxord \{ \welem k_1, \dots, \welem k_k \} \preceq \omega(\welem k_1, \dots, \welem k_k)$.
\end{enumerate*}
Since this third condition looks similar to the second condition on superior M-monoids, we point out the following subtle differences.
First, the carrier set of an M-monoid in $\wclass{fin, id, \preceq}$ is finite, which is not necessarily the case for superior M-monoids.
Second, for M-monoids in $\wclass{fin, id, \preceq}$, $(\walg K, \preceq)$ is an arbitrary \emph{partial} order which is not related to $\oplus$ at all, while for a superior M-monoid, $(\walg K, \preceq)$ is the same \emph{total} order with respect to which $\min$ is defined.
By Lemma~\ref{lem:inf-idp-d-complete}, $\wclass{fin, id, \preceq} \subseteq \wclass{\operatorname{d-comp}}$.
\subsubsection{Summary of considered classes of M-monoids}
We summarize all classes of M-monoids introduced in this subsection in Table~\ref{tab:classes-mmonoids}.
Note that we have written singleton classes of M-monoids without curly braces, e.g., $\walg{BD}$ rather than $\{ \walg{BD} \}$.
\begin{table}[h]
\centering
\begin{tabular}{ll}
\toprule
Notation & Description: the class of all \dots \\
\midrule
$\wclass{sr}$ & M-monoids associated with semirings \\
$\wclass{sup}$ & superior M-monoids \\
$\wclass{\operatorname{d-comp}}$ & d-complete M-monoids \\
$\wclass{dist}$ & distributive M-monoids \\
$\wclass{fin, id, \preceq}$ & finite and idempotent M-monoids with a certain monotonicity property \\
\midrule
& Specific M-monoids \\
\midrule
$\walg{T}$ & the tropical M-monoid \\
$\walg{V}$ & the Viterbi M-monoid \\
$\walg{BD}$ & the best derivation M-monoid \\
$\nbest$ & the $n$-best M-monoid \\
\bottomrule
\end{tabular}
\caption{Classes of M-monoids introduced in Section~\ref{sec:classes-mmonoids}.}
\label{tab:classes-mmonoids}
\end{table}
\subsection{Closed weighted RTG-based language models}\label{sec:closed}
Although superior M-monoids are common weight structures in weighted parsing, they are not general enough to cover all parsing problems (cf., e.g., computing the intersection and ADP in Section~\ref{sec:problems}).
Hence we would like to determine a class $\wlmclass{}$ of wRTG-LMs that properly includes $\wlmclass{\gclass{all}, \wclass{sup}}$ and can describe both
\begin{enumerate*}[label=(\alph*)]
\item computing the intersection of a grammar and a syntactic object and
\item the problems of ADP.
\end{enumerate*}
Furthermore, we recall that Goodman's semiring parsing algorithm only terminates if the input grammar does not contain cyclic derivations.
We would like $\wlmclass{}$ to properly include all wRTG-LMs with that property, too.
If we think of an algorithmic computation of the mapping $\fparse$ in the M-monoid parsing problem, then we encounter the following problem:
the index set of $\infsum$ can be infinite.
However, an algorithm cannot compute an infinite sum and terminate at the same time.
Clearly, if the index set of $\infsum$ is finite for some input, then an algorithm may compute $\fparse$ on this input.
Hence, if for every input with set of abstract syntax trees $D$ over which $\infsum$ is computed there exists a finite subset~$E$ of~$D$ such that
\[
\infsum_{d \in D} \wthom{d} = \bigoplus_{d \in E} \wthom{d}\enspace,
\]
then an algorithm that correctly computes $\fparse$ on every input may exist.
\textcite{Mohri2002} implemented this idea for graphs weighted with semirings.
He gave an algorithm which solves a problem similar to the M-monoid parsing problem if the input semiring is \emph{closed} for the input graph.
Here we extend this notion to M-monoids that are closed for the input hypergraph.
In order to stay within the domain of parsing, we base our definitions on RTGs rather than on hypergraphs.
\begin{sloppypar}
The rest of this subsection is structured as follows.
In Section~\ref{sec:closed-definition} we will define the class $\wlmclass[\mathrm{closed}]{\gclass{all}, \wclass{all}}$ of closed wRTG-LMs.
In Section~\ref{sec:closed-properties} we will show that for every wRTG-LM in that class, the infinite sum of the M-monoid parsing problem can be computed by a finite sum.
\end{sloppypar}
\subsubsection{Definition of closed weighted RTG-based language models}
\label{sec:closed-definition}
We note that our motivation for closed wRTG-LMs implies that the weight algebra $\walg K$ of each closed wRTG-LM is d-complete (cf.\ Example~\ref{ex:boolean-semiring}).
Moreover, our definition of closed will only involve a single tree over the set of rules.
In order to entail the desired statement about sets of all abstract syntax trees from this definition, distributivity of $\walg K$ is needed.
Thus, the weight algebra of any closed wRTG-LM is in $\wclass{\operatorname{d-comp}} \cap \wclass{dist}$.
In order to define closed wRTG-LMs, we first need a notion for cutting chunks out of trees.
\begin{quote}
\em In this section, we let~$R$ denote a ranked set.
\end{quote}
Let $w \in R^*$ be an elementary cycle.
We define the binary relation $\letvdash{w} \, \subseteq \T_R \times \T_R$ such that for each $d, d' \in \T_R$, $d \letvdash{w} d'$ if there are $p, p' \in \pos(d)$ with $\seq(d, p, p') = w$ and $d' = d[d|_{p'}]_p$.
Furthermore, we define the binary relation $\vdash \subseteq \T_R \times \T_R$ such that for each $d, d' \in \T_R$, $d \vdash d'$ if there is an elementary cycle $w \in R^*$ and $d \letvdash{w} d'$.
\begin{lemma}[restate={[name={}]lemtranswf}]\label{lem:transition-well-founded}
The endorelation~$(\vdash^+)^{-1}$ on~$\T_R$ is well-founded.
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:transition-well-founded}, we refer to Appendix~\ref{app:closed-definition}.
\end{proof}
\index{cutout trees}
For every $d \in \T_R$ and each elementary cycle $w \in R^*$, we define the set of \emph{$w$-cutout trees of~$d$} as
\[ \cotrees(d, w) = \{ d' \in \T_R \mid d \letvdash{w}^+ d' \} \enspace. \]
We note that $d \not\in \cotrees(d, w)$ and $\cotrees(d, w)$ is finite.
Moreover, we define the set of \emph{cutout trees of~$d$} as
\[ \cotrees(d) = \{ d' \in \T_R \mid d \vdash^+ d' \} \enspace. \]
We note that $d \not\in \cotrees(d)$ either and $\cotrees(d)$ is finite, too.
\begin{lemma}[restate={[name={}]lemcutoutsubset}]\label{lem:subtree-cotrees-subset}
For every $d, d' \in \T_R$ the following holds:
if $d \vdash^+ d'$, then $\cotrees(d') \subset \cotrees(d)$.
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:subtree-cotrees-subset}, we refer to Appendix~\ref{app:closed-definition}.
\end{proof}
\index{closed}
\index{wRTG-LM!closed}
Let $c \in \mathbb N$ and $\overline G = \big((G, \alg L), (\walg K, \oplus, \welem 0, \Omega), \wt\big)$ be a $(\gclass{all}, \wclass{\operatorname{d-comp}} \cap \wclass{dist})$-LM.
We say that~$\overline G$ is a \emph{$c$-closed wRTG-LM}, if for every $d \in \T_R$ and elementary cycle $w \in R^*$ such that there is a leaf $p \in \pos(d)$ which is $(c+1,w)$-cyclic the following holds:
\begin{equation}\label{eq:c-closed}
\wthom{d} \oplus \bigoplus_{d' \in \cotrees(d, w)} \wthom{d'} = \bigoplus_{d' \in \cotrees(d, w)} \wthom{d'} \enspace.
\end{equation}
We say that~\emph{$\overline G$ is a closed wRTG-LM} if there is a $c \in \mathbb N$ such that~$\overline G$ is a $c$-closed wRTG-LM.
We denote the class of all closed wRTG-LMs by $\gls{wlmclass:closed}$.
\subsubsection{Properties of closed weighted RTG-based language models}
\label{sec:closed-properties}
\begin{quote}
\em For the rest of this section, we let $c \in \mathbb N$ and $\overline G \in \wlmclass[\mathrm{closed}]{\gclass{all}, \wclass{\operatorname{d-comp}} \cap \wclass{dist}}$ with $\overline G = \big((G, \alg L), (\walg K, \oplus, \welem 0, \Omega), \wt\big)$ and $G = (N, \Sigma, A_0, R)$.
\end{quote}
First we generalize the applicability of Equation~\eqref{eq:c-closed} to trees that are at least $(c+1)$-cyclic.
\begin{lemma}[restate={[name={}]lemgenclosed}]\label{lem:closed-bigger-trees'}
For every $d \in \T_R$, $c' \in \mathbb N$ with $c' \geq c + 1$, and elementary cycle $w \in R^*$ such that there is a leaf $p \in \pos(d)$ which is $(c',w)$-cyclic the following holds:
\[ \wthom{d} \oplus \bigoplus_{d' \in \cotrees(d, w)} \wthom{d'} = \bigoplus_{d' \in \cotrees(d, w)} \wthom{d'} \enspace. \]
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:closed-bigger-trees'}, we refer to Appendix~\ref{app:closed-properties}.
\end{proof}
Recall that for every $c \in \mathbb N$, the set $\T_R^{(c)}$ contains those trees over rules that are at most $c$-cyclic.
Formally, $\T_R^{(c)} = \{ d \in \T_R \mid \text{$d$ is $c'$-cyclic for some $c' \in \mathbb N$ with $c' \leq c$} \}$.
The next theorem intuitively states the following.
For every summation over the weights of ASTs, we may remove an arbitrary finite set of ASTs from the summation as long as their cutout trees which are at most $c$-cyclic remain in the index set of the sum.
\begin{boxtheorem}[restate={[name={}]thmclosed}]\label{thm:outside-trees-subsumed}
For every $l \in \mathbb N$, $D \subseteq \T_R^{(c)}$, and $D' \subseteq \T_R \hspace{-0.2mm} \setminus \hspace{-0.2mm} \T_R^{(c)}$ the following holds:
if $\bigcup_{d \in D'} (\cotrees(d) \cap \T_R^{(c)}) \subseteq D$, then for every $B \subseteq D'$ with $|B| = l$,
\[ \bigoplus_{d \in D} \wthom{d} \oplus \infsum_{d \in D'} \wthom{d} = \bigoplus_{d \in D} \wthom{d} \oplus \infsum_{d \in D' \setminus B} \wthom{d} \enspace. \]
\end{boxtheorem}
\begin{proof}
For the proof of Theorem~\ref{thm:outside-trees-subsumed}, we refer to Appendix~\ref{app:closed-properties}.
\end{proof}
Next we show that for every summation over the weights of a certain set of trees (namely those that are at most $c$-cyclic), we may add an arbitrary finite set of trees to that summation.
\begin{lemma}[restate={[name={}]lemclosed}]\label{lem:outside-trees-spawned}
For every $l \in \mathbb N$, $A \in N$, and $B \subseteq (\T_R)_A \setminus \T_R^{(c)}$ with $|B| = l$ the following holds:
\[
\bigoplus_{d \in (\T_R^{(c)})_A} \wthom{d} = \bigoplus_{d \in (\T_R^{(c)})_A \cup B} \wthom{d} \enspace.
\]
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:outside-trees-spawned}, we refer to Appendix~\ref{app:closed-properties}.
\end{proof}
Finally, we extend this result to adding arbitrary (in particular, infinite) sets of trees to the summation.
\begin{boxtheorem}\label{thm:tr-trc}
For every $A \in N$ it holds that
\[
\infsum_{d \in (\T_R)_A} \wthom{d} = \bigoplus_{d \in (\T_R^{(c)})_A} \wthom{d} \enspace.
\]
\end{boxtheorem}
\begin{proof}
Let $A \in N$.
By Lemma~\ref{lem:outside-trees-spawned}, for every finite $D \subseteq (\T_R)_A$ with $(\T_R^{(c)})_A \subseteq D$
\[
\bigoplus_{d \in D} \wthom{d} = \bigoplus_{d \in (\T_R^{(c)})_A} \wthom{d} \enspace.
\]
Thus, by Lemma~\ref{lem:d-complete}~(iii),
\[
\infsum_{d \in (\T_R)_A} \wthom{d} = \bigoplus_{d \in (\T_R^{(c)})_A} \wthom{d} \enspace. \qedhere
\]
\end{proof}
Theorem~\ref{thm:tr-trc} shows that the sum over the (possibly infinite) set of ASTs of a closed wRTG-LM can indeed be computed by the sum over a finite subset of that set.
It will be essential in the proof of correctness of the value computation algorithm (cf.\ Section~\ref{sec:vca-correct}).
\section{Two particular M-monoid parsing problems}
\label{sec:problems}
In this section, we consider two computational problems which are related more or less closely to parsing, and we present them as instances of the M-monoid parsing problem.
For this, we formalize each of these problems using a particular class of wRTG-LMs.
We start with computing the intersection of a grammar and a syntactic object and then proceed with algebraic dynamic programming.
\subsection{Intersection of a grammar and a syntactic object}
\label{sec:intersection}
\citet{BarPerSha61} have proven that context-free languages are closed under intersection with regular languages.
They gave a constructive proof which, given a CFG $G$ and a finite-state automaton $M$ (modeling the regular language), creates a new CFG, denoted by $G \rhd M$, whose language is the intersection of the languages of $G$ and $M$.
By choosing $M$ such that its language is a single sentence $a$, the derivations of $G \rhd M$ are exactly the derivations of $a$ in $G$.
In the following, we restrict ourselves to this special case and write $G \rhd a$ rather than $G \rhd M$.
We will briefly describe two applications of $G \rhd a$ in NLP and then formalize the construction of $G \rhd a$ (where $G$ is not restricted to CFG) as an instance of the M-monoid parsing problem.
A \emph{parse forest} is a compact (in particular, finite) representation of the set of abstract syntax trees for some syntactic object.
\textcite{BilLan89} (also cf.~\cite{Lan74}) have shown that the intersection of a CFG $G$ and a sentence $a$ is precisely the parse forest of $a$ in $G$.
Their approach has later been referred to as \emph{parsing as intersection} and generalized to language models beyond CFG, e.g., TAG~\cite{Lan94}.
In EM training~\cite{demlairub77} of PCFGs the probabilistic weights of a CFG $G$ are estimated with respect to sentences from a training corpus~\cites{Bak79}{LarYou90}.
This is done by computing $G \rhd a$ for each training sentence $a$.
We note that the cited publications did not explicitly mention the intersection.
This was first done by \textcite{NedSat08} (also cf.~\cite{NedSat03}).
\textcite{dregebvog16} generalized the use of the intersection in EM training to language models beyond CFG.
We will now formally define the intersection of an RTG-LM $(G, \alg L)$ and a syntactic object $a$.
We will then show that computing $G \rhd a$ is an instance of the M-monoid parsing problem, given that $\alg L$ fulfils a certain condition.
\index{intersection}
\index{G@$G \rhd_\psi a$}
Let $(G,(\alg L,\phi))$ and $(G',(\alg L,\phi))$ be RTG-LMs where $G=(N,\Sigma,A_0,R)$ and $G'=(N',\Sigma,A_0',R')$.
\begin{enumerate}
\item Let $\psi: N' \rightarrow N$ be a mapping and let $a \in \alg L_{\sort(A_0)}$.
Then $(G',(\alg L,\phi))$ is the \emph{$\psi$-intersection of~$G$ and~$a$}, denoted by $G \rhd_\psi a$, if the following conditions hold:
\begin{itemize}
\item $L(G')_{\alg L} = L(G)_{\alg L} \cap \{ a \}$, and
\item the mapping $\widehat{\psi}\colon \mathrm{AST}(G') \rightarrow \mathrm{AST}(G, a)$ is bijective, where $\widehat{\psi} = \widehat{\psi}'|_{\mathrm{AST}(G')}$ and $\widehat{\psi}': \T_{R'} \to \T_R$ is defined inductively by
\begin{align*}
r'(d_1,\dots,d_k) & \mapsto \psi(r')(\widehat{\psi}'(d_1),\dots,\widehat{\psi}'(d_k))
\end{align*}
and $\psi$ is extended in a natural way to rules.
\end{itemize}
\item We call $(G',(\alg L,\phi))$ \emph{intersection of~$G$ and~$a$} if there is a mapping $\psi$ as in (i) such that $(G',(\alg L,\phi))$ is the $\psi$-intersection of~$G$ and~$a$. \qedhere
\end{enumerate}
If the algebra $(\alg L,\phi)$ is finitely decomposable, then the intersection can be constructed easily from the result of a particular M-monoid parsing problem.
We recall that the CFG-algebras, LCFRS-algebras, and TAG-algebras are finitely decomposable.
Let $(G,(\alg L,\phi))$ be an RTG-LM such that $G =(N,\Sigma,A_0,R)$ is in normal form and $(\alg L,\phi)$ is finitely decomposable.
Moreover, let $a \in \alg L_{\sort(A_0)}$.
\index{intersection M-monoid}
The \emph{intersection M-monoid of $(G, \alg L)$} and $a$ is the finite and complete M-monoid
\[
\walg{K}((G,(\alg L,\phi)),a) = (\mathcal P(P_{R,a}),\cup,\emptyset,\Omega,\infsumop[\cup]) \enspace,
\]
which we construct as follows.
\begin{itemize}
\item $P_{R,a} = \{ [A,b] \rightarrow \ \sigma([A_1,a_1],\ldots,[A_k,a_k]) \mid
\begin{aligned}[t]
&(A \rightarrow \sigma(A_1,\ldots,A_k)) \in R, b \in \factors(a)_{\sort(A)}, \text{ and} \\
& (a_1,\ldots,a_k) \in \phi(\sigma)^{-1}(b) \} \enspace;
\end{aligned}$\\
we note that $P_{R,a}$ is finite,
\item $\Omega = \{\omega_r \mid r \in R\}$ where for each $r = (A \rightarrow \sigma(A_1,\ldots,A_k))$ with $\sort(\sigma) =(s_1\ldots s_k,s)$, the operation $\omega_r$ is defined for every $V_1,\ldots,V_k \in \mathcal P(P_{R,a})$ by
\[
\omega_r(V_1,\ldots,V_k) = V_1 \cup \ldots \cup V_k \cup V
\]
where
\begin{align*}
V = \{[A,b] \rightarrow \sigma([A_1,a_1],\ldots,[A_k,a_k]) \mid{} &(\forall i \in [k]): [A_i,a_i] \in \mathrm{lhs}(V_i),\\
& b = \phi(\sigma)(a_1,\ldots,a_k) \}
\end{align*}
and $\mathrm{lhs}(V_i)$ is the set of left-hand sides of all rules in $V_i$,
\item for each family $(V_i \mid i \in I)$ of elements of $\mathcal P(P_{R,a})$ we define
\[
\infsum[\cup]_{i \in I} V_i = \bigcup_{i \in I} V_i\enspace.
\]
We note that $\infsum[\cup]_{i \in I} V_i$ is a finite set.
\end{itemize}
We remark that the restriction of $G$ to normal form is for simplicity.
An extension of the definition of the intersection M-monoid which allows arbitrary RTGs is possible in a straightforward way.
\begin{boxtheorem}[restate={[name={}]thmintersection}]\label{thm:intersection}
For each RTG-LM with a finitely decomposable algebra and each syntactic object, the construction of their intersection is an M-monoid parsing problem.
More precisely, let $(G,(\alg L,\phi))$ be an RTG-LM such that $G=(N,\Sigma,A_0,R)$ and $(\alg L,\phi)$ is a finitely decomposable language algebra.
Moreover, let $a \in \alg L_{\sort(A_0)}$.
We consider the M-monoid parsing problem with the following input:
\begin{itemize}
\item the wRTG-LM $((G,(\alg L,\phi)), \walg{K}((G,(\alg L,\phi)),a), \wt)$ where $\wt(r) = \omega_r$ for each $r \in R$ and
\item $a$.
\end{itemize}
Then $(G',(\alg L,\phi))$ is the $\psi$-intersection of $(G,(\alg L,\phi))$ and $a$, where
\begin{itemize}
\item $G' = (N',\Sigma,[A_0,a],\fparse(a))$ with $N'=\mathrm{lhs}(\fparse(a)) \cup \{[A_0,a]\}$ (we note that $\fparse(a)$ is a finite set) and
\item $\psi\colon N' \rightarrow N$ is defined by $\psi([A,b]) = A$ for each $[A,b] \in N'$.
\end{itemize}
\end{boxtheorem}
\begin{proof}
For the proof of Theorem~\ref{thm:intersection}, we refer to Appendix~\ref{app:intersection}.
\end{proof}
We denote the class of all intersection M-monoids $\walg K((G', \alg L'), a)$, where $(G', \alg L')$ is some RTG-LM and $a$ is some syntactic object, by $\gls{wclass:int}$.
\subsection{Algebraic dynamic programming}
\label{sec:adp}
\emph{Algebraic dynamic programming} (ADP) is a framework for modeling dynamic programming problems which was originally developed by \textcite{GieMeySte04}.
They represented dynamic programming problems using a yield grammar and a so-called evaluation algebra for each problem.
In this section, we will introduce a different formalization of ADP which uses only a single formalism: wRTG-LMs.
Moreover, we will show that each ADP problem is an instance of the M-monoid parsing problem.
In this section we fix the following objects and sets.
We let~${\mathsf{a}}$ be a sort representing \enquote{answers} and~${\mathsf{i}}$ be a sort representing \enquote{input}.
Moreover, we let $S = \{ {\mathsf{a}},{\mathsf{i}} \}$ be a set of sorts and $\Sigma$ be an $(S^*\times S)$-sorted set such that
\begin{enumerate}
\item $\Sigma_{(\varepsilon,{\mathsf{a}})} = \emptyset$
and
\item $\Sigma_{(s_1 \dots s_k,{\mathsf{i}})} = \emptyset$ for every $k \in \mathbb N_+$.
\end{enumerate}
Intuitively, in every tree over~$\Sigma$, the leaves are symbols with sort~${\mathsf{i}}$ and the inner nodes are symbols with sort~${\mathsf{a}}$.
In the following, we will formalize the concepts \emph{objective function} and \emph{evaluation algebra} of~\cite{GieMeySte04} using our own methodology.
We note that we have used sets rather than lists in order to represent multiple answers.
This is motivated by the fact that sets are more commonly understood than lists.
Moreover, we do not want duplicate answers and information about order can be added to answers if they are elements of a set, too.
Thus the advantages lists provide over sets are not needed in our case.
We believe that~\cite{GieMeySte04} chose lists over sets because of their choice to implement ADP in Haskell, where lists are a widespread datastructure.
\index{objective function}
Let $\walg{K}$ be an $S$-sorted set.
An \emph{objective function (for $\walg{K}$)} is a family $(h_s \mid s \in S)$ of mappings $h_s: \mathcal P(\walg{K}_s) \rightarrow \mathcal P(\walg{K}_s)$ which fulfils the following requirements:
\begin{enumerate}
\item $h_{\mathsf{i}} = \id$,
\item $h_{\mathsf{a}}$ maps each non-empty subset~$F$ of~$\walg{K}_{\mathsf{a}}$ to a non-empty subset of~$F$,
\item $h_{\mathsf{a}}(\emptyset) = \emptyset$, and
\item $h_{\mathsf{a}}$ is commutative and associative in the following sense:
for every non-empty subset~$F$ of~$\walg{K}_{\mathsf{a}}$ and every $I$-indexed family $(F_i \mid i \in I)$ of elements $F_i \subseteq F$ such that $F = \bigcup_{i \in I} F_i$
\begin{equation}\label{eq:obj-function'}
h_{\mathsf{a}}(F) = h_{\mathsf{a}}(\bigcup_{i \in I} h_{\mathsf{a}}(F_i)) \enspace.
\end{equation}
In particular, by choosing $I = \{i\}$ and $F_i=F$, we obtain that $h_{\mathsf{a}}(h_{\mathsf{a}}(F))=h_{\mathsf{a}}(F)$, i.e., $h_{\mathsf{a}}$ is idempotent.
\end{enumerate}
We note that since $h_{\mathsf{i}} = \id$, Equation~\eqref{eq:obj-function'} also holds if we replace~${\mathsf{a}}$ by~${\mathsf{i}}$.
Thus, in the following, we will use Equation~\eqref{eq:obj-function'} for arbitrary $s \in S$ and say that \emph{$(h_s \mid s \in S)$ is idempotent}.
Moreover, we will simply write~$h$ rather than $(h_s \mid s \in S)$.
\index{objective function!single-valued}
We say that~$h$ is \emph{single-valued} if $|h_{\mathsf{a}}(F)| \le 1$ for every $F \subseteq \walg{K}_{\mathsf{a}}$.
\index{Bellman's principle of optimality}
Let $(\walg{K},\psi)$ be an $S$-sorted $\Sigma$-algebra and $h$ be an objective function for~$\walg{K}$.
We say that \emph{$h$ satisfies Bellman's principle of optimality} if for every $k \in \mathbb{N}_+$, $s_1,\dots,s_k \in S$, $\sigma \in \Sigma_{(s_1 \dots s_k,{\mathsf{a}})}$, and for every $F_i \subseteq \walg{K}_{s_i}$ with $i \in [k]$ the following holds:
\begin{equation}\label{eq:bellman'}
h_{\mathsf{a}}\big(\psi(\sigma)(F_1,\ldots,F_k)\big) = h_{\mathsf{a}}\Big(\psi(\sigma)\big(h_{s_1}(F_1),\ldots,h_{s_k}(F_k)\big)\Big) \enspace.
\end{equation}
Let $(G,(\lalg{YIELD}^\Sigma,\phi))$ be an $S$-sorted yield grammar over $\Sigma$, $(\walg{K},\psi)$ an $S$-sorted $\Sigma$-algebra, and $h$ an objective function for~$\walg{K}$ such that
\begin{enumerate}
\item $G = (N,\Sigma,A_0,R)$ is unambiguous with $A_0 \in N_{\mathsf{a}}$,
\item $(G, \lalg{YIELD}^\Sigma) \subseteq \gclass{nl}$, and
\item $h$ satisfies Bellman's principle of optimality.
\end{enumerate}
\index{ADP problem}
The \emph{ADP problem for $(G,(\lalg{YIELD}^\Sigma,\phi))$, $\walg{K}$, and $h$} is the problem of computing, for each $w \in (\Sigma_{(\varepsilon,{\mathsf{i}})})^*$, the value
\[
\adp(w) = h_{\mathsf{a}}\big( \{ t_{\walg{K}} \mid t \in L(G) \cap \yield_{\Sigma_{(\varepsilon,{\mathsf{i}})}}^{-1}(w) \} \big)\enspace. \qedhere
\]
We remark that \textcite[p.\,235]{GieMeySte04} do not explicitly require the yield grammar to be unambiguous.
However, they argue against using ambiguous grammars as follows~\cite[p.\,235]{GieMeySte04}:
\enquote{The same candidate has two derivations in the tree grammar: This is bad, as the algorithm will yield redundant answers when asking for more than one, and all counting and probabilistic scoring will be meaningless.}
Here a \emph{candidate} is a $t \in \T_\Sigma$ and a \emph{derivation} of~$t$ is a $d \in (\T_R)_{A_0}$ with $\pi_\Sigma(d) = t$.
Moreover, (ii) is a restriction we impose on $G$ in order to disallow abstract syntax trees that are evaluated in the same way as one of their proper subtrees.
Since the syntactic objects of ADP represent (sub\nobreakdash-)problems which have to be solved, if (ii) did not hold, then the solution of a subproblem would depend on itself, which contradicts dynamic programming.
\begin{example}\label{ex:adp-problem}
Given two strings $u,v \in (\Sigma_{(\varepsilon,{\mathsf{i}})})^*$, we can try to edit $u$ into $v$ by traversing $u$ position by position and, at each position $p$, applying one of the following three operations:
\begin{itemize}
\item delete the symbol of $u$ at position $p$ and advance $p$ to $p+1$,
\item insert a symbol into $u$ in front of position $p$ and remain at $p$, and
\item replace the symbol of $u$ at position $p$ by some other symbol and advance $p$ to $p+1$.
\end{itemize}
If the resulting string is $v$, then this edit was successful.
For the given strings $u,v$ there can be many successful edits.
In order to find out the ``cheapest'' successful edit, we associate a cost with each of the three operations, e.g., delete and insert have the cost 1, replace has cost 0 if the replaced symbol and the replacing symbol are equal, otherwise replace has cost 1.
Then the cost of a successful edit is the sum of the costs of each occurrence of an operation.
The \emph{minimum edit distance problem} is the task to calculate, for two given strings $u$ and $v$, the \emph{minimum edit distance between~$u$ and~$v$},
i.e., the minimum of the costs of all successful edits. We denote this value by $\med(u,v)$.
Next we formulate the minimum edit distance problem as an ADP problem.
\begin{enumerate}
\item We let $\Sigma = \Sigma_{(\varepsilon,{\mathsf{i}})} \cup \Sigma_{({\mathsf{i}},{\mathsf{a}})} \cup \Sigma_{({\mathsf{i}} {\mathsf{a}},{\mathsf{a}})} \cup \Sigma_{({\mathsf{a}} {\mathsf{i}},{\mathsf{a}})} \cup \Sigma_{({\mathsf{i}} {\mathsf{a}} {\mathsf{i}},{\mathsf{a}})}$ with
\begin{align*}
\Sigma_{(\varepsilon,{\mathsf{i}})} &= \{ \text{a},\dots,\text{z} \}\cup \{ \$ \}, \
\Sigma_{(\varepsilon,{\mathsf{a}})} = \emptyset, \\
\Sigma_{({\mathsf{i}},{\mathsf{a}})} &= \{ \text{nil} \}, \
\Sigma_{({\mathsf{i}} {\mathsf{a}},{\mathsf{a}})} = \{ \text{delete} \}, \
\Sigma_{({\mathsf{a}} {\mathsf{i}},{\mathsf{a}})} = \{ \text{insert} \}, \text{ and }
\Sigma_{({\mathsf{i}} {\mathsf{a}} {\mathsf{i}},{\mathsf{a}})} = \{ \text{replace} \}.
\end{align*}
\item We define the $S$-sorted yield grammar $(G,(\lalg{YIELD}^\Sigma,\phi))$ with $G = (N, \Sigma, \nont{A}, R)$ and
\begin{itemize}
\item $N = N_{\mathsf{a}} = \{ \nont{A} \}$ (where $\nont{A}$ stands for \enquote{alignment}), and
\item $R$ consists of the following rules:
\begin{align*}
\nont{A} &\rightarrow \text{nil}(\$) \\
\nont{A} &\rightarrow \text{delete}(\delta, \nont{A}) \tag{for every $\delta \in \Sigma_{(\varepsilon,{\mathsf{i}})}\setminus\{\$\}$} \\
\nont{A} &\rightarrow \text{insert}(\nont{A}, \delta) \tag{for every $\delta \in \Sigma_{(\varepsilon,{\mathsf{i}})}\setminus\{\$\}$} \\
\nont{A} &\rightarrow \text{replace}(\delta, \nont{A}, \delta') \enspace. \tag{for every $\delta,\delta' \in \Sigma_{(\varepsilon,{\mathsf{i}})}\setminus\{\$\}$}
\end{align*}
\end{itemize}
\item We define the $S$-sorted $\Sigma$-algebra $(\mathbb N \cup \Sigma_{(\varepsilon,{\mathsf{i}})},\psi)$ such that $(\mathbb N \cup \Sigma_{(\varepsilon,{\mathsf{i}})})_{\mathsf{a}} = \mathbb N$ and $(\mathbb N \cup \Sigma_{(\varepsilon,{\mathsf{i}})})_{\mathsf{i}} = \Sigma_{(\varepsilon,{\mathsf{i}})}$,
where for every $n \in \mathbb N$ and $\delta,\delta_1,\delta_2 \in \Sigma_{(\varepsilon,{\mathsf{i}})}$
\begin{align*}
\psi(\delta) &= \delta \\
\psi(\text{nil})(\delta) &= 0 \\
\psi(\text{delete})(\delta,n) &= n + 1 \\
\psi(\text{insert})(n,\delta) &= n + 1 \\
\psi(\text{replace})(\delta_1,n,\delta_2) &= \begin{cases}
n &\text{if $\delta_1 = \delta_2$} \\
n + 1 &\text{otherwise.}
\end{cases}
\end{align*}
\item We let~$h$ be the objective function such that $h_{\mathsf{a}}(F) = \{ \min F \}$ for every non-empty subset~$F$ of~$\mathbb N$. Thus, $h$ is single-valued.
\end{enumerate}
Then $\med(u,v) = h_{\mathsf{a}}\big( \{ t_{\walg{K}} \mid t \in L(G) \cap \yield_{\Sigma_{(\varepsilon,{\mathsf{i}})}}^{-1}(u\$v) \} \big)$. Hence, the calculation of $\med(u,v)$ is an ADP problem.
\end{example}
\index{ADP M-monoid over $\walg{K}$ and $h$}
For each ADP problem, we will construct an associated instance of the M-monoid parsing problem as follows.
Let $(\walg{K},\psi)$ be an $S$-sorted $\Sigma$-algebra and $h$ be an objective function for~$\walg{K}$ that satisfies Bellman's principle of optimality.
We define the \emph{algebra associated with $\walg{K}$ and $h$} as the tuple $(\walg{K}', \oplus, \emptyset, \Sigma',\psi',\infsum)$ such that
\begin{itemize}
\item $\walg{K}' = \{ h_s(F) \mid s \in S \text{ and } F \subseteq \walg{K}_s \} \cup \{ \bot \}$ where~$\bot$ is a new element,\footnote{$\bot$ helps to guarantee that $\oplus$ is associative, see the proof of Lemma~\ref{lem:adp-mmonoid-complete-distributive}.}
\item for every $F_1,F_2 \in \walg{K}'$
\[ F_1 \oplus F_2 = \begin{cases}
h_s(F_1 \cup F_2) &\text{if there is an $s \in S$ such that $F_1,F_2 \subseteq \walg{K}_s$} \\
\bot &\text{otherwise,}
\end{cases} \]
\item $\Sigma' = \T_\Sigma(X) \cup \{\welem{0}^k \mid k \in \mathbb N\}$ where $\T_\Sigma(X) = \bigcup_{s \in S,u \in S^*} (\T_\Sigma(X_u))_s$ is viewed as a ranked set and each $\welem{0}^k$ has rank~$k$ (we note that for each $\sigma \in \Sigma_{(s_1 \dots s_k,s)}$, $\sigma(x_{1,s_1},\dots,x_{k,s_k}) \in (\T_\Sigma(X_{s_1 \dots s_k}))_s$),
\item for every $k \in \mathbb{N}$ and $\sigma \in \Sigma'_k$ we define the operation $\psi'(\sigma): (\walg K')^k \to \walg K'$ for every $F_1,\ldots,F_k \in \walg{K}'$ as follows:
\begin{itemize}
\item if $\sigma = t$ with $t \in (\T_\Sigma(X_{s_1 \dots s_k}))_s$, then
\[ \psi'(\sigma)(F_1,\dots,F_k) = \begin{cases}
h_s(t_{\walg{K}}(F_1,\dots,F_k)) &\text{if $F_i \subseteq \walg{K}_{s_i}$ for every $i \in [k]$} \\
\bot &\text{otherwise,}
\end{cases} \]
\item if $\sigma= \welem{0}^k$, then $\psi'(\sigma)(F_1,\ldots,F_k) = \emptyset$, and
\end{itemize}
\item $\infsum$ is defined for each $I$-indexed family $(F_i \mid i \in I)$ of elements $F_i \in \walg{K}'$ as
\[ \infsum_{i \in I} F_i = \begin{cases}
h_s \Big( \bigcup_{i \in I} F_i \Big) &\text{if there is an $s \in S$ such that $F_i \subseteq \walg{K}_s$ for every $i \in I$} \\
\bot &\text{otherwise.}
\end{cases} \]
\end{itemize}
Note that $\emptyset \subseteq \walg{K}_s$ for every $s \in S$.
Thus $\emptyset \in \walg{K}'$, but we cannot assign a sort from~$S$ to~$\emptyset$.
Hence~$\walg{K}'$ is not an $S$-sorted set.
\begin{observation}
If~$h$ is single-valued or $h_{\mathsf{a}} = \id$, then
\[ \psi'(\sigma)(F_1,\dots,F_k) = \psi(\sigma)(F_1,\dots,F_k) \]
for every $k \in \mathbb N$, $s,s_1,\dots,s_k \in S$, $\sigma \in \Sigma_{(s_1 \dots s_k,s)}$, and $F_i \subseteq \walg{K}_{s_i}$ for every $i \in [k]$.
\end{observation}
\begin{lemma}[restate={[name={}]lemadpmmonoid}]\label{lem:adp-mmonoid-complete-distributive}
The algebra associated with $\walg{K}$ and $h$ is a d-complete and distributive M-monoid.
\end{lemma}
\begin{proof}
For the proof of Lemma \ref{lem:adp-mmonoid-complete-distributive}, we refer to Appendix~\ref{sec:proof-adp-mmonoid}.
\end{proof}
As a consequence of this lemma, we will refer to the algebra associated with~$\walg{K}$ and~$h$ as the \emph{ADP M-monoid over~$\walg{K}$ and~$h$}.
\begin{boxtheorem}[restate={[name={}]thmadpmmonoid}]\label{thm:ADP-M-monoid}
Each ADP problem is an instance of the M-monoid parsing problem.
More precisely, let $(G,(\lalg{YIELD}^\Sigma,\phi))$ with $G = (N,\Sigma,A_0,R)$ be a nonlooping RTG-LM.
Moreover, let $(\walg{K},\psi)$ be an $S$-sorted $\Sigma$-algebra and~$h$ be an objective function for~$\walg{K}$ that satisfies Bellman's principle of optimality.
We consider the M-monoid parsing problem with the following input:
\begin{itemize}
\item the wRTG-LM
\[ ((G,(\lalg{YIELD}^\Sigma,\phi)), (\walg{K}', \oplus, \emptyset, \Sigma',\psi',\infsumop), \wt) \]
where $(\walg{K}', \oplus, \emptyset, \Sigma',\psi',\infsum)$ is the ADP M-monoid over $\walg{K}$ and $h$.
Moreover, for every $k \in \mathbb N$ and $r = (A \rightarrow t)$ in $R_k$ (viewing~$R$ as a ranked set) we define $\wt(r) = \psi'(t')$, where $t'$ is obtained from $t$ by replacing the $i$th occurrence of a nonterminal by $x_i$ for every $i \in [k]$.
\item $a \in (\Sigma_{(\varepsilon,{\mathsf{i}})})^*$.
\end{itemize}
Then $\fparse(a) = \adp(a)$.
\end{boxtheorem}
\begin{proof}
For the proof, we refer to Appendix~\ref{app:adp-mmonoid-parsing}.
\end{proof}
We denote by $\gls{wclass:adp}$ the class of all ADP M-monoids over all algebras $(\walg K, \psi)$ and objective functions $h$ for $\psi$ that satisfy Bellman's principle of optimality.
By Lemma~\ref{lem:adp-mmonoid-complete-distributive}, we have that
\(
\wlmclass{{\gclass{YIELD} \cap \gclass{nl}},\) \(\wclass{ADP}} \subseteq \wlmclass{\gclass{all}, \wclass{\operatorname{d-comp}} \cap \wclass{dist}}
\).
However, in general
\(
\wlmclass{\gclass{YIELD} \cap \gclass{nl}, \wclass{\mathrm{ADP}}} \not\subseteq \wlmclass[\mathrm{closed}]{\gclass{all},\) \(\wclass{\operatorname{d-comp}} \cap \wclass{dist}}
\).
We will address this problem by the additional concepts which we develop in the following sections.
Thus we will be able to show that our M-monoid parsing algorithm can solve every ADP problem (cf.\ Corollary~\ref{cor:applicability}).
\begin{example}[Continuation of Example~\ref{ex:adp-problem}]
We show how to compute the weight of each rule of the wRTG-LM $\big((G, \lalg{YIELD}^\Sigma), (\walg K', \oplus, \emptyset, \Sigma', \psi', \infsum), \wt\big)$, where $(\walg K', \oplus, \emptyset, \Sigma', \psi', \infsum)$ is the ADP M-monoid over $\mathbb N \cup \Sigma_{(\varepsilon,{\mathsf{i}})}$ and $h$, and $\wt$ is defined as in Theorem~\ref{thm:ADP-M-monoid}.
\begin{align*}
\wt(\nont{A} \rightarrow \text{nil}(\$))() &= h((\text{nil}(\$)')_{\walg{K}}()) = h(\text{nil}(\$)_{\walg{K}}()) = h(\{0\}) = \{0\}\\[2mm]
\wt(\nont{A} \rightarrow \text{delete}(\delta,\nont{A}))(F)
&= h((\text{delete}(\delta,\nont{A})')_{\walg{K}}(F))
= h(\text{delete}(\delta,x_1)_{\walg{K}}(F))\\
&= h(\{n+1 \mid n \in F\}) = \{1 + \min(F)\} \\[2mm]
\wt(\nont{A} \rightarrow \text{insert}(\nont{A},\delta))(F)
&= \{1 + \min(F)\}\\[2mm]
\wt(\nont{A} \rightarrow \text{replace}(\delta,\nont{A},\delta'))(F) &=
\begin{cases}
\{1 + \min(F)\} & \text{if $\delta \not= \delta'$}\\
\{\min(F)\} & \text{otherwise}
\end{cases}
\end{align*}
By Theorem \ref{thm:ADP-M-monoid}, for every $u,v \in (\Sigma_{(\varepsilon,{\mathsf{i}})} \setminus \{ \$ \})^*$, we have that $\fparse(u\$v) = \med(u,v)$.
\end{example}
\section{M-monoid parsing algorithm}\label{sec:algorithm}
\index{M-monoid parsing algorithm}
The M-monoid parsing algorithm is supposed to solve the M-monoid parsing problem.
As input, it takes a wRTG-LM~$\overline G$ and a syntactic object~$a$.
Its output is intended to be $\fparse(a)$.
The algorithm is a pipeline with two phases (cf.\ Figure~\ref{fig:alg}) and follows the modular approach of \textcites{Goodman1999}{ned03}.
First, a \emph{canonical weighted deduction system} computes from~$\overline G$ and $a$ a new wRTG-LM~$\overline G{}'$ with the same weight structure as $\overline G$, but a different RTG and the language algebra $\lalg{CFG}^\emptyset$.
Second, $\overline G{}'$ is the input to the \emph{value computation algorithm} (Algorithm~\ref{alg:mmonoid}), which computes the value $V(A_0')$;
this is supposed to be $\infsum_{d \in \mathrm{AST}(G')} \wt(d) = \fparse(a)$.
\subsection{Weighted deduction systems}
\label{sec:weighted-deduction-systems}
The concept of deduction systems is very useful to specify parsing algorithms for strings according to some formal grammar \cite{perwar83,shischper95}.
This concept was extended in~\cite{Goodman1999} and~\cite{ned03} to \emph{weighted deduction systems} in which each inference rule is associated with an operation on some totally ordered set.
A weighted deduction system consists of a \emph{goal item} and a finite set of \emph{weighted inference rules}.
Each inference rule has the form:
\begin{equation}
\frac{x_1: I_1, \ ...,\ x_m:I_m}{\omega(x_1,\ldots,x_m): I_0}
\left\{
\substack{c_1,\ldots, c_p}
\right. \label{equ:inf-rule}
\end{equation}
where $m \in \mathbb N$, $\omega$ is an $m$-ary operation (\emph{weight function}), $I_0,\ldots,I_m$ are \emph{items}, and $c_1,\ldots,c_p$ are \emph{side conditions}.
Each item represents a Boolean-valued property (of some combination of nonterminals of the formal grammar $G$ and/or constituents of the string $a=w$).
The meaning of an inference rule is: given that $I_1, \dots, I_m$ and $c_1, \dots, c_p$ are true, $I_0$ is true as well.
\citet{ned03} pointed out that ``a deduction system having a grammar $G$ [...] and input string $w$ in the side conditions can be seen as a construction $c$ of a context-free grammar $c(G,w)$ [...]''.
Thus, conceptually, a weighted deduction system is a mapping $c$ of which the argument-value relationship is determined by the goal item and the weighted inference rules.
The mapping $c$ takes a grammar $G$ and a string $a$ as arguments and delivers a system $c(G,a)$ of (unconditional) inference rules, called \emph{instantiation} in \cite{ned03}.
Then a parsing algorithm tries to generate the goal item by generating items on demand using the inference rules of $c(G,a)$; in particular, $c(G,a)$ is not fully constructed before applying the parsing algorithm.
Here we generalize the approach of~\cites{perwar83}{shischper95} in two ways:
\begin{enumerate*}[label=(\arabic*)]
\item instead of string-generating grammars, we consider RTG-LMs over any finitely decomposable language algebra and
\item instead of unweighted grammars as input, we consider wRTG-LMs (as in~\cite{ned03}).
\end{enumerate*}
For this,
\begin{quote}
\em in the sequel, we let $(\alg L, \phi)$ be an arbitrary, but fixed finitely decomposable $S$-sorted $\Gamma$-algebra.
\end{quote}
We denote the class of all RTG-LMs with language algebra $\alg L$ by $\gclass{\alg L}$.
Let $\walg{K}$ and $\walg{L}$ be two complete M-monoids.
\index{weighted deduction system}
\index{K@$(\walg K,\walg L)$-weighted deduction system}
A \emph{$(\walg{K},\walg{L})$-weighted deduction system} (or simply: \emph{weighted deduction system}) is a mapping
\[
\wds_{\walg{K},\walg{L}}: \wlmclass{\gclass{\alg L}, \walg{K}} \times \alg L \to \wlmclass{\gclass{\lalg{CFG}^\emptyset}, \walg{L}} \enspace,
\]
where the argument-value relationship of $\wds_{\walg{K},\walg{L}}$
is determined by some goal item and some finite set of weighted inference rules which may contain references to the arguments.%
\footnote{This definition can be compared to the definition of a function $f: \mathbb{N} \times \mathbb{N} \to \mathbb{N}$ by $f(x,y)=x^2+3y$, in which the argument-value relationship is expressed by an arithmetic expression with references to the arguments $x$ and $y$.}
We allow that the weight algebras $\walg{K}$ and $\walg{L}$ of the argument grammar and the resulting grammar are different in order to enhance flexibility (cf., e.g., \cite[Fig.~3]{ned03}).
In the literature, sound and complete are two important properties that deduction systems must fulfill.
In our context, they could be defined as follows.
We say that $\wds_{\walg{K},\walg{L}}$ is
\begin{itemize}
\item \emph{sound} if for each $\overline{G}=((G,\alg L),\walg{K},\wt)$ in $\wlmclass{\gclass{\alg L}, \walg{K}}$ and each $a \in \alg L_{\sort(A_0)}$ where $A_0$ is the initial nonterminal of $G$ the following holds:
if $(G',\lalg{CFG}^\emptyset)$ is the first component of $\wds_{\walg{K},\walg{L}}(\overline{G}, a)$ and $\varepsilon \in L(G')_{\lalg{CFG}^\emptyset}$, then $a \in L(G)_{\alg L}$.
\item \emph{complete} if for each $\overline{G}=((G,\alg L),\walg{K},\wt)$ in $\wlmclass{\gclass{\alg L}, \walg{K}}$ and each $a \in \alg L_{\sort(A_0)}$ where $A_0$ is the initial nonterminal of $G$ the following holds:
if $a \in L(G)_{\alg L}$, then $\varepsilon \in L(G')_{\lalg{CFG}^\emptyset}$, where $(G',\lalg{CFG}^\emptyset)$ is the first component of $\wds_{\walg{K},\walg{L}}(\overline{G}, a)$.
\item \emph{unweighted} if $\walg{K}=\walg{L}$ and this M-monoid is the M-monoid associated with the Boolean semiring.
\end{itemize}
In our context, we need a stronger condition on weighted deduction systems.
We call a \index{weighted deduction system!weight-preserving}
weighted deduction system $\wds_{\walg K,\walg K}: \wlmclass{\gclass{\alg L}, \walg K} \times \alg L \to \wlmclass{\gclass{\lalg{CFG}^\emptyset}, \walg K}$ \emph{weight-preserving}, if for each $\overline G = ((G, \alg L), \walg K, \wt)$ in $\wlmclass{\gclass{\alg L}, \walg K}$ and $a \in \alg L_{\sort(A_0)}$ with $G = (N,\Sigma,A_0,R)$, $\wds_{\walg K,\walg K}(\overline G, a) = ((G', \lalg{CFG}^\emptyset), \walg K, \wt')$, and $G' = (N',\Sigma',A_0',R')$ there is a bijective mapping
\[
\psi: \mathrm{AST}(G, a) \to \mathrm{AST}(G')
\]
such that for every $d \in \mathrm{AST}(G, a)$ we have $\wthom{d} = \wthom{\psi(d)}$.
\begin{observation}\label{obs:weight-preserving-parse}
Let $\overline G = ((G, \alg L), \walg K, \wt)$ be a wRTG-LM with $G = (N,\Sigma,A_0,R)$, $a \in \alg L_{\sort(A_0)}$, and $\wds_{\walg K,\walg K}: \wlmclass{\gclass{\alg L}, \walg K} \times \alg L \to \wlmclass{\gclass{\lalg{CFG}^\emptyset}, \walg K}$ be a weight-preserving weighted deduction system.
If $\wds_{\walg K,\walg K}(\overline G, a) = \big((G', \lalg{CFG}^\emptyset), \walg K, \wt'\big)$, then $\fparse_{(G,\alg L)}(a) = \fparse_{(G',\lalg{CFG}^\emptyset)}(\varepsilon)$.
\end{observation}
\begin{lemma}[restate={[name={}]lemwdspreserving}]\label{lem:weight-eq-implies-sound-and-complete}
Each weight-preserving weighted deduction system is sound and complete.
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:weight-eq-implies-sound-and-complete}, we refer to Appendix~\ref{app:weight-preserving-wds}.
\end{proof}
Next we define a particular weighted deduction system.
It covers, e.g., the (unweighted) CYK deduction system~\cite{shischper95} and the deduction system for LCFRS of \textcite{kal10}.
We will use this particular weighted deduction system in our M-monoid parsing algorithm.
Let $(\walg K,\oplus,\mathbb 0,\Omega,\infsum)$ be a complete M-monoid such that $\id(\walg K) \in \Omega$.
\index{canonical weighted deduction system}
The \emph{canonical $\walg K$-weighted deduction system}
is the weighted deduction system
\[
\cnc: \wlmclass{\gclass{\alg L}, \walg K} \times \alg L \to \wlmclass{\gclass{\lalg{CFG}^\emptyset}, \walg K}
\]
such that for every $\overline G = ((G,\alg L),\walg{K},\wt)$ in $\wlmclass{\gclass{\alg L}, \walg K}$ and $a_0 \in \alg L_{\sort(A_0)}$, where $A_0$ is the initial nonterminal of~$G$, the wRTG-LM $\cnc(\overline G, a_0)$ is defined by
\[
\cnc(\overline G, a_0) = ((G', \lalg{CFG}^\emptyset), \walg K, \wt')
\]
where $G'$ and $\wt'$ are obtained from $\overline G$ and $\wt$ as follows.
We let $G=(N,\Sigma,A_0,R)$ and define $\mathrm{rhs}(R) = \{ t \in \T_\Sigma(N) \mid \text{$t$ is the right-hand side of some $r \in R$} \}$.
Then $G'=(N',\Sigma',A_0',R')$ with
\begin{itemize}
\item $N' = N \times \mathrm{rhs}(R) \times \factors(a_0) \cup \{ [A_0,a_0] \}$ \ (set of \emph{items})
\item $A_0' = [A_0,a_0]$ \ (\emph{goal item})
\item For every rule $r=(A \to t)$ in $R$ and $a,a_1,\ldots,a_k \in \factors(a_0)$, let $\yield_N(t) = A_1 \dots A_k$ with $k \in \mathbb{N}$ (i.e., including $k=0$) and $A_1,\dots,A_k \in N$;
now, if $t'_{\alg L}(a_1,\ldots,a_k)=a$, where~$t'$ is obtained from~$t$ by replacing the $i$th occurrence of a nonterminal by~$x_i$ for every $i \in [k]$, then each rule in the set
\begin{align*}
\mathrm{instances}(r) = \{ [A,t,a] &\to \langle x_1\ldots x_k\rangle([A_1,t_1,a_1],\ldots,[A_k,t_k,a_k]) \mid \\
&t_1,\dots,t_k \in \mathrm{rhs}(R) \text{ with } \sort(t_i) = \sort(A_i) \text{ for each $i \in [k]$} \}
\end{align*}
is in $R'$.
We define $\wt'(r') = \wt(r)$ for each $r' \in \mathrm{instances}(r)$.
Moreover, for each rule $r = (A_0 \to t)$ in~$R$ the rule
\[ r' = ([A_0,a_0] \to \langle x_1 \rangle ([A_0,t,a_0])) \]
is in~$R'$ and we let $\wt'(r') = \id(\walg K)$.
\item $\Sigma' = \{ \langle x_1 \dots x_k \rangle \in \Gamma^{\lalg{CFG},\emptyset} \mid 0 \le k \le \maxrk(G) \}$. \qedhere
\end{itemize}
We note that the requirement $\id(\walg K) \in \Omega$ is not a restriction, as the identity mapping is defined on every set and can therefore be added to $\Omega$, if necessary.
We also note that the nonterminals of $\cnc(\overline G, a_0)$ contain syntactic objects and right-hand sides of rules.
This is in contrast to the literature, where items of deduction systems contain positions of a string~\cites{shischper95}{Goodman1999}{ned03}{kal10}.
This deviation is due to two reasons.
First, since $\cnc$ is defined for arbitrary finitely decomposable language algebras, string positions are not general enough to represent the language algebra in the nonterminals of $\cnc(\overline G, a)$, but syntactic objects are.
Second, if the nonterminals contained syntactic objects, but not right-hand sides of rules, then we do not know how to compute $\cnc$.
\begin{lemma}[restate={[name={}]lemcncwp}]\label{lem:cnc-weight-preserving}
The canonical $\walg{K}$-weighted deduction system $\cnc$ is weight-preserving. Hence, $\cnc$ is sound and complete.
\end{lemma}
\begin{proof}
For the proof of Lemma \ref{lem:cnc-weight-preserving}, we refer to Appendix~\ref{app:cnc-weight-preserving}.
\end{proof}
\begin{example}\label{ex:wds-lcfrs}
We consider the tropical M-monoid $\walg T = (\mathbb R_0^\infty,\min,\infty,\Omega_+,\inf)$ (cf. Example~\ref{ex:tropical-M-monoid}) and the alphabet~$\Delta$ from Example~\ref{ex:lcfrs}.
We illustrate the canonical $\walg T$-weighted deduction system
\[
\cnc: \wlmclass{\gclass{\lalg{LCFRS}^\Delta}, \walg T} \times \Delta^* \to \wlmclass{\gclass{\lalg{CFG}^\emptyset}, \walg T}
\]
of which the argument-value relationship is determined by the inference rules discussed in \cite[Chapter~7]{kal10}.
We apply $\cnc$ to the linear context-free rewriting system with $G = (N,\Sigma,A_0,R)$ from Example~\ref{ex:lcfrs} and the string $a = \terminal{Jan}\ \terminal{Piet}\ \terminal{Marie}\ \terminal{zag}\ \terminal{helpen}\ \terminal{lezen}$.
The weights of the rules of~$G$ in the tropical M-monoid are shown in Table~\ref{tab:lcfrs-weighted}.
Then $\cnc(\overline G, a)$, where $\overline G = ((G,\lalg{LCFRS}^\Delta),\walg T,\wt)$, is the wRTG-LM
\[
\cnc(\overline G, a) = \big((G', \lalg{CFG}^\emptyset), \ (\mathbb R_0^\infty,\min,\infty,\Omega_+,\inf), \ \wt'\big) \enspace,
\]
where
\begin{itemize}
\item $G'=(N',\Sigma',A_0',R')$ is a $\{\iota\}$-sorted RTG given by
\begin{itemize} \fussy
\item $N' = N'_\iota = \{ [A,t,v] \mid A \in \{ \nont{root},\nont{nsub},\nont{dobj} \}, t \in \mathrm{rhs}(R), v \in \factors(a) \} \cup \{ [\nont{root},a] \}$, where
\[ \mathrm{rhs}(R) = \{ \begin{aligned}[t]
&\langle \terminal{Jan} \rangle,\langle \terminal{Piet} \rangle,\langle \terminal{Marie} \rangle, \langle x^{(1)}_1 x^{(2)}_1 \terminal{zag} \, x^{(2)}_2 \rangle(\nont{nsub},\nont{dobj}), \\
&\langle x^{(1)}_1 x^{(2)}_1,\terminal{helpen} \, x^{(2)}_2 \rangle(\nont{nsub},\nont{dobj}),\langle x^{(1)}_1, \terminal{lezen} \rangle(\nont{nsub}) \};
\end{aligned} \]
first, we give an intuition for the computation of $\factors(a)$ by showing the factors of two particular elements of $\lalg{LCFRS}^\Delta$:\\
for $\terminal{Jan}\ \terminal{Piet}\ \terminal{Marie}\ \terminal{zag}\ \terminal{helpen}\ \terminal{lezen} \in (\lalg{LCFRS}^\Delta)_1$ we have
\begin{align*}
&\phi\big(\langle x^{(1)}_1 x^{(2)}_1 \terminal{zag}\ x^{(2)}_2 \rangle\big)^{-1} (\terminal{Jan}\ \terminal{Piet}\ \terminal{Marie}\ \terminal{zag}\ \terminal{helpen}\ \terminal{lezen}) = \\
&\qquad \{ \begin{aligned}[t]
&\varepsilon, (\terminal{Jan}\ \terminal{Piet}\ \terminal{Marie}, \terminal{helpen}\ \terminal{lezen}), \\
&\terminal{Jan}, (\terminal{Piet}\ \terminal{Marie}, \terminal{helpen}\ \terminal{lezen}), \\
&\terminal{Jan}\ \terminal{Piet}, (\terminal{Marie}, \terminal{helpen}\ \terminal{lezen}), \\
&\terminal{Jan}\ \terminal{Piet}\ \terminal{Marie}, (\varepsilon, \terminal{helpen}\ \terminal{lezen}) \}
\end{aligned}
\intertext{and for $(\terminal{Piet}\ \terminal{Marie}, \terminal{helpen}\ \terminal{lezen}) \in (\lalg{LCFRS}^\Delta)_2$ we have}
&\phi\big(\langle x^{(1)}_1 x^{(2)}_1, \terminal{helpen}\ x^{(2)}_2 \rangle\big)^{-1} (\terminal{Piet}\ \terminal{Marie}, \terminal{helpen}\ \terminal{lezen}) = \\
&\qquad \{ \varepsilon, (\terminal{Piet}\ \terminal{Marie}, \terminal{lezen}), \terminal{Piet}, (\terminal{Marie}, \terminal{lezen}), \terminal{Piet}\ \terminal{Marie}, (\varepsilon, \terminal{lezen}) \} \enspace.
\end{align*}
In total, the set $\factors(a)$ is the set
\[
\factors(a) = \{ \begin{aligned}[t]
&\terminal{Jan}\ \terminal{Piet}\ \terminal{Marie}\ \terminal{zag}\ \terminal{helpen}\ \terminal{lezen}, \\
&(\terminal{Jan}\ \terminal{Piet}\ \terminal{Marie}, \terminal{helpen}\ \terminal{lezen}), (\terminal{Piet}\ \terminal{Marie},\terminal{helpen}\ \terminal{lezen}), \\
&(\terminal{Marie}, \terminal{helpen}\ \terminal{lezen}), (\varepsilon, \terminal{helpen}\ \terminal{lezen}), \\
&\varepsilon, \terminal{Jan}, \terminal{Jan}\ \terminal{Piet}, \terminal{Jan}\ \terminal{Piet}\ \terminal{Marie}, \\
&(\terminal{Jan}\ \terminal{Piet}\ \terminal{Marie}, \terminal{lezen}), (\terminal{Piet}\ \terminal{Marie}, \terminal{lezen}), \\
&(\terminal{Marie}, \terminal{lezen}), (\varepsilon, \terminal{lezen}), \\
&\terminal{Piet}, \terminal{Piet}\ \terminal{Marie}, \terminal{Marie} \} \enspace.
\end{aligned}
\]
\item $\Sigma' = \Sigma'_{(\varepsilon,\iota)} \cup \Sigma'_{(\iota,\iota)} \cup \Sigma'_{(\iota\iota,\iota)}$
where
\begin{align*}
\Sigma'_{(\varepsilon,\iota)} = \{ \langle \varepsilon \rangle \}\enspace, \
\Sigma'_{(\iota,\iota)} = \{ \langle x_1 \rangle \} \enspace, \ \text{and} \
\Sigma'_{(\iota\iota,\iota)} = \{ \langle x_1 x_2 \rangle \} \enspace,
\end{align*}
\item $A_0' = [\nont{root},a]$, and
\item the set of rules $R'$ is given in Figure~\ref{fig:wds:cyk},
\end{itemize}
and
\item For every $r \in R' \setminus R'_{A_0'}$ with $r = ([A,t,u] \to \langle x_1 \dots x_k \rangle ([A_1,t_1,u_1],\dots,[A_k,t_k,u_k]))$ and $k \in \mathbb N$ we define $\wt'(r) = \wt(A \to t)$ and for every $r \in R'_{A_0'}$ we let $\wt'(r) = \mul^{(1)}_0$. \qedhere
\end{itemize}
\begin{table}[t]
\centering
\begin{tabular}{cc}
\toprule
Rule $r \in R$ & $\wt(r)$ \\
\midrule
\multicolumn{1}{c}{
\begin{tabular}{r@{\;{$\to$}\;}l}
$\mathrm{root}$ & $\langle x^{(1)}_1 x^{(2)}_1 \terminal{zag}\ x^{(2)}_2\rangle (\textrm{nsub}, \textrm{dobj})$ \\
$\mathrm{dobj}$ & $\langle x^{(1)}_1 x^{(2)}_1, \terminal{helpen}\ x^{(2)}_2\rangle (\textrm{nsub}, \textrm{dobj})$ \\
$\mathrm{dobj}$ & $\langle x^{(1)}_1, \terminal{lezen}\rangle (\textrm{nsub})$ \\
$\mathrm{nsub}$ & $\langle \terminal{Jan}\rangle$ \\
$\mathrm{nsub}$ & $\langle \terminal{Piet}\rangle$ \\
$\mathrm{nsub}$ & $\langle \terminal{Marie}\rangle$
\end{tabular}
}
&
\multicolumn{1}{c}{
\begin{tabular}{r@{\;{$\mapsto$}\;}l}
$(\welem{k}_1,\welem{k}_2)$ & $0 + \welem{k}_1 + \welem{k}_2$ \\
$(\welem{k}_1,\welem{k}_2)$ & $4 + \welem{k}_1 + \welem{k}_2$ \\
$\welem{k}$ & $7 + \welem{k}$ \\
$()$ & $3$ \\
$()$ & $5$ \\
$()$ & $12$
\end{tabular}
}
\\
\bottomrule
\end{tabular}
\caption{The linear context-free rewriting system from Example~\ref{ex:lcfrs} weighted in the tropical M-monoid. The numbers occurring in the definitions of $\wt(r)$ are chosen arbitrarily.}\label{tab:lcfrs-weighted}
\end{table}
\begin{figure}
\caption{Application of the canonical $\walg T$-weighted deduction system to the grammar of Example~\ref{ex:lcfrs}.}
\label{fig:wds:cyk}
\end{figure}
\end{example}
We finish this section with a result which shows how the canonical weighted deduction system connects two classes of RTG-LMs.
\begin{lemma}[restate={[name={}]lemnlcncacyc}]\label{lem:no-loops-cnc-acyclic}
For every $\overline G \in \wlmclass{\gclass{nl} \cap \gclass{\operatorname{fin-dc}}, \wclass{all}}$ and syntactic object $a$ it holds that $\cnc(\overline G, a) \in \wlmclass{\gclass{acyc}, \wclass{all}}$.
\end{lemma}
\begin{proof}
For the proof of Lemma \ref{lem:no-loops-cnc-acyclic}, we refer to Appendix~\ref{app:nl-cnc-acyc}.
\end{proof}
\subsection{Value computation algorithm}
\label{sec:value-computation-algorithm}
\newlength\variableslength
\newlength\tabularlength
\begin{algorithm}[h]
\begin{algorithmic}[1]
\Require a $(\gclass{\lalg{CFG}^\emptyset}, \wclass{all})$-LM $\big((G', \lalg{CFG}^\emptyset), (\walg K, \oplus, \welem 0, \Omega, \infsum), \wt'\big)$ with $G' = (N', \Sigma', A_0', R')$
\setlength\tabularlength{\textwidth}
\settowidth\variableslength{\algorithmicvariables}
\addtolength\tabularlength{-\variableslength}
\addtolength\tabularlength{-1.05em}
\Variables \begin{tabular}[t]{p{\tabularlength}}
$V: N' \to \walg K$, $V_{\mathrm{new}} \in \walg K$ \Fixedcomment{$\mathcal V: N' \to \mathcal P(\T_{R'})$, $\mathcal V_{\mathrm{new}} \subseteq \T_{R'}$} \\
$\changed \in \mathbb B$ \\
\Fixedcomment{$\mathit{select} \in N'$} \\
\end{tabular}
\Ensure $V(A_0')$
\ForEach{$A \in N'$}\label{l:initialization}
\State $V(A) \gets \welem 0$\label{l:init-v} \Fixedcomment{$\mathcal V(A) \gets \emptyset$}
\EndFor
\Statex \Fixedcomment{$n \gets 0$}
\Repeat\label{l:loop}
\State $\changed \gets \mathsf{f\mkern-1mu f}$\label{l:reset-changed}
\ForEach{$A \in N'$}\label{l:for-loop} \Fixedcomment{$\mathit{select} \gets A$}
\State $V_{\mathrm{new}} \gets \welem 0$\label{l:init-vnew} \Fixedcomment{$\mathcal V_{\mathrm{new}} \gets \emptyset$}
\ForEach{$r = \big( A \to \sigma(A_1, \dots, A_k) \big)$ in $R'$}\label{l:updates}
\State $V_{\mathrm{new}} \gets V_{\mathrm{new}} \oplus \wt'(r)\big(V(A_1), \dots, V(A_k)\big)$\label{l:update-vnew}
\Statex \Comment $\mathcal V_{\mathrm{new}} \gets \mathcal V_{\mathrm{new}} \cup \{ r(d_1, \dots, d_k) \mid d_1 \in \mathcal V(A_1), \dots, d_k \in \mathcal V(A_k) \}$
\EndFor
\If{$V(A) \not= V_{\mathrm{new}}$}\label{l:check-difference}
\State $\changed \gets \mathsf{t\mkern-1mu t}$\label{l:update-changed}
\EndIf
\State $V(A) \gets V_{\mathrm{new}}$\label{l:update-v} \Fixedcomment{$\mathcal V(A) \gets \mathcal V_{\mathrm{new}}$; $n \gets n + 1$}
\EndFor
\Until{$\changed = \mathsf{f\mkern-1mu f}$}\label{l:loop-condition}
\end{algorithmic}
\caption{Value computation algorithm}\label{alg:mmonoid}
\end{algorithm}
\begingroup\setlength\emergencystretch{20pt}
\index{value computation algorithm}
The value computation algorithm is given as Algorithm~\ref{alg:mmonoid}.
It takes as input a $(\gclass{\lalg{CFG}^\emptyset}, \wclass{all})$-LM $\big((G', \lalg{CFG}^\emptyset), \walg K,\wt'\big)$ with $G' = (N', \Sigma', A_0', R')$ and outputs the value $V(A_0') \in \walg K$, where $V: N' \to \walg K$ is a mapping it maintains.
Furthermore, it maintains the Boolean variable \emph{changed}.
The algorithm consists of two phases:
In the first phase (lines~\ref{l:initialization}--\ref{l:init-v}), for every nonterminal~$A$ it lets $V(A)$ be~$\welem 0$.
In the second phase (lines~\ref{l:loop}--\ref{l:loop-condition}), a \emph{repeat-until loop} is iterated until the variable \emph{changed} has the value~$\mathsf{f\mkern-1mu f}$.
This variable is set to~$\mathsf{f\mkern-1mu f}$ at the start of each iteration (line~\ref{l:reset-changed}), but may be assigned the value~$\mathsf{t\mkern-1mu t}$ in line~\ref{l:update-changed}.
In each iteration of the repeat-until loop, an \emph{inner for loop} iterates over every nonterminal (lines~\ref{l:for-loop}--\ref{l:update-v}).
For each nonterminal~$A$, a value~$V_{\mathrm{new}}$ is computed (lines~\ref{l:init-vnew}--\ref{l:update-vnew}), where
\[
V_{\mathrm{new}} = \bigoplus_{\substack{r \in R': \\ r = (A \to \sigma(A_1, \dots, A_k))}} \wt'(r)\big(V(A_1), \dots, V(A_k)\big) \enspace.
\]
If this value differs from $V(A)$, then the variable \emph{changed} is set to~$\mathsf{t\mkern-1mu t}$.
Finally, $V(A)$ is set to~$V_{\mathrm{new}}$ (lines~\ref{l:check-difference}--\ref{l:update-v}).
\endgroup
Note that we have placed additional variables and statements behind the comment symbol~$\triangleright$.
These are not part of the algorithm and can be ignored for the time being.
They describe formal properties of the algorithm which we will refer to in the next section.
\section{Termination and correctness of the M-monoid parsing algorithm}
We are interested in two formal properties of Algorithm~\ref{alg:mmonoid} and of the M-monoid parsing algorithm (Figure~\ref{fig:alg}): termination and correctness.
Algorithm~\ref{alg:mmonoid} computes the weights of the ASTs bottom-up and reuses the results of common subtrees (as in dynamic programming); this requires distributivity of the weight algebra.
Moreover, solving the M-monoid parsing problem involves the computation of an infinite sum, which can only be done by a terminating algorithm in special cases (cf.\ the start of Section~\ref{sec:closed}).
We have already shown (cf.\ Theorem~\ref{thm:tr-trc}) that the class of closed wRTG-LMs is a good candidate for such a special case.
Hence, in the following, we will be concerned with inputs of that class.
\subsection{Properties of the value computation algorithm}
In the following, we will study formal properties of Algorithm~\ref{alg:mmonoid}.
We are mainly interested in two questions:
\begin{enumerate}[label=(\alph*)]
\item Does the algorithm terminate for every input?
\item Does $V(A_0') = \infsum_{d \in \mathrm{AST}(G')} \wt'(d)_{\walg K}$ hold after termination?
\end{enumerate}
For the analysis of Algorithm~\ref{alg:mmonoid} we have introduced the additional variables $\mathcal V: N' \to \mathcal P(\T_{R'})$, $\mathcal V_{\mathrm{new}} \subseteq \T_{R'}$, $\mathit{select} \in N'$, and $n \in \mathbb N$, where
\begin{itemize}
\item $\mathcal V: N' \to \mathcal P(\T_{R'})$ captures for each nonterminal~$A$ and each iteration of the inner for loop the subset of~$(\T_{R'})_A$ which contributes to the computation of $V(A)$ in that iteration,
\item $\mathcal V_{\mathrm{new}} \subseteq \T_{R'}$ is used to accumulate the set $\mathcal V(A)$ of abstract syntax trees,
\item in each iteration of the inner for loop, $\mathit{select} \in N'$ is the nonterminal of that iteration,
and
\item $n \in \mathbb N$ is used to count the iterations of the inner for loop.
\end{itemize}
We have placed these new variables and the statements which modify them behind the comment symbol~$\triangleright$.
In the analysis we will treat these comments as if they were actual statements (\emph{auxiliary statements}).
For technical purposes, we formalize the sequences of values which are taken by~$V$,~$\mathcal V$, $\mathit{select}$, and $\changed$ during the iterations of the inner for loop as families.
We define the families $(V_n \mid n \in \mathbb N)$,
$(\mathcal V_n \mid n \in \mathbb N)$,
$(\mathit{select}_n \mid n \in \mathbb N)$, and
$(\changed_n \mid n \in \mathbb N)$
such that for each $n \in \mathbb N$, we have $V_n: N' \to \walg K$,
$\mathcal V_n: N' \to \mathcal P(\T_{R'})$,
$\mathit{select}_n \in N' \cup \{ \bot \}$,
$\changed_n \in \mathbb B$, and the following holds for every $n \in \mathbb N$:
\begin{itemize}
\item if lines~\ref{l:init-vnew}--\ref{l:update-v} have been executed~$n$ times, then after executing line~\ref{l:update-v}, including the auxiliary statements, the values of~$V$,~$\mathcal V$, $\mathit{select}$, and $\changed$ are~$V_n$,~$\mathcal V_n$, $\mathit{select}_n$, and $\changed_n$ respectively,
\item intuitively, we define the values of $V_n$, $\mathcal V_n$, $\mathit{select}_n$, and $\changed_n$ for those~$n$ which are beyond termination of the algorithm by copying the corresponding values from the final iteration.
Formally, if there is a $k \in \mathbb N$ such that $k < n$ and $\changed_{|N'| \cdot \lfloor k / |N'| \rfloor} = \mathsf{f\mkern-1mu f}$, then we define $V_n = V_k$, $\mathcal V_n = \mathcal V_k$, $\mathit{select}_n = \bot$ and $\changed_n = \mathsf{f\mkern-1mu f}$.
\end{itemize}
Thus,~$V_0$ and~$\mathcal V_0$ denote the respective values after the execution of lines~\ref{l:initialization}--\ref{l:init-v} and $\mathit{select}_0$ is the nonterminal chosen by the inner for loop when Algorithm~\ref{alg:mmonoid} executes line~\ref{l:for-loop} for the first time.
We define $\changed_0 = \mathsf{t\mkern-1mu t}$.
Let $n \in \mathbb N$.
We say that the algorithm \emph{terminates after~$n$ iterations of the inner for loop} if~$n$ is the smallest number such that $\changed_{|N'| \cdot \lfloor n / |N'| \rfloor} = \mathsf{f\mkern-1mu f}$.
We say that the algorithm \emph{still runs in the $n$th iteration of the inner for loop} if for every $n' \in \mathbb N$ with $n' \le n$ it holds that $\changed_{|N'| \cdot \lfloor n' / |N'| \rfloor} = \mathsf{t\mkern-1mu t}$.
Let $A \in N'$ and $d \in (\T_{R'})_A$.
We say that \emph{$d$ is first added to $\mathcal V(A)$ in the $n$th iteration of the inner for loop} if $d \in \mathcal V_{n+1}(A)$ and for every $n' \in \mathbb N$ with $n' \le n$, $d \not\in \mathcal V_{n'}(A)$.
\begin{observation}\label{obs:v-nplus1}
For every $n \in \mathbb N$ and $A \in N'$ such that the algorithm still runs in the $n$th iteration of the inner for loop the following holds:
If $\mathit{select}_n = A$, then
\begin{align*}
V_{n+1}(A) &= \bigoplus_{\substack{r \in R': \\ r = (A \to \sigma(A_1, \dots, A_k))}} \wt'(r)\big(V_n(A_1), \dots, V_n(A_k)\big)
\intertext{and}
\mathcal V_{n+1}(A) &= \bigcup_{\substack{r \in R': \\ r = (A \to \sigma(A_1, \dots, A_k))}} \{ r(d_1, \dots, d_k) \mid d_1 \in \mathcal V_n(A_1), \dots, d_k \in \mathcal V_n(A_k) \} \enspace.
\end{align*}
If $\mathit{select}_n \not= A$, then $V_{n+1}(A) = V_n(A)$ and $\mathcal V_{n+1}(A) = \mathcal V_n(A)$.
\end{observation}
\index{value computation algorithm!correct}
Let $\wlmclass{} \subseteq \wlmclass{\gclass{all}, \wclass{all}}$.
Algorithm~\ref{alg:mmonoid} is \emph{correct for $\wlmclass{}$} if for every $\overline G{}' = \big((G', \lalg{CFG}^\emptyset), \walg K, \wt'\big)$ in $\wlmclass{}$ with $G' = (N', \Sigma', A_0', R')$ the following holds:
if Algorithm~\ref{alg:mmonoid} is executed with~$\overline G{}'$ as input, then $V(A_0') = \infsum_{d \in \mathrm{AST}(G')} \wtphom{d}$ after termination.
In the following subsections we show that Algorithm~\ref{alg:mmonoid} terminates for every wRTG-LM in the class $\wlmclass[\mathrm{closed}]{\gclass{\lalg{CFG}^\emptyset}, \wclass{dist} \cap \wclass{\operatorname{d-comp}}}$ and that it is correct for this class.
\begin{quote}
\em For the rest of this section, we let $c \in \mathbb N$ and $\overline G{}' = \big((G', \lalg{CFG}^\emptyset), (\walg K, \oplus, \welem 0, \Omega), \wt'\big)$ with $G' = (N', \Sigma', A_0', R')$ be a $c$-closed $(\gclass{\lalg{CFG}^\emptyset}, \wclass{dist} \cap \wclass{\operatorname{d-comp}})$-LM.
\end{quote}
We start with two general lemmas that are needed for both termination and correctness.
\begin{lemma}[restate={[name={}]lemvisbigsum}]\label{lem:v-is-bigsum}
For every $n \in \mathbb N$ and $A \in N'$ it holds that $V_n(A) = \bigoplus_{d \in \mathcal V_n(A)} \wtphom{d}$.
\end{lemma}
\begin{proof}
For the proof of Lemma \ref{lem:v-is-bigsum}, we refer to Appendix~\ref{app:vca-general}.
\end{proof}
\begin{lemma}[restate={[name={}]lemmcvmonotone}]\label{lem:mcv-monotone}
For every $n \in \mathbb N$ and $A \in N'$ the following holds: for each $n' \in \mathbb N$ with $n' > n$, $\mathcal V_n(A) \subseteq \mathcal V_{n'}(A)$.
\end{lemma}
\begin{proof}
For the proof of Lemma \ref{lem:mcv-monotone}, we refer to Appendix~\ref{app:vca-general}.
\end{proof}
\subsubsection{Termination of the value computation algorithm}
An important step in showing that Algorithm~\ref{alg:mmonoid} terminates on every closed wRTG-LM is the following Lemma.
\begin{lemma}[restate={[name={}]lemcutcycles}]\label{lem:cut-cycles}
For every $d \in \T_{R'}, n \in \mathbb N$, and $A \in N'$ the following holds:
if $d \in \mathcal V_n(A)$, then $\cotrees(d) \subseteq \mathcal V_n(A)$.
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:cut-cycles}, we refer to Appendix~\ref{app:vca-termination}.
\end{proof}
\begin{quote}
\em For the rest of this subsection, for every $n \in \mathbb N$ and $A \in N'$ we let $\Delta_n(A) = \mathcal V_{n+1}(A) \setminus \mathcal V_n(A)$.
\end{quote}
From Lemma~\ref{lem:cut-cycles}, we are able to conclude the following.
\begin{lemma}[restate={[name={}]lemmcvgrowsonchange}]\label{lem:mcv-grows-on-change}
For every $n \in \mathbb N$ and $A \in N'$ the following holds: if $V_{n+1}(A) \not= V_n(A)$, then $\mathcal V_{n+1}(A) \cap \T_{R'}^{(c)} \supset \mathcal V_n(A) \cap \T_{R'}^{(c)}$.
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:mcv-grows-on-change}, we refer to Appendix~\ref{app:vca-termination}.
\end{proof}
\begin{boxtheorem}\label{thm:vca-terminating}
For every wRTG-LM $\overline G$ in $\wlmclass[\mathrm{closed}]{\gclass{\lalg{CFG}^\emptyset}, \wclass{\operatorname{d-comp}} \cap \wclass{dist}}$ the following holds:
if the value computation algorithm (Algorithm~\ref{alg:mmonoid}) is executed with~$\overline G$ as input, then it terminates.
\end{boxtheorem}
\begin{proof}
We define the mapping $\ffint: \mathcal P(\T_{R'}^{(c)}) \times \mathbb B \to \mathbb N^2$ such that $\ffint(D, b) = (|\T_{R'}^{(c)}| - |D|, \delta(b))$ for each $D \subseteq \T_{R'}^{(c)}$ and $b \in \mathbb B$,
where $\delta(\mathsf{t\mkern-1mu t}) = 1$ and $\delta(\mathsf{f\mkern-1mu f}) = 0$.
Then for each $n \in \mathbb N$ with
\[
\bigvee_{n' \in \mathbb N: \, n \cdot |N'| < n' \leq (n+1) \cdot |N'|} \changed_{n'} = \mathsf{t\mkern-1mu t}
\]
we have
\begin{align*}
&\ffint\left(\T_{R'}^{(c)} \cap \bigcup_{A \in N'} \mathcal V_{(n+1) \cdot |N'|}(A), \bigvee_{\substack{n' \in \mathbb N: \\ n \cdot |N'| < n' \leq (n+1) \cdot |N'|}} \changed_{n'}\right)
\\
&= \left(|\T_{R'}^{(c)}| - \left|\T_{R'}^{(c)} \cap \bigcup_{A \in N'} \mathcal V_{(n+1) \cdot |N'|}(A)\right|, \delta\left(\bigvee_{\substack{n' \in \mathbb N: \\ n \cdot |N'| < n' \leq (n+1) \cdot |N'|}} \changed_{n'}\right)\right)
\\
&> \left(|\T_{R'}^{(c)}| - \left|\T_{R'}^{(c)} \cap \bigcup_{A \in N'} \mathcal V_{(n+2) \cdot |N'|}(A)\right|, \delta\left(\bigvee_{\substack{n' \in \mathbb N: \\ (n+1) \cdot |N'| < n' \leq (n+2) \cdot |N'|}} \changed_{n'}\right)\right) \tag{*}
\\
&= \ffint\left(\T_{R'}^{(c)} \cap \bigcup_{A \in N'} \mathcal V_{(n+2) \cdot |N'|}(A), \bigvee_{\substack{n' \in \mathbb N: \\ (n+1) \cdot |N'| < n' \leq (n+2) \cdot |N'|}} \changed_{n'}\right)
\end{align*}
where \enquote{<} is the strict ordering relation induced by~$\leq$, the natural order on~$\mathbb N^2$, and that~(*) holds can be seen as follows.
First, for every $A \in N'$, $\mathcal V_{(n+2) \cdot |N'|}(A) \supseteq \mathcal V_{(n+1) \cdot |N'|}(A)$ by Lemma~\ref{lem:mcv-monotone} and thus $\T_{R'}^{(c)} \cap \mathcal V_{(n+2) \cdot |N'|}(A) \supseteq \T_{R'}^{(c)} \cap \mathcal V_{(n+1) \cdot |N'|}(A)$.
Then we distinguish two cases:
\begin{enumerate}
\item If there is an $A \in N'$ such that $\T_{R'}^{(c)} \cap \mathcal V_{(n+2) \cdot |N'|}(A) \supset \T_{R'}^{(c)} \cap \mathcal V_{(n+1) \cdot |N'|}(A)$, then
\begin{align*}
\T_{R'}^{(c)} \cap \bigcup_{A' \in N'} \mathcal V_{(n+2) \cdot |N'|}(A') &= \Big( \T_{R'}^{(c)} \cap \mathcal V_{(n+2) \cdot |N'|}(A) \Big) \cup \Big( \T_{R'}^{(c)} \cap \bigcup_{A' \in N' \setminus \{ A \}} \mathcal V_{(n+2) \cdot |N'|}(A') \Big)
\\
&\supset \Big( \T_{R'}^{(c)} \cap \mathcal V_{(n+1) \cdot |N'|}(A) \Big) \cup \Big( \T_{R'}^{(c)} \cap \bigcup_{A' \in N' \setminus \{ A \}} \mathcal V_{(n+2) \cdot |N'|}(A') \Big)
\\
&\supseteq \Big( \T_{R'}^{(c)} \cap \mathcal V_{(n+1) \cdot |N'|}(A) \Big) \cup \Big( \T_{R'}^{(c)} \cap \bigcup_{A' \in N' \setminus \{ A \}} \mathcal V_{(n+1) \cdot |N'|}(A') \Big)
\\
&= \T_{R'}^{(c)} \cap \bigcup_{A' \in N'} \mathcal V_{(n+1) \cdot |N'|}(A')
\end{align*}
and thus
\[ |\T_{R'}^{(c)}| - \left|\T_{R'}^{(c)} \cap \bigcup_{A' \in N'} \mathcal V_{(n+1) \cdot |N'|}(A')\right| > |\T_{R'}^{(c)}| - \left|\T_{R'}^{(c)} \cap \bigcup_{A' \in N'} \mathcal V_{(n+2) \cdot |N'|}(A')\right| \enspace. \]
\item Otherwise for every $A \in N'$ we have that $\T_{R'}^{(c)} \cap \mathcal V_{(n+2) \cdot |N'|}(A) = \T_{R'}^{(c)} \cap \mathcal V_{(n+1) \cdot |N'|}(A)$.
Then for every $n' \in \mathbb N$ with $(n+1) \cdot |N'| \leq n' < (n+2) \cdot |N'|$, by Lemma~\ref{lem:mcv-monotone}
\[ \T_{R'}^{(c)} \cap \mathcal V_{n' + 1}(A) \subseteq \T_{R'}^{(c)} \cap \mathcal V_{n'}(A) \enspace, \]
thus by Lemma~\ref{lem:po-chains}
\[ \T_{R'}^{(c)} \cap \mathcal V_{n' + 1}(A) = \T_{R'}^{(c)} \cap \mathcal V_{n'}(A) \enspace, \]
and by Lemma~\ref{lem:mcv-grows-on-change} $V_{n'+1}(A) = V_{n'}(A)$.
Thus $\bigvee_{n' \in \mathbb N: \, (n+1) \cdot |N'| < n' \leq (n+2) \cdot |N'|} \changed_{n'} = \mathsf{f\mkern-1mu f}$ and $\delta(\mathsf{t\mkern-1mu t}) = 1 > 0 = \delta(\mathsf{f\mkern-1mu f})$.
\end{enumerate}
This proves (*).
Now we prove termination by contradiction.
For this, we assume that
\[
\bigvee_{n' \in \mathbb N: \, n \cdot |N'| < n' \leq (n+1) \cdot |N'|} \changed_{n'} = \mathsf{t\mkern-1mu t}
\]
for every $n \in \mathbb N$.
We define the set
\[ I = \left\{ \ffint(\T_{R'}^{(c)} \cap \bigcup_{A \in N'} \mathcal V_{(n+1) \cdot |N'|}(A), \mathsf{t\mkern-1mu t}) \,\middle|\, n \in \mathbb N \right\} \subseteq \mathbb N^2 \enspace. \]
Clearly~$I$ is nonempty.
Since $(\mathbb N^2,<)$ is well-founded, the set~$I$ has a minimal element.
Thus there is an $m \in \mathbb N$ such that for each $n \in \mathbb N$,
\[ \ffint \left( \T_{R'}^{(c)} \cap \bigcup_{A \in N'} \mathcal V_{(m+1) \cdot |N'|}(A), \mathsf{t\mkern-1mu t} \right) \leq \ffint \left( \T_{R'}^{(c)} \cap \bigcup_{A \in N'} \mathcal V_{(n+1) \cdot |N'|}(A), \mathsf{t\mkern-1mu t} \right) \]
Thus, by choosing $n = m+1$, we obtain
\begin{align*}
\ffint \left( \T_{R'}^{(c)} \cap \bigcup_{A \in N'} \mathcal V_{(m+1) \cdot |N'|}(A), \mathsf{t\mkern-1mu t} \right) &\leq \ffint \left( \T_{R'}^{(c)} \cap \bigcup_{A \in N'} \mathcal V_{(m+2) \cdot |N'|}(A), \mathsf{t\mkern-1mu t} \right) \\
&< \ffint \left( \T_{R'}^{(c)} \cap \bigcup_{A \in N'} \mathcal V_{(m+1) \cdot |N'|}(A), \mathsf{t\mkern-1mu t} \right) \tag{by *}
\end{align*}
which is a contradiction.
Thus, there is an $n \in \mathbb N$ such that $\bigvee_{n' \in \mathbb N: \, n \cdot |N'| < n' \leq (n+1) \cdot |N'|} \changed_{n'} = \mathsf{f\mkern-1mu f}$.
We let $n_0$ be the smallest $n \in \mathbb N$ such that $\bigvee_{n' \in \mathbb N: \, n \cdot |N'| < n' \leq (n+1) \cdot |N'|} \changed_{n'} = \mathsf{f\mkern-1mu f}$.
Then the algorithm terminates after~$n_0$ executions of lines~\ref{l:init-vnew}--\ref{l:update-v}.
\end{proof}
\subsubsection{Correctness of the value computation algorithm}\label{sec:vca-correct}
\begin{lemma}[restate={[name={}]lempassthrough}]\label{lem:value-passed-through}
For every $n \in \mathbb N$, $d \in \T_{R'}^{(c)}$ of the form $d = r(d_1, \dots, d_k)$ with $r = \big(A \to \sigma(A_1, \dots, A_k)\big)$, $\welem k_1, \dots, \welem k_k \in \walg K$, and $I \subseteq [k]$ such that
\begin{enumerate}
\item for every $i \in [k] \setminus I$, $d_i \in \mathcal V_n(A_i)$ and
\item for every $i \in I$, $V_n(A_i) = V_n(A_i) \oplus \welem k_i$
\end{enumerate}
the following holds:
if $\mathit{select}_n = A$, then $V_{n+1}(A) = V_{n+1}(A) \oplus \wt'(r)(\welem l_1, \dots, \welem l_k)$, where for each $i \in [k]$
\[ \welem l_i = \begin{cases}
\welem k_i &\text{if $i \in I$} \\
\wtphom{d_i} &\text{otherwise.}
\end{cases} \]
\end{lemma}
\begin{proof}
For the proof of Lemma~\ref{lem:value-passed-through}, we refer to Appendix~\ref{app:vca-correctness}.
\end{proof}
\begin{theorem}\label{thm:vca-correct}
\setlength\emergencystretch{8pt}
For every wRTG-LM $\overline G = \big((G', \lalg{CFG}^\emptyset), \walg K, \wt'\big)$ in $\wlmclass[\mathrm{closed}]{\gclass{\lalg{CFG}^\emptyset}, \wclass{\operatorname{d-comp}} \cap \wclass{dist}}$ where $G' = (N', \Sigma', A_0', R')$ the following holds:
if the value computation algorithm (Algorithm~\ref{alg:mmonoid}) is executed with~$\overline G$ as input, then after termination for every $A \in N'$ it holds that $V(A) = \infsum_{d \in (\T_{R'})_A} \wt'(d)_{\walg K}$.
\end{theorem}
\begin{proof}
Let $A \in N'$ and $n \in \mathbb N$ such that Algorithm~\ref{alg:mmonoid} terminates after~$n$ executions of lines~\ref{l:init-vnew}--\ref{l:update-v}.
By Theorem~\ref{thm:tr-trc} we have that $\infsum_{d \in (\T_{R'})_A} \wtphom{d} = \bigoplus_{d \in (\T_{R'}^{(c)})_A} \wtphom{d}$.
Furthermore
\begin{align*}
V_{n+1}(A) &= \bigoplus_{d \in \mathcal V_{n+1}(A)} \wtphom{d}
\tag{Lemma~\ref{lem:v-is-bigsum}} \\
&= \bigoplus_{d \in \mathcal V'_{n+1}(A)} \wtphom{d} \enspace,
\tag{Theorem~\ref{thm:outside-trees-subsumed}}
\end{align*}
where $\mathcal V_{n+1}'(A) = \mathcal V_{n+1}(A) \cap (\T_{R'}^{(c)})_A$.
Hence it remains to show that $V_{n+1}(A) = \bigoplus_{d \in (\T_{R'}^{(c)})_A} \wtphom{d}$, which we do by an indirect proof.
Assume that $V_{n+1}(A) \not= \bigoplus_{d \in (\T_{R'}^{(c)})_A} \wtphom{d}$.
Then there is a $d \in (\T_{R'}^{(c)})_A \setminus \mathcal V_{n+1}(A)$ such that
\begin{equation}
V_{n+1}(A) \not= V_{n+1}(A) \oplus \wtphom{d} \enspace. \tag{P1}
\end{equation}
We choose $A \in N'$ and $d \in (\T_{R'}^{(c)})_A \setminus \mathcal V_{n+1}'(A)$ such that~$d$ is the smallest tree (in height) in $\T_{R'}^{(c)}$ with this property.
By Observation~\ref{obs:v-nplus1}, $\{ r \in R' \mid \lhs(r) = A \ \text{and} \ \rk(r) = 0 \} \subseteq \mathcal V_{n+1}(A)$, hence $\height(d) \geq 1$.
We let $d = r(d_1, \dots, d_k)$ and $r = \big( A \to \sigma(A_1, \dots, A_k) \big)$ with $k > 0$.
Since~$d$ is the smallest tree with property~(P1), it cannot be the case for any $i \in [k]$ that $d_i \in (\T_{R'}^{(c)})_{A_i} \setminus \mathcal V_{n+1}'(A_i)$ and $V_{n+1}(A_i) \not= V_{n+1}(A_i) \oplus \wtphom{d_i}$.
Hence for every $i \in [k]$, either $d_i \in \mathcal V_{n+1}'(A_i)$ or $V_{n+1}(A_i) = V_{n+1}(A_i) \oplus \wtphom{d_i}$.
Now we distinguish two cases.
\begin{enumerate}
\item If $d_i \in \mathcal V_{n_A}(A_i)$ for every $i \in [k]$, where $n_A \in \mathbb N$ is the greatest number such that $\mathit{select}_{n_A} = A$, then by Observation~\ref{obs:v-nplus1}, $d \in \mathcal V_{n_A + 1}(A)$.
We note that $n_A \le n$.
Thus by Lemma~\ref{lem:mcv-monotone}, $d \in \mathcal V_{n+1}(A)$, which contradicts the definition of~$d$.
\item Otherwise, let $n_A \in \mathbb N$ be the greatest number such that $\mathit{select}_{n_A} = A$.
Then for every $i \in [k]$, $V_{n+1}(A_i) = V_{n+1}(A_i) \oplus \wtphom{d_i}$, $d_i \in \mathcal V_{n_A}(A_i)$, or there is an $n' \in \mathbb N$ with $n_A < n' \le n + 1$ such that $d_i \in \mathcal V_{n'}(A_i)$.
For every $i \in [k]$ such that only the latter holds, since the algorithm terminates after~$n$ executions of lines~\ref{l:init-vnew}--\ref{l:update-v}, we have that $V_{n_i}(A_i) = V_{n_i + 1}(A_i)$, where $n_i \in \mathbb N$ is the greatest number such that $\mathit{select}_{n_i} = A_i$.
Then by Lemma~\ref{lem:v-is-bigsum}
\[ V_{n_i}(A_i) = V_{n_i}(A_i) \oplus \bigoplus_{d' \in \Delta_{n_i}(A_i)} \wtphom{d'} \enspace. \]
Thus, by Lemma~\ref{lem:natord-subsume} (which is applicable due to Lemma~\ref{lem:d-complete-natord}), $V_{n_i}(A_i) = V_{n_i}(A_i) \oplus \wtphom{d_i}$, and by Observation~\ref{obs:v-nplus1}, $V_{n_A}(A_i) = V_{n_A}(A_i) \oplus \wtphom{d_i}$.
By termination after~$n$ iterations of the inner for loop and Observation~\ref{obs:v-nplus1}, $V_{n+1}(A) = V_{n_A}(A)$ for every $A \in N'$.
We let $I = \{ i \in [k] \mid V_{n_A}(A_i) = V_{n_A}(A_i) \oplus \wtphom{d_i} \}$.
Then by Lemma~\ref{lem:value-passed-through},
\begin{align*}
V_{n_A + 1}(A) &= V_{n_A + 1}(A) \oplus \wt'(r)\left(\wtphom{d_1}, \dots, \wtphom{d_k}\right) \\
&= V_{n_A + 1}(A) \oplus \wtphom{d} \enspace.
\end{align*}
Thus $V_{n+1}(A) = V_{n+1}(A) \oplus \wtphom{d}$, which contradicts the definition of~$d$. \qedhere
\end{enumerate}
\end{proof}
\begin{boxcorollary}\label{cor:vca-correct}
The value computation algorithm (Algorithm~\ref{alg:mmonoid}) is correct for the class $\wlmclass[\mathrm{closed}]{\gclass{\lalg{CFG}^\emptyset}, \wclass{\operatorname{d-comp}} \cap \wclass{dist}}$.
\end{boxcorollary}
\subsection{Properties of the M-monoid parsing algorithm}
\index{M-monoid parsing algorithm!correct}
We say that the M-monoid parsing algorithm is \emph{correct} for some class $\wlmclass{}$ of wRTG-LMs if it computes $\fparse(a)$ for every wRTG-LM in $\wlmclass{}$ and syntactic object $a$.
We want to show that the M-monoid parsing algorithm is correct for every wRTG-LM with finitely decomposable language algebra which is closed or nonlooping.
\begin{lemma}[restate={[name={}]lemclosedpreserved}]\label{lem:1and2-closed}
For every wRTG-LM $\overline G$ with finitely decomposable language algebra and syntactic object~$a$, the wRTG-LM $\cnc(\overline G, a)$ is closed if
\begin{itemize}
\item $\overline G$ is closed or
\item $\overline G$ is nonlooping and the weight algebra of $\overline G$ is in $\wclass{\operatorname{d-comp}} \cap \wclass{dist}$.
\end{itemize}
\end{lemma}
\begin{proof}
For the proof of Lemma \ref{lem:1and2-closed}, we refer to Appendix~\ref{app:mpa-properties}.
\end{proof}
\begin{boxtheorem}\label{thm:terminating-correct}
The M-monoid parsing algorithm is terminating and correct for every closed wRTG-LM with finitely decomposable language algebra and for every nonlooping wRTG-LM with finitely decomposable language algebra and weight algebra in $\wclass{\operatorname{d-comp}} \cap \wclass{dist}$.
\end{boxtheorem}
\begin{proof}
The M-monoid parsing algorithm terminates because (a) the computation of $\cnc$ is terminating for every wRTG-LM with finitely decomposable language algebra and (b) Algorithm~\ref{alg:mmonoid} is terminating by Theorem~\ref{thm:vca-terminating}, which can be applied due to Lemma~\ref{lem:1and2-closed}.
The M-monoid parsing algorithm is correct because (a) $\cnc$ is weight-preserving (Observation~\ref{obs:weight-preserving-parse} and Lemma~\ref{lem:cnc-weight-preserving}) and (b) Algorithm~\ref{alg:mmonoid} is correct by Theorem~\ref{thm:vca-correct} (which is applicable again due to Lemma~\ref{lem:1and2-closed}), hence
\[
\displaystyle\fparse(a) \overset{\text{(a)}}{=} \infsum_{d \in \mathrm{AST}(G')} \wt'(d)_{\walg K} \overset{\text{(b)}}{=} V(A_0') \enspace. \qedhere
\]
\end{proof}
\section{Application scenarios}
In this section we investigate the applicability of the value computation algorithm (Algorithm~\ref{alg:mmonoid}) and of the M-monoid parsing algorithm.
\index{value computation algorithm!applicable}
\index{M-monoid parsing algorithm!applicable}
We say that an algorithm is \emph{applicable} to a class of wRTG-LMs if it is terminating and correct for every wRTG-LM in that class.
We compare the variety of classes of wRTG-LMs to which our algorithms are applicable to that of similar algorithms from the literature.
In the end we informally discuss complexity results of our algorithms.
\subsection{Value computation algorithm}
By Theorem~\ref{thm:vca-terminating} and Corollary~\ref{cor:vca-correct}, the value computation algorithm (Algorithm~\ref{alg:mmonoid}) is applicable to every closed wRTG-LM with language algebra $\lalg{CFG}^\emptyset$, i.e., to every wRTG-LM in the class $\wlmclass[\mathrm{closed}]{\gclass{\lalg{CFG}^\emptyset},$ $\wclass{\operatorname{d-comp}} \cap \wclass{dist}}$.
We start by identifying some classes of closed wRTG-LMs.
\begin{boxtheorem}[restate={[name={}]thmapplications}]\label{thm:applications}
Each wRTG-LM in each of the following three classes is closed:
$\wlmclass{\gclass{all}, \wclass{fin, id, \preceq}}$,
$\wlmclass{\gclass{all}, \wclass{sup}}$, and
$\wlmclass{\gclass{acyc}, \wclass{\operatorname{d-comp}} \cap \wclass{dist}}$.
\end{boxtheorem}
\begin{proof}
For the proof, we refer to Appendix~\ref{app:applications}.
\end{proof}
By Theorem~\ref{thm:vca-terminating}, Corollary~\ref{cor:vca-correct} and Theorem~\ref{thm:applications}, the value computation algorithm (Algorithm~\ref{alg:mmonoid}) is applicable to each class of wRTG-LMs that is mentioned in Theorem~\ref{thm:applications} if we restrict their language algebra to $\lalg{CFG}^\emptyset$.
We note that $\wclass{fin, id, \preceq} \subseteq \wclass{\operatorname{d-comp}} \cap \wclass{dist}$ and $\wclass{sup} \subseteq \wclass{\operatorname{d-comp}} \cap \wclass{dist}$ by their definition and Lemma~\ref{lem:inf-idp-d-complete}.
Now we compare the applicability of the value computation algorithm (Algorithm~\ref{alg:mmonoid}) to the applicabilities of
\begin{enumerate*}[label=(\alph*)]
\item the second phase of the semiring parsing algorithm \cite[Figure 10]{Goodman1999},
\item Knuth's algorithm \cite[Section 3]{Knuth1977}, and
\item Mohri's algorithm \cite[Figure 2]{Mohri2002}.
\end{enumerate*}
In order to have a basis for a fair comparison, we understand the inputs of these algorithms as particular wRTG-LMs of the form $\big((G', \lalg{CFG}^\emptyset), (\walg K,\oplus,\welem{0},\Omega,\infsum), \wt'\big)$ with $G' = (N', \Sigma', A_0', R')$.
An algorithm is correct for such a wRTG-LM if it computes $\infsum_{d \in \mathrm{AST}(G')} \wt'(d)_{\walg{K}}$.
Thus the notion applicable is the same for the value computation algorithm and the other ones.
\begin{table*}[h]
\centering
{\small
\begin{tabular}{lll}
\toprule
Algorithm & Class of inputs & Comment \\
\midrule
(a) Goodman & $\wlmclass{\gclass{\lalg{CFG}^\emptyset} \cap \gclass{acyc}, \wclass{sr}}$ & acyclic RTGs and complete semirings \\[1ex]
(b) Knuth & $\wlmclass{\gclass{\lalg{CFG}^\emptyset}, \wclass{sup}}$ & superior M-monoids \\[1ex]
(c) Mohri & $\wlmclass[\mathrm{closed}]{\gclass{\lalg{CFG}^\emptyset} \cap \gclass{mon}, \wclass{com.\;sr}}$ & monadic RTGs and commutative semirings \\[1ex]
(d) Algorithm~\ref{alg:mmonoid} & $\wlmclass[\mathrm{closed}]{\gclass{\lalg{CFG}^\emptyset}, \wclass{\operatorname{d-comp}} \cap \wclass{dist}}$ & d-complete and distributive M-monoids \\
\bottomrule
\end{tabular}
}
\caption{
Comparison of the value computation algorithm (d) to three similar algorithms. The second column represents the class of wRTG-LMs to which the corresponding algorithm is applicable.}
\label{tab:comparison2}
\end{table*}
Table~\ref{tab:comparison2} shows for each algorithm the class of inputs to which it is applicable.
The algorithms (c) and (d) are applicable to closed wRTG-LMs; moreover, (c) is applicable to a proper subset of the inputs of (d).
Each input to which (b) is applicable is in the class $\wlmclass{\gclass{\lalg{CFG}^\emptyset}, \wclass{sup}}$ and thus, due to Theorem~\ref{thm:applications}, closed.
The inputs to which (a) is applicable constitute the class $\wlmclass{\gclass{\lalg{CFG}^\emptyset} \cap \gclass{acyc}, \wclass{sr}}$.
However, according to Theorem~\ref{thm:applications}, only the class $\wlmclass{\gclass{acyc}, \wclass{\operatorname{d-comp}} \cap \wclass{dist}}$ is closed and $\wclass{sr} \not\subseteq \wclass{\operatorname{d-comp}}$ in general.
We remark that for every wRTG-LM in $\wlmclass{\gclass{\lalg{CFG}^\emptyset} \cap \gclass{acyc}, \wclass{sr}}$ the set of ASTs is finite.
Hence only finite sums have to be computed and the restriction to d-complete M-monoids is not needed; thus the value computation algorithm is applicable to $\wlmclass{\gclass{\lalg{CFG}^\emptyset} \cap \gclass{acyc}, \wclass{sr}}$.
In summary, if one of the value computation algorithms (a)--(c) is applicable, then Algorithm~\ref{alg:mmonoid} is applicable too.
\begin{sloppypar}
We conclude the investigation of the value computation algorithm by considering three classes of wRTG-LMs whose weight algebra is a particular M-monoid:
$\wlmclass{\gclass{all}, \walg{BD}}$,
$\wlmclass{\gclass{all}, \nbest}$, and
$\wlmclass{\gclass{all}, \wclass{int}}$.
It turns out that not every wRTG-LM in $\wlmclass{\gclass{all}, \walg{BD}}$ is closed (for an example, cf.\ Appendix~\ref{app:bd-restriction}).
Hence we first impose an additional restriction on this particular class.
\end{sloppypar}
We let \gls{wlmclass:bd} be the class of all wRTG-LMs $\overline G = \big((G, \alg L), \walg{BD}, \wt\big)$ in $\wlmclass{\gclass{all}, \walg{BD}}$ with $G = (N, \Sigma, A_0, R)$ such that for every $r \in R$, $\wt(r) = \tc{p}{r}$ with $p < 1$.
We remark that the condition $p < 1$ is sufficient to ensure that each wRTG-LM in $\wlmclass[<1]{\gclass{all}, \walg{BD}}$ is closed, but not necessary.
There may be weaker sufficient conditions which are more difficult to express, though.
\begin{boxtheorem}[restate={[name={}]thmapplicationsp}]\label{thm:applications2}
Each wRTG-LM in each of the following three classes is closed:
$\wlmclass[<1]{\gclass{all}, \walg{BD}}$,
$\wlmclass{\gclass{all}, \nbest}$, and
$\wlmclass{\gclass{all}, \wclass{int}}$.
\end{boxtheorem}
\begin{proof}
For the proof, we refer to Appendix~\ref{app:applications}.
\end{proof}
By Theorem~\ref{thm:vca-terminating}, Corollary~\ref{cor:vca-correct} and Theorem~\ref{thm:applications2}, the value computation algorithm (Algorithm~\ref{alg:mmonoid}) is applicable to each class of wRTG-LMs that is mentioned in Theorem~\ref{thm:applications2} if we restrict their language algebra to $\lalg{CFG}^\emptyset$.
We recall our comparison of algorithms in Table~\ref{tab:comparison2}.
None of the algorithms (a)--(c) is in general applicable to any of the wRTG-LMs of Theorem~\ref{thm:applications2}, but Algorithm~\ref{alg:mmonoid} is applicable to each of them.
\subsection{M-monoid parsing algorithm}
By Theorem~\ref{thm:terminating-correct}, the M-monoid parsing algorithm is applicable to each class of wRTG-LMs that is mentioned in Theorem~\ref{thm:applications} or Theorem~\ref{thm:applications2} if we restrict them to finitely decomposable language algebras.
We continue to discuss two classes of nonlooping wRTG-LMs, each of which represents a particular parsing problem.
First we consider the class $\wlmclass{\gclass{nl} \cap \gclass{\operatorname{fin-dc}}, \wclass{sr}}$.
It contains exactly those wRTG-LMs for which Goodman's algorithm can solve the semiring parsing problem.
By Theorem~\ref{thm:terminating-correct}, the M-monoid parsing algorithm is applicable to each wRTG-LM in the class $\wlmclass{\gclass{nl} \cap \gclass{\operatorname{fin-dc}}, \wclass{sr} \cap \wclass{\operatorname{d-comp}}}$.
By the same argument as in the previous subsection we may extend this result to the class $\wlmclass{\gclass{nl} \cap \gclass{\operatorname{fin-dc}}, \wclass{sr}}$.
Second we consider the class $\wlmclass{\gclass{YIELD} \cap \gclass{nl}, \wclass{ADP}}$ of wRTG-LMs.
It contains all those wRTG-LMs which are specifications of ADP problems.
Clearly $\gclass{YIELD} \subseteq \gclass{\operatorname{fin-dc}}$ and by Lemma~\ref{lem:adp-mmonoid-complete-distributive}, $\wclass{ADP} \subseteq \wclass{dist} \cap \wclass{\operatorname{d-comp}}$.
Thus, by Theorem~\ref{thm:terminating-correct}, the M-monoid parsing algorithm is applicable to each wRTG-LM in $\wlmclass{\gclass{YIELD} \cap \gclass{nl}, \wclass{ADP}}$.
In the end, we come to a more general view on nonlooping wRTG-LMs.
By Theorem~\ref{thm:terminating-correct}, the M-monoid parsing algorithm is terminating and correct for every wRTG-LM whose language model is in $\gclass{nl} \cap \gclass{\operatorname{fin-dc}}$ if its weight algebra is in $\wclass{dist} \cap \wclass{\operatorname{d-comp}}$.
Thus the M-monoid parsing algorithm is applicable to the rather general class $\wlmclass{\gclass{nl} \cap \gclass{\operatorname{fin-dc}}, \wclass{dist} \cap \wclass{\operatorname{d-comp}}}$ of wRTG-LMs.
The following statement summarizes the findings of this section.
\begin{boxcorollary}\label{cor:applicability}
The M-monoid parsing algorithm is applicable to the following classes of wRTG-LMs.
\begin{enumerate}[label=(\arabic*)]
\item $\wlmclass{\gclass{nl} \cap \gclass{\operatorname{fin-dc}}, \wclass{sr}}$ -- this includes every input for which Goodman's semiring parsing algorithm terminates and is correct.
\item $\wlmclass{\gclass{\operatorname{fin-dc}}, \wclass{sup}}$ -- this includes every input of Nederhof's weighted deductive parsing algorithm.
\item $\wlmclass[<1]{\gclass{\operatorname{fin-dc}}, \walg{BD}}$.
\item $\wlmclass{\gclass{\operatorname{fin-dc}}, \nbest}$.
\item $\wlmclass{\gclass{\operatorname{fin-dc}}, \wclass{int}}$ -- thus the M-monoid parsing algorithm can compute the intersection of a grammar and a syntactic object.
\item $\wlmclass{\gclass{YIELD} \cap \gclass{nl}, \wclass{ADP}}$ -- thus the M-monoid parsing algorithm can solve every ADP problem.
\end{enumerate}
\end{boxcorollary}
Like the M-monoid parsing algorithm, the semiring parsing algorithm~\cite{Goodman1999} and the weighted deductive parsing algorithm~\cite{ned03} are only applicable if the language algebra of their input is finitely decomposable.
This is because they use a weighted deduction system in the first phase of their pipeline, too.
By (1) and (2) of Corollary~\ref{cor:applicability}, our approach subsumes semiring parsing and weighted deductive parsing.
The classes of (3) and (4) are essentially instances of the semiring parsing problem to which the M-monoid parsing algorithm is applicable even if the RTG-LM is \emph{looping} (i.e., not nonlooping).
Moreover, their weight algebras are not superior (in which case the weighted deductive parsing algorithm would be applicable).
Likewise (5) and (6) are in general outside the scope of both semiring parsing and weighted deductive parsing.
\subsection{Complexity}
We only discuss the complexity of the second phase of the M-monoid parsing algorithm, i.e., the value computation algorithm (Algorithm~\ref{alg:mmonoid}) because the first phase (canonical weighted deduction system) is executed on demand.
Thus the value computation algorithm is the main determinant of complexity and the canonical weighted deduction system only adds a factor which depends on the language algebra of the input.
Since the weighted parsing algorithms of~\cites{Goodman1999}{ned03} are two-phase pipelines that use a weighted deduction system in their first phase as well, we believe that abstracting from the first phase yields the most significant statement on complexity.
Now we compare the complexity of the value computation algorithm to the complexity of the algorithms of \textcite{Mohri2002}, \textcite{Knuth1977} and the second phase of \textcite{Goodman1999}.
We do this by restricting the inputs of the value computation algorithm to the input scenarios of the other algorithms.
Since there is no complexity bound on the operations in the weight algebra of a wRTG-LM (they can even be undecidable), it is not possible to give a general statement about the complexity of any of the considered algorithms.
Hence we abstract from the costs of these operations.
Mohri's algorithm is applicable to every wRTG-LM in $\wlmclass[\mathrm{closed}]{\gclass{\lalg{CFG}^\emptyset} \cap \gclass{mon}, \wclass{com.\;sr}}$.
Its complexity is polynomial in the maximal number $n_{\max}$ of times the value of a nonterminal changes.
Our value computation algorithm has the same complexity if we restrict its inputs to $\wlmclass[\mathrm{closed}]{\gclass{\lalg{CFG}^\emptyset} \cap \gclass{mon}, \wclass{com.\;sr}}$.
We remark that $n_{\max}$ is in general not polynomial in the size of the input wRTG-LM\@.
Mohri circumvents this problem by specifying the order in which nonterminals are processed for well-known classes of inputs, e.g., acyclic graphs or superior weight algebras.
We can adapt this idea by imposing such an ordering on the iteration over the nonterminals in line~\ref{l:for-loop}.
Knuth's algorithm is applicable to every wRTG-LM in $\wlmclass{\gclass{\lalg{CFG}^\emptyset}, \wclass{sup}}$.
Its complexity is in $O\big(|N'| \cdot (|N'| + |R'|)\big)$.
Our value computation algorithm has the same complexity if we restrict its inputs to $\wlmclass{\gclass{\lalg{CFG}^\emptyset}, \wclass{sup}}$ (assuming that $|N'| \le |R'|$, which is usually the case).
This is because in every iteration of the repeat-until loop (lines~\ref{l:loop}--\ref{l:loop-condition}), at least one nonterminal is assigned a weight which stays the same across all future iterations (since the M-monoid is superior).
The second phase of Goodman's semiring parsing algorithm is applicable to every wRTG-LM in $\wlmclass{\gclass{acyc} \cap \gclass{\lalg{CFG}^\emptyset}, \wclass{sr}}$.
It processes a topological ordering of its input and thus achieves a complexity in $O(|R'|)$.
If we restrict the inputs of our value computation algorithm to $\wlmclass{\gclass{acyc} \cap \gclass{\lalg{CFG}^\emptyset}, \wclass{sr}}$, then its complexity is worse.
We can, however, use the topological ordering of the input in line~\ref{l:for-loop} of the value computation algorithm; then we achieve the same complexity as Goodman.
(Since Goodman requires this ordering to be precomputed, we take the liberty of doing so as well.)
Indeed, Mohri suggests to process acyclic graphs in topological order, too.
Finally, we note that, although our value computation algorithm (Algorithm~\ref{alg:mmonoid}) -- when restricted to the respective inputs -- has the same complexity as the other algorithms, it performs on average more computations than those.
This is because in each iteration of lines~\ref{l:for-loop}--\ref{l:update-v}, the values of all nonterminals are recomputed.
In particular, in the final iteration of the repeat-until loop (lines~\ref{l:loop}--\ref{l:loop-condition}), the value of every nonterminal is unchanged.
We could avoid superfluous computations by using a direct generalization of Mohri's algorithm to the branching case rather than Algorithm~\ref{alg:mmonoid}.
However, the intricacies of such a generalization would exceed the scope of this paper.
\printglossaries
\printindex
\begingroup\setlength\emergencystretch{20pt}
\printbibliography
\endgroup
\appendix
\section{Additional proofs}
In this appendix we have placed full proofs of some of the lemmas and claims of the previous sections.
\subsection{Proofs of statements from the preliminaries}\label{sec:proofs-preliminaries}
We recall the definition of the natural order on pairs of natural numbers.
For every $(a_1,b_1), (a_2,b_2) \in \mathbb N^2$ we have that $(a_1,b_1) \leq (a_2,b_2)$ if one of the following holds:
\begin{enumerate}
\item $a_1 < a_2$, or
\item $a_1 = a_2$ and $b_1 \leq b_2$.
\end{enumerate}
\begin{lemma}
$(\mathbb N^2, \leq)$ is a well-order.
\end{lemma}
\begin{proof}
We start by proving that $(\mathbb N^2, \leq)$ is a total order.
Reflexivity follows directly from condition~(2).
Now let $(a_1,b_1), (a_2,b_2), (a_3,b_3) \in \mathbb N^2$.
For transitivity let $(a_1,b_1) \leq (a_2,b_2)$ and $(a_2,b_2) \leq (a_3,b_3)$; there are four cases in which this may hold:
\begin{enumerate}
\item if $a_1 < a_2$ and $a_2 < a_3$, then by transitivity of $(\mathbb N,<)$ also $a_1 < a_3$, hence $(a_1,b_1) \leq (a_3,b_3)$,
\item if $a_1 < a_2$ and $a_2 = a_3$, then clearly $a_1 < a_3$ and hence $(a_1,b_1) \leq (a_3,b_3)$,
\item if $a_1 = a_2$ and $a_2 < a_3$, then clearly $a_1 < a_3$ and hence $(a_1,b_1) \leq (a_3,b_3)$, and
\item if $a_1 = a_2$ and $a_2 = a_3$, then $a_1 = a_3$ and furthermore, we have that $b_1 \leq b_2$ and $b_2 \leq b_3$ because otherwise $(a_1,b_1) \leq (a_2,b_2)$ and $(a_2,b_2) \leq (a_3,b_3)$ would not hold.
Now we obtain from the transitivity of $(\mathbb N,\leq)$ that $b_1 \leq b_3$ and thus $(a_1,b_1) \leq (a_3,b_3)$.
\end{enumerate}
For antisymmetry, let $(a_1,b_1) \leq (a_2,b_2)$ and $(a_2,b_2) \leq (a_1,b_1)$.
Then we have to distinguish two cases:
\begin{enumerate}
\item if $a_1 = a_2$, then $b_1 \leq b_2$ and $b_2 \leq b_1$, thus by antisymmetry of $(\mathbb N,\leq)$ we obtain $b_1 = b_2$, and
\item otherwise, we have that $a_1 < a_2$ and $a_2 < a_1$ which is equivalent to $a_1 \leq a_2 \land a_2 \leq a_1 \land a_1 \not= a_2$.
By antisymmetry of $(\mathbb N,\leq)$, $a_1 \leq a_2 \land a_2 \leq a_1$ implies $a_1 = a_2$, but this is a contradiction.
\end{enumerate}
For totality there are two cases as well:
\begin{enumerate}
\item if $a_1 = a_2$, then by totality of $(\mathbb N,\leq)$ either $b_1 \leq b_2$ or $b_2 \leq b_1$, thus either $(a_1,b_1) \leq (a_2,b_2)$ or $(a_2,b_2) \leq (a_1,b_1)$, respectively, and
\item otherwise, by totality of $(\mathbb N,<)$ either $a_1 < a_2$ or $a_2 < a_1$, thus either $(a_1,b_1) \leq (a_2,b_2)$ or $(a_2,b_2) \leq (a_1,b_1)$, respectively.
\end{enumerate}
Since $(\mathbb N^2,\leq)$ is reflexive, transitive, antisymmetric, and total, we conclude that it is a total order.
It remains to show that $(\mathbb N^2,<)$, where~$<$ is the strict total ordering induced by~$\leq$, is well-founded.
For this, let $I \subseteq \mathbb N^2$ such that $I \not= \emptyset$.
We define $I_1 = \{ a \mid (a,b) \in I \}$.
Clearly $I_1 \subseteq \mathbb N$ and $I_1 \not= \emptyset$, thus by well-foundedness of $(\mathbb N,<)$ there is an $a' \in I_1$ such that $a \not< a'$ for every $a \in I_1$.
Now we define $I_{1,2} = \{ b \mid (a',b) \in I \}$ and by the same argument obtain that there is a $b' \in I_{1,2}$ such that $b \not< b'$ for every $b \in I_{1,2}$.
By definition of~$I_1$ and~$I_{1,2}$, $(a',b') \in I$ and by definition of~$<$, $(a,b) \not< (a',b')$ for every $(a,b) \in I$.
Thus $(\mathbb N^2,<)$ is well-founded.
\end{proof}
\lempochains*
\begin{proof}
Let $(A, \preceq)$ be a partial order, $n \in \mathbb N$, and $a_1, \dots, a_n \in A$ such that $a_1 \preceq \dots \preceq a_n$ and $a_1 = a_n$.
We show that $a_1 = \dots = a_n$ by contradiction.
For this, assume that there are $i, j \in [n]$ such that $a_i \not= a_j$.
Without loss of generality, we assume that $i < j$.
Then, by transitivity of $\preceq$, $a_i \preceq a_j$.
Since $a_i \not= a_j$, we have that $a_i \prec a_j$.
Then, by transitivity of $\prec$, $a_1 \prec a_j$ and thus $a_1 \prec a_n$.
This contradicts the fact that $a_1 = a_n$.
\end{proof}
\lemheightfinite*
\begin{proof}
The proof is done by induction on~$h$.
For the induction base let $h = 0$.
Then
\[ |\{ t \in \T_\Sigma \mid \height(t) \leq 0 \}| = |\{ \sigma \in \Sigma \mid \rk(\sigma) = 0 \}| \leq |\Sigma| \enspace. \]
For the induction step let $h \in \mathbb N$.
We assume (IH) that for every $h' \leq h$ it holds that $|\{ t \in \T_\Sigma \mid \height(t) \leq h' \}| \leq |\Sigma|^{(\sum_{i=0}^{h'} k^i)}$.
Then
\begin{align*}
&|\{ t \in \T_\Sigma \mid \height(t) \leq h + 1 \}| \\
&\leq |\{ \sigma(t_1,\dots,t_k) \mid \sigma \in \Sigma \ \text{and for every $i \in [k]$: $t_i \in \T_\Sigma$ and $\height(t_i) \leq h$} \}| \\
&= |\Sigma| \cdot |\{ (t_1,\dots,t_k) \mid \text{for every $i \in [k]$: $t_i \in \T_\Sigma$ and $\height(t_i) \leq h$} \}| \\
&= |\Sigma| \cdot |\{t \in \T_\Sigma \mid \height(t) \leq h\}|^k \\
&\leq |\Sigma| \cdot (|\Sigma|^{(\sum_{i=0}^h k^i)})^k \tag{IH} \\
&= |\Sigma| \cdot |\Sigma|^{(\sum_{i=1}^{h+1} k^i)} \\
&= |\Sigma|^{(\sum_{i=0}^{h+1} k^i)} \enspace. \qedhere
\end{align*}
\end{proof}
\lemnatordrt*
\begin{proof}
Let $(\walg K, \oplus, \welem 0)$ be a monoid.
For reflexivity of~$\preceq$, let $\welem k \in \walg K$.
Since~$\welem 0$ is the identity element, we have that $\welem k \oplus \welem 0 = \welem k$.
Thus $\welem k \preceq \welem k$.
For transitivity of~$\preceq$, let $\welem k_1, \welem k_2, \welem k_3 \in \walg K$ such that $\welem k_1 \preceq \welem k_2$ and $\welem k_2 \preceq \welem k_3$.
Then there are $\welem k, \welem k' \in \walg K$ such that $\welem k_1 \oplus \welem k = \welem k_2$ and $\welem k_2 \oplus \welem k' = \welem k_3$.
Thus $\welem k_1 \oplus \welem k \oplus \welem k' = \welem k_2 \oplus \welem k' = \welem k_3$ and hence $\welem k_1 \preceq \welem k_3$.
\end{proof}
\lemnatord*
\begin{proof}
Let $(\walg K, \oplus, \welem 0)$ be a monoid.
First, we assume that~$\walg K$ is naturally ordered.
Let $\welem k_1, \welem k_2, \welem k_3 \in \walg K$ with $\welem k_1 = \welem k_1 \oplus \welem k_2 \oplus \welem k_3$.
Then by definition of~$\preceq$, $\welem k_1 \oplus \welem k_2 \preceq \welem k_1 \oplus \welem k_2 \oplus \welem k_3$ and $\welem k_1 = \welem k_1 \oplus \welem k_2 \oplus \welem k_3 \preceq \welem k_1 \oplus \welem k_2$.
Thus, as~$\preceq$ is antisymmetric, $\welem k_1 = \welem k_1 \oplus \welem k_2$.
Second, we assume that for every $\welem k_1, \welem k_2, \welem k_3 \in \walg K$ with $\welem k_1 = \welem k_1 \oplus \welem k_2 \oplus \welem k_3$ it holds that $\welem k_1 = \welem k_1 \oplus \welem k_2$.
By Lemma~\ref{lem:natord-refl-trans}, $\preceq$ is reflexive and transitive.
For antisymmetry, let $\welem k_1, \welem k_2 \in \walg K$ such that $\welem k_1 \preceq \welem k_2$ and $\welem k_2 \preceq \welem k_1$.
Then there are $\welem k, \welem k'$ such that $\welem k_1 \oplus \welem k = \welem k_2$ and $\welem k_2 \oplus \welem k' = \welem k_1$.
Hence
\begin{align*}
\welem k_1 &= \welem k_2 \oplus \welem k' = \welem k_1 \oplus \welem k \oplus \welem k' \\
&= \welem k_1 \oplus \welem k \tag{by assumption} \\
&= \welem k_2 \enspace.
\end{align*}
Thus~$\preceq$ is antisymmetric and~$\walg K$ is naturally ordered.
\end{proof}
\lemdcompnatord*
\begin{proof}
Let $(\walg K, \oplus, \welem 0, \infsum)$ be a d-complete monoid.
Furthermore, we let $\welem k, \welem l_1, \welem l_2 \in \walg K$ such that $\welem k \oplus \welem l_1 \oplus \welem l_2 = \welem k$.
We define the family $(\welem k_i \mid i \in \mathbb N)$ of elements of~$\walg K$ such that for every $i \in \mathbb N$, $\welem k_i = \welem l_1 \oplus \welem l_2$.
Then, for every $i \in \mathbb N$, $\welem k \oplus \welem k_i = \welem k$.
Thus, by Lemma~\ref{lem:d-complete}~(ii),
\begin{equation}\label{eq:lem-dcn}
\welem k \oplus \infsum_{i \in \mathbb N} \welem k_i = \welem k \enspace.
\end{equation}
Hence
\begin{align*}
\welem k &= \welem k \oplus \infsum_{i \in \mathbb N} \welem k_i \tag{Eq.\,\ref{eq:lem-dcn}} \\
&= \welem k \oplus \infsum_{i \in \mathbb N} (\welem l_1 \oplus \welem l_2)
= \welem k \oplus \infsum_{i \in \mathbb N} \welem l_1 \oplus \infsum_{i \in \mathbb N} \welem l_2
= \welem k \oplus \welem l_1 \oplus \infsum_{i \in \mathbb N \setminus \{ 0 \}} \welem l_1 \oplus \infsum_{i \in \mathbb N} \welem l_2 \\
&= \welem k \oplus \welem l_1 \oplus \infsum_{i \in \mathbb N} \welem l_1 \oplus \infsum_{i \in \mathbb N} \welem l_2
= \welem k \oplus \welem l_1 \oplus \infsum_{i \in \mathbb N} (\welem l_1 \oplus \welem l_2)
= \welem k \oplus \welem l_1 \oplus \infsum_{i \in \mathbb N} \welem k_i
= \welem k \oplus \infsum_{i \in \mathbb N} \welem k_i \oplus \welem l_1 \\
&= \welem k \oplus \welem l_1 \enspace. \tag{Eq.\,\ref{eq:lem-dcn}}
\end{align*}
Thus, by Lemma~\ref{lem:natord-subsume}, $\walg K$ is naturally ordered.
\end{proof}
\lemiidc*
\begin{proof}
Let $(\walg K, \oplus, \welem 0, \infsum)$ be a completely idempotent monoid.
Furthermore, we let $\welem k \in \walg K$ and $(\welem k_i \mid i \in \mathbb N)$ be a family of elements of~$\walg K$ such that $\welem k \oplus \welem k_i = \welem k$ for every $i \in \mathbb N$.
Then
\[ \welem k \oplus \infsum_{i \in \mathbb N} \welem k_i = \infsum_{i \in \mathbb N} \welem k \oplus \infsum_{i \in \mathbb N} \welem k_i = \infsum_{i \in \mathbb N} (\welem k \oplus \welem k_i) = \infsum_{i \in \mathbb N} \welem k = \welem k \enspace. \]
Thus, by Lemma~\ref{lem:d-complete}~(ii), $\walg K$ is d-complete.
\end{proof}
\subsection{Superior M-monoids}\label{sec:proof-superior-mmonoids}
\begin{lemma*}[cf.\ Section~\ref{sec:superior-mmonoids}]
The tropical M-monoid $(\mathbb{R}_0^\infty,\min,\infty,\Omega_+,\inf)$ is superior.
\end{lemma*}
\begin{proof}
We show that for every $c \in \mathbb{R}_0^\infty$ and $k \in \mathbb N$, $\mul^{(k)}_c \in \Omega_+$ is $\le$-superior.
Let $k,i \in \mathbb N$ and $a,a_1,\dots,a_k,c \in \mathbb R_0^\infty$.
\begin{enumerate}
\item Assume that $a \le a_i$.
Then
\begin{align*}
\mul_c^{(k)}(a_1,\dots,a_{i-1},a,a_{i+1},\dots,a_k) &= c + a_1 + \ldots + a_{i-1} + a + a_{i+1} + \ldots + a_k \\
&\le c + a_1 + \ldots + a_{i-1} + a_i + a_{i+1} + \ldots + a_k \tag{*} \\
&= \mul_c^{(k)}(a_1,\dots,a_{i-1},a_i,a_{i+1},\dots,a_k) \enspace,
\end{align*}
where (*) holds because
\[ c + a_1 + \ldots + a_{i-1} + a + a_{i+1} + \ldots + a_k \le c + a_1 + \ldots + a_{i-1} + a_i + a_{i+1} + \ldots + a_k \]
due to the monotonicity of~$+$.
\item Observe that $a' \le a' + b'$ for every $a',b' \in \mathbb R_0^\infty$.
Hence $\max \{ c,a_1,\dots,a_k \} \le c + a_1 + \ldots + a_k$.
Then
\[
\max \{ a_1,\dots,a_k \} \le \max \{ c,a_1,\dots,a_k \}
\le c + a_1 + \ldots + a_k
= \mul_c^{(k)}(a_1,\dots,a_k) \enspace. \qedhere
\]
\end{enumerate}
\end{proof}
\begin{lemma*}[cf.\ Section~\ref{sec:superior-mmonoids}]
The Viterbi M-monoid $(\mathbb{R}_0^1,\max,0,\Omega_\cdot,\sup)$ is superior.
\end{lemma*}
\begin{proof}
Since superior M-monoids are defined to be of the form $(\walg K, \min, \mathbb 0, \Omega)$ for some total order $\walg K$, we have to refer to the inverse total order on ${\mathbb R_0^1}$, $({\mathbb R_0^1}, \ge)$.
We show that for every $b \in \mathbb{R}_0^1$ and $k \in \mathbb N$, $\mul^{(k)}_b \in \Omega_\cdot$ is $\ge$-superior.
Let $b,a,a_1,\ldots,a_k \in \mathbb{R}_0^1$.
\begin{enumerate}
\item Assume that $a \ge a_i$.
Then, by monotonicity of $\cdot$ in ${\mathbb R_0^1}$,
\[
b \cdot a_1 \cdot \ldots \cdot a_{i-1} \cdot a \cdot a_{i+1} \cdot \ldots \cdot a_k \ \ge \
b \cdot a_1 \cdot \ldots \cdot a_{i-1} \cdot a_i \cdot a_{i+1} \cdot \ldots \cdot a_k \enspace,
\]
which proves the first condition.
\item Since $0 \le a_i \le 1$ for each $a_i$ and also $0 \le b \le 1$, we have for each $i \in [k]$ that $b \cdot a_1 \cdot \ldots \cdot a_i \cdot \ldots \cdot a_k \le a_i$.
Thus $\min \{a_1,\ldots,a_k\} \ \ge \ b \cdot a_1 \cdot \ldots \cdot a_i \cdot \ldots \cdot a_k$ which proves the second condition. \qedhere
\end{enumerate}
\end{proof}
\subsection{Best derivation M-monoid is distributive and d-complete}\label{sec:proof-best-derivation-mmonoid}
\begin{lemma*}[cf.\ Example~\ref{ex:best-derivation-mmonoid}]\label{lem:best-derivation-mmonoid}
The best derivation M-monoid is d-complete and distributive.
Furthermore, $(0, \emptyset)$ is absorptive.
\end{lemma*}
\begin{proof}
\begin{sloppypar}
Clearly the best derivation M-monoid is a complete M-monoid.
In order to show distributivity of~$\Omegav$ over $\maxv$, we let $p \in {\mathbb R_0^1}$, $r \in R$ with $\rk(r) = k$, $i \in [k]$, $\tc{p}{r} \in \Omegav$, and $(p', D'), (p_1, D_1), \dots, (p_k, D_k) \in \mathbb R_0^1 \times \mathcal P(\T_R)$.
Then there are three cases:
\end{sloppypar}
\begin{enumerate}
\item if $p_i < p'$, then
\begin{align*}
&\tc{p}{r} \big( (p_1, D_1), \dots, (p_{i-1}, D_{i-1}), \maxv \big( (p_i, D_i), (p', D') \big), (p_{i+1}, D_{i+1}), \dots, (p_k, D_k) \big) \\
&= \tc{p}{r} \big( (p_1, D_1), \dots, (p_{i-1}, D_{i-1}), (p', D'), (p_{i+1}, D_{i+1}), \dots, (p_k, D_k) \big)
\intertext{and since by monotonicity of~$\cdot$, $p \cdot p_1 \cdot \ldots \cdot p_k < p \cdot p_1 \cdot \ldots \cdot p_{i-1} \cdot p' \cdot p_{i+1} \cdot \ldots \cdot p_k$}
&= \begin{aligned}[t]
\maxv \big(&\tc{p}{r}((p_1, D_1), \dots, (p_k, D_k)), \\
&\tc{p}{r}((p_1, D_1), \dots, (p_{i-1}, D_{i-1}), (p', D'), (p_{i+1}, D_{i+1}), \dots, (p_k, D_k)) \big) \enspace.
\end{aligned}
\end{align*}
\item if $p' < p_i$, then we obtain
\begin{align*}
&\tc{p}{r} \big( (p_1, D_1), \dots, (p_{i-1}, D_{i-1}), \maxv \big( (p_i, D_i), (p', D') \big), (p_{i+1}, D_{i+1}), \dots, (p_k, D_k) \big) \\
&= \begin{aligned}[t]
\maxv \big(&\tc{p}{r}((p_1, D_1), \dots, (p_k, D_k)), \\
&\tc{p}{r}((p_1, D_1), \dots, (p_{i-1}, D_{i-1}), (p', D'), (p_{i+1}, D_{i+1}), \dots, (p_k, D_k)) \big)
\end{aligned}
\end{align*}
analogously to the first case, and
\item if $p' = p_i$, then
\begin{align*}
&\tc{p}{r} \big( (p_1, D_1), \dots, (p_{i-1}, D_{i-1}), \maxv \big( (p_i, D_i), (p', D') \big), (p_{i+1}, D_{i+1}), \dots, (p_k, D_k) \big) \\
&= \tc{p}{r} \big( (p_1, D_1), \dots, (p_{i-1}, D_{i-1}), (p_i, D_i \cup D'), (p_{i+1}, D_{i+1}), \dots, (p_k, D_k) \big) \enspace,
\intertext{now $p \cdot p_1 \cdot \ldots \cdot p_k = p \cdot p_1 \cdot \ldots \cdot p_{i-1} \cdot p' \cdot p_{i+1} \cdot \ldots \cdot p_k$, hence}
&= \begin{aligned}[t]
\maxv(&\tc{p}{r}((p_1, D_1), \dots, (p_k, D_k)), \\
&\tc{p}{r}((p_1, D_1), \dots, (p_{i-1}, D_{i-1}), (p', D'), (p_{i+1}, D_{i+1}), \dots, (p_k, D_k))) \enspace.
\end{aligned}
\end{align*}
For the last step we remark that
\[ \begin{aligned}[t]
&\{ r(d_1, \dots, d_k) \mid d_1 \in D_1, \dots, d_{i-1} \in D_{i-1}, d_i \in D_i \cup D', d_{i+1} \in D_{i+1}, \dots, d_k \in D_k \} \\
&= \begin{aligned}[t]
&\{ r(d_1, \dots, d_k) \mid d_1 \in D_1, \dots, d_k \in D_k \} \cup {} \\
&\{ r(d_1, \dots, d_k) \mid d_1 \in D_1, \dots, d_{i-1} \in D_{i-1}, d_i \in D', d_{i+1} \in D_{i+1}, \dots, d_k \in D_k \} \enspace.
\end{aligned}
\end{aligned} \]
\end{enumerate}
In order to show that~$\mathbb{BD}$ is d-complete, we show that it is completely idempotent.
For this, let~$I$ be a countable set and $(p, D) \in \mathbb R_0^1 \times \mathcal P(\T_R)$.
Then
\begin{align*}
\infsum_{i \in I} (p, D) &= \Big( \sup \{ p \mid i \in I \}, \bigcup_{\substack{i \in I: \\p = \sup \{ p \mid i \in I \}}} D \Big) \\
&= \Big( p, \bigcup_{i \in I} D \Big) \\
&= (p, D) \enspace.
\end{align*}
Thus, by Lemma~\ref{lem:inf-idp-d-complete}, it is d-complete.
In order to show absorptivity of $(0, \emptyset)$, we let $p \in \mathbb R_0^1$, $r \in R$ with $\rk(r) = k$, $\tc{p}{r} \in \Omegav$, and $(p_1, D_1), \dots, (p_k, D_k) \in \mathbb R_0^1 \times \mathcal P(\T_R)$.
Now, if there is an $i \in [k]$ such that $(p_i, D_i) = (0, \emptyset)$, then $p \cdot p_1 \cdot \ldots \cdot p_{i-1} \cdot 0 \cdot p_{i+1} \cdot \ldots \cdot p_k = 0$ by absorptivity of~$0$ and
\[ \{ r(d_1, \dots, d_k) \mid d_1 \in D_1, \dots, d_{i-1} \in D_{i-1}, d_i \in \emptyset, d_{i+1} \in D_{i+1}, \dots, d_k \in D_k \} = \emptyset \enspace, \]
hence
\[ \tc{p}{r} \big( (p_1, D_1), \dots, (p_{i-1}, D_{i-1}), (0, \emptyset), (p_{i+1}, D_{i+1}), \dots, (p_k, D_k) \big) = (0, \emptyset) \enspace. \qedhere \]
\end{proof}
\subsection{N-best M-monoid is distributive and d-complete}\label{sec:proof-nbest-mmonoid}
\begin{lemma*}[cf.\ Example~\ref{ex:nbest-mmonoid}]\label{lem:nbest-mmonoid}
The n-best M-monoid is distributive and d-complete and $(\underbrace{0, \dots, 0}_{\text{$n$ times}})$ is absorptive.
\end{lemma*}
\begin{proof}
Clearly the n-best M-monoid is a complete M-monoid.
For distributivity of~$\Omega_n$ over $\maxn$ we first show that~$\cdot_n$ is commutative and distributive over $\maxn$.
Commutativity of~$\cdot_n$ follows from the commutativity of~$\cdot$ in~${\mathbb R_0^1}$ and for distributivity of~$\cdot_n$ over $\maxn$ we let $(a_1, \dots, a_n), (b_1, \dots, b_n), (c_1, \dots, c_n) \in \nbest$ and $f = \takenbest \big( (a_1, \dots, a_n, b_1, \dots, b_n) \big)$.
Then
\begin{align*}
&\maxn \big( (a_1, \dots, a_n), (b_1, \dots, b_n) \big) \cdot_n (c_1, \dots, c_n) \\
&= (f_1, \dots, f_n) \cdot_n (c_1, \dots, c_n) \\
&= \takenbest \big( (f_1 \cdot c_1, \dots, f_1 \cdot c_n, \dots f_n \cdot c_1, \dots, f_n \cdot c_n) \big) \\
\intertext{and by monotonicity of $\cdot$ in ${\mathbb R_0^1}$}
&= \takenbest \big( (a_1 \cdot c_1, \dots, a_1 \cdot c_n, \dots a_n \cdot c_1, \dots, a_n \cdot c_n, b_1 \cdot c_1, \dots, b_1 \cdot c_n, \dots b_n \cdot c_1, \dots, b_n \cdot c_n) \big) \\
&= \maxn \big( (a_1 \cdot c_1, \dots, a_1 \cdot c_n, \dots a_n \cdot c_1, \dots, a_n \cdot c_n), (b_1 \cdot c_1, \dots, b_1 \cdot c_n, \dots b_n \cdot c_1, \dots, b_n \cdot c_n) \big) \\
&= \begin{aligned}[t]
\maxn \big(&\takenbest((a_1 \cdot c_1, \dots, a_1 \cdot c_n, \dots a_n \cdot c_1, \dots, a_n \cdot c_n)), \\
&\takenbest((b_1 \cdot c_1, \dots, b_1 \cdot c_n, \dots b_n \cdot c_1, \dots, b_n \cdot c_n)) \big)
\end{aligned} \\
&= \maxn \big( (a_1, \dots, a_n) \cdot_n (c_1, \dots, c_n), (b_1, \dots, b_n) \cdot_n (c_1, \dots, c_n) \big) \enspace.
\end{align*}
\begin{sloppypar}
We thus obtain for every $k \in \mathbb N$, $\welem k \in \nbest$, $\mulnkk \in \Omega_n$, $i \in [k]$, and $(a_1, \dots, a_n), (a_{1,1}, \dots, a_{1,n}), \dots, (a_{k,1}, \dots, a_{k,n}) \in \nbest$
\end{sloppypar}
\begin{align*}
&\begin{aligned}[t]
\mulnkk \big(&(a_{1,1}, \dots, a_{1,n}), \dots, (a_{i-1,1} \dots, a_{i-1,n}), \\
&\maxn((a_{i,1}, \dots, a_{i,n}), (a_1, \dots, a_n)), \\
&(a_{i+1,1}, \dots, a_{i+1,n}), \dots, (a_{k,1}, \dots, a_{k,n}) \big)
\end{aligned} \\
&= \begin{aligned}[t]
(\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}}) &\cdot_n (a_{1,1}, \dots, a_{1,n}) \cdot_n \ldots \cdot_n (a_{i-1,1}, \dots, a_{i-1,n}) \\
&\cdot_n \maxn \big( (a_{i,1}, \dots, a_{i,n}), (a_1, \dots, a_n) \big) \\
&\cdot_n (a_{i+1,1}, \dots, a_{i+1,n}) \cdot_n \ldots \cdot_n (a_{k,1}, \dots, a_{k,n})
\end{aligned}
\intertext{and by commutativity of $\cdot_n$}
&= \begin{aligned}[t]
\maxn \big( (a_{i,1}, \dots, a_{i,n}), (a_1, \dots, a_n) \big) &\cdot_n (\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}}) \\
&\cdot_n (a_{1,1}, \dots, a_{1,n}) \cdot_n \ldots \cdot_n (a_{i-1,1}, \dots, a_{i-1,n}) \\
&\cdot_n (a_{i+1,1}, \dots, a_{i+1,n}) \cdot_n \ldots \cdot_n (a_{k,1}, \dots, a_{k,n})
\end{aligned}
\intertext{and by distributivity of $\cdot_n$ over $\maxn$}
&= \begin{aligned}[t]
\maxn \big( &(a_{i,1}, \dots, a_{i,n}) \cdot_n (\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}}) \\
&\cdot_n (a_{1,1}, \dots, a_{1,n}) \cdot_n \ldots \cdot_n (a_{i-1,1}, \dots, a_{i-1,n}) \\
&\cdot_n (a_{i+1,1}, \dots, a_{i+1,n}) \cdot_n \ldots \cdot_n (a_{k,1}, \dots, a_{k,n}), \\
&(a_1, \dots, a_n) \cdot_n (\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}}) \\
&\cdot_n (a_{1,1}, \dots, a_{1,n}) \cdot_n \ldots \cdot_n (a_{i-1,1}, \dots, a_{i-1,n}) \\
&\cdot_n (a_{i+1,1}, \dots, a_{i+1,n}) \cdot_n \ldots \cdot_n (a_{k,1}, \dots, a_{k,n}) \big)
\end{aligned}
\intertext{and by commutativity of $\cdot_n$}
&= \begin{aligned}[t]
\maxn \big(&(\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}}) \cdot_n (a_{1,1}, \dots, a_{1,n}) \cdot_n \ldots \cdot_n (a_{k,1}, \dots, a_{k,n}), \\
&(\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}}) \cdot_n (a_{1,1}, \dots, a_{1,n}) \cdot_n \ldots \cdot_n (a_{i-1,1}, \dots, a_{i-1,n}) \\
&\cdot_n (a_1, \dots, a_n) \\
&\cdot_n (a_{i+1,1}, \dots, a_{i+1,n}) \cdot_n \ldots \cdot_n (a_{k,1}, \dots, a_{k,n}) \big)
\end{aligned} \\
&= \begin{aligned}[t]
\maxn \big(&\mulnkk((a_{1,1}, \dots, a_{1,n}), \dots, (a_{k,1}, \dots, a_{k,n})), \\
&\mulnkk(\begin{aligned}[t]
&(a_{1,1}, \dots, a_{1,n}), \dots, (a_{i-1,1}, \dots, a_{i-1,n}), \\
&(a_1, \dots, a_n), \\
&(a_{i+1,1}, \dots, a_{i+1,n}), \dots, (a_{k,1}, \dots, a_{k,n})) \big) \enspace.
\end{aligned}
\end{aligned}
\end{align*}
Now we show that the n-best M-monoid is d-complete.
For this, let $(a_1, \dots, a_n) \in \nbest$ and $\big((a_{i,1}, \dots, a_{i,n}) \mid i \in I\big)$ be an $I$-indexed family over $\nbest$ such that for every $i \in I$, $(a_1, \dots, a_n) \oplus (a_{i,1}, \dots, a_{i,n}) = (a_1, \dots, a_n)$.
Then for every $i \in I$ we have that $a_n \ge a_{i,1}$.
Thus $a_n \ge \sup \{ a_{i,j} \mid i \in I, j \in [n] \}$.
Let~$\psi: I \to \mathbb N$ be a bijective mapping.
We define the family $(f_i \mid i \in \mathbb N)$ such that for each $i \in [n]$, $f_i = a_i$ and for each $i \in \mathbb N \setminus [n]$, $f_i = a_{\lfloor i / n \rfloor, i \bmod n + 1}$.
Then $\takenbest((f_i \mid i \in \mathbb N)) = (a_1, \dots, a_n)$.
Thus
\[ (a_1, \dots, a_n) \oplus \infsum[\maxn]_{i \in I} (a_{i,1}, \dots, a_{i,n}) = (a_1, \dots, a_n) \enspace. \]
Then, by Lemma~\ref{lem:d-complete}~(ii), the n-best M-monoid is d-complete.
In order to show that $(\underbrace{0, \dots, 0}_{\text{$n$ times}})$ is absorptive for~$\Omega_n$, we first show that it is absorptive for~$\cdot_n$.
For this, we let $(a_1, \dots, a_n) \in \nbest$.
Then
\begin{align*}
(a_1, \dots, a_n) \cdot_n (\underbrace{0, \dots, 0}_{\text{$n$ times}}) &= \takenbest(\underbrace{a_1 \cdot 0, \dots, a_1 \cdot 0}_{\text{$n$ times}}, \dots, \underbrace{a_n \cdot 0, \dots, a_n \cdot 0}_{\text{$n$ times}}) \\
&= \takenbest(\underbrace{0, \dots, 0}_{\text{$n^2$ times}})
\tag{$0$ is absorptive for $\cdot$} \\
&= (\underbrace{0, \dots, 0}_{\text{$n$ times}})
\end{align*}
Now absorptivity of $(\underbrace{0, \dots, 0}_{\text{$n$ times}})$ for $\Omega_n$ is easy to see.
\end{proof}
\subsection{Definition of closed weighted RTG-based language models}\label{app:closed-definition}
\lemtranswf*
\begin{proof}
Let $D \subseteq \T_R$ with $D \not= \emptyset$.
We define the set $I = \{ \height(d) \mid d \in D \}$.
Clearly $I \subseteq \mathbb N$ and $I \not= \emptyset$.
Thus, as $(\mathbb N, <)$ is well-founded, there is an $i \in I$ such that for every $i' \in I$, $i' \not< i$.
We choose an arbitrary $d \in D$ such that $\height(d) = i$.
We show that for every $d' \in D$ it does not hold that $d' (\vdash^+)^{-1} d$ by contradiction.
For this, assume that there is a $d' \in D$ such that $d' (\vdash^+)^{-1} d$.
Then $d \vdash^+ d'$ and thus by definition of~$\vdash$, $\height(d') < \height(d) = i$.
This contradicts the fact that for every $i' \in I$, $i' \not< i$.
\end{proof}
\lemcutoutsubset*
\begin{proof}
Let $d, d' \in \T_R$ such than $d \vdash^+ d'$.
Then
\begin{align*}
\cotrees(d') &= \{ d'' \in \T_R \mid d' \vdash^+ d'' \} \\
&\subset \{ d'' \in \T_R \mid d' \vdash^+ d'' \} \cup \{ d \} \\
\intertext{and as $d \vdash^+ d'$, by transitivity of~$\vdash^+$}
&\subseteq \{ d'' \in \T_R \mid d \vdash^+ d'' \} \\
&= \cotrees(d) \enspace. \qedhere
\end{align*}
\end{proof}
\subsection{Properties of closed weighted RTG-based language models}\label{app:closed-properties}
This subappendix contains the full proofs of Lemma~\ref{lem:closed-bigger-trees'}, Theorem~\ref{thm:outside-trees-subsumed} and Lemma~\ref{lem:outside-trees-spawned}.
We start with several auxiliary statements.
\begin{lemma}\label{lem:trees-bounded-height'}
For every $c \in \mathbb N$ there is an $n \in \mathbb N$ such that for each $d \in \T_R^{(c)}$, $\height(d) < n$.
\end{lemma}
\begin{proof}
We start with an auxiliary statement:
for every $\rho \in R^*$ with $|\rho| = |R| + 1$ it holds that~$\rho$ is cyclic.
For this, let $\rho \in R^*$ with $|\rho| = |R| + 1$.
Since~$R$ is finite, there are $i,j \in [|R| + 1]$ with $i \not= j$ such that $\rho_i = \rho_j$, hence~$\rho$ is cyclic.
Next we show that for every $c \in \mathbb N$, there is an $n \in \mathbb N$ such that for each $c' \in \mathbb N$ with $c' \leq c$ and $\rho \in R^*$ which is $c'$-cyclic it holds that $|\rho| < n$.
Note that the number of strings $\rho \in R^*$ with $|\rho| = |R| + 1$ is $|R|^{|R|+1}$; we denote this number by~$m$.
Now let $n = (c + 1) \cdot m$.
We show that for every $\rho \in R^*$ with $|\rho| = n$ it holds that~$\rho$ is $c'$-cyclic for some $c' > c$.
Clearly such~$\rho$ is cyclic, hence we let $c' \in \mathbb N$ with $c' \leq c$ and assume that there is a $\rho \in R^*$ such that $|\rho| = n$ and~$\rho$ is $c'$-cyclic.
We let
\[ \rho = \underbracket{\rho_1 \dots \rho_{m\vphantom{()}}} \dots \underbracket{\rho_{c \cdot m + 1} \dots \rho_{(c+1) \cdot m}} \enspace, \]
where $\rho_i \in R^*$ and $|\rho_i| = |R| + 1$ for every $i \in [n]$.
Since $n / m = c + 1$, there is an $i \in [n]$ such that~$\rho_i$ occurs at least $c + 1$ times in~$\rho$.
Furthermore, since $|\rho_i| = |R| + 1$,~$\rho_i$ is cyclic.
Thus there is an elementary cycle which occurs at least $c + 1$ times in~$\rho$, which contradicts our assumption that~$\rho$ is $c'$-cyclic for some $c' \leq c$.
As the same argument can be made for any $n' \geq n$, we conclude that for every $\rho \in R^*$ such that~$\rho$ is $c'$-cyclic it holds that $|\rho| < n$.
Then for every $d \in \T_R^{(c)}$ and $p \in \pos(d)$ it holds that $|p| < n - 1$ and thus $\height(d) < n - 1$, which proves the lemma.
\end{proof}
\begin{lemma}\label{lem:trc-finite}
For every $c \in \mathbb N$ the set~$\T_R^{(c)}$ is finite.
\end{lemma}
\begin{proof}
This follows directly from Lemmas~\ref{lem:trees-bounded-height'} and~\ref{lem:fixed-height-finite-trees'}.
\end{proof}
\lemgenclosed*
\begin{proof}
The proof is done by induction on~$c'$.
For the induction base, let $c' = c + 1$, then the statement of the lemma holds by Equation~\eqref{eq:c-closed}.
For the induction step, let $c' \geq c + 1$.
We assume that for each $d' \in (\T_R)$ and elementary cycle $w' \in R^*$ such that there is a leaf $p \in \pos(d')$ which is $(c',w')$-cyclic the following holds (IH):
\[ \wthom{d'} \oplus \bigoplus_{d'' \in \cotrees(d', w')} \wthom{d''} = \bigoplus_{d'' \in \cotrees(d', w')} \wthom{d''} \enspace. \]
Now we let $d \in (\T_R)$ and $w \in R^*$ be an elementary cycle such that there is a leaf $p \in \pos(d)$ which is $(c'+1,w)$-cyclic.
We let $v_0, \dots, v_{c'+1} \in R^*$ such that $\seq(d, p) = v_0 w v_1 \dots w v_{c'+1}$, $w = r_1 \dots r_m$ with $r_i \in R$ for every $i \in [m]$, $r_m = \big(A \to \sigma(A_1, \dots, A_k)\big)$, $n = |v_0| + |w| + 1$, $s \in [k]$ such that $p_n = s$, and
\[ D = \{ d' \in \cotrees(d, w) \mid d'[x_{s,A_s}]_{p_{1 \isep n}} = d[x_{s,A_s}]_{p_{1 \isep n}} \} \enspace. \]
As $D \subseteq \cotrees(d, w)$
\begin{align*}
&\wthom{d} \oplus \bigoplus_{d' \in \cotrees(d, w)} \wthom{d'} \\
&= \wthom{d} \oplus \bigoplus_{d' \in D} \wthom{d'} \oplus \bigoplus_{d' \in \cotrees(d, w) \setminus D} \wthom{d'} \\
\intertext{and since $\Omega$ distributes over $\oplus$}
&= \wt(d)[x_{s,A_s}]_{p_{1 \isep n}} \left(\wthom{d|_{p_n}} \oplus \bigoplus_{d' \in D} \wthom{d'|_{p_n}}\right)_{\walg K} \oplus \bigoplus_{d' \in \cotrees(d, w) \setminus D} \wthom{d'}
\intertext{and since $\{ d'|_{p_n} \mid d' \in D \} = \cotrees(d|_{p_n}, w)$, by IH}
&= \wt(d)[x_{s,A_s}]_{p_{1 \isep n}}\left(\bigoplus_{d' \in D} \wthom{d'|_{p_n}}\right)_{\walg K} \oplus \bigoplus_{d' \in \cotrees(d, w) \setminus D} \wthom{d'} \\
&= \bigoplus_{d' \in D} \wthom{d'} \oplus \bigoplus_{d' \in \cotrees(d, w) \setminus D} \wthom{d'} \\
&= \bigoplus_{d' \in \cotrees(d, w)} \wthom{d'} \enspace. \qedhere
\end{align*}
\end{proof}
\begin{lemma}\label{lem:closed-bigger-trees-any}
For every $d \in (\T_R)$, every elementary cycle $w \in R^*$, and every $c' \in \mathbb N$ with $c' \geq c + 1$ such that some leaf $p \in \pos(d)$ is $(c',w)$-cyclic the following holds:
\[ \wthom{d} \oplus \bigoplus_{d' \in \cotrees(d, w)} \wthom{d'} = \bigoplus_{d' \in \cotrees(d, w)} \wthom{d'} \enspace. \]
\end{lemma}
\begin{proof}
This is a consequence of Lemma~\ref{lem:closed-bigger-trees'}.
\end{proof}
\begin{lemma}\label{lem:outside-cutout-trees-subsumed}
For every $m \in \mathbb N$, $d \in \T_R \setminus \T_R^{(c)}$, and $B \subseteq \cotrees(d) \setminus \T_R^{(c)}$ with $|B| = m$ the following holds:
\[ \bigoplus_{d' \in \cotrees(d)} \wthom{d'} = \bigoplus_{d' \in \cotrees(d) \setminus B} \wthom{d'} \enspace.\]
\end{lemma}
\begin{proof}
Let $d \in \T_R$ and $m \in \mathbb N$.
The proof is done by induction on~$m$.
For the induction base, let $m = 0$.
Then $B = \emptyset$ and for every $d \in \T_R \setminus \T_R^{(c)}$
\[ \bigoplus_{d' \in \cotrees(d)} \wthom{d'} = \bigoplus_{d' \in \cotrees(d) \setminus \emptyset} \wthom{d'} \enspace.\]
For the induction step, let $m \in \mathbb N$.
We assume (IH) that for every $d \in \T_R \setminus \T_R^{(c)}$ and $B \subseteq \cotrees(d) \setminus \T_R^{(c)}$ with $|B| = m$ it holds that
\[ \bigoplus_{d' \in \cotrees(d)} \wthom{d'} = \bigoplus_{d' \in \cotrees(d) \setminus B} \wthom{d'} \enspace.\]
Now let $B \subseteq \cotrees(d) \setminus \T_R^{(c)}$ such that $|B| = m + 1$.
Then, by Lemma~\ref{lem:transition-well-founded}, there is a $d' \in B$ such that for every $d'' \in B$ it does not hold that $d'' {(\vdash^+)}^{-1} d'$ and thus $d' \not{\vdash^+} d''$.
Then
\begin{align*}
&\bigoplus_{d'' \in \cotrees(d) \setminus B} \wthom{d''} \\
&= \bigoplus_{d'' \in \cotrees(d') \setminus B} \wthom{d''} \oplus \bigoplus_{d'' \in (\cotrees(d) \setminus \cotrees(d')) \setminus B} \wthom{d''}
\tag{Lemma~\ref{lem:subtree-cotrees-subset}} \\
\intertext{and for every $d'' \in B$, as $d' \not{\vdash^+} d''$, we have that $d'' \not\in \cotrees(d')$; thus}
&= \bigoplus_{d'' \in \cotrees(d')} \wthom{d''} \oplus \bigoplus_{d'' \in (\cotrees(d) \setminus \cotrees(d')) \setminus B} \wthom{d''} \\
&= \bigoplus_{d'' \in \cotrees(d')} \wthom{d''} \oplus \wthom{d'} \oplus \bigoplus_{d'' \in (\cotrees(d) \setminus \cotrees(d')) \setminus B} \wthom{d''}
\tag{Lemma~\ref{lem:closed-bigger-trees-any}} \\
&= \bigoplus_{d'' \in \cotrees(d') \setminus (B \setminus \{ d' \})} \wthom{d''} \oplus \bigoplus_{d'' \in (\cotrees(d) \setminus \cotrees(d')) \setminus (B \setminus \{ d' \})} \wthom{d''} \\
&= \bigoplus_{d'' \in \cotrees(d) \setminus (B \setminus \{ d' \})} \wthom{d''} \\
&= \bigoplus_{d'' \in \cotrees(d)} \wthom{d''} \enspace. \tag*{(IH) \qedhere}
\end{align*}
\end{proof}
\begin{lemma}\label{lem:cutout-trees-subsume'}
For every $d \in \T_R \setminus \T_R^{(c)}$ the following holds:
\[ \wthom{d} \oplus \bigoplus_{d' \in \cotrees(d) \cap \T_R^{(c)}} \wthom{d'} = \bigoplus_{d' \in \cotrees(d) \cap \T_R^{(c)}} \wthom{d'} \enspace. \]
\end{lemma}
\begin{proof}
Let $d \in \T_R \setminus \T_R^{(c)}$.
Then
\begin{align*}
\wthom{d} \oplus \bigoplus_{d' \in \cotrees(d) \cap \T_R^{(c)}} \wthom{d'} &=\wthom{d} \oplus \bigoplus_{d' \in \cotrees(d)} \wthom{d'}
\tag{Lemma~\ref{lem:outside-cutout-trees-subsumed}} \\
&=\bigoplus_{d' \in \cotrees(d)} \wthom{d'}
\tag{Lemma~\ref{lem:closed-bigger-trees-any}} \\
&=\bigoplus_{d' \in \cotrees(d) \cap \T_R^{(c)}} \wthom{d'}
\tag*{(Lemma~\ref{lem:outside-cutout-trees-subsumed}) \qedhere}
\end{align*}
\end{proof}
\thmclosed*
\begin{proof}
Let $l \in \mathbb N$, $D \subseteq \T_R^{(c)}$, and $D' \subseteq \T_R \setminus \T_R^{(c)}$ such that $\bigcup_{d \in D'} (\cotrees(d) \cap \T_R^{(c)}) \subseteq D$.
The proof is done by induction on~$l$.
For the induction base, let $l = 0$.
Then $B = \emptyset$ and the statement of the lemma holds.
For the induction step, let $l \in \mathbb N$.
We assume that for every $B \subseteq D'$ with $|B| = l$,
\begin{equation}
\bigoplus_{d \in D} \wthom{d} \oplus \infsum_{d \in D'} \wthom{d} = \bigoplus_{d \in D} \wthom{d} \oplus \infsum_{d \in D' \setminus B} \wthom{d} \enspace. \tag{IH}
\end{equation}
Now we let $B \subseteq D'$ such that $|B| = l + 1$ and $d \in B$.
Then, as $\cotrees(d) \cap \T_R^{(c)} \subseteq D$
\begin{align*}
&\bigoplus_{d' \in D} \wthom{d'} \oplus \infsum_{d' \in D' \setminus B} \wthom{d'} \\
&=\bigoplus_{d' \in D \setminus (\cotrees(d) \cap \T_R^{(c)})} \wthom{d'} \oplus \bigoplus_{d' \in \cotrees(d) \cap \T_R^{(c)}} \wthom{d'} \oplus \infsum_{d' \in D' \setminus B} \wthom{d'} \\
&=\bigoplus_{d' \in D \setminus (\cotrees(d) \cap \T_R^{(c)})} \wthom{d'} \oplus \bigoplus_{d' \in \cotrees(d) \cap \T_R^{(c)}} \wthom{d'} \oplus \wthom{d} \oplus \infsum_{d' \in D' \setminus B} \wthom{d'}
\tag{Lemma~\ref{lem:cutout-trees-subsume'}, because $d \in \T_R \setminus \T_R^{(c)}$} \\
&=\bigoplus_{d' \in D \setminus (\cotrees(d) \cap \T_R^{(c)})} \wthom{d'} \oplus \bigoplus_{d' \in \cotrees(d) \cap \T_R^{(c)}} \wthom{d'} \oplus \infsum_{d' \in D' \setminus (B \setminus \{ d \})} \wthom{d'} \\
&=\bigoplus_{d' \in D \setminus (\cotrees(d) \cap \T_R^{(c)})} \wthom{d'} \oplus \bigoplus_{d' \in \cotrees(d) \cap \T_R^{(c)}} \wthom{d'} \oplus \infsum_{d' \in D'} \wthom{d'}
\tag{IH} \\
&= \bigoplus_{d' \in D} \wthom{d'} \oplus \infsum_{d' \in D'} \wthom{d'} \qedhere
\end{align*}
\end{proof}
\lemclosed*
\begin{proof}
The proof is done by induction on~$l$.
For the induction base, let $l = 0$.
Then $B = \emptyset$ and the statement of the lemma holds for every $A \in N$.
For the induction step, let $l \in \mathbb N$.
We assume that for every $A \in N$ and $B \subseteq (\T_R)_A \setminus \T_R^{(c)}$ with $|B| = l$,
\begin{equation}
\bigoplus_{d \in (\T_R^{(c)})_A} \wthom{d} = \bigoplus_{d \in (\T_R^{(c)})_A \cup B} \wthom{d} \enspace. \tag{IH}
\end{equation}
Now we let $A \in N$, $B \subseteq (\T_R)_A \setminus \T_R^{(c)}$ such that $|B| = l + 1$, and $d' \in B$.
Then
\begin{align*}
\bigoplus_{d \in (\T_R^{(c)})_A \cup B} \wthom{d} &= \bigoplus_{d \in (\T_R^{(c)})_A} \wthom{d} \oplus \bigoplus_{d \in B} \wthom{d} \tag{$B \cap \T_R^{(c)} = \emptyset$} \\
&= \bigoplus_{d \in (\T_R^{(c)})_A \setminus (\cotrees(d') \cap \T_R^{(c)})} \wthom{d} \oplus \bigoplus_{d \in \cotrees(d') \cap \T_R^{(c)}} \wthom{d} \oplus \bigoplus_{d \in B} \wthom{d} \\
&= \bigoplus_{d \in (\T_R^{(c)})_A \setminus (\cotrees(d') \cap \T_R^{(c)})} \wthom{d} \oplus \bigoplus_{d \in \cotrees(d') \cap \T_R^{(c)}} \wthom{d} \oplus \wthom{d'} \oplus \bigoplus_{\mathclap{d \in B \setminus \{ d' \}}} \wthom{d} \\
&= \bigoplus_{d \in (\T_R^{(c)})_A \setminus (\cotrees(d') \cap \T_R^{(c)})} \wthom{d} \oplus \bigoplus_{d \in \cotrees(d') \cap \T_R^{(c)}} \wthom{d} \oplus \bigoplus_{\mathclap{d \in B \setminus \{ d' \}}} \wthom{d} \tag{Lemma~\ref{lem:cutout-trees-subsume'}, because $d' \in \T_R \setminus \T_R^{(c)}$} \\
&= \bigoplus_{d \in (\T_R^{(c)})_A} \wthom{d} \oplus \bigoplus_{d \in B \setminus \{ d' \}} \wthom{d} \\
&= \bigoplus_{d \in (\T_R^{(c)})_A} \wthom{d} \enspace. \tag*{(IH) \qedhere}
\end{align*}
\end{proof}
\subsection{Intersection is an instance of the M-monoid parsing problem}\label{app:intersection}
In this subappendix we give a full proof of Theorem~\ref{thm:intersection}.
For this, we let $(G,(\alg L,\phi))$ be an RTG-LM such that $G=(N,\Sigma,A_0,R)$ and $(\alg L,\phi)$ is a finitely decomposable language algebra.
Moreover, we let $a \in \alg L_{\sort(A_0)}$.
We consider the M-monoid parsing problem with the following input:
\begin{itemize}
\item the wRTG-LM $((G,(\alg L,\phi)), \walg{K}((G,(\alg L,\phi)),a), \wt)$ where $\wt(r) = \omega_r$ for each $r \in R$ and
\item $a$.
\end{itemize}
We show that $(G',(\alg L,\phi))$ is the $\psi$-intersection of $(G,(\alg L,\phi))$ and $a$, where
\begin{itemize}
\item $G' = (N',\Sigma,[A_0,a],R')$ with $N'=\mathrm{lhs}(\fparse(a)) \cup \{[A_0,a]\}$ (we note that $\fparse(a)$ is a finite set), $R' = \fparse(a)$, and
\item $\psi\colon N' \rightarrow N$ is defined by $\psi([A,b]) = A$ for each $[A,b] \in N'$.
\end{itemize}
We extend the mapping $\widehat{\psi}: \mathrm{AST}(G') \to \mathrm{AST}(G, a)$ such that $\widehat{\psi}: \T_{R'} \to \T_R$.
This is required for our proofs by structural induction.
Clearly, the extended mapping $\widehat{\psi}$ is not bijective in general and we will only show bijectivity of $\widehat{\psi}: \mathrm{AST}(G') \to \mathrm{AST}(G, a)$.
\begin{lemma}\label{lem:intersection-semantics}
For every $d \in \T_{R'}$ it holds that $\sem[\alg L]{\pi_\Sigma(d)} = b$, where $\lhs(d(\varepsilon)) = [A, b]$ for some $A \in N$.
\end{lemma}
\begin{proof}
The proof is done by structural induction on $d$.
We assume (IH) that for every $k \in \mathbb N$, $i \in [k]$, and $d_i \in \T_{R'}$ it holds that $\sem[\alg L]{\pi_\Sigma(d_i)} = a_i$, where $\lhs(d_i(\varepsilon)) = [A_i, a_i]$ for some $A_i \in N$.
Then for every $r \in R'$ with $r = \big( [A, b] \to \sigma([A_1, a_1], \dots, [A_k, a_k]) \big)$
\begin{align*}
\sem[\alg L]{\pi_\Sigma \big( r(d_1, \dots, d_k) \big)} &= \phi(\sigma) \big( \sem[\alg L]{\pi_\Sigma(d_1)}, \dots, \sem[\alg L]{\pi_\Sigma(d_k)} \big) \\
&= \phi(\sigma)(a_1, \dots, a_k) \tag{IH} \\
&= b \enspace. \tag*{(Definition of $P_{R,a}$) \qedhere}
\end{align*}
\end{proof}
\begin{lemma}\label{lem:psi-and-pisigma}
For every $d \in \T_{R'}$ it holds that $\sem[\alg L]{\pi_\Sigma(d)} = \sem[\alg L]{\pi_\Sigma(\widehat{\psi}(d))}$.
\end{lemma}
\begin{proof}
The proof is done by structural induction on $d$.
We assume (IH) that for every $k \in \mathbb N$, $i \in [k]$, and $d_i \in \T_{R'}$ it holds that $\sem[\alg L]{\pi_\Sigma(d_i)} = \sem[\alg L]{\pi_\Sigma(\widehat{\psi}(d_i))}$.
Then for every $r \in R'$ with $r = \big( [A,b] \to \sigma([A_1,a_1], \dots, [A_k,a_k]) \big)$ we have
\begin{align*}
\sem[\alg L]{\pi_\Sigma(r(d_1, \dots, d_k))} &= \phi(\sigma) \big( \sem[\alg L]{\pi_\Sigma(d_1)}, \dots, \sem[\alg L]{\pi_\Sigma(d_k)} \big) \\
&= \phi(\sigma) \big( \sem[\alg L]{\pi_\Sigma(\widehat{\psi}(d_1))}, \dots, \sem[\alg L]{\pi_\Sigma(\widehat{\psi}(d_k))} \big) \tag{IH} \\
&= \sem[\alg L]{\pi_\Sigma \bigl( \psi(r)(\widehat{\psi}(d_1), \dots, \widehat{\psi}(d_k)) \bigr)} \\
&= \sem[\alg L]{\pi_\Sigma \bigl( \widehat{\psi}(r(d_1, \dots, d_k)) \bigr)} \enspace. \tag*{\qedhere}
\end{align*}
\end{proof}
\begin{lemma}\label{lem:intersection-injective}
For every $d \in \T_R$ it holds that $|\{ d' \in \T_{R'} \mid \widehat{\psi}(d') = d \ \text{and} \ \sem[\alg L]{\pi_\Sigma(d')} = \sem[\alg L]{\pi_\Sigma(d)} \}| \le 1$.
\end{lemma}
\begin{proof}
The proof is done by structural induction on $d$.
We assume (IH) that for every $k \in \mathbb N$, $i \in [k]$, and $d_i \in \T_R$ it holds that $|\{ d_i' \in \T_{R'} \mid \widehat{\psi}(d_i') = d_i \ \text{and} \ \sem[\alg L]{\pi_\Sigma(d_i')} = \sem[\alg L]{\pi_\Sigma(d_i)} \}| \le 1$.
Let $r \in R$ with $r = \big( A \to \sigma(A_1, \dots, A_k) \big)$.
Then
\begin{align*}
&\{ d' \in \T_{R'} \mid \widehat{\psi}(d') = r(d_1, \dots, d_k) \ \text{and} \ \sem[\alg L]{\pi_\Sigma(d')} = \sem[\alg L]{\pi_\Sigma(r(d_1, \dots, d_k))} \} \\
&= \{ d' \in \T_{R'} \mid \begin{aligned}[t]
&\widehat{\psi}(d') = r(d_1, \dots, d_k), \lhs(d'(\varepsilon)) = [A,\sem[\alg L]{\pi_\Sigma(r(d_1, \dots, d_k))}], \ \text{and} \\
&\sem[\alg L]{\pi_\Sigma(d')} = \sem[\alg L]{\pi_\Sigma(r(d_1, \dots, d_k))} \}
\end{aligned} \tag{Lemma~\ref{lem:intersection-semantics}} \\
&= \{ \begin{aligned}[t]
&\big([A,\sem[\alg L]{\pi_\Sigma(r(d_1, \dots, d_k))}] \to \sigma([A_1,a_1], \dots, [A_k,a_k])\big)(d_1', \dots, d_k') \mid \\
&(a_1, \dots, a_k) \in \phi(\sigma)^{-1}(\sem[\alg L]{\pi_\Sigma(r(d_1, \dots, d_k))}), d_1' \in (\T_{R'})_{[A_1,a_1]}, \dots, d_k' \in (\T_{R'})_{[A_k,a_k]}, \\
&\widehat{\psi}(d_1') = d_1, \dots, \widehat{\psi}(d_k') = d_k, \ \text{and} \ \sem[\alg L]{\pi_\Sigma(r'(d_1', \dots, d_k'))} = \sem[\alg L]{\pi_\Sigma(r(d_1, \dots, d_k))} \}
\end{aligned} \tag{Definition of $P_{R,a}$} \\
\intertext{(where $r' = \big([A,\sem[\alg L]{\pi_\Sigma(r(d_1, \dots, d_k))}] \to \sigma([A_1,a_1], \dots, [A_k,a_k])\big)$)}
&= \{ \begin{aligned}[t]
&\big([A,\sem[\alg L]{\pi_\Sigma(r(d_1, \dots, d_k))}] \to \sigma([A_1,\sem[\alg L]{\pi_\Sigma(d_1)}], \dots, [A_k,\sem[\alg L]{\pi_\Sigma(d_k)}])\big)(d_1', \dots, d_k') \mid \\
&d_1' \in (\T_{R'})_{[A_1,\sem[\alg L]{\pi_\Sigma(d_1)}]}, \dots, d_k' \in (\T_{R'})_{[A_k,\sem[\alg L]{\pi_\Sigma(d_k)}]}, \\
&\widehat{\psi}(d_1') = d_1, \dots, \widehat{\psi}(d_k') = d_k, \ \text{and} \ \sem[\alg L]{\pi_\Sigma(d_1')} = \sem[\alg L]{\pi_\Sigma(d_1)}, \dots, \sem[\alg L]{\pi_\Sigma(d_k')} = \sem[\alg L]{\pi_\Sigma(d_k)} \}
\end{aligned} \tag{Lemma~\ref{lem:psi-and-pisigma}}
\end{align*}
This set has at most one element as $|\{ d_i' \in (\T_{R'})_{[A_i,\sem[\alg L]{\pi_\Sigma(d_i)}]} \mid \widehat{\psi}{(d_i')} = d_i \ \text{and} \ \sem[\alg L]{\pi_\Sigma(d_i')} = \sem[\alg L]{\pi_\Sigma(d_i)} \}| \le 1$ for every $i \in [k]$ by (IH).
\end{proof}
For every $d \in \T_R$ we let $R'(d) = \wthom{d}$.
\begin{lemma}\label{lem:intersection-surjective}
For every $d \in \mathrm{AST}(G, a)$ there is a $d' \in \T_{R'(d)}$ such that $\widehat{\psi}(d') = d$.
\end{lemma}
\begin{proof}
The proof is done by induction on $d$.
We assume (IH) that for every $k \in \mathbb N$, $i \in [k]$, and $d_i \in \T_R$ there is a $d_i' \in \T_{R'(d_i)}$ such that $\widehat{\psi}(d_i') = d_i$.
We let $r \in R$ with $r = \big( A \to \sigma(A_1, \dots, A_k) \big)$ and $d = r(d_1, \dots, d_k)$.
Then by (IH) for every $i \in [k]$ there is a $d_i' \in \T_{R'(d_i)}$ with $\widehat{\psi}(d_i') = d_i$.
Then by definition of $\omega_r$, $r' \in R'(d)$ with $r' = \big( [A,b] \to \sigma([A_1,a_1], \dots, [A_k,a_k]) \big)$, $[A_i,a_i] = \lhs(d_i'(\varepsilon))$ for every $i \in [k]$ and $b = \phi(\sigma)(a_1, \dots, a_k)$.
Moreover, $d_1', \dots, d_k' \in \T_{R'(d)}$.
Thus $r'(d_1', \dots, d_k') \in \T_{R'(d)}$.
Clearly $\widehat{\psi}(r'(d_1', \dots, d_k')) = d$.
\end{proof}
\thmintersection*
\begin{proof}
First we show that $\widehat{\psi}: \mathrm{AST}(G') \to \mathrm{AST}(G, a)$ is bijective by showing that it is injective and surjective.
For injectivity let $d_1, d_2 \in \mathrm{AST}(G')$ such that $\widehat{\psi}(d_1) = \widehat{\psi}(d_2)$.
By Lemma~\ref{lem:intersection-semantics}, $\sem[\alg L]{\pi_\Sigma(d_1)} = a = \sem[\alg L]{\pi_\Sigma(d_2)}$.
Then by Lemma~\ref{lem:psi-and-pisigma}, there is a $d \in \mathrm{AST}(G, a)$ such that $\widehat{\psi}(d_1) = d = \widehat{\psi}(d_2)$.
Then by Lemma~\ref{lem:intersection-injective}, $d_1 = d_2$; hence $\widehat{\psi}: \mathrm{AST}(G') \to \mathrm{AST}(G, a)$ is injective.
For surjectivity, let $d \in \mathrm{AST}(G, a)$.
Then by Lemma~\ref{lem:intersection-surjective}, there is a $d' \in \T_{R'(d)}$ such that $\widehat{\psi}(d') = d$.
Since $R' = \bigcup_{d \in \mathrm{AST}(G, a)} R'(d)$, it holds that $d' \in \T_{R'}$.
Then by definition of $\widehat{\psi}$ and Lemma~\ref{lem:intersection-semantics}, $\lhs(d'(\varepsilon)) = [A_0, a]$ and thus $d' \in \mathrm{AST}(G)$; hence $\widehat{\psi}: \mathrm{AST}(G') \to \mathrm{AST}(G, a)$ is surjective.
Now we show that $L(G')_{\alg L} = L(G)_{\alg L} \cap \{ a \}$.
For this, we distinguish two cases.
\begin{enumerate}
\item If $a \in L(G)_{\alg L}$,
\begin{align*}
L(G')_{\alg L} &= \{ \sem[\alg L]{\pi_\Sigma(d)} \mid d \in \mathrm{AST}(G) \} \\
&= \{ \sem[\alg L]{\pi_\Sigma(d)} \mid d \in (\T_{R'})_{[A_0,a]} \} \\
&= \{ a \mid d \in (\T_{R'})_{[A_0,a]} \} \tag{Lemma~\ref{lem:intersection-semantics}} \\
&= \{ a \} \\
&= L(G)_{\alg L} \cap \{ a \} \enspace. \tag{$a \in L(G)_{\alg L}$}
\end{align*}
\item Otherwise $a \not\in L(G)_{\alg L}$, then $\mathrm{AST}(G, a) = \emptyset$.
Thus, since $\widehat{\psi}: \mathrm{AST}(G') \to \mathrm{AST}(G, a)$ is bijective, $\mathrm{AST}(G') = \emptyset$ as well.
Consequently
\[
L(G')_{\alg L} = \emptyset = L(G)_{\alg L} \cap \{ a \} \enspace. \qedhere
\]
\end{enumerate}
\end{proof}
\subsection{ADP algebra is a d-complete and distributive M-monoid}\label{sec:proof-adp-mmonoid}
\lemadpmmonoid*
\begin{proof}
\begin{sloppypar}
Let $(\walg{K}', \oplus, \emptyset, \Sigma',\psi',\infsum)$ be the algebra associated with~$\walg{K}$ and~$h$.
We show that $(\walg{K}', \oplus, \emptyset, \Sigma',\psi',\infsum)$ is a d-complete and distributive M-monoid in three steps.
We begin by proving that $(\walg{K}', \oplus, \emptyset, \Sigma',\psi')$ is an M-monoid.
First, $(\walg{K}',\oplus,\emptyset)$ is a commutative monoid, as
\end{sloppypar}
\begin{itemize}
\item $\walg{K}'$ is a set, and $\oplus: \walg{K}' \times \walg{K}' \rightarrow \walg{K}'$,
i.e., for every $F_1,F_2 \in \walg{K}'$ it holds that $F_1 \oplus F_2 \in \walg{K}'$.
For the proof of this claim, let $F_1,F_2 \in \walg{K}'$.
Now we distinguish two cases:
\begin{enumerate}
\item If there is an $s \in S$ such that $F_1,F_2 \subseteq \walg{K}_s$, then $F_1 \oplus F_2 = h_s(F_1 \cup F_2)$.
Obviously $F_1 \cup F_2 \subseteq \walg{K}_s$ and then by the definition of~$\walg{K}'$, $h_s(F_1 \cup F_2) \in \walg{K}'$.
\item Otherwise $F_1 \oplus F_2 = \bot$ and $\bot \in \walg{K}'$ by definition.
\end{enumerate}
\item Commutativity of~$\oplus$ easily follows from the commutativity of~$\cup$.
\item We show that $\oplus$ is associative, i.e., for every $F_1,F_2,F_3 \in \walg{K}'$ it holds that $(F_1 \oplus F_2) \oplus F_3 = F_1 \oplus (F_2 \oplus F_3)$, by the following case analysis.
Let $F_1,F_2,F_3 \in \walg{K}'$.
Now either
\begin{enumerate}
\item there is an $s \in S$ such that $F_1,F_2 \subseteq \walg{K}_s$, then,
\begin{enumerate}
\item if also $F_3 \subseteq \walg{K}_s$, then
\begin{align*}
(F_1 \oplus F_2) \oplus F_3 &= h_s(h_s(F_1 \cup F_2) \cup F_3) \\
&= h_s(h_s(F_1 \cup F_2) \cup h_s(F_3)) \tag{$h$ is idempotent} \\
&= h_s((F_1 \cup F_2) \cup F_3) \tag{Equation~\ref{eq:obj-function'}} \\
&= h_s(F_1 \cup (F_2 \cup F_3)) \tag{$\cup$ is associative} \\
&= h_s(F_1 \cup h_s(F_2 \cup F_3)) \tag{$h$ is idempotent} \\
&= F_1 \oplus (F_2 \oplus F_3) \enspace,
\end{align*}
or,
\item if $F_3 \not\subseteq \walg{K}_s$, then $(F_1 \oplus F_2) \oplus F_3 = \bot = F_2 \oplus F_3 = F_1 \oplus (F_2 \oplus F_3)$,
\end{enumerate}
or
\item there is no such $s \in S$ and hence $F_1 \oplus F_2 = \bot = (F_1 \oplus F_2) \oplus F_3$.
Now it may be that there is an $s' \in S$ such that $F_2,F_3 \subseteq \walg{K}_{s'}$,
then $F_2 \oplus F_3 \subseteq \walg{K}_{s'}$, but $F_1 \not \subseteq \walg{K}_{s'}$ and hence $F_1 \oplus (F_2 \oplus F_3) = \bot$.
Otherwise $F_2 \oplus F_3 = \bot$ and hence $F_1 \oplus (F_2 \oplus F_3) = \bot$.
(We note that, if we had not added~$\bot$ to~$\walg{K}'$ and still chosen $\emptyset$ as the identity element, then in this case~$\oplus$ would not be associative.)
\end{enumerate}
\item As $\emptyset \subseteq \walg{K}_s$ for any $s \in S$, we have $\emptyset \in \walg{K}'$.
We show that~$\emptyset$ is the identity element by showing that $\emptyset \oplus F = F$ for every $F \in \walg{K}'$.
Then the other condition, $F \oplus \emptyset = F$, will follow from the commutativity of~$\oplus$.
Let $F \in \walg{K}'$.
Again, we distinguish two cases:
\begin{enumerate}
\item If $F = \bot$, then $\emptyset \oplus \bot = \bot$ by definition.
\item Otherwise there is an $s \in S$ such that $F \subseteq \walg{K}_s$.
Since $\emptyset \subseteq \walg{K}_s$, we have that $\emptyset \oplus F = h_s(\emptyset \cup F) = h_s(F)$ and as~$h$ is idempotent, $h_s(F) = F$.
\end{enumerate}
\end{itemize}
Second, $(\walg{K}',\psi')$ is a $\Sigma'$-algebra as~$\Sigma'$ is a ranked set and $\psi'(\sigma)(F_1,\dots,F_k) \in \walg{K}'$ for every $k \in \mathbb N$, $\sigma \in \Sigma'_k$, and $F_1,\dots,F_k \in \walg{K}'$ which we show by the following case analysis.
Let $k \in \mathbb N$, $\sigma \in \Sigma'_k$, and $F_1,\dots,F_k \in \walg{K}'$.
Then:
\begin{enumerate}
\item If $\sigma = t$ with $t \in (\T_\Sigma(X_{s_1 \dots s_k}))_s$, then there are two possibilities:
\begin{enumerate}
\item If $F_i \subseteq \walg{K}_{s_i}$ for every $i \in [k]$, then
\begin{align*}
\psi'(\sigma)(F_1,\dots,F_k) &= h_s(t_{\walg{K}}(F_1,\dots,F_k)) \\
&= h_s\big(\{ t_{\walg{K}}(a_1,\dots,a_k) \mid a_1 \in F_1,\dots,a_k \in F_k \}\big)
\end{align*}
\begin{sloppypar}
and by definition of~$t_{\walg{K}}$, $t_{\walg{K}}(a_1,\dots,a_k) \in \walg{K}_s$ for every $a_1 \in F_1,\dots,a_k \in F_k$.
Hence $t_{\walg{K}}(F_1,\dots,F_k) \subseteq \walg{K}_s$ and by definition of~$\walg{K}'$, $h_s\big(t_{\walg{K}}(F_1,\dots,F_k)\big) \in \walg{K}'$.
\end{sloppypar}
\item Otherwise $\psi'(\sigma)(F_1,\dots,F_k) = \bot \in \walg{K}'$.
\end{enumerate}
\item If $\sigma = \welem{0}^k$, then $\psi'(\sigma)(F_1,\dots,F_k) = \emptyset \in \walg{K}'$.
\end{enumerate}
Finally, $\welem{0}^k \in \Sigma'$ and $\psi'(\welem{0}^k)(F_1,\dots,F_k) = \emptyset$ for every $k \in \mathbb N$ and $F_1,\dots,F_k \in \walg{K}'$ by definition.
The operation~$\infsum$ fulfils the axioms of an infinitary sum operation on~$\walg{K}'$, as the following case analysis shows.
Let $(F_i \mid i \in I)$ be an $I$-indexed family over $\walg{K}'$. Then:
\begin{itemize}
\item if $I=\emptyset$, then $\infsum_{i \in \emptyset} F_i = \emptyset$,
\item if $I = \{n\}$ and $F_n \in \walg{K}'$, then it holds that either
\begin{enumerate}
\item $F_n = \bot$, then $\infsum_{i \in \{ n \}} F_i = \bot$, or
\item $F_n \subseteq \walg{K}_s$ for some $s \in S$, then $\infsum_{i \in \{ n \}} F_i = h_s(F_n) = F_n$,
\end{enumerate}
\item if $I = \{m,n\}$ with $m \not= n$ and $F_m,F_n \in \walg{K}'$, then it holds that either
\begin{enumerate}
\item there is an $s \in S$ such that $F_m,F_n \subseteq \walg{K}_s$, then
\[ \infsum_{i \in \{ m,n \}} F_i = h_s \Big( \bigcup_{i \in \{ m,n \}} F_i \Big) = h_s(F_m \cup F_n) = F_m \oplus F_n \enspace, \text{ or} \]
\item otherwise $\infsum_{i \in \{ m,n \}} F_i = \bot = F_m \oplus F_n$,
\end{enumerate}
\item for every $J$-partition of~$I$ it holds that either
\begin{enumerate}
\item there is an $s \in S$ such that $F_i \subseteq \walg{K}_s$ for every $i \in I$, then
\begin{align*}
\infsum_{i \in I} F_i = h_s\big(\bigcup_{i \in I} F_i\big) &= h_s\Big(\bigcup_{j \in J} \big(\bigcup_{i \in I_j} F_i\big)\Big) \\
&= h_s\Big(\bigcup_{j \in J} h_s\big(\bigcup_{i \in I_j} F_i\big)\Big) \tag{Equation~\ref{eq:obj-function'}} \\
&= h_s\Big(\bigcup_{j \in J} \big(\infsum_{i \in I_j} F_i\big) \Big) = \infsum_{j \in J} \big( \infsum_{i \in I_j} F_i \big) \enspace, \text{or}
\end{align*}
\item there is an $i' \in I$ such that $F_{i'} = \bot$; then there is a $j' \in J$ such that $i' \in I_{j'}$, hence $\infsum_{i \in I_{j'}} F_i = \bot$ and thus
\[ \infsum_{j \in J} \big( \infsum_{i \in I_j} F_i \big) = \bot = \infsum_{i \in I} F_i \enspace, \text{or} \]
\item there are $i_1,i_2 \in I$ such that $F_{i_1} \subseteq \walg{K}_{s_1}$ and $F_{i_2} \subseteq \walg{K}_{s_2}$ with $s_1,s_2 \in S$ and $s_1 \not= s_2$.
Now we have to distinguish two cases:
\begin{enumerate}
\item there is a $j' \in J$ such that $i_1,i_2 \in I_{j'}$, then $\infsum_{i \in I_{j'}} F_i = \bot$ and hence
\[ \infsum_{j \in J} \big( \infsum_{i \in I_j} F_i \big) = \bot = \infsum_{i \in I} F_i \enspace, \]
\item $i_1 \in I_{j_1}$ and $i_2 \in I_{j_2}$ with $j_1,j_2 \in J$ and $j_1 \not= j_2$.
Then there are $F_1 \subseteq \walg{K}_{s_1}$ and $F_2 \subseteq \walg{K}_{s_2}$ such that $\infsum_{i \in I_{j_1}} F_i = F_1$ and $\infsum_{i \in I_{j_2}} F_i = F_2$, but since $s_1 \not= s_2$
\[ \infsum_{j \in J} \big( \infsum_{i \in I_j} F_i \big) = \bot = \infsum_{i \in I} F_i \enspace. \]
\end{enumerate}
\end{enumerate}
\end{itemize}
In order to show that~$\walg K'$ is even d-complete, we let $F \in \walg K'$ and $(F_i \mid i \in I)$ be an $I$-indexed family over~$\walg K'$ such that for every $i \in I$, $F \oplus F_i = F$.
Then, by definition of~$\oplus$, we have to distinguish two cases.
\begin{enumerate}
\item If there is an $s \in S$ such that $F \subseteq \walg K_s$ and for every $i \in I$, $F_i \subseteq \walg K_s$, then for every $i \in I$, $h_s(F \cup F_i) = F$.
Thus
\begin{align*}
F \oplus \infsum_{i \in I} F_i &= h_s \left( F \cup \infsum_{i \in I} F_i \right) \\
&= h_s \left( F \cup h_s \left( \bigcup_{i \in I} F_i \right) \right) \\
&= h_s \left( h_s(F) \cup h_s \left( \bigcup_{i \in I} F_i \right) \right) \tag{$F \in \walg K'$} \\
&= h_s \left( F \cup \bigcup_{i \in I} F_i \right) \tag{Equation~\ref{eq:obj-function'}} \\
&= h_s \left( \bigcup_{i \in I} \, (F \cup F_i) \right) \tag{$\bigcup$ is idempotent} \\
&= h_s \left( \bigcup_{i \in I} h_s(F \cup F_i) \right) \tag{Equation~\ref{eq:obj-function'}} \\
&= h_s \left( \bigcup_{i \in I} F \right) \\
&= h_s(F) \tag{$\bigcup$ is idempotent} \\
&= F \enspace. \tag{$F \in \walg K'$}
\end{align*}
\item Otherwise, there is an $i \in I$ such that $F \oplus F_i = \bot$.
But then also $F = \bot$.
Thus, by definition of~$\oplus$, $F \oplus \infsum_{i \in I} F_i = \bot$.
\end{enumerate}
Distributivity of~$\walg{K}'$ is implied by the fact that $h$ satisfies Bellman's principle of optimality, which we will show next.
Let $k \in \mathbb N$, $s,s_1,\dots,s_k \in S$, $\sigma \in \Sigma'_k$, $F_1,\dots,F_k,F' \in \walg{K}'$, and $i \in [k]$.
We consider two cases; first, assume that $\sigma = t$ with $t \in (\T_\Sigma(X_{s_1 \dots s_k}))_s$.
Now there are two possibilities:
\begin{enumerate}
\item If $F_{i'} \subseteq \walg{K}_{s_{i'}}$ for every $i' \in [k]$ and $F' \subseteq \walg K_{s_i}$, then
\begin{align*}
&\psi'(\sigma)\Big(F_1,\dots,F_{i-1},F_i \oplus F',F_{i+1},\dots,F_k\Big) \\
&= \psi'(\sigma)\Big(F_1,\dots,F_{i-1},h_{s_i}\big(F_i \cup F'\big),F_{i+1},\dots,F_k\Big) \\
&= h_s\Big({t_{\walg{K}}}\big(F_1,\dots,F_{i-1},h_{s_i}\big(F_i \cup F'\big),F_{i+1},\dots,F_k\big)\Big) \\
&= h_s\Big({t_{\walg{K}}}\big(h_{s_1}(F_1),\dots,h_{s_{i-1}}(F_{i-1}),h_{s_i}\big(F_i \cup F'\big),h_{s_{i+1}}(F_{i+1}),\dots,h_{s_k}(F_k)\big)\Big) \tag{$h$ is idempotent} \\
&= h_s\Big({t_{\walg{K}}}\big(F_1,\dots,F_{i-1},F_i \cup F',F_{i+1},\dots,F_k\big)\Big) \tag{Equation~\ref{eq:bellman'}} \\
&= h_s\Big({t_{\walg{K}}}(F_1,\dots,F_{i-1},F_i,F_{i+1},\dots,F_k) \cup {t_{\walg{K}}}(F_1,\dots,F_{i-1},F',F_{i+1},\dots,F_k)\Big) \\
&= h_s\Big(h_s\big({t_{\walg{K}}}(F_1,\dots,F_{i-1},F_i,F_{i+1},\dots,F_k)\big) \cup h_s\big({t_{\walg{K}}}(F_1,\dots,F_{i-1},F',F_{i+1},\dots,F_k)\big)\Big) \tag{Equation~\ref{eq:obj-function'}} \\
&= h_s\Big(\psi'(\sigma)(F_1,\dots,F_{i-1},F_i,F_{i+1},\dots,F_k) \cup \psi'(\sigma)(F_1,\dots,F_{i-1},F',F_{i+1},\dots,F_k)\Big) \\
&= \psi'(\sigma)(F_1,\dots,F_{i-1},F_i,F_{i+1},\dots,F_k) \oplus \psi'(\sigma)(F_1,\dots,F_{i-1},F',F_{i+1},\dots,F_k) \enspace.
\end{align*}
\item If there is an $i' \in [k]$ such that $F_{i'} \not\subseteq \walg{K}_{s_{i'}}$ or $F' \not\subseteq \walg{K}_{s_i}$, then
\begin{align*}
\psi'(\sigma)\Big(F_1,\dots,F_{i-1},F_i \oplus F',F_{i+1},\dots,F_k\Big) &= \bot \\
\intertext{and furthermore}
\psi'(\sigma)(F_1,\dots,F_{i-1},F_i,F_{i+1},\dots,F_k) &= \bot
\intertext{or}
\psi'(\sigma)(F_1,\dots,F_{i-1},F',F_{i+1},\dots,F_k) &= \bot \enspace.
\end{align*}
Hence
\begin{align*} &\psi'(\sigma)(F_1,\dots,F_{i-1},F_i,F_{i+1},\dots,F_k) \oplus \psi'(\sigma)(F_1,\dots,F_{i-1},F',F_{i+1},\dots,F_k) = \bot \enspace.
\end{align*}
\end{enumerate}
Second, assume that $\sigma = \welem{0}^k$.
Then
\begin{align*}
&\psi'(\sigma)\Big(F_1,\dots,F_{i-1},F_i \oplus F',F_{i+1},\dots,F_k\Big)
= \emptyset
= \emptyset \cup \emptyset
= h_s\big(\emptyset \cup \emptyset\big) \\
&= h_s\big(\psi'(\sigma)(F_1,\dots,F_{i-1},F_i,F_{i+1},\dots,F_k) \cup \psi'(\sigma)(F_1,\dots,F_{i-1},F',F_{i+1},\dots,F_k)\big) \\
&= \psi'(\sigma)(F_1,\dots,F_{i-1},F_i,F_{i+1},\dots,F_k) \oplus \psi'(\sigma)(F_1,\dots,F_{i-1},F',F_{i+1},\dots,F_k) \enspace. \qedhere
\end{align*}
\end{proof}
\subsection{ADP is an instance of the M-monoid parsing problem}\label{app:adp-mmonoid-parsing}
This subappendix contains the full proof of Theorem~\ref{thm:ADP-M-monoid}.
We start with an auxiliary statement.
\begin{lemma}\label{lem:ast-mmonoid-is-tree-adp}
For every $d \in \T_R$ it holds that $\sem[\walg K']{\wt(d)} = \{ \pi_\Sigma(d)_{\walg{K}} \}$.
\end{lemma}
\begin{proof}
We prove the statement of the lemma by structural induction over~$d$.
For the induction base, let $d = (A \to t)$ in~$R$.
Then $d \in R_{(\varepsilon,A)}$ and hence $t \in \T_\Sigma$.
(We recall that $\T_\Sigma = \T_\Sigma(X_\varepsilon)$.)
Now for both cases, $t \in (\T_\Sigma)_{\mathsf{a}}$ or $t \in (\T_\Sigma)_{\mathsf{i}}$, the proof of $\sem[\walg K']{\wt(d)} = \{ \pi_\Sigma(d)_{\walg{K}} \}$ is the same.
Thus, for every $s \in \{{\mathsf{i}},{\mathsf{a}}\}$, we have
\[ \sem[\walg K']{\wt(d)} = \psi'(t) = h_s\big(\{t_{\walg K}\}\big) = \{t_{\walg K}\} = \{ \pi_\Sigma(d)_{\walg K} \} \enspace. \]
For the induction step, we let $d \in \T_R$ be of the form $r(d_1,\dots,d_k)$ for some $k \in \mathbb N$ with $r = (A \to t)$ in~$R$.
Then there are $A_1,\dots,A_k \in N$ such that $r \in R_{(A_1 \dots A_k,A)}$ and $d_i \in (\T_R)_{A_i}$ for each $i \in [k]$.
We assume (IH) that for every $i \in [k]$, $\sem[\walg K']{\wt(d_i)} = \{ \pi_\Sigma(d_i)_{\walg{K}} \}$.
Furthermore, let~$t'$ be obtained from~$t$ by replacing the $i$th occurrence of a nonterminal in~$t$ by~$x_i$ for every $i \in [k]$.
Again, for both cases, $t \in (\T_\Sigma(N))_{\mathsf{a}}$ or $t \in (\T_\Sigma(N))_{\mathsf{i}}$, the proof of $\sem[\walg K']{\wt(d)} = \{ \pi_\Sigma(d)_{\walg{K}} \}$ is the same.
Thus, for every $s \in \{ {\mathsf{i}},{\mathsf{a}} \}$, we have
\begin{align*}
\sem[\walg K']{\wt(r(d_1,\dots,d_k))} &= \psi'(t')(\sem[\walg K']{\wt(d_1)},\dots,\sem[\walg K']{\wt(d_k)}) \enspace, \\
\intertext{now for every $i \in [k]$ and $r' = (A_i \to t'')$ in~$R$ we have that $\sort(A_i) = \sort(t'')$; thus $\sem[\walg K']{\wt(d_i)} \subseteq \walg K_{\sort(A_i)}$ and we can continue with:}
&= h_s\big(t'_{\walg{K}}(\sem[\walg K']{\wt(d_1)},\dots,\sem[\walg K']{\wt(d_k)})\big) \\
&= h_s\big(t'_{\walg{K}}(\{\pi_\Sigma(d_1)_{\walg{K}}\},\dots,\{\pi_\Sigma(d_k)_{\walg{K}}\})\big) \tag{IH} \\
&= \{t'_{\walg{K}}(\pi_\Sigma(d_1)_{\walg{K}},\dots,\pi_\Sigma(d_k)_{\walg{K}})\} \\
&= \{\big(t'_{\T_\Sigma}(\pi_\Sigma(d_1),\dots,\pi_\Sigma(d_k))\big)_{\walg{K}} \} \tag{Observation~\ref{obs:tree-derived-operations}} \\
&= \{\pi_\Sigma(r(d_1,\dots,d_k))_{\walg{K}}\} \enspace. \tag*{\qedhere}
\end{align*}
\end{proof}
Now we are able to prove Theorem~\ref{thm:ADP-M-monoid}.
\thmadpmmonoid*
\begin{proof}
Let $(G,(\lalg{YIELD}^\Sigma,\phi))$ with $G = (N,\Sigma,A_0,R)$ be an $S$-sorted yield grammar over $\Sigma$,
$(\walg{K},\psi)$ be an $S$-sorted $\Sigma$-algebra,
and~$h$ be an objective function for~$\walg{K}$ that satisfies Bellman's principle of optimality.
Moreover, let
\[ ((G,(\lalg{YIELD}^\Sigma,\phi)), (\walg{K}', \oplus, \emptyset, \Sigma',\psi',\infsum), \wt) \]
be the wRTG-LM constructed as in Theorem~\ref{thm:ADP-M-monoid} and $w \in (\Sigma_{(\varepsilon,{\mathsf{i}})})^*$.
In this proof, we write $\yield$ rather than $\yield_{\Sigma_{(\varepsilon,{\mathsf{i}})}}$ for the sake of readability.
\begin{align*}
\fparse(w) &= \infsum_{d \in (\T_R)_{A_0}: \, \sem[\lalg{YIELD}^\Sigma]{\pi_\Sigma(d)} \, = \, \langle w, {\mathsf{a}} \rangle} \sem[\walg K']{\wt(d)} \\
&= \infsum_{d \in (\T_R)_{A_0}: \yield(\pi_\Sigma(d)) = w} \sem[\walg K']{\wt(d)} \\
&= \infsum_{d \in \pi_\Sigma^{-1}(L(G) \cap \yield^{-1}(w))} \sem[\walg K']{\wt(d)} \\
&= \infsum_{t \in L(G) \cap \yield^{-1}(w)} \sem[\walg K']{\wt(\pi_\Sigma^{-1}(t))} \tag{$G$ is unambiguous} \\
&= \infsum_{t \in L(G) \cap \yield^{-1}(w)} \{ \pi_\Sigma(\pi_\Sigma^{-1}(t))_{\walg{K}} \} \tag{Lemma~\ref{lem:ast-mmonoid-is-tree-adp}} \\
&= \infsum_{t \in L(G) \cap \yield^{-1}(w)} \{ t_{\walg{K}} \} \\
&= h_{\mathsf{a}} \left( \bigcup_{t \in L(G) \cap \yield^{-1}(w)} \{ \sem[\walg K]{t} \} \right) \tag{$\sort(t) = {\mathsf{a}}$ for every $t \in L(G)$} \\
&= \adp(w) \enspace. \tag*{\qedhere}
\end{align*}
\end{proof}
\subsection{Each weight-preserving weighted deduction system is sound and complete}\label{app:weight-preserving-wds}
\lemwdspreserving*
\begin{proof}
\begin{sloppypar}
Let $\overline G = ((G,\alg L),\walg K,\wt)$ in $\wlmclass{\gclass{\alg L},\walg{K}}$ with $G = (N,\Sigma,A_0,R)$, $a \in \alg L_{\sort(A_0)}$, $\wds_{\walg K,\walg K}: \wlmclass{\gclass{\alg L},\walg{K}} \times \alg L \to \wlmclass{\gclass{\lalg{CFG}^\emptyset},\walg K}$ be a weight-preserving weighted deduction system, and $\wds_{\walg K,\walg K}(\overline G,a) = ((G',\lalg{CFG}^\emptyset),\walg K,\wt')$.
\end{sloppypar}
If $\varepsilon \in \sem[\lalg{CFG}^\emptyset]{L(G')}$, then there is a $d \in (\T_{R'})_{A_0'}$ such that $\sem[\lalg{CFG}^\emptyset]{\pi_\Sigma(d)} = \varepsilon$.
Then $\psi^{-1}(d) \in (\T_R)_{A_0}$ and $\sem{\pi_\Sigma(\psi^{-1}(d))} = a$.
Thus $a \in \sem{L(G)}$ and $\wds_{\walg K,\walg K}$ is sound.
If $a \in \sem{L(G)}$, then there is a $d \in (\T_R)_{A_0}$ such that $\sem{\pi_\Sigma(d)} = a$.
Then $\psi(d) \in (\T_{R'})_{A_0'}$.
Since $\sem[\lalg{CFG}^\emptyset]{L(G')} \subseteq \{ \varepsilon \}$ we have that $\sem[\lalg{CFG}^\emptyset]{\pi_\Sigma(\psi(d))} = \varepsilon$;
thus $\varepsilon \in \sem[\lalg{CFG}^\emptyset]{L(G')}$ and $\wds_{\walg K,\walg K}$ is complete.
\end{proof}
\subsection{The canonical weighted deduction system is weight-preserving}\label{app:cnc-weight-preserving}
\lemcncwp*
\begin{proof}
Let $\overline G = ((G, \alg L), \walg K, \wt)$ in $\wlmclass{\gclass{\alg L}, \walg K}$ with $G = (N,\Sigma,A_0,R)$, $a \in \alg L_{\sort(A_0)}$, and $\cnc(\overline G, a) = ((G', \lalg{CFG}^\emptyset), \walg K, \wt')$ with $G' = (N',\Sigma',A_0',R')$.
Next we will define the mapping $\psi: \mathrm{AST}(G, a) \to \mathrm{AST}(G')$ according to the definition of weight-preserving mappings.
For this, we first define the auxiliary mapping
\[
\psi': \{ d \in \T_R \mid \sem[\alg L]{\pi_\Sigma(d)} \in \factors(a) \} \to \T_{R'}
\]
by induction (which is not possible for $\psi$). Let $d \in \T_R$ with $\sem[\alg L]{\pi_\Sigma(d)} \in \factors(a)$.
If
\begin{itemize}
\item $d$ has the form $r(d_1,\dots,d_k)$ with $r = (A \to t)$ with $\yield_N(t) = A_1 \dots A_k$, $k \in \mathbb N$ and $A_1,\dots,A_k \in N$,
\item for every $i \in [k]$, we have $a_i = \sem[\alg L]{\pi_\Sigma(d_i)}$, and
\item for every $i \in [k]$ there is a $t_i \in \T_\Sigma(N)$ such that $t_i$ is the right-hand side of the rule $d_i(\varepsilon)$,
\end{itemize}
then we let
\begin{align*}
\psi'(d) &= r'(\psi'(d_1),\dots,\psi'(d_k)) \enspace, \text{ where }\\
r' &= \left([A,t,t'_{\alg L}(a_1,\dots,a_k)] \to \langle x_1 \dots x_k \rangle ([A_1,t_1,a_1],\dots,[A_k,t_k,a_k])\right)
\end{align*}
and~$t'$ is obtained from~$t$ by replacing the $i$th occurrence of a nonterminal by~$x_i$ for every $i \in [k]$.
It can be seen that, for every $d \in \T_R$ with $\sem[\alg L]{\pi_\Sigma(d)} \in \factors(a)$, the sets $\pos(d)$ and $\pos(\psi'(d))$ are equal, and that the mapping~$\psi'$ is bijective.
Next we define the mapping
\[
\psi: \{ d \in (\T_R)_{A_0} \mid \sem[\alg L]{\pi_\Sigma(d)} = a \} \to (\T_{R'})_{A_0'}
\]
for each $d \in (\T_R)_{A_0}$ of the form $r(d_1,\dots,d_k)$ with $r = (A_0 \to t)$ and $\sem[\alg L]{\pi_\Sigma(d)} = a$ by
\[ \psi(d) = \big([A_0,a] \to \langle x_1 \rangle ([A_0,t,a])\big)\Big(\psi'(d)\Big) \enspace. \]
Then~$\psi$ is bijective, too, and we have that $\pos(\psi(d)) = \{ \varepsilon \} \cup \{ 1 \} \circ \pos(d)$ for every $d \in (\T_R)_{A_0}$ with $\sem[\alg L]{\pi_\Sigma(d)} = a$.
By the definition of $\cnc$, for every $d \in (\T_R)_{A_0}$ with $\sem[\alg L]{\pi_\Sigma(d)} = a$ and for every $p \in \pos(d)$ it holds that $\wt(d(p)) = \wt(\psi(d)(1p))$ and $\wt(\psi(d)(\varepsilon)) = \id(\walg K)$.
Since $\pos(\psi(d)) = \{ \varepsilon \} \cup \{ 1 \} \circ \pos(d)$, we have that $\wthom{d} = \wthom{\psi(d)}$.
Thus $\cnc$ is weight-preserving. By Lemma~\ref{lem:weight-eq-implies-sound-and-complete} it is also sound and complete.
\end{proof}
\subsection{Applying the canonical weighted deduction system to nonlooping wRTG-LMs yields acyclic wRTG-LMs}\label{app:nl-cnc-acyc}
This subappendix contains the full proof of Lemma~\ref{lem:no-loops-cnc-acyclic}.
We start with an auxiliary statement.
\begin{lemma}\label{lem:cnc-semantics}
\sloppy
For every wRTG-LM $\overline G = \big((G, \alg L), \walg K, \wt\big)$ with $G = (N, \Sigma, A_0, R)$, $a_0 \in \alg L$, $\big((G', \lalg{CFG}^\emptyset), \walg K, \wt'\big) = \cnc(\overline G, a_0)$ with $G' = (N', \Sigma', A_0', R')$, and $d \in \T_{R'}$ of the form $r(d_1, \dots, d_k)$ with $r = \big([A,t,b] \to \langle x_1 \dots x_k \rangle ([A_1,t_1,a_1], \dots, [A_k,t_k,a_k])\big)$ the following holds:
$\psi^{-1}(d)_{\alg L} = b$, where~$\psi$ is defined as in the proof of Lemma~\ref{lem:cnc-weight-preserving}.
\end{lemma}
\begin{proof}
Let $\overline G = \big((G, \alg L), \walg K, \wt\big)$ with $G = (N, \Sigma, A_0, R)$ be a wRTG-LM, $a_0 \in \alg L$, $\big((G', \lalg{CFG}^\emptyset), \walg K, \wt'\big) = \cnc(\overline G, a_0)$ with $G' = (N', \Sigma', A_0', R')$, and $d \in \T_{R'}$ of the form $r(d_1, \dots, d_k)$ with $r = ([A,t,b] \to \langle x_1 \dots x_k \rangle ([A_1,t_1,a_1], \dots, [A_k,t_k,a_k]))$.
We show the statement of the lemma by structural induction on~$d$.
For the induction step, assume that for every $i \in [k]$ and $d_i \in \T_{R'}$ of the form $r_i(d_{i,1}, \dots, d_{i,k_i})$ with $r_i = ([A_i,t_i,a_i] \to \langle x_1 \dots x_{k_i} \rangle ([A_{i,1},t_{i,1},a_{i,1}], \dots, [A_{i,k_i},t_{i,k_i},a_{i,k_i}]))$ the following holds:
$\psi^{-1}(d_i)_{\alg L} = a_i$.
Then
\begin{align*}
\psi^{-1}(d)_{\alg L} &= t'_{\alg L}(\psi^{-1}(d_1)_{\alg L}, \dots, \psi^{-1}(d_k)_{\alg L}) \\
&= t'_{\alg L}(a_1, \dots, a_k) \tag{IH} \\
&= b \enspace, \tag{definition of $\cnc$}
\end{align*}
where~$t'$ is obtained from~$t$ by replacing the $i$th occurrence of a nonterminal by~$x_i$ for every $i \in [k]$.
\end{proof}
\lemnlcncacyc*
\begin{proof}
Let $\overline G = \big((G, \alg L), \walg K, \wt\big)$ in $\wlmclass{\gclass{nl} \cap \gclass{\operatorname{fin-dc}}, \wclass{all}}$ with $G = (N, \Sigma, A_0, R)$.
Then for every $d \in \T_R$ and $p, p' \in \pos(d)$ it holds that $d(p) = d(p')$ and $(d|_p)_{\alg L} = (d|_{p'})_{\alg L}$ imply $p = p'$.
We give an indirect proof for the lemma.
For this, let $a_0 \in \alg L$ and assume that $\cnc(\overline G, a_0) \not\in \wlmclass{\gclass{acyc}, \wclass{all}}$.
Let $\cnc(\overline G, a_0) = \big((G', \lalg{CFG}^\emptyset), \walg K, \wt'\big)$ with $G' = (N', \Sigma', A_0', R')$.
Then there is a $d \in \T_{R'}$ which is not acyclic, i.e., there is a leaf $p \in \pos(d)$ such that~$p$ is cyclic.
Thus there are $i, j \in [|p|]$ with $i < j$ such that $d(p_i) = d(p_j)$.
By definition of $\cnc$, $[A_0,a_0]$ does not occur in the right-hand side of any rule in~$R'$, hence $p_i \not= \varepsilon$ and $\lhs(d(p_i)) = [A,t,a]$ for some $A \in N$, $t \in \T_\Sigma(N)$, and $a \in \alg L$.
Then, by Lemma~\ref{lem:cnc-semantics}, $\psi^{-1}(d|_{p_i})_{\alg L} = \psi^{-1}(d|_{p_j})_{\alg L} = a$, where~$\psi$ is defined as in the proof of Lemma~\ref{lem:cnc-weight-preserving}.
Let $d' = \psi^{-1}(d)$; we remark that $d' \in \T_R$.
Now there are $p, p' \in \pos(d')$ such that $p \not= p'$, $d'(p) = d'(p')$, and $(d'|_p)_{\alg L} = (d'|_{p'})_{\alg L}$, which contradicts the definition of~$\overline G$.
\end{proof}
\subsection{General statements about Algorithm~\ref{alg:mmonoid}}\label{app:vca-general}
\lemvisbigsum*
\begin{proof}
The proof is done by induction on~$n$.
For the induction base let $n = 0$.
Then for every $A \in N'$ we have that
\[ V_0(A) \overset{\text{Line~\ref{l:init-v}}}= \welem 0 = \bigoplus_{d \in \emptyset} \wtphom{d} \overset{\text{Line~\ref{l:init-v}}}= \bigoplus_{d \in \mathcal V_0(A)} \wtphom{d} \enspace. \]
For the induction step, let $n \in \mathbb N$.
We assume (IH) that for every $A \in N'$ it holds that $V_n(A) = \bigoplus_{d \in \mathcal V_n(A)} \wtphom{d}$.
Then for $\mathit{select}_n = A$,
\begin{align*}
V_{n+1}(A) &= \bigoplus_{\substack{r \in R: \\ r = (A \to \sigma(A_1, \dots, A_k))}} \wt'(r)\big(V_n(A_1), \dots, V_n(A_k)\big)
\tag{Observation~\ref{obs:v-nplus1}} \\
&= \bigoplus_{\substack{r \in R: \\ r = (A \to \sigma(A_1, \dots, A_k))}} \wt'(r)\left(\bigoplus_{d_1 \in \mathcal V_n(A_1)} \wtphom{d_1}, \dots, \bigoplus_{d_k \in \mathcal V_n(A_k)} \wtphom{d_k}\right)
\tag{IH} \\
&= \bigoplus_{\substack{r \in R: \\ r = (A \to \sigma(A_1, \dots, A_k))}} \quad \bigoplus_{d_1 \in \mathcal V_n(A_1), \dots, d_k \in \mathcal V_n(A_k)} \wt'(r)\left(\wtphom{d_1}, \dots, \wtphom{d_k}\right)
\tag{$\wt'(r)$ distributes over $\oplus$} \\
&= \bigoplus_{\substack{r \in R: \\ r = (A \to \sigma(A_1, \dots, A_k)) \\ d_1 \in \mathcal V_n(A_1), \dots, d_k \in \mathcal V_n(A_k), \\ d = r(d_1, \dots, d_k)}} \wtphom{d}
\\
&= \bigoplus_{d \in \mathcal V_{n+1}(A)} \wtphom{d} \enspace,
\tag{Observation~\ref{obs:v-nplus1}}
\end{align*}
and for every $A' \in N' \setminus \{ A \}$,
\begin{align*}
V_{n+1}(A') &= V_n(A')
\tag{Observation~\ref{obs:v-nplus1}} \\
&= \bigoplus_{d \in \mathcal V_n(A')} \wtphom{d}
\tag{IH} \\
&= \bigoplus_{d \in \mathcal V_{n+1}(A')} \wtphom{d} \enspace.
\tag*{(Observation~\ref{obs:v-nplus1}) \qedhere}
\end{align*}
\end{proof}
\lemmcvmonotone*
\begin{proof}
Let $n \in \mathbb N$, $A \in N'$, and $d \in \mathcal V_n(A)$.
We show that $d \in \mathcal V_{n'}(A)$ for each $n' \in \mathbb N$ with $n' > n$ by structural induction on~$d$.
For the induction base let $d \in R'$, then by lines~\ref{l:init-vnew}--\ref{l:update-vnew} and~\ref{l:update-v} we have that $d \in \mathcal V_n(\lhs(d))$ for every $n \in \mathbb N_+$.
Furthermore, by line~\ref{l:init-v}, $\mathcal V_0(A) = \emptyset$ for every $A \in N'$.
Therefore the implication holds.
For the induction step, let $d = r(d_1, \dots, d_k)$ and $r = \big( A \to \sigma(A_1, \dots, A_k) \big)$ with $k > 0$.
We assume (IH) that for every $i \in [k]$, $n, n' \in \mathbb N$, and $d_i \in (\T_{R'})_{A_i}$ with $n' > n$ the following holds: if $d_i \in \mathcal V_n(A_i)$, then $d_i \in \mathcal V_{n'}(A_i)$.
Now, if $d \in \mathcal V_n(A)$, then there is an $n_0 < n$ such that~$d$ is first added to $\mathcal V(A)$ in the $n_0$th iteration of the inner for loop.
Then by Observation~\ref{obs:v-nplus1}, $d_i \in \mathcal V_{n_0 + 1}(A_i)$ for every $i \in [k]$.
Then by~(IH), for every $i \in [k]$ and $n_0' \in \mathbb N$ with $n_0' > n_0$ it holds that $d_i \in \mathcal V_{n_0'}(A_i)$.
Thus for every $n' \geq n$, by Observation~\ref{obs:v-nplus1}, $d \in \mathcal V_{n'+1}(A)$.
\end{proof}
\subsection{Termination of Algorithm~\ref{alg:mmonoid}}\label{app:vca-termination}
\begin{lemma}\label{lem:subtrees-in-mcv}
For every $d \in \T_{R'}$, $n \in \mathbb N$, and $A \in N'$ the following holds: if $d \in \mathcal V_n(A)$, then for every $p \in \pos(d)$: $d|_p \in \mathcal V_n(\lhs(d(p)))$.
\end{lemma}
\begin{proof}
Let $n \in \mathbb N$, $A \in N'$, and $d \in \mathcal V_n(A)$.
We show that $d|_p \in \mathcal V_n(\lhs(d(p)))$ for each $p \in \pos(d)$ by structural induction on~$d$.
For the induction base, let $d \in R'$; then $\pos(d) = \{ \varepsilon \}$ and $d|_\varepsilon = d \in \mathcal V_n(A)$.
For the induction step, let $d = r(d_1, \dots, d_k)$ and $r = \big( A \to \sigma(A_1, \dots, A_k) \big)$ with $k > 0$.
We assume (IH) that for every $i \in [k]$, $n \in \mathbb N$, and $d_i \in (\T_{R'})_{A_i}$ the following holds: if $d_i \in \mathcal V_n(A_i)$, then for every $p \in \pos(d_i)$: $d_i|_p \in \mathcal V_n(\lhs(d_i(p)))$.
Now if $d \in \mathcal V_n(A)$, then there is an $n_0 < n$ such that~$d$ is first added to $\mathcal V(A)$ in the $n_0$th iteration of the inner for loop.
Then by Observation~\ref{obs:v-nplus1}, $d_i \in \mathcal V_{n_0 + 1}(A_i)$ for every $i \in [k]$.
Furthermore, by Lemma~\ref{lem:mcv-monotone}, $d_i \in \mathcal V_n(A_i)$ for every $i \in [k]$.
Now for every $p \in \pos(d)$ with $p = ip'$ for some $i \in [k]$, the statement of the lemma follows from~(IH), and for $p = \varepsilon$ it trivially holds.
\end{proof}
\begin{lemma}\label{lem:cutout-tree-growth}
For every $d \in \T_{R'}$, $n, l \in \mathbb N$, and elementary cycle $w \in (R')^*$ the following holds:
if there are $p, p' \in \pos(d)$ such that $p \prefof p'$, $\seq(d, p, p') = w$, and $d|_{p_{1 \isep |p| - l}} \in \mathcal V_n(\lhs(d|_{p_{1 \isep |p| - l}}(\varepsilon)))$, then $(d[d|_{p'}]_{p})|_{p_{1 \isep |p| - l}} \in \mathcal V_n(\lhs(d|_{p_{1 \isep |p| - l}}(\varepsilon)))$.
\end{lemma}
\begin{proof}
Let $d \in \T_{R'}$, $n, l \in \mathbb N$, $w \in (R')^*$ be an elementary cycle, and $p, p' \in \pos(d)$ such that $p \prefof p'$, $\seq(d, p, p') = w$, and $d|_{p_{1 \isep |p| - l}} \in \mathcal V_n(\lhs(d|_{p_{1 \isep |p| - l}}(\varepsilon)))$.
We show that $(d[d|_{p'}]_{p})|_{p_{1 \isep |p| - l}} \in \mathcal V_n(\lhs(d|_{p_{1 \isep |p| - l}}(\varepsilon)))$ by induction on~$l$.
For the induction base, let $l = 0$.
We remark that $d(p) = d(p')$.
Then, since $d|_{p_{1 \isep |p| - l}} = d|_p \in \mathcal V_n(\lhs(d(p)))$, we have that $(d[d|_{p'}]_{p})|_{p_{1 \isep |p| - l}} = d|_{p'} \in \mathcal V_n(\lhs(d(p)))$ by Lemma~\ref{lem:subtrees-in-mcv}.
For the induction step, let $l \in \mathbb N$.
We assume (IH) that for every $n \in \mathbb N$ the following holds:
if $d|_{p_{1 \isep |p| - l}} \in \mathcal V_n(\lhs(d|_{p_{1 \isep |p| - l}}(\varepsilon)))$, then $(d[d|_{p'}]_{p})|_{p_{1 \isep |p| - l}} \in \mathcal V_n(\lhs(d|_{p_{1 \isep |p| - l}}(\varepsilon)))$.
Now we distinguish two cases.
\begin{enumerate}
\item If $l \ge |p|$, then $d|_{p_{1 \isep |p| - l}} = d = d|_{p_{1 \isep |p| - (l+1)}}$.
Thus $d|_{p_{1 \isep |p| - (l+1)}} \in \mathcal V_n(\lhs(d|_{p_{1 \isep |p| - (l+1)}}(\varepsilon)))$ implies $(d[d|_{p'}]_{p})|_{p_{1 \isep |p| - (l+1)}} \in \mathcal V_n(\lhs(d|_{p_{1 \isep |p| - (l+1)}}(\varepsilon)))$ by (IH).
\item Otherwise, we let $n_0 \in \mathbb N$ such that $d|_{p_{1 \isep |p| - (l+1)}}$ is first added to $\mathcal V(\lhs(d|_{p_{1 \isep |p| - (l+1)}}(\varepsilon)))$ in the $n_0$th iteration of the inner for loop.
If $d|_{p_{1 \isep |p| - (l+1)}} \in \mathcal V_n(\lhs(d|_{p_{1 \isep |p| - (l+1)}}(\varepsilon)))$, then $n_0 < n$.
Then by Lemma~\ref{lem:subtrees-in-mcv}, $d|_{p_{1 \isep |p| - l}} \in \mathcal V_{n_0 + 1}(\lhs(d|_{p_{1 \isep |p| - l}}(\varepsilon)))$.
Then by (IH), $(d[d|_{p'}]_{p})|_{p_{1 \isep |p| - l}} \in \mathcal V_{n_0 + 1}(\lhs(d|_{p_{1 \isep |p| - l}}(\varepsilon)))$ and by Observation~\ref{obs:v-nplus1}, $(d[d|_{p'}]_{p})|_{p_{1 \isep |p| - (l+1)}} \hspace{-1mm}\in \mathcal V_{n_0 + 1}(\lhs(d|_{p_{1 \isep |p| - (l+1)}}(\varepsilon)))$.
Finally, by Lemma~\ref{lem:mcv-monotone}, $(d[d|_{p'}]_{p})|_{p_{1 \isep |p| - (l+1)}} \in \mathcal V_n(\lhs(d|_{p_{1 \isep |p| - (l+1)}}(\varepsilon)))$. \qedhere
\end{enumerate}
\end{proof}
\begin{lemma}\label{lem:cut-cycle'}
For every $d \in \T_{R'}, n \in \mathbb N$, $A \in N'$, and elementary cycle $w \in (R')^*$ the following holds:
if $d \in \mathcal V_n(A)$ and there are $p, p' \in \pos(d)$ such that $p \prefof p'$ and $\seq(d, p, p') = w$, then $\cotrees(d, w) \subseteq \mathcal V_n(A)$.
\end{lemma}
\begin{proof}
Let $d \in \T_{R'}$, $n \in \mathbb N$, $A \in N'$, and $w \in (R')^*$ such that there are $p, p' \in \pos(d)$ with $p \prefof p'$, $\seq(d, p, p') = w$, $w$ is an elementary cycle, and $d \in \mathcal V_n(A)$.
We show that for every $l \in \mathbb N$ and $d' \in \T_{R'}$ with $d \nwtrans{l} d'$ it holds that $d' \in \mathcal V_n(A)$ by induction on~$l$.
For the induction base, let $l = 0$.
Then for every $d' \in \T_{R'}$ with $d \nwtrans{0} d'$ it holds that $d' = d$, and $d \in \mathcal V_n(A)$ by definition.
For the induction step, let $l \in \mathbb N$.
We assume (IH) that for every $d' \in \T_{R'}$, $n \in \mathbb N$, and $A \in N'$ with $d \nwtrans{l} d'$ the following holds:
if $d \in \mathcal V_n(A)$, then $d' \in \mathcal V_n(A)$.
Now let $d'' \in \T_{R'}$ such that $d \nwtrans{l+1} d''$.
Then there is a $d' \in \T_{R'}$ such that $d \nwtrans{l} d'$ and $d' \letvdash{w} d''$.
Then, if $d \in \mathcal V_n(A)$, we have that $d' \in \mathcal V_n(A)$ by (IH).
Moreover, there are $p, p' \in \pos(d')$ such that $\seq(d', p, p') = w$ and $d'[d'|_{p'}]_p = d''$.
Thus, by Lemma~\ref{lem:cutout-tree-growth}, $d'' \in \mathcal V_n(A)$.
Now, as for every $l \in \mathbb N$ and $d' \in \T_{R'}$ with $d \nwtrans{l} d'$ it holds that $d \in \mathcal V_n(A)$ implies $d' \in \mathcal V_n(A)$, we obtain that for every $d' \in \T_{R'}$ with $d \letvdash{w}^+ d'$ the same implication holds.
Thus $d' \in \mathcal V_n(A)$ for every $d' \in \cotrees(d, w)$.
\end{proof}
\lemcutcycles*
\begin{proof}
This is a consequence of Lemma~\ref{lem:cut-cycle'}.
\end{proof}
\begin{lemma}\label{lem:outside-tree}
For every $n \in \mathbb N$ the following holds:
if $\Delta_n(A) \cap \T_{R'}^{(c)} = \emptyset$, then $V_{n+1}(A) = V_n(A)$, where $\mathit{select}_n = A$.
\end{lemma}
\begin{proof}
Let $n \in \mathbb N$ and $\mathit{select}_n = A$.
Then $\Delta_n(A) = \emptyset$ or $\Delta_n(A) \subseteq \T_{R'} \setminus \T_{R'}^{(c)}$.
For every $d \in \Delta_n(A)$ we have that $d \in \mathcal V_{n+1}(A)$ and thus, by Lemma~\ref{lem:cut-cycles},
\begin{align*}
\cotrees(d) \cap \T_{R'}^{(c)} &\subseteq \mathcal V_{n+1}(A) \cap \T_{R'}^{(c)} \\
&= \big( \mathcal V_n(A) \mathbin{\dot{\cup}} \Delta_n(A) \big) \cap \T_{R'}^{(c)}
\tag{Lemma~\ref{lem:mcv-monotone}} \\
&= \big( \mathcal V_n(A) \cap \T_{R'}^{(c)} \big) \mathbin{\dot{\cup}} \big( \underbrace{\Delta_n(A) \cap \T_{R'}^{(c)}}_{= \, \emptyset} \big)
\tag{distributivity of~$\cap$ over~$\mathbin{\dot{\cup}}$} \\
&= \mathcal V_n(A) \cap \T_{R'}^{(c)} \\
&\subseteq \mathcal V_n(A) \enspace.
\end{align*}
Furthermore, by Lemma~\ref{lem:mcv-monotone}, $\mathcal V_n(A) \subseteq \mathcal V_{n+1}(A)$, and since $\Delta_n(A) = \emptyset$ or $\Delta_n(A) \subseteq \T_{R'} \setminus \T_{R'}^{(c)}$,
\begin{align*}
V_{n+1}(A) = \bigoplus_{d \in \mathcal V_{n+1}(A)} \wtphom{d} &= \bigoplus_{d \in \mathcal V_n(A)} \wtphom{d} \oplus \bigoplus_{d \in \Delta_n(A)} \wtphom{d}
\tag{Lemma~\ref{lem:v-is-bigsum}} \\
&= \bigoplus_{d \in \mathcal V_n(A)} \wtphom{d}
\tag{Theorem~\ref{thm:outside-trees-subsumed}} \\
&= V_n(A) \enspace. \tag*{(Lemma~\ref{lem:v-is-bigsum}) \qedhere}
\end{align*}
\end{proof}
\lemmcvgrowsonchange*
\begin{proof}
Let $n \in \mathbb N$ and $A \in N'$.
If $V_{n+1}(A) \not= V_n(A)$, then we obtain $\mathit{select}_n = A$ from Observation~\ref{obs:v-nplus1}.
Then by Lemma~\ref{lem:outside-tree}, $\Delta_n(A) \cap \T_{R'}^{(c)} \not= \emptyset$.
Furthermore, by Lemma~\ref{lem:mcv-monotone}, $\mathcal V_n(A) \subseteq \mathcal V_{n+1}(A)$.
Thus
\begin{align*}
\mathcal V_{n+1}(A) \cap \T_{R'}^{(c)} &= \big(\mathcal V_n(A) \mathbin{\dot{\cup}} \Delta_n(A)\big) \cap \T_{R'}^{(c)}
\\
&= (\mathcal V_n(A) \cap \T_{R'}^{(c)}) \mathbin{\dot{\cup}} \underbrace{\big(\Delta_n(A) \cap \T_{R'}^{(c)}\big)}_{\not= \, \emptyset}
\\
&\supset \mathcal V_n(A) \cap \T_{R'}^{(c)} \enspace. \qedhere
\end{align*}
\end{proof}
\subsection{Correctness of Algorithm~\ref{alg:mmonoid}}\label{app:vca-correctness}
\lempassthrough*
\begin{proof}
Let $n \in \mathbb N$, $d \in \T_{R'}^{(c)}$ of the form $r(d_1, \dots, d_k)$ with $r = \big(A \to \sigma(A_1, \dots, A_k)\big)$, $\welem k_1, \dots, \welem k_k \in \walg K$, and $I \in [k]$ such that $\mathit{select}_n = A$, for every $i \in [k] \setminus I$, $d_i \in \mathcal V_n(A_i)$, and for every $i \in I$, $V_n(A_i) = V_n(A_i) \oplus \welem k_i$.
Then by Observation~\ref{obs:v-nplus1}
\begin{align}\label{eq:through-outer}
V_{n+1}(A) &= \bigoplus_{\substack{r' \in R': \\ r' = (B \to \sigma(B_1, \dots, B_{k'}))}} \wt'(r')\big(V_n(B_1), \dots, V_n(B_{k'})\big)
\nonumber \\
&= \bigoplus_{\substack{r' \in R' \setminus \{ r \}: \\ r' = (B \to \sigma(B_1, \dots, B_{k'}))}} \wt'(r')\big(V_n(B_1), \dots, V_n(B_{k'})\big) \oplus \wt'(r)\big(V_n(A_1), \dots, V_n(A_k)\big) \enspace.
\end{align}
Now for each $i \in [k]$, we let $\welem l_i \in \walg K$ be as in the statement of the lemma and define the set
\[ S_i = \begin{cases}
\mathcal V_n(A_i)
&\text{if $i \in I$} \\
\{ d_i \}
&\text{otherwise.}
\end{cases} \]
Then by Lemma~\ref{lem:v-is-bigsum} and distributivity of $\wt'(r)$ over $\oplus$
\begin{align}\label{eq:through-inner}
&\wt'(r)\big(V_n(A_1), \dots, V_n(A_k)\big)
\nonumber \\
&= \bigoplus_{(d_1', \dots, d_k') \in \mathcal V_n(A_1) \times \dots \times \mathcal V_n(A_k)} \wt'(r)\big(\wtphom{d_1'}, \dots, \wtphom{d_k'}\big)
\nonumber \\
&= \begin{aligned}[t]
&\bigoplus_{(d_1', \dots, d_k') \in \mathcal V_n(A_1) \times \dots \times \mathcal V_n(A_k) \setminus S_1 \times \dots \times S_k} \wt'(r)\big(\wtphom{d_1'}, \dots, \wtphom{d_k'}\big) \\
&\oplus \bigoplus_{(d_1', \dots, d_k') \in S_1 \times \dots \times S_k} \wt'(r)\big(\wtphom{d_1'}, \dots, \wtphom{d_k'}\big) \enspace.
\end{aligned}
\end{align}
Now
\begin{align*}
\bigoplus_{(d_1', \dots, d_k') \in S_1 \times \dots \times S_k} \wt'(r)\big(\wtphom{d_1'}, \dots, \wtphom{d_k'}\big) &= \wt'(r)(U_1, \dots, U_k)
\intertext{where for every $i \in [k]$, if $i \in I$, then $U_i = V_n(A_i)$, else $U_i = \wtphom{d_i}$,}
&= \wt'(r)(U_1', \dots, U_k')
\intertext{where for every $i \in [k]$, if $i \in I$, then $U_i' = V_n(A_i) \oplus \welem k_i$, else $U_i' = \wtphom{d_i}$,}
&= \wt'(r)(U_1, \dots, U_k) \oplus \wt'(r)(\welem l_1, \dots, \welem l_k) \enspace.
\end{align*}
Thus we obtain by Equation~\ref{eq:through-inner}
\begin{align*}
&\wt'(r)\big(V_n(A_1), \dots, V_n(A_k)\big)
\\
&= \begin{aligned}[t]
&\bigoplus_{(d_1', \dots, d_k') \in \mathcal V_n(A_1) \times \dots \times \mathcal V_n(A_k) \setminus S_1 \times \dots \times S_k} \wt'(r)\big(\wtphom{d_1'}, \dots, \wtphom{d_k'}\big) \\
&\oplus \wt'(r)(U_1, \dots, U_k) \oplus \wt'(r)(\welem l_1, \dots, \welem l_k)
\end{aligned}
\\
&= \begin{aligned}[t]
&\bigoplus_{(d_1', \dots, d_k') \in \mathcal V_n(A_1) \times \dots \times \mathcal V_n(A_k) \setminus S_1 \times \dots \times S_k} \wt'(r)\big(\wtphom{d_1'}, \dots, \wtphom{d_k'}\big) \\
&\oplus \bigoplus_{(d_1', \dots, d_k') \in S_1 \times \dots \times S_k} \wt'(r)\big(\wtphom{d_1'}, \dots, \wtphom{d_k'}\big) \oplus \wt'(r)(\welem l_1, \dots, \welem l_k)
\end{aligned}
\\
&= \wt'(r)\big(V_n(A_1), \dots, V_n(A_k)\big) \oplus \wt'(r)(\welem l_1, \dots, \welem l_k) \enspace.
\end{align*}
Finally, by Equation~\ref{eq:through-outer}, we obtain
\begin{align*}
V_{n+1}(A) &= \begin{aligned}[t]
&\bigoplus_{\substack{r' \in R' \setminus \{ r \}: \\ r' = (B \to \sigma(B_1, \dots, B_{k'}))}} \wt'(r')\big(V_n(B_1), \dots, V_n(B_{k'})\big) \\
&\oplus \wt'(r)\big(V_n(A_1), \dots, V_n(A_k)\big) \oplus \wt'(r)(\welem l_1, \dots, \welem l_k)
\end{aligned}
\\
&= \bigoplus_{\substack{r' \in R': \\ r' = (B \to \sigma(B_1, \dots, B_{k'}))}} \wt'(r')\big(V_n(B_1), \dots, V_n(B_{k'})\big) \oplus \wt'(r)(\welem l_1, \dots, \welem l_k)
\\
&= V_{n+1}(A) \oplus \wt'(r)(\welem l_1, \dots, \welem l_k) \enspace. \tag*{(Observation~\ref{obs:v-nplus1}) \qedhere}
\end{align*}
\end{proof}
\subsection{Termination and correctness of the M-monoid parsing algorithm}\label{app:mpa-properties}
In this subappendix we give a full proof of Lemma~\ref{lem:1and2-closed}.
For this, we need a few auxiliary definitions and lemmas.
Let $a$ be a syntactic object and $\overline G = \big((G, \alg L), \walg K, \wt\big)$ be a wRTG-LM with $G = (N, \Sigma, A_0, R)$ such that $\alg L$ is finitely decomposable and $\cnc(\overline G, a) = \big((G', \lalg{CFG}^\emptyset), \walg K, \wt'\big)$ with $G' = (N', \Sigma', A_0', R')$.
We define a partial mapping $\psi: R' \to\hspace{-2mm}\shortmid \hspace{2mm}\! R$ such that for each $r = \big([A,t,a_0] \to \langle x_1 \dots x_k \rangle([A_1,t_1,a_1], \dots, [A_k,t_k,a_k])\big)$ in $R'$, $\psi(r) = A \to t$.
Furthermore, we define the mapping $\psi': \T_{R'} \to \T_R$ such that for each $d \in \T_{R'}$, if $\lhs(d(\varepsilon)) = \big([A_0,a] \to \langle x_1 \rangle([A_0,t,a])\big)$ for some $t \in \T_{\Sigma'}(N)$, then $\psi'(d) = \overline{\psi}(d|_1)$, and otherwise $\psi'(d) = \overline{\psi}(d)$, where $\overline{\psi}$ is the $N$-sorted tree relabeling induced by $\psi$.
We also let $\psi(w) = \psi(w_1) \dots \psi(w_{|w|})$ for every $w \in (R')^*$.
\begin{lemma}\label{lem:psi-weight}
For every $d \in \T_{R'}$ it holds that $\wt'(d)_{\walg K} = \wthom{\psi'(d)}$.
\end{lemma}
\begin{proof}
Let $d \in \T_{R'}$.
We let $d' = d|_1$ if there is a $t \in \T_{\Sigma'}(N)$ such that $\lhs(d(\varepsilon)) = \big([A_0,a] \to \langle x_1 \rangle([A_0,t,a])\big)$ and otherwise, $d' = d$.
We note that if $d' = d|_1$, then $\wt(d(\varepsilon)) = \id$ by definition of $\cnc$ and thus $\wthom{d} = \wthom{d'}$ in both cases.
Now, by definition of $\cnc$, for every $p \in \pos(d')$ it holds that $\wt'(d'(p)) = \wt(\overline{\psi}(d')(p))$ and hence $\wt'(d')_{\walg K} = \wthom{\overline{\psi}(d')} = \wthom{\psi'(d)}$.
\end{proof}
\begin{lemma}\label{lem:psi-cutout}
For every $c \in \mathbb N$, $d \in \T_{R'}$ and elementary cycle $w \in (R')^*$ such that there is a leaf $p \in \pos(d)$ which is $(c+1,w)$-cyclic the following holds:
$\psi'(\cotrees(d, w)) = \cotrees(\psi'(d), \psi(w))$.
\end{lemma}
\begin{proof}
Let $c \in \mathbb N$, $d \in \T_{R'}$ and $w \in (R')^*$ be an elementary cycle such that there is a leaf $p \in \pos(d)$ which is $(c+1,w)$-cyclic.
Then
\begin{align*}
d' \in \psi'(\cotrees(d, w)) &\iff \exists d'' \in \T_{R'}: d \letvdash{w}^+ d'' \land \psi'(d'') = d' \\
&\iff \exists d'' \in \T_{R'}, p, p' \in \pos(d): \seq(d, p, p') = w \land d[d|_{p'}]_p = d'' \land \psi'(d'') = d' \\
&\iff \exists p, p' \in \pos(\psi'(d)): \seq(\psi'(d), p, p') = \psi(w) \land \psi'(d)[\psi'(d)|_{p'}]_p = d' \\
&\iff \psi'(d) \letvdash{w}^+ d' \\
&\iff d' \in \cotrees(\psi'(d), \psi(w)) \enspace. \qedhere
\end{align*}
\end{proof}
\begin{lemma}\label{lem:psi-preserves-cycles}
For every $c \in \mathbb N$, $d \in \T_{R'}$, and $w \in (R')^*$ the following holds:
if there is a leaf $p \in \pos(d)$ which is $(c+1,w)$-cyclic, then there are a $c' \in \mathbb N$ and a leaf $p' \in \pos(\psi'(d))$ which is $(c',\psi(w))$-cyclic and $c' > c + 1$.
\end{lemma}
\begin{proof}
Let $c \in \mathbb N$, $d \in \T_{R'}$, and $w \in (R')^*$ such that there is a leaf $p \in \pos(d)$ which is $(c+1,w)$-cyclic.
Then there are $v_0, \dots, v_{c+1} \in (R')^*$ such that $\seq(d, p) = v_0 w v_1 \dots w v_{c+1}$ and for every $i \in [0, c+1]$, $w$ is not a substring of $v_i$.
If $\lhs(d(\varepsilon)) = \big([A_0,a] \to \langle x_1 \rangle([A_0,t,a])\big)$ for some $t \in \T_{\Sigma'}(N)$, then we let $p' = 1p$ and $p_0 = 1$ and otherwise, we let $p' = p$ and $p_0 = \varepsilon$.
Then $\seq(\psi'(d), p_0, p') = \psi(v_0) \psi(w) \psi(v_1) \dots \psi(w) \psi(v_{c+1})$.
Furthermore, for every $i \in [0, c+1]$, $\psi(v_i) = v_{i,0} \psi(w) v_{i,1} \dots \psi(w) v_{i,c_i}$ with $c_i \in \mathbb N$ and for every $j \in [0, c_i]$, $v_{i,j} \in R^*$ and $\psi(w)$ is not a substring of $v_{i,j}$.
Thus the leaf $p'$ of $\psi'(d)$ is $(c', \psi(w))$-cyclic for $c' = c + 1 + \sum_{i \in [0, c+1]} c_i$.
\end{proof}
\lemclosedpreserved*
\begin{proof}
Let $a$ be a syntactic object and $\overline G = \big((G, \alg L), \walg K, \wt\big)$ be a wRTG-LM with $G = (N, \Sigma, A_0, R)$ such that $\alg L$ is finitely decomposable and $\cnc(\overline G, a) = \big((G', \lalg{CFG}^\emptyset), \walg K, \wt'\big)$ with $G' = (N', \Sigma', A_0', R')$.
If there is a $c \in \mathbb N$ such that $\overline G$ is $c$-closed, then for every $d \in \T_{R'}$ and elementary cycle $w \in (R')^*$ such that there is a leaf $p \in \pos(d)$ which is $(c+1,w)$-cyclic
\begin{align*}
\wt'(d)_{\walg K} \oplus \bigoplus_{d' \in \cotrees(d, w)} \wt'(d')_{\walg K} &= \wthom{\psi'(d)} \oplus \bigoplus_{d' \in \cotrees(d, w)} \wthom{\psi'(d')} \tag{Lemma~\ref{lem:psi-weight}} \\
&= \wthom{\psi'(d)} \oplus \bigoplus_{d' \in \cotrees(\psi'(d), \psi(w))} \wthom{d'} \tag{Lemma~\ref{lem:psi-cutout}} \\
&= \bigoplus_{d' \in \cotrees(\psi'(d), \psi(w))} \wthom{d'} \tag{Lemma~\ref{lem:closed-bigger-trees'}} \\
&= \bigoplus_{d' \in \cotrees(d, w)} \wthom{\psi'(d')} \tag{Lemma~\ref{lem:psi-cutout}} \\
&= \bigoplus_{d' \in \cotrees(d, w)} \wt'(d)_{\walg K} \enspace. \tag{Lemma~\ref{lem:psi-weight}}
\end{align*}
We note that Lemma~\ref{lem:closed-bigger-trees'} can be applied due to Lemma~\ref{lem:psi-preserves-cycles}.
If $\overline G$ is nonlooping and the weight algebra of $\overline G$ is in $\wclass{\operatorname{d-comp}} \cap \wclass{dist}$, then, by Lemma~\ref{lem:no-loops-cnc-acyclic}, $\cnc(\overline G, a)$ is in $\wlmclass{\gclass{acyc}, \wclass{\operatorname{d-comp}} \cap \wclass{dist}}$ for every syntactic object $a$.
Then, by Lemma~\ref{lem:acyclic-closed}, it is also closed.
\end{proof}
\subsection{Application scenarios employ closed wRTG-LMs}\label{app:applications}
This subappendix contains the full proofs of Theorem~\ref{thm:applications} and Theorem~\ref{thm:applications2}.
\begin{lemma}[restate={[name={}]lemfiniteclosed}]\label{lem:finite-closed}
Every wRTG-LM in $\wlmclass{\gclass{all}, \wclass{fin, id, \preceq}}$ is closed.
\end{lemma}
\begin{proof}
Let $\overline G = \big((G, \alg L), \walg K, \wt\big)$ in $\wlmclass{\gclass{all}, \wclass{fin, id, \preceq}}$ with $G = (N, \Sigma, A_0, R)$.
Then $\walg K$ is finite and idempotent and there is a partial order $(\walg K, \preceq)$ such that for every $k \in \mathbb N$, $\omega \in \Omega_k$, and $\welem k_1, \dots, \welem k_k \in \walg K$: $\maxord \{ \welem k_1, \dots, \welem k_k \} \preceq \omega(\welem k_1, \dots, \welem k_k)$.
We show that $\overline G$ is $|\walg K|$-closed.
For this, let $n = |\walg K| + 1$, $G = (N, \Sigma, A_0, R)$, $d \in \T_R$, $p \in \pos(d)$ be a leaf, and $w \in R^*$ be an elementary cycle such that~$p$ is $(n, w)$-cyclic.
Then there are $v_0, \dots, v_n \in R^*$ such that for every $i \in [0,n]$, $w$ is not a substring of $v_i$ and $\seq(d,p) = v_0 w v_1 \dots w v_n$.
We let $w = r_1 \dots r_m$ with $r_i \in R$ for every $i \in [m]$ and $r_m = (A \to t)$ with $\yield_N(t) = A_1 \dots A_k$ and $A_i \in N$ for every $i \in [k]$.
We consider the set $D = \{ d|_{p_{1 \isep j}} \mid i \in [n], j = \sum_{l=0}^{i-1} |v_l| + i \cdot |w| \}$.
Since $|D| = n = |\walg K| + 1 > |\walg K|$, by the pigeonhole principle there are $d_1, d_2 \in D$ such that $d_1 \not= d_2$ and $\wthom{d_1} = \wthom{d_2}$.
Let $i, j \in [n]$ and $i', j' \in \mathbb N$ such that $i' = \sum_{l=0}^{i-1} |v_l| + i \cdot |w|$, $j' = \sum_{l=0}^{j-1} |v_l| + j \cdot |w|$, $d_1 = d|_{p_{1 \isep i'}}$, and $d_2 = d|_{p_{1 \isep j'}}$.
Without loss of generality, we assume that $i < j$.
Let $q = j' - |w| + 1$.
By Lemma~\ref{lem:po-chains}, $d|_{p_{1 \isep q}} = d|_{p_{1 \isep j'}}$.
We let $s \in [k]$ such that $p_q = s$.
Then
\begin{align*}
\wthom{d} &= \wt(d)[x_{s, A_s}]_{p_{1 \isep q}} \left( \wthom{d|_{p_{1 \isep q}}} \right)_{\walg K} \\
&= \wt(d)[x_{s, A_s}]_{p_{1 \isep q}} \left( \wthom{d_2} \right)_{\walg K} \\
&= \wthom{d[d_2]_{p_{1 \isep q}}} \enspace. \tag{$d|_{p_{1 \isep q}}(\varepsilon) = d_2(\varepsilon)$}
\end{align*}
Then, as $\walg K$ is idempotent,
\[ \wthom{d} \oplus \wthom{d[d_2]_{p_{1 \isep q}}} = \wthom{d[d_2]_{p_{1 \isep q}}} \enspace. \]
Finally, as $d[d_2]_{p_{1 \isep q}} \in \cotrees(d, w)$, we have that
\[ \wthom{d} \oplus \bigoplus_{d' \in \cotrees(d, w)} \wthom{d'} = \bigoplus_{d' \in \cotrees(d, w)} \wthom{d'} \]
and thus $\overline G$ is $|\walg K|$-closed.
\end{proof}
\begin{lemma}[restate={[name={}]lemsupclosed}]\label{lem:sup-closed}
Every wRTG-LM in $\wlmclass{\gclass{all}, \wclass{sup}}$ is closed.
\end{lemma}
\begin{proof}
Let $\overline G = \big((G, \alg L), \walg K, \wt\big)$ in $\wlmclass{\gclass{all}, \wclass{sup}}$.
We show that $\overline G$ is $0$-closed.
For this, let $G = (N, \Sigma, A_0, R)$, $d \in \T_R$, $p \in \pos(d)$ be a leaf, and $w \in R^*$ be an elementary cycle such that~$p$ is $(1, w)$-cyclic.
Then there are $v_0, v_1 \in R^*$ such that $w$ is not a substring of $v_0$ or $v_1$ and $\seq(d, p) = v_0 w v_1$.
We let $v_0 = r_1 \dots r_m$, $r_m = (A \to t)$ with $\yield_N(t) = A_1 \dots A_k$ and $A_i \in N$ for every $i \in [k]$, $s \in [k]$ such that $p_{m+1} = s$, and $d' = d[d|_{p_{1 \isep m + |w|}}]_{p_{1 \isep m + 1}}$.
We will show that $\wthom{d} \oplus \wthom{d'} = \wthom{d'}$.
First, we let $\wthom{d|_{p_{1 \isep m + |w|}}} = \welem k$ and $\wthom{d|_{p_{1 \isep m + 1}}} = \welem k'$ (we are not interested in the particular value).
As $\walg K$ is superior, we have that $\welem k \preceq_\oplus \welem k'$.
Thus $\wthom{d|_{p_{1 \isep m + 1}}} \oplus \wthom{d|_{p_{1 \isep m + |w|}}} = \wthom{d|_{p_{1 \isep m + |w|}}}$.
Therefore
\begin{align*}
&\wthom{d} \oplus \wthom{d'} \\
&= (d[x_{s, A_s}]_{p_{1 \isep m + 1}})\left(\wthom{d|_{p_{1 \isep m + 1}}} \oplus \wthom{d|_{p_{1 \isep m + |w|}}}\right)_{\walg K}
\tag{distributivity of $\Omega$ over $\oplus$} \\
&= (d[x_{s, A_s}]_{p_{1 \isep m + 1}})\left(\wthom{d|_{p_{1 \isep m + |w|}}}\right)_{\walg K} \\
&= \wthom{d'} \enspace.
\end{align*}
Now, as $d' \in \cotrees(d, w)$, this entails that
\[ \wthom{d} \oplus \bigoplus_{d'' \in \cotrees(d, w)} \wthom{d''} = \bigoplus_{d'' \in \cotrees(d, w)} \wthom{d''} \]
and thus~$\overline G$ is 0-closed.
\end{proof}
\begin{lemma}[restate={[name={}]lemacyclicclosed}]\label{lem:acyclic-closed}
Every wRTG-LM in $\wlmclass{\gclass{acyc}, \wclass{\operatorname{d-comp}} \cap \wclass{dist}}$ is closed.
\end{lemma}
\begin{proof}
Let $\overline G = \big((G, \alg L), \walg K, \wt\big) \in \wlmclass{\gclass{acyc}, \wclass{\operatorname{d-comp}} \cap \wclass{dist}}$ with $G = (N, \Sigma, A_0, R)$.
Since every $d \in \T_R$ is acyclic we have that $\T_R = \T_R^{(0)}$.
Thus, by the definition of closedness, $\overline G$ is $0$-closed.
\end{proof}
\thmapplications*
\begin{proof}
This is a consequence of Lemmas~\ref{lem:finite-closed},~\ref{lem:sup-closed}, and~\ref{lem:acyclic-closed}.
\end{proof}
\begin{lemma}[restate={[name={}]lembdclosed}]\label{lem:bd-closed}
Every wRTG-LM in $\wlmclass[<1]{\gclass{all}, \walg{BD}}$ is closed.
\end{lemma}
\begin{proof}
Let $\overline G = \big((G, \alg L), \walg{BD}, \wt\big)$ in $\wlmclass[<1]{\gclass{all}, \walg{BD}}$.
We show that $\overline G$ is $0$-closed.
For this, let $G = (N, \Sigma, A_0, R)$, $d \in \T_R$, $p \in \pos(d)$ be a leaf, and $w \in R^*$ be an elementary cycle such that~$p$ is $(1, w)$-cyclic.
Then there are $v_0, v_1 \in R^*$ such that $w$ is not a substring of $v_0$ or $v_1$ and $\seq(d, p) = v_0 w v_1$.
We let $v_0 = r_1 \dots r_m$, $r_m = (A \to t)$ with $\yield_N(t) = A_1 \dots A_k$ and $A_i \in N$ for every $i \in [k]$, $s \in [k]$ such that $p_{m+1} = s$, and $d' = d[d|_{p_{1 \isep m + |w|}}]_{p_{1 \isep m + 1}}$.
We will show that $\maxv\bigl(\wthom[\walg{BD}]{d}, \wthom[\walg{BD}]{d'}\bigr) = \wthom[\walg{BD}]{d'}$.
First, we let $\wthom[\walg{BD}]{d|_{p_{1 \isep m + |w|}}} = (q, D)$.
Then $\wthom[\walg{BD}]{d|_{p_{1 \isep m + 1}}} = (q', D')$, where~$q'$ is a product of~$q$ and other elements from~$\mathbb R_0^1$ and $D' \in \mathcal P(\T_R)$ (we are not interested in the particular values).
Since by definition of $\wlmclass[<1]{\gclass{all}, \walg{BD}}$ each factor of that product is less than~$1$, we have that $q' < q$ by monotonicity of~$\cdot$ in ${\mathbb R_0^1}$.
Thus $\maxv\Bigl(\wthom[\walg{BD}]{d|_{p_{1 \isep m + 1}}}, \wthom[\walg{BD}]{d|_{p_{1 \isep m + |w|}}}\Bigr) = \wthom[\walg{BD}]{d|_{p_{1 \isep m + |w|}}}$.
Therefore
\begin{align*}
&\maxv\bigl(\wthom[\walg{BD}]{d}, \wthom[\walg{BD}]{d'}\bigr) \\
&= (d[x_{s, A_s}]_{p_{1 \isep m + 1}})\left(\maxv\Bigl(\wthom[\walg{BD}]{d|_{p_{1 \isep m + 1}}}, \wthom[\walg{BD}]{d|_{p_{1 \isep m + |w|}}}\Bigr)\right)_{\walg{BD}}
\tag{distributivity of $\Omegav$ over $\oplus$} \\
&= (d[x_{s, A_s}]_{p_{1 \isep m + 1}})\left(\wthom[\walg{BD}]{d|_{p_{1 \isep m + |w|}}}\right)_{\walg{BD}} \\
&= \wthom[\walg{BD}]{d'} \enspace.
\end{align*}
Now, as $d' \in \cotrees(d, w)$, this entails that
\[ \maxv\bigl(\wthom[\walg{BD}]{d}, \maxv_{d'' \in \cotrees(d, w)} \wthom[\walg{BD}]{d''}\bigr) = \maxv_{d'' \in \cotrees(d, w)} \wthom[\walg{BD}]{d''} \]
and thus~$\overline G$ is 0-closed.
\end{proof}
\begin{lemma}[restate={[name={}]lemnbestclosed}]\label{lem:nbest-closed}
Every wRTG-LM in $\wlmclass{\gclass{all}, \nbest}$ is closed.
\end{lemma}
\begin{proof}
Let $n \in \mathbb N$ and $\overline G = \big((G, \alg L), \nbest, \wt\big)$ in $\wlmclass{\gclass{all}, \nbest}$.
We show that $\overline G$ is $(n-1)$-closed.
For this, let $G = (N, \Sigma, A_0, R)$, $d \in \T_R$, $p \in \pos(d)$ be a leaf, and $w \in R^*$ be an elementary cycle such that~$p$ is $(n, w)$-cyclic.
Then there are $v_0, \dots, v_n \in R^*$ such that for every $i \in [0,n]$, $w$ is not a substring of $v_i$ and $\seq(d, p) = v_0 w v_1 \dots w v_n$.
We let $v_0 = r_1 \dots r_m$, $r_m = (A \to t)$ with $\yield_N(t) = A_1 \dots A_k$ and $A_i \in N$ for every $i \in [k]$, $s \in [k]$ such that $p_{m+1} = s$, $m'_n = 1$, $d'_n = d$, and for every $i \in [n]$
\begin{align*}
m'_{i-1} &= m'_i + |v_{n-i-1}| + |w|
\\
s'_i &\in \mathbb N \ \text{such that} \ p_{m'_{i-1} + 1} = s'_i \\
d'_{i-1} &= d'_i[d|_{p_{1 \isep m'_{i-1} + |w|}}]_{p_{m'_n \isep m'_n + |v_0|} \dots p_{m'_{i-1} \isep m'_{i-1} + |v_{n-i}|}} \enspace.
\end{align*}
We will show that
\[ \wthom[\nbest]{d} \oplus \bigoplus_{i = 0}^{n-1} \wthom[\nbest]{d'_i} = \bigoplus_{i = 0}^{n-1} \wthom[\nbest]{d'_i} \enspace. \]
First, we define $d_i'' = d_i'|_{p_{1 \isep m+1}}$ for every $i \in [0,n-1]$.
Then we let $w = r_1 \dots r_l$ and for every $i \in [l]$, we let $\wt(r_i) = \mulnkki{i}$ with $k_i = \rk(r_i)$ and $\welem k_i \in \nbest$.
Then there are $d_1, \dots, d_{k_1} \in \T_R$ such that $d|_{p_{m+1}} = r_1(d_1, \dots, d_{k_1})$.
Thus
\[
\wthom[\nbest]{d|_{p_{1 \isep m+1}}} = \mulnkki{1}\left(\wthom[\nbest]{d_1}, \dots, \wthom[\nbest]{d_{k_1}}\right)
\]
and by recursively applying $\wt$ to $d(p_{2 \isep i})$ for each $i \in [m + 2, |p| - |v_n|]$
\begin{align*}
&= \mulnkki{1}\bigg(\begin{aligned}[t]
&\wthom[\nbest]{d_1}, \dots, \wthom[\nbest]{d_{s-1}}, \\
&\mulnkki{2}\bigg(\begin{aligned}[t]
&\wthom[\nbest]{(d_s)|_1}, \dots, \wthom[\nbest]{d_{s'-1}}, \\
&\dots \\
&\mulnkki{l}\bigg(\begin{aligned}[t]
&\wthom[\nbest]{d|_{p_{1 \isep m + l} 1}}, \dots, \wthom[\nbest]{d_{p_{1 \isep m + l} s''-1}}, \\
&\dots \\
&\mulnkki{l}\Big(\wthom[\nbest]{d|_{p_{1 \isep |p| - |v_n|} 1}}, \dots, \wthom[\nbest]{d_{p_{1 \isep |p| - |v_n|} k}}\Big), \\
&\dots, \\
&\wthom[\nbest]{d_{p_{1 \isep m + l} s''+1}}, \dots, \wthom[\nbest]{d|_{p_{1 \isep m + l} k}} \Big),
\end{aligned} \\
&\dots, \\
&\wthom[\nbest]{d_{s'+1}}, \dots, \wthom[\nbest]{(d_s)|_k}\bigg)
\end{aligned} \\
&\wthom[\nbest]{d_{s+1}}, \dots, \wthom[\nbest]{d_k}\bigg) \enspace,
\end{aligned}
\intertext{where $p_{m+2} = s'$ and $p_{m + l + 1} = s''$,}
&= \takenbest\Big(\begin{aligned}[t]
&(\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}})i{1} \cdot_n \wthom[\nbest]{d_1} \cdot_n \ldots \cdot_n \wthom[\nbest]{d_{s-1}} \\
&\cdot_n \begin{aligned}[t]
&(\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}})i{2} \cdot_n \wthom[\nbest]{(d_s)|_1} \cdot_n \ldots \cdot_n \wthom[\nbest]{d_{s'-1}} \\
&\cdot_n \ldots \\
&\cdot_n \begin{aligned}[t]
&(\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}})i{l} \cdot_n \wthom[\nbest]{d|_{p_{1 \isep m + l} 1}} \cdot_n \ldots \cdot_n \wthom[\nbest]{d_{p_{1 \isep m + l} s''-1}} \\
&\cdot_n \ldots \\
&(\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}})i{l} \cdot_n \wthom[\nbest]{d|_{p_{1 \isep |p| - |v_n|} 1}} \cdot_n \ldots \cdot_n \wthom[\nbest]{d_{p_{1 \isep |p| - |v_n|} k}} \\
&\cdot_n \ldots \\
&\cdot_n \wthom[\nbest]{d_{p_{1 \isep m + l} s''+1}} \cdot_n \ldots \cdot_n \wthom[\nbest]{d|_{p_{1 \isep m + l} k}}
\end{aligned} \\
&\cdot_n \ldots, \\
&\cdot_n \wthom[\nbest]{d_{s'+1}} \cdot_n \ldots \cdot_n \wthom[\nbest]{(d_s)|_k}
\end{aligned} \\
&\cdot_n \wthom[\nbest]{d_{s+1}} \cdot_n \ldots \cdot_n \wthom[\nbest]{d_k}\bigg) \enspace,
\end{aligned}
\intertext{where we have skipped the inner applications of $\takenbest$ for readability. Thus, by commutativity of $\cdot_n$, there is a $\welem k \in \walg K$ such that we continue}
&= \takenbest\Big(((\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}})i{1} \cdot_n \ldots \cdot_n (\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}})i{l})^n \cdot_n \welem k\Big) \enspace.
\end{align*}
We let $\wthom[\nbest]{d|_{p_{1 \isep m+1}}} = (a_1, \dots, a_n)$.
Then for every $i \in [n]$, there is an $i' \in [n]$ such that
\[ a_i = \takenbest\Big(((\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}})i{1} \cdot_n \ldots \cdot_n (\welem k, \underbrace{0, \dots, 0}_{\mathclap{\text{$n-1$ times}}})i{l})^n \cdot_n \welem k\Big)_{i'} \enspace. \]
Now as for every $i \in [n-1]$, $(\welem k_1 \cdot \ldots \cdot \welem k_l)^i \ge (\welem k_1 \cdot \ldots \cdot \welem k_l)^n$ and $(\welem k_1 \cdot \ldots \cdot \welem k_l)^0 = 1 \ge (\welem k_1 \cdot \ldots \cdot \welem k_l)^n$, for every $i \in [n]$ and $i' \in [0, n-1]$ by monotonicity of~$\cdot$ we have that $a_i \le (\wthom[\nbest]{d''_{i'}})_1$.
Thus
\[ \wthom[\nbest]{d|_{p_{1 \isep m+1}}} \oplus \bigoplus_{i=0}^{n-1} \wthom[\nbest]{d''_i} = \bigoplus_{i=0}^{n-1} \wthom[\nbest]{d''_i} \enspace. \]
Therefore
\begin{align*}
&\wthom[\nbest]{d} \oplus \bigoplus_{i=0}^{n-1} \wthom[\nbest]{d'_i} \\
&= d[x_{s,A_s}]_{p_{1 \isep m+1}}\left(\wthom[\nbest]{d|_{p_{1 \isep m+1}}} \oplus \bigoplus_{i=0}^{n-1} \wthom[\nbest]{d''_i}\right)_{\walg K}
\tag{distributivity of $\Omegav$ over $\oplus$} \\
&= d[x_{s,A_s}]_{p_{1 \isep m+1}}\left(\bigoplus_{i=0}^{n-1} \wthom[\nbest]{d''_i}\right)_{\walg K} \\
&= \bigoplus_{i=0}^{n-1} \wthom[\nbest]{d'_i} \enspace.
\end{align*}
Now, as $d'_i \in \cotrees(d, w)$ for every $i \in [0, n-1]$, this entails that
\[ \wthom[\nbest]{d} \oplus \bigoplus_{d' \in \cotrees(d, w)} \wthom[\nbest]{d'} = \bigoplus_{d' \in \cotrees(d, w)} \wthom[\nbest]{d'} \]
and thus~$\overline G$ is $(n-1)$-closed.
\end{proof}
\begin{lemma}[restate={[name={}]lemintersectionclosed}]\label{lem:intersection-closed}
Every wRTG-LM in $\wlmclass{\gclass{all}, \wclass{int}}$ is closed.
\end{lemma}
\begin{proof}
Let $\overline G = \big((G, \alg L), (\walg K, \cup, \emptyset, \omega), \wt\big)$ in $\wlmclass{\gclass{all}, \wclass{int}}$.
Clearly, $(\walg K, \subseteq)$ is a partial order.
By definition of~$\Omega$, for every $k \in \mathbb N$, $\omega \in \Omega_k$, and $\welem k_1, \dots, \welem k_k \in \walg K$ it holds that $\welem k_i \subseteq \omega(\welem k_1, \dots, \welem k_k)$ for each $i \in [k]$.
Thus and since $\walg K$ is finite, idempotent and distributive, by Lemma~\ref{lem:finite-closed}, $\overline G$ is closed.
\end{proof}
\begingroup\setlength\emergencystretch{10pt}
\thmapplicationsp*
\endgroup
\begin{proof}
This is a consequence of Lemmas~\ref{lem:bd-closed},~\ref{lem:nbest-closed}, and~\ref{lem:intersection-closed}.
\end{proof}
\subsection{Restriction of best derivation M-monoid is necessary}\label{app:bd-restriction}
\begin{lemma}\label{lem:bd-restriction}
There is a wRTG-LM in $\wlmclass{\gclass{all}, \walg{BD}} \setminus \wlmclass[<1]{\gclass{all}, \walg{BD}}$ which is not closed.
\end{lemma}
\begin{example}
Let $\overline G = \big((G, \lalg{CFG}^\emptyset), \walg K, \wt\big) \in \wlmclass{\gclass{all}, \walg{BD}}$ with $G = (N, \Sigma, A_0, R)$ such that there are $r, r' \in R$ with $r = \big(A \to \langle x_1 \rangle(A)\big)$, $r' = (A \to \langle \varepsilon \rangle)$, $\wt(r) = \tc{1}{r}$, and $\wt(r') = \tc{p}{r'}$ for some $p \in {\mathbb R_0^1}$.
We let
\[ d^{(c)} = \underbrace{r( \dots r(}_{\text{$c$ times}} r' \underbrace{) \dots )}_{\text{$c$ times}} \enspace. \]
We show that for every $c \in \mathbb N$,
\[ \wthom{d^{(c)}} \oplus \bigoplus_{d' \in \cotrees(d^{(c)})} \wthom{d'} \not= \bigoplus_{d' \in \cotrees(d^{(c)})} \wthom{d'} \]
by an indirect proof.
Assume that there is a $c \in \mathbb N$ such that
\[ \wthom{d^{(c)}} \oplus \bigoplus_{d' \in \cotrees(d^{(c)})} \wthom{d'} = \bigoplus_{d' \in \cotrees(d^{(c)})} \wthom{d'} \enspace. \]
Then
\begin{align*}
\wthom{d^{(c)}} \oplus \bigoplus_{d' \in \cotrees(d^{(c)})} \wthom{d'} &= (p, \{ d^{(c)} \}) \oplus \bigoplus_{\substack{c' \in \mathbb N: \\ c' < c}} (p, \{ d^{(c')} \})
\tag{as $\oplus$ is idempotent} \\
&= \bigoplus_{\substack{c' \in \mathbb N: \\ c' \le c}} (p, \{ d^{(c')} \}) \\
&\not= \bigoplus_{\substack{c' \in \mathbb N: \\ c' < c}} (p, \{ d^{(c')} \}) = \bigoplus_{d' \in \cotrees(d^{(c)})} \wthom{d'} \enspace,
\end{align*}
which contradicts our assumption.
Now, since for every $c \in \mathbb N$ we have that the leaf $1^c \in \pos(d^{(c)})$ is $(\lfloor c / 2 \rfloor, rr)$-cyclic, it follows that~$\overline G$ is not $(\lfloor c / 2 \rfloor)$-closed.
Thus~$\overline G$ is not closed.
\end{example}
\end{document} |
\begin{document}
\title{Simulation study of estimating between-study variance and overall effect in meta-analysis of odds-ratios}
\author{Ilyas Bakbergenuly, David C. Hoaglin and Elena Kulinskaya}
\date{\today}
\maketitle
\begin{center}
\textit{Abstract}
\end{center}
Random-effects meta-analysis requires an estimate of the between-study variance, $\tau^2$. We study methods of estimation of $\tau^2$ and its confidence interval in meta-analysis of odds ratios, and also the performance of related estimators of the overall effect.
We provide results of extensive simulations on five point estimators of $\tau^2$ (the popular methods of DerSimonian-Laird, restricted maximum likelihood, and Mandel and Paule; the less-familiar method of Jackson; and the new method (KD) based on the improved approximation to the distribution of the Q statistic by Kulinskaya and Dollinger (2015)); five interval estimators for $\tau^2$ (profile likelihood, Q-profile, Biggerstaff and Jackson, Jackson, and KD), six point estimators of the overall effect (the five inverse-variance estimators related to the point estimators of $\tau^2$ and an estimator (SSW) whose weights use only study-level sample sizes), and eight interval estimators for the overall effect (five based on the point estimators for $\tau^2$; the Hartung-Knapp-Sidik-Jonkman (HKSJ) interval; a KD-based modification of HKSJ; and an interval based on the sample-size-weighted estimator). Results of our simulations
show that none of the point estimators of $\tau^2$ can be recommended; however, the new KD estimator provides reliable coverage of $\tau^2$. Inverse-variance estimators of the overall effect are substantially biased. The SSW estimator of the overall effect and the related confidence interval provide reliable point and interval estimation of the log-odds-ratio.
{\it Keywords: between-study variance, random effects model, meta-analysis, binary outcomes}
\section{Introduction}
Meta-analysis is broadly used for combining estimates of a measure of effect from a set of studies in order to estimate an overall (pooled) effect. In studies with binary individual-level outcomes, the most common measure of treatment effect is the odds ratio. The standard method for combining study-level estimates uses a weighted average with inverse-variance weights. Our primary interest lies in meta-analysis of odds ratios via the random-effects model (REM), in which heterogeneity of the true study-level effects is usually modelled through a study-level distribution with an unknown between-study variance $\tau^2$. Inverse-variance weights require an estimate of the between-study variance, which is also of interest in assessing heterogeneity.
A number of methods provide estimates of between-study variance. \cite{veroniki2016methods} and \cite{Langan_2018_RSM_1316} provide comprehensive reviews. The most popular is the \cite{dersimonian1986meta} method. Recommended alternative point estimators include restricted maximum likelihood (REML), the method of \cite{mandel1970interlaboratory}, and the method of \cite{jackson2013confidence}. Interval estimators recommended by \cite{veroniki2016methods} include profile likelihood, the Q-profile interval (\cite{viechtbauer2007confidence}), and the generalized Q-profile intervals of \cite{biggerstaff2008exact} and \cite{jackson2013confidence}. Quality of estimation varies with the effect measure; the simulation study {of estimating heterogeneity of odds-ratios} by \cite{Aert2019} found the last three methods lacking.
In meta-analyses that use inverse-variance weights, the actual measure of effect is the logarithm of the odds ratio (LOR), and the data are the logarithm of each study's sample odds ratio and the large-sample estimate of its variance.
Most moment-based methods of estimating heterogeneity use the moments of Cochran's $Q$ or its generalization (\cite{dersimonian2007random}). However, studies have shown (\cite{kulinskaya2015accurate, Aert2019}) that, for log-odds-ratio, these statistics do not follow the nominal chi-squared distribution or the mixture of chi-squared distributions derived by \cite{biggerstaff2008exact} and \cite{jackson2013confidence}. These departures result in biases and in undercoverage of the standard estimators of between-study variance. Also, in combination with inverse-variance weighting, they lead to biased point estimation of the overall effect and undercoverage of the associated confidence intervals (see \cite{Veroniki_2018_RSM_1319} for a review). Therefore, for estimating between-study variance, we propose a method based on an improved approximation to the moments of Cochran's $Q$ statistic, suggested by \cite{kulinskaya2015accurate}. For the overall effect, we propose a weighted average in which the weights depend only on the effective sample sizes.
To compare our proposals with previous methods, we use simulation to study bias in five point estimators of the between-study variance, and coverage of five interval estimators of the between-study variance. We also study bias in six point estimators of the overall effect, and coverage of eight interval estimators of the overall effect.
\section{Estimation of study-level log-odds-ratio} \label{studyLOR}
Consider $K$ studies that used a particular individual-level binary outcome.
Each study $i$ reports a pair of independent binomial variables, $X_{i1}$ and $X_{i2}$, the numbers of events in $n_{i1}$ subjects in the Treatment arm ($j = 1$) and $n_{i2}$ subjects in the Control arm ($j = 2$) such that, for $i = 1, \ldots ,K$,
$$X_{i1}\sim {Binom}(n_{i1},p_{i1})\qquad \text{and}\qquad X_{i2}\sim {Binom}(n_{i2},p_{i2}).$$
The log-odds-ratio for Study $i$ is
\begin{equation}\label{eq:psi}
\theta_{i}=\log\left(\frac{p_{i1}(1-p_{i2})}{p_{i2}(1-p_{i1})}\right)\qquad\text{estimated by} \qquad
\hat\theta_{i}=\log\left(\frac{\hat p_{i1}(1-\hat p_{i2})}{\hat p_{i2}(1-\hat p_{i1})}\right).
\end{equation}
The large-sample variance of $\hat{\theta}_i$, derived by the delta method, is
\begin{equation}\label{eq:sigma}
{\sigma}_{i}^2=\hbox{Var}(\hat{\theta}_{i})=\frac{1}{n_{i1}{p}_{i1}(1-{p}_{i1})}+\frac{1}{n_{i2}{p}_{i2}(1-{p}_{i2})}.
\end{equation}
Estimation of $\theta$ and $\sigma^2_i$ requires estimates of the $p_{ij}$. The usual (and maximum-likelihood) estimate of $p_{ij}$ is $\hat{p}_{ij} = x_{ij} / n_{ij}$, but an adjustment is necessary when either of the observed counts is 0 or $n_{ij}$ (i.e., when the $2 \times 2$ table for Study $i$ contains a 0 cell). The standard approach adds $1/2$ to $x_{i1}$, $n_{i1} - x_{i1}$, $x_{i2}$, and $n_{i2} - x_{i2}$ when the $2 \times 2$ table contains exactly one 0 cell, and it omits Study $i$ when the $2 \times 2$ table contains two 0 cells. An alternative approach always adds $a$ $(>0)$ to all four cells of the $2 \times 2$ table for each of the $K$ studies; that is, it estimates $p_{ij}$ by $\hat{p}_{ij(a)}=(x_{ij}+a)/(n_{ij}+2a)$. The most common choice, $a = 1/2$, removes biases of order $n^{-1}$ in $\hat{\theta}_i$ and of order $n^{-2}$ in its estimated variance given by Equation~(\ref{eq:sigma}) (\cite{gart1985}).
\section{Standard random-effects model} \label{sec:StdREM}
The standard random-effects model assumes that each estimated study-level effect, $\hat{\theta}_i$, has an approximately normal distribution and that the true study-level effects, $\theta_{i}$, follow a normal distribution:
\begin{equation}\label{standardREM}
\hat{\theta}_{i} \sim N(\theta_{i}, \sigma_{i}^2) \quad \text{and} \quad \theta_{i} \sim N(\theta, \tau^2).
\end{equation}
Thus, the marginal distribution of $\hat{\theta}_{i}$ is $N(\theta, \sigma_{i}^2 + \tau^2)$. Although the $\sigma_{i}^2$ are generally unknown, they are routinely replaced by their estimates, $\hat{\sigma}_{i}^2$. A key step involves estimating the between-study variance, $\tau^{2}$; the standard random-effects model uses the DerSimonian-Laird estimate (\cite{dersimonian1986meta}). The estimate of the overall effect is then
\begin{equation}\label{thetahatRE}
\hat{\theta}_{RE} = {\sum \limits_{i=1}^{K} \hat{w}_{i} \hat{\theta}_{i}} / {\sum \limits_{i=1}^{K} \hat{w}_{i}},
\end{equation}
where $\hat{w}_{i} = \hat{w}_{i}(\hat{\tau}^2) = (\hat{\sigma}_{i}^2 + \hat{\tau}^2) ^ {-1}$ is the inverse-variance weight for Study $i$. If the $\sigma_i^2$ and $\tau^2$ were known, the variance of $\hat{\theta}_{RE}$ would be $[\sum w_{i}] ^ {-1}$ with $w_i = (\sigma_i^2 + \tau^2) ^ {-1}$. In practice, the variance of $\hat{\theta}_{RE}$ is traditionally estimated by $[\sum \hat{w}_{i}(\hat{\tau}^2)] ^ {-1}$, and a confidence interval for $\theta$ uses critical values from the normal distribution.
\section{Point and interval estimation of $\tau^2$ by Kulinskaya-Dollinger method (KD)}
The chi-squared approximation for $Q$ is inaccurate, and the actual distribution of $Q$ depends on the effect measure. Under the null hypothesis of homogeneity of the log-odds-ratio, \cite{kulinskaya2015accurate} derive corrected approximations for the mean and variance of $Q$ and match those corrected moments to obtain a gamma distribution that (as their simulations confirm) closely fits the null distribution of $Q$.
We propose a new method of estimating $\tau^2$ based on this improved approximation.
Let $E_{KD}({Q})$ denote the corrected expected value of $Q$. Then one obtains the KD estimate $\hat{\tau}_{KD}^2$ by iteratively solving
\begin{equation}
Q(\tau^2)=\sum\limits_{i=1}^{K}\frac{(\theta_{i}-\hat{\theta}_{RE})^{2}}{\hat{\sigma}_{i}^2+\tau^2}=E_{KD}({Q}).
\end{equation}
We also propose a new KD confidence interval for the between-study variance. This interval for $\tau^2$ combines the Q-profile approach and the improved approximation by \cite{kulinskaya2015accurate}. This corrected Q-profile confidence interval can be estimated from the lower and upper quantiles of $F_Q$, the cumulative distribution function for the corrected distribution of $Q$:
\begin{equation}
Q(\tau_{L}^2)=F_{Q;0.975}\qquad Q(\tau_{U}^2)=F_{Q;0.025}
\end{equation}
The upper and lower confidence limits for $\tau^2$ can be calculated iteratively.
\section{Sample size weighted (SSW) point and interval estimation of $\theta$}
In an attempt to avoid the bias in the inverse-variance-weighted estimators, we included a point estimator whose weights depend only on the studies' sample sizes (\cite{hedges1985statistical, hunter1990methods}). For this estimator (SSW),
$w_{i} = \tilde{n}_i = n_{iT}n_{iC}/(n_{iT} + n_{iC})$; $\tilde{n}_i$ is the effective sample size in Study $i$. These weights would coincide with the inverse-variance weights if all the probabilities across studies were equal.
The interval estimator corresponding to SSW (SSW KD) uses the SSW point estimator as its center, and its half-width equals the estimated standard deviation of SSW under the random-effects model times the critical value from the $t$ distribution on $K - 1$ degrees of freedom. The estimator of the variance of SSW is
\begin{equation}\label{eq:varianceOfSSW}
\widehat{\hbox{Var}}(\hat{\theta}_{\mathit{SSW}})= \frac{\sum \tilde{n}_i^2 (v_i^2 + \hat{\tau}^2)} {(\sum \tilde{n}_i)^2},
\end{equation}
in which $v_i^2$ comes from Equation (\ref{eq:sigma}) and $\hat{\tau}^2 = \hat{\tau}_{\mathit{KD}}^2$.
\section{Simulation study}
In a simulation study with log-odds-ratio as the effect measure, we varied six parameters: the number of studies $K$, the total sample size of each study $n$, the proportion of observations in the control arm $q$, the overall true LOR $\theta$, the between-study variance $\tau^2$, and the probability of an event in the control arm.
The number of studies $K = (5, 10, 30)$.
We included sample sizes that were equal for all $K$ studies and sample sizes that varied among studies. The total sample sizes were $n = (40, 100, 250, 1000)$ for equal sample sizes and $\bar{n} = (30, 60, 100, 160)$ for unequal sample sizes. In choosing sample sizes that varied among studies, we followed a suggestion of \cite{sanchez2000testing}, who selected study sizes having skewness $1.464$, which they considered typical in behavioral and health sciences. The average study sizes were $\bar{n} = (30, 60, 100, 160)$, where $\bar{n}=30$ corresponds to $K=5$ studies of sizes $(12,16,18,20,84)$, $\bar{n}=60$ corresponds to studies of sizes $(24,32,36,40,168)$, $\bar{n}=100$ corresponds to $(64,72,76,80,208)$, and $\bar{n}=160$ corresponds to $(124,132,136,140,268)$. Table \ref{unequal sample sizes} summarizes these sample sizes. For $K = 10$ and $K = 30$, the same set of sample sizes was used twice and six times, respectively.
The values of $q$ were .5 and .75. The sample sizes of the treatment and control arms were $n_{iT}=\lceil{(1 - q_i)n_{i}}\rceil$ and $n_{iC}=n_{i}-n_{iT}$, $i=1,\ldots,K$.
The values of the overall true LOR $\theta$ were $0(0.5)2$.
The values of the between-study variance $\tau^2$ were $0(0.1)1$, corresponding to small to moderate heterogeneity, and $1(1)10$ for moderate to large heterogeneity.
The probability in the control arm, $p_{iC}$, was $0.1,\; 0.2,\; 0.4$.
Altogether, the simulations comprised 7,920 combinations of the six parameters. We generated 10,000 meta-analyses for each combination.
The true values of LOR ($\theta_{i}$) in the $K$ studies were generated from normal distributions with mean $\theta$ and variance $\tau^2$.
For a given probability $p_{iC}$, the number of events in the control group $X_{iC}$ was generated from the Binomial $(n_{iC}, p_{iC})$ distribution. The number of events in the treatment group $X_{iT}$ was generated from the Binomial $(n_{iT}, p_{iT})$ distribution with
$p_{iT}=p_{iC}\exp(\theta_{i})/(1 - p_{iC} + p_{iC}\exp(\theta_{i}))$.
The estimate of effect size in Study $i$, $\hat\theta_i$ is calculated as in Equation~(\ref{eq:psi}),
and its sampling variance is estimated by substitution of $\hat p_{ij}$ in Equation~(\ref{eq:sigma}).
The methods differ, however, in the way they obtain $\hat{p}_{ij}$ from $x_{ij}$ and $n_{ij}$.
For all standard methods, we added $1/2$ to each cell of the $2 \times 2$ table only when the table had at least one cell equal to 0. This approach corresponds to the default values of the arguments \textbf{add}, \textbf{to} and \textbf{drop00} of the \textit{escalc} procedure from \textit{metafor}, \cite{viechtbauer2015package}.
For the KD methods, we corrected for bias by adding $a=1/2$ to each cell of all $K$ tables, and we dropped the double zero studies. We also tried always adding $1/2$ in standard methods, but the results were worse.
\begin{table}
\centering
\caption{Unequal sample sizes for simulations}
\label{unequal sample sizes}
\begin{tabular}{|l|l|l|l|l|l|}
\hline
$\bar{n}\setminus{K}$&$1$ & $2$ & $3$ &$4$ & $5$ \\
\hline
30 & 12 & 16 & 18 & 20 & 84 \\
60 & 24 & 32 & 36 & 40 & 168 \\
100 & 64 & 72 & 76 & 80 & 208 \\
160 & 124 & 132 & 136 & 140 &268 \\
\hline
\end{tabular}
\end{table}
\section{Methods of estimation of $\tau^2$ and $\theta$ used in simulations}
\subsection*{Point estimators of $\tau^2$}
\begin{itemize}
\item DL - DerSimonian and Laird method by \cite{dersimonian1986meta}
\item J - method by \cite{jackson2013confidence}
\item KD - new improved moment method based on \cite{kulinskaya2015accurate}
\item MP - Mandel-Paule method \cite{mandel1970interlaboratory}
\item REML - Restricted Maximum Likelihood method
\end{itemize}
\subsection*{Interval estimators of $\tau^2$}
\begin{itemize}
\item BJ - method by \cite{biggerstaff2008exact}
\item J - method by \cite{jackson2013confidence}
\item KD - new improved Q-profile method based on \cite{kulinskaya2015accurate}
\item PL - profile likelihood confidence interval based on $\tau_{REML}^2$
\item QP - Q-profile confidence interval method \cite{viechtbauer2007confidence}
\end{itemize}
\subsection*{Point estimators of $\theta$ }
Inverse variance methods with $\tau^2$ estimated by:
\begin{itemize}
\item DL - DerSimonian and Laird method by \cite{dersimonian1986meta}
\item J - method by \cite{jackson2013confidence}
\item KD - improved moment method based on \cite{kulinskaya2015accurate}
\item MP - Mandel Paule method \cite{mandel1970interlaboratory}
\item REML - Restricted Maximum Likelihood method
\end{itemize}
and
\begin{itemize}
\item SSW - fixed weights estimator of $\theta$
\end{itemize}
\subsection*{Interval estimators of $\theta$}
Standard inverse-variance methods using normal quantiles, with $\tau^2$ estimated by:
\begin{itemize}
\item DL - DerSimonian and Laird method by \cite{dersimonian1986meta}
\item J - method by \cite{jackson2013confidence}
\item KD - improved moment method based on \cite{kulinskaya2015accurate}
\item MP - Mandel Paule method \cite{mandel1970interlaboratory}
\item REML - Restricted Maximum Likelihood method
\end{itemize}
Inverse-variance methods with modified variance of $\theta$ and t-quantiles as in \cite{hartung2001refined} and \cite{sidik2002simple}
\begin{itemize}
\item HKSJ (DL) - $\tau^2$ estimated by DL
\item HKSJ KD - $\tau^2$ estimated by KD
\end{itemize}
and
\begin{itemize}
\item SSW KD - fixed weights estimator of $\theta$ with the variance given by (\ref{eq:varianceOfSSW}) and t-quantiles
\end{itemize}
\subsection*{Studies with zero events in one or both arms}
\begin{itemize}
\item J - adds continuity correction $1/2$ to each cell in case of zeros only
\item DL - adds continuity correction $1/2$ to each cell in case of zeros only
\item REML - adds continuity correction $1/2$ to each cell in case of zeros only
\item MP - adds continuity correction $1/2$ to each cell in case of zeros only
\item KD - always adds continuity correction $1/2$ to each cell; excludes double zeros
\end{itemize}
\subsection{Results of simulation studies }
Our full simulation results, comprising $300$ figures, each presenting $12$ combinations of $K$ by $n$ values, are provided in Appendices A and B. The short summary is given below.
\subsubsection*{Bias in estimation of $\tau^2$ (Web Appendix A1)}
None of the point estimators of $\tau^2$ has bias consistently close enough to 0 to be recommended, but among the existing estimators, MP and KD provide better choices for small and large $K$, respectively.
\subsubsection*{Coverage in estimation of $\tau^2$ (Web Appendix A2)}
Coverage of $\tau^2$ is generally good for $K=5$, but is considerably worse for larger number of studies, especially so for large values of $\theta$. All methods are somewhat conservative at $\tau^2=0$. Overall, KD performs the best.
The large number of studies $K$ presents the greatest challenge for the standard methods. PL is the most affected, with considerable undercoverage up to $n=100$ for medium to large values of $\tau^2$. The other methods also have low coverage for small $n$, but they improve faster with increasing $n$. KD provides reliable coverage except for small sample sizes combined with $p_C=0.4$ and $\theta\geq 1.5$, where its undercoverage worsens with increasing $\tau^2$, though it is still considerably better than all the competitors.
\subsubsection*{Bias in estimation of $\theta$ (Web Appendix B1)}
In the results for the bias of the point estimators of $\theta$, a common pattern is that the bias is roughly linearly related to $\tau^2$ with a positive slope.
As expected, in the vast majority of situations, SSW avoids most, if not all, of the bias in the IV-weighted estimators. The bias of the inverse variance estimators affects their efficiency, so that SSW is sometimes more efficient (in terms of its mean squared error) than its competitors.
\subsubsection*{Coverage in estimation of $\theta$ (Web Appendix B2)}
Because of the undercoverage of the standard CIs based on the IV-weighted estimators, we do not recommend them. HKSJ and HKSJ KD often have coverage close to 95\%, but they sometimes have serious undercoverage. All problems are typically exacerbated for the unbalanced sample sizes. The newly proposed SSW KD interval often has coverage somewhat greater than 95\%, but its coverage is at least 93\% (except for a few cases involving $K = 30$ and unequal sample sizes with $\bar{n} = 30$).
\section{Summary}
Our extensive simulations demonstrate that the existing methods of meta-analysis of odds ratios often present a biased view of both the heterogeneity and the overall effect. In brief:\\
small sample sizes are rather problematic, and meta-analyses that involve numerous small studies are especially challenging. Because the study-level effects and their variances are related, estimates of the overall effects are biased, and the coverage of confidence intervals is too low, especially for small sample sizes and large number of studies.
The between-study variance, $\tau^2$, is typically estimated by generic methods which assume normality of the estimated effects $\hat\theta_i$. It is usually overestimated near zero, but the standard methods are negatively biased for larger values of $\tau^2$.
Our findings agree with those by \cite{Aert2019} that the standard interval estimation of $\tau^2$ is often too liberal. The behavior of the profile likelihood method is especially erratic.
Therefore we proposed and studied by simulation the new moment method of estimation of $\tau^2$ based on the improved approximation to distribution of Cochran's $Q$ for odds ratios by \cite{kulinskaya2015accurate}. The KD method provides reliable interval estimation of $\tau^2$ across all values of $\tau^2$, $n$, and $K$. The point estimation of $\tau^2$ is more challenging, and even though KD provides better point estimation for $K=30$, it is positively biased for small values of $K$.
Arguably, the main purpose of a meta-analysis is to provide point and interval estimates of an overall effect.
Our results show that the inverse-variance-weighted estimates of the overall effect are biased.
These biases (and even their sign) depend on $\tau^2$ and the true value of $\theta$, worsen for the unbalanced studies, and may be considerable even for reasonably large sample sizes such as $n=250$. The coverage of the overall effect follows the same patterns because the centering of confidence intervals is biased. Additionally, traditional intervals using normal quantiles are too narrow, and the use of t-quantiles, as in HKSJ methods, brings noticeable though not sufficient improvement.
A pragmatic approach to unbiased estimation of $\theta$ uses weights that do not involve estimated variances of study-level estimates, for example, weights proportional to the study sizes $n_i$. \cite{hedges1985statistical}, \cite{hunter1990methods} and \cite{Shuster-2010}, among others, have proposed such weights.
We propose to use weights proportional to an effective sample size, $\tilde{n}_i=n_{iT}n_{iC}/n_i$; these are the optimal inverse-variance weights for LOR when all the probabilities are equal.
A reasonable estimator of $\tau^2$, such as MP or KD can be used as $\hat{\tau}^2$. Further, confidence intervals for $\theta$ centered at $\hat{\theta}_{\mathit{SSW}}$ with $\hat{\tau}_{\mathit{KD}}^2$ in Equation~(\ref{eq:varianceOfSSW}) can be used. In our simulations, this is by far the best interval estimator of $\theta$, providing near nominal coverage under all studied conditions.
\section*{Funding}
The work by E. Kulinskaya was supported by the Economic and Social Research Council [grant number ES/L011859/1].
\section*{Appendices description}
\begin{itemize}
\item Appendix A: Plots for bias and coverage of $\tau^2$.
\item Appendix B: Plots for bias, mean squared error, and coverage of estimators of the log-odds-ratio
\end{itemize}
\setcounter{section}{0}
\renewcommand{\thefigure}{A1.1.\arabic{figure}}
\renewcommand{\thesection}{A.\arabic{section}}
\setcounter{figure}{0}
\text{\LARGE{\bf{Appendices}}}
\section{Plots for bias of between-study variance.}
Subsections A1.1, A1.2 and A1.3 correspond to $p_{C}=0.1,\; 0.2,\; 0.4$ respectively.
For a given $p_{C}$ value, each figure corresponds to a value of $\theta (= 0, 0.5, 1, 1.5, 2)$, a value of $q (= 0.5, 0.75)$, a value of $\tau^2 = 0.0(0.1)1.0$, and a set of values of $n$ (= 40, 100, 250, 1000) or $\bar{n}$ (= 30, 60, 100, 160).\\
Each figure contains a panel (with $\tau^2$ on the horizontal axis) for each combination of n (or $\bar{n}$) and $K (=5, 10, 30)$.\\
The point estimators of $\tau^2$ are
\begin{itemize}
\item DL (DerSimonian-Laird)
\item REML (Restricted Maximum Likelihood )
\item MP (Mandel-Paule)
\item KD (new improved moment estimator based on Kulinskaya and Dollinger (2015))
\item J (Jackson)
\end{itemize}
\subsection*{A1.1 Probability in the control arm $p_{C}=0.1$}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu0andq05piC01LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR0q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu0andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR0q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu05andq05piC01LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR05q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu05andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR05q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu1andq05piC01LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR1q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu1andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR1q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu15andq05piC01LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR15q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu15andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR15q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu2andq05piC01LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR2q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu2andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR2q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu0andq075piC01LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR0q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu0andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR0q075piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu05andq075piC01LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR05q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu05andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR05q075piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu1andq075piC01LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR1q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu1andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR1q075piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu15andq075piC01LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR15q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu15andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR15q075piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu2andq075piC01LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR2q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu2andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR2q075piC01_unequal_sample_sizes}}
\end{figure}
\renewcommand{\thefigure}{A1.2.\arabic{figure}}
\setcounter{figure}{0}
\subsection*{A1.2 Probability in the control arm $p_{C}=0.2$}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu0andq05piC02LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR0q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu0andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR0q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu05andq05piC02LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR05q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu05andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR05q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu1andq05piC02LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR1q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu1andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.2$, $q=0.5$, unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR1q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu15andq05piC02LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR15q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu15andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR15q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu2andq05piC02LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR2q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu2andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR2q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu0andq075piC02LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR0q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu0andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR0q075piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu05andq075piC02LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR05q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu05andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR05q075piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu1andq075piC02LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR1q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu1andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR1q075piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu15andq075piC02LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR15q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu15andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR15q075piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu2andq075piC02LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR2q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu2andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR2q075piC02_unequal_sample_sizes}}
\end{figure}
\renewcommand{\thefigure}{A1.3.\arabic{figure}}
\setcounter{figure}{0}
\subsection*{A1.3 Probability in the control arm $p_{C}=0.4$}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu0andq05piC04LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR0q05piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu0andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR0q05piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu05andq05piC04LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR05q05piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu05andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR05q05piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu1andq05piC04LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR1q05piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu1andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR1q05piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu15andq05piC04LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR15q05piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu15andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR15q05piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu2andq05piC04LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR2q05piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu2andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR2q05piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu0andq075piC04LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR0q075piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu0andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR0q075piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu05andq075piC04LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR05q075piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu05andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR05q075piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu1andq075piC04LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR1q075piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu1andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR1q075piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu15andq075piC04LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR15q075piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu15andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR15q075piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu2andq075piC04LOR.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasTauLOR2q075piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasTau2mu2andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasTauLOR2q075piC04_unequal_sample_sizes}}
\end{figure}
\renewcommand{\thefigure}{A2.1.\arabic{figure}}
\setcounter{figure}{0}
\setcounter{section}{1}
\section{Coverage of between-study variance.}
Subsections A2.1, A2.2 and A2.3 correspond to $p_{C}=0.1,\; 0.2,\; 0.4$ respectively.
For a given $p_{C}$ value, each figure corresponds to a value of $\theta (= 0, 0.5, 1, 1.5, 2)$, a value of $q (= 0.5, 0.75)$, a value of $\tau^2 = 0.0(0.1)1.0$, and a set of values of $n$ (= 40, 100, 250, 1000) or $\bar{n} (= 30, 60, 100, 160)$.\\
Each figure contains a panel (with $\tau^2$ on the horizontal axis) for each combination of $n$ (or $\bar{n}$) and $K (=5, 10, 30)$.\\
The interval estimators of $\tau^2$ are
\begin{itemize}
\item QP (Q-profile confidence interval)
\item BJ (Biggerstaff and Jackson interval)
\item PL (Profile likelihood interval)
\item KD (Improved Q-profile confidence interval based on Kulinskaya and Dollinger (2015))
\item J (Jackson's interval)
\end{itemize}
\renewcommand{\thefigure}{A2.1.\arabic{figure}}
\setcounter{figure}{0}
\subsection*{A2.1 Probability in the control arm $p_{C}=0.1$}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu0andq05piC01LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR0q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu0andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR0q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu05andq05piC01LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR05q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu05andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR05q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu1andq05piC01LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR1q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu1andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR1q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu15andq05piC01LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR15q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu15andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR15q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu2andq05piC01LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR2q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu2andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR2q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu0andq075piC01LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR0q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu0andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR0q075piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu05andq075piC01LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR05q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu05andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR05q075piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu1andq075piC01LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR1q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu1andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR1q075piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu15andq075piC01LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR15q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu15andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR15q075piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu2andq075piC01LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR2q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu2andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR2q075piC01_unequal_sample_sizes}}
\end{figure}
\renewcommand{\thefigure}{A2.2.\arabic{figure}}
\setcounter{figure}{0}
\subsection*{A2.2 Probability in the control arm $p_{C}=0.2$}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu0andq05piC02LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR0q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu0andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR0q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu05andq05piC02LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR05q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu05andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR05q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu1andq05piC02LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR1q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu1andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR1q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu15andq05piC02LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR15q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu15andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR15q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu2andq05piC02LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR2q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu2andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR2q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu0andq075piC02LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR0q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu0andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR0q075piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu05andq075piC02LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR05q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu05andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR05q075piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu1andq075piC02LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR1q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu1andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR1q075piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu15andq075piC02LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR15q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu15andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR15q075piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu2andq075piC02LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR2q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu2andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR2q075piC02_unequal_sample_sizes}}
\end{figure}
\renewcommand{\thefigure}{A2.3.\arabic{figure}}
\setcounter{figure}{0}
\subsection*{A2.3 Probability in the control arm $p_{C}=0.4$}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu0andq05piC04LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR0q05piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu0andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR0q05piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu05andq05piC04LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR05q05piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu05andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR05q05piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu1andq05piC04LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR1q05piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu1andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR1q05piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu15andq05piC04LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR15q05piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu15andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR15q05piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu2andq05piC04LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR2q05piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu2andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR2q05piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu0andq075piC04LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR0q075piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu0andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR0q075piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu05andq075piC04LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR05q075piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu05andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR05q075piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu1andq075piC04LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR1q075piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu1andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR1q075piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu15andq075piC04LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR15q075piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu15andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR15q075piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu2andq075piC04LOR.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovTauLOR2q075piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovTau2mu2andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of between-studies variance $\tau^2$ for $\theta=2$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovTauLOR2q075piC04_unequal_sample_sizes}}
\end{figure}
\renewcommand{\thefigure}{B1.1.\arabic{figure}}
\renewcommand{\thesection}{B\arabic{section}}
\setcounter{figure}{0}
\setcounter{section}{0}
\section{Bias and mean squared error of point estimators of log-odds-ratio.}
Subsections B1.1, B1.2 and B1.3 correspond to $p_{C}=0.1,\; 0.2,\; 0.4$ respectively.
For a given $p_{C}$ value, each figure corresponds to a value of $\theta (= 0, 0.5, 1, 1.5, 2)$, a value of $q (= 0.5, 0.75)$, a value of $\tau^2 = 0.0(0.1)1.0$, and a set of values of $n$ (= 40, 100, 250, 1000) or $\bar{n}$ (= 30, 60, 100, 160).\\
Figures for mean squared error (expressed as the ratio of the MSE of SSW to the MSEs of the inverse-variance-weighted estimators that use the MP or KD estimator of $\tau^2$) use the above values of $\theta$ and $q$ but only $n = 40,\;100,\;250,\;1000$.\\
Each figure contains a panel (with $\tau^2$ on the horizontal axis) for each combination of $n$ (or $\bar{n}$) and $K (=5, 10, 30)$.\\
The point estimators of $\theta$ are
\begin{itemize}
\item DL (DerSimonian--Laird)
\item REML (restricted maximum likelihood)
\item MP (Mandel--Paule)
\item KD (Improved moment estimator based on Kulinskaya and Dollinger (2015))
\item J (Jackson)
\item SSW (sample-size weighted)
\end{itemize}
\subsection*{B1.1 Probability in the control arm $p_{C}=0.1$}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu0andq05piC01LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR0q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu0andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR0q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu0andq05piC01LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0$,$p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR0q05piC01fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu0andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0$,$p_{iC}=0.1$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR0q05piC01fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu05andq05piC01LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR05q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu05andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR05q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu05andq05piC01LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0.5$,$p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR05q05piC01fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu05andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0.5$,$p_{iC}=0.1$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR05q05piC01fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu1andq05piC01LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR1q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu1andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR1q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu1andq05piC01LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1$,$p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR1q05piC01fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu1andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1$,$p_{iC}=0.1$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR1q05piC01fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu15andq05piC01LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR15q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu15andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR15q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu15andq05piC01LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1.5$,$p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR15q05piC01fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu15andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1.5$,$p_{iC}=0.1$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR15q05piC01fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu2andq05piC01LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR2q05piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu2andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR2q05piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu2andq05piC01LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=2$,$p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR2q05piC01fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu2andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=2$,$p_{iC}=0.1$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR2q05piC01fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu0andq075piC01LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR0q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu0andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR0q075piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu0andq075piC01LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0$,$p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR0q075piC01fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu0andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0$,$p_{iC}=0.1$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR0q075piC01fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu05andq075piC01LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR05q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu05andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR05q075piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu05andq075piC01LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0.5$,$p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR05q075piC01fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu05andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0.5$,$p_{iC}=0.1$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR05q075piC01fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu1andq075piC01LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR1q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu1andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR1q075piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu1andq075piC01LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1$,$p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR1q075piC01fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu1andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1$,$p_{iC}=0.1$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR1q075piC01fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu15andq075piC01LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR15q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu15andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR15q075piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu15andq075piC01LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1.5$,$p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR15q075piC01fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu15andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1.5$,$p_{iC}=0.1$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR15q075piC01fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu2andq075piC01LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR2q075piC01}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu2andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR2q075piC01_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu2andq075piC01LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=2$,$p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR2q075piC01fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu2andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=2$,$p_{iC}=0.1$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR2q075piC01fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\renewcommand{\thefigure}{B1.2.\arabic{figure}}
\setcounter{figure}{0}
\subsection*{B1.2 Probability in the control arm $p_{C}=0.2$}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu0andq05piC02LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR0q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu0andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR0q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu0andq05piC02LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR0q05piC02fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu0andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0$, $p_{iC}=0.2$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR0q05piC02fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu05andq05piC02LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR05q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu05andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR05q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu05andq05piC02LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0.5$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR05q05piC02fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu05andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0.5$, $p_{iC}=0.2$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR05q05piC02fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu1andq05piC02LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR1q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu1andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR1q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu1andq05piC02LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR1q05piC02fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu1andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1$, $p_{iC}=0.2$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR1q05piC02fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu15andq05piC02LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR15q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu15andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR15q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu15andq05piC02LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1.5$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR15q05piC02fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu15andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1.5$, $p_{iC}=0.2$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR15q05piC02fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu2andq05piC02LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR2q05piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu2andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR2q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu2andq05piC02LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=2$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR2q05piC02fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu2andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=2$, $p_{iC}=0.2$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR2q05piC02fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu0andq075piC02LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR0q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu0andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR0q075piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu0andq075piC02LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR0q075piC02fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu0andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0$, $p_{iC}=0.2$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR0q075piC02fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu05andq075piC02LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR05q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu05andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR05q075piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu05andq075piC02LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0.5$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR05q075piC02fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu05andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0.5$, $p_{iC}=0.2$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR05q075piC02fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu1andq075piC02LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR1q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu1andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR1q075piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu1andq075piC02LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR1q075piC02fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu1andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1$, $p_{iC}=0.2$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR1q075piC02fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu15andq075piC02LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR15q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu15andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR15q075piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu15andq075piC02LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1.5$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR15q075piC02fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu15andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1.5$, $p_{iC}=0.2$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR15q075piC02fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu2andq075piC02LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR2q075piC02}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu2andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR2q075piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu2andq075piC02LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=2$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR2q075piC02fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu2andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=2$, $p_{iC}=0.2$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR2q075piC02fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\renewcommand{\thefigure}{B1.3.\arabic{figure}}
\setcounter{figure}{0}
\subsection*{B1.3 Probability in the control arm $p_{C}=0.4$}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu0andq05piC04LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR0q05piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu0andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR0q05piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu0andq05piC04LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR0q05piC04fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu0andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0$, $p_{iC}=0.4$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR0q05piC04fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu05andq05piC04LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR05q05piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu05andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR05q05piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu05andq05piC04LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0.5$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR05q05piC04fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu05andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0.5$, $p_{iC}=0.4$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR05q05piC04fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu1andq05piC04LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR1q05piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu1andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR1q05piC04_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu1andq05piC04LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR1q05piC04fromMPandCMP}}
\end{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu1andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1$, $p_{iC}=0.4$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR1q05piC04fromMPandCMP_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu15andq05piC04LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR15q05piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu15andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR15q05piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu15andq05piC04LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1.5$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR15q05piC04fromMPandCMP}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu15andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1.5$, $p_{iC}=0.4$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR15q05piC04fromMPandCMP_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu2andq05piC04LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR2q05piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu2andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR2q05piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu2andq05piC04LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=2$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR2q05piC04fromMPandCMP}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu2andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=2$, $p_{iC}=0.4$, $q=0.5$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR2q05piC04fromMPandCMP_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu0andq075piC04LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR0q075piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu0andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR0q075piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu0andq075piC04LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR0q075piC04fromMPandCMP}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
% NOTE(review): file name said q05 but caption/section are for q=0.75 -- confirm the q075 file exists.
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu0andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0$, $p_{iC}=0.4$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR0q075piC04fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu05andq075piC04LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR05q075piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu05andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR05q075piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu05andq075piC04LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0.5$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR05q075piC04fromMPandCMP}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
% NOTE(review): file name said q05 but caption/section are for q=0.75 -- confirm the q075 file exists.
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu05andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=0.5$, $p_{iC}=0.4$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR05q075piC04fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu1andq075piC04LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR1q075piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu1andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR1q075piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu1andq075piC04LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR1q075piC04fromMPandCMP}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
% NOTE(review): file name said q05 but caption/section are for q=0.75 -- confirm the q075 file exists.
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu1andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1$, $p_{iC}=0.4$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR1q075piC04fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu15andq075piC04LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR15q075piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu15andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR15q075piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu15andq075piC04LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1.5$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR15q075piC04fromMPandCMP}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
% NOTE(review): file name said q05 but caption/section are for q=0.75 -- confirm the q075 file exists.
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu15andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=1.5$, $p_{iC}=0.4$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR15q075piC04fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu2andq075piC04LOR.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{BiasThetaLOR2q075piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotBiasThetamu2andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Bias of the estimation of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{BiasThetaLOR2q075piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu2andq075piC04LOR.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=2$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{RatioOfMSEwithLOR2q075piC04fromMPandCMP}}
\hbox{E}nd{figure}
\begin{figure}[t]\centering
% NOTE(review): file name said q05 but caption/section are for q=0.75 -- confirm the q075 file exists.
\includegraphics[scale=0.35]{PlotForRatioOfMSEMPandCMPmu2andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Ratio of mean squared errors of the fixed-weights to mean squared errors of inverse-variance estimator for $\theta=2$, $p_{iC}=0.4$, $q=0.75$, unequal sample sizes $n=30,\;60,\;100,\;160$.
\label{RatioOfMSEwithLOR2q075piC04fromMPandCMP_unequal_sample_sizes}}
\end{figure}
\renewcommand{\thefigure}{B2.1.\arabic{figure}}
\setcounter{figure}{0}
\section{Coverage of log-odds-ratio}
Subsections B2.1, B2.2 and B2.3 correspond to $p_{C}=0.1,\; 0.2,\; 0.4$ respectively.
For a given $p_{C}$ value, each figure corresponds to a value of $\theta (= 0, 0.5, 1, 1.5, 2)$, a value of $q (= 0.5, 0.75)$, a value of $\tau^2 = 0.0(0.1)1$, and a set of values of $n$ (= 40, 100, 250, 1000) or $\bar{n}$ (= 30, 60, 100, 160).\\
Each figure contains a panel (with $\tau^2$ on the horizontal axis) for each combination of $n$ (or $\bar{n}$) and $K (=5, 10, 30)$.\\
The interval estimators of $\theta$ are the companions to the inverse-variance-weighted point estimators
\begin{itemize}
\item DL (DerSimonian-Laird)
\item REML (restricted maximum likelihood)
\item MP (Mandel-Paule)
\item KD (Improved moment estimator based on Kulinskaya and Dollinger (2015))
\item J (Jackson)
\end{itemize}
and
\begin{itemize}
\item HKSJ (Hartung-Knapp-Sidik-Jonkman)
\item HKSJ KD (HKSJ with KD estimator of $\tau^2$)
\item SSW (SSW as center and half-width equal to critical value from $t_{K-1}$
times estimated standard deviation of SSW with $\hat{\tau}^2 = \hat{\tau}^2_{KD}$)
\end{itemize}
\subsection*{B2.1 Probability in the control arm $p_{C}=0.1$}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu0andq05piC01LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR0q05piC01}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu0andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR0q05piC01_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu05andq05piC01LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR05q05piC01}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu05andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR05q05piC01_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu1andq05piC01LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR1q05piC01}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu1andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR1q05piC01_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu15andq05piC01LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR15q05piC01}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu15andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR15q05piC01_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu2andq05piC01LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.1$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR2q05piC01}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu2andq05piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.1$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR2q05piC01_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu0andq075piC01LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR0q075piC01}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu0andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR0q075piC01_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu05andq075piC01LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR05q075piC01}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu05andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR05q075piC01_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu1andq075piC01LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR1q075piC01}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu1andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR1q075piC01_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu15andq075piC01LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR15q075piC01}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu15andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR15q075piC01_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu2andq075piC01LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.1$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR2q075piC01}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu2andq075piC01LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.1$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR2q075piC01_unequal_sample_sizes}}
\hbox{E}nd{figure}
\renewcommand{\thefigure}{B2.2.\arabic{figure}}
\setcounter{figure}{0}
\subsection*{B2.2 Probability in the control arm $p_{C}=0.2$}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu0andq05piC02LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR0q05piC02}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu0andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR0q05piC02_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu05andq05piC02LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR05q05piC02}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu05andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR05q05piC02_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu1andq05piC02LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR1q05piC02}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu1andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR1q05piC02_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu15andq05piC02LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR15q05piC02}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu15andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR15q05piC02_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu2andq05piC02LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.2$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR2q05piC02}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu2andq05piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.2$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR2q05piC02_unequal_sample_sizes}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu0andq075piC02LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR0q075piC02}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu0andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR0q075piC02_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu05andq075piC02LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR05q075piC02}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu05andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR05q075piC02_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu1andq075piC02LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR1q075piC02}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu1andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR1q075piC02_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu15andq075piC02LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR15q075piC02}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu15andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR15q075piC02_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu2andq075piC02LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.2$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR2q075piC02}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu2andq075piC02LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.2$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR2q075piC02_unequal_sample_sizes}}
\hbox{E}nd{figure}
\renewcommand{\thefigure}{B2.3.\arabic{figure}}
\setcounter{figure}{0}
\subsection*{B2.3 Probability in the control arm $p_{C}=0.4$}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu0andq05piC04LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR0q05piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu0andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR0q05piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu05andq05piC04LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR05q05piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu05andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR05q05piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu1andq05piC04LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR1q05piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu1andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR1q05piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu15andq05piC04LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR15q05piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu15andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR15q05piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu2andq05piC04LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.4$, $q=0.5$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR2q05piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu2andq05piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.4$, $q=0.5$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR2q05piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu0andq075piC04LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR0q075piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu0andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR0q075piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu05andq075piC04LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR05q075piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu05andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=0.5$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR05q075piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu1andq075piC04LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR1q075piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu1andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR1q075piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu15andq075piC04LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR15q075piC04}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu15andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=1.5$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR15q075piC04_unequal_sample_sizes}}
\hbox{E}nd{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu2andq075piC04LOR.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.4$, $q=0.75$, equal sample sizes $n=40,\;100,\;250,\;1000$.
\label{CovThetaLOR2q075piC04}}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[scale=0.33]{PlotCovThetamu2andq075piC04LOR_unequal_sample_sizes.pdf}
\caption{Coverage of overall effect measure $\theta$ for $\theta=2$, $p_{iC}=0.4$, $q=0.75$,
unequal sample sizes $n=30,\; 60,\;100,\;160$.
\label{CovThetaLOR2q075piC04_unequal_sample_sizes}}
\end{figure}
% NOTE(review): everything below this \end{document} is not typeset; move \end{document}
% past the appendix if the appendix is meant to appear in the output.
\end{document}
\appendix
\section{Appendix}
\subsubsection*{Methods of estimation in simulations}
\subsubsection*{Point estimators of $\tau^2$ include:}
\begin{itemize}
\item DL-DerSimonian and Laird method by \cite{dersimonian1986meta}
\item J method by \cite{jackson2013confidence}
\item KD-Corrected Mandel-Paule method based on \cite{kulinskaya2015accurate}
\item MP-Mandel-Paule method \cite{mandel1970interlaboratory}
\item REML-Restricted Maximum Likelihood method
\end{itemize}
\subsubsection*{Interval estimators of $\tau^2$ include:}
\begin{itemize}
\item BJ-method by \cite{biggerstaff2008exact}
\item J method by \cite{jackson2013confidence}
\item KD-Corrected Mandel-Paule method based on \cite{kulinskaya2015accurate}
\item PL - profile likelihood confidence interval based on $\tau_{REML}^2$
\item QP - Q profile confidence interval method \cite{viechtbauer2007confidence}
\end{itemize}
\subsubsection*{Point estimators of $\theta$ include:}
inverse variance methods with $\tau^2$ estimated by:
\begin{itemize}
\item DL-DerSimonian and Laird method by \cite{dersimonian1986meta}
\item J method by \cite{jackson2013confidence}
\item KD-Corrected Mandel-Paule method based on \cite{kulinskaya2015accurate}
\item MP-Mandel-Paule method \cite{mandel1970interlaboratory}
\item REML - Restricted Maximum Likelihood method
\end{itemize}
and
\begin{itemize}
\item SSW - fixed weights estimator of $\theta$
\end{itemize}
\subsubsection*{Interval estimators of $\theta$ include:}
\begin{itemize}
\item DL-Inverse-variance with DerSimonian and Laird method by \cite{dersimonian1986meta}
\item J-Inverse-variance with method by \cite{jackson2013confidence}
\item KD-Inverse-variance with Corrected Mandel-Paule method based on \cite{kulinskaya2015accurate}
\item MP-Inverse-variance with Mandel Paule method \cite{mandel1970interlaboratory}
\item REML-Inverse-variance with Restricted Maximum Likelihood Method
\item HKSJ DL inverse variance with modified variance of $\theta$ and t-quantiles from \cite{hartung2001refined} and \cite{sidik2002simple}
\item HKSJ KD -Inverse-variance with same method as KD, and t-quantiles
\item SSW KD - fixed weights estimator of $\theta$ with the variance given by (\ref{eq:varianceOfSSW}) and t-quantiles
\end{itemize}
\subsection*{Studies with double zero events in both arms}
\begin{itemize}
\item J-adds continuity correction $1/2$ to each cell
\item DL-adds continuity correction $1/2$ to each cell
\item REML-adds continuity correction $1/2$ to each cell
\item MP-adds continuity correction $1/2$ to each cell
\item KD-excludes double zeros
\end{itemize}
\end{document} |
\begin{document}
\title[The Khovanov Homology of $(p,-p,q)$ Pretzel Knots]{The Khovanov Homology of $(p,-p,q)$ Pretzel Knots}
\date{\today}
\author[Starkston]{Laura Starkston}
\maketitle
\begin{abstract}
In this paper, we compute the Khovanov homology over $\mathbb{Q}$ for $(p,-p,q)$ pretzel knots for $3\leq p \leq15$, $p$ odd, and arbitrarily large $q$. We provide a conjecture for the general form of the Khovanov homology of $(p,-p,q)$ pretzel knots. These computations reveal that these knots have thin Khovanov homology (over $\mathbb{Q}$ or $\mathbb{Z}$). Because Greene has shown that these knots are not quasi-alternating, this provides an infinite class of non-quasi-alternating knots with thin Khovanov homology.
\end{abstract}
\section{Introduction}
{
In \cite{Khovanov}, Khovanov introduced his categorification of the Jones polynomial, a graded homology theory, which is a powerful link invariant. As with the Jones polynomial, there is a finite way to compute the Khovanov homology of a link given a diagram with finitely many crossings. A number of programs have been written to compute the Khovanov homology of a link. The programs by Bar-Natan and Green can be implemented in Mathematica \cite{KAtlas}. A faster program called KhoHo was written by Shumakovitch \cite{KhoHo}. Of course, one must have a finite description of the knot or link, in order to use these programs to obtain its Khovanov homology.
We set out to find the Khovanov homology of infinite classes of knots, in particular infinite classes of pretzel knots. Lee proves in \cite{Lee} that the Khovanov homology of alternating links (over rational coefficients) is completely determined by the Jones polynomial and knot signature. Ozsv\'{a}th and Szab\'{o} defined a larger class of knots they call quasi-alternating links \cite{quasidef} and Manolescu and Ozsv\'{a}th extended Lee's results to this class in \cite{quasi}. In \cite{Champanerkar} the quasi-alternating status of pretzel links was explored by Champanerkar and Kofman. They classify many pretzel links as either quasi-alternating, or not, leaving open the status of only $2$ classes of $3$ column pretzel links: $P(p,-p,q)$ and $P(p+1,-p,q)$. Greene completes this classification in \cite{Greene}, and finds that the $P(p,-p,q)$ knots are not quasi-alternating, when $q>p$ thus their Khovanov homology is not necessarily determined by their Jones polynomial and signature.
We look at the Khovanov homology of the $P(p,-p,q)$ knots ($q\geq p > 2$, $p$ odd). We utilize the simplicity of the diagrams resulting from resolving one of the crossings in the third column, to make an inductive argument in terms of $q$, which is completed using the fact that these are slice knots. Thus, given a satisfactory base case for some odd value of $p$, we can provide a formula for the Khovanov homology of all $P(p,-p,q)$ knots for $q$ sufficiently high. We show the explicit proof for the $p=3$ case, which applies in exactly the same manner for other values of $p$ once we compute a base case meeting certain criteria. We have already verified that such base cases exist for all odd values $3 \leq p \leq 15$ .
\textbf{Acknowledgements:} Many thanks to Peter Kronheimer for his guidance and advice throughout this project. Thank you also to Joshua Greene for the information on the quasi-alternating status of these knots.
}
\section{Khovanov Homology}
{
\subsection{Graded Modules}
{
Let $A$ be the free graded module generated by two elements, $\mathbf{1}$ and $\mathbf{X}$ over a ring $R$. We assign a quantum grading to $A$ so that the copy of $R$ generated by $\mathbf{1}$ has quantum grading $1$ and the copy of $R$ generated by $\mathbf{X}$ has quantum grading $-1$. This induces a quantum grading on $A^{\otimes q}$ where the copy of $R$ generated by $v_1\otimes \cdots \otimes v_q$ has quantum grading equal to the sum of the quantum gradings of $v_1$ through $v_q$.
We will denote quantum gradings by subscripts in parentheses. For example $A \cong R_{(-1)}\oplus R_{(1)}$. Let $\cdot \{k\}$ denote a quantum grading shift up by $k$. So $M_{(q)}\{k\}=M_{(q+k)}$.
We can turn $A$ into a bialgebra by defining a multiplication, $m$, a comultiplication, $\Delta$, a unit, and a counit. We will only be concerned with $m$ and $\Delta$ here but Khovanov defines the others in \cite{Khovanov}.
\begin{eqnarray*}
m(\mathbf{1} \otimes \mathbf{1}) & = & \mathbf{1}.\\
m(\mathbf{1} \otimes \mathbf{X}) & = & \mathbf{X}.\\
m(\mathbf{X} \otimes \mathbf{1}) & = & \mathbf{X}.\\
m(\mathbf{X} \otimes \mathbf{X}) & = & 0.
\end{eqnarray*}
\begin{eqnarray*}
\Delta(\mathbf{1}) & = & \mathbf{1} \otimes \mathbf{X} + \mathbf{X} \otimes \mathbf{1}.\\
\Delta(\mathbf{X}) & = & \mathbf{X} \otimes \mathbf{X}.
\end{eqnarray*}
}
\subsection{$n$-cube of smoothings}
{
For any given crossing there are two ways to resolve the crossing to eliminate it. We call these the $0$-smoothing and the $1$-smoothing according to the convention in Figure \ref{smoothings}.
\begin{figure}
\caption{The $0$ and $1$ resolutions of a crossing}
\label{smoothings}
\end{figure}
Given a knot or link diagram $D$, with $n$ crossings, there are $2^n$ different total resolutions of the diagram. Each total resolution is a collection of simple closed curves. Additionally, each resolution corresponds to an $n$-tuple of $0$s and $1$s. Fixing an ordering of the crossings, thus gives an identification of the collection of total resolutions of $D$ with the vertices of the unit $n$-cube. See Figure \ref{trefoilcube} for an example.
\begin{figure}
\caption{The cube of total smoothings of the trefoil knot}
\label{trefoilcube}
\end{figure}
For each vertex $v=(x_1,\cdots, x_n) \in \{0,1\}^n$, the \emph{weight} of the corresponding smoothing, $D_v$ is given by $w(D_v)=\sum_{i=1}^nx_i$ (i.e. the number of $1$-smoothings). Each of the edges of the $n$-cube corresponds to a change of one crossing from a $0$-smoothing to a $1$-smoothing. We label this edge with a map. The map is labeled $m$ if two circles in the $0$-smoothing merge to one circle in the $1$-smoothing, and $\Delta$ if one circle in the $0$-smoothing divides into two in the $1$-smoothing. We add in signs so that each square anti-commutes.
We then translate the diagram of smoothings into an algebraic diagram of free modules with a functor $\mathcal{F}$. Each complete smoothing $D_v$ translates to $\mathcal{F}(D_v)=A^{\otimes k}\{w(D_v)\}$ where $k$ is the number of closed curves in the smoothing. The maps $m$ and $\Delta$ translate to the multiplication and comultiplication on the copies of $A$ corresponding to the circles that are merging or dividing.
The resulting diagram for the trefoil is
$$ \xymatrix
{
& A\{1\} \ar[r]^{-\Delta} \ar[dr]^{-\Delta\qquad\qquad\qquad\qquad\qquad\qquad\qquad} & A\otimes A \{2\} \ar[dr]^{\Delta} & \\
A\otimes A \ar[ur]^{m} \ar[r]^{m} \ar[dr]^{m} & A\{1\} \ar[ur]_{\Delta\qquad\qquad\qquad\qquad\qquad\qquad} \ar[dr]^{-\Delta \qquad\qquad\qquad\qquad\qquad\qquad\qquad} & A\otimes A \{ 2\} \ar[r]^{-\Delta} & A\otimes A \otimes A \{3\}\\
& A\{1\} \ar[r]^{\Delta} \ar[ur]^{\qquad\qquad\qquad\qquad\qquad\Delta} & A\otimes A \{2\} \ar[ur]_{\Delta} &
.}$$
}
\subsection{Khovanov Complex}
{
From the $n$-cube of modules, we obtain a chain complex for the diagram, $\overline{CKh}(D)$ in the following manner.
$$\overline{CKh}^i(D)=\bigoplus_{\{D_v \colon w(D_v)=i\}}\mathcal{F}(D_v).$$
In other words, if we align the vertices of the $n$-cube such that vertices of the same weight are in the same column as above, we simply take the direct sum down each column. The differentials are given by the sums of the maps in the cube from smoothings of weight $i$ to smoothings of weight $i+1$. Because each square anticommutes, $d\circ d=0$.
We let $\cdot [k]$ denote a shift up by $k$ in the homological grading ($\overline{CKh}^i(D)[k]=\overline{CKh}^{i+k}(D)$). Let $n_+$ be the number of $(+)$ crossings and $n_-$ be the number of $(-)$ crossings according to the convention in figure \ref{posneg}.
\begin{figure}
\caption{Positive and negative crossings}
\label{posneg}
\end{figure}
Let $CKh(D)=\overline{CKh}(D)[-n_-]\{n_+-2n_-\}$. Let $\overline{Kh}(D)$ and $Kh(D)$ be the cohomologies of $\overline{CKh}(D)$ and $CKh(D)$ respectively. $Kh(D)$ is Khovanov's link invariant. $\overline{Kh}(D)$ is specific to the particular diagram in the sense that it depends on the number of $+$ and $-$ crossings in the diagram.
}
\subsection{A Long Exact Sequence}
{
Suppose we start with a diagram $D$, and resolve only one crossing. Let the diagrams with the $0$-resolution and $1$-resolution of this crossing be denoted $D(*0)$ and $D(*1)$ respectively. It is clear from the construction that $\overline{CKh}(D(*0))$ and $\overline{CKh}(D(*1))$ are disjoint subcomplexes of $\overline{CKh}(D)$, except that $\overline{CKh}(D(*1))$ has its homological and quantum gradings shifted up by $1$ in $\overline{CKh}(D)$. Additionally their union consists of all the vertices of $\overline{CKh}(D)$ and $\overline{CKh}(D)$ is the total complex of $\overline{CKh}(D(*0))\rightarrow \overline{CKh}(D(*1))$. Therefore we get a short exact sequence of complexes:
$$0 \rightarrow \overline{CKh}(D(*1))\{1\}[1] \rightarrow \overline{CKh}(D) \rightarrow \overline{CKh}(D(*0)) \rightarrow 0.$$
This induces a long exact sequence on the homology:
\begin{equation}
\cdots \rightarrow \overline{Kh}^{n-1}(D(*1))\{1\} \rightarrow \overline{Kh}^{n}(D) \rightarrow \overline{Kh}^n(D(*0)) \rightarrow \overline{Kh}^n(D(*1))\{1\} \rightarrow \cdots.
\label{LES}
\end{equation}
}
}
\section{Lee's variant of Khovanov Homology and Rasmussen's s-invariant}
{
\subsection{Lee's Invariant}
{
Lee considered a similar construction in \cite{Lee}. Her complex comes from the same $n$-cube of smoothings, and the modules associated to the vertices are the same as Khovanov's, although Lee takes the coefficient ring to be the rational numbers. The main difference is that the maps $m$ and $\Delta$ are slightly modified. In Lee's construction she uses the following maps as the multiplication and comultiplication on $A$.
\begin{eqnarray*}
m'(\mathbf{1} \otimes \mathbf{1}) & = & \mathbf{1}.\\
m'(\mathbf{1} \otimes \mathbf{X}) & = & \mathbf{X}.\\
m'(\mathbf{X} \otimes \mathbf{1}) & = & \mathbf{X}.\\
m'(\mathbf{X} \otimes \mathbf{X}) & = & \mathbf{1}.
\end{eqnarray*}
\begin{eqnarray*}
\Delta'(\mathbf{1}) & = & \mathbf{1} \otimes \mathbf{X} + \mathbf{X} \otimes \mathbf{1}.\\
\Delta'(\mathbf{X}) & = & \mathbf{X} \otimes \mathbf{X} + \mathbf{1} \otimes \mathbf{1}.
\end{eqnarray*}
Let $CKh'(L)$ be the analogous complex to that constructed in section $2$, but replacing $m$ and $\Delta$ by $m'$ and $\Delta'$ respectively. With the grading shifts included, Khovanov's original maps $m$ and $\Delta$ were constructed so that the differentials preserve quantum grading. (Each map $m$ or $\Delta$ decreases quantum grading by $1$, but as the weight increases by $1$, there is a quantum grading shift by $1$ which cancels out the decrease in quantum grading by the $m$ or $\Delta$ map.)
However, Lee's differentials do not preserve quantum grading, and $\Delta'(\mathbf{X})$ does not even have a homogeneous quantum grading. Nevertheless, each monomial in the image of some monomial $x$ under a differential map has quantum grading greater than or equal to the quantum grading of $x$. Let $q(x)$ denote the quantum grading of $x$. Then the differential respects the following filtration:
$$F^p CKh'(L) = \{x\in CKh'(L) \colon q(x) \geq p\}.$$
This filtration together with the homological grading induces a spectral sequence whose $E_0$ term is $CKh'(L)\cong CKh(L)$. The differential on the $0^{th}$ page, $d_0\colon E_0^{q,r}\rightarrow E_0^{q,r+1}$ is the part of Lee's differential that preserves quantum grading. This is exactly Khovanov's original differential, so the $E_1$ page is given by $E_1^{q,r}=Kh^r(K)_{(q)}$.
\textbf{Important Note:} While it is typical in writing the spectral sequence for a filtered differential graded module $A$ to use notation such that $E_1^{q,r} \cong H^{r+q}(F^rA/F^{r+1}A)$ and the $r^{th}$ differential has bidegree $(r, 1-r)$ it is more natural in this context to let $E_1^{r,q}=F^qKh^r(L)/F^{q+1}Kh^r(L) \cong Kh^r(K)_{(q)}$. Using this indexing, the $r^{th}$ differential has bidegree $(1,r)$ (homological degree increases by $1$ and filtration degree increases by $r$).
Lee proves that the rank of $Kh'(L)$ is simply determined by the number of components of the link, $L$. If $L$ has $n$ components the rank of $Kh'(L)$ is $2^n$. Therefore if $K$ is a knot, $Kh'(K)$ has rank $2$.
}
\subsection{Rasmussen's $s$-invariant}
In \cite{Rasmussen} Rasmussen asks, what are the quantum gradings of these two remaining copies of $\mathbb{Q}$ in the $E_{\infty}$ page of the spectral sequence described above? He proves that the difference between the two quantum gradings is exactly $2$ and then defines an invariant $s(K)$ to be the average of these two quantum gradings. Furthermore he proves that $s(K)$ provides a lower bound on the slice genus of the knot:
$$|s(K)| \leq 2g^*(K).$$
}
\section{Main Result}
We compute the Khovanov homology for the class of $(3,-3,q)$ pretzel knots. This same proof can be used to compute $P(p,-p,q)$ knots once an appropriate base case for the induction can be found.
{\begin{theorem}
Let $K_q=P(3,-3,q)$, the 3-stranded pretzel knot where $q \geq 5$. Then
\begin{eqnarray*}
Kh^0(K_q) & = & \mathbb{Q}_{(-1)} \oplus \mathbb{Q}_{(1)}\\
Kh^i(K_q) & = & 0 \;\;\;\; (0 < i \leq q-4)\\
Kh^{q-3}(K_q) & = & \mathbb{Q}_{(1+2(q-4))}\\
Kh^{q-2}(K_q) & = & \mathbb{Q}_{(5+2(q-4))}\\
Kh^{q-1}(K_q) & = & \mathbb{Q}_{(5+2(q-4))}\\
Kh^{q}(K_q)& = & \mathbb{Q}_{(7 + 2(q-4))} \oplus \mathbb{Q}_{(9 + 2(q-4))}\\
Kh^{q+1}(K_q) & = & \mathbb{Q}_{(11+2(q-4))}\\
Kh^{q+2}(K_q) & = & \mathbb{Q}_{(11+2(q-4))}\\
Kh^{q+3}(K_q) & = & \mathbb{Q}_{(15+2(q-4))}
\end{eqnarray*}
and $Kh^j(K_q)=0$ for all other values of $j$.
\label{mainthm}
\end{theorem}
\begin{proof}
We proceed by induction on $q$. The case for $q=5$ can be verified computationally \cite{KAtlas}.
For $q>5$ we consider the following 3 diagrams.
\begin{center}
\includegraphics[scale = 1]{PretzelFig2}
\end{center}
Notice that $D_1$ is the diagram $D$ with the last crossing in the last column resolved in the $0$-smoothing. Likewise $D_2$ is $D$ with the crossing resolved in the $1$-smoothing. Therefore the long exact sequence (\ref{LES}) gives
$$\cdots \rightarrow \overline{Kh}^{n-1}(D_2)\{1\} \rightarrow \overline{Kh}^n(D) \rightarrow \overline{Kh}^n(D_1) \rightarrow \overline{Kh}^n(D_2)\{1\} \rightarrow \cdots.$$
We observe that $D_1$ is a diagram for the $2$-component unlink. Therefore
\begin{eqnarray*}
Kh^0(D_1) &=& \mathbb{Q}_{(-2)} \oplus \mathbb{Q}_{(0)}^2 \oplus \mathbb{Q}_{(2)}.\\
Kh^i(D_1) & = & 0 \;\;\;\; (\forall i \neq 0).
\end{eqnarray*}
Whichever orientation we put on $D_1$, $n_+ = 3+q-1$ and $n_- = 3$. Therefore
\begin{eqnarray*}
\overline{Kh}^3(D_1) & = & \mathbb{Q}_{(2-q)} \oplus \mathbb{Q}_{(4-q)}^2 \oplus \mathbb{Q}_{(6-q)}.\\
\overline{Kh}^i(D_1) & = & 0 \;\;\;\; (\forall i \neq 3).
\end{eqnarray*}
Inserting these values into the long exact sequence (\ref{LES}) we get the following
$$0 \rightarrow \overline{Kh}^{i-1}(D_2)\{1\} \rightarrow \overline{Kh}^i(D) \rightarrow 0$$
for all $i\neq 3, 4$. This implies that $\overline{Kh}^{i-1}(D_2)\{1\} \cong \overline{Kh}^i(D)$ for $i\neq 3,4$.
Noticing that $D_2$ is a diagram for the $P(3,-3,q-1)$ pretzel knot, we can use the inductive hypothesis to get its Khovanov Homology. Noticing that for $D_2$, $n_+ = 3+q-1$ and $n_- = 3$ the unnormalized Khovanov homology for $D_2$ is given by the column on the left below. The right hand column shifts the gradings by $1$, to give the appropriate isomorphisms.
$$\begin{array}{rclcrcl}
\overline{Kh}^3(D_2) & = & \mathbb{Q}_{(3-q)} \oplus \mathbb{Q}_{(5-q)} & \overset{\{1\}}{\rightarrow}& \mathbb{Q}_{(4-q)} \oplus \mathbb{Q}_{(6-q)} & &\\
\overline{Kh}^{3+i}(D_2) & = & 0 \quad (0 < i \leq q-5)&& 0 & \cong & \overline{Kh}^{4+i}(D) \quad (1 < i \leq q-5)\\
\overline{Kh}^{q-1}(D_2) & = & \mathbb{Q}_{(1+q-5)} &\overset{\{1\}}{\rightarrow}& \mathbb{Q}_{(1+q-4)}& \cong & \overline{Kh}^{q}(D)\\
\overline{Kh}^{q}(D_2) & = & \mathbb{Q}_{(5+q-5)}&& \mathbb{Q}_{(5+q-4)} & \cong & \overline{Kh}^{q+1}(D)\\
\overline{Kh}^{q+1}(D_2) & = & \mathbb{Q}_{(5+q-5)} &\overset{\{1\}}{\rightarrow}& \mathbb{Q}_{(5+q-4)} & \cong & \overline{Kh}^{q+2}(D)\\
\overline{Kh}^{q+2}(D_2) & = & \mathbb{Q}_{(7 + q-5)} \oplus \mathbb{Q}_{(9+q-5)} && \mathbb{Q}_{(7+q-4)}\oplus \mathbb{Q}_{(9+q-4)} & \cong & \overline{Kh}^{q+3}(D)\\
\overline{Kh}^{q+3}(D_2) & = & \mathbb{Q}_{(11+q-5)} &\overset{\{1\}}{\rightarrow}& \mathbb{Q}_{(11+q-4)} & \cong & \overline{Kh}^{q+4}(D)\\
\overline{Kh}^{q+4}(D_2) & = & \mathbb{Q}_{(11+q-5)} && \mathbb{Q}_{(11+q-4)} & \cong & \overline{Kh}^{q+5}(D)\\
\overline{Kh}^{q+5}(D_2) & = & \mathbb{Q}_{(15+q-5)} &\overset{\{1\}}{\rightarrow}& \mathbb{Q}_{(15+q-4)} & \cong & \overline{Kh}^{q+6}(D)
\end{array}$$
After we shift the gradings back to get $Kh(D)$, we find that we have proven the result for $Kh^i(K_q)$ for all $i\neq 0,1$. We are left to find $\overline{Kh}^3(D)$ and $\overline{Kh}^4(D)$ (which normalize to $Kh^0(K_q)$ and $Kh^1(K_q)$ under the grading shifts). We have the following exact sequence from the long exact sequence
\begin{equation}
\label{SES}
0 \rightarrow \overline{Kh}^3(D) \overset{\alpha}{\rightarrow} \mathbb{Q}_{(2-q)} \oplus \mathbb{Q}_{(4-q)}^2 \oplus \mathbb{Q}_{(6-q)} \overset{\beta}{\rightarrow} \mathbb{Q}_{(4-q)}\oplus \mathbb{Q}_{(6-q)} \overset{\gamma}{\rightarrow} \overline{Kh}^4(D) \rightarrow 0.
\end{equation}
Since $\alpha, \beta,$ and $\gamma$ preserve quantum grading, basic linear algebra implies that
$$\mathbb{Q}_{(2-q)}\oplus \mathbb{Q}_{(4-q)} \subseteq \ker(\beta) = \operatorname{im}(\alpha).$$
Since $\alpha$ is injective, $\overline{Kh}^3(D) \cong \operatorname{im}(\alpha)$, therefore
$$\mathbb{Q}_{(2-q)} \oplus \mathbb{Q}_{(4-q)} \subseteq \overline{Kh}^3(D).$$
This results in $4$ possibilities for the isomorphism class of $\overline{Kh}^3(D)$. Using the exact sequence (\ref{SES}), we can determine the isomorphism class of $\overline{Kh}^4(D)$ corresponding to each of these four possibilities:
$$
\begin{array}{rclrcl}
\overline{Kh}^3(D) & = & \mathbb{Q}_{(2-q)}\oplus \mathbb{Q}_{(4-q)} & \overline{Kh}^4(D) & = & 0,\\
\overline{Kh}^3(D) & = & \mathbb{Q}_{(2-q)}\oplus \mathbb{Q}_{(4-q)}^2 & \overline{Kh}^4(D) & = & \mathbb{Q}_{(4-q)}, \\
\overline{Kh}^3(D) & = & \mathbb{Q}_{(2-q)}\oplus \mathbb{Q}_{(4-q)} \oplus \mathbb{Q}_{(6-q)} & \overline{Kh}^4(D) & = & \mathbb{Q}_{(6-q)},\\
\overline{Kh}^3(D) & = & \mathbb{Q}_{(2-q)}\oplus \mathbb{Q}_{(4-q)}^2 \oplus \mathbb{Q}_{(6-q)} & \overline{Kh}^4(D) & = & \mathbb{Q}_{(4-q)} \oplus \mathbb{Q}_{(6-q)}.
\end{array}
$$
After normalization these four possibilities are:
$$
\begin{array}{rclrcl}
Kh^0(D) & = & \mathbb{Q}_{(-1)}\oplus \mathbb{Q}_{(1)} & Kh^1(D) & = & 0,\\
Kh^0(D) & = & \mathbb{Q}_{(-1)}\oplus \mathbb{Q}_{(1)}^2 & Kh^1(D) & = & \mathbb{Q}_{(1)}, \\
Kh^0(D) & = & \mathbb{Q}_{(-1)}\oplus \mathbb{Q}_{(1)} \oplus \mathbb{Q}_{(3)} & Kh^1(D) & = & \mathbb{Q}_{(3)},\\
Kh^0(D) & = & \mathbb{Q}_{(-1)}\oplus \mathbb{Q}_{(1)}^2 \oplus \mathbb{Q}_{(3)} &Kh^1(D) & = & \mathbb{Q}_{(1)} \oplus \mathbb{Q}_{(3)}.
\end{array}
$$
We aim to show that the first of these four possibilities is correct.
We now utilize the results of Lee and Rasmussen, described in section 3. In particular we recall Rasmussen's invariant, $s(K)$ and its relation to the slice genus.
$$|s(K)| \leq 2g^*(K).$$
\begin{claim}
$g^*(K_q) = 0$ and thus $s(K_q)=0$.
\end{claim}
We observe that there is a cobordism between $K_q$ and the two component unlink. We may resolve one crossing of $K_q$ to obtain a diagram of a link in the isotopy class of the two component unlink. Resolving the crossing amounts to adding in a band splitting the knot into two components. (See Figure \ref{cobordism}).
\begin{figure}
\caption{Cobordism between $K_q$ and the 2-component unlink}
\label{cobordism}
\end{figure}
We may then cap off the unlink end with 2 discs to obtain a disc bounding $K_q$. Therefore $K_q$ is a slice knot ($g^*(K_q)=0$). Therefore $|s(K_q)| \leq 0$ so $s(K_q)=0$.
We now complete the proof. Because $s(K_q) = 0$, the remaining copies of $\mathbb{Q}$ in the $E_{\infty}$ page of Lee's spectral sequence must be in quantum gradings $-1$ and $1$. The only copies of $\mathbb{Q}$ in those gradings in the $E_1$ term are in the zeroeth homological grading (Table \ref{spectral}). Therefore all differentials on these two copies of $\mathbb{Q}$ must be trivial so that they survive to the $E_{\infty}$ page. All other copies of $\mathbb{Q}$ on the $E_1$ page must not survive to the $E_{\infty}$ page.
\begin{table}[htbp]
$$
\begin{array}{|c|c|c|c|c|c|c|c|c|c|c|c|c|}
\hline
q+3& & & & & & & & & & & & \mathbb{Q}\\
\hline
q+2& & & & & & & & & & \mathbb{Q} & & \\
\hline
q+1& & & & & & & & & & \mathbb{Q} & & \\
\hline
q& & & & & & & & \mathbb{Q} & \mathbb{Q} & & & \\
\hline
q-1& & & & & & & \mathbb{Q} & & & & & \\
\hline
q-2& & & & & & & \mathbb{Q} & & & & & \\
\hline
q-3& & & & & \mathbb{Q} & & & & & & & \\
\hline
\vdots& & & & \vdots & \vdots & & & & & & & \\
\hline
2& & & & & & & & & & & & \\
\hline
1& & \mathbb{Q}^a& \mathbb{Q}^b& & & & & & & & & \\
\hline
0& \mathbb{Q} & \mathbb{Q}^{1+a} & \mathbb{Q}^b & & & & & & & & & \\
\hline
& \;\; -1 \;\;& \;\;\; 1 \;\;\; & \;\;\; 3 \;\;\;& \cdots & 1+ \tau & 3+\tau & 5+\tau & 7+\tau & 9+\tau & 11+\tau & 13+\tau & 15+\tau \\
\hline
\end{array}
$$
\caption{The $E_1$ page of the Spectral Sequence converging to $Kh'(K_q)$. Note that all empty boxes and boxes that are not shown are trivial. $\tau=2(q-4)$. The vertical axis corresponds to the homological grading in Khovanov homology while the horizontal axis corresponds to the filtration which corresponds to the quantum grading on the $E_1$ page.}
\label{spectral}
\end{table}
Recall that the $r^{th}$ differential goes up $1$ and over $r$, because of an indexing that differs from the standard indexing for a spectral sequence induced by a filtration. (See the note in section 3.1 for further explanation). Let $d_r^{p,q}$ denote the differential on the $r^{th}$ page from $E_r^{p,q}$ to $E_r^{p+1,q+r}$. We know that $d_r^{0,-1} = 0$ and $d_r^{0,1}$ acts trivially on one copy of $\mathbb{Q}$ for every $r$ based on the value of $s(K_q)$. Because the row corresponding to the second homological grading has only zeros, $d_r^{1,1}=0$, for all $r\geq 1$. Thus if $a\neq 0$, an additional copy of $\mathbb{Q}$ will survive in $E_{\infty}^{1,1}$, contradicting Lee's result that there can only be two copies of $\mathbb{Q}$ on the $E_{\infty}$ page. Therefore $a=0$ and $d_r^{0,1}=0$ for all $r\geq 1$. Because the row corresponding to the first homological grading has zeros in quantum gradings greater than $3$, $d_r^{0,3}=0$ for all $r\geq 1$. Therefore if $b\neq 0$, an additional copy of $\mathbb{Q}$ will survive in $E_{\infty}^{0,3}$, again contradicting Lee's result. Therefore $a=b=0$, and the Khovanov homology of $K_q$ is as stated in the theorem.
\end{proof}
}
\section{Coefficients in $\mathbb{Z}$}
{
While Lee's spectral sequence only applies to Khovanov homology with coefficients in $\mathbb{Q}$, the long exact sequence applies with arbitrary coefficients. We can compute a base case over $\mathbb{Z}$ using \cite{KAtlas} or \cite{KhoHo}. Then we obtain isomorphisms as in the previous section $\overline{Kh}^{i-1}(P(3,-3,q-1))\{1\} \cong \overline{Kh}^i(P(p,-p,q))$ for $i\neq 3,4$ which determines the values of $Kh(P(3,-3,q))$ in homological degree $r\neq 0,1$. The values in homological degree $r\geq q-3$ or $r<0$ are completely determined by the base case. We note that the support of the Khovanov homology in these homological degrees lies in the two main diagonals. The values in homological degree $1<r<q-3$ are determined by $Kh^1(P(3,-3,q-r+1))$. Given our results over $\mathbb{Q}$ combined with the ``interesting'' part of the long exact sequence, we find that $Kh^0$ has a copy of $\mathbb{Z}$ in each of the $-1^{st}$ and $1^{st}$ gradings and $Kh^i$ is either only $0$ or has some torsion contained in the two main diagonals. We illustrate this process in tables \ref{Khov336} and \ref{Khov337}, indicating where the inductive step allows for one torsion factor with the marking $T_i, T_i'$. Here, $i\in \mathbb{N}$ indicates the value of $q$ for which that torsion factor first appeared in the Khovanov homology of the $(p,-p,q)$ pretzel knot. In fact $T_i, T_i'=0$ in these particular cases (verifiable by computation).
\begin{table}[htbp]
$$
\begin{array}{|c|c|c|c|c|c|c|c|c|c|c|c|}
\hline
9&&&&&&&&&&&\mathbb{Z}\\\hline
8&&&&&&&&&\mathbb{Z}&\mathbb{Z}_2&\\\hline
7&&&&&&&&&\mathbb{Z}&&\\\hline
6&&&&&&&\mathbb{Z}&\mathbb{Z}\oplus\mathbb{Z}_2&&&\\\hline
5&&&&&&\mathbb{Z}&\mathbb{Z}_2&&&&\\\hline
4&&&&&&\mathbb{Z}&&&&&\\\hline
3&&&&\mathbb{Z}&\mathbb{Z}_2&&&&&&\\\hline
2&&&&&&&&&&&\\\hline
1&&T_{6}&T_{6}'&&&&&&&&\\\hline
0&\mathbb{Z}&\mathbb{Z}&&&&&&&&&\\\hline
&-1&1&3&5&7&9&11&13&15&17&19\\\hline
\end{array}
$$
\caption{The Khovanov homology over $\mathbb{Z}$ of $P(3,-3,6)$. The homological grading is on the vertical axis, and the quantum grading on the horizontal axis to display the similarity with the spectral sequence page above. The unknown pieces are $T_i,T_i'$ as described above.}
\label{Khov336}
$$
\begin{array}{|c|c|c|c|c|c|c|c|c|c|c|c|c|}
\hline
10&&&&&&&&&&&&\mathbb{Z}\\\hline
9&&&&&&&&&&\mathbb{Z}&\mathbb{Z}_2&\\\hline
8&&&&&&&&&&\mathbb{Z}&&\\\hline
7&&&&&&&&\mathbb{Z}&\mathbb{Z}\oplus\mathbb{Z}_2&&&\\\hline
6&&&&&&&\mathbb{Z}&\mathbb{Z}_2&&&&\\\hline
5&&&&&&&\mathbb{Z}&&&&&\\\hline
4&&&&&\mathbb{Z}&\mathbb{Z}_2&&&&&&\\\hline
3&&&&&&&&&&&&\\\hline
2&&&T_{6}&T_{6}'&&&&&&&&\\\hline
1&&T_{7}&T_{7}'&&&&&&&&&\\\hline
0&\mathbb{Z}&\mathbb{Z}&&&&&&&&&&\\\hline
&-1&1&3&5&7&9&11&13&15&17&19&21\\\hline
\end{array}
$$
\caption{The Khovanov homology over $\mathbb{Z}$ of $P(3,-3,7)$. The homological grading is on the vertical axis, and the quantum grading on the horizontal axis to display the similarity with the spectral sequence page above. The unknown pieces are $T_i,T_i'$ as described above.}
\label{Khov337}
\end{table}
Based on computations of the cases for small values of $q$, one expects that these torsion factors will all be zero.
\begin{conjecture}
There are no additional torsion factors appearing in the Khovanov homology for $P(p,-p,q)$ for $q>p+2$. In the notation of tables \ref{Khov336} and \ref{Khov337}, $T_i=T_i'=0$ for all $i$.
\end{conjecture}
Note that the rest of the Khovanov homology is fully determined. There are only two places where the long exact sequence allows new torsion to occur at each stage: in the first homological grading at the first and third quantum gradings and this torsion will persist only within the two diagonals, so the homology over $\mathbb{Z}$ coefficients remains thin.
\begin{corollary}
The $(3,-3,q)$ pretzel knots for $q\geq 4$ have thin Khovanov homology over $\mathbb{Z}$.
\end{corollary}
Combining this with Greene's result that these knots are not quasi-alternating \cite{Greene}, we find that we have an infinite class of non-quasi-alternating knots with thin Khovanov homology. We generalize this result in the next section to other odd values of $p$. Note that for even values of $p$, the Khovanov homology of the link can have torsion off the main diagonals and thus is not always thin.
}
\section{$(p,-p,q)$ Pretzel Knots}
{
We can make the same argument to compute the Khovanov homology of other $(p,-p,q)$ pretzel knots, providing we have a base case knot $B_p$ that satisfies the following conditions
\begin{enumerate}
\item $Kh^0(B_p)=\mathbb{Q}_{(-1)}\oplus \mathbb{Q}_{(1)}$
\item $Kh^{1}(B_p)=0$
\item $Kh^i(B_p)=0$ for $i<0$.
\end{enumerate}
(Alternatively we could have $Kh^{-1}=0$ and $Kh^i=0$ for $i>0$ which is the case for the mirror images of these knots).
The other nontrivial groups will determine the formula for the Khovanov homology of the $P(p,-p,q)$ knots. Namely, all $P(p,-p,q)$ knots with $q$ greater than or equal to the value of $q$ in the base case, will have $Kh^0=\mathbb{Q}_{(-1)}\oplus \mathbb{Q}_{(1)}$, then $Kh^i=0$ for $1\leq i \leq q+c$ where $c$ is some constant determined by the base case. The subsequent groups will be shifted versions of the higher nontrivial groups in the base case.
Homological thinness over $\mathbb{Z}$ coefficients will also follow in the same way as for $p=3$ from homological thinness of the base case over $\mathbb{Z}$, though additional torsion could theoretically show up within the main diagonals as in the previous case.
We have computationally verified that we have base cases satisfying the three required conditions for odd values $3\leq p \leq 15$. In particular the base case for these values of $p$ always occurs in the $P(p,-p,p+2)$ knot. (Recall that in the case $p=3$, the base case is $P(3,-3,5)$.) We suspect that in general the knot $P(p,-p,p+2)$ will provide the appropriate base case. Furthermore, after examining the first few cases, a clear pattern seems to arise. We can therefore extend the result slightly
\begin{theorem}
For $p=3,5,7,9,11,13,15$ and any $q\geq p+2$, the Khovanov homology for $P(p,-p,q)$ over rational coefficients is
\begin{eqnarray*}
Kh^0 & = & \mathbb{Q}_{(-1)} \oplus \mathbb{Q}_{(1)}\\
Kh^i & = & 0 \;\;\; (0<i\leq q-p-1)\\
Kh^{q-p} & = & \mathbb{Q}_{(3+2(q-p-2))} \\
Kh^{q-p+1} & = & \mathbb{Q}_{(7+2(q-p-2))} \\
Kh^{q-p-2+2i} & = & \mathbb{Q}_{(4i-1+2(q-p-2))}^{i} \oplus \mathbb{Q}_{(4i+1+2(q-p-2))}^{i-2} \;\;\;\; (1 < i \leq n)\\
Kh^{q-p-2+2i+1} & = & \mathbb{Q}_{(4i+1+2(q-p-2))}^{i-1} \oplus \mathbb{Q}_{(4i+3+2(q-p-2))}^i \;\;\;\; (1 < i \leq n)\\
Kh^{q-1} & = & \mathbb{Q}_{(2p+1+2(q-p-2))}^n \oplus \mathbb{Q}_{(2p+3+2(q-p-2))}^{n-1}\\
Kh^{q} & = & \mathbb{Q}_{(2p+3+2(q-p-2))}^n \oplus \mathbb{Q}_{(2p+5+2(q-p-2))}^n\\
Kh^{q+1} & = & \mathbb{Q}_{(2p+5+2(q-p-2))}^{n-1} \oplus \mathbb{Q}_{(2p+7+2(q-p-2))}^n\\
Kh^{q+p+1-2i} & = & \mathbb{Q}_{(4p+5-4i+2(q-p-2))}^i \oplus \mathbb{Q}_{(4p+7-4i+2(q-p-2))}^{i-1} \;\;\;\; (1<i\leq n)\\
Kh^{q+p+2-2i} & = & \mathbb{Q}_{(4p+7-4i+2(q-p-2))}^{i-2} \oplus \mathbb{Q}_{(4p+9-4i+2(q-p-2))}^i \;\;\;\; (1<i\leq n)\\
Kh^{q+p-1} & = & \mathbb{Q}_{(4p+1+2(q-p-2))}\\
Kh^{q+p} & = & \mathbb{Q}_{(4p+5+2(q-p-2))}
\end{eqnarray*}
where $n=(p-1)/2$.
\label{ppqthm}
\end{theorem}
\begin{conjecture}
The formula in theorem \ref{ppqthm} holds for $P(p,-p,q)$ for all odd values of $p\geq 17$ and $q\geq p+2$.
\end{conjecture}
When $p$ is even, $P(p,-p,q)$ is a two or three component link (depending on the parity of $q$) and the computation is considerably more complicated. There can be nontrivial homology outside of the main diagonals when $p$ is even so the Khovanov homology is not thin. For example, the Khovanov homology over $\mathbb{Z}$ of $P(2,-2,5)=L_{9n4}$, ($9^2_{43}$ in Rolfsen notation) has two copies of $\mathbb{Z}$ off the main diagonal. Because we can work easily only with odd values of $p$, establishing the base cases via induction on $p$ would require a way to jump from the computation of $P(p,-p,p+2)$ to $P(p+2,-p-2, p+4)$, where the total number of crossings increases by $6$. Thus a different method of induction than the long exact sequence used for theorem \ref{mainthm} would be required for the full generalization to arbitrary odd values of $p$.
}
\end{document} |
\begin{document}
\title{Bell-Type Quantum Field Theories}
\begin{abstract}
In \cite{BellBeables} John~S.~Bell proposed how to associate particle
trajectories with a lattice quantum field theory, yielding what can be
regarded as a $|\Psi|^2$-distributed Markov process on the appropriate
configuration space. A similar process can be defined in the
continuum, for more or less any regularized quantum field theory; such
processes we call Bell-type quantum field theories. We describe
methods for explicitly constructing these processes. These concern,
in addition to the definition of the Markov processes, the efficient
calculation of jump rates, how to obtain the process from the
processes corresponding to the free and interaction Hamiltonian alone,
and how to obtain the free process from the free Hamiltonian or,
alternatively, from the one-particle process by a construction
analogous to ``second quantization.'' As an example, we consider the
process for a second quantized Dirac field in an external
electromagnetic field.
\noindent PACS numbers:
03.65.Ta,
02.50.-r,
03.70.+k
\end{abstract}
\tableofcontents
\section{Introduction}
The aim of this paper is to present methods for constructing Bell-type
QFTs. The primary variables of Bell-type QFTs are the positions of
the particles. Bell suggested a dynamical law, governing the motion
of the particles, in which the Hamiltonian $H$ and the state vector
$\Psi$ determine certain jump rates \cite{BellBeables}. Since these
rates are in a sense the smallest choice possible, we call them the
\emph{minimal jump rates}. By construction, they preserve the
$|\Psi|^2$ distribution. We assume a well-defined Hamiltonian as
given; to achieve this, it is often necessary to introduce
cut-offs. We shall assume this has been done where needed. In cases
in which one has to choose between several possible position
observables, for example because of issues related to the
Newton--Wigner operator \cite{NewtonWigner,Haag}, we shall also assume
that a choice has been made.
Bell-type QFTs can also be regarded as extensions of Bohmian
mechanics. When one tries to incorporate particle creation and
annihilation into Bohmian mechanics, one is naturally led to models
like the one we presented in \cite{crea1}. The quantum equilibrium
distribution, playing a central role in Bohmian mechanics, then more
or less dictates that creation of a particle occurs in a stochastic
manner---just as in Bell's model.
Bell-type QFTs have in common a good deal of mathematical structure,
which we will elucidate. The paper is organized as follows. In
Section 2 we introduce all the main ideas and reasonings; a
superficial reading should focus on this section. Some examples of
Bell-type QFTs are presented in Section 3. (Simple examples of minimal
jump rates can be found in \cite{crea2A}.) In Section 4 we describe
the construction of a process for the free Hamiltonian based on
``second quantization.'' In Section 5 we sketch the concept of the
``minimal process'' associated with a Hamiltonian $H$. Section 6
concerns some properties of Bell-type QFTs that derive from the
construction methods developed in this paper. In Section 7 we
conclude.
\section{Ingredients of Bell-Type Quantum Field Theories}
\label{sec:making}
\subsection{Review of Bohmian Mechanics and Equivariance}
Bohmian mechanics \cite{Bohm52,DGZ,Stanford} is a non-relativistic
theory about $N$ point particles moving in 3-space, according to which
the configuration $Q=({\boldsymbol Q}_1,\ldots,{\boldsymbol Q}_N)$ evolves according
to\footnote{ The masses $m_k$ of the particles have been absorbed in
the Riemann metric $g_{\mu\nu}$ on configuration space $\mathbb{R}^{3N}$,
$g_{ia,jb} = m_i \, \delta_{ij}\, \delta_{ab}$, $i,j=1\ldots N, \:
a,b=1,2,3$, and $\nabla$ is the gradient associated with $g_{\mu\nu}$,
i.e., $\nabla =(m_1^{-1}\nabla_{{\boldsymbol q}_1}, \dots,
m_N^{-1}\nabla_{{\boldsymbol q}_N})$.}
\begin{equation}\label{Bohm}
\frac{dQ}{dt} = v(Q)\,,\qquad
v=\hbar \, \mathrm{Im} \, \frac{\Psi^* \nabla\Psi} {\Psi^* \, \Psi}\,.
\end{equation}
$\Psi=\Psi_t(q)$ is the wave function, which
evolves according to the Schr\"odinger equation
\begin{equation}\label{Seq}
i\hbar\frac{\partial\Psi}{\partial t} = H \Psi\,,
\end{equation}
with
\begin{equation}\label{Hamil}
H= -\frac{\hbar^2}{2} \Delta + V
\end{equation}
for spinless particles, with $\Delta = \,\mathrm{div}\,\nabla$. For particles
with spin, $\Psi$ takes values in the appropriate spin space $\mathbb{C}^k$,
$V$ may be matrix valued, and numerator and denominator of
\eqref{Bohm} have to be understood as involving inner products in spin
space. The secret of the success of Bohmian mechanics in yielding the
predictions of standard quantum mechanics is the fact that the
configuration $Q_t$ is $|\Psi_t|^2$-distributed in configuration space
at all times $t$, provided that the initial configuration $Q_0$ (part
of the Cauchy data of the theory) is so distributed. This property,
called \emph{equivariance} in \cite{DGZ}, suffices for empirical
agreement between \emph{any} quantum theory (such as a QFT) and
\emph{any} version thereof with additional (often called ``hidden'')
variables $Q$, provided the outcomes of all experiments are registered
or recorded in these variables. That is why equivariance will be our
guide for obtaining the dynamics of the particles.
The equivariance of Bohmian mechanics follows immediately from comparing
the continuity equation for a probability distribution $\rho$
associated with (\ref{Bohm}),
\begin{equation}\label{master}
\frac{\partial \rho}{\partial t} = -\,\mathrm{div}\,(\rho v)\,,
\end{equation}
with the equation satisfied by $|\Psi|^2$ which follows from
(\ref{Seq}),
\begin{equation}\label{continuity1}
\frac{\partial |\Psi|^2}{\partial t}(q,t) = \frac{2}{\hbar} \, \mathrm{Im}
\, \Big[ \Psi^*(q,t)\, (H\Psi)(q,t) \Big]\,.
\end{equation}
In fact, it follows from (\ref{Hamil}) that
\begin{equation}\label{JJJ}
\frac{2}{\hbar} \, \mathrm{Im} \, \Big[ \Psi^*(q,t)\, (H\Psi)(q,t) \Big]=
-\,\mathrm{div}\,\Big[\hbar \, \mathrm{Im} \, \Psi^*(q,t) \nabla\Psi(q,t) \Big]
\end{equation}
so, recalling (\ref{Bohm}), one obtains that
\begin{equation}\label{continuity2}
\frac{\partial |\Psi|^2}{\partial t} = -\,\mathrm{div}\,(|\Psi|^2 v)\,,
\end{equation}
and hence that if $\rho_t=|\Psi_t|^2$ at some time $t$ then
$\rho_t=|\Psi_t|^2$ for \emph{all} times. Equivariance is an
expression of the compatibility between the Schr\"odinger evolution
for the wave function and the law, such as (\ref{Bohm}), governing the
motion of the actual configuration. In \cite{DGZ}, in which we were
concerned only with the Bohmian dynamics \eqref{Bohm}, we spoke of the
distribution $|\Psi|^2$ as being equivariant. Here we wish to find
processes for which we have equivariance, and we shall therefore speak
of equivariant processes and motions.
\subsection{Equivariant Markov Processes}
The study of example QFTs like that of \cite{crea1} has led us to the
consideration of Markov processes as candidates for the equivariant
motion of the configuration $Q$ for Hamiltonians $H$ more general than
those of the form \eqref{Hamil}.
Consider a Markov process $Q_t$ on configuration space. The
transition probabilities are characterized by the \emph{backward
generator} $L_t$, a (time-dependent) linear operator acting on
functions $f$ on configuration space:
\begin{equation}\label{backgenerator}
L_t f(q) = \frac{d}{ds} \mathbb{E} (f(Q_{t+s})|Q_t = q)
\end{equation}
where $d/ds$ means the right derivative at $s=0$ and
$\mathbb{E}(\,\cdot\,|\,\cdot\,)$ denotes the conditional expectation.
Equivalently, the transition probabilities are characterized by the
\emph{forward generator} $\mathscr{L}_t$ (or, as we shall simply say,
\emph{generator}), which is also a linear operator but acts on
(signed) measures on the configuration space. Its defining property
is that for every process $Q_t$ with the given transition
probabilities, the distribution $\rho_t$ of $Q_t$ evolves according to
\begin{equation}\label{rhoL}
\frac{\partial \rho_t}{\partial t} = \mathscr{L}_t \rho_t\,.
\end{equation}
$\mathscr{L}_t$ is the dual of $L_t$ in the sense that
\begin{equation}\label{generatorduality}
\int f(q) \, \mathscr{L}_t \rho(dq) = \int L_t f(q) \, \rho(dq)\,.
\end{equation}
We will use both $L_t$ and $\mathscr{L}_t$, whichever is more
convenient. We will encounter several examples of generators in the
subsequent sections.
We can easily extend the notion of equivariance from deterministic to
Markov processes. Given the Markov transition probabilities, we say that
\emph{the $|\Psi|^2$ distribution is equivariant} if and only if for all
times $t$ and $t'$ with $t<t'$, a configuration $Q_t$ with distribution
$|\Psi_t|^2$ evolves, according to the transition probabilities, into a
configuration $Q_{t'}$ with distribution $|\Psi_{t'}|^2$. In this case,
we
also simply say that the transition probabilities are
\emph{equivariant}, without explicitly mentioning $|\Psi|^2$.
Equivariance
is equivalent to
\begin{equation}\label{genequivariance}
\mathscr{L}_t |\Psi_t|^2 = \frac{\partial |\Psi_t|^2}{\partial t}
\end{equation}
for all $t$. When \eqref{genequivariance} holds (for a fixed $t$) we
also say that $\mathscr{L}_t$ is an \emph{equivariant generator} (with
respect to $\Psi_t$ and $H$). Note that this definition of
equivariance agrees with the previous meaning for deterministic
processes.
We call a Markov process $Q$ \emph{equivariant} if and only if for every
$t$ the distribution $\rho_t$ of $Q_t$ equals $|\Psi_t|^2$. For this to
be
the case, equivariant transition probabilities are necessary but not
sufficient. (While for a Markov process $Q$ to have equivariant
transition probabilities amounts to the property that if $\rho_t =
|\Psi_t|^2$ for one time $t$, where $\rho_t$ denotes the distribution of
$Q_t$, then $\rho_{t'} = |\Psi_{t'}|^2$ for every $t'>t$, according to
our definition of an equivariant Markov process, in fact $\rho_t =
|\Psi_t|^2$ for all $t$.) However, for equivariant transition
probabilities there exists a unique equivariant Markov process.
The crucial idea for our construction of an equivariant Markov process
is to note that \eqref{continuity1} is completely general, and to find
a generator $\mathscr{L}_t$ such that the right hand side of
(\ref{continuity1}) can be read as the action of $\mathscr{L}$ on $\rho
= |\Psi|^2$,
\begin{equation}\label{mainequ}
\frac{2}{\hbar} \, \mathrm{Im} \, \Psi^* H\Psi = \mathscr{L} |\Psi|^2\,.
\end{equation}
We shall implement this idea beginning in Section \ref{sec:mini1},
after a review of jump processes and some general considerations. But
first we shall illustrate the idea with the familiar case of Bohmian
mechanics.
For $H$ of the form \eqref{Hamil}, we have (\ref{JJJ}) and hence that
\begin{equation}\label{mequ}
\frac{2}{\hbar} \, \mathrm{Im} \, \Psi^*H\Psi = -\,\mathrm{div}\,\left(\hbar \, \mathrm{Im} \,
\Psi^* \nabla\Psi \right) = -\,\mathrm{div}\,\left( |\Psi|^2 \hbar \, \mathrm{Im} \,
\frac{\Psi^* \nabla\Psi} {|\Psi|^2} \right) \,.
\end{equation}
Since the generator of the (deterministic) Markov process
corresponding to the dynamical system $dQ/dt=v(Q)$ given by a velocity
vector field $v$ is
\begin{equation}\label{dynamical}
\mathscr{L} \rho = -\,\mathrm{div}\,(\rho v)\,,
\end{equation}
we may recognize the last term of (\ref{mequ}) as $\mathscr{L}
|\Psi|^2$ with $\mathscr{L}$ the generator of the deterministic process
defined by \eqref{Bohm}. Thus, as is well known, Bohmian mechanics
arises as the natural equivariant process on configuration space
associated with $H$ and $\Psi$.
To be sure, Bohmian mechanics is not the only solution of
(\ref{mainequ}) for $H$ given by \eqref{Hamil}. Among the alternatives
are Nelson's stochastic mechanics \cite{stochmech} and other velocity
formulas \cite{Deotto}. However, Bohmian mechanics is the most natural
choice, the one most likely to be relevant to physics. It is, in fact,
the canonical choice, in the sense of minimal process which we shall
explain in Section \ref{sec:mini}.
\subsection{Equivariant Jump Processes}\label{sec:revjump}
Let $\mathcal{Q}$ denote the configuration space of the process,
whatever sort of space that may be (vector space, lattice, manifold,
etc.); mathematically speaking, we need that $\mathcal{Q}$ be a measurable
space. A (pure) jump process is a Markov process on $\mathcal{Q}$ for which
the only motion that occurs is via jumps. Given that $Q_t =q$, the
probability for a jump to $q'$, i.e., into the infinitesimal volume
$dq'$ about $q'$, by time $t+dt$ is $\sigma_t(dq'|q)\, dt$, where
$\sigma$ is called the \emph{jump rate}. In this notation, $\sigma$ is
a finite measure in the first variable; $\sigma(B|q)$ is the rate (the
probability per unit time) of jumping to somewhere in the set
$B\subseteq\mathcal{Q}$, given that the present location is $q$. The overall
jump rate is $\sigma(\mathcal{Q}|q)$.
It is often the case that $\mathcal{Q}$ is equipped with a distinguished
measure, which we shall denote by $dq$ or $dq'$, slightly abusing
notation. For example, if $\mathcal{Q} = \mathbb{R}^d$, $dq$ may be the Lebesgue
measure, or if $\mathcal{Q}$ is a Riemannian manifold, $dq$ may be the
Riemannian volume element. When $\sigma(\,\cdot\,|q)$ is absolutely
continuous relative to the distinguished measure, we also write
$\sigma(q'|q)\, dq'$ instead of $\sigma(dq'|q)$. Similarly, we
sometimes use the letter $\rho$ for denoting a measure and sometimes
the density of a measure, $\rho(dq) = \rho(q)\,dq$.
A jump first occurs when a random waiting time $T$ has elapsed, after
the
time $t_0$ at which the process was started or at which the most
recent previous jump has occurred. For purposes of simulating or
constructing the process, the destination $q'$ can be chosen at the
time of jumping, $t_0 + T$, with probability distribution
$\sigma_{t_0+T} (\mathcal{Q}|q)^{-1} \, \sigma_{t_0+T} (\,\cdot\,|q)$. In
case the overall jump rate is time-independent, $T$ is exponentially
distributed with mean $\sigma(\mathcal{Q}|q)^{-1}$. When the
rates are time-dependent---as they will typically be in what
follows---the waiting time remains such that
\[
\int_{t_0}^{t_0+T} \sigma_t(\mathcal{Q}|q) \, dt
\]
is exponentially distributed with mean 1, i.e., $T$ becomes
exponential after a suitable (time-dependent) rescaling of time. For
more details about jump processes, see \cite{Breiman}.
The generator of a pure jump process can be expressed in terms of the
rates:
\begin{equation}\label{continuity3}
\mathscr{L}_\sigma \rho(dq) = \int\limits_{q'\in\mathcal{Q}} \Big(
\sigma(dq|q') \rho(dq') - \sigma(dq'|q) \rho(dq) \Big)\,,
\end{equation}
a ``balance'' or ``master'' equation expressing $\partial
\rho/\partial t$ as the gain due to jumps to $dq$ minus the loss due
to jumps away from $q$.
We shall say that jump rates $\sigma$ are \emph{equivariant} if
$\mathscr{L}_\sigma$ is an equivariant generator. It is one of our goals
in
this paper to describe a general scheme for obtaining equivariant jump
rates. In Sections \ref{sec:mini1} and \ref{sec:mini2} we will explain
how
this leads us to formula \eqref{tranrates}.
\subsection{Process Additivity}\label{sec:introadd}
The Hamiltonian of a QFT usually comes as a sum, such as
\begin{equation}\label{Hsum}
H = H_0 + H_{I}
\end{equation}
with $H_0$ the free Hamiltonian and $H_{I}$ the interaction
Hamiltonian. If several particle species are involved, $H_0$ is itself
a sum containing one free Hamiltonian for each species. The left hand
side of (\ref{mainequ}), which should govern our choice of the
generator, is then also a sum,
\begin{equation}\label{Hsumgen}
\frac{2}{\hbar} \, \mathrm{Im} \, \Psi^* H_0 \Psi + \frac{2}{\hbar} \, \mathrm{Im}
\, \Psi^* H_{I} \Psi = \mathscr{L} |\Psi|^2\,.
\end{equation}
This opens the possibility of finding a generator $\mathscr{L}$ by
setting $\mathscr{L} = \mathscr{L}_0 + \mathscr{L}_{I}$, provided we
have generators $\mathscr{L}_0$ and $\mathscr{L}_{I}$
corresponding to $H_0$ and $H_{I}$ in the sense that
\begin{subequations}
\begin{align}
\frac{2}{\hbar} \, \mathrm{Im} \, \Psi^* H_0 \Psi
&= \mathscr{L}_0 |\Psi|^2 \\
\frac{2}{\hbar} \, \mathrm{Im} \, \Psi^* H_{I} \Psi
&= \mathscr{L}_{I} |\Psi|^2\,.
\end{align}
\end{subequations}
This feature of (\ref{mainequ}) we call \emph{process additivity}; it
is based on the fact that the left hand side of (\ref{mainequ}) is
linear in $H$. Note that the backward generator of the process with
forward generator $\mathscr{L}_0 + \mathscr{L}_{I}$ is $L_0 +
L_{I}$; thus forward and backward generators lead to the same
notion of process additivity, and to the same process corresponding to
$H_0 + H_{I}$. In many cases, as will be elaborated in Section
\ref{sec:free}, $H_0$ is based on an operator known from quantum
mechanics (e.g., the Dirac operator), in such a way that
$\mathscr{L}_0$ can be obtained from the appropriate Bohmian law of
motion. In Section \ref{sec:mini1} we will explain how
$\mathscr{L}_{I}$ can usually be taken as the generator of a jump
process.
Our proposal is to take seriously the process generated by $\mathscr{L}
= \mathscr{L}_0 + \mathscr{L}_{I}$ and regard it as the process
naturally associated with $H$. The bottom line is that process
additivity provides a \emph{method of constructing} a Bell-type
theory.
Obviously, the mathematical observation of process additivity (that
sums of generators define an equivariant process associated with sums
of Hamiltonians) applies not only to the splitting of $H$ into a free
and an interaction contribution, but to every case where $H$ is a sum.
And it seems that process additivity provides a physically very
reasonable process in every case where $H$ is naturally a sum, in fact
the most reasonable process: the one that should be considered
\emph{the} Bell-type process, defining \emph{the} Bell-type theory.
\subsection{What Added Processes May Look Like}
To get some feeling for what addition of generators, $\mathscr{L} =
\mathscr{L}_1 + \mathscr{L}_2$, means for the corresponding processes,
we consider some examples. First consider two deterministic processes
(on the same configuration space), having generators of the form
$\mathscr{L} \rho = -\,\mathrm{div}\,(\rho v)$. To add the generators obviously
means to add the velocity vector fields, $v=v_1 + v_2$, so the
resulting velocity is a superposition of two contributions.
Next consider a pure jump process. Since, according to
(\ref{continuity3}), the generator $\mathscr{L}$ is linear in $\sigma$,
adding generators means adding rates, $\sigma = \sigma_1 +
\sigma_2$. This is equivalent to saying there are two kinds of jumps:
if the present location is $q\in\mathcal{Q}$, with probability
$\sigma_1(\mathcal{Q}|q)\,dt$ the process performs a jump of the first type
within the next $dt$ time units, and with probability
$\sigma_2(\mathcal{Q}|q)\,dt$ a jump of the second type. That does not mean,
however, that one can decide from a given realization of the process
which jump was of which type.
Next suppose we add the generators of a deterministic and a jump
process,
\begin{equation}\label{continuity4}
\mathscr{L} \rho(q) = -\,\mathrm{div}\,(\rho v)(q) + \int\limits_{q'\in\mathcal{Q}}
\Big( \sigma(q|q')\, \rho(q') - \sigma(q'|q)\, \rho(q) \Big) dq'\,.
\end{equation}
This process moves with velocity $v(q)$ until it jumps to $q'$, where
it continues moving, with velocity $v(q')$. The jump rate may vary
with time in two ways: first because $\sigma$ may be time-dependent,
second because $\sigma$ may be position-dependent and $Q_t$ moves with
velocity $v$. One can easily understand (\ref{continuity4}) in terms
of gain or loss of probability density due to motion and jumps. So
this process is piecewise deterministic: although the temporal length
of the pieces (the intervals between two subsequent jumps) and the
starting points (the jump destinations) are random, given this data
the trajectory is determined.
The generator of the Wiener process in $\mathbb{R}^d$ is the Laplacian, and
to add to it the generator of a deterministic process means to
introduce a drift. Note that this is different from adding, in
$\mathbb{R}^d$, a Wiener process to a solution of the deterministic
process. In spaces like $\mathbb{R}^d$, where it so happens that one is
allowed to add locations, there is a danger of confusing addition of
generators with addition of realizations. Whenever we speak of adding
processes, it means we add generators.
To add generators of a diffusion and a pure jump process yields what
is often called a jump diffusion process, one making jumps with time-
and position-dependent rates and following a diffusion path in between.
Diffusion processes, however, will play almost no role in this paper.
\subsection{Integral Operators Correspond to Jump Processes}
\label{sec:mini1}
We now address the interaction part $H_{I}$ of the Hamiltonian
(\ref{Hsum}). In QFTs with cutoffs it is usually the case that
$H_{I}$ is an integral operator. For that reason, we shall in this
work focus on integral operators for $H_{I}$. We now point out why
the naturally associated process is a pure jump process. For short,
we will write $H$ rather than $H_{I}$ in this and the subsequent
section. For the time being, think of $\mathcal{Q}$ as $\mathbb{R}^d$ and of wave
functions as complex valued.
What characterizes jump processes versus continuous processes is that
some amount of probability that vanishes at $q\in\mathcal{Q}$ can reappear
in an entirely different region of configuration space, say at
$q'\in\mathcal{Q}$. This is manifest in the equation for $\partial
\rho/\partial t$, (\ref{continuity3}): the first term in the integrand
is the probability increase due to arriving jumps, the second the
decrease due to departing jumps, and the integration over $q'$
reflects that $q'$ can be anywhere in $\mathcal{Q}$. This suggests that
Hamiltonians for which the expression \eqref{continuity1} for
$\partial |\Psi|^2/\partial t$ is naturally an integral over $dq'$
correspond to pure jump processes. So when is the left hand side of
(\ref{mainequ}) an integral over $dq'$? When $H$ is an integral
operator, i.e., when $\sp{q}{H|q'}$ is not merely a formal symbol, but
represents an integral kernel that exists as a function or a measure and
satisfies
\begin{equation}
(H\Psi)(q) = \int dq'\,\sp{q}{H|q'}\, \Psi(q')\,.
\end{equation}
In this case, we should choose the jump rates in such a way that,
when $\rho = |\Psi|^2$,
\begin{equation}\label{la1}
\sigma(q|q') \,\rho(q') - \sigma(q'|q) \,\rho(q) = \frac{2}{\hbar}
\, \mathrm{Im} \, \Psi^*(q)\, \sp{q}{H|q'} \, \Psi(q') \,,
\end{equation}
and this suggests, since jump rates must be nonnegative (and the right
hand side of \eqref{la1} is anti-symmetric), that
\[
\sigma(q|q') \,\rho(q') = \Big[ \frac{2}{\hbar} \, \mathrm{Im} \,
\Psi^*(q)\, \sp{q}{H|q'} \, \Psi(q') \Big]^+
\]
(where $x^+$ denotes the positive part of $x\in\mathbb{R}$, that is, $x^+$ is
equal to $x $ for $x>0$ and is zero otherwise), or
\begin{equation}\label{mini1}
\sigma(q|q') = \frac{ \big[ (2/\hbar) \, \mathrm{Im} \, \Psi^*(q) \, \sp{q}
{H|q'} \, \Psi(q') \big]^+}{\Psi^*(q')\, \Psi(q')} .
\end{equation}
These rates are an instance of what we call the \emph{minimal jump
rates} associated with $H$ (and $\Psi$). The name comes from the fact
that they are actually the minimal possible values given (\ref{la1}),
as is expressed by the inequality \eqref{minimality} and will be
explained in detail in Section \ref{sec:mini4}. Minimality entails
that at any time $t$, one of the transitions $q_1 \to q_2$ or $q_2 \to
q_1$ is forbidden. We will call the process defined by the minimal
jump rates the \emph{minimal jump process} (associated with $H$).
In contrast to jump processes, continuous motion, as in Bohmian
mechanics, corresponds to such Hamiltonians that the formal matrix
elements $\sp{q}{H|q'}$ are nonzero only infinitesimally close to the
diagonal, and in particular to differential operators like the
Schr\"odinger Hamiltonian (\ref{Hamil}), which has matrix elements of
the type $\delta''(q-q') + V(q) \,\delta(q-q')$. We can summarize the
situation, as a rule of thumb, by the following table:
\begin{center}
\begin{tabular}{|r|l|}
\hline
A contribution to $H$ that is a \ldots & corresponds to \ldots\\\hline
integral operator & jumps\\
differential operator & deterministic continuous motion\\
multiplication operator & no motion ($\mathscr{L} = 0$)\\\hline
\end{tabular}
\end{center}
The minimal jump rates as given by (\ref{mini1}) have some nice
features. The possible jumps for this process correspond to the
nonvanishing matrix elements of $H$ (though, depending on the state
$\Psi$, even some of the jump rates corresponding to nonvanishing
matrix elements of $H$ might happen to vanish). Moreover, in their
dependence on the state $\Psi$, the jump rates $\sigma$ depend only
``locally'' upon $\Psi$: the jump rate for a given jump $q'\to q$
depends only on the values $\Psi(q')$ and $\Psi(q)$ corresponding to
the configurations linked by that jump. Discretizing $\mathbb{R}^3$ to a
lattice $\varepsilon \mathbb{Z}^3$, one can obtain Bohmian mechanics as a
limit $\varepsilon\to 0$ of minimal jump processes
\cite{Sudbery,Vink}, whereas greater-than-minimal jump rates lead to
Nelson's stochastic mechanics \cite{stochmech} and similar diffusions,
such as (\ref{diffusion}); see \cite{Vink,Guerra}. If the
Schr\"odinger operator \eqref{Hamil} is approximated in other ways by
operators corresponding to jump processes, e.g., by $H_\varepsilon =
e^{-\varepsilon H} H e^{-\varepsilon H}$, the minimal jump processes
presumably also converge to Bohmian mechanics.
We have reason to believe that there are lots of self-adjoint
operators which do not correspond to any stochastic process that can
be regarded as defined, in any reasonable sense, by
\eqref{mini1}.\footnote{Consider, for example, $H = p \cos p$ where
$p$ is the one-dimensional momentum operator $-i\hbar
\partial/\partial q$. Its formal kernel $\sp{q}{H|q'}$ is the
distribution $-\frac{i}{2} \delta'(q-q'-1) - \frac{i}{2}
\delta'(q-q'+1)$, for which \eqref{mini1} would not have a meaning.
{}From a sequence of smooth functions converging to this distribution,
one can obtain a sequence of jump processes with rates \eqref{mini1}:
the jumps occur very frequently, and are by amounts of approximately
$\pm 1$. A limiting process, however, does not exist.} But such
operators seem never to occur in QFT. (The Klein--Gordon operator
$\sqrt{m^2 c^4 - \hbar^2 c^2 \Delta}$ does seem to have a process,
but it requires a more detailed discussion which will be provided in a
forthcoming work \cite{klein2}.)
\subsection{Minimal Jump Rates}
\label{sec:mini2}
The reasoning of the previous section applies to a far more general
setting than just considered: to arbitrary configuration spaces
$\mathcal{Q}$ and ``generalized observables''---POVMs---defining, for our
purposes, what the ``position representation'' is. We now present this
more general reasoning, which leads to one of the main formulas of
this paper, (\ref{tranrates}).
The process we construct relies on the following ingredients from QFT:
\begin{enumerate}
\item A Hilbert space $\mathscr{H}$ with scalar product $\sp{\Psi}
{\Phi}$.
\item A unitary one-parameter group $U_t$ in $\mathscr{H}$ with
Hamiltonian $H$,
\[
U_t = e^{-\frac{i}{\hbar}tH}\,,
\]
so that in the Schr\"odinger picture the state $\Psi$ evolves
according to
\begin{equation}
i\hbar\frac{d\Psi_t}{dt} = H\Psi_t\,.
\end{equation}
$U_t$ could be part of a representation of the Poincar\'e group.
\item A positive-operator-valued measure (POVM) ${P}(dq)$ on $\mathcal{Q}$
acting on $\mathscr{H}$, so that the probability that the system in the
state $\Psi$ is localized in $dq$ at time $t$ is
\begin{equation} \label{mis}
\mathbb{P}_t(dq)= \sp{\Psi_t}{{P}(dq)| \Psi_t} \,.
\end{equation}
\end{enumerate}
Mathematically, a POVM ${P}$ on $\mathcal{Q}$ is a countably additive set
function (``measure''), defined on measurable subsets of $\mathcal{Q}$, with
values in the positive (bounded self-adjoint) operators on (a Hilbert
space) $\mathscr{H}$, such that ${P}(\mathcal{Q})$ is the identity
operator.\footnote{The countable additivity is to be understood as in
the sense of the weak operator topology. This in fact implies that
countable additivity also holds in the strong topology.} Physically,
for our purposes, ${P}(\,\cdot\,)$ represents the (generalized)
position observable, with values in $\mathcal{Q}$. The notion of POVM
generalizes the more familiar situation of observables given by a set
of commuting self-adjoint operators, corresponding, by means of the
spectral theorem, to a projection-valued measure (PVM): the case where
the positive operators are projection operators. A typical example is
the single Dirac particle: the position operators on
$L^2(\mathbb{R}^3,\mathbb{C}^4)$ induce there a natural PVM ${P}_0(\,\cdot\,)$:
for any Borel set $B\subseteq \mathbb{R}^3$, ${P}_0(B)$ is the projection
to the subspace of functions that vanish outside $B$, or,
equivalently, ${P}_0(B)\Psi(q) = \mathbf{1}_B(q) \, \Psi(q)$ with $\mathbf{1}_B$ the
indicator function of the set $B$. Thus, $\sp{\Psi} {{P}_0 (dq)|
\Psi} = |\Psi(q)|^2 dq$. When one considers as Hilbert space
$\mathscr{H}$ only the subspace of positive energy states, however, the
localization probability is given by ${P}(\,\cdot\,) = P_+
{P}_0(\,\cdot\,) I$ with $P_+:L^2(\mathbb{R}^3,\mathbb{C}^4) \to \mathscr{H}$ the
projection and $I:\mathscr{H} \to L^2(\mathbb{R}^3,\mathbb{C}^4)$ the inclusion
mapping. Since $P_+$ does not commute with most of the operators
${P}_0(B)$, ${P} (\,\cdot\,)$ is no longer a PVM but a genuine
POVM\footnote{This situation is indeed more general than it may seem.
By a theorem of Naimark \cite[p.~142]{Davies}, every POVM ${P}
(\,\cdot\,)$ acting on $\mathscr{H}$ is of the form ${P}(\,\cdot\,) =
P_+ {P}_0 (\,\cdot\,) P_+$ where ${P}_0$ is a PVM on a larger
Hilbert space, and $P_+$ the projection to
$\mathscr{H}$. \label{ft:Naimark}} and consequently does not correspond
to any position operator---although it remains true (for $\Psi$ in the
positive energy subspace) that $\sp{\Psi}{{P}(dq)| \Psi} =
|\Psi(q)|^2 dq$. That is why in QFT, the position observable is
indeed more often a POVM than a PVM. POVMs are also relevant to
photons \cite{ali,kraus}. In one approach, the photon wave function
$\Psi: \mathbb{R}^3 \to \mathbb{C}^3$ is subject to the constraint condition
$\nabla \cdot \Psi = \partial_1 \Psi_1 + \partial_2 \Psi_2 +
\partial_3 \Psi_3 =0$. Thus, the physical Hilbert space $\mathscr{H}$ is
the (closure of the) subspace of $L^2(\mathbb{R}^3,\mathbb{C}^3)$ defined by this
constraint, and the natural PVM on $L^2(\mathbb{R}^3,\mathbb{C}^3)$ gives rise, by
projection, to a POVM on $\mathscr{H}$. So much for POVMs. Let us get
back to the construction of a jump process.
The goal is to specify equivariant jump rates $\sigma = \sigma^{\Psi, H,
{P}}$, i.e., such rates that
\begin{equation}\label{equirates}
\mathscr{L}_\sigma \mathbb{P} = \frac{d\mathbb{P}}{dt} \,.
\end{equation}
To this end, one may take the following steps:
\begin{enumerate}
\item Note that
\begin{equation}\label{dPdt}
\frac{d\mathbb{P}_t(dq)}{dt} = \frac{2}{\hbar} \, \mathrm{Im} \,
\sp{\Psi_t}{{P}(dq) H| \Psi_t}\,.
\end{equation}
\item Insert the resolution of the identity $I =
\int\limits_{q'\in\mathcal{Q}}
{P}(dq')$ and obtain
\begin{equation}\label{dPdtJ}
\frac{d\mathbb{P}_t(dq)}{dt} =\int\limits_{q'\in\mathcal{Q}}
\mathbb{J}_t(dq,dq') \,,
\end{equation}
where
\begin{equation}\label{Jdef}
\mathbb{J}_t(dq,dq') = \frac{2}{\hbar} \,
\mathrm{Im} \, \sp{\Psi_t}{{P}(dq)H {P}(dq')| \Psi_t} \,.
\end{equation}
\item Observe that $\mathbb{J}$ is anti-symmetric, $\mathbb{J}(dq',dq) = -
\mathbb{J}(dq,dq')$. Thus, since $x = x^+ - (-x)^+$,
\[
\mathbb{J}(dq,dq') = \left[(2/\hbar) \, \mathrm{Im} \, \sp{\Psi} {{P}(dq) H
{P}(dq') |\Psi}\right]^+ - \left[(2/\hbar)\, \mathrm{Im} \, \sp{\Psi}
{{P}(dq') H {P}(dq) |\Psi}\right]^+ .
\]
\item Multiply and divide both terms by $\mathbb{P}(\,\cdot\,)$,
obtaining that
\begin{eqnarray*}
\int\limits_{q'\in\mathcal{Q}} \mathbb{J}(dq,dq') = \int\limits_{q'\in\mathcal{Q}}
\bigg( \hspace{-3ex} &&
\frac{[(2/\hbar) \, \mathrm{Im} \, \sp{\Psi} {{P}(dq) H {P}(dq')|
\Psi}]^+}
{\sp{\Psi}{{P}(dq')| \Psi}} \mathbb{P}(dq') -
\\-&&
\frac{[(2/\hbar) \, \mathrm{Im} \, \sp{\Psi} {{P}(dq') H {P}(dq)| \Psi}
]^+} {\sp{\Psi} {{P}(dq)| \Psi}} \mathbb{P}(dq) \bigg) \,.
\end{eqnarray*}
\item By comparison with \eqref{continuity3}, recognize the right hand
side of the above equation as $\mathscr{L}_\sigma \mathbb{P}$, with
$\mathscr{L}_\sigma$ the generator of a Markov jump process with jump
rates
\begin{equation} \label{tranrates}
\sigma(dq|q')= \frac{[(2/\hbar) \, \mathrm{Im} \, \sp{\Psi} {{P}(dq) H
{P}(dq')| \Psi}]^+}{\sp{\Psi}{{P}(dq')| \Psi}}\,,
\end{equation}
which we call the \emph{minimal jump rates}.
\end{enumerate}
Mathematically, the right hand side of this formula as a function of
$q'$ must be understood as a density (Radon--Nikod{\'y}m derivative)
of one measure relative to another.\footnote{Quite aside from the
previous discussion, it is perhaps worth noting that there are not so
many expressions in $H,{P}$, and $\Psi$ that would meet the formal
criteria for being a candidate for the jump rate. Since the only
connection between abstract Hilbert space and configuration space is
by ${P}$, which leads to \emph{measures} on $\mathcal{Q}$, the only way to
obtain a \emph{function} on $\mathcal{Q}$ is to form a Radon--Nikod{\'y}m
quotient of two measures, $\sigma(q') = A(dq')/B(dq')$. Since $\sigma$
must be a measure-valued function, the numerator should be a
bi-measure (a measure in each of two variables). The simplest measure
one can form from $H,{P}$, and $\Psi$ is $\sp{\Psi}{{P}(dq)|\Psi}$;
the simplest bi-measures are $\sp{\Psi}{H^{n_1} {P}(dq) H^{n_2}
{P}(dq') H^{n_3}| \Psi}$. Jump rates must have dimension 1/time, and
the only object at hand having this dimension is $H/\hbar$. Thus, $H$
can appear only once in the numerator. The expressions $\sp{\Psi}{H
{P}(dq) {P}(dq')| \Psi}$ and $\sp{\Psi}{{P}(dq) {P}(dq') H| \Psi}$
are no good because for PVMs ${P}$ they are concentrated on the
diagonal of $\mathcal{Q} \times \mathcal{Q}$ and hence do not lead to nontrivial
jumps. Let us write $\mu$ for the measure-valued function we have
arrived at:
\[
\mu (dq,q') = \frac{1}{\hbar} \frac{\sp{\Psi}{{P}(dq) H {P}(dq')
| \Psi}} {\sp{\Psi}{{P}(dq')|\Psi}}\,.
\]
This provides \emph{complex} measures, whereas $\sigma(\,\cdot\,|q')$
must be a positive real measure. There are not many ways of forming
a positive real measure from a complex one, the essential ones being
\[
|\mu|, |\mathrm{Re} \, \mu|, |\mathrm{Im} \, \mu|, (\mathrm{Re} \, \mu)^+, (\mathrm{Re} \, \mu)^-,
(\mathrm{Im} \, \mu)^+, (\mathrm{Im} \, \mu)^-
\]
times a numerical constant $\lambda>0$. One could of course form
additional expressions at the price of higher complexity.
This has gotten us already pretty close to the minimal rates
\eqref{tranrates}, which correspond to $\sigma = 2(\mathrm{Im} \, \mu)^+$. To
proceed further, we might demand the absence of unnecessary jumps;
that means that at any time, either the jump $q_1 \to q_2$ or $q_2 \to
q_1$ is forbidden; this leaves only $\lambda (\mathrm{Im} \,
\mu)^\pm$. Moreover, $2 (\mathrm{Im} \, \mu)^+$ is the only expression in the
list that has Bohmian mechanics as a limiting case or implies
equivariance. Furthermore it corresponds to the natural guess
\eqref{Ltilde} for a backward generator, discussed in Section
\ref{sec:mini}.} The plus symbol denotes the positive part of a signed
measure; it can also be understood as applying the plus function, $x^+
= \max (x,0)$, to the density, if it exists, of the numerator.
To sum up, we have argued that with $H$ and $\Psi$ is naturally
associated a Markov jump process $Q_t$ whose marginal distributions
coincide at all times by construction with the quantum probability
measure, $\rho_t(\,\cdot\,) = \mathbb{P}_t(\,\cdot\,)$, so that $Q_t$ is
an equivariant Markov process.
In Section~4 of \cite{crea2A}, we establish precise conditions on
$H,{P}$, and $\Psi$ under which the jump rates \eqref{tranrates} are
well-defined and finite $\mathbb{P}$-almost everywhere, and prove that
in this case the rates are equivariant, as suggested by the steps 1--5
above. It is perhaps worth remarking at this point that any $H$ can be
approximated by Hamiltonians $H_n$ (namely Hilbert--Schmidt operators)
for which the rates \eqref{tranrates} are always (for all $\Psi$)
well-defined and equivariant \cite{crea2A}. Concerning this, see also
the end of Section \ref{sec:mini}.
\subsection{Process Associated with the Free Hamiltonian}
\label{sec:free}
We now address the free Hamiltonian $H_0$ of a QFT. We describe the
process naturally associated with $H_0$, when this is the second
quantized Schr\"odinger or Dirac operator. We will treat more general
free Hamiltonians in the next section. We shall consider here only
Hamiltonians for one type of particle.
We first define the configuration space $\mathcal{Q}$. Let us write
$\mathcal{Q}^{(1)}$ (``one-particle configuration space'') for physical
space; this is typically, but not necessarily, $\mathbb{R}^3$. The space
$\mathcal{Q}$ in which the ``free process'' takes place is the configuration
space for a variable number of identical particles; we call it $\Gamma
\mathcal{Q}^{(1)}$. It can be defined as the space of all finite
subsets-with-multiplicities of $\mathcal{Q}^{(1)}$. A
set-with-multiplicities consists of a set and, for each element $x$ of
the set, a positive integer, called the multiplicity of $x$. The
number of particles in a configuration $q$ is the sum of its
multiplicities, $\#q$. Such configurations describe several identical
particles, some of which may be located at the same position in
space. Equivalently, one could say that $\Gamma \mathcal{Q}^{(1)}$ is the
set of all mappings $n:\mathcal{Q}^{(1)} \to \mathbb{N}\cup\{0\}$ (meaning the
number of particles at a given location) such that
\[
\sum_{{\boldsymbol q} \in \mathcal{Q}^{(1)}} n({\boldsymbol q} ) < \infty\,.
\]
Another equivalent definition is the set of all finite nonnegative
measures $n(\,\cdot\,)$ on $\mathcal{Q}^{(1)}$ that assume only integer
values; the meaning of $n(R)$ is the number of particles in the region
$R$ of physical space. Finally, one can define
\[
\Gamma \mathcal{Q}^{(1)} = \bigcup_{n=0}^\infty \mathcal{Q}^{(n)} \mbox{ where }
\mathcal{Q}^{(n)} = (\mathcal{Q}^{(1)})^n/\mbox{permutations}.
\]
A related space, for which we write $\Gamma_{\!\neq} \mathcal{Q}^{(1)}$, is the space
of all finite subsets of $\mathcal{Q}^{(1)}$; it is contained in $\Gamma
\mathcal{Q}^{(1)}$, after obvious identifications. In fact, $\Gamma_{\!\neq}
\mathcal{Q}^{(1)} = \Gamma \mathcal{Q}^{(1)} \setminus \Delta$, where $\Delta$ is
the set of coincidence configurations, i.e., those having two or more
particles at the same position. $\Gamma_{\!\neq} \mathcal{Q}^{(1)}$ is the union of
the spaces ${\mathcal{Q}}^{(n)}_{\neq}$ for $n=0,1,2, \ldots$, where
${\mathcal{Q}}^{(n)}_{\neq}$ is the space of subsets of $\mathcal{Q}^{(1)}$ with
$n$ elements.
For $\mathcal{Q}^{(1)} = \mathbb{R}^d$, the $n$-particle sector
${\mathcal{Q}}^{(n)}_{\neq}$ is a manifold of dimension $nd$ (see
\cite{identical} for a discussion of Bohmian mechanics on this
manifold). If $d\geq 2$, the set $\Delta$ of coincidence
configurations has codimension $\geq 2$ and thus can usually be
ignored. We can then replace $\Gamma \mathbb{R}^d$ by the somewhat simpler
space $\Gamma_{\!\neq} \mathbb{R}^d$.
The position POVM ${P}^{(1)}$ on $\mathcal{Q}^{(1)}$ (acting on the
one-particle Hilbert space) naturally leads to a POVM we call $\Gamma
{P}^{(1)}$ on $\mathcal{Q} = \Gamma \mathcal{Q}^{(1)}$, acting on Fock space (see
Section \ref{sec:GammaPOVM} for the definition).\footnote{The
coincidence configurations form a null set, $\Gamma {P}^{(1)}(\Delta)
=0$, when $\mathcal{Q}^{(1)}$ is a continuum, or, more precisely, when
${P}^{(1)}$ is nonatomic as a measure.} Since a configuration from
$\Gamma(\mathbb{R}^3)$ defines the number of particles and their positions,
the name ``position observable'' for ${P} = \Gamma {P}^{(1)}$
stretches the meaning of ``position'' somewhat: it now also
encompasses the number of particles.
We now give a description of the free process associated with the
second-quantized Schr\"odinger operator; it arises from Bohmian
mechanics. Fock space $\mathscr{H} = \mathscr{F}$ is a direct sum
\begin{equation}\label{fockspace}
\mathscr{F}= \bigoplus_{n=0}^{\infty} \mathscr{F}^{(n)} ,
\end{equation}
where $\mathscr{F}^{(n)}$ is the $n$-particle Hilbert space. $\mathscr{F}^{(n)}$
is the subspace of symmetric (for bosons) or anti-symmetric (for
fermions) functions in $L^2 (\mathbb{R}^{3n}, (\mathbb{C}^{2s+1})^{\otimes n})$
for spin-$s$ particles. Thus, $\Psi \in \mathscr{F}$ can be decomposed into
a sequence $\Psi = \left( \Psi^{(0)}, \Psi^{(1)}, \ldots, \Psi^{(n)},
\ldots \right)$, the $n$-th member $\Psi^{(n)}$ being an $n$-particle
wave function, the wave function representing the $n$-particle sector
of the quantum state vector. The obvious way to obtain a process on
$\mathcal{Q} = \Gamma \mathbb{R}^3$ is to let the configuration $Q(t)$, containing
$N = \#Q(t)$ particles, move according to the $N$-particle version of
Bohm's law (\ref{Bohm}), guided by $\Psi^{(N)}$.\footnote{As defined,
configurations are unordered, whereas we have written Bohm's law
\eqref{Bohm} for ordered configurations. Thanks to the
(anti\nobreakdash-)symmetry of the wave function, however, all
orderings will lead to the same particle motion. For more about such
considerations, see our forthcoming work \cite{identical}.} This is
indeed an equivariant process since $H_0$ has a block diagonal form
with respect to the decomposition (\ref{fockspace}),
\[
H_0 = \bigoplus_{n=0}^\infty H_0^{(n)}\,,
\]
and $H_0^{(n)}$ is just a Schr\"odinger operator for $n$
noninteracting particles, for which, as we already know, Bohmian
mechanics is equivariant. We used a very similar process in
\cite{crea1} (the only difference being that particles were numbered
in \cite{crea1}).
Similarly, if $H_0$ is the second quantized Dirac operator, we let a
configuration $Q$ with $N$ particles move according to the usual
$N$-particle Bohm--Dirac law \cite[p.~274]{BH}
\begin{equation}\label{BohmDirac}
\frac{dQ}{dt} = c\frac{\Psi^*(Q) \, \alpha_{N} \, \Psi(Q)}
{\Psi^*(Q) \, \Psi(Q)}
\end{equation}
where $c$ denotes the speed of light and $\alpha_{N} = ({\boldsymbol \alpha}^{(1)},
\ldots, {\boldsymbol \alpha}^{(N)})$ with ${\boldsymbol \alpha}^{(k)}$ acting on the spin index
of the $k$-th particle.
\subsection{Other Approaches to the Free Process}
\label{sec:free2}
We will give below a general velocity formula, applicable to a wider
class of free Hamiltonians. Alternatively, we can provide a free
process for any $H_0$ if we are given an equivariant process for the
one-particle Hamiltonian $H^{(1)}$. This is based on the particular
mathematical structure of $H_0$, which can be expressed by saying it
arises from a one-particle Hamiltonian $H^{(1)}$ by applying a
``second quantization functor $\Gamma$'' \cite{RS}. That is, there is
an algorithm (in a bosonic or fermionic version) for forming, from a
one-particle Hilbert space $\mathscr{H}^{(1)}$ and a one-particle
Hamiltonian $H^{(1)}$, a Fock space $\mathscr{F} = \Gamma\mathscr{H}^{(1)}$ and
free Hamiltonian $H_0 = \Gamma H^{(1)}$. And parallel to this
``second quantization'' algorithm, there is an algorithm for the
canonical construction, from a given equivariant one-particle Markov
process $Q^{(1)}_t$, of a process we call $\Gamma Q^{(1)}_t$ that
takes place in $\mathcal{Q} = \Gamma \mathcal{Q}^{(1)}$ and is equivariant with
respect to $H_0$. This algorithm may be called the ``second
quantization'' of a Markov process.
The algorithm is described in Section \ref{sec:Gamma}. What the
algorithm does is essentially to construct an $n$-particle version of
$Q^{(1)}_t$ for every $n$, and finally combine these by means of a
random particle number $N = N(t) = \# Q(t)$ which is constant under
the free process, parallel to the fact that the particle number
operator is conserved by $H_0$. We note further that the process
$\Gamma Q^{(1)}_t$ is deterministic if $Q^{(1)}_t$ is. If we take the
one-particle process to be Bohmian mechanics or the Bohm--Dirac
motion, the algorithm reproduces the processes described in the
previous section.
The algorithm leaves us with the task of finding a suitable
one-particle law, which we do not address in this paper. For some
Hamiltonians, such as the Dirac operator, this is immediate, for
others it is rather nontrivial, or even unsolved. The Klein--Gordon
operator $\sqrt{m^2c^4 - \hbar^2c^2\Delta}$ will be discussed in
forthcoming work \cite{klein2}, and for a study of photons see
\cite{photon}.
When $H_0$ is made of differential operators of up to second order
(which includes of course the Schr\"odinger and Dirac operators),
there is another way to characterize the process associated with
$H_0$, a way which allows a particularly succinct description of the
process and a particularly direct derivation and construction. In
fact, we give a formula for its backward generator $L_0$, or
alternatively the velocity (or the forward generator $\mathscr{L}_0$),
in terms of $H_0,{P}$, and $\Psi$.
We begin by defining, for any $H,{P}$, and $\Psi$, an operator
$L$ acting on functions $f:\mathcal{Q} \to \mathbb{R}$, which may or may
not be the backward generator of a process, by
\begin{equation}\label{LH}
Lf(q) = \mathrm{Re} \frac{\sp{\Psi} {{P}(dq) \hat{L} \hat{f} |\Psi}}
{\sp{\Psi} {{P}(dq)|\Psi}} = \mathrm{Re} \frac{\sp{\Psi} {{P}(dq)
\frac{i}{\hbar} [H,\hat{f}] |\Psi}} {\sp{\Psi} {{P}(dq) |\Psi}},
\end{equation}
where $[\;,\,]$ means the commutator,
\begin{equation}\label{hatf}
\hat{f} = \int\limits_{q \in \mathcal{Q}} f(q) \, {P}(dq)\,,
\end{equation}
and $\hat{L}$ is the ``generator'' of the (Heisenberg) time evolution of
the operator $\hat{f}$,
\begin{equation}\label{hatLdef}
\hat{L}\hat{f} = \frac{d}{d\tau} e^{i H \tau/\hbar} \, \hat{f} \,
e^{-i H \tau/\hbar} \Big|_{\tau =0} = \tfrac{i}{\hbar}
[H,\hat{f}] \,.
\end{equation}
(If ${P}$ is a PVM, then $\hat{f} = f(\hat{q})$, where $\hat{q}$ is
the configuration operator.) \eqref{LH} could be guessed in the
following way: since $Lf$ is in a certain sense, see
\eqref{backgenerator}, the time derivative of $f$, it might be
expected to be related to $\hat{L} \hat{f}$, which is in a certain
sense, see \eqref{hatLdef}, the time derivative of $\hat{f}$. As a
way of turning the operator $\hat{L} \hat{f}$ into a function $Lf(q)$,
the middle term in \eqref{LH} is an obvious possibility. Note that
this way of arriving at \eqref{LH} does not make use of equivariance;
for another way that does, see Section \ref{sec:freeflow}.
The formula for the forward generator equivalent to \eqref{LH} reads
\begin{equation}\label{genH}
\mathscr{L} \rho(dq) = \mathrm{Re} \, \sp{\Psi}{\widehat{\tfrac{d\rho}
{d\mathbb{P}}}\, \tfrac{i}{\hbar} [H, {P} (dq)] |\Psi},
\end{equation}
as follows from \eqref{generatorduality}.
Whenever $L$ is indeed a backward generator, we call it the
\emph{minimal free (backward) generator} associated with $\Psi, H$,
and ${P}$. (The name is based on the concept of minimal process as
explained in Section \ref{sec:mini}.) Then the corresponding process
is equivariant (see Section \ref{sec:freeflow}). This is the case if
(and, there is reason to expect, \emph{only if}) ${P}$ is a PVM and
$H$ is a differential operator of up to second order in the position
representation, in which ${P}$ is diagonal. In that case, the
process is deterministic, and the backward generator has the form $L =
v \cdot \nabla$ where $v$ is the velocity vector field; thus,
\eqref{LH} directly specifies the velocity, in the form of a
first-order differential operator $v \cdot \nabla$. In case $H$ is
the $N$-particle Schr\"odinger operator with or without spin,
\eqref{LH} yields the Bohmian velocity \eqref{Bohm}, and if $H$ is the
Dirac operator, the Bohm--Dirac velocity \eqref{BohmDirac}. To sum
up, in some cases definition \eqref{LH} leads to just the right
backward generator.
To return to our starting point: if the one-particle generator
$\mathscr{L}^{(1)}$ arises from the one-particle Hamiltonian $H^{(1)}$
by \eqref{genH}, then \eqref{genH} also holds between the free generator
$\mathscr{L}_0 = \Gamma \mathscr{L}^{(1)}$ and the free Hamiltonian $H_0
= \Gamma H^{(1)}$. (See Section \ref{sec:freeflow} for details.) In
other words, \eqref{LH} is compatible with the ``second quantization''
algorithm. Thus, in relevant cases \eqref{LH} allows a direct
definition of the free process in terms of $H_0$, just as
\eqref{tranrates} directly defines, in terms of $H_{I}$, the jump
rates.
A relevant point is that the ``second quantization'' of a differential
operator is again a differential operator, in a suitable sense, and
has the same order. Note also that \eqref{LH}, when applied to the
second quantized Schr\"odinger or Dirac Hamiltonian, defines the same
vector field on $\Gamma(\mathbb{R}^3)$ as described in the previous section.
\subsection{Bell-Type QFT}
We briefly summarize what we have obtained. A Bell-type QFT is about
particles moving in physical 3-space; their number and positions are
represented by a point $Q_t$ in configuration space $\mathcal{Q}$. Provided
physical space is $\mathbb{R}^3$, $\mathcal{Q}$ is usually $\Gamma \mathbb{R}^3$ or a
Cartesian product of several such spaces, each factor representing a
different particle species. $Q_t$ follows a Markov process in
$\mathcal{Q}$, which is governed by a state vector $\Psi$ in a suitable
Hilbert space $\mathscr{H}$. $\mathscr{H}$ is related to $\mathcal{Q}$ by means
of a PVM or POVM ${P}$. $\Psi$ undergoes a unitary evolution with
Hamiltonian $H$. The process $Q_t$ usually consists of deterministic
continuous trajectories interrupted by stochastic jumps; more
generally, it arises by process additivity (i.e., by adding
generators) from a free process associated with $H_0$ and a jump
process associated with $H_{I}$. The jump rates are given by
\eqref{tranrates} for $H= H_{I}$. The free process arises from
Bohmian mechanics, or a suitable analogue, by a construction that can
be formalized as the ``second quantization'' of a one-particle Markov
process; when appropriate, it is defined directly by \eqref{LH}. The
process $Q_t$ is equivariant, i.e., $\sp{\Psi_t} {{P}(dq) |\Psi_t}$
distributed.
Examples of Bell-type QFTs can be found in \cite{BellBeables,crea1}
and in Section~\ref{sec:example}. It is our contention that,
essentially, there is a unique Bell-type version of every regularized
QFT. We have to postpone, however, the discussion of operators of the
Klein--Gordon type. We also have to assume that the QFT provides us
with the POVM ${P}(\,\cdot\,)$; this is related to an ongoing
discussion in the literature \cite{NewtonWigner,kraus,Haag}
concerning the right position operator.
\subsection{More on Identical Particles}\label{sec:identical}
The $n$-particle sector of the configuration space (without
coincidence configurations) of identical particles
$\Gamma_{\!\neq}(\mathbb{R}^3)$ is the manifold of $n$-point subsets of
$\mathbb{R}^3$; let $\mathcal{Q}$ be this manifold. The most common way of
describing the quantum state of $n$ fermions is by an anti-symmetric
(square-integrable) wave function $\Psi$ on $\hat\mathcal{Q} := \mathbb{R}^{3n}$;
let $\mathscr{H}$ be the space of such functions. Whereas for bosons
$\Psi$ could be viewed as a function on $\mathcal{Q}$, for fermions $\Psi$
is not a function on $\mathcal{Q}$.
Nonetheless, the configuration observable still corresponds to a PVM
${P}$ on $\mathcal{Q}$: for $B \subseteq \mathcal{Q}$, we set ${P}(B)
\Psi({\boldsymbol q}_1, \ldots, {\boldsymbol q}_n) = \Psi({\boldsymbol q}_1, \ldots, {\boldsymbol q}_n)$ if $\{{\boldsymbol q}_1,
\ldots, {\boldsymbol q}_n\} \in B$ and zero otherwise. In other words, ${P}(B)$
is multiplication by the indicator function of $\pi^{-1}(B)$
where $\pi$ is the obvious projection mapping $\hat\mathcal{Q}
\setminus \Delta \to \mathcal{Q}$, with $\Delta$ the set of coincidence
configurations.
To obtain other useful expressions for this PVM, we introduce the
formal kets $|\hat{q} \rangle$ for $\hat{q} \in \hat\mathcal{Q}$ (to be
treated like elements of $L^2(\hat\mathcal{Q})$), the anti-symmetrization
operator $S$ (i.e., the projection $L^2(\hat\mathcal{Q}) \to \mathscr{H}$), the
normalized anti-symmetrizer\footnote{The name means this: since $S$ is
a projection, $S \Psi$ is usually not a unit vector when $\Psi$ is.
Whenever $\Psi \in L^2(\hat\mathcal{Q})$ is supported by a fundamental
domain of the permutation group, i.e., by a set $\Omega \subseteq
\hat\mathcal{Q}$ on which (the restriction of) $\pi$ is a bijection
to $\mathcal{Q}$, the norm of $S\Psi$ is $1/\sqrt{n!}$, so that $s\Psi$ is
again a unit vector.} $s= \sqrt{n!} \, S$, and the formal kets $|s
\hat{q}\rangle := s|\hat{q} \rangle$ (to be treated like elements of
$\mathscr{H}$). The $|\hat{q} \rangle$ and $|s\hat{q} \rangle$ are
normalized in the sense that
\[
\sp{\hat{q}} {\hat{q}'} = \delta(\hat{q} - \hat{q}') \text{ and }
\sp{s\hat{q}} {s\hat{q}'} = (-1)^{\varrho(\hat{q},\hat{q}')} \,
\delta(q-q'),
\]
where $q=\pi(\hat{q})$, $q'=\pi(\hat{q}')$,
$\varrho(\hat{q},\hat{q}')$ is the permutation that carries
$\hat{q}$ into $\hat{q}'$ given that $q=q'$, and $(-1)^\varrho$
is the sign of the permutation $\varrho$. Now we can write
\begin{equation}\label{idenpovm}
{P}(dq) = \sum_{\hat{q} \in \pi^{-1}(q)} |\hat{q} \rangle
\langle \hat{q}| \, dq = n! \, S |\hat{q} \rangle \langle
\hat{q}| \, dq = |s\hat{q} \rangle \langle s\hat{q}| \, dq,
\end{equation}
where the sum is over the $n!$ ways of numbering the $n$
points in $q$; the last two terms actually do not depend on the choice
of $\hat{q} \in \pi^{-1}(q)$, the numbering of $q$.
The probability distribution arising from this PVM is
\begin{equation}\label{idenmeasure}
\mathbb{P}(dq) = \sum_{\hat{q} \in \pi^{-1}(q)}
|\Psi(\hat{q})|^2 \, dq = n! \, |\Psi(\hat{q})|^2 \, dq =
|\sp{s\hat{q}}{\Psi}|^2 \, dq
\end{equation}
with arbitrary $\hat{q} \in \pi^{-1}(q)$.
There is a way of viewing fermion wave functions as being defined on
$\mathcal{Q}$, rather than $\mathbb{R}^{3n}$, by regarding them as cross-sections
of a particular 1-dimensional vector bundle over $\mathcal{Q}$. To this end,
define an $n!$-dimensional vector bundle $E$ by
\begin{equation}\label{idenEdef}
E_q := \bigoplus_{\hat{q} \in \pi^{-1}(q)} \mathbb{C}\,.
\end{equation}
Every function $\Psi:\mathbb{R}^{3n} \to \mathbb{C}$ naturally gives rise to a
cross-section $\Phi$ of $E$, defined by
\begin{equation}
\Phi(q) := \bigoplus_{\hat{q} \in \pi^{-1}(q)} \Psi(\hat{q})\,.
\end{equation}
The anti-symmetric functions form a 1-dimensional subbundle of $E$
(see also \cite{identical} for a discussion of this bundle).
\section{Application to Simple Models}
\label{sec:example}
In this section, we point out how the jump rates of the model in
\cite{crea1} are contained in \eqref{tranrates} and present a
full-fledged Bell-type QFT for the second-quantized Dirac equation in
an external electromagnetic field.
Further cut-off QFTs that may provide interesting examples of
Bell-type QFTs, worth a detailed discussion in a future work
\cite{crea4}, are the scalar self-interacting field (e.g., $\Phi^4$),
QED, and other gauge field theories. We have to postpone the treatment
of these theories because they require discussions lying outside the
scope of this paper, in particular a discussion of the position
representation of photon wave functions in QED, and, concerning
$\Phi^4$, of the appropriate probability current for the Klein--Gordon
equation.
\subsection{A Simple QFT}\label{sec:crea1}
We presented a simple example of a Bell-type QFT in \cite{crea1},
and we will now briefly point to the aspects of this model that are
relevant here. The model is based on one of the simplest possible QFTs
\cite[p.~339]{Schweber}.
The relevant configuration space $\mathcal{Q}$ for a QFT (with a single
particle species) is the configuration space of a variable number of
identical particles in $\mathbb{R}^3$, which is the set $\Gamma(\mathbb{R}^3)$,
or, ignoring the coincidence configurations (as they are exceptions),
the set $\Gamma_{\!\neq} (\mathbb{R}^3)$ of all finite subsets of
$\mathbb{R}^3$. The $n$-particle sector of this is a manifold of dimension
$3n$; this configuration space is thus a union of (disjoint) manifolds
of different dimensions. The relevant configuration space for a theory
with several particle species is the Cartesian product of several
copies of $\Gamma_{\!\neq} (\mathbb{R}^3)$. In the model of
\cite{crea1}, there are two particle species, a fermion and a boson,
and thus the configuration space is
\begin{equation}\label{conffermionboson}
\mathcal{Q} = \Gamma_{\!\neq} (\mathbb{R}^3) \times \Gamma_{\!\neq} (\mathbb{R}^3).
\end{equation}
We will denote configurations by $q=(x,y)$ with $x$ the configuration
of the fermions and $y$ the configuration of the bosons.
For simplicity, we replaced in \cite{crea1} the sectors of $\Gamma_{\!\neq}
(\mathbb{R}^3) \times \Gamma_{\!\neq} (\mathbb{R}^3)$, which are manifolds, by vector
spaces of the same dimension (by artificially numbering the
particles), and obtained the union
\begin{equation}\label{crea1conf}
\hat{\mathcal{Q}} = \bigcup_{n=0}^\infty (\mathbb{R}^3)^n \times
\bigcup_{m=0}^\infty (\mathbb{R}^3)^m \,,
\end{equation}
with $n$ the number of fermions and $m$ the number of bosons. Here,
however, we will use \eqref{conffermionboson} as the configuration
space, since we have already discussed the space $\Gamma_{\!\neq}
(\mathbb{R}^3)$. In comparison with \eqref{crea1conf}, this amounts to
(merely) ignoring the numbering of the particles.
$\mathscr{H}$ is the tensor product of a fermion Fock space and a boson
Fock space, and thus the subspace of wave functions in
$L^2(\hat{\mathcal{Q}})$ that are anti-symmetric in the fermion coordinates
and symmetric in the boson coordinates. Let $S$ denote the appropriate
symmetrization operator, i.e., the projection operator
$L^2(\hat{\mathcal{Q}}) \to \mathscr{H}$, and $s$ the normalized symmetrizer
\begin{equation}\label{sdef}
s\Psi({\boldsymbol x}_1, \ldots, {\boldsymbol x}_n,{\boldsymbol y}_1, \ldots, {\boldsymbol y}_m) = \sqrt{n!\, m!} \,
S\Psi({\boldsymbol x}_1, \ldots, {\boldsymbol x}_n,{\boldsymbol y}_1, \ldots, {\boldsymbol y}_m),
\end{equation}
i.e., $s = \sqrt{N! \, M!} \, S$ with $N$ and $M$ the fermion and
boson number operators, which commute with $S$ and with each other.
As in Section \ref{sec:identical}, we denote by $\pi$ the
projection mapping $\hat{\mathcal{Q}} \setminus \Delta \to \mathcal{Q}$,
$\pi({\boldsymbol x}_1, \ldots, {\boldsymbol x}_n,{\boldsymbol y}_1, \ldots, {\boldsymbol y}_m) = (\{{\boldsymbol x}_1,
\ldots,{\boldsymbol x}_n\}, \{{\boldsymbol y}_1, \ldots, {\boldsymbol y}_m\})$. The configuration PVM
${P}(B)$ on $\mathcal{Q}$ is multiplication by
$\mathbf{1}_{\pi^{-1}(B)}$, which can be understood as acting on
$\mathscr{H}$, though it is defined on $L^2(\hat{\mathcal{Q}})$, since it is
permutation invariant and thus maps $\mathscr{H}$ to itself. We utilize
again the formal kets $|\hat{q}\rangle$ where $\hat{q} \in \hat{\mathcal{Q}}
\setminus \Delta$ is a numbered configuration, for which we also write
$\hat{q} = (\hat{x},\hat{y}) = ({\boldsymbol x}_1, \ldots, {\boldsymbol x}_n,{\boldsymbol y}_1, \ldots,
{\boldsymbol y}_m)$. We also use the symmetrized and normalized kets $|s\hat{q}
\rangle = s|\hat{q} \rangle$. As in \eqref{idenpovm}, we can write
\begin{equation}\label{crea1povm}
{P}(dq) = \sum_{\hat{q} \in \pi^{-1}(q)} |\hat{q} \rangle
\langle \hat{q}| \, dq = n!\, m! \, S |\hat{q} \rangle \langle
\hat{q}| \, dq = |s\hat{q} \rangle \langle s\hat{q}| \, dq
\end{equation}
with arbitrary $\hat{q} \in \pi^{-1}(q)$. For the probability
distribution, we thus have, as in \eqref{idenmeasure},
\begin{equation}\label{crea1measure}
\mathbb{P}(dq) = \sum_{\hat{q} \in \pi^{-1}(q)}
|\Psi(\hat{q})|^2 \, dq = n!\, m! \, |\Psi(\hat{q})|^2 \, dq =
|\sp{s\hat{q}}{\Psi}|^2 \, dq
\end{equation}
with arbitrary $\hat{q} \in \pi^{-1}(q)$.
The free Hamiltonian is the second quantized Schr\"odinger operator
(with zero potential), associated with the free process described in
Section~\ref{sec:free}. The interaction Hamiltonian is defined by
\begin{equation}\label{HIdef}
H_{I} = \int d^3{\boldsymbol x} \, \psi^\dag({\boldsymbol x})\, (a^\dag_\varphi({\boldsymbol x}) +
a_{\varphi}({\boldsymbol x}))\, \psi({\boldsymbol x})
\end{equation}
with $\psi^\dag({\boldsymbol x})$ the creation operators (in position
representation), acting on the \emph{fermion} Fock space, and
$a^\dag_\varphi({\boldsymbol x})$ the creation operators (in position
representation), acting on the \emph{boson} Fock space, regularized
through convolution with an $L^2$ function $\varphi:\mathbb{R}^3 \to \mathbb{R}$.
$H_{I}$ has a kernel; we will now obtain a formula for it, see
\eqref{crea1kernel} below. The $|s\hat{q} \rangle$ are connected to
the creation operators according to
\begin{equation}\label{shatqpsia}
|s\hat{q}\rangle = \psi^\dag({\boldsymbol x}_n) \cdots
\psi^\dag({\boldsymbol x}_1) a^\dag({\boldsymbol y}_m) \cdots a^\dag({\boldsymbol y}_1) |0\rangle\,,
\end{equation}
where $|0\rangle \in \mathscr{H}$ denotes the vacuum state. A relevant
fact is that the creation and annihilation operators
$\psi^\dag,\psi,a^\dag$ and $a$ possess kernels. Using the canonical
(anti\nobreakdash-)commutation relations for $\psi$ and $a$, one obtains
from
\eqref{shatqpsia} the following formulas for the kernels of
$\psi({\boldsymbol r})$ and $a({\boldsymbol r})$, ${\boldsymbol r} \in \mathbb{R}^3$:
\begin{align}
\sp{s\hat{q}}{\psi({\boldsymbol r})|s\hat{q}'} &= \delta_{n,n'-1} \,
\delta_{m,m'} \,
\delta^{3n'}(x \cup {\boldsymbol r} -x') \, (-1)^{\varrho((\hat{x},
{\boldsymbol r}),\hat{x}')} \, \delta^{3m}(y-y') \label{psikernel} \\
\sp{s\hat{q}}{a({\boldsymbol r})|s\hat{q}'} &= \delta_{n,n'} \,
\delta_{m,m'-1} \, \delta^{3n}(x-x') \,
(-1)^{\varrho(\hat{x},\hat{x}')} \,
\delta^{3m'}(y \cup {\boldsymbol r} - y') \label{akernel}
\end{align}
where $(x,y) = q = \pi(\hat{q})$, and $\varrho
(\hat{x},\hat{x}')$ denotes the permutation that carries $\hat{x}$ to
$\hat{x}'$ given that $x=x'$. The corresponding formulas for
$\psi^\dag$ and $a^\dag$ can be obtained by exchanging $\hat{q}$ and
$\hat{q}'$ on the right hand sides of \eqref{psikernel} and
\eqref{akernel}. For the smeared-out operator $a_\varphi({\boldsymbol r})$, we
obtain
\begin{equation}\label{aprofilekernel}
\sp{s\hat{q}}{a_\varphi({\boldsymbol r})|s\hat{q}'} = \delta_{n,n'} \,
\delta_{m,m'-1} \, \delta^{3n}(x-x') \,
(-1)^{\varrho(\hat{x},\hat{x}')} \sum_{{\boldsymbol y}' \in y'}
\delta^{3m}(y- y'\setminus {\boldsymbol y}') \, \varphi({\boldsymbol y}' - {\boldsymbol r})
\end{equation}
We make use of the resolution of the identity
\begin{equation}\label{resolution}
I = \int\limits_{\mathcal{Q}} dq \, |s\hat{q} \rangle \langle
s\hat{q}|\,.
\end{equation}
Inserting \eqref{resolution} twice into \eqref{HIdef} and exploiting
\eqref{psikernel} and \eqref{aprofilekernel}, we find
\begin{equation}\label{crea1kernel}
\begin{split}
\sp{s\hat{q}} {H_{I}| s\hat{q}'} &= \delta_{n,n'} \,
\delta_{m-1,m'} \, \delta^{3n}(x-x') \,
(-1)^{\varrho(\hat{x},\hat{x}')} \sum_{{\boldsymbol y} \in y} \delta^{3m'}
(y \setminus {\boldsymbol y} - y') \sum_{{\boldsymbol x} \in x} \varphi({\boldsymbol y} - {\boldsymbol x}) \: \\
&+ \delta_{n,n'} \, \delta_{m'-1,m} \,
\delta^{3n}(x-x') \, (-1)^{\varrho(\hat{x},\hat{x}')}
\sum_{{\boldsymbol y}' \in y'} \delta^{3m} (y - y' \setminus {\boldsymbol y}') \sum_{{\boldsymbol x} \in
x} \varphi({\boldsymbol y}' - {\boldsymbol x})\,.
\end{split}
\end{equation}
By \eqref{crea1povm}, the jump rates \eqref{tranrates} are
\begin{equation}
\sigma(q|q') = \frac{\Big[\tfrac{2}{\hbar} \, \operatorname{Im} \,
\sp{\Psi}{s\hat{q}} \sp{s\hat{q}}{H_{I}| s\hat{q}'}
\sp{s\hat{q}'}{\Psi}
\Big]^+} {\sp{\Psi}{s\hat{q}'} \sp{s\hat{q}'}{\Psi}} \,.
\end{equation}
More explicitly, we obtain from \eqref{crea1kernel} the rates
\begin{equation}\label{crea1rates}
\begin{split}
\sigma(q|q') &= \delta_{nn'} \,\delta_{m-1,m'} \,\delta^{3n}(x-x')
\sum_{{\boldsymbol y} \in y} \delta^{3m'}(y\setminus {\boldsymbol y}-y') \,
\sigma_{\mathrm{crea}}(q'\cup {\boldsymbol y}|q') \: \\
&+\delta_{nn'}\,\delta_{m,m'-1} \, \delta^{3n}(x-x') \sum_{{\boldsymbol y}' \in
y'} \delta^{3m}(y - y'\setminus {\boldsymbol y}') \, \sigma_{\mathrm{ann}}(q'\setminus
{\boldsymbol y}'|q')
\end{split}
\end{equation}
with
\begin{subequations}
\begin{align}
\sigma_{\mathrm{crea}}(q'\cup {\boldsymbol y}|q')&= \frac{2 \sqrt{m'+1}}{\hbar} \,
\frac{\Big[ \operatorname{Im} \, \Psi^*(\hat{q}) \,
(-1)^{\varrho(\hat{x},\hat{x}')} \sum\limits_{{\boldsymbol x}' \in x'}
\varphi({\boldsymbol y}-{\boldsymbol x}') \, \Psi(\hat{q}')\Big]^+}{ \Psi^*(\hat{q}') \,
\Psi(\hat{q}')} \label{crea1crearate} \\
\sigma_{\mathrm{ann}}(q'\setminus {\boldsymbol y}'|q')&= \frac{2} {\hbar \sqrt{m'}}
\,\frac{\Big[\operatorname{Im} \, \Psi^*(\hat{q}) \,
(-1)^{\varrho(\hat{x},\hat{x}')} \sum\limits_{{\boldsymbol x}' \in x'}
\varphi({\boldsymbol y}'-{\boldsymbol x}') \, \Psi(\hat{q}') \Big]^+}{ \Psi^*(\hat{q}') \,
\Psi(\hat{q}')} , \label{crea1annrate}
\end{align}
\end{subequations}
for arbitrary $\hat{q}' \in \pi^{-1}(q')$ and $\hat{q} \in
\pi^{-1}(q)$ with $q=(x',y'\cup{\boldsymbol y})$ respectively $q=(x',y'
\setminus {\boldsymbol y}')$. (Note that a sum sign can be drawn out of the plus
function if the terms have disjoint supports.)
Equation \eqref{crea1rates} is worth looking at closely: One can read
off that the only possible jumps are $(x',y') \to (x',y' \cup {\boldsymbol y})$,
creation of a boson, and $(x',y') \to (x',y' \setminus {\boldsymbol y}')$,
annihilation of a boson. In particular, while one particle is created
or annihilated, the other particles do not move. The process that we
considered in \cite{crea1} consists of pieces of Bohmian trajectories
interrupted by jumps with rates \eqref{crea1rates}; the process is
thus an example of the jump rate formula \eqref{tranrates}, and an
example of combining jumps and Bohmian motion by means of process
additivity.
The example shows how, for other QFTs, the jump rates
\eqref{tranrates} can be applied to relevant interaction Hamiltonians:
If $H_{I}$ is, in the position representation, a polynomial in the
creation and annihilation operators, then it possesses a kernel on the
relevant configuration space. A cut-off (implemented here by smearing
out the creation and annihilation operators) needs to be introduced to
make $H_{I}$ a well-defined operator on $L^2$.
If, in some QFT, the particle number operator is not conserved, jumps
between the sectors of configuration space are inevitable for an
equivariant process. And, indeed, when $H_{I}$ does not commute
with the particle number operator (as is usually the case), jumps can
occur that change the number of particles. Often, $H_{I}$ contains
\emph{only} off-diagonal terms with respect to the particle number;
then every jump will change the particle number. This is precisely
what happens in the model of \cite{crea1}.
\subsection{Efficient Calculation of Rates in the Previous Example}
\label{sec:efficient}
We would like to give another, refined way of calculating the explicit
jump rates \eqref{crea1rates} from the definition \eqref{HIdef} of
$H_{I}$. The calculation above is rather cumbersome, partly
\emph{because} of all the $\delta$'s. It is also striking that only
very few transitions $q' \to q$ are actually possible, which suggests
that it is unnecessary to write down a formula for the kernel
$\sp{q}{H_{I}|q'}$ valid for all pairs $q,q'$. Rather than writing
down all the $\delta$ terms as in \eqref{crea1rates}, it is easier
to specify the possible transitions $q' \to q$ and to write down the
rates, such as \eqref{crea1crearate} and \eqref{crea1annrate}, only
for these transitions. Thus, for a more efficient calculation of the
rates, it is advisable to first determine the possible transitions,
and then we need keep track only of the corresponding kernel elements.
\subsubsection{A Diagram Notation}
To formulate this more efficient strategy, it is helpful to regard
$\Psi$
as a cross-section of a fiber bundle $E$ over the Riemannian manifold
$\mathcal{Q}$, or of a countable union $E = \bigcup_i E^{(i)}$ of bundles
$E^{(i)}$ over Riemannian manifolds $\mathcal{Q}^{(i)}$ with $\mathcal{Q} =
\bigcup_i
\mathcal{Q}^{(i)}$. (In the present example, with $\mathcal{Q}$ given by
\eqref{conffermionboson}, we take $i$ to be the pair $(n,m)$ of
particle
numbers, $\mathcal{Q}^{(n,m)}$ to be the $(n,m)$-particle sector, and
$E^{(i)}$
to be defined by \eqref{idenEdef} (with $\pi$ the natural
projection
from $\hat{\mathcal{Q}} \setminus \Delta$, with $\hat{\mathcal{Q}}$ given by
\eqref{crea1conf}, to $\mathcal{Q}$). The $\hat{q} \in \pi^{-1}(q)$ can
be
viewed as defining an orthonormal basis of $E_q$.)
A key element of the strategy is a special diagram notation for
operators. The operators we have in mind are $H_{I}$ and its
building blocks, the field operators. The strategy will start with the
diagrams for the field operators, and obtain from them a diagram for
$H_{I}$. The diagram will specify, for an operator $O$, what the
kernel of $O$ is, while leaving out parts of the kernel that are zero.
So
let us assume that $O$ has kernel $\sp{q}{O|q'}$, i.e., $(O\Psi)(q) =
\int \sp{q}{O|q'} \, \Psi(q') \, dq'$. The diagram
\begin{equation}\label{arrow}
q' \xrightarrow[O]{K(q',\lambda)} F(q',\lambda)
\end{equation}
means that the operator $O$ has \emph{kernel constructed from $F$ and
$K$},
\begin{equation}\label{kernelarrow}
\sp{q}{O|q'} = \int\limits_{\Lambda} d\lambda \, \delta\big(q-
F(q',\lambda)\big) \, K(q',\lambda),
\end{equation}
where $\lambda$ varies in some parameter space $\Lambda$, $F: \mathcal{Q}
\times \Lambda \to \mathcal{Q}$, and $K$ is a function (or distribution) of
$q'$ and $\lambda$ such that
$K(q',\lambda) : E_{q'} \to E_{F(q',\lambda)}$
is a $\mathbb{C}$-linear mapping.
The role of $\lambda$ is to parametrize the possible transitions;
e.g., for the boson creation \eqref{crea1crearate} in the previous
section, $\lambda$ would be the position ${\boldsymbol y}$ of the new boson, and
$\Lambda = \mathbb{R}^3$. The notation \eqref{arrow} does not explicitly
mention what $\Lambda$ and the measure $d\lambda$ are; this will
usually be clear from the context of the diagram. The measure
$d\lambda$ will usually be a uniform distribution over the parameter
space $\Lambda$, such as Lebesgue measure if $\Lambda = \mathbb{R}^d$ or the
counting measure if $\Lambda$ is finite or countably infinite. We may
also allow having a different $\Lambda_{q'}$ for every $q'$.
In words, \eqref{arrow} may be read as: ``According to $O$, the
possible transitions from $q'$ are to $F(q',\lambda)$, and are
associated with the amplitudes $K(q',\lambda)$.'' In fact, when $O =
H$, a jump from $q'$ can lead only to those $q$'s for which $q =
F(q',\lambda)$ for some value of $\lambda$, and the corresponding jump
rate \eqref{tranrates} is
\begin{equation}\label{arrowrates}
\sigma\big(F(q',\lambda)\big|q'\big) = \frac{[(2/\hbar) \, \operatorname{Im} \,
\Psi^*(F(q',\lambda)) \, K(q',\lambda) \, \Psi(q')]^+} {\Psi^*(q')
\, \Psi(q')},
\end{equation}
provided that for given $q'$, $F(q', \,\cdot\,)$ is an injective
mapping. Here, $\sigma(q|q')$ is the density of the measure
$\sigma(dq|q')$ with respect to the measure on $\mathcal{Q}$
\begin{equation}\label{arrowuniform}
\mu_{q'}(dq) = \int\limits_{\Lambda} d\lambda \, \delta\big(
q-F(q',\lambda) \big) \, dq,
\end{equation}
where $\delta(q-q_0) \, dq$ denotes the measure on $\mathcal{Q}$ with total
weight 1 concentrated at $q_0$. The measure \eqref{arrowuniform}, the
image of $d\lambda$ under the map $F(q',\,\cdot\,)$, is concentrated on
the set $\{F(q',\lambda) : \lambda \in \Lambda\}$ of possible
destinations and plays the role of the ``uniform distribution'' over
this set. In other words,
\eqref{arrowrates} is the rate of occurrence, with respect to
$d\lambda$,
of the transition corresponding to $\lambda$. (For the boson creation
rate \eqref{crea1crearate}, $\mu_{q'}(dq)$ turns out to be the Lebesgue
measure in ${\boldsymbol y}$ on the subset $\{q' \cup {\boldsymbol y}: {\boldsymbol y} \in \mathbb{R}^3 \setminus q'\}
\subseteq \mathcal{Q}$.)
Given $O$, the choice of $\Lambda, F$, and $K$ is not unique. One
could always choose $\Lambda = \mathcal{Q}$, $F(q',q) = q$, and $K(q',q) =
\sp{q}{O|q'}$, which of course would mean to miss the point of this
notation. The case that $F$ and $K$ do not depend on a parameter
$\lambda$ is formally contained in the scheme \eqref{kernelarrow} by
taking $\Lambda$ to be a one-point set (and $d\lambda$ the counting
measure); in this case \eqref{kernelarrow} means
\begin{equation}\label{kernelarrownolambda}
\sp{q}{O|q'} = \delta(q-F(q')) \, K(q')\,.
\end{equation}
Conversely, whenever $\# \Lambda =1$, the dependence of $F$ and $K$ on
the parameter $\lambda$ is irrelevant.
A basic advantage of the notation \eqref{arrow}, compared to writing
down a formula for $\sp{q}{O|q'}$, is that many $\delta$ factors
become unnecessary. For example, if $O$ is multiplication by
$V(q)$, then ($\Lambda$ is a one-point set and) we have the diagram
\[
q' \xrightarrow[O]{V(q')} q'.
\]
\subsubsection{Operations With Diagrams}
For the product $O_2O_1$ of two operators given by diagrams, we have
the diagram
\begin{equation}\label{productarrow}
q' \xrightarrow[O_2O_1]{K_2(F_1(q',\lambda_1),\lambda_2) \,
K_1(q',\lambda_1)} F_2(F_1(q',\lambda_1),\lambda_2)
\end{equation}
with parameter space $\Lambda_1 \times \Lambda_2$, for which we
also write
\begin{equation}\label{concatarrow}
q' \xrightarrow[O_1]{K_1(q',\lambda_1)} F_1(q',\lambda_1)
\xrightarrow[O_2]{K_2(F_1(q',\lambda_1),\lambda_2)}
F_2(F_1(q',\lambda_1),\lambda_2).
\end{equation}
We thus define the concatenation of two diagrams by means of
the composition of the transition mappings and the product of the
amplitudes,
i.e., using obvious notation,
\begin{equation}\label{shortconcatarrow}
q_1 \xrightarrow{\alpha} q_2 \xrightarrow{\beta} q_3 \quad
\text{means} \quad q_1 \xrightarrow{\alpha\beta} q_3.
\end{equation}
Thus, multiplication of operators corresponds to concatenation of
diagrams.
For the sum $O_1 +O_2$ of two operators given by diagrams with the
same parameter space $\Lambda_1 = \Lambda _2 = \Lambda$ and the same
transition mapping $F_1(q', \lambda) = F_2(q',\lambda) =
F(q',\lambda)$, we have the diagram
\begin{equation}\label{sumarrow}
q' \xrightarrow[O_1 + O_2]{K_1(q',\lambda) + K_2(q',\lambda)}
F(q',\lambda).
\end{equation}
\subsubsection{Diagrams of Creation and Annihilation Operators}
We now write down diagrams for creation and annihilation operators.
In the case that $O = O({\boldsymbol r})$ arises from formally evaluating an
operator-valued distribution $O({\boldsymbol x})$ at ${\boldsymbol x} = {\boldsymbol r}$, the dependence
of $K(q',\lambda)$ on $\lambda$ is in the sense of distributions
rather than functions. More precisely, we have
\begin{equation}\label{Kdistribution}
K(q',\lambda) = D(q',\lambda) \, K_0(q',\lambda)
\end{equation}
where $D$ is a (real-valued) distribution on $\mathcal{Q} \times \Lambda$,
and $K_0$ a mapping-valued function such that for every $q'$ and
$\lambda$, $K_0(q',\lambda)$ is a linear mapping $E_{q'} \to
E_{F(q',\lambda)}$.
For $\psi^\dag({\boldsymbol r})$ and $\psi({\boldsymbol r})$, ${\boldsymbol r} \in \mathbb{R}^3$, we have
(recall that $x'$ is a finite subset of $\mathbb{R}^3$)
\begin{subequations}
\begin{align}
(x',y')& \xrightarrow[\psi^\dag({\boldsymbol r})] {\alpha_{\mathrm{f}}} (x' \cup {\boldsymbol r},y')
\qquad\quad (\#\Lambda =1) \\ (x',y')& \xrightarrow[\psi({\boldsymbol r})]
{\delta({\boldsymbol x}'-{\boldsymbol r}) \, \varepsilon_{\mathrm{f}}} (x' \setminus {\boldsymbol x}',y') \qquad
(\Lambda = x', \lambda={\boldsymbol x}')
\end{align}
\end{subequations}
using linear mappings $\alpha_{\mathrm{f}}: E_{q'} \to E_{ (x' \cup {\boldsymbol r},y')}$
(``append a fermion'') and $\varepsilon_{\mathrm{f}}: E_{q'} \to E_{ (x'
\setminus {\boldsymbol x}',y')}$ (``erase a fermion''), which can be regarded as
the natural mappings between these fiber spaces. They are defined
through the following properties:
\begin{subequations}
\begin{align}
&\alpha_{\mathrm{f}} \Psi\text{ is appropriately symmetrized} \\
&\big(\alpha_{\mathrm{f}} \Psi\big)((\hat{x}', {\boldsymbol r}), \hat{y}') =
\frac{1}{\sqrt{n'
+1}} \, \Psi(\hat{x}',\hat{y}') \\
&\big(\varepsilon_{\mathrm{f}} \Psi\big) (\hat{x},\hat{y}') =
\sqrt{n'} \, \Psi((\hat{x}, {\boldsymbol x}'),\hat{y}')
\end{align}
\end{subequations}
where $\Psi \in E_{q'}$, and $\hat{x}$ is an arbitrary ordering of the
set $x=x' \setminus {\boldsymbol x}'$. (Recall that the set $\pi^{-1}(q')$ of
the possible orderings of $q'$ forms a basis of $E_{q'}$, so that
every ordering $(\hat{x}', \hat{y}') = \hat{q}' \in
\pi^{-1}(q')$ corresponds to a particular component of
$\Psi$. Thus, $((\hat{x}',{\boldsymbol r}),\hat{y}') \in \pi^{-1}(x' \cup {\boldsymbol r},
y')$ corresponds to a particular component in $E_{(x' \cup {\boldsymbol r}, y')}$.)
For the smeared-out creation and annihilation operators
$a_\varphi^\dag({\boldsymbol r})$ and $a_\varphi({\boldsymbol r})$, we have
\begin{subequations}
\begin{align}
(x',y') &\xrightarrow[a_\varphi^\dag({\boldsymbol r})] {\varphi({\boldsymbol y}-{\boldsymbol r}) \,
\alpha_{\mathrm{b}}} (x',y' \cup {\boldsymbol y}) \qquad (\Lambda = \mathbb{R}^3, \lambda =
{\boldsymbol y}) \\
(x',y') &\xrightarrow[a_\varphi({\boldsymbol r})] {\varphi({\boldsymbol y}'-{\boldsymbol r}) \,
\varepsilon_{\mathrm{b}}} (x', y' \setminus {\boldsymbol y}') \qquad (\Lambda = y',
\lambda = {\boldsymbol y}')
\end{align}
\end{subequations}
where $\alpha_{\mathrm{b}}$ (``append a boson'') and $\varepsilon_{\mathrm{b}}$
(``erase a boson'') are the analogous linear mappings relating
different spaces, $\alpha_{\mathrm{b}}: E_{q'} \to E_{(x',y' \cup {\boldsymbol y})}$ and
$\varepsilon_{\mathrm{b}}: E_{q'} \to E_{(x',y' \setminus {\boldsymbol y}')}$, defined by
the following properties:
\begin{subequations}
\begin{align}
&\alpha_{\mathrm{b}} \Psi\text{ is appropriately symmetrized} \\
&\big(\alpha_{\mathrm{b}} \Psi\big) (\hat{x}',(\hat{y}', {\boldsymbol y})) =
\frac{1}{\sqrt{m'+1}} \, \Psi(\hat{x}',\hat{y}') \\
&\big(\varepsilon_{\mathrm{b}} \Psi\big) (\hat{x}', \hat{y}) =
\sqrt{m'} \, \Psi(\hat{x}', (\hat{y}, {\boldsymbol y}')),
\end{align}
\end{subequations}
where $\hat{y}$ is an arbitrary ordering of the set $y=y' \setminus
{\boldsymbol y}'$, $\hat{x}'$ one of $x'$, $\hat{y}'$ one of $y'$, and $\Psi \in
E_{q'}$.
\subsubsection{Application of the Diagram Method}
Now let us apply the strategy to the example \eqref{HIdef} of the
previous section. For $\psi^\dag({\boldsymbol r}) \, a^\dag_\varphi({\boldsymbol r}) \,
\psi({\boldsymbol r})$, we have the diagram
\[
q' \xrightarrow[\psi({\boldsymbol r})]{\delta({\boldsymbol x}'-{\boldsymbol r}) \, \varepsilon_{\mathrm{f}}} (x'
\setminus {\boldsymbol x}', y') \xrightarrow[a^\dag_\varphi({\boldsymbol r})]{\varphi({\boldsymbol y}
-{\boldsymbol r}) \, \alpha_{\mathrm{b}}} (x' \setminus {\boldsymbol x}',y' \cup {\boldsymbol y})
\xrightarrow[\psi^\dag({\boldsymbol r})] {\alpha_{\mathrm{f}}} (x' \setminus {\boldsymbol x}' \cup
{\boldsymbol r},y' \cup {\boldsymbol y})
\]
with $\Lambda = x' \times \mathbb{R}^3$. Using the concatenation rule
\eqref{shortconcatarrow}, we can write instead
\[
q' \xrightarrow[\psi^\dag({\boldsymbol r}) \, a^\dag_\varphi({\boldsymbol r}) \,
\psi({\boldsymbol r})]{\delta({\boldsymbol x}'-{\boldsymbol r}) \,\varphi({\boldsymbol y} -{\boldsymbol r}) \, \alpha_{\mathrm{f}}
\alpha_{\mathrm{b}} \varepsilon_{\mathrm{f}}} (x' \setminus {\boldsymbol x}' \cup {\boldsymbol r},y' \cup
{\boldsymbol y}).
\]
Integrating over $d{\boldsymbol r}$, we obtain, since $x' \setminus {\boldsymbol x}' \cup {\boldsymbol r}$
may
be replaced by $x'$, which is independent of ${\boldsymbol x}'$,
\begin{equation}\label{creaarrow}
q' \xrightarrow[\int d{\boldsymbol r} \, \psi^\dag({\boldsymbol r}) \, a^\dag_\varphi({\boldsymbol r})
\, \psi({\boldsymbol r})]{\sum\limits_{{\boldsymbol x}' \in x'}\varphi({\boldsymbol y} -{\boldsymbol x}') \,
\alpha_{\mathrm{f}} \alpha_{\mathrm{b}} \varepsilon_{\mathrm{f}}} (x',y' \cup {\boldsymbol y}),
\end{equation}
with $\Lambda = \mathbb{R}^3$. We have now taken care of one of two terms in
\eqref{HIdef}, involving
$a^\dag$ rather than $a$. {}From \eqref{creaarrow} we read off, without
a big calculation, that this term corresponds to jumps $(x',y') \to
(x',y'\cup {\boldsymbol y})$, or creation of a boson. The corresponding jump rate
is given by \eqref{arrowrates}, and reads here:
\begin{equation}\label{crea1arrowcrearate}
\sigma(x',y' \cup {\boldsymbol y}|q') = \frac{2}{\hbar} \, \frac{\Big[\operatorname{Im} \,
\Psi^*(x',y' \cup {\boldsymbol y}) \sum\limits_{{\boldsymbol x}' \in x'} \varphi({\boldsymbol y} -{\boldsymbol x}')
\, \alpha_{\mathrm{f}} \alpha_{\mathrm{b}} \varepsilon_{\mathrm{f}} \, \Psi(q')\Big]^+}
{\Psi^*(q') \, \Psi(q')}.
\end{equation}
This result agrees with \eqref{crea1crearate}.\footnote{Here is why:
First, $\Psi^*(q') \, \Psi(q') = n'! \, m'! \, \Psi^*(\hat{q}') \,
\Psi(\hat{q}')$ because the inner product in $E_{q'}$ involves
summation over all $\hat{q}' \in \pi^{-1}(q')$. Similarly, the
square bracket in the numerator of \eqref{crea1arrowcrearate}
involves the inner product of $E_{(x',y' \cup {\boldsymbol y})}$, consisting of
$n'! \, (m'+1)!$ contributions. The numberings $\hat{q}$ and
$\hat{q}'$ in \eqref{crea1crearate} can be so chosen that $\hat{x} =
\hat{x}'$, ${\boldsymbol x}'$ gets the last place of $\hat{x}'$, and $\hat{y} =
\hat{y}' \cup {\boldsymbol y}$; then $\varrho(\hat{x},\hat{x}')$ is trivial,
and $\alpha_{\mathrm{f}} \alpha_{\mathrm{b}} \varepsilon_{\mathrm{f}} \Psi(\hat{q}) =
(n')^{-1/2}
(m'+1)^{-1/2} (n')^{1/2} \Psi(\hat{q}')$. Thus, the square bracket in
\eqref{crea1arrowcrearate} is $n'! \, m'!\sqrt{m'+1}$ times the
square bracket in \eqref{crea1crearate}.}
We treat the term $\int d{\boldsymbol r} \, \psi^\dag({\boldsymbol r}) \, a_\varphi({\boldsymbol r}) \,
\psi({\boldsymbol r})$ in the same way: We begin with the diagram
\[
q' \xrightarrow[\psi({\boldsymbol r})]{\delta({\boldsymbol x}'-{\boldsymbol r}) \, \varepsilon_{\mathrm{f}}} (x'
\setminus {\boldsymbol x}', y') \xrightarrow[a_\varphi({\boldsymbol r})]{\varphi({\boldsymbol y}'
-{\boldsymbol r}) \, \varepsilon_{\mathrm{b}}} (x' \setminus {\boldsymbol x}',y' \setminus {\boldsymbol y}')
\xrightarrow[\psi^\dag({\boldsymbol r})] {\alpha_{\mathrm{f}}} (x' \setminus {\boldsymbol x}' \cup
{\boldsymbol r},y' \setminus {\boldsymbol y}')
\]
with $\Lambda = x' \times y'$. Then we integrate over $d{\boldsymbol r}$ and
obtain the associated jump rate
\begin{equation}\label{crea1arrowannrate}
\sigma(x',y' \setminus {\boldsymbol y}'|q') = \frac{2}{\hbar} \, \frac{\Big[\operatorname{Im}
\, \Psi^*(x',y' \setminus {\boldsymbol y}') \sum\limits_{{\boldsymbol x}' \in x'}
\varphi({\boldsymbol y}' -{\boldsymbol x}') \, \alpha_{\mathrm{f}} \varepsilon_{\mathrm{b}} \varepsilon_{\mathrm{f}}
\,
\Psi(q')\Big]^+} {\Psi^*(q') \, \Psi(q')},
\end{equation}
which agrees with \eqref{crea1annrate}. Finally, $H_{I}$ (the sum of
both contributions) corresponds according to \eqref{tranrates} to jumps
which, since the two contributions have no transitions $q' \to q$ in
common
(or, in other words, since their kernels have disjoint supports in
$\mathcal{Q}
\times \mathcal{Q}$), are \emph{either} $q' \to (x',y' \cup {\boldsymbol y})$, with rate
\eqref{crea1arrowcrearate}, \emph{or} $q' \to (x',y' \setminus {\boldsymbol y}')$,
with
rate \eqref{crea1arrowannrate}.
\subsection{Pair Creation in an External Field}
\label{sec:positron}
As our second example, we present the Bell-type version of a
reasonable and often used QFT of electrons and positrons, in which the
electromagnetic field is a background field \cite{Ruijsenaars}. The
Bell-type version exhibits pair creation and annihilation (in the
literal sense) and employs various notions we have introduced: process
additivity, the configuration space $\Gamma_{\!\neq}(\mathbb{R}^3)$ of a variable
number of identical particles, the free process, POVMs which are not
PVMs, and stochastic jumps.
\subsubsection{Fock Space and Hamiltonian}\label{sec:posiFock}
We consider the second quantized Dirac field in an electromagnetic
background field $A_\mu({\boldsymbol x},t)$. In terms of field operators, the
Hamiltonian reads
\begin{equation}\label{fieldhamil}
H= \int d^3 x :{\Phi^*}({\boldsymbol x})\big[-i c\hbar \boldsymbol{\alpha} \cdot
\nabla +\beta m c^2+ e(\boldsymbol{\alpha}\cdot\boldsymbol{A} +
A_0) \big]\Phi({\boldsymbol x}):\;\;,
\end{equation}
with colons denoting normal ordering. Note that $H$ is time-dependent
due to the time-dependence of $A_\mu({\boldsymbol x},t)$; more precisely,
$H_{I}$ is time-dependent while $H_0$ is fixed. As a consequence,
the relevant jump rate \eqref{tranrates} is now time-dependent in
three ways: through $H_{I}$, through $\Psi$, and through $q' =
Q_t$.
We quickly recall what the Hilbert space and the field operators are,
and specify what POVM we use. After that, we construct the associated
process.
The Hilbert space $L^2(\mathbb{R}^3,\mathbb{C}^4)$ of the Dirac equation is split
into the orthogonal sum $\mathscr{H}_+ \oplus \mathscr{H}_-$ of the positive
and negative energy subspaces of the \emph{free} Dirac operator,
\[
h_0= -i c\hbar {\boldsymbol \alpha} \cdot \nabla + \beta mc^2\,.
\]
The 1-electron Hilbert space $\mathscr{H}_\mathrm{e}$ and the 1-positron Hilbert
space $\mathscr{H}_\mathrm{p}$ are copies of $\mathscr{H}_+$, and the Fock space
$\mathscr{F}=\Gamma \mathscr{H}^{(1)}$ arises then from the one-particle
Hilbert space $\mathscr{H}^{(1)} = \mathscr{H}_\mathrm{e} \oplus \mathscr{H}_\mathrm{p}$ in
the usual manner: with the anti-symmetrization operator ${\mathrm{Anti}\,}$,
\begin{equation}\label{elplusposFock}
\mathscr{F}= \bigoplus_{N=0}^\infty {\mathrm{Anti}\,}
(({\mathscr{H}_\mathrm{e}}\oplus{\mathscr{H}_\mathrm{p}})^{\otimes N})\,,
\end{equation}
which can be naturally identified with
\begin{equation}\label{elFockposFock}
\mathscr{H} := \mathscr{F}_\mathrm{e} \otimes \mathscr{F}_\mathrm{p} =
\bigoplus_{n=0}^\infty {\mathrm{Anti}\,} (\mathscr{H}_\mathrm{e}^{\otimes n}) \otimes
\bigoplus_{{\widetilde{n}}=0}^\infty {\mathrm{Anti}\,} (\mathscr{H}_\mathrm{p}^{\otimes {\widetilde{n}}})\,.
\end{equation}
Since $\mathscr{H}_+ \subseteq L^2(\mathbb{R}^3,\mathbb{C}^4)$, $\mathscr{H}$ can be
understood as a subspace of
\begin{equation}\label{elHext}
\mathscr{H}_{\mathrm{ext}} := \bigoplus_{n=0}^\infty
{\mathrm{Anti}\,}(L^2(\mathbb{R}^3,\mathbb{C}^4)^{\otimes n}) \otimes
\bigoplus_{{\widetilde{n}}=0}^\infty {\mathrm{Anti}\,}(L^2(\mathbb{R}^3,\mathbb{C}^4)^{\otimes {\widetilde{n}}}) .
\end{equation}
We choose the POVM and configuration space in the way suggested by the
form \eqref{elFockposFock}, rather than \eqref{elplusposFock}:
\begin{equation}
\mathcal{Q} = \Gamma_{\!\neq}(\mathbb{R}^3) \times \Gamma_{\!\neq}(\mathbb{R}^3),
\end{equation}
where the first factor represents electrons and the second
positrons. (Recall from Section \ref{sec:free} that
$\Gamma_{\!\neq}(\mathbb{R}^3)$ denotes the space of all finite subsets
of $\mathbb{R}^3$. Another interesting possibility, suggested by the
representation (\ref{elplusposFock}), is to set $\mathcal{Q} =
\Gamma_{\!\neq}(\mathbb{R}^3)$. This would mean that, insofar as the
configuration is concerned, electrons and positrons are not
distinguished. However, we will not pursue this possibility here.)
The natural POVM ${P}$ (see Section~\ref{sec:GammaPOVM} and
Section~\ref{sec:identical}) can be expressed as an extension from
rectangular sets (the existence of such an extension is proved in
Section~4.4 of \cite{crea2A}):
\[
{P}(B_\mathrm{e} \times B_\mathrm{p}) = \Gamma{P}^{(1)}(B_\mathrm{e}) \otimes
\Gamma{P}^{(1)}(B_\mathrm{p})
\]
with ${P}^{(1)}$ the POVM on $\mathscr{H}_+$ that we considered before,
arising by projection from the natural PVM on $L^2(\mathbb{R}^3,\mathbb{C}^4)$.
Alternatively, ${P}$ can be viewed as arising, by projection to
$\mathscr{H}$, and from $\hat{\mathcal{Q}} = \bigcup_{n=0}^\infty (\mathbb{R}^3)^n
\times \bigcup_{{\widetilde{n}}=0}^\infty (\mathbb{R}^3)^{\widetilde{n}}$ to $\mathcal{Q}$, of the natural
PVM on $\hat{\mathcal{Q}}$ acting on $\mathscr{H}_{\mathrm{ext}}$. Note that ${P}$
represents the usual $|\Psi|^2$ distribution in the sense that for a
configuration $q$ with electrons at ${\boldsymbol x}_1, \ldots, {\boldsymbol x}_n$ and
positrons at ${\widetilde{\vx}}_1, \ldots, {\widetilde{\vx}}_{\widetilde{n}}$, we have
\[
\mathbb{P}(dq) = \sp{\Psi}{{P}(dq)|\Psi} = n! {\widetilde{n}}! \,
|\Psi^{(n,{\widetilde{n}})}({\boldsymbol x}_1, \ldots, {\widetilde{\vx}}_{\widetilde{n}})|^2 \, d{\boldsymbol x}_1 \cdots d{\widetilde{\vx}}_{\widetilde{n}}
\]
where $\Psi^{(n,{\widetilde{n}})}$ is just the wave function $(\mathbb{R}^3)^{n+{\widetilde{n}}}\to
(\mathbb{C}^4)^{\otimes (n+{\widetilde{n}})}$ we get when we decompose the state vector
in the manner suggested by \eqref{elHext}. $\Psi$ is normalized so
that
\[
\sum_{n,{\widetilde{n}} =0}^\infty \int d{\boldsymbol x}_1 \cdots d{\widetilde{\vx}}_{\widetilde{n}} \,
|\Psi^{(n,{\widetilde{n}})}({\boldsymbol x}_1, \ldots, {\widetilde{\vx}}_{\widetilde{n}})|^2 = 1.
\]
The field operator is defined by
\begin{equation}\label{Phidef}
\Phi(f) = b(P_+ f) + d^*(CP_- f)
\end{equation}
where $f$ is a test function from $L^2(\mathbb{R}^3,\mathbb{C}^4)$, $P_\pm$ is the
projection to $\mathscr{H}_\pm \subseteq L^2(\mathbb{R}^3,\mathbb{C}^4)$, $C$ is the
charge conjugation operator which maps $\mathscr{H}_-$ to $\mathscr{H}_+$
and vice versa, and $b$ is the electron annihilation and $d^*$ the
positron creation operator. Letting ${\boldsymbol e}_i$ be the standard
orthonormal basis of $\mathbb{C}^4$, $i =1,2,3,4$, $\Phi({\boldsymbol x})$ stands
for $\Phi_i({\boldsymbol x}) = \Phi({\boldsymbol e}_i \, \delta(\,\cdot\, -{\boldsymbol x}))$, where
$i$ gets contracted with the ${\boldsymbol \alpha}$ matrices. Similarly, we
define, as usual,
\begin{subequations}
\begin{align}
b_i({\boldsymbol x}) &= b\Big(P_+({\boldsymbol e}_i \,
\delta(\,\cdot\, - {\boldsymbol x})) \Big) \\
\text{and } d_i({\boldsymbol x}) &= d \Big(CP_-({\boldsymbol e}_i \,
\delta(\,\cdot\, - {\boldsymbol x})) \Big).
\end{align}
\end{subequations}
We thus have $\Phi_i({\boldsymbol x}) =
b_i({\boldsymbol x}) + d_i^*({\boldsymbol x})$.
\subsubsection{The Associated Process}
We now describe the associated Markov process. The free part of
(\ref{fieldhamil}),
\[
H_0= \int d^3 x :{\Phi^*}({\boldsymbol x})\big[-i c \hbar
{\boldsymbol \alpha}\cdot\nabla +\beta m c^2 \big]\Phi({\boldsymbol x}):\;\;,
\]
preserves particle numbers (it commutes with the electron and
positron number operators), evolving the $(n,{\widetilde{n}})$-particle sector
of the Fock space according to the free $(n,{\widetilde{n}})$-particle Hamiltonian
\[
H^{(n,{\widetilde{n}})}_0 = \sum_{k=1}^n h^{(k)}_0 + \sum_{{\widetilde{k}}=1}^{\widetilde{n}}
\widetilde{h}^{({\widetilde{k}})}_0\,,
\]
with
\begin{align}
h^{(k)}_0 &= -i c\hbar {\boldsymbol \alpha}^{(k)} \cdot \nabla_k + \beta^{(k)}
mc^2
\nonumber\\
\widetilde{h}^{({\widetilde{k}})}_0 &= -i c\hbar \widetilde\valpha^{({\widetilde{k}})} \cdot
\widetilde{\nabla}_{\widetilde{k}}
+ \widetilde\beta^{({\widetilde{k}})} mc^2\,,\nonumber
\end{align}
where ${\boldsymbol \alpha}^{(k)}$ and $\beta^{(k)}$ act on the $k$-th electron
index in the tensor product representation \eqref{elFockposFock} and
$\widetilde\valpha^{({\widetilde{k}})}$ and $\widetilde{\beta}^{({\widetilde{k}})}$ on the ${\widetilde{k}}$-th
positron index. $\widetilde{\nabla}_{\widetilde{k}}$ is the gradient with
respect to ${\widetilde{\vx}}_{\widetilde{k}}$.
With $H_0$ is associated a deterministic motion of the configuration
in $\mathcal{Q}$, the free process introduced in Section \ref{sec:free}.
During this motion, the actual numbers $N, {\widetilde{N}}$ of electrons and
positrons remain constant, while the positions $(\boldsymbol X_1, \ldots, \boldsymbol X_N,
\widetilde{\boldsymbol X}_1, \ldots, \widetilde{\boldsymbol X}_{\widetilde{N}})=:Q$ move according to Bohm--Dirac velocities
(\ref{BohmDirac}), i.e.
\begin{subequations}\label{elposmotion}
\begin{align}
\dot{\boldsymbol X}_k &= c\frac{\Psi^*(Q) \, {\boldsymbol \alpha}^{(k)} \, \Psi(Q)}
{\Psi^*(Q) \, \Psi(Q)} \\
\dot{\widetilde{\boldsymbol X}}_{\widetilde{k}} &= c\frac{\Psi^*(Q) \, \widetilde\valpha^{({\widetilde{k}})} \, \Psi(Q)}
{\Psi^*(Q) \, \Psi(Q)}
\end{align}
\end{subequations}
where numerators and denominators are scalar products in
$(\mathbb{C}^4)^{\otimes (N+{\widetilde{N}})}$.
We turn now to the interaction part. Setting $A ={\boldsymbol \alpha} \cdot
e\boldsymbol{A} + e A_0$, we have that
\begin{subequations}
\begin{align}
H_{I}&= \int d^3 {\boldsymbol x} :{\Phi^*}({\boldsymbol x}) \, A({\boldsymbol x})\,\Phi({\boldsymbol x}):\;\; =\\
&= \sum_{i,j=1}^4 \int d^3 {\boldsymbol x} :(b^*_i({\boldsymbol x}) +
d_i({\boldsymbol x})) \, A^{i,j} ({\boldsymbol x}) \,
(b_j({\boldsymbol x})+d^*_j({\boldsymbol x})):\;\; = \\
\begin{split} \label{HIterms}
&= \sum_{i,j=1}^4 \int d^3 {\boldsymbol x} \, \Big(b^*_i({\boldsymbol x})
\,
A^{i,j}({\boldsymbol x}) \, b_j({\boldsymbol x}) + d_i({\boldsymbol x})
\,A^{i,j}({\boldsymbol x}) \, b_j({\boldsymbol x}) \: + \\
&\quad + \: b^*_i({\boldsymbol x}) \, A^{i,j}({\boldsymbol x}) \,
d^*_j({\boldsymbol x}) - d^*_j({\boldsymbol x}) \,
A^{i,j}({\boldsymbol x}) \, d_i({\boldsymbol x}) \Big).
\end{split}
\end{subequations}
Since $H_{I}$ is a polynomial in creation and annihilation
operators, it possesses a kernel and corresponds to stochastic
jumps. To compute the rates, we apply the strategy developed in
Section \ref{sec:efficient}, using diagrams. To this end, we regard
fermionic wave functions again as cross-sections of a bundle $E$,
defined here by
\begin{equation}\label{elposEdef}
E_q = \bigoplus_{\hat{q} \in \pi^{-1}(q)} (\mathbb{C}^4)^{\otimes n}
\otimes (\mathbb{C}^4)^{\otimes {\widetilde{n}}}.
\end{equation}
Fermionic symmetry of a cross-section $\Psi$ of $E$ means that
\begin{equation}\label{Psiantisym}
\Psi\!\!
\begin{array}{l}
{\scriptstyle \varrho(i_1 \ldots i_n),
{\widetilde{\permutation}}({\tilde{\imath}}_1 \ldots {\tilde{\imath}}_{\widetilde{n}})} \\
(\varrho({\boldsymbol x}_1 \ldots {\boldsymbol x}_n), {\widetilde{\permutation}}({\widetilde{\vx}}_1 \ldots
{\widetilde{\vx}}_{\widetilde{n}})) \\ {}
\end{array}
= (-1)^\varrho \, (-1)^{\widetilde{\permutation}} \, \Psi\!\!
\begin{array}{l}
{\scriptstyle i_1 \ldots i_n, {\tilde{\imath}}_1 \ldots
{\tilde{\imath}}_{\widetilde{n}}} \\
({\boldsymbol x}_1 \ldots {\boldsymbol x}_n, {\widetilde{\vx}}_1 \ldots {\widetilde{\vx}}_{\widetilde{n}}) \\ {}
\end{array}
\end{equation}
for all permutations $\varrho \in S_n$ and ${\widetilde{\permutation}} \in
S_{{\widetilde{n}}}$.
The diagrams for $b^*_i({\boldsymbol x}),b_i({\boldsymbol x}),d^*_i({\boldsymbol x})$,
and $d_i({\boldsymbol x})$ are
\begin{subequations}
\begin{align}
(x',{\widetilde{x}}') &\xrightarrow [b^*_i({\boldsymbol x})]
{\sum_j {S_+}^{j}_{i} ({\boldsymbol x}' - {\boldsymbol x}) \,
\alpha_\mathrm{e}({\boldsymbol e}_j)} (x'\cup {\boldsymbol x}',{\widetilde{x}}')\\
(x',{\widetilde{x}}') &\xrightarrow [b_i({\boldsymbol x})]
{\sum_j {S_+}^{j}_{i} ({\boldsymbol x}' - {\boldsymbol x}) \,
\varepsilon_\mathrm{e}({\boldsymbol e}_j)} (x'\setminus {\boldsymbol x}',{\widetilde{x}}')\\
(x',{\widetilde{x}}') &\xrightarrow [d^*_i({\boldsymbol x})]
{\sum_j {S_-}^{j}_{i} ({\widetilde{\vx}}' - {\boldsymbol x}) \,
\alpha_\mathrm{p}({\boldsymbol e}_j)} (x',{\widetilde{x}}'\cup {\widetilde{\vx}}')\\
(x',{\widetilde{x}}') &\xrightarrow [d_i({\boldsymbol x})]
{\sum_j {S_-}^{j}_{i} ({\widetilde{\vx}}' - {\boldsymbol x}) \,
\varepsilon_\mathrm{p}({\boldsymbol e}_j)} (x',{\widetilde{x}}'\setminus {\widetilde{\vx}}')
\end{align}
\end{subequations}
where the matrix function ${S_+}_j^i ({\boldsymbol x})$ is defined
as the $j$-component of $P_+ ({\boldsymbol e}_i \,
\delta(\,\cdot\,))$, and ${S_-}_j^i ({\boldsymbol x})$ as the
$j$-component of $CP_- ({\boldsymbol e}_i \, \delta(\,\cdot\,))$.
The linear mappings $\alpha_\mathrm{e}({\boldsymbol e}_j): E_{q'} \to E_{(x'
\cup {\boldsymbol x}',{\widetilde{x}}')}$ (``append an electron with spinor
${\boldsymbol e}_j$'') and $\varepsilon_\mathrm{e}({\boldsymbol e}_j): E_{q'} \to
E_{(x' \setminus {\boldsymbol x}',{\widetilde{x}}')}$ (``erase an electron, contracting with
spinor ${\boldsymbol e}_j$'') are defined through their properties that
for $\Psi \in E_{q'}$,
\begin{subequations}
\begin{align}
&\alpha_\mathrm{e} \Psi \text{ is appropriately symmetrized} \\
&\big(\alpha_\mathrm{e}({\boldsymbol e}_j) \Psi\big) ((\hat{x}',
{\boldsymbol x}'),\hat{{\widetilde{x}}'})
=
\frac{1}{\sqrt{n'+1}} \, \Psi(\hat{x}',\hat{{\widetilde{x}}'}) \otimes
{\boldsymbol e}_j \\
&\big(\varepsilon_\mathrm{e}({\boldsymbol e}_j) \Psi\big) (\hat{x}, \hat{{\widetilde{x}}'})
=
\sqrt{n'} \, \Psi_j ((\hat{x}, {\boldsymbol x}'),\hat{{\widetilde{x}}'}),
\end{align}
\end{subequations}
where $\hat{x}$ is an arbitrary ordering of $x=x' \setminus {\boldsymbol x}'$,
$\hat{x}'$ one of $x'$, and $\hat{{\widetilde{x}}'}$ one of ${\widetilde{x}}'$. We refer to
the last electron slot when writing the tensor product or taking the
$j$-component. $\alpha_\mathrm{p} ({\boldsymbol e}_j)$ and
$\varepsilon_\mathrm{p}({\boldsymbol e}_j)$ are defined analogously.
For the four terms in \eqref{HIterms}, we thus get the four diagrams
(omitting the multiplication by $A^{i,j}({\boldsymbol x})$)
\begin{subequations}
\begin{align}
(x',{\widetilde{x}}') &\xrightarrow [b_j({\boldsymbol x})] {\sum_k
{S_+}^{k}_{j} ({\boldsymbol x}' - {\boldsymbol x}) \,
\varepsilon_\mathrm{e}({\boldsymbol e}_k)} (x'\setminus {\boldsymbol x}',{\widetilde{x}}')
\xrightarrow
[b^*_i({\boldsymbol x})] {\sum_l {S_+}^{l}_{i}
({\boldsymbol x}'' - {\boldsymbol x}) \, \alpha_\mathrm{e}({\boldsymbol e}_l)} (x'\setminus {\boldsymbol x}' \cup
{\boldsymbol x}'',{\widetilde{x}}') \\
(x',{\widetilde{x}}') &\xrightarrow [b_j({\boldsymbol x})] {\sum_k
{S_+}^{k}_{j} ({\boldsymbol x}' - {\boldsymbol x}) \,
\varepsilon_\mathrm{e}({\boldsymbol e}_k)} (x'\setminus {\boldsymbol x}',{\widetilde{x}}')
\xrightarrow
[d_i({\boldsymbol x})] {\sum_l {S_-}^{l}_{i}
({\widetilde{\vx}}' - {\boldsymbol x}) \, \varepsilon_\mathrm{p}({\boldsymbol e}_l)} (x'\setminus
{\boldsymbol x}',{\widetilde{x}}'\setminus {\widetilde{\vx}}')\\
(x',{\widetilde{x}}') &\xrightarrow [d^*_j({\boldsymbol x})] {\sum_k
{S_-}^{k}_{j} ({\widetilde{\vx}}' - {\boldsymbol x}) \,
\alpha_\mathrm{p}({\boldsymbol e}_k)} (x',{\widetilde{x}}'\cup {\widetilde{\vx}}') \xrightarrow
[b^*_i({\boldsymbol x})] {\sum_l {S_+}^{l}_{i}
({\boldsymbol x}' - {\boldsymbol x}) \, \alpha_\mathrm{e}({\boldsymbol e}_l)} (x'\cup {\boldsymbol x}',{\widetilde{x}}' \cup
{\widetilde{\vx}}')\\
(x',{\widetilde{x}}') &\xrightarrow [d_i({\boldsymbol x})] {\sum_k
{S_-}^{k}_{i} ({\widetilde{\vx}}' - {\boldsymbol x}) \,
\varepsilon_\mathrm{p}({\boldsymbol e}_k)} (x',{\widetilde{x}}'\setminus {\widetilde{\vx}}')
\xrightarrow
[d^*_j({\boldsymbol x})] {\sum_l
{S_-}^{l}_{j} ({\widetilde{\vx}}'' - {\boldsymbol x}) \,
\alpha_\mathrm{p}({\boldsymbol e}_l)} (x',{\widetilde{x}}'\setminus {\widetilde{\vx}}' \cup {\widetilde{\vx}}'').
\end{align}
\end{subequations}
We read off that the first term corresponds to the jump of a single
electron from ${\boldsymbol x}'$ to ${\boldsymbol x}''$, while all other particles remain
where they were, the second to the annihilation of an
electron--positron pair at locations ${\boldsymbol x}'$ and ${\widetilde{\vx}}'$, the third to
the creation of an electron--positron pair at locations ${\boldsymbol x}'$ and
${\widetilde{\vx}}'$, and the last to the jump of a positron from ${\widetilde{\vx}}'$ to
${\widetilde{\vx}}''$. The corresponding jump rates are
\begin{subequations}\label{elposrates}
\begin{align}
\sigma_\mathrm{e} (x'\setminus {\boldsymbol x}' \cup {\boldsymbol x}'',{\widetilde{x}}'|q') &= \frac{[(2/\hbar)
\, \mathrm{Im} \, \Psi^*(q) \sum_{k,l}
\chi_\mathrm{e}^{k, l} ({\boldsymbol x}',{\boldsymbol x}'')
\alpha_\mathrm{e}({\boldsymbol e}_l) \varepsilon_\mathrm{e}({\boldsymbol e}_k)\,
\Psi(q')]^+}{\Psi^*(q') \, \Psi(q')} \\
\sigma_{\mathrm{ann}} (x'\setminus {\boldsymbol x}',{\widetilde{x}}'\setminus {\widetilde{\vx}}'|q') &=
\frac{[(2/\hbar) \, \mathrm{Im} \, \Psi^*(q) \sum_{k,l}
\chi_{\mathrm{ann}}^{k, l} ({\boldsymbol x}',{\widetilde{\vx}}')
\varepsilon_\mathrm{p}({\boldsymbol e}_l) \varepsilon_\mathrm{e}({\boldsymbol e}_k)\,
\Psi(q')]^+}{\Psi^*(q') \, \Psi(q')} \\
\sigma_{\mathrm{crea}} (x'\cup {\boldsymbol x}',{\widetilde{x}}' \cup {\widetilde{\vx}}'|q') &= \frac{[(2/\hbar) \,
\mathrm{Im} \, \Psi^*(q) \sum_{k,l}
\chi_{\mathrm{crea}}^{k, l} ({\boldsymbol x}',{\widetilde{\vx}}')
\alpha_\mathrm{e}({\boldsymbol e}_l) \alpha_\mathrm{p}({\boldsymbol e}_k)\,
\Psi(q')]^+}{\Psi^*(q') \, \Psi(q')} \\
\sigma_\mathrm{p} (x',{\widetilde{x}}'\setminus {\widetilde{\vx}}' \cup {\widetilde{\vx}}''|q') &=
\frac{[(2/\hbar) \, \mathrm{Im} \, \Psi^*(q) \sum_{k,l}
\chi_\mathrm{p}^{k, l} ({\widetilde{\vx}}',{\widetilde{\vx}}'')
\alpha_\mathrm{p}({\boldsymbol e}_l) \varepsilon_\mathrm{p}({\boldsymbol e}_k)\,
\Psi(q')]^+}{\Psi^*(q') \, \Psi(q')},
\end{align}
\end{subequations}
where $q$ denotes the respective destination, and
\begin{subequations}
\begin{align}
\chi_\mathrm{e}^{k, l}({\boldsymbol x}',{\boldsymbol x}'') =\quad &
\sum\limits_{i,j} \int d^3{\boldsymbol x} \,
{S_+}^{l}_{i} ({\boldsymbol x}''-{\boldsymbol x}) \, A^{i,j}
({\boldsymbol x}) \, {S_+}^{k}_{j} ({\boldsymbol x}'-{\boldsymbol x}) \\
\chi_{\mathrm{ann}}^{k, l} ({\boldsymbol x}',{\widetilde{\vx}}') =\quad &
\sum\limits_{i,j} \int d^3{\boldsymbol x} \,
{S_-}^{l}_{i} ({\widetilde{\vx}}'-{\boldsymbol x}) \, A^{i,j}
({\boldsymbol x}) \, {S_+}^{k}_{j} ({\boldsymbol x}'-{\boldsymbol x}) \\
\chi_{\mathrm{crea}}^{k, l} ({\boldsymbol x}',{\widetilde{\vx}}') =\quad
&\sum\limits_{i,j} \int d^3{\boldsymbol x} \,
{S_+}^{l}_{i} ({\boldsymbol x}'-{\boldsymbol x}) \, A^{i,j} ({\boldsymbol x})
\, {S_-}^{k}_{j} ({\widetilde{\vx}}'-{\boldsymbol x}) \\
\chi_\mathrm{p}^{k, l} ({\widetilde{\vx}}',{\widetilde{\vx}}'') =
-&\sum\limits_{i,j} \int d^3{\boldsymbol x} \,
{S_-}^{l}_{j} ({\widetilde{\vx}}''-{\boldsymbol x}) \, A^{i,j}
({\boldsymbol x}) \, {S_-}^{k}_{i} ({\widetilde{\vx}}'-{\boldsymbol x}).
\end{align}
\end{subequations}
The process for $H_0 + H_{I}$ that we obtain through process
additivity is the motion \eqref{elposmotion} interrupted by stochastic
jumps with rates \eqref{elposrates}.
Note that the jump of a single electron has small probability to be
across a distance much larger than the width of the functions $S_\pm$,
which is of the order of the Compton wavelength of the
electron. Similarly, the distance $|{\boldsymbol x}-{\widetilde{\vx}}|$ of a newly created
pair, or of a pair at the moment of annihilation, has small probability
to be much larger than the width of $S_\pm$. While the jump of a
single electron or positron leaves the number $N$ of electrons and the
number ${\widetilde{N}}$ of positrons unchanged, pair creation and annihilation
can only either decrease or increase both $N$ and ${\widetilde{N}}$ by $1$. As a
consequence, the actual net charge ${\widetilde{N}}-N$ is conserved by the
process.
\section{Second Quantization of a Markov Process}\label{sec:morefree}
\subsection{Preliminaries Concerning the Conditional Density Matrix}
In the next section, we describe the algorithm for the ``second
quantization'' of a process. But before that, we have to introduce, as
a preparation, the notion of a conditional density matrix. In
\cite{DGZ}, we have defined for Bohmian mechanics the
\emph{conditional wave function} of, say, subsystem 1 of a composite
system with configuration space $\mathcal{Q} = \mathcal{Q}_1 \times \mathcal{Q}_2$ by
$\Psi_\mathrm{cond}(q_1) = \Psi(q_1,Q_2)$. {}From a complex wave function $\Psi
: \mathcal{Q} \to \mathbb{C}$, together with the actual configuration $Q_2$ of the
environment of the subsystem in the composite, we thus form a wave
function $\Psi_\mathrm{cond}: \mathcal{Q}_1 \to \mathbb{C}$; for Bohmian mechanics with
spin, in contrast, we would not, in general, obtain a suitable wave
function for subsystems in this way, because $\Psi_\mathrm{cond}$ as just
defined would have more spin indices than appropriate. We can however
still define the \emph{conditional density matrix} for subsystem 1,
\begin{equation}\label{WPsi}
W_{\mathrm{cond} \, s_1,s_1'}(q_1,q_1') = \frac{1}{\gamma} \sum_{s_2}
\Psi_{s_1,s_2} (q_1,Q_2) \, \Psi^*_{s_1',s_2} (q_1', Q_2)
\end{equation}
where the $s$'s are spin indices. In order that $W$, like any
density matrix, have trace 1, the normalizing factor $\gamma$ must be
chosen as
\[
\gamma = \int\limits_{q_1 \in \mathcal{Q}_1} \sum_{s_1,s_2} \Psi^*_{s_1,s_2}
(q_1,Q_2) \, \Psi_{s_1,s_2} (q_1,Q_2) \, dq_1\,.
\]
This $W$ can play most of the roles of the conditional wave
function in spinless Bohmian mechanics. The notion of a conditional
density matrix easily generalizes from the situation just described,
corresponding to wave functions in $L^2(\mathcal{Q},\mathbb{C}^k)$ and the natural
localization PVM, to the situation of any product localization POVM on
any tensor product Hilbert space: for $\mathscr{H} = \mathscr{H}_1
\otimes \mathscr{H}_2$ and ${P}(dq_1 \times dq_2) = {P}_1(dq_1) \otimes
{P}_2(dq_2)$, set
\begin{equation}\label{WPOV}
W_\mathrm{cond} = \frac{\mathrm{tr}_2 \big( |\Psi\rangle\langle\Psi| \, {P}
(\mathcal{Q}_1 \times dq_2) \big)} {\mathrm{tr} \big( |\Psi\rangle\langle\Psi|
\, {P}(\mathcal{Q}_1 \times dq_2) \big)} \Big|_{q_2 = Q_2}\,,
\end{equation}
where $\mathrm{tr}_2$ is the partial trace over $\mathscr{H}_2$. The quotient is
to be understood as a Radon--Nikod{\'y}m derivative in $q_2$. Like
conditional wave functions, conditional density matrices cannot be
defined in orthodox quantum theory, for lack of the configuration
$Q_2$. We stress that conditional density matrices have nothing,
absolutely nothing, to do with statistical ensembles of state vectors
in $\mathscr{H}_1$. Like any density matrix, they do, however, define a
probability distribution on $\mathcal{Q}_1$,
\begin{equation}\label{PW}
\mathbb{P}^{W_\mathrm{cond}}_1 (\,\cdot\,) = \mathrm{tr} \big(W_\mathrm{cond} \,
{P}_1(\,\cdot\,) \big)\,,
\end{equation}
which coincides with the conditional distribution of $Q_1$ given $Q_2$,
\[
\mathbb{P}(Q_1 \in \,\cdot\,|Q_2) = \frac{\sp{\Psi}{{P}_1(\,\cdot\,)
\otimes {P}_2(dq_2)| \Psi}} {\sp{\Psi}{\mathbf{1} \otimes {P}_2(dq_2)|
\Psi}} \Big|_{q_2 = Q_2}\,.
\]
The evolution of $W_\mathrm{cond}$ is not autonomous; it will typically depend
on (and always be determined by) $\Psi_t$ and $Q_{2,t}$. For a given
density matrix $W$ of a system that is not regarded as a subsystem,
however, one can \emph{define} (as usual) the time evolution by $W_t =
e^{-i H t/\hbar} \, W \, e^{i H t/\hbar}$, which gives rise to a
time-dependent distribution $\mathbb{P}^{W_t} (\,\cdot\,) = \mathrm{tr} (W_t
{P}(\,\cdot\,))$. We call a Markov process that is
$\mathbb{P}^{W_t}$-distributed at every time $t$ \emph{equivariant} with
respect to $W$ and $H$. Given the right initial distribution, this is
equivalent to the following condition on the generator:
\begin{equation}\label{Wequi}
\mathscr{L} \mathbb{P}^W (\,\cdot\,) = \frac{2}{\hbar} \, \mathrm{Im} \,
\mathrm{tr}(W \, {P}(\,\cdot\,) \, H)\,.
\end{equation}
This is the version of (\ref{mainequ}) for density matrices, and
defines an \emph{equivariant generator} with respect to $W$ and $H$.
Since conditional density matrices will play a crucial role in the
construction of the many-particle process, we require that, as part of
the input data of the algorithm, we are given an equivariant generator
$\mathscr{L}^{(1)}_W$ for every density matrix from a dense subset of
the density matrices in $\mathscr{H}^{(1)*} \otimes \mathscr{H}^{(1)}$.
This is not much of a restriction, as all relevant examples of
equivariant generators naturally extend to density matrices: Bohmian
mechanics with spin space $\mathbb{C}^k$ can be extended \cite{Belldensity}
to
\begin{equation}\label{vW}
v^W(q) = \hbar \, \mathrm{Im} \, \frac{\nabla_{q} \mathrm{tr}_{\mathbb{C}^k} \,
W(q,q')}{\mathrm{tr}_{\mathbb{C}^k} \, W(q,q')} (q'=q)\,,
\end{equation}
Bohm--Dirac to
\begin{equation}\label{vWDirac}
v^W(q) = \frac{\mathrm{tr}_{\mathbb{C}^4} (W(q,q) {\boldsymbol \alpha})}{\mathrm{tr}_{\mathbb{C}^4}
(W(q,q))} \,,
\end{equation}
and minimal jump rates to
\begin{equation}\label{sigmaW}
\sigma^W (dq|q') = \frac{[(2/\hbar)\, \mathrm{Im} \, \mathrm{tr}(W {P}(dq) H
{P}(dq'))]^+} {\mathrm{tr}(W {P}(dq'))} \,.
\end{equation}
Note also that (\ref{vW}) would not make any sense if $W$ represented
a statistical ensemble \cite{Belldensity}, whereas it makes good sense
for conditional density matrices, expressing the true relation between
the Bohmian velocity for a subsystem arising from (\ref{Bohm}) and the
conditional density matrix (\ref{WPsi}) of that subsystem. Mutatis
mutandis, the same is true of (\ref{vWDirac}). Similarly, in case that
${P}$ is a PVM, (\ref{sigmaW}) expresses the jump rates for a
decoupled subsystem arising from \eqref{tranrates} for the composite
in terms of the conditional density matrix of that subsystem.
\subsection{Algorithm}
\label{sec:Gamma}
The input data of this algorithm are the one-particle Hilbert space
$\mathscr{H}^{(1)}$, configuration space $\mathcal{Q}^{(1)}$, POVM
${P}^{(1)}$, and a family of generators $\mathscr{L}^{(1)} =
\mathscr{L}^{(1)}_W$ labeled by the density matrices $W$ from a dense
subset of the density matrices in $\mathscr{H}^{(1)*} \otimes
\mathscr{H}^{(1)}$. The output is a family of generators $\Gamma
\mathscr{L}^{(1)} = \mathscr{L}_0 = \mathscr{L}_{0,\Psi}$ labeled by the
state vectors $\Psi$ in (a dense subspace of) Fock space. If
$\mathscr{L}^{(1)}_W$ is equivariant with respect to $W$ and $H^{(1)}$,
then $\mathscr{L}_{0,\Psi}$ is equivariant with respect to $\Psi$ and
$H_0$.
The algorithm is based on two procedures for suitably combining
generators for direct sums or tensor products of Hilbert spaces.
\subsubsection{Direct Sums}\label{sec:directsum}
Given a finite or countable sequence of Hilbert
spaces $\mathscr{H}^{(n)}$ with POVMs ${P}^{(n)}$ on configuration
spaces $\mathcal{Q}^{(n)}$, and for each $n$ a family of generators
$\mathscr{L}^{(n)}$ labeled by the vectors in $\mathscr{H}^{(n)}$, there
is a canonically constructed family of generators $\mathscr{L}^\oplus
= \mathscr{L}^\oplus_\Psi$, labeled by the vectors in the direct sum
$\bigoplus_n \mathscr{H}^{(n)}$. The space $\mathcal{Q}$ in which the
corresponding process takes place is the disjoint union of the
$\mathcal{Q}^{(n)}$. If every $\mathscr{L}^{(n)}_{\Psi_n}$ is equivariant
with respect to $\Psi_n \in \mathscr{H}^{(n)}$ and $H^{(n)}$, then
$\mathscr{L}^\oplus_\Psi$ is equivariant with respect to $\Psi \in
\bigoplus_n \mathscr{H}^{(n)}$ and $\bigoplus_n H^{(n)}$.
Here are the details. The POVM ${P} = \bigoplus_n {P}^{(n)}$ on
$\mathcal{Q}$ that naturally arises from the data is given by ${P}(B) =
\bigoplus_n {P}^{(n)} (B \cap \mathcal{Q}^{(n)})$ for $B \subseteq
\mathcal{Q}$. Let $P_n$ denote the projection $\mathscr{H} \to
\mathscr{H}^{(n)}$. The generator $\mathscr{L}^\oplus$ is given by
\begin{equation}
\big( \mathscr{L}_\Psi^\oplus \, \rho \big) \big|_{\mathcal{Q}^{(n)}} =
\mathscr{L}_{P_n\Psi/\|P_n\Psi\|}^{(n)} \big(
\rho \big|_{\mathcal{Q}^{(n)}} \big)\,.
\end{equation}
It generates a (Markov) process $Q_t^\oplus$ such that when
$Q_0^\oplus \in \mathcal{Q}^{(n)}$, it is generated by the state vector
$P_n \Psi/ \|P_n \Psi \|$, i.e., it is a Markov process $Q_t^{(n)}$
in $\mathcal{Q}^{(n)}$ generated by $\mathscr{L}^{(n)}_{P_n \Psi/
\|P_n\Psi\|}$. The equivariance statement follows directly, since
$\|P_n \Psi_t\|^2 = \mathbb{P}_t (\mathcal{Q}^{(n)})$ is invariant under the
evolution generated by $H_0 = \bigoplus_n H^{(n)}$.
\subsubsection{Tensor Products}\label{sec:tensorproduct}
Given a finite sequence of Hilbert spaces
$\mathscr{H}^{[1]}, \ldots, \mathscr{H}^{[n]}$ with POVMs ${P}^{[i]}$ on
configuration spaces $\mathcal{Q}^{[i]}$, and for each $i$ a family of
generators $\mathscr{L}^{[i]} = \mathscr{L}^{[i]}_{W_i}$ labeled
by the density matrices on $\mathscr{H}^{[i]}$, there is a canonically
constructed family of generators $\mathscr{L}^\otimes =
\mathscr{L}^\otimes_W$, labeled by the density matrices on the tensor
product $\mathscr{H}^{[1]} \otimes \cdots \otimes \mathscr{H}^{[n]}$. The
corresponding process takes place in the Cartesian product $\mathcal{Q} =
\mathcal{Q}^{[1]} \times \cdots \times \mathcal{Q}^{[n]}$. If every
$\mathscr{L}^{[i]}_{W_i}$ is equivariant with respect to the density
matrix $W_i$ on $\mathscr{H}^{[i]}$ and the Hamiltonian $H^{[i]}$, then
$\mathscr{L}^\otimes_W$ is equivariant with respect to $W$ on
$\mathscr{H}^{[1]} \otimes \cdots \otimes \mathscr{H}^{[n]}$ and $H =
\sum\limits_i \mathbf{1} \otimes \cdots \otimes H^{[i]} \otimes \cdots
\otimes \mathbf{1} = \sum\limits_i H_i$.
% Malformed macro definition removed (its name was garbled to its own
% expansion); $\widehat{q}_i$ is written out explicitly below.
Here are the details. The POVM that naturally arises from the data
is\footnote{The existence of the tensor product POVM is a
consequence of Corollary~7 in Section~4.4 of \cite{crea2A}.}
\begin{equation}\label{productpovm}
{P}(d{\boldsymbol q}_1 \times \cdots \times d{\boldsymbol q}_n) = {P}^{[1]}(d{\boldsymbol q}_1)
\otimes \cdots \otimes {P}^{[n]}(d{\boldsymbol q}_n).
\end{equation}
For any $q \in \mathcal{Q}$, let ${\boldsymbol q}_i$ denote its $i$-th component and
let $\widehat{q}_i = ({\boldsymbol q}_1, \ldots, {\boldsymbol q}_{i-1}, {\boldsymbol q}_{i+1}, \ldots,
{\boldsymbol q}_n)$. For every $i$ and $\widehat{q}_i$, define
\[
W_i (\widehat{q}_i) = \frac{\mathrm{tr}_{\neq i} \big( W {P}(d{\boldsymbol q}_1 \times \cdots
\times \mathcal{Q}^{[i]} \times \cdots \times d{\boldsymbol q}_n) \big)} {\mathrm{tr} \big(W
{P}(d{\boldsymbol q}_1 \times \cdots \times \mathcal{Q}^{[i]} \times \cdots \times
d{\boldsymbol q}_n) \big)}\,,
\]
where $\mathrm{tr}_{\neq i}$ is the partial trace over all factors except
$\mathscr{H}^{[i]}$. This $W_i$ is the conditional density matrix,
regarded as a function of the configuration $\widehat{q}_i$ of the other
particles. Now consider the process on $\mathcal{Q}$ according to which
the $i$-th particle moves as prescribed by $\mathscr{L}^{[i]}_{W_i}$
while the other particles remain fixed. The generator of this
process is
\begin{equation}\label{Lidef}
\mathscr{L}_i \, \rho := \Big[ \mathscr{L}^{[i]}_{W_i(\widehat{q}_i)} \,
\rho( \,\cdot\,| \widehat{q}_i) \Big] \, \rho_{\neq i}(d\widehat{q}_i)
\end{equation}
where $\rho_{\neq i}$ is the marginal distribution of
$\widehat{Q}_i$ (i.e., $\rho$ integrated over ${\boldsymbol q}_i$) and
$\rho(\,\cdot\,|\widehat{q}_i)$ is the conditional distribution of ${\boldsymbol Q}_i$
given $\widehat{Q}_i = \widehat{q}_i$; the square bracket is a function of
$\widehat{q}_i$ and a measure in $d{\boldsymbol q}_i$. Now define $\mathscr{L}^\otimes_W
\rho = \sum\limits_i \mathscr{L}_i \rho$.
To see that $\mathscr{L}^\otimes$ is equivariant when the
$\mathscr{L}^{[i]}$ are, we have to check (\ref{Wequi}). Note first
that $\mathbb{P}^W(d{\boldsymbol q}_i|\widehat{q}_i) = \mathrm{tr} \big( W_i(\widehat{q}_i) \,
{P}^{[i]}(d{\boldsymbol q}_i) \big)$. Due to the equivariance of
$\mathscr{L}^{[i]}$, for $\rho = \mathbb{P}^W$ the square bracket in
(\ref{Lidef}) equals $(2/\hbar) \, \mathrm{Im} \, \mathrm{tr} \big( W_i (\widehat{q}_i) \,
{P}^{[i]}(d{\boldsymbol q}_i) \, H^{[i]} \big)$, from which we obtain
(\ref{Wequi}) for $\mathscr{L}_i$ and $H_i$ and hence for
$\mathscr{L}^\otimes$ and $H$.
The definition of $\mathscr{L}^\otimes$ reproduces the many-particle
Bohm law (\ref{Bohm}) with or without spin from the one-particle
version (or, for distinguishable particles, from several different
one-particle versions having different masses and spins). Similarly,
it reproduces the many-particle Bohm--Dirac law (\ref{BohmDirac})
from the one-particle version.
\subsubsection{Second Quantization of the POVM}\label{sec:GammaPOVM}
Let $\mathcal{Q}^{(n)}$ denote the space of all subsets-with-multiplicities
of $\mathcal{Q}^{(1)}$ having $n$ elements (counting in the multiplicities).
${P}^{(1)}$ naturally defines a POVM ${P}^{(1)\otimes n}$ on
$(\mathcal{Q}^{(1)})^n$ acting on $\mathscr{H}^{(1)\otimes n}$ by
${P}^{(1)\otimes n}(d{\boldsymbol q}_1 \times \cdots \times d{\boldsymbol q}_n) =
{P}^{(1)}(d{\boldsymbol q}_1) \otimes \cdots \otimes {P}^{(1)}(d{\boldsymbol q}_n)$, and a
POVM ${P}^{(n)}$ on $\mathcal{Q}^{(n)}$ acting on $\mathscr{F}^{(n)} = P_\pm
\mathscr{H}^{(1)\otimes n}$ (the $n$-particle sector of Fock space, with
$P_\pm$ the projection to the subspace of (anti\nobreakdash-)symmetric
elements of $\mathscr{H}^{(1)\otimes n}$, depending on whether we deal
with fermions or bosons) by
\[
{P}^{(n)}(B) = {P}^{(1)\otimes n} \big\{({\boldsymbol q}_1, \ldots, {\boldsymbol q}_n)
\in (\mathcal{Q}^{(1)})^n : \{{\boldsymbol q}_1, \ldots, {\boldsymbol q}_n\} \in B \big\}
\]
for $B \subseteq \mathcal{Q}^{(n)}$, where $\{{\boldsymbol q}_1, \ldots, {\boldsymbol q}_n\}$ should
be understood as a set-with-multiplicities.\footnote{This agrees with
the definition given in Section \ref{sec:crea1} for the case of a PVM
and the coincidence configurations removed from configuration space.}
Since ${P}^{(n)}(B)$ is invariant under permutations, it maps
symmetric to symmetric and anti-symmetric to anti-symmetric elements
of $\mathscr{H}^{(1)\otimes n}$ and thus acts on $\mathscr{F}^{(n)}$ for
bosonic or fermionic Fock space.\footnote{In case that ${P}^{(1)}$ is
nonatomic, ${P}^{(n)}$ can equivalently be defined in the following
way: For the set $\Delta$ of coincidence configurations we set
${P}^{(n)}(\Delta) =0$, and for volumes $d{\boldsymbol q}_1, \ldots, d{\boldsymbol q}_n$ in
$\mathcal{Q}^{(1)}$ that are pairwise disjoint, we have a corresponding
volume $dq$ in $\mathcal{Q}^{(n)}$, which can be obtained from $d{\boldsymbol q}_1
\times \cdots \times d{\boldsymbol q}_n \subseteq (\mathcal{Q}^{(1)})^n$ by forgetting
the ordering, and we set ${P}^{(n)}(dq) = n! \, P_\pm \,
{P}^{(1)}(d{\boldsymbol q}_1) \otimes \cdots \otimes {P}^{(1)}(d{\boldsymbol q}_n) \,
P_\pm$.} The corresponding POVM on $\mathcal{Q}$ is then ${P} = \Gamma
{P}^{(1)} = \bigoplus_n {P}^{(n)}$; more precisely, for $B \subseteq
\mathcal{Q}$,
\[
{P}(B) = \bigoplus_{n=0}^\infty {P}^{(n)} (B\cap \mathcal{Q}^{(n)})\,.
\]
\subsubsection{Construction of the Free Process}
Equipped with the two procedures for direct sums and tensor products,
we complete the construction of the free process.
The ``tensor product'' procedure above provides a process on
$(\mathcal{Q}^{(1)})^n$ from $n$ identical copies of $\mathscr{L}^{(1)}$.
For a state vector $\Psi^{(n)} \in \mathscr{F}^{(n)} = P_\pm
\mathscr{H}^{(1)\otimes n}$ from either the symmetric or the
anti-symmetric elements of the $n$-fold tensor product space, let $W$
be the projection to $\Psi^{(n)}$; the generator
$\mathscr{L}^\otimes_W$ is permutation invariant because the
tensor-product construction of $\mathscr{L}^\otimes _W$ is permutation
covariant and a permutation can at most change the state vector by a
minus sign, which does not affect the density matrix. Consequently,
the ordering of the configuration is irrelevant and may be ignored. We
thus obtain a process on $\mathcal{Q}^{(n)}$ whose generator we call
$\mathscr{L}^{(n)}$. We now apply the ``direct sum'' procedure to
obtain a process on $\mathcal{Q}$.
\section{Towards a Notion of Minimal Process}
In this section, we investigate the common traits of the Markov
processes relevant to Bell-type QFT, which can be summarized in the
notion of a \emph{minimal process} associated with $\Psi,H$, and
${P}$. We begin with a closer study of the minimal free generator
\eqref{LH}, and then explain why we call the minimal jump rates
``minimal.'' Finally, in Section \ref{sec:mini}, we give an outlook on
the notion of minimal process.
\subsection{Free Process From Differential Operators}
\label{sec:freeflow}
In this section, we discuss some of the details, concerning the two
equivalent formulas \eqref{LH} and \eqref{genH} for the backward and
forward version of the minimal free generator in terms of $H, {P}$,
and $\Psi$, that we omitted in Section \ref{sec:free2}. To begin
with, $L$ as defined by \eqref{LH} satisfies some necessary conditions
for being a backward generator: $Lf(q)$ is real, and $L\mathbf{1} =0$ where
$\mathbf{1}$ is the constant 1 function (this corresponds to $\mathscr{L} \rho
(\mathcal{Q}) =0$, or conservation of total probability). In case $L$ is
indeed a backward generator, the corresponding process is equivariant
because
\[
\mathscr{L} \mathbb{P} (dq) \stackrel{\eqref{genH}}{=} \mathrm{Re} \, \sp{\Psi}
{\hat{\mathbf{1}}\,\frac{i}{\hbar} [H,{P}(dq)] |\Psi} = \frac{2}{\hbar} \,
\mathrm{Im} \, \sp{\Psi} {{P}(dq) H|\Psi} \stackrel{\eqref{dPdt}}{=}
\dot{\mathbb{P}}(dq)\,.
\]
One way to arrive at formula \eqref{LH} has been described in Section
\ref{sec:free2}. A different way, leading to \eqref{genH}, is to
start from the ansatz $\mathscr{L} \rho = A\frac{d\rho}{d\mathbb{P}}$
where $A$ denotes a (signed-measure-valued) linear operator acting on
functions. Equivariance means $A\mathbf{1} (dq) = \sp{\Psi}{\frac{i}{\hbar}
[H, {P}(dq)] |\Psi}$. This suggests $Af(dq) = \sp{\Psi}{\hat{f}\,
\frac{i}{\hbar} [H, {P}(dq)] |\Psi}$, or $Af(dq) =
\sp{\Psi}{\frac{i}{\hbar} [H, {P}(dq)]\, \hat{f} |\Psi}$, or a
convex combination thereof. Since $Af(dq)$ must be real, we are
forced to choose the combination with coefficients $\frac{1}{2}$ and
$\frac{1}{2}$, or equivalently $Af(dq) = \mathrm{Re}\, \sp{\Psi}{\hat{f}\,
\frac{i}{\hbar} [H, {P}(dq)] |\Psi}$, which is \eqref{genH}.
That $\mathscr{L}$ generates a deterministic process (when it is a
generator at all) is suggested by the following consideration---at
least when $H$ and ${P}$ are time-reversal invariant: replacing
$\Psi$ in \eqref{genH} by $T\Psi$ where $T$ is the anti-linear time
reversal operator (see Section \ref{sec:symm}) changes the sign of
$\mathscr{L}$. The only generators $\mathscr{L}$ such that $-\mathscr{L}$
is also a generator are, presumably, those corresponding to
deterministic motion.
This gives us an opportunity to check for which $H$ \eqref{LH} does
define a process: for a deterministic process we must have $L = v\cdot
\nabla$ where $v$ is the velocity vector field. It is known that
vector fields, understood as first-order differential operators, are
those linear operators $L$ on the space of smooth functions that
satisfy the Leibniz rule $L(fg) = fLg + gLf$. \eqref{LH} is certainly
linear in $f$, so we have to check the Leibniz rule to see whether $L$
is indeed of the form $v\cdot \nabla$ and thus the backward generator
of a process.
We can see no reason why $L$ should satisfy a Leibniz rule unless
${P}$ is a PVM, which implies that
\begin{equation}\label{fPOV}
\hat{f} \, {P}(dq) = f(q) \, {P}(dq)\,,
\end{equation}
and $H$ is such that for all (nice) functions $f$ and $g$,
\begin{equation}\label{Hdiff}
\big[ [ H,\hat{f}] , \hat{g} \big] = \hat{h}
\end{equation}
for some function $h$, which holds if $H$ is a differential operator
of order $\leq 2$. (If $H=-\Delta$, then $h=- 2 \nabla f \cdot
\nabla g$; if $H=-i \, {\boldsymbol \alpha} \cdot \nabla$ for whatever vector of
matrices ${\boldsymbol \alpha}$, or if $H$ is a multiplication operator, then
$h=0$.) To check that the Leibniz rule is obeyed in this case, note
that we then have that $[H, \widehat{fg}] = [H, \hat{f} \hat{g}] =
[H,\hat{f}] \hat{g} + \hat{f} [H,\hat{g}] = \hat{f} [H, \hat{g}] +
\hat{g} [H, \hat{f}] + \big[ [H, \hat{f}], \hat{g} \big]$. Using this
in \eqref{LH}, we find that, due to \eqref{fPOV}, the first two terms
give the Leibniz rule, whereas the last term, due to \eqref{Hdiff},
does not contribute to the real part in \eqref{LH}.
When $\mathscr{H}$ is an $L^2$ space over $\mathcal{Q}$ and ${P}$ the natural
PVM, i.e., when $\Psi$ is a function, \eqref{LH} can be written in the
form
\begin{equation}\label{vH}
L f(q)= \frac{1}{\hbar}\, im \, \frac{\Psi^*(q) \,
([\hat{f},H]\Psi)(q)} {\Psi^*(q) \, \Psi(q)}
\end{equation}
where $\hat{f}$ is the multiplication operator corresponding to $f$.
{}From this, one easily reads off the Bohm velocity \eqref{Bohm} for the
$N$-particle Schr\"odinger operator \eqref{Hamil} with or without
spin. Similarly, we get the Bohm--Dirac theory when $H$ is the Dirac
operator in $\mathscr{H} = {\mathrm{Anti}\,} L^2(\mathbb{R}^3,\mathbb{C}^4)^{\otimes N}$, $\mathcal{Q}$
the manifold of subsets of $\mathbb{R}^3$ with $N$ elements, and ${P}$ the
obvious PVM. \eqref{vH} also leads to the Bohm--Dirac motion if
$\mathscr{H} = L^2(\mathbb{R}^3,\mathbb{C}^4)^{\otimes N}$, $\mathcal{Q} = \mathbb{R}^{3N}$, and
${P}$ is the natural PVM, but not if $\mathscr{H}$ is the positive
energy subspace because then the appropriate POVM ${P}$ is no longer
a PVM.
To see that the ``second quantization'' algorithm maps minimal free
generators to minimal free generators, or, in other words, preserves
the relation \eqref{genH} between Hamiltonian and generator, observe
first that \eqref{genH} naturally extends to density matrices, and the
extension, if a generator, is equivariant. Next check that the
``direct sum'' and ``tensor product'' procedures of Section
\ref{sec:Gamma} are compatible with \eqref{genH} when ${P}$ is a PVM.
Finally, observe that the (anti\nobreakdash-)symmetrization operator
commutes with the $n$-particle Hamiltonian, with ${P}(B)$ for every
permutation invariant set $B \subseteq (\mathcal{Q}^{(1)})^n$, and with
$\hat{f}$ for every permutation invariant function $f:(\mathcal{Q}^{(1)})^n
\to \mathbb{R}$.
\subsection{Minimality}
\label{sec:mini4}
In this section we explain in what sense the minimal jump rates
\eqref{tranrates}---or \eqref{mini1}---are minimal. In so doing, we
will also explain the significance of the quantity $\mathbb{J}$ defined
in \eqref{Jdef}, and clarify the meaning of the steps taken in
Sections \ref{sec:mini1} and \ref{sec:mini2} to arrive at the jump
rate formulas.
Given a Markov process $Q_t$ on $\mathcal{Q}$, we define the \emph{net
probability current} $j_t$ at time $t$ between sets $B$ and $B'$ by
\begin{eqnarray}\label{jdefcont}
j_t(B,B') = \lim_{\Delta t \searrow 0} \frac{1}{\Delta t}
\hspace{-3ex}
&&
\Big[ \mathrm{Prob}\big\{Q_{t}\in B',Q_{t+\Delta t}\in B \big\}
\\\nonumber
&&
- \mathrm{Prob} \big\{ Q_{t}\in B, Q_{t+\Delta t} \in B' \big\} \Big]\,.
\end{eqnarray}
This is the amount of probability that flows, per unit time, from $B'$
to $B$ minus the amount from $B$ to $B'$. For a pure jump process, we
have that
\begin{equation}\label{jrate}
j_t(B,B') = \int\limits_{q'\in B'} \sigma_t(B|q')\, \rho_t(dq') -
\int\limits_{q\in B} \sigma_t(B'|q)\, \rho_t(dq)\,,
\end{equation}
so that
\begin{equation}
j_t(B,B') = j_{\sigma,\rho}(B \times B')
\end{equation}
where $j_{\sigma,\rho}$ is the signed measure, on $\mathcal{Q} \times
\mathcal{Q}$, given by the integrand of \eqref{continuity3},
\begin{equation}\label{jsigma}
j_{\sigma,\rho} (dq \times dq') = \sigma(dq|q') \, \rho(dq') -
\sigma(dq'|q) \, \rho(dq)\,.
\end{equation}
For minimal jump rates $\sigma$, defined by \eqref{tranrates} or
\eqref{mini1} (and with the probabilities $\rho$ given by \eqref{mis},
$\rho = \mathbb{P}$), this agrees with \eqref{Jdef}, as was noted
earlier,
\begin{equation}\label{jJ}
j_{\sigma,\rho} = \mathbb{J}_{\Psi,H,{P}} \,,
\end{equation}
where we have made explicit the fact that $\mathbb{J}$ is defined in
terms of the quantum entities $\Psi, H$, and ${P}$. Note that both
$\mathbb{J}$ and the net current $j$ are anti-symmetric, $\mathbb{J}^\mathrm{tr} =
-\mathbb{J}$ and $j^\mathrm{tr} = -j$, the latter by construction and the former
because $H$ is Hermitian. (Here $\mathrm{tr}$ indicates the action on measures
of the transposition $(q,q') \mapsto (q',q)$ on $\mathcal{Q} \times \mathcal{Q}$.)
The property \eqref{jJ} is stronger than the equivariance of the rates
$\sigma$, $\mathscr{L}_\sigma \mathbb{P}_t = d\mathbb{P}_t / dt$: Since, by
\eqref{continuity3},
\begin{equation}
(\mathscr{L}_\sigma \rho) (dq) = j_{\sigma,\rho} (dq \times \mathcal{Q}),
\end{equation}
and, by \eqref{Jdef},
\begin{equation}
\frac{d\mathbb{P}}{dt}(dq) = \mathbb{J}(dq \times \mathcal{Q}),
\end{equation}
the equivariance of the jump rates $\sigma$ amounts to the condition
that the marginals of both sides of \eqref{jJ} agree,
\begin{equation}
j_{\sigma,\rho} (dq \times \mathcal{Q}) = \mathbb{J} (dq \times \mathcal{Q})\,.
\end{equation}
In other words, what is special about processes with rates satisfying
\eqref{jJ} is that not only the single-time \emph{distribution} but
also the \emph{current} is given by a standard quantum theoretical
expression in terms of $H, \Psi$, and ${P}$. That is why we call
\eqref{jJ} the \emph{standard-current property}---defining
\emph{standard-current rates} and \emph{standard-current processes}.
Though the standard-current property is stronger than equivariance, it
alone does not determine the jump rates, as already remarked in
\cite{BD,Roy}. This can perhaps be best appreciated as follows: Note
that \eqref{jsigma} expresses $j_{\sigma,\rho}$ as twice the
anti-symmetric part of the (nonnegative) measure
\begin{equation}
C(dq \times dq') = \sigma(dq|q') \, \rho(dq')
\end{equation}
on $\mathcal{Q} \times \mathcal{Q}$ whose right marginal $C(\mathcal{Q} \times dq')$ is
absolutely continuous with respect to $\rho$. Conversely, from any
such measure $C$ the jump rates $\sigma$ can be recovered by forming
the Radon--Nikod\'ym derivative
\begin{equation}
\sigma(dq|q') = \frac{C(dq \times dq')}{\rho(dq')}\,.
\end{equation}
Thus, given $\rho$, specifying $\sigma$ is equivalent to specifying
such a measure $C$.
In terms of $C$, the standard-current property becomes (with $\rho =
\mathbb{P}$)
\begin{equation}\label{CJ}
2 \, \mathrm{Anti} \, C = \mathbb{J}.
\end{equation}
Since (recalling that $\mathbb{J} = \mathbb{J}^+ - \mathbb{J}^-$ is
anti-symmetric)
\begin{equation}
\mathbb{J} = 2 \, \mathrm{Anti} \, \mathbb{J}^+,
\end{equation}
an obvious solution to \eqref{CJ} is
\[
C = \mathbb{J}^+,
\]
corresponding to the minimal jump rates. However, \eqref{jJ} fixes
only the anti-symmetric part of $C$. The general solution to
\eqref{CJ} is of the form
\begin{equation}
C = \mathbb{J}^+ + S
\end{equation}
where $S(dq \times dq')$ is symmetric, since any two solutions to
\eqref{CJ} have the same anti-symmetric part, and $S \geq 0$, since $S
= C \wedge C^\mathrm{tr}$, because $\mathbb{J}^+ \wedge (\mathbb{J}^+)^\mathrm{tr} =0$.
In particular, for any standard-current rates, we have that
\begin{equation}\label{minimality}
C \geq \mathbb{J}^+, \quad \text{or} \quad \sigma(dq|q') \geq
\frac{\mathbb{J}^+(dq \times dq')}{\mathbb{P}(dq')}.
\end{equation}
Thus, among all jump rates consistent with the standard-current
property, one choice, distinguished by equality in \eqref{minimality},
has the least frequent jumps, or the smallest amount of stochasticity:
the minimal rates \eqref{tranrates}.
\subsection{Minimal Processes}
\label{sec:mini}
We have considered in this paper minimal jump processes, i.e., jump
processes with rates \eqref{tranrates}, associated with integral
operators $H$. There is a more general notion of minimal process, such
that there is a minimal process associated with every Hamiltonian from
a much wider class than that of integral operators; a class presumably
containing all Hamiltonians relevant to QFT. This will be discussed in
detail in a forthcoming work \cite{crea3}.
Bohmian mechanics is, in this sense, the minimal process associated with
the Schr\"odin\-ger Hamiltonian \eqref{Hamil}. The minimal process
associated
with an integral operator is the jump process with minimal rates. When
the
minimal free generator \eqref{LH} exists, i.e., when \eqref{LH} is a
generator, it generates the minimal process associated with $H$. The
minimal process associated with the Hamiltonian of a QFT is the one we
have
obtained in this paper by means of process additivity. The concept of
minimal process directly provides, perhaps always, the process relevant
to a Bell-type QFT.
To begin to convey the notion of the minimal process, we generalize
the standard-current property (cf.\ Section \ref{sec:mini4}) from pure
jump processes to general Markov processes: the net probability
current $j$ of a Markov process defines a bilinear form
\begin{equation}
j_t(f,g) = \lim_{\Delta t \searrow 0} \, \frac{1}{\Delta t} \, \mathbb{E}
\big( f(Q_{t+\Delta t}) g(Q_t) - f(Q_t) g(Q_{t + \Delta t}) \big)
= (g,L_t f) - (f, L_t g)
\end{equation}
where $L_t$ is its backward generator, and $( \;, \, )$ on the right
hand side means the scalar product of $L^2(\mathcal{Q}, \rho_t)$. Then the
Markov process satisfies the \emph{standard-current property} if
$\rho_t = \mathbb{P}_t$ and (for $f$ and $g$ real) $j_t(f,g)$ is equal
to
\begin{equation}
\mathbb{J}_t(f,g) = \frac{2}{\hbar} \, im \, \sp{\Psi_t} {\hat{f} H
\hat{g} |\Psi_t}\,,
\end{equation}
or, in other words, if twice the anti-symmetric part of its backward
generator $L_t$ agrees with the operator corresponding to $\mathbb{J}_t$
as given by $(\mathbb{J}_t f,g) = \mathbb{J}_t(f,g)$, $2 \, \mathrm{Anti}
\, L_t = \mathbb{J}_t$. The minimal process is then the
standard-current process that has, in a suitable sense, the smallest
amount of randomness.
Let us consider some examples. The diffusion process with generator
$\mathscr{L}$ given below (and for $\rho = \mathbb{P}$) has the
standard-current property (in fact, because its ``current velocity''
\cite{stochmech} is $v$) for the Schr\"odinger Hamiltonian
\eqref{Hamil} but is not minimal:
\begin{equation}\label{diffusion}
\mathscr{L} \rho= \frac{\lambda}{2} \Delta \rho -\,\mathrm{div}\, (\rho
\tilde{v}),\mbox{ with } \tilde{v}:= v + \frac{\lambda}{2}
\nabla(\log|\Psi|^2)
\end{equation}
where $\lambda$ is any positive constant (the diffusion constant) and
$v$ is the Bohmian velocity (\ref{Bohm}); this process was already
considered in \cite{Jaekel,Davidson}. Note that Nelson's stochastic
mechanics \cite{stochmech} corresponds to $\lambda=\hbar$. It is
obvious without any mathematical analysis that the smallest amount of
stochasticity corresponds to absence of diffusion, $\lambda =0$, which
yields Bohmian mechanics. Processes like the diffusion
(\ref{diffusion}) for $\lambda > 0$ seem less natural for the
fundamental evolution law of a physical theory since they involve
greater mathematical complexity than is needed for a straightforward
association of a process with $H$ and $\Psi$. Examples of processes
that do not have the standard-current property, for the Schr\"odinger
Hamiltonian \eqref{Hamil}, are provided by the alternative velocity
formulas considered by Deotto and Ghirardi \cite{Deotto}; one can say
that their current is not the one suggested by $H$ and $\Psi$.
We return to the general discussion of the minimal process. As we have
already indicated, when, for a standard-current process, we view
$\mathbb{J}$ as well as its backward generator $L$ as operators on
$L^2(\mathcal{Q}, \mathbb{P})$, then $\frac12 \mathbb{J}$ is the anti-symmetric
(skew-adjoint) part of $L$; thus, only the symmetric (self-adjoint)
part of $L$ remains at our disposal. Since one of the properties of a
backward generator is $L\mathbf{1} =0$, the first possibility $\tilde{L}$ for
$L$ that may satisfy the formal criteria for being a backward
generator is $\tilde{L} f = \frac12 \mathbb{J} f - (\frac12 \mathbb{J}
\mathbf{1})f$. When ${P}$ is a PVM, this is also the operator we obtain by
applying, to an arbitrary quantum Hamiltonian $H$, the formula
\eqref{LH} for what we called the minimal free generator, which we
repeat here for convenience:
\begin{equation}\label{Ltilde}
\tilde{L} f(q) = \mathrm{Re} \, \frac{\sp{\Psi} {{P}(dq) \frac{i}{\hbar}
[H,\hat{f}] |\Psi}} {\sp{\Psi} {{P}(dq)|\Psi}}\,.
\end{equation}
Whereas this formula merely provided an alternative definition of the
free process in Section \ref{sec:free2}, it now plays a different
role: a step towards obtaining the minimal process from the
Hamiltonian $H$. As we have pointed out in Section \ref{sec:free2},
$\tilde{L}$ is also an obvious naive guess for the backward generator
$L$, quite independent of equivariance or the current $\mathbb{J}$,
since $\frac{i} {\hbar} [H,\hat{f}]$ is the time derivative of
$\hat{f}$. Moreover, it manifestly satisfies $\tilde{L} \mathbf{1} =0$. For
the backward generator $L$ of a standard-current process we must have,
when ${P}$ is a PVM, that $L = \tilde{L} + S$ where $S$ is a
symmetric operator and $S\mathbf{1} =0$. For the minimal process, we have to
choose $S$ as small as possible---while keeping $S$ symmetric and $L$
a backward generator.
Suppose ${P}$ is a PVM. Observe then that if $H$ is a differential
operator (as $H_0$ often is) of the kind considered in Section
\ref{sec:free2}, $\tilde{L}$ is itself a backward generator, so that
$S=0$ is a possible, and in fact the smallest, choice. If $H$ is an
integral operator, what keeps $\tilde{L}$, an integral operator as
well, from being a backward generator is that the off-diagonal part of
its $\mathbb{P}$-kernel $(q,\tilde{L} q') = \mathbb{P}(q) \tilde{L}(q,q')
= \frac{1}{\hbar} \, im \, \sp{\Psi}{q} \sp{q}{H|q'} \sp{q'}{\Psi}$
may assume negative values whereas the off-diagonal part of the
$\mathbb{P}$-kernel of $L$, $(q,Lq') = \mathbb{P}(q) \sigma(q|q')$, cannot
be negative. The smallest possible choice of $S$ has as off-diagonal
elements what is needed to compensate the negative values, and this
leads to the minimal jump process, as described in Section
\ref{sec:mini4}. The diagonal part contains only what is needed to
ensure that $S\mathbf{1} =0$. For $H$ of the form $H_0 + H_{I}$, the role
of $S$ is again to compensate negative values off the diagonal, and
the minimal process has velocities determined by $H_0$ via \eqref{LH}
and jump rates determined by $H_{I}$ via \eqref{tranrates}.
In any case, the backward generator of the minimal process is the one
closest, in a suitable sense, to \eqref{Ltilde}. This formula may
thus be regarded as containing the essential structure of $L$, for the
deterministic as well as for the jump part of the process.
Another approach towards a general notion of minimal process may be to
approximate $H$ by Hilbert--Schmidt operators $H_n$, with which are
associated, according to the results of Sections~4.2.1 and 4.2.4 of
\cite{crea2A}, minimal jump processes $Q_n$, and take the limit $n \to
\infty$ of the processes $Q_n$. This leads to a number of mathematical
questions, such as under what conditions on $H, \Psi, {P}$, and $H_n$
does a limiting process exist, and is it independent of the choice of
the approximating sequence $H_n$.
\section{Remarks}\label{sec:remarks}
\subsection{Symmetries}\label{sec:symm}
Process additivity preserves symmetries, in the sense that the process
generated by $\sum \mathscr{L}^{(i)}$ shares the symmetries respected
by all of the building blocks $\mathscr{L}^{(i)}$. This section
elaborates on this statement, and the following ones: The minimal jump
rates \eqref{tranrates} and the minimal free generator \eqref{LH}
share the symmetries of the Hamiltonians with which they are
associated. The ``second quantization'' algorithm preserves the
symmetries respected by the one-particle process.
Here are some desirable symmetries that may serve as examples: space
translations, rotations and inversion, time translations and
reversal, Galilean or Lorentz boosts, global change of phase $\Psi \to
e^{i\theta} \Psi$, relabeling of particles,\footnote{This may mean
two things: changing the artificial labels given to identical
particles, or exchanging two species of particles.} and gauge
transformations.
We focus first on symmetries that do not involve time in any way,
such as rotation symmetry. In this case, a symmetry group $G$ acts on
$\mathcal{Q}$, so that to every $g \in G$ there corresponds a mapping
$\varphi^g:
\mathcal{Q} \to \mathcal{Q}$. In addition, $G$ acts on $\mathscr{H}$ through a
projective unitary (or anti-unitary) representation, so that to every $g
\in G$ there corresponds a unitary (or anti-unitary) operator $U_g$.
Then the theory is $G$-invariant if both the wave function dynamics
and the process on $\mathcal{Q}$ are, i.e., if $H$ is $G$-invariant,
\begin{equation}\label{HGinv}
U_g^{-1} H U_g = H\,,
\end{equation}
and
\begin{equation}\label{QGinv}
\varphi^g(Q_t^\Psi) = Q_t^{U_g \Psi}
\end{equation}
in distribution on path space. A necessary condition for
(\ref{QGinv}) is that the ``configuration observable'' transforms like
the configuration, in the sense that
\begin{equation}\label{povGinv}
U_g^{-1} {P}(\,\cdot\,) U_g = \varphi^g_* {P}(\,\cdot\,)\,,
\end{equation}
where $\varphi_*$ denotes the action of $\varphi$ on measures.
Without (\ref{povGinv}), (\ref{QGinv}) would already fail at time
$t=0$, no matter what the generator is. Given (\ref{povGinv}),
(\ref{QGinv}) is equivalent to the $G$-invariance of the generator:
\begin{equation}\label{LGinv}
\varphi^g_* \mathscr{L}^\Psi \varphi^{g^{-1}}_* =
\mathscr{L}^{U_g \Psi} \,.
\end{equation}
Since $\varphi^g_*$ is a linear operator, it follows immediately that
the sum of $G$-invariant generators is again $G$-invariant. The
minimal jump process, when it exists, is $G$-invariant, as follows
from the fact that $\varphi^g_*\sigma^\Psi(dq|\varphi^g(q')) =
\sigma^{U_g \Psi} (dq|q')$, which can be seen by inspecting the jump
rate formula (\ref{tranrates}). The minimal free generator
\eqref{genH} satisfies \eqref{LGinv} by virtue of \eqref{HGinv} and
\eqref{povGinv}. ``Second quantization'' provides $G$-actions on
$\Gamma \mathcal{Q}^{(1)}$ and $\mathscr{F} = \Gamma \mathscr{H}^{(1)}$ from given
actions on $\mathcal{Q}^{(1)}$ and $\mathscr{H}^{(1)}$; \eqref{HGinv},
\eqref{povGinv} and \eqref{LGinv} are inherited from their 1-particle
versions.
Time-translation invariance is particularly simple. Consider
generators $\mathscr{L}^{(i)}_\Psi$ which do not depend on time except
through their dependence on $\Psi$. Then the same is true of $\sum
\mathscr{L}^{(i)}$. The same can be said of the ``second quantized''
generator, and, provided $H$ is time-independent, of the minimal jump
rates (\ref{tranrates}) and the minimal free generator \eqref{genH}.
Next we consider time reversal. It is represented on $\mathscr{H}$ by an
anti-unitary operator $T$, i.e., an anti-linear operator such that
$\sp{T\Phi}{T\Psi}$ is the conjugate of $\sp{\Phi}{\Psi}$. We assume
that the Hamiltonian is reversible, $THT^{-1} = H$. Then the
reversibility of the theory means that
\begin{equation}\label{QTinv}
Q^\Psi_{-t} = Q_t^{T\Psi}
\end{equation}
in distribution on path space, where the superscript should be
understood as indicating the state vector at $t=0$. The necessary
condition analogous to (\ref{povGinv}) reads
\begin{equation}\label{povTinv}
T^{-1} {P}(\,\cdot\,) T = {P}(\,\cdot\,) \,,
\end{equation}
and given that, (\ref{QTinv}) is equivalent to the $T$-invariance of
the generator:
\begin{equation}\label{LTinv}
\overline{\mathscr{L}}_\Psi = \mathscr{L}_{T\Psi}\,, \mbox{ or }
\overline{L}_\Psi = L_{T\Psi}\,,
\end{equation}
where $\overline{\mathscr{L}}$ and $\overline{L}$ denote the forward and backward
generator of the time-reversed process. $\overline{L}$ can be computed from
$L$,
for an equivariant Markov process, according to\footnote{To make this
formula plausible, it may be helpful to note that the second term on
the right hand side is just the correction needed to ensure that
$L^\dag\mathbf{1} =0$, a necessary condition for being a backward generator.
If $\mathbb{P}$ were stationary, the second term on the right hand side
would vanish.
Here is a derivation of \eqref{LT}: Let $(f,g) = \int_{q\in \mathcal{Q}}
f(q) \, g(q) \, \mathbb{P}(dq)$ be the scalar product in
$L^2(\mathcal{Q},\mathbb{P})$. It follows from the definition
\eqref{backgenerator} of $L$ that
\[
(g,Lf) = \lim_{t\searrow 0} \frac{1}{t} \, \mathbb{E} \big( g(Q_0)
f(Q_t) - g(Q_0) f(Q_0) \big)\,.
\]
Correspondingly, $\overline{L}$ is characterized (for $f$ and $g$ real) by
\begin{eqnarray*}
(g,\overline{L}f)
&=& \lim_{t\searrow 0} \frac{1}{t} \, \mathbb{E} \big( g(Q_0)
f(Q_{-t}) - g(Q_0) f(Q_0) \big) = \\
&=&\lim_{t\searrow 0} \frac{1}{t} \, \mathbb{E} \big( g(Q_{0}) f(Q_{-t})
- g(Q_{-t}) f(Q_{-t}) \big) \: +\\
&+& \lim_{t\searrow 0} \frac{1}{t}\,\mathbb{E} \big( g(Q_{-t}) f(Q_{-t})
- g(Q_{0}) f(Q_{0}) \big) = \\
&=& (f,Lg) -\int\limits_{q\in\mathcal{Q}} g(q) \, f(q)
\,\dot{\mathbb{P}}(dq)
\stackrel{\eqref{generatorduality}}{=} (Lg,f)-(L(gf),\mathbf{1}) =
(g,L^\dag f) - (fg,L^\dag \mathbf{1})\,,
\end{eqnarray*}
which amounts to \eqref{LT}.}
\begin{equation}\label{LT}
\overline{L} f = L^\dag f - (L^\dag\mathbf{1}) f
\end{equation}
where $^\dag$ denotes the adjoint operator on $L^2(\mathcal{Q},\mathbb{P})$,
with $\mathbb{P}$ given by \eqref{mis}. Since $\overline{L}$ is linear in
$L$, condition (\ref{LTinv}) is preserved when adding (forward or
backward) generators; it is also preserved under ``second
quantization.'' For a pure jump process, (\ref{LTinv}) boils down to
\begin{equation}\label{jumprevers}
\sigma^{\Psi}(dq|q') \, \sp{\Psi}{{P}(dq') |\Psi} = \sigma^{T \Psi}
(dq'|q) \, \sp{\Psi}{{P}(dq) |\Psi}\,,
\end{equation}
which is satisfied for the minimal jump rates, by inspection of
(\ref{tranrates}). The minimal free generator \eqref{LH} changes sign
when replacing $\Psi$ by $T\Psi$, which means the velocity changes
sign, as it should under time reversal (see Section
\ref{sec:freeflow}).
Invariance under Galilean boosts is a more involved story, and as it
is not considered as fundamental in physics anyway, we omit it here.
Lorentz boosts are even trickier, since for more than just one
particle, they even fail to map (simultaneous) configurations into
(simultaneous) configurations. As a result, the problem of Lorentz
invariance belongs in an altogether different league, which shall not
be entered here.
\subsection{On the Notion of Reversibility}
It may appear, and it is in fact a widespread belief, that
stochasticity is incompatible with time reversibility. We naturally
view the past as fixed, and the future, in a stochastic theory, as
free, determined only by innovations. Even Bell expressed such a
belief \cite[p.~177]{Bellbook}. However, from the proper perspective
the conflict disappears, and this perspective is to consider the
path space (of the universe) and the probability measure thereon. If
$t\mapsto Q_t$ is a history of a universe governed by a Bell-type
QFT, then
its time reverse, $t\mapsto Q_{-t}$, is again a possible path of this
Bell-type QFT, though corresponding to a different initial state
vector $T\Psi$ instead of $\Psi$, with $T$ the time reversal
operator as discussed in Section \ref{sec:symm}. More than this, the
distribution of the reversed path $t\mapsto Q_{-t}$ coincides with the
probability measure on path space arising from $T\Psi$.\footnote{We
can be more precise about the meaning of the measure on path space:
as in Bohmian mechanics \cite{DGZ}, its role ``is precisely to
permit definition of the word `typical'.'' \cite[p.~129]{Bellbook}
Consequently, the meaning of the reversibility property of the
measures we just mentioned is that the time reverse of a history
that is typical with respect to $\Psi$, is typical with respect to
$T\Psi$.}
It may also be helpful to think of how the situation appears when
viewed from outside space-time: then the path $Q_t$ corresponds to
the decoration of space-time with a pattern of world lines, and this
pattern is random with respect to a probability measure on what
corresponds to path space, namely the space of all possible
decorations of space-time. Then time reversal is a mere reflection,
and for a theory to be time reversible means the same as being
invariant under this reflection: that we could have had as well the
reflected probability measure, provided we had started with $T\Psi$
instead of $\Psi$.
To sum up, we would like to convey that the sense of reversibility
for Markov processes indeed matches the sense of reversibility that
one should expect from a physical theory.
\subsection{Heisenberg Picture}
In (\ref{mis}), we have applied the Schr\"odinger picture, according
to which the state vector evolves while the operators remain
fixed. Eq.~(\ref{mis}) and the reasoning following it can as well be
translated to the Heisenberg picture where the state vector $\Psi$
is regarded as fixed and the operators ${P}_t(\,\cdot\,)$ as
evolving. Thus, we could equivalently write
\[
\mathbb{P}_t(dq) = \sp{\Psi}{{P}_t(dq)| \Psi}
\]
instead of (\ref{mis}). Similarly, $H_0$ and $H_{I}$ become
time-dependent while their sum is constant. We often use an
ambiguous notation like $\sp{\Psi}{{P}(dq)|\Psi}$ and formula
\eqref{tranrates} since the formulas are equally valid in both
pictures (and, for that matter, in the interaction picture).
Like the jump rate formula \eqref{tranrates}, the formula \eqref{LH}
for the minimal free generator is equally valid in the Heisenberg
picture.
We further remark that in the Heisenberg picture, the following nice
equation holds for a pure jump process with minimal rates when
${P}$ is a PVM:
\begin{equation}\label{twotimes}
\mathrm{Prob}\{Q_{t+dt} \in dq, Q_{t} \in dq'\} = \sp{\Psi} {\{{P}_{t+dt}
(dq), {P}_{t}(dq') \} | \Psi}^+
\end{equation}
for $dq \cap dq' = \emptyset$, where $\{ \;,\, \}$ on the right hand
side means the anti-commutator. The similarity to the one-time
distribution formula
\[
\mathrm{Prob}\{Q_t \in dq\} = \sp{\Psi}{{P}_t(dq) |\Psi}
\]
is striking. Specifying the two-time distribution for infinitesimal
time differences is a way of characterizing a Markov process,
equivalent to specifying the (forward or backward) generator and the
one-time distribution. Thus, for a PVM ${P}$, \eqref{twotimes}
provides another formula for the minimal jump rates
\eqref{tranrates}. A similar formula for the process generated by
the minimal free generator \eqref{LH} is $\mathbb{E} \big(g(Q_t)
f(Q_{t+dt}) \big) = \frac12 \sp{\Psi} {\{\hat{g}_t, \hat{f}_{t+dt}
\} | \Psi}$.
\subsection{Examples of Process Additivity}
\label{sec:known}
Among different versions of Bohmian mechanics we find numerous
examples of process additivity (and, remarkably, no example
\emph{violating} it):
\begin{itemize}
\item The Hamiltonian for $n$ noninteracting particles is the sum of
the Hamiltonians for the individual particles, and it is easy to see
that the vector field defining Bohmian mechanics for the
$n$-particle system is the sum of the vector fields (each regarded
as a vector field on $\mathbb{R}^{3n}$) for the particles. As already
mentioned, sums of generators for deterministic processes amount to
sums of the defining vector fields.
Moreover, the vector field for each particle is essentially the
Bohmian one-particle law. To point out that this is a nontrivial
fact, we mention that this is not so for the alternative velocity
formula (10.2) in \cite{Deotto} considered by Deotto and Ghirardi,
for which the velocity of the $i$-th particle differs from the
one-particle law. So Bohmian mechanics of $n$ particles can be
viewed as built from $n$ copies of the one-particle version, in fact
by the ``second quantization'' algorithm of Section \ref{sec:Gamma}.
\item The vector field of Bohmian mechanics for a single spinless
particle may also be seen as arising in this way. If a Hamiltonian
$H=-X^2$ is the negative square of an (incompressible) vector field
(regarded as a first-order differential operator) $X=a({\boldsymbol x}) \!\cdot\!
\nabla$ on $\mathbb{R}^3$ (with $\nabla \!\cdot\! a=0$ ensuring formal
self-adjointness of the square), then the simplest equivariant
process associated with $H$ is given by the velocity vector field
\[
v= \frac{2}{\hbar} \, im \,\frac{a\cdot \nabla \Psi}{\Psi}\, a\, .
\]
The corresponding backward generator is $L = \frac{2}{\hbar} \, im
\, (\frac{X\Psi}{\Psi}) X$. Now $-\frac{\hbar^2}{2}\Delta =
-\sum_{\alpha}{X_{\alpha}}^2$ is the sum of 3 negative squares of
vector fields $X_{\alpha} = \frac{\hbar} {\sqrt{2}} \partial /
\partial x^\alpha$ corresponding to the individual degrees of
freedom. The associated Bohm velocity is the sum of the velocities
corresponding to the squares. So Bohmian mechanics in three
dimensions can be viewed as built of 3 copies of the one-dimensional
version. To point out that this is a nontrivial fact, we mention
that this is not true, e.g., of the velocity formulas (10.1) and
(10.2) in \cite{Deotto}, which do not make sense in dimensions other
than 3.
\item If we add an interaction potential $V$ to $-\frac{\hbar^2}{2}
\Delta$, the Bohm velocity is the appropriate sum, since the
operator $V$ is associated with the trivial motion $v=0$.
\item We may also include an external vector potential ${\boldsymbol A}({\boldsymbol x},t)$ in
the Schr\"odinger equation, that is, replace $- \frac{\hbar^2}{2}
\Delta = - \frac{\hbar^2}{2} \nabla^2$ by $- \frac{\hbar^2}{2}
\big( \nabla + i \frac{e}{\hbar} {\boldsymbol A}({\boldsymbol x},t) \big)^2 = -
\frac{\hbar^2}{2} \Delta - \frac{\hbar^2}{2} (i \frac{e}{\hbar}
\nabla\cdot{\boldsymbol A} + i \frac{e}{\hbar} {\boldsymbol A} \cdot \nabla) +
\frac{e^2}{2} {\boldsymbol A}^2$. The sum of the associated velocities, namely
\[
\hbar \, im \, \frac{\Psi^* \nabla \Psi}{\Psi^* \, \Psi} + e{\boldsymbol A} +
0
\]
equals the velocity one obtains directly, $\hbar \, im \, \Psi^*
(\nabla +i \frac{e}{\hbar} {\boldsymbol A})\Psi/ \Psi^* \, \Psi$.
\item In the Bohm--Dirac theory (\ref{BohmDirac}), however, one can
include an external gauge connection $A_\mu({\boldsymbol x},t)$ in the Dirac
equation without changing the velocity formula. That conforms with
process additivity because the operator $(\gamma^0)^{-1} \gamma^\mu
A_\mu = A_0+\boldsymbol{\alpha}\cdot{\boldsymbol A}$ is associated (termwise)
with $v=0$.
\item In the Dirac Hamiltonian $H = -i c \hbar {\boldsymbol \alpha} \cdot \nabla +
\beta mc^2$, the first term corresponds to the Bohm--Dirac velocity
(\ref{BohmDirac}), whereas the second term corresponds to $v=0$; as
a consequence, the Bohm--Dirac velocity does not depend on the mass.
Moreover, the three components of the Bohm--Dirac velocity are each
equivariant with respect to the corresponding derivative term in
$H$.
\end{itemize}
In addition, we point out cases of process additivity in the ``second
quantization'' algorithm and minimal jump processes.
The ``second quantized'' generator $\Gamma \mathscr{L}^{(1)}$ as
constructed in Section \ref{sec:Gamma} provides an example of
process additivity (or may be viewed as an application of process
additivity):
\[
\mathscr{L}_{H_0, \Psi}= \sum_{n=0}^{\infty}
\mathscr{L}_{H_0^{(n)}, \Psi^{(n)}} \,,
\]
where the generators in the sum correspond to motions in the
respective different sectors of $\mathcal{Q}$.
Suppose we regard the particles as ordered, $Q = ({\boldsymbol Q}_1, \ldots,
{\boldsymbol Q}_N)$. Then another case of process additivity becomes visible:
\[
H_0^{(N)} = \sum_{i=1}^N h_i
\]
where $h_i$ is the one-particle Hamiltonian acting on the $i$-th
particle. Correspondingly,
\[
\mathscr{L}_{H_0^{(N)}} = \sum_{i=1}^N \mathscr{L}_i
\]
where $\mathscr{L}_i$ is equivariant with respect to $h_i$. This
applies not only to Bohmian mechanics (as described earlier in this
section), but generally to the ``second quantization'' procedure as
described in Section \ref{sec:Gamma}. We also note that the ``second
quantization'' algorithm presented in Section \ref{sec:Gamma}
preserves process additivity in the sense that
$\Gamma(\mathscr{L}_1^{(1)} + \mathscr{L}_2^{(1)}) =
\Gamma(\mathscr{L}_1^{(1)}) + \Gamma(\mathscr{L}_2^{(1)})$ while
$\Gamma(H_1^{(1)} + H_2^{(1)}) = \Gamma(H_1^{(1)}) +
\Gamma(H_2^{(1)})$.
\label{sec:miniadd}
We now turn to process additivity among minimal jump processes.
A jump process generated by a sum need not be a minimal jump process
even when its constituents are. But under certain conditions it is.
Two such cases are the ``direct sum'' and ``tensor product'' processes
constructed in Sections \ref{sec:directsum} and
\ref{sec:tensorproduct}: $\mathscr{H} = \bigoplus_n \mathscr{H}^{(n)}$ with
$\mathcal{Q} = \bigcup_n \mathcal{Q}^{(n)}$ and $H =\bigoplus_n H^{(n)}$, and
$\mathscr{H} = \mathscr{H}^{[1]} \otimes \cdots \otimes \mathscr{H}^{[N]}$ with
$\mathcal{Q} = \mathcal{Q}^{[1]} \times \cdots \times \mathcal{Q}^{[N]}$ and $H = \sum_i
\mathbf{1} \otimes \cdots \otimes H^{[i]} \otimes \cdots \otimes \mathbf{1}$, with
$\mathscr{L} = \sum \mathscr{L}_i$ where $\mathscr{L}_i$ acts
nontrivially, in an obvious sense, only on $\mathcal{Q}^{(i)}$ or on
$\mathcal{Q}^{[i]}$. These are special cases of the general fact that
minimality is compatible with additivity whenever the addends of the
Hamiltonian correspond to \emph{different sorts} of jumps. That can be
most easily understood in the case of a PVM corresponding to an
orthonormal basis $\{|q\rangle : q \in \mathcal{Q}\}$ of $\mathscr{H}$: suppose
$H=H_1 + H_2$ and for every pair $q,q'$ either $\sp{q}{H_1|q'} =0$ or
$\sp{q}{H_2 |q'} =0$. Then $\sigma = \sigma_1 + \sigma_2$. The
corresponding condition in the POVM context is that the kernels of
$H_1$ and $H_2$ have disjoint supports. When $H$ is naturally given as
a sum this condition would be expected to be satisfied.
Finally, we remark that the minimal free generator $\mathscr{L} =
\mathscr{L}^H$ as defined in (\ref{genH}) is additive in $H$.
\label{sec:mini3}
\subsection{Second Quantization of a Minimal Jump Process}
We note that the ``second quantization'' of a minimal jump
process associated with a PVM ${P}^{(1)}$, as described in Section
\ref{sec:Gamma}, is the minimal jump process associated with the
second-quantized Hamiltonian; this is a consequence of the
observation that $\mathscr{L}_i$ generates the minimal jump process
for $H_i$ in this case. This fact is probably physically irrelevant
but it is mathematically nice.
\subsection{Global Existence Question}
The rates $\sigma_t$ and velocities $v_t$, together with $\mathbb{P}_t$,
define the process $Q_t$ associated with $H,{P}$, and $\Psi$, which
can be constructed along the lines of Section
\ref{sec:revjump}. However, the rigorous existence of this process,
like the global existence of solutions for an ordinary differential
equation, is no trivial matter. See Section~4.3 of \cite{crea2A} for
a discussion of what must be controlled in order to establish the
global existence of the process, and \cite{crex1} for an example of
such a global existence proof.
\subsection{POVM Versus PVM}
As we have already remarked in footnote \ref{ft:Naimark}, every POVM
${P}$ is related to a PVM ${P}_{\mathrm{ext}}$, the Naimark extension, on a
larger Hilbert space $\mathscr{H}_{\mathrm{ext}}$ according to ${P}(\,\cdot\,) =
P_+ {P}_{\mathrm{ext}}(\,\cdot\,) I$ with $P_+$ the projection $\mathscr{H}_{\mathrm{ext}}
\to \mathscr{H}$ and $I$ the inclusion $\mathscr{H} \to \mathscr{H}_{\mathrm{ext}}$. This
fact allows a second perspective on ${P}$, and sometimes creates a
certain ambiguity as to which process is the suitable one for a
Bell-type QFT, as follows. At several places in this paper, we have
described considerations leading to and methods for defining Markov
processes, in particular minimal jump rates \eqref{tranrates} and the
minimal free generator \eqref{LH}; these considerations and methods
could be applied using either $\mathscr{H}_{\mathrm{ext}}$ and ${P}_{\mathrm{ext}}$ or
$\mathscr{H}$ and ${P}$. One would insist that the state vector $\Psi$
must lie in $\mathscr{H}$, the space of physical states, but even then
one might arrive at different processes starting from ${P}$ or
${P}_{\mathrm{ext}}$. To obtain a process from ${P}_{\mathrm{ext}}$ requires, of course,
that we have a Hamiltonian on $\mathscr{H}_{\mathrm{ext}}$, while $H$ is defined on
$\mathscr{H}$; such a Hamiltonian, however, can easily be constructed
from $H$ by setting $H_{\mathrm{ext}} = I H P_+$.
In some cases, the Naimark extension does not lead to an ambiguity.
This is the case for the jump rate formula \eqref{tranrates}, since
for $\Psi \in \mathscr{H}$, $\sp{\Psi}{{P}_{\mathrm{ext}}(dq)| \Psi} =
\sp{\Psi}{{P}(dq) |\Psi}$ and $\sp{\Psi}{{P}_{\mathrm{ext}}(dq) H_{\mathrm{ext}}
{P}_{\mathrm{ext}}(dq')| \Psi} = \sp{\Psi}{{P}(dq) H {P}(dq') |\Psi}$. This
fact suggests that, generally, the minimal process arising from
$H_{\mathrm{ext}}$ and ${P}_{\mathrm{ext}}$ is the same as the one arising from $H$ and
${P}$.
The situation is different, however, when $H$ is defined on
$\mathscr{H}_{\mathrm{ext}}$ to begin with, and different from $H_{\mathrm{ext}}$. This is
the case with the free Dirac operator $h_0$, defined as a differential
operator on $L^2(\mathbb{R}^3,\mathbb{C}^4)$, which differs from $P_+h_0P_+$.
When we obtained in Section \ref{sec:free2} the Bohm--Dirac motion
\eqref{BohmDirac} from the formula \eqref{LH} for the minimal free
generator, we used $h_0$ and ${P}_{\mathrm{ext}}$. In contrast, the restriction
of $h_0$ to the positive energy subspace, or equivalently $P_+ h_0
P_+$, possesses a kernel; more precisely, it is a convolution operator
$S_+ \star (h_0 S_+) \star$ in the notation of Section
\ref{sec:positron}, and thus corresponds to jumps. The associated
minimal process on $\mathbb{R}^3$ presumably makes infinitely many jumps in
every finite time interval, similar to the example of \cite{crea2A},
Section
3.5.
Thus, there are two processes to choose between, the Bohm--Dirac
motion and the minimal process for $P_+ h_0 P_+$. Both are
equivariant, and thus it is arguably impossible to decide empirically
which one is right. In our example theory in Section
\ref{sec:positron}, we chose the simpler, deterministic one. But we
leave to future work the discussion of which is more likely relevant
to physics, and why.
\subsection{The Role of Field Operators}\label{sec:fields}
The Bell-type QFTs with which we have been concerned in this paper are
models describing the behaviour of \emph{particles} moving in physical
3-space, not of fields on 3-space. We have been concerned here mainly
with a particle ontology, not a field ontology. This focus may be
surprising at first: almost by definition, it would seem that QFT
deals with fields, and not with particles. Consider only the
occurrence (and prominence) of field operators in QFT!
But there is less to this than might be expected. The field operators
do not function as observables in QFT. It is far from clear how to
actually ``observe'' them, and even if this could somehow, in some
sense, be done, it is important to bear in mind that the standard
predictions of QFT are grounded in the particle representation, not
the field representation: Experiments in high energy physics are
scattering experiments, in which what is observed is the asymptotic
motion of the outgoing particles. Moreover, for Fermi fields---the
matter fields---the field as a whole (at a given time) could not
possibly be observable, since Fermi fields anti-commute, rather than
commute, at space-like separation. One should be careful here
not to be taken in by the attitude widespread in quantum theory
of intuitively regarding the operators as ``quantities,'' as if they
represented something out there in reality; see \cite{naive} for a
critique of this attitude.
So let us focus on the role of the field operators in QFT. This seems
to be to relate abstract Hilbert space to space-time: the field
operators are attached to space-time points, unlike the quantum states
$\Psi$, which are usually regarded not as functions but as abstract
vectors. In orthodox quantum field theory the field operators are an
effective device for the specification of Hamiltonians having good
space-time properties. For our purposes here, what is critical is the
connection between field operators and POVMs.
Throughout this paper, the connection between Hilbert space and the
particle positions in physical space has been made through the POVM
${P}$, and through it alone. We now wish to emphasize that the field
operators are closely related to ${P}$, and indeed that field
operators are just what is needed for efficiently defining a POVM
${P}$ on $\Gamma(\mathbb{R}^3)$.
This connection is made through number operators $N(R)$, $R \subseteq
\mathbb{R}^3$. These define a \emph{number-operator-valued measure} (NOVM)
$N(\,\cdot\,)$ on $\mathbb{R}^3$, an ``unnormalized POVM'' ($N(\mathbb{R}^3)$ is
usually not the identity operator and $N(R)$ is usually an unbounded
positive operator) for which the values $N(R)$ commute and are number
operators: $\mathrm{spectrum}(N(R)) \subseteq \{0,1,2,3,\ldots\}$.
(The basic difference, then, between a NOVM and a PVM is that the
spectrum of the positive operators is $\{0,1,2,3,\ldots\}$ rather than
just $\{0,1\}$.)
There is an obvious one-to-one relation between NOVMs $N(\,\cdot\,)$
on $\mathbb{R}^3$ and PVMs ${P}$ on $\Gamma(\mathbb{R}^3)$, given by
\begin{equation}\label{Npov}
N(R) = \int\limits_{q\in\Gamma(\mathbb{R}^3)} n_R(q) \, {P}(dq)
\end{equation}
where $n_R(q) = \#(q \cap R)$ is the number function on
$\Gamma(\mathbb{R}^3)$ for the region $R$. Since \eqref{Npov} is the
spectral decomposition of the commuting family $N(R)$, this
correspondence is one-to-one. (Note that the joint spectrum of the
commuting family $N(R)$ is the set of nonnegative-integer-valued
measures $n_R$ on $\mathbb{R}^3$, one of the definitions of $\Gamma(\mathbb{R}^3)$
given in Section \ref{sec:free}.)
The moral is that a NOVM on $\mathbb{R}^3$ is just a different way of
speaking about a PVM ${P}$ on $\mathcal{Q} = \Gamma(\mathbb{R}^3)$. All other
POVMs arise from PVMs by restriction to a subspace (Naimark's theorem
\cite{Davies}). An easy way to obtain a NOVM $N$ starts with setting
\begin{equation}\label{Ndef}
N(R) = \int_R \phi^*({\boldsymbol x}) \, \phi({\boldsymbol x}) \, d^3{\boldsymbol x}
\end{equation}
for suitable operators $\phi({\boldsymbol x})$. An easy way to ensure that the
$N(R)$
commute is to require that the operators $\phi({\boldsymbol x})$ commute or
anti-commute
with each other and the adjoints $\phi^*({\boldsymbol x}')$ for ${\boldsymbol x}'\neq {\boldsymbol x}$.
An easy way to ensure that the $N(R)$ have nonnegative integer
eigenvalues
is to require that
\begin{equation}\label{ccr}
[\phi({\boldsymbol x}),\phi^*({\boldsymbol x}')]_\pm = \delta({\boldsymbol x}-{\boldsymbol x}')\,,
\end{equation}
where $[ \;,\,]_\pm$ is the (anti\nobreakdash-)commutator, and that
there is a cyclic vacuum state $|0\rangle \in \mathscr{H}$ for which
$\phi({\boldsymbol x})|0 \rangle =0$. The relations \eqref{ccr} are of course
just the usual canonical (anti\nobreakdash-)commutation relations that
field operators are required to satisfy.
Moreover, in gauge theories the connection between matter field $\phi$
and the NOVM is perhaps even more compelling. Consider a gauge theory
with internal state space $V$, equipped with the inner product
$\scalar{\,\cdot\,}{\,\cdot\,}$. Then, given ${\boldsymbol x} \in \mathbb{R}^3$, the
matter field $\phi({\boldsymbol x})$ should formally be regarded as a linear
functional $V \to \mathcal{O}(\mathscr{H})$, $\xi \mapsto \phi_\xi({\boldsymbol x})$,
from the internal state space to operators on $\mathscr{H}$, with
$\phi^*_{\xi^*}({\boldsymbol x}) = (\phi_\xi({\boldsymbol x}))^*$ a linear function $V^* \to
\mathcal{O}(\mathscr{H})$ on the dual of $V$. Equation~\eqref{ccr} then becomes
$[\phi_\xi({\boldsymbol x}), \phi_{\eta^*}^*({\boldsymbol x}')] = \delta({\boldsymbol x}-{\boldsymbol x}') \,
\scalar{\eta}{\xi}$. Thus the simplest gauge-invariant object
associated with $\phi$ is the NOVM \eqref{Ndef}, with the integrand
understood as the contraction of the tensor $V \times V^* \to
\mathcal{O}(\mathscr{H})$, $(\xi,\eta) \mapsto \phi_\eta^*({\boldsymbol x}) \,
\phi_\xi({\boldsymbol x})$.
Hence, not only does the notion of particle not conflict with the
prominence of field operators (see Sections \ref{sec:crea1} and
\ref{sec:positron} for explicit examples), but field operators have a
natural place in a theory whose ultimate goal it is to govern the
motion of particles. One of their important roles is to define the
POVM ${P}$ that relates Hilbert space to configuration space.
Quantum theory of fields or quantum theory of particles? A theory of
particle motion exploiting field operators!
\section{Conclusions}
The essential point of this paper is that there is a direct and
natural way of understanding QFT as a theory about moving particles,
an idea pioneered, in the realm of nonrelativistic quantum mechanics,
by de Broglie and Bohm. We leave open, however, three considerable
gaps: the question of the process associated with the Klein--Gordon
operator, the problem of removing cut-offs, and the issue of Lorentz
invariance.
\noindent \textbf{Acknowledgements. }We thank James Taylor of Rutgers
University and Stefan Teufel of Technische Universit\"at M\"unchen for
helpful discussions. R.T.\ gratefully acknowledges support by the
German National Science Foundation (DFG). N.Z.\ gratefully
acknowledges support by INFN and DFG. Finally, we appreciate the
hospitality that some of us have enjoyed, on more than one occasion,
at the Mathematisches Institut of Ludwig-Maximilians-Universit\"at
M\"unchen, at the Dipartimento di Fisica of Universit\`a di Genova,
and at the Mathematics Department of Rutgers University.
\end{document} |
\begin{document}
\title{Fractional Angular Momenta, Gouy and Berry phases in Relativistic Bateman-Hillion-Gaussian Beams of Electrons}
\author{Robert Ducharme$^{1}$}
\email{[email protected]}
\author{Irismar G. da Paz$^{2}$}
\email{[email protected]}
\author{Armen G. Hayrapetyan$^{3,4,5}$}
\email{[email protected]}
\affiliation{$^{1}$ 2112 Oakmeadow Pl., Bedford, TX 76021, USA}
\affiliation{$^2$ Departamento de F\'{\i}sica, Universidade Federal
do Piau\'{\i}, Campus Ministro Petr\^{o}nio Portela, CEP 64049-550,
Teresina, PI, Brazil}
\affiliation{$^{3}$ d-fine GmbH, Bavariafilmplatz 8, 82031 Gr\"unwald, Germany}
\affiliation{$^{4}$ Mathematical Institute, University of Oxford,
Radcliffe Observatory Quarter, Woodstock Rd, Oxford OX2 6GG, UK}
\affiliation{$^{5}$ Max-Planck-Institut f\"ur Physik komplexer Systeme, N\"othnitzer Str. 38, 01187 Dresden, Germany}
\begin{abstract}
A new Bateman-Hillion solution to the Dirac equation for a relativistic Gaussian electron beam taking explicit account of the $4$-position of the beam waist is presented. This solution has a pure Gaussian form in the paraxial limit but beyond it contains higher order Laguerre-Gaussian components attributable to the tighter focusing. One implication of the mixed mode nature of strongly diffracting beams is that the expectation values for spin and orbital angular momenta are fractional and are interrelated to each other by \textit{intrinsic spin-orbit coupling}. Our results for these properties align with earlier work on Bessel beams [Bliokh \textit{et al.} Phys. Rev. Lett. \textbf{107}, 174802 (2011)] and show that fractional angular momenta can be expressed by means of a Berry phase. The most significant difference arises, though, due to the fact that Laguerre-Gaussian beams naturally contain Gouy phase, while Bessel beams do not.
We show that Gouy phase is also related to Berry phase and that Gouy phase fronts that are flat in the paraxial limit become curved beyond it.
\end{abstract}
\pacs{41.85.-p, 03.65.Pm, 03.65.Vf,42.50.Tx}
\title{Fractional Angular Momenta, Gouy and Berry phases in Relativistic Bateman-Hillion-Gaussian Beams of Electrons }
{\textit{Introduction}.---} In the past decade, there has been considerable progress
towards solving the Dirac equation (DE) for the purpose of unveiling detailed properties
of electron vortex beams carrying both spin and orbital angular momenta. The earliest of this work has modelled Bessel beams
as a linear superposition of
Dirac or Dirac-Volkov~\cite{Bliokh:11, Hayrapetyan-Karlovets} plane waves
in contrast to non-relativistic Laguerre-Gaussian (LG)~\cite{Bliokh:07}
and Bessel beams
\cite{Schattschneider:11} acting as solutions to paraxial and non-paraxial wave equations, respectively.
More recently, the attention has been focused on
investigating symmetry properties of relativistic electrons
to better understand the nature of their vortex formation \cite{Bliokh:17,Karlovets:18}
and to construct other types of wave packets~\cite{Karlovets:18,BB}
(see also the debate in Refs.~\cite{BB,SMB,BB-comment}).
A 3-vector position in space is not form invariant under Lorentz transformations. For fully relativistic calculations it is therefore necessary to replace points using point events (4-positions) so that a beam front with velocity u that reaches the waist at time T will have time $t = T + \xi_B / u$ for any other point at a distance $\xi_B$ before $(\xi_B<0)$ or after $(\xi_B>0)$ the waist. The use of point events as energy-momentum waypoint markers has precedent in the derivation of Lienard-Wiechert potentials \cite{SR} and constraint mechanics \cite{AK,CA}. Beam solutions that incorporate a 4-position beam waist have been shown to reduce to traditional beam models \cite{RD1, RD2, RD3} in the non-relativistic limit. Beam solutions that zero out the beam waist as the origin of the beam coordinate system thus carry a hidden non-relativistic 3-vector.
In a typical electron microscope assembly,
a Gaussian beam passes from an electron gun
to a magnetic lens that focuses it to a small waist diameter. Assuming a modest current
of energetic electrons ($\sim$ 100keV), the average separation between them will be large
enough so that electron repulsion can be ignored. Under these conditions, the expected
diameter of the beam waist will be about a hundred times the wavelength of the electrons
unless corrective measures are taken to reduce the strong spherical and chromatic
aberration, which is a normal feature of magnetic lenses \cite{RE}.
Such transmission electron microscopes are so far the only tool
to produce electron vortex beams~\cite{EVB-exp}, which are nowadays widely used in various physical setups~\cite{review-EVB}. This includes vortices in external fields~\cite{EVB-in-external-fields},
scattering~\cite{EVB-with-scattering}, atomic
processes~\cite{EVB-with-radiation} and indicates a further possible application for electron beams to trap
(sub)nanoparticles in a close analogy
with optical trapping via light vortices~\cite{manipulation-via-twist}.
Over the course of last two-three decades, advances in optical instrumentation have made it possible to produce light vortex beams, which typically carry an integer number of orbital angular momentum (OAM) quanta
\cite{review-twisted-light,twisted-light-other}.
This led to
an elucidation of the nature of \textit{fractional} OAM (fOAM) including its clear understanding in terms of Berry phase \cite{FOAM,FOAM-exp}.
Our intention here is to build on existing work both on optical and electron vortices to calculate fractional -- spin and orbital -- angular momenta
for a tightly focused relativistic Gaussian beam of electrons, which contains higher order LG modes. Taking proper account of the 4-position of the beam waist, we evaluate the total energy-momentum of relativistic electrons and derive a fundamental property called the Gouy phase
\cite{gouy1,gouy2}, accumulated along the propagation direction as a result of the transverse localization of the beam~\cite{feng2001-yang2006}. We demonstrate the interplay of the Gouy phase and the fractional angular momenta, quantified by an intrinsic spin-orbit interaction (SOI) term,
with the Berry geometric phase. In view of this, we parameterize the total \textit{shift} of the Gouy phase in a relativistic Gaussian beam, from far field to far field, in terms of the Berry phase. We shall also calculate beyond the paraxial limit to show that Gouy phase fronts that have generally been thought to be planar are actually curved.
{\textit{Exact Bateman-Hillion-Gaussian beams from Dirac equation}.---}
In order to achieve our goal, we develop a theory that incorporates Lorentz invariance of relativistic Gaussian solutions of wave equations known
as Bateman-Hillion (BH) solutions \citep{BA-PH,BS-APK} that also take proper account of the beam waist position. We use the BH ansatz to solve the Klein-Gordon equation (KGE) then convert it to a solution of the full DE. Our solutions build on existing approaches for relativistic LG beams~\cite{Karlovets:18,BB}. It must be recognized, however, that our inclusion of the beam waist 4-position matters since it brings a resolution to a well known problem of BH Gaussian beams that electrons move in both directions \cite{BB} additional to certain other benefits that include Lorentz invariance and correspondence to accepted beam models in the non-relativistic and paraxial limits.
Consider a beam of electrons each having a mass of $m$, a $4$-position
$x_\mu = \left( ct, - \mathbf{r} \right)$ and a $4$-momentum
$p_\mu = \left( E / c , - \mathbf{p} \right)$, where $\mu = \{0,1,2,3 \}$
and $c$ is the speed of light in vacuum. It follows the particle has an
energy $E$ and 3-momentum $\mathbf{p}$ at world time $t$ and world position
$\mathbf{r}$. Let us also assume that each electron passes through a beam
waist with a $4$-position $X_\mu = \left(cT, - \mathbf{R} \right)$, where
$\mathbf{R}$ is the world position of the beam waist at world time $T$.
Note that we introduce two different time coordinates since the equality $T=t$
is not form preserving under Lorentz transformations.
The dynamics of each of the electrons in the beam is then
given by the DE expressed as~\cite{PAMD}
\begin{equation} \label{eq: dirac_eq}
(\gamma^\mu \hat{p}_\mu - m c) \, \Psi_{\pm} (x_\mu, X_\mu) \,\, = \,\, 0 \, .
\end{equation}
Here,
$\hat{p}_\mu = \imath \hbar \partial / \partial x^\mu$ is
the canonical $4$-momentum operator, $\gamma^\mu$ are the
Dirac matrices, $\hbar$ is the reduced Planck's constant,
while $\Psi_{\pm}(x_\mu, X_\mu)$ represents a bi-spinor wave function
of each individual electron, where ``$\pm$'' stand for
spin-up and -down states, respectively. Equation (\ref{eq: dirac_eq})
also has two negative-energy bi-spinor solutions that we will not
consider since they describe anti-particles.
The DE~(\ref{eq: dirac_eq}) can be simplified using the substitution
\begin{eqnarray} \label{eq: bispinor}
\Psi_\pm \, (x_\mu, X_\mu) \,\, = \,\, \left[ \begin{array}{c}
(\hat{p}_0+m c) \chi_{\pm} \\
\sigma_i \hat{p}_i \chi_{\pm}
\end{array} \right]
\Psi \, (x_\mu, X_\mu)
\end{eqnarray}
with $\chi_+ = \left( 1 \,\, 0 \right)^T$, $\chi_- = \left( 0 \,\, 1 \right)^T$
being two-component spinors, $\Psi (x_\mu, X_\mu)$ a scalar function, $\sigma_i$ the Pauli matrices
($i = \{ 1 , 2 , 3 \}$), and where the inner product
$\sigma_i \hat{p}_i \equiv \sigma_1 \hat{p}_1 + \sigma_2 \hat{p}_2 + \sigma_3 \hat{p}_3$ and the superscript T means ``transposed''.
Combining Eqs.~(\ref{eq: dirac_eq}) and (\ref{eq: bispinor}) leads
to the KGE for $\Psi$
\begin{equation} \label{eq: KG}
(\hat{p}_\mu \hat{p}^\mu - m^2c^2)\Psi \, (x_\mu, X_\mu) \,\, = \,\, 0 \, .
\end{equation}
The clear understanding here is that the bi-spinor solution $\Psi_{\pm}$ satisfies
the DE provided that the scalar function $\Psi$ acts as a solution of the KGE, a procedure that is also applied in Ref.~\cite{BB} for constructing relativistic wave packets with non-zero OAM.
The solution to the KGE~(\ref{eq: KG}) for the Gaussian beam has been developed
in two recent papers \cite{RD1, RD2}. For our purposes, we start from the BH
based ansatz
\begin{equation} \label{eq: bateman_ansatz}
\Psi \, (x_\mu, X_\mu) \,\, = \,\, C \Phi(\xi_1,\xi_2, \xi_3+\xi_0)\exp \left( -\imath k_\mu^\prime x^\mu \right),
\end{equation}
where $C$ is a constant number, $\xi_\mu = x_\mu - X_\mu$ is the $4$-position of the electron
relative to the beam waist, $k_\mu^\prime$ is the wave $4$-vector and $\Phi(\xi_1,\xi_2, \xi_3+\xi_0)$ is a scalar function incorporating non-trivial vortex and both space- and time-dependent phase structures of the electron beam.
Following Ref \cite{RD2}, we insert the BH ansatz
(\ref{eq: bateman_ansatz}) into the KGE~(\ref{eq: KG}) and
solve the resulting equation for $\Phi( \xi_\rho, \xi_\phi, \xi_3 + \xi_0)$ by utilizing the `radial' $\xi_\rho = \sqrt{\xi_1^2+\xi_2^2}$ and `azimuthal'
$\xi_\phi = \arctan (\xi_2 / \xi_1)$ coordinates. This leads to the LG solution
\begin{eqnarray}
\label{eq: laguerre_gauss_solution}
\Phi_{lp} = a_{\ell p}\left(
\frac{\sqrt{2}\xi_\rho}{|w|}\right)^{|l|}
\!\!\! L_p^{|l|}\left(
\frac{2 \xi_\rho^2}{|w|^2}\right)
\exp \left( \! -\frac{ \xi_\rho^2}{w_0 w} +\imath l \xi_{\phi}- \imath
g_{lp} \right) \,
\end{eqnarray}
with $a_{\ell p} \equiv \sqrt{ 2p! / \left[\pi |w|^2(p+|l|)!\right]}$ and
$k_\mu^\prime k^{\mu \prime} = m^2 c^2/\hbar^2$.
Furthermore, $L_p^{|l|}$ represent the generalized Laguerre polynomials in terms of the radial, $p \geq 0$, and the azimuthal indices, $-\infty < l < \infty$,
\begin{equation} \label{eq: gouy_phase_lp}
g_{lp} \,\, = \,\, (1+|l|+2p)\arctan [ 2 \kappa (\xi_3+\xi_0) ]
\end{equation}
is the Gouy phase and $\kappa = [w_0^2(k_3+k_0)]^{-1}$.
The solution (\ref{eq: laguerre_gauss_solution}) also contains the complex parameter
\begin{equation} \label{eq: complex_beam_parameter}
w \,\, = \,\, w_0\left[1+ 2\kappa (\xi_3+\xi_0)\imath \right] \, ,
\end{equation}
whose modulus, $|w|$, characterizes the beam radius, such that $w_0$ represents the beam radius at the waist. Note that the Gouy phase (\ref{eq: gouy_phase_lp}) depends on both the \textit{space and time} variables in sharp contrast to only a time-dependent Gouy phase of Ref. \cite{Karlovets:18}. Our setup considers a beam confined in the transverse $(x, y)$-plane and propagating in the longitudinal direction, chosen to be the $z$-axis, meaning that the beam spreads as a function of $z$ and time~$t$. This confinement in two dimensions can be experimentally designed by spherical lenses complementing the one-dimensional case realized by cylindrical lenses. The difference of expressions in round parentheses ($1$ in Eq. (\ref{eq: gouy_phase_lp}) and $3/2$ of Ref. \cite{Karlovets:18}) is a consequence of an altered scenario, when the beam is confined in three dimensions \cite{Karlovets:18}, a situation which is yet to be experimentally generated.
Equations (\ref{eq: bispinor}), (\ref{eq: bateman_ansatz}) and (\ref{eq: laguerre_gauss_solution}) constitute an
exact BH solution to the DE for LG modes of the electron beam. We shall, however, focus our attention only on the Gaussian wave packets leaving a treatment of
more general LG modes for future work as relevant physical conclusions can be drawn already from \textit{relativistic Bateman-Hillion-Gaussian (BHG) beams}, i.e., when $\ell = p = 0$.
Inserting, therefore, $\Psi =\Psi_{00} = C \Phi_{00}\exp \left( -\imath
k_\mu^\prime x^\mu \right)$ into Eq.~(\ref{eq: bispinor}) leads to
\begin{equation} \label{eq: bispinor_explicit}
\Psi_\pm = \left( \begin{array}{c}
b\chi_{\pm} \\
\pm \hbar k_3 \chi_{\pm}
\end{array} \right) \Psi_{00}
+ \left( \begin{array}{c}
\hbar \kappa \chi_{\pm} \\
\pm \hbar \kappa \chi_{\pm}
\end{array} \right) \Psi_{01}
\pm \left( \begin{array}{c}
0 \\
\frac{\sqrt{2} \hbar}{w_0} \chi_{\mp}
\end{array} \right) \Psi_{ 10} \, ,
\end{equation}
where $k_\mu = (k_0^\prime +\kappa, 0, 0,-k_3^\prime +\kappa)$ will be referred to as the effective wave vector of the electron beam,
\begin{eqnarray}
\nonumber
\Psi_{00} & = &
\frac{\sqrt{2} C}{\sqrt{\pi} w}
\exp \left( - \frac{\xi_\rho^2}{w_0 w} \right)
\exp \left( - \imath k_\mu^\prime x^\mu \right) \, ,
\\
\nonumber
\Psi_{01} & = &
\frac{\sqrt{2} C}{\sqrt{\pi} w}
\left( \frac{\left|w\right|^2}{w^2} - \frac{2 \xi_\rho^2}{w^2} \right)
\exp \left( - \frac{\xi_\rho^2}{w_0 w} \right)
\exp \left( - \imath k_\mu^\prime x^\mu \right) \, ,
\\
\nonumber
\Psi_{10} & = &
\frac{2 C \xi_\rho}{\sqrt{\pi} w^2}
\exp \left( - \frac{\xi_\rho^2}{w_0 w} + \imath \xi_\phi \right)
\exp \left( - \imath k_\mu^\prime x^\mu \right) \, ,
\end{eqnarray}
$b \equiv \hbar k_0 + mc$. We have omitted the arguments $(x_\mu, \xi_\mu)$ for brevity. Equation (\ref{eq: bispinor_explicit}) is the exact solution to the DE
for the lowest order (Gaussian) bi-spinor mode of electron beam.
In the paraxial and semi-relativistic limit
($k_3 \ll k_0$, $k_0 \simeq m c$), we recover Barnett's
solution~\cite{SMB}.
The constant $C$ in Eq.~(\ref{eq: bateman_ansatz}) can be determined
from the Dirac current $j_\mu^\pm = \Psi_{\pm}^{\dagger} \gamma_0 \gamma_\mu
\Psi_{\pm}$ using the normalization condition,
$ \langle j_\mu^\pm \left(\xi_1 , \xi_2 , \xi_3 + \xi_0 \right) \rangle = k_\mu/k_0 $~\cite{footnote1},
which implies $C = \left[2(\hbar k_0 b +\hbar^2 \kappa^2)\right]^{-1/2} $
and gives the expected velocity of the beam front $\xi_B/\xi_0 = k_3/k_0 $, where $\xi_B = \xi_3 \sqrt{1 + \xi_\rho^2 / \xi_3^2}$ is the distance traveled.
We now eliminate the elapsed time $\xi_0$ in Eq.~(\ref{eq: gouy_phase_lp}) using the previous expression to give
\begin{eqnarray}
\label{eq: gouy_phase}
g_{lp} & = & (1+|l|+2p)\arctan \left[ \frac{k_3 \xi_3 +k_0 \xi_B}{\xi_R (k_3+k_0)} \right],
\end{eqnarray}
for $\xi_B \gg \xi_R$ where $\xi_R = \frac{1}{2}k_3 w_0^2$ represents the Rayleigh range. To recover the standard LG beam formulae we shall instead set $\xi_0 = k_0 \xi_3 /k_3$, consistent with the paraxial approximation $\xi_3 \simeq \xi_r$; inserting this into Eqs.~(\ref{eq: gouy_phase_lp}) and (\ref{eq: complex_beam_parameter}) yields the traditional paraxial beam formulae
\begin{eqnarray}
\label{eq: gouy_phase_lp_spatial}
g_{lp} & = & (1+|l|+2p)\arctan \left( \xi_3/\xi_R \right),
\\
|w| & = & w_0\sqrt{1+ \left( \xi_3/\xi_R \right)^2} \, .
\end{eqnarray}
The beam radius $|w|$ determines an important relation
\begin{equation} \label{eq: divergence_angle}
\sin \theta_D \,\, = \,\, \lim\nolimits_{\xi_3 \rightarrow \infty}\left(|w|/\xi_3\right)
\,\, = \,\, 2 \, /\left(w_0k_3\right)
\end{equation}
between the angular divergence of the beam, $\theta_D$, the longitudinal component of the wave vector and the beam radius at the waist. Figure~\ref{fig:1} shows a comparison of curved non-paraxial to planar paraxial Gouy phase fronts calculated using Eqs. (\ref{eq: gouy_phase}) and (\ref{eq: gouy_phase_lp_spatial}), respectively.
\begin{figure}[htp]
\centering
\includegraphics[width=8.2 cm]{curved_fronts.eps}
\caption{A comparison of paraxial (flat) and beyond-paraxial (curved) Gouy phase fronts for a $100\;\mathrm{keV}$ electron beam. The
beam radius of $5\;\mathrm{pm}$ is set smaller than occurs in practice to accentuate the difference.}
\label{fig:1}
\end{figure}
In order to estimate the magnitude of terms in the solution~(\ref{eq: bispinor_explicit}), we evaluate the averaged probability density:
\begin{equation}
\nonumber
\langle |\Psi_\pm|^2 \rangle \,\, = \,\, (b^2 + \hbar^2 k_3^2) \langle |\Psi_{00}|^2
\rangle + 2\hbar^2 \kappa^2 \langle |\Psi_{01}|^2 \rangle + \frac{2\hbar^2}{w_0^2}\langle |\Psi_{10}|^2 \rangle \, ,
\end{equation}
where we use $\langle |\Psi_{lp}|^2 \rangle = C^2$ to confirm
$\langle j_0 \rangle = \langle |\Psi_\pm|^2 \rangle = 1$.
The cross terms vanish here since the products of the bi-spinors are identically zero. It now follows that
\begin{equation}
2\hbar^2 \kappa^2 \langle |\Psi_{01}|^2 \rangle
/\langle |\Psi_\pm|^2 \rangle \,\, = \,\,
2 \hbar^2 \kappa^2 C^2 \,\, < \,\, 10^{-8}
\end{equation}
owing to current imperfections in magnetic lenses that limit $w_0$
to values of about $50\;\mathrm{pm}$ or greater. For our further purposes,
it is therefore reasonable to drop the negligible term
in Eq. (\ref{eq: bispinor_explicit}) giving $\Psi_\pm(x_\mu, \xi_\mu)$ to be
\begin{equation} \label{eq: bispinor_useful}
\Psi_\pm \, = \, \left( \begin{array}{c}
b\chi_{\pm} \\
\pm \hbar k_3 \chi_{\pm}
\end{array} \right) \Psi_{00}
\pm \left( \begin{array}{c}
0 \\
\frac{\sqrt{2} \hbar}{w_0} \chi_{\mp}
\end{array} \right) \Psi_{10}
\end{equation}
and $C \simeq \sqrt{1 / (2\hbar k_0 b) }$ holding to a very high degree of accuracy. Equation
(\ref{eq: bispinor_useful}) completes the solution of the DE for the relativistic BHG beam of electrons
and enables evaluation of linear and angular momenta in the beam.
\textit{Momentum and energy of the beam.---}
Some of the relativistic beam solutions, such as Bessel \cite{Bliokh:11} and Volkov-Bessel modes \cite{Hayrapetyan-Karlovets}, although reasonable in other respects, actually
carry an infinite beam energy. BHG solutions, similar to LG modes, do not share this `problem'. In particular, the expectation values for the $4$-momentum in a beam are determined to be
$p_\mu = \langle \Psi_{\pm}^{\dagger} \hat{p}_{\mu} \Psi_{\pm} \rangle = \hbar k_\mu$.
Inserting this result into the dispersion relation $k_\mu^\prime k^{\mu \prime} = m^2 c^2/\hbar^2$ we
obtain the averaged total energy $E = p_0 c$ of a single Dirac particle in a Gaussian beam
\begin{equation} \label{eq: expected_total_energy}
E \,\,
= \,\, +c\sqrt{p_\rho^2 + p_3^2 + m^2c^2} \, ,
\end{equation}
where $p_\rho^2 = \langle \Psi_{\pm}^{\dagger} \hat{p}_\rho^{2} \Psi_{\pm}\rangle = 2\hbar^2/w_0^2$ and $p_3^2 =\hbar^2 k_3^2$ denote the square values of the radial and axial momentum, respectively.
Equation (\ref{eq: expected_total_energy}) has been obtained elsewhere
for a Klein-Gordon particle in a Gaussian beam, see Ref. \cite{RD2},
which also connects the
stored kinetic energy in the beam to the Bohm potential \cite{PRH1}.
\textit{Fractional angular momenta and non-trivial phase structure of the beam.---}
Expected values for angular momenta of an
electron parallel to the beam axis can be calculated if the explicit forms of the spin angular momentum (SAM),
$\hat{S}_{3} = \left(\hbar/2 \right) \textrm{diag} \left( \sigma_3, \sigma_3 \right)$, and OAM operators,
$\hat{L}_{3} = \left(\hbar/\imath \right)
\left( \xi_1 \partial/\partial x_2 - \xi_2 \partial/\partial x_1\right)$, are employed. Direct evaluation of the corresponding integrals leads to
\begin{equation} \label{eq: expected_am}
\langle \Psi_{\pm}^{\dagger} \hat{S}_3 \Psi_{\pm} \rangle \,\, = \,\, \left( 1 - \Delta \right)s\hbar \, , \quad \langle \Psi_{\pm}^{\dagger} \hat{L}_3 \Psi_{\pm} \rangle \,\, = \,\, \Delta s \hbar \, ,
\end{equation}
where $s=\pm \frac{1}{2}$, while
\begin{equation} \label{eq: expected_fam}
\Delta \,\, \equiv \,\, \Delta \left( \theta_D \right) \,\, = \,\,
\left( 1 - m c^2/E \right) \sin^2 \theta_D
\end{equation}
represents the intrinsic SOI term. There is little need for us to dwell on these expressions
since they look like a special case ($\ell = 0$) of more general relations \textit{but} derived from relativistic Bessel-beam solutions to the DE~\cite{Bliokh:11} (c.f.,~\cite{footnote2}).
Nonetheless, there are two subtle differences in the SOI terms for Bessel and BHG beams. (i) The Bessel-type solutions naturally contain non-paraxiality as a key feature, which is quantified by means of a parameter called the opening angle $\theta_0 = \arcsin \left(\sqrt{k_1^2+k_2^2}/k\right)$, similarly appearing in the sine function \cite{Bliokh:11}. In our case of Eq. (\ref{eq: expected_fam}), we have the divergence angle instead, which carries \textit{more} information about the beam radius (at the waist) via Eq.~(\ref{eq: divergence_angle}). (ii) It is true that in the non-relativistic regime ($k \rightarrow 0$) the SOI terms vanish for both types of beams. Due to the transverse localization of Gaussian modes, moreover, the first term of the SOI parameter depends \textit{explicitly} on the beam radius at the waist by virtue of Eq. (\ref{eq: expected_total_energy}). Combining these yields an explicit connection between the SOI term and the beamwidth, which can be simplified to
\begin{eqnarray}
\Delta & \approx & 2 \hbar^2/\left(m^2 c^2 w_0^2\right)
\left[1 + 2/ \left( w_0^2 k_3^2 \right) \right]
\end{eqnarray}
for existing experimental setups, i.e., when $\hbar / \left(m c w_0\right) \ll 1$ for $w_0 > 50$ pm.
As seen, the SOI term vanishes for large beam radii \textit{independent} of the longitudinal momentum $\hbar k_3$, or else,
for very small divergence angles (c.f., Eqs.~\ref{eq: divergence_angle} and \ref{eq: expected_fam}).
Another implication of expected angular momenta is that the focusing of relativistic Gaussian modes with bi-spinor structure will cause a fraction of angular momentum, $\Delta s \hbar$, to convert from the expected SAM to OAM and vice versa (see the left panel of Fig.~\ref{fig:2} for $s=1/2$). At the same time, the total angular momentum (TAM) of the beam, $\langle \Psi_{\pm}^{\dagger} \hat{J}_3 \Psi_{\pm} \rangle = s \hbar$ with $\hat{J}_3 = \hat{L}_3 + \hat{S}_3$, is conserved along the propagation direction as depicted by the straight green dash-dotted line. As the divergence angle increases from $0$ to $\pi/2$, the stake $\Delta s \hbar$ starts disappearing from the SAM and reappearing as fOAM, resulting in the \textit{fractional spin-to-orbit conversion}. For $\theta_D = \pi / 2$, the fractional SAM (fSAM) and fOAM parts contribute to the conserved TAM with the shares $m c^2 s \hbar / E$ and $\left( 1 - m c^2 / E \right)s \hbar$, respectively. Depending on the electron kinetic energy, the spin-to-orbit conversion occurs either fully (for $0.5\;\mathrm{MeV}$), so that the shares by angular momenta are equal, or partially (e.g., for $0.1\;\mathrm{MeV}$) due to the gradual decrease of the SOI term.
\begin{figure}[htp]
\centering
\includegraphics[width=4.2 cm]{AM.eps}
\includegraphics[width=4.2 cm]{GP.eps}
\includegraphics[width=4.2 cm]{AM_lowE.eps}
\includegraphics[width=4.2 cm]{GP_lowE.eps}
\caption{Fractional angular momenta (left panel), the total Gouy
phase shift and Berry phase (right panel) as functions
of the divergence angle
for spin-up electrons with
a kinetic energy of $0.5\;\mathrm{MeV}$ (upper panel) and
of $0.1\;\mathrm{MeV}$ (lower panel).}
\label{fig:2}
\end{figure}
Variations in the beamwidth over the infinite length of the beam axis represent an adiabatic cycle in the sense that the beam divergence beyond the waist undoes the convergence occurring ahead of the waist. Such an adiabatic cycle, being inherently characterized by the divergence angle, contributes in the SOI due to the tight focusing of the BHG beam by means of non-zero higher mode bi-spinors proportional to $\Psi_{01}$ and $\Psi_{10}$ (see Eq.~(\ref{eq: bispinor_explicit})). Over the course of such a cyclic adiabatic process, the beam accumulates also a Berry (geometric) phase from these higher mode bi-spinors. The Berry phase can be evaluated exactly in the same manner as for relativistic Bessel beams by making use of the so-called Foldy-Wouthuysen momentum representation \cite{Bliokh:11}. Following Bliokh {\it et al}. we may therefore write the Berry phase as gained due to the non-trivial fOAM as $\gamma_B = 2 \pi \Delta s$.
For a tightly focused BHG beam, the expected Gouy phase
\begin{equation}
\label{Gouy_phase_shift}
\bar{g}_{T} \,\, = \,\,
\sum\nolimits_{lp} \langle \Psi_{\pm}^{\dagger} g_{lp} \Psi_{\pm} \rangle
\,\, = \,\,
\left[1+ 0.5 \, \Delta(\theta_D)\right] g_{00}
\end{equation}
is larger than would be the case for a pure Gaussian beam owing to the SOI. The total Gouy phase shift from far field to far field in the beam is therefore given by
\begin{equation}
\mu_T \,\, =\,\,
\lim_{\xi_3 \rightarrow \infty} (\bar{g}_{T}) - \lim_{\xi_3 \rightarrow -\infty}(\bar{g}_{T})
\,\, = \,\, \pi + 0.5 \, |\gamma_B| \, ,
\end{equation}
showing that the Gouy phase as well as the fOAM increase in direct proportion to the Berry phase, while the fSAM decreases. The right panel of Fig.~\ref{fig:2} illustrates the growth of both
phases dependent on the divergence angle and the electron kinetic energy. The Berry phase rises as the SOI term increases and attains its maximum value along with the Gouy phase at $\theta_D = \pi / 2$.
The presence of non-vanishing expectation values for the transverse components of the linear momentum implies that the relevant phase angle to consider in this case is not the azimuth phase associated with the fOAM but the Gouy phase~\cite{feng2001-yang2006}.
It thus appears from our results that the Berry phase for this cycle is not the baseline Gouy phase for the paraxial beam but the fractional increase in the Gouy phase above the baseline value that can be explicitly attributed to the adiabatic cycle. Our evidence for this assertion being the direct proportionality between the Gouy phase and the fOAM value is shown in Eq. (\ref{Gouy_phase_shift}).
{\textit{Discussion}.---} Dirac published his quantum theory of the electron in 1928 \cite{PAMD}. Some ninety years later, it is now being extensively applied to understand the effects of transverse localization on electron beams. Significant progress has been made in this direction by deriving (either exact or approximated) solutions to the DE for Bessel \cite{Bliokh:11,Hayrapetyan-Karlovets} and LG beams \cite{Karlovets:18,BB,SMB}. In our paper, we derive a new and exact BHG solution to the DE, which possesses the full relativistic nature of the beam propagation as it takes into account the $4$-position of the beam waist~\cite{footnote3}. This has enabled us to calculate the energy-momentum, fractional angular momenta and the Gouy phase in the beam and demonstrate the presence of the intrinsic SOI leading to the fractional spin-to-orbit conversion. Remarkably, both the fOAM and the Gouy phase are directly proportional to the Berry phase. This both corroborates the earlier finding of Bliokh {\it et al.} \cite{Bliokh:11} for fOAM and takes it a step further with our inclusion of the Gouy phase into the evolving understanding of the role geometric phase has to play in relativistic electron beams, predicted earlier for a non-relativistic Gaussian beam~\cite{SM} and demonstrated recently for optical rays~\cite{Malhotra:18}. Additionally, we have found that Gouy phase fronts that have traditionally been thought to be planar are in fact curved. This curvature is most apparent in the far fields of strongly diffracting beams.
\begin{thebibliography}{99}
\bibitem{Bliokh:11}
K. Y. Bliokh, M. R. Dennis, and F. Nori, {Phys. Rev. Lett.}
\textbf{107}, 174802 (2011).
\bibitem{Hayrapetyan-Karlovets}
A. G. Hayrapetyan, O. Matula, A. Aiello, A. Surzhykov, and S.
Fritzsche, {Phys. Rev. Lett.} \textbf{112}, 134801 (2014);
D. V. Karlovets, {Phys. Rev. A} \textbf{86}, 062102 (2012).
\bibitem{Bliokh:07}
K. Y. Bliokh, Y. P. Bliokh, S. Savel'ev, and F. Nori,
{Phys. Rev. Lett.} \textbf{99}, 190404 (2007).
\bibitem{Schattschneider:11}
P. Schattschneider and J. Verbeeck, {Ultramicroscopy} \textbf{111},
1461 (2011).
\bibitem{Bliokh:17}
K. Y. Bliokh, M. R. Dennis, and F. Nori,
{Phys. Rev. A} \textbf{96}, 023622 (2017).
\bibitem{Karlovets:18}
D. Karlovets, {Phys. Rev. A} \textbf{98}, 012137 (2018).
\bibitem{BB}
I. Bialynicki-Birula and Z. Bialynicka-Birula, {Phys. Rev.
Lett.} \textbf{118}, 114801 (2017).
\bibitem{SMB}
S. M. Barnett, {Phys. Rev. Lett.} \textbf{118}, 114802 (2017).
\bibitem{BB-comment}
I. Bialynicki-Birula and Z. Bialynicka-Birula,
{Phys. Rev. Lett.} \textbf{119}, 029501 (2017).
\bibitem{SR} M. Saleem and M. Rafique, \emph{Special Relativity Applications to Particle Physics and the Classical Theory of Fields} (Ellis Horwood,
1992).
\bibitem{AK} A. Komar, {Phys. Rev. D} \textbf{18}, 1887 (1978).
\bibitem{CA} H. W. Crater and P. Van Alstine, {Phys. Rev. D} \textbf{36}, 3007 (1987).
\bibitem{RD1}
R. Ducharme and I. G. da Paz, {Phys. Rev. A} \textbf{92}, 023853
(2015).
\bibitem{RD2}
R. Ducharme and I. G. da Paz, {Phys. Rev. A} \textbf{94}, 023822
(2016).
\bibitem{RD3} R. Ducharme, {Prog Electromagn Res M} \textbf{42}, 39
(2015).
\bibitem{RE}
R. Erni, M. D. Rossell, C. Kisielowski, and U. Dahmen, {Phys. Rev.
Lett.} \textbf{102}, 096101 (2009).
\bibitem{EVB-exp}
M. Uchida and A. Tonomura, {Nature} (London) \textbf{464}, 737
(2010); J. Verbeeck, H. Tian, and P. Schattschneider, {Nature}
(London) \textbf{467}, 301 (2010); B. J. McMorran, A. Agrawal, I. M.
Anderson, A. A. Herzing, H. J. Lezec, J. J. McClelland, and J.
Unguris, {Science} \textbf{331}, 192 (2011); J. Verbeeck, P.
Schattschneider, S. Lazar, M. Stöger-Pöllach, S. Löffler, A.
Steiger-Thirsfeld, and G. Van Tendeloo, {Appl. Phys. Lett.}
\textbf{99}, 203109 (2011);
V. Grillo, E. Karimi, G. C. Gazzadi, S. Frabboni, M. R. Dennis, and
R. W. Boyd, {Phys. Rev. X} \textbf{4}, 011013 (2014);
E. Mafakheri, A. H. Tavabi, P.-H. Lu, R. Balboni, F. Venturi, C. Menozzi, G. C. Gazzadi, S. Frabboni, A. Sit, R. E. Dunin-Borkowski, E. Karimi, and V. Grillo, {Appl. Phys. Lett.} \textbf{110}, 093113 (2017);
G. M. Vanacore, G. Berruto, I. Madan, E. Pomarico, P. Biagioni, R. J. Lamb, D. McGrouther, O. Reinhardt, I. Kaminer, B. Barwick, H. Larocque, V. Grillo, E. Karimi, F. J. Garc\'ia de Abajo, and F. Carbone, {Nat. Materials}
\textbf{18}, 573 (2019).
\bibitem{review-EVB}
K. Y. Bliokh, I. P. Ivanov, G. Guzzinati, L. Clark, R. Van Boxem,
A. B\'ech\'e, R. Juchtmans, M. A. Alonso, P. Schattschneider, F. Nori, and
J. Verbeeck, {Phys. Rep.} \textbf{690}, 1 (2017);
J. Harris, V. Grillo, E. Mafakheri, G. C. Gazzadi, S. Frabboni, R. W. Boyd,
and E. Karimi, {Nat. Phys.} \textbf{11}, 629 (2015);
S. M. Lloyd, M. Babiker, G. Thirunavukkarasu, and J. Yuan,
{Rev. Mod. Phys.} \textbf{89}, 035004 (2017).
\bibitem{EVB-in-external-fields}
K. Y. Bliokh, P. Schattschneider, J. Verbeeck, and F. Nori,
{Phys. Rev. X} \textbf{2}, 041011 (2012);
C. Greenshields, R. L. Stamps, and S. Franke-Arnold,
{New J. Phys.} \textbf{14}, 103040 (2012);
G. M. Gallatin and B. McMorran, {Phys. Rev. A} \textbf{86}, 012701 (2012);
M. Babiker, J. Yuan, and V. E. Lembessis, {Phys. Rev. A} \textbf{91}, 013806 (2015);
K. van Kruining, A. G. Hayrapetyan, and J. B. G\"otte,
{Phys. Rev. Lett.} \textbf{119}, 030401 (2017);
A. J. Silenko, P. Zhang, and L. Zou, {Phys. Rev. Lett.}
\textbf{119}, 243903 (2017); \textit{ibid.} \textbf{121}, 043202 (2018).
\bibitem{EVB-with-scattering}
I. P. Ivanov, {Phys. Rev. D} \textbf{83}, 093001 (2011); \textit{ibid.} \textbf{85}, 076001 (2012);
V. Serbo, I. P. Ivanov, S. Fritzsche, D. Seipt, and A. Surzhykov,
{Phys. Rev. A} \textbf{92}, 012705 (2015);
I. P. Ivanov, D. Seipt, A. Surzhykov, and S. Fritzsche,
{Phys. Rev. D} \textbf{94}, 076001 (2016);
I. P. Ivanov, N. Korchagin, A. Pimikov, and P. Zhang, {Phys. Rev. Lett.} \textbf{124}, 192001 (2020).
\bibitem{EVB-with-radiation}
O. Matula, A. G. Hayrapetyan, V. G. Serbo, A. Surzhykov, and S. Fritzsche,
{New J. Phys.} \textbf{16}, 053024 (2014);
V. A. Zaytsev, V. G. Serbo, and V. M. Shabaev, {Phys. Rev. A} \textbf{95}, 012702 (2017).
\bibitem{manipulation-via-twist}
D. G. Grier, {Nature}, \textbf{424}, 810 (2003);
E. G. Abramochkin, S. P. Kotova, A. V. Korobtsov, N. N. Losevsky,
A. M. Mayorova, M. A. Rakhmatulin, and V. G. Volostnikov,
{Las. Physics} \textbf{16}, 842 (2006);
K. Toyoda, K. Miyamoto, N. Aoki, R. Morita, and T. Omatsu,
{Nano Lett.}, \textbf{12}, 3645 (2012);
W. Brullot, M. K. Vanbel, T. Swusten, and T. Verbiest,
{Sci. Adv.} \textbf{2}, e1501349 (2016).
\bibitem{review-twisted-light}
L. Allen, M. W. Beijersbergen, R. J. C. Spreeuw, and
J. P. Woerdman, {Phys. Rev. A} \textbf{45}, 8185 (1992); L. Allen, M.
Padgett, and M. Babiker, {Prog. Opt.} \textbf{39}, 291 (1999);
G. Molina-Terriza, J. P. Torres, and L. Torner,
{Nat. Phys.} \textbf{3}, 305 (2007);
S. Franke-Arnold, L. Allen, and M. J. Padgett,
{Laser \& Photon. Rev.} \textbf{2}, 299 (2008);
K. Y. Bliokh and A. Aiello, {J. Opt.} \textbf{15}, 014001 (2013);
D. L. Andrews and M. Babiker, \textit{Angular Momentum of Light},
Cambridge University Press 2013;
M. J. Padgett, {Opt. Express} \textbf{25}, 11265 (2017).
\bibitem{twisted-light-other}
K. Y. Bliokh, M. A. Alonso, E. A. Ostrovskaya, and A. Aiello,
{Phys. Rev. A} \textbf{82}, 063825 (2010);
L. Marrucci, E. Karimi, S. Slussarenko, B. Piccirillo, E. Santamato, E.
Nagali, and F. Sciarrino, {J. Opt.}, \textbf{13}, 064001 (2011);
O. Matula, A. G. Hayrapetyan, V. G. Serbo, A. Surzhykov, and S. Fritzsche,
{J. Phys. B} \textbf{46}, 205002 (2013);
H. M. Scholz-Marggraf, S. Fritzsche, V. G. Serbo, A. Afanasev, and A. Surzhykov,
{Phys. Rev. A} \textbf{90}, 013425 (2014);
A. Aiello, P. Banzer, M. Neugebauer, and G. Leuchs,
{Nat. Photon.} \textbf{9}, 789 (2015);
M. Krenn, J. Handsteiner, M. Fink, R. Fickler, R. Ursin, M. Malik,
and A. Zeilinger, {Proc. Natl. Acad. Sci. U.S.A.}
\textbf{113}, 13648 (2016);
M. Erhard, R. Fickler, M. Krenn, and A. Zeilinger,
{Light: Sci. \& Applications} \textbf{7}, 17146 (2018).
\bibitem{FOAM}
M. V. Berry, {J. Opt. A} \textbf{6}, 259 (2004).
\bibitem{FOAM-exp}
J. B. Götte, K. O'Holleran, D. Preece, F. Flossmann, S. Franke-Arnold, S. Barnett, and M. Padgett, {Opt. Express} \textbf{16}, 993 (2008);
D. P. O'Dwyer, C. F. Phelan, Y. P. Rakovich, P. R. Eastham, J. G. Lunney, and J. F. Donegan, {Opt. Express}, \textbf{18}, 16480 (2010).
\bibitem{gouy1}
L. G. Gouy, {C. R. Acad. Sci. Paris} \textbf{110}, 1251 (1890);
L. G. Gouy, {Ann. Chim. Phys. Ser. 6} \textbf{24}, 145 (1891).
\bibitem{gouy2}
T. D. Visser and E. Wolf, {Opt. Commun.} \textbf{283}, 3371 (2010);
J. Yang and H. G. Winful, {Opt. Lett.} \textbf{31}, 104 (2006);
R. W. Boyd, {J. Opt. Soc. Am.} \textbf{70}, 877 (1980);
P. Hariharan and P. A. Robinson, {J. Mod. Opt.} \textbf{43}, 219 (1996);
S. Feng, H. G. Winful, and R. W. Hellwarth, {Opt. Lett.} \textbf{23}, 385
(1998);
D. Chauvat, O. Emile, M. Brunel, and A. Le Floch, {Am. J.
Phys.} \textbf{71}, 1196 (2003);
I. G. da Paz, P. L. Saldanha, M. C. Nemes, and J. G. Peixoto de Faria,
{New J. Phys.} \textbf{13}, 125005 (2011);
X. Pang, D. G. Fischer, and T. D.
Visser, {Opt. Lett.} \textbf{39}, 88 (2014).
\bibitem{feng2001-yang2006}
S. Feng and H. G. Winful, {Opt. Lett.} \textbf{26}, 485 (2001);
J. Yang and H. G. Winful, {Opt. Lett.} \textbf{31}, 104 (2006).
\bibitem{BA-PH}
H. Bateman, {Proc. London Math. Soc.} \textbf{7}, 77 (1909);
\textit{ibid.} \textbf{8}, 223 (1910); \textit{ibid.} \textbf{8}, 469 (1910);
P. Hillion, {J. Math. Phys.} \textbf{33}, 2749 (1992).
\bibitem{BS-APK}
I. M. Besieris and A. M. Shaarawi, {J. Electromagn. Waves and Appl.} \textbf{16}, 1047 (2002);
\textit{ibid.} {Opt. Express} \textbf{27}, 792 (2019);
A. P. Kiselev, {J. Math. Phys.} \textbf{41}, 1934 (2000);
\textit{ibid.} {Optics and Spectroscopy} \textbf{102}, 603 (2007).
\bibitem{PAMD} P. A. M. Dirac, {Proc. Roy. Soc.} \textbf{778}, 223 (1928).
\bibitem{footnote1}
Throughout the text,
$\langle ... \rangle \equiv
\int_{-\infty}^{+\infty} ... d\xi_1 d\xi_2$.
As $j_\mu\left(\xi_1 , \xi_2 , \xi_3 + \xi_0 \right)$ is a conserved
quantity, we expect and find that $\langle j_\mu \rangle$ is independent
of $\xi_3$ and $\xi_0$ even though no averaging over these
coordinates is carried out.
\bibitem{PRH1} P. R. Holland, {Found. Phys.} \textbf{45}, 134 (2015).
\bibitem{footnote2}
A similar SOI term (proportional to the absolute value of OAM) also appears elsewhere as a contribution to the spin part of the magnetic moment of some suitably constructed relativistic vortex wave packet [c.f. Eqs. (23) and (47) of Ref. \cite{Karlovets:18}].
\bibitem{footnote3} Such an approach could be developed also for other types of matter vortex beams~\cite{other_VB}.
\bibitem{other_VB}
A. G. Hayrapetyan, O. Matula, A. Surzhykov, and S. Fritzsche,
{Eur. Phys. J. D} \textbf{67}, 167 (2013);
A. G. Hayrapetyan and S. Fritzsche, {Phys. Scr.}
\textbf{T156}, 014067 (2013);
C. W. Clark, R. Barankov, M. G. Huber, M. Arif, D. G. Cory, and D. A. Pushin,
{Nature} \textbf{525}, 504 (2015);
K. Y. Bliokh and F. Nori, {Phys. Rev. B} \textbf{99}, 174310 (2019);
I. Rond\'on and D. Leykam, {J.\ Phys.: Condens. Matter} \textbf{32}, 104001 (2020).
\bibitem{SM} R. Simon and N. Mukunda, {Phys. Rev. Lett.} \textbf{70}, 880
(1993).
\bibitem{Malhotra:18}
T. Malhotra, R. Guti\'errez-Cuevas, J. Hassett, M. R. Dennis, A. N. Vamivakas, and M. A. Alonso, {Phys. Rev. Lett.} \textbf{120}, 233602 (2018).
\end{thebibliography}
\end{document}
\begin{document}
\title[Yang-Mills fields on the Schwarzschild black hole]{Instability
of infinitely-many stationary solutions of the $SU(2)$ Yang-Mills fields on the exterior of the Schwarzschild black hole}
\author{Dietrich H\"afner, C\'ecile Huneau}
\address{Universit\'e Grenoble-Alpes, Institut Fourier, 100 rue des
maths, 38610 Gi\`eres, France}
\email{[email protected]}
\email{[email protected]}
\maketitle
\begin{abstract}
We consider the spherically symmetric $SU(2)$ Yang-Mills fields on the
Schwarzschild metric. Within the so-called purely magnetic Ansatz
we show that there exists a countable number of stationary solutions which are all nonlinearly
unstable.
\end{abstract}
\setcounter{page}{1}
\pagenumbering{arabic}
\section{Introduction}
\subsection{General introduction}
We study the $SU(2)$ Yang-Mills equations on the Schwarzschild
metric, with spherically symmetric initial data fulfilling the so-called
purely magnetic Ansatz. This
equation has at least a countable number of stationary solutions. In \cite{GH}
the first author and S. Ghanem showed that the zero curvature
solution is stable within this Ansatz. In this paper we show that the other solutions of this set are
nonlinearly unstable.
Global existence for Yang-Mills fields on ${\mathbb R}^{3+1}$ was shown by
Eardley and Moncrief in a classical result, \cite{EM1} and
\cite{EM2}. Their result was then generalized by Chru\'sciel and
Shatah to general globally hyperbolic curved space-times in
\cite{CS}. Later, the hypotheses of \cite{CS} were weakened in
\cite{G1}.
The purely magnetic Ansatz excludes Coulomb type solutions and
reduces the Yang-Mills equations to a nonlinear scalar wave equation:
\begin{equation}
\label{SYM}
\partial_{t}^{2} {W}- \partial_{x}^{2} W+ \frac{(1- \frac{2m}{r}) }{r^2} W(W^2-1)=0.
\end{equation}
Strong numerical evidence of the existence of a countable number of stationary solutions $(W_n)_{n\in \nn}$ in the case of Yang-Mills equations coupled with Einstein equations with spherical symmetry was shown in \cite{cbh} (see also \cite{BRZ}).
It was then proved analytically, still in the coupled case, in \cite{SWY}. For the sake of completeness, we give an analytical proof of this fact (adapted from \cite{SWY}) in the appendix of this paper. The solution $W_n$
possesses $n$ zeros. The stationary solutions $W_0=\pm 1$ correspond to the zero curvature
solution. Linearizing around a stationary solution $W_n$ leads to the
linear operator
\begin{equation*}
{\mathcal A}_n=-\partial_x^2+\frac{(1- \frac{2m}{r}) }{r^2}(3W_n^2-1).
\end{equation*}
In \cite{BRZ} it was numerically observed for the first stationary solutions that ${\mathcal A}_n$ has $n$ negative
eigenvalues. In this paper we show analytically that ${\mathcal A}_n$ has at
least one negative eigenvalue for $n\ge 1$. An abstract result then
shows that this leads to a nonlinear instability. We will describe in
Section~\ref{Sec2} a general abstract setting for nonlinear one-dimensional
wave equations. This abstract setting is applied in
Section~\ref{Sec3} to the Yang-Mills equation.
\subsection{The exterior of the Schwarzschild black hole} The exterior
Schwarzschild spacetime is given by ${\mathcal M}={\mathbb R}_t\times
{\mathbb R}_{r>2m}\times S^2$ equipped with the metric
\begin{eqnarray*}
g &=& - (1 - \frac{2m}{r})dt^{2} + \frac{1}{ (1 - \frac{2m}{r})} dr^{2} + r^{2} d\theta^{2} + r^{2}\sin^{2} (\theta) d\phi^{2} \\
&=& N(-dt^2+d{x}^{2})+r^2d\sigma^2
\end{eqnarray*}
where
\begin{eqnarray}
N &=& (1 - \frac{2m}{r})
\end{eqnarray}
and $d\sigma^2$ is the usual volume element on the sphere. The coordinate
$x$ is defined by the requirement
\begin{equation*}
\frac{dx}{dr}=N^{-1}.
\end{equation*}
The coordinates $t,r, \theta, \phi$, are called Boyer-Lindquist coordinates. The
singularity $r=2m$ is a coordinate singularity and can be removed by
changing coordinates, see \cite{HE}. $m$ is the mass of the black hole. We will only
be interested in the region outside the black hole, $r>2m$.
\subsection{The spherically symmetric $SU(2)$ Yang-Mills equations on the Schwarz\-schild metric} \label{sphericallysymmetricYM}
Let $G = SU(2)$, the real Lie group of $2 \times 2$ unitary matrices
of determinant 1. The Lie algebra associated to $G$ is $su(2)$, the
antihermitian traceless $2 \times 2$ matrices. Let $\tau_{j}$, $j
\in \{1, 2, 3 \}$, be the following real basis of $su(2)$:
\begin{eqnarray*}
\tau_1=\frac{i}{2}\left(\begin{array}{cc} 0 & 1 \\ 1 &
0\end{array}\right),\quad
\tau_2=\frac{1}{2}\left(\begin{array}{cc} 0 & -1 \\ 1 &
0\end{array}\right),\quad
\tau_3=\frac{i}{2}\left(\begin{array}{cc} 1 & 0\\ 0 &
-1 \end{array}\right).
\end{eqnarray*}
Note that
\begin{eqnarray*}
[\tau_1,\tau_2]=\tau_3,\quad [\tau_3,\tau_1]=\tau_2,\quad [\tau_2,\tau_3]=\tau_1.
\end{eqnarray*}
We are looking for a connection $A$, that is a one form with values in the Lie algebra $su(2)$ associated to the Lie group $SU(2)$, which satisfies the Yang-Mills equations which are:
\begin{eqnarray}
\textbf{D}^{(A)}_{\alpha} F^{\alpha\beta} \equiv \nabla_{\alpha} F^{\alpha\beta} + [A_{\alpha}, F^{\alpha\beta} ] = 0, \label{eq:YM}
\end{eqnarray}
where $[.,.]$ is the Lie bracket and $F_{\alpha\beta}$ is the Yang-Mills curvature given by
\begin{eqnarray}
F_{\alpha\beta} = \nabla_{\alpha}A_{\beta} - \nabla_{\beta}A_{\alpha} + [A_{\alpha},A_{\beta}], \label{defYMcurvature}
\end{eqnarray}
and where we have used the Einstein raising indices convention with respect to the Schwarzschild metric. We also have the Bianchi identities which are always satisfied in view of the symmetries of the Riemann tensor and the Jacobi identity for the Lie bracket:
\begin{eqnarray}
\textbf{D}^{(A)}_{\alpha}F_{\mu\nu} + \textbf{D}^{(A)}_{\mu}F_{\nu\alpha} + \textbf{D}^{(A)}_{\nu} F_{\alpha\mu} = 0. \label{eq:Bianchi}
\end{eqnarray}
The Cauchy problem for the Yang-Mills equations is formulated as follows: given a Cauchy hypersurface $\Sigma$ in $M$, and a ${\mathcal G}$-valued one form $A_{\mu}$ on $\Sigma$, and a ${\mathcal G}$-valued one form $E_{\mu}$ on $\Sigma$ satisfying
\begin{eqnarray}
\label{YMconstraintsone}
\left.\begin{array}{rcl} E_{t} &=& 0, \\
\textbf{D}^{(A)}_{\mu}E^{\mu} &=& 0\end{array}\right\}
\end{eqnarray}
we are looking for a ${\mathcal G}$-valued two form $F_{\mu\nu}$ satisfying the Yang-Mills equations such that once $F_{\mu\nu}$ is restricted to $\Sigma$ we have
\begin{eqnarray}
F_{\mu t} = E_{\mu} \label{YMconstraintstwo}
\end{eqnarray}
and such that $F_{\mu\nu}$ corresponds to the curvature derived from the Yang-Mills potential $A_{\mu}$, i.e. given by \eqref{defYMcurvature}. Equations \eqref{YMconstraintsone} are the Yang-Mills constraint equations on the initial data.
Any spherically symmetric Yang-Mills potential can be written in the
following form after applying a gauge transformation, see \cite{FM}, \cite{GuHu} and \cite{W},
\begin{eqnarray}
\label{SPAA}
A &=& [ -W_{1}(t, r) \tau_{1} - W_{2}(t, r) \tau_{2} ] d\theta + [ W_{2}
(t, r) \sin (\theta) \tau_{1} - W_{1} (t, r) \sin (\theta) \tau_{2}] d\phi\nonumber\\
& + & \cos (\theta) \tau_{3} d\phi + A_{0} (t, r) \tau_{3} dt + A_{1} (t, r) \tau_{3} dr,
\end{eqnarray}
where $A_{0} (t, r) $, $A_{1} (t, r) $, $W_{1}(t, r)$, $W_{2}(t, r)$
are arbitrary real functions. We consider here a purely magnetic
Ansatz in which we have $A_0=A_1=W_2=0,\, W_1=:W$. The components of
the curvature are then
\begin{eqnarray*}
\left.\begin{array}{rcl} F_{\theta x} &=& W' \tau_{1},\\
F_{\theta t} &=& \dot{W} \tau_{1}, \\
F_{\phi x} &=& W' \sin (\theta) \tau_{2}, \\
F_{\phi t} &=&\dot{W} \sin (\theta) \tau_{2}, \\
F_{tx} &=& 0,\\
F_{\theta\phi} &=& ( W^{2} -1 ) \sin (\theta) \tau_{3}. \end{array}\right\}
\end{eqnarray*}
This kind of Ansatz is preserved by the evolution. Also, the principal
restriction is $A_0=A_1=0$. The constraint equations then impose that
$W_1$ is proportional to $W_2$, a case which can be reduced to
$W_2=0$. We refer the reader to \cite{GH} for details.
\subsection{The initial value problem for the purely magnetic Ansatz} \label{AnsatzforinitialdataYM}
We look at initial data prescribed on $t=0$ for which there exists a
gauge transformation such that, once applied to the initial data, the
potential $A$ can be written in this gauge as
\begin{equation}
\label{Ansatz}
\left.\begin{array}{rcl}
A_{t} (t=0) &=& 0, \\
A_{r} (t=0) &=& 0, \\
A_{\theta} (t=0) &=& -W_0(r)\tau_{1}, \\
A_{\phi} (t=0) &=& -W_0( r) \sin
(\theta) \tau_{2} + \cos (\theta) \tau_{3}, \end{array}\right\}
\end{equation}
and we are given in this gauge the following one-form $E_{\mu}$ on $t=0$:
\begin{equation}
\label{AnsatzE}\left.\begin{array}{rcl}
E_{\theta} (t=0) &=& F_{\theta t} (0) = W_1(r) \tau_{1}, \\
E_{\phi} (t=0) &=& F_{\phi t} (0) = W_1(r) \sin (\theta) \tau_{2}, \\
E_{r} (t=0) &=& F_{rt} (0) = 0, \\
E_{t} (t=0) &=& F_{tt} (t=0) = 0. \end{array}\right\}
\end{equation}
Notice that with this Ansatz the constraint equations \eqref{YMconstraintsone} are
automatically fulfilled:
\begin{eqnarray}
\label{constraintintheAnsatz}
\notag
( { \mathbf{D}^{(A)}}^{\theta} E_{\theta} + {\mathbf{D}^{(A)}}^{\phi} E_{\phi} + {\mathbf{D}^{(A)}}^{r} E_{r} ) (t=0)= 0.
\end{eqnarray}
The Yang-Mills equations now reduce to
\begin{equation}
\label{YMSW}
\left.\begin{array}{rcl} \ddot{W}-W''+PW(W^2-1)&=&0,\\
W(0)&=&W_0,\\
\partial_t W(0)&=&W_1,\end{array}\right\}
\end{equation}
where
\begin{equation*}
P=\frac{(1-\frac{2m}{r})}{r^2}.
\end{equation*}
It is easy to check that the following energy is conserved, see also \cite{GH},
\begin{equation*}
\mathcal{E}(W,\dot{W})=\int \dot{W}^2+(W')^2+\frac{P}{2}(W^2-1)^2 dx.
\end{equation*}
We denote by $\dot{H}^k=\dot{H}^k({\mathbb R}, dx)$ and
$H^k=H^k({\mathbb R},dx)$ the homogeneous and inhomogeneous Sobolev spaces
of order $k$, respectively.
\begin{definition}
\begin{enumerate}
\item We define the spaces $L^4_P$, resp.\ $L^2_P$, as the completion of
$C_0^{\infty}({\mathbb R})$ for the norm
\begin{eqnarray}
\Vert v\Vert_{L^4_P}^4:=\int P\vert v\vert^4 dx\quad
\mbox{resp.}\quad \Vert v\Vert_{L^2_P}^2:=\int P \vert v\vert^2 dx.
\end{eqnarray}
\item
We also define for $1\le k\le 2$ the space ${\mathcal H}^k$ as the completion
of $C_0^{\infty}({\mathbb R})$ for the norm
\begin{eqnarray}
\Vert u\Vert^2_{{\mathcal H}^k}=\Vert u\Vert_{\dot{H}^k}^2+\Vert
u\Vert_{L^4_P}^2.
\end{eqnarray}
\end{enumerate}
\end{definition}
We note that ${\mathcal H}^k$ is a Banach space which contains all constant
functions. It turns out that $\mathcal{E}:={\mathcal H}^1\times L^2$ is exactly the space
of finite energy solutions, see \cite{GH} for details. We then have \cite[Theorem 1]{GH}:
\begin{theorem}
\label{ThGEYM}
Let $(W_0,W_1)\in {\mathcal H}^2\times H^1$. Then there exists a unique strong solution of
\eqref{YMSW} with
\begin{eqnarray*}
W&\in&C^1([0,\infty);{\mathcal H}^1)\cap
C([0,\infty);{\mathcal H}^2),\\
\partial_tW&\in& C^1([0,\infty);L^2)\cap C([0,\infty);H^1),\\
\sqrt{P}(W^2-1)&\in&C^1([0,\infty);L^2)\cap C([0,\infty);H^1).
\end{eqnarray*}
\end{theorem}
We can
reformulate the above theorem in the following way:
\begin{corollary}
\label{Cor1}
We suppose that the initial data for the Yang-Mills equations are given,
after a suitable gauge transformation, by
\begin{eqnarray*}
\left.\begin{array}{rcl} A_t(0)&=&A_r(0)=0,\\
A_{\theta}(0)&=&-W_0\tau_1,\\
A_{\phi}(0)&=&-W_0\sin
\theta\,\tau_2+\cos\theta\,\tau_3,\\
E_{\theta}(0)&=&W_1\tau_1,\\
E_{\phi}(0)&=&W_1\sin\theta\,\tau_2,\\
E_r(0)&=&E_t(0)=0\end{array}\right\}
\end{eqnarray*}
with $(W_0,W_1)\in {\mathcal H}^2\times H^1$. Then, the Yang-Mills equation \eqref{eq:YM} admits a unique
solution $F$ with
\begin{eqnarray*}
F_{\theta x},\, \frac{1}{\sin\theta}F_{\phi x},\, F_{\theta t},\,
\frac{1}{\sin\theta}F_{\phi
t},\,\sqrt{P}\frac{1}{\sin\theta}F_{\theta\phi}&\in&
C^1([0,\infty);L^2)\cap C([0,\infty);H^1).
\end{eqnarray*}
\end{corollary}
\subsection{Energies}
We now introduce the Yang-Mills energy momentum tensor
\begin{equation*}
T_{\mu\nu}=\langle F_{\mu\beta},F_{\nu}^{\beta}\rangle-\frac{1}{4}g_{\mu\nu}\langle F_{\alpha\beta},F^{\alpha\beta}\rangle.
\end{equation*}
Here $\langle.,.\rangle$ is an Ad-invariant scalar product on the Lie algebra $su(2)$.
We have
\begin{equation*}
\nabla^{\nu}T_{\mu\nu}=0.
\end{equation*}
For a vector field $X^{\nu}$ we define
\begin{equation*}
J_{\mu}(X)=X^{\nu}T_{\mu\nu}
\end{equation*}
and the energy on the spacelike slice $\Sigma_t$
($\Sigma_{t_0}=\{t=t_0\}$) by
\begin{equation*}
E^{(X)}(F(t))=\int_{\Sigma_t}J_{\mu}(X)n^{\mu}d_{\Sigma_t}.
\end{equation*}
By the divergence theorem this energy is conserved if $X$ is
Killing. In particular
\begin{equation*}
E^{(\partial_t)}(F(t))=\int _{\Sigma_t}J_{\mu}(\partial_t)n^{\mu}d_{\Sigma_t}
\end{equation*}
is conserved. If $F$ is the curvature associated to $(W,\dot{W})$, then
\begin{equation*}
E^{(\partial_t)}(F(t))=\mathcal{E}(W,\dot{W}),
\end{equation*}
see \cite{GH} for details.
\subsection{Main result}
We first recall the following result, which is implicit in the paper
\cite{BRZ} of P. Bizo\'n, A. Rostworowski and A. Zenginoglu.
\begin{theorem}
\label{thstat}
There exists a decreasing sequence $\{a_n\}_{n\in \mathbb{N}^{\ge1}}$, $0<\dots<
a_n< a_{n-1}<\dots<a_1=\frac{1+\sqrt{3}}{3\sqrt{3}+5}$, and smooth stationary solutions $W_n$ of \eqref{YMSW}
with
\begin{equation*}
-1\le W_n\le 1,\quad \lim_{x\rightarrow -\infty}W_n(x)=a_n,\quad \lim_{x\rightarrow
\infty}W_n(x)=(-1)^n.
\end{equation*}
The solution $W_n$ has exactly $n$ zeros.
\end{theorem}
\begin{remark}
There is an explicit formula for the first stationary solution
(see \cite{BCC})
$$W_1 = \frac{c-\frac{r}{2m}}{\frac{r}{2m}+3(c-1)}, \quad c=\frac{3+\sqrt{3}}{2}.$$
This solution corresponds to $\lim_{x\rightarrow -\infty}W_1(x)=a_1=\frac{1+\sqrt{3}}{3\sqrt{3}+5}$.
\end{remark}
We give a detailed proof of this result in the appendix, where we
follow arguments of Smoller, Wasserman, Yau and McLeod. The above
solutions are all nonlinearly unstable:
\begin{theorem}[Main Theorem]
\label{Mainth}
For all $n\ge 1$ the solution $W_n$ of \eqref{YMSW} is unstable. More precisely, there
exist $\epsilon_0>0$ and a sequence $(W_{0,n}^m,W_{1,n}^m)$ with $\Vert
(W_{0,n}^m,W_{1,n}^m)-(W_n,0)\Vert_{\mathcal{E}}\rightarrow 0,\, m\rightarrow \infty$, but for all
$m$
\begin{equation*}
\sup_{t\ge 0}\Vert(W_n^m(t),\partial_tW_n^m(t))-(W_n,0)\Vert_{\mathcal{E}}\ge \epsilon_0>0.
\end{equation*}
\end{theorem}
\begin{remark}
We do not show in this paper that there is no stationary solution with $W(2m)>a_1$. Nor do we exclude the possibility that there exist solutions with an infinite number of zeros which tend to zero at infinity. Our main theorem does not apply to these two categories of hypothetical stationary solutions.
\end{remark}
For given $n$ we construct initial data from $W_n$ as in Section
\ref{AnsatzforinitialdataYM}. Let $F_{n}$ be the corresponding
curvature at time $t=0$. We obtain
\begin{corollary}
\label{corstat}
For all $n\ge 1$ the solution $F_n$ of \eqref{eq:YM} is
unstable. More precisely, there exist $\epsilon_0>0$ and a sequence of
initial data giving rise to the curvature $F_{0,n}^m$ with
\begin{equation*}
E^{(\partial_t)}(F_{0,n}^m-F_{n})\rightarrow 0,\quad m\rightarrow \infty,
\end{equation*}
but for all $m$
\begin{equation*}
\sup_{t\ge 0}E^{(\partial_t)}(F_n^m(t)-F_{n})\ge \epsilon_0,
\end{equation*}
where $F_n^m(t)$ is the solution associated to the initial data
corresponding to the curvature $F_{0,n}^m$.
\end{corollary}
\textbf{Acknowledgments.} The first author acknowledges support from
the ANR funding ANR-12-BS01-012-01. Both authors thank Sari Ghanem for
fruitful discussions on Yang-Mills equations.
\section{Abstract setting}
\label{Sec2}
\subsection{Abstract result}
We consider the one dimensional wave equation
\begin{equation}
\label{AWE}
\left\{\begin{array}{rcl} \ddot{u}-u''+Vu&=&F(u),\\
u\vert_{t=0}&=&u_0,\\
\partial_tu\vert_{t=0}&=&u_1 \end{array}\right.
\end{equation}
with $\dot{}=\partial_t,\, '=\partial_x$ and
\begin{equation}
\tag{HV}\label{HV}
V\in C({\mathbb R})\cap L^1({\mathbb R}),\, \lim_{\vert x\vert\rightarrow \infty}V(x)=0,\, \int_{{\mathbb R}}V(x)dx<0.
\end{equation}
We also suppose that
\begin{equation}
\tag{HF}\label{HF}
\Vert F(u)-F(v)\Vert_{L^2}\le M_F(\Vert u\Vert_{H^1}+\Vert
v\Vert_{H^1})\Vert u-v\Vert_{H^1}
\end{equation}
for $\Vert u\Vert_{H^1}\le 1,\Vert v\Vert_{H^1}\le 1$. Let
$X=H^1\times L^2$. We then have the following:
\begin{theorem}
The zero solution of \eqref{AWE} is unstable. More precisely, there
exist $\epsilon_0>0$ and a sequence $(u_0^m,u_1^m)$ with $\Vert
(u_0^m,u_1^m)\Vert_X\rightarrow 0,\, m\rightarrow \infty$, but for all
$m$
\begin{equation*}
\sup_{t\ge 0}\Vert(u^m(t),\partial_tu^m(t))\Vert_X\ge \epsilon_0>0.
\end{equation*}
Here $u^m(t)$ is the solution of \eqref{AWE} with initial data
$(u_0^m,u_1^m)$ and the supremum is taken over the maximal interval of
existence of $u^m(t)$.
\end{theorem}
Let
\begin{equation*}
{\mathcal A}=-\partial_x^2+V,\quad D({\mathcal A})=H^2({\mathbb R}).
\end{equation*}
We note that ${\mathcal A}$ is a selfadjoint operator.
\subsection{Spectral analysis of ${\mathcal A}$}
\begin{proposition}
We have
\begin{equation*}
\sigma({\mathcal A})=\{-\lambda_n^2\}_{n\in {\mathcal N}}\cup [0,\infty),
\end{equation*}
where $-\lambda_n^2$, $\lambda_0> \lambda_1>\dots>\lambda_n> \dots>0$, is a finite (${\mathcal N}=\{0,\dots,N\}$) or infinite (${\mathcal N}=\mathbb{N}$) sequence of negative eigenvalues with only possible
accumulation point $0$.
\end{proposition}
\proof
First note that $\sigma({\mathcal A})\cap {\mathbb R}^-\neq \emptyset$. Indeed, let $\chi\in C_0^{\infty}({\mathbb R}),\, \chi(0)=1,\, \chi\ge 0,\,
\chi_R(.)=\chi(\frac{.}{R})$. Then
\begin{equation*}
\langle {\mathcal A}\chi_R,\chi_R\rangle=\frac{1}{R}\int \vert \chi'(x)\vert^2dx+\int
V(x)\chi_R^2dx\rightarrow \int V(x) dx<0,\quad R\rightarrow
\infty.
\end{equation*}
We now introduce the comparison operator
\begin{equation*}
{\mathcal B}=-\partial_x^2.
\end{equation*}
We compute
\begin{equation*}
({\mathcal B}-z^2)^{-1}-({\mathcal A}-z^2)^{-1}=({\mathcal A}-z^2)^{-1}V({\mathcal B}-z^2)^{-1}.
\end{equation*}
Using that $\lim_{x\rightarrow \pm \infty} V(x)=0$ we see that this is
a compact operator. By the Weyl criterion
\begin{equation*}
\sigma_{ess}({\mathcal A})=\sigma_{ess}({\mathcal B})=[0,\infty).
\end{equation*}
On the other hand we already know that ${\mathcal A}$ has negative
spectrum. It therefore has at least one negative eigenvalue. ${\mathcal A}$
being bounded from below, the proposition follows.
$\Box$
\subsection{The wave equation as a first order equation}
\subsubsection{The linear equation}
The equation
\begin{equation*}
\ddot{v}+{\mathcal A} v=0
\end{equation*}
is equivalent to
\begin{equation*}
\partial_t\psi=L\psi,\quad L=\left(\begin{array}{cc} 0 & i \\
i{\mathcal A} & 0 \end{array}\right),\quad \psi=\left(\begin{array}{c} v
\\ \frac{1}{i} \partial_t v \end{array}\right).
\end{equation*}
\begin{remark}
Let
\begin{equation*}
{\mathcal A}\phi_0=-\lambda^2\phi_0.
\end{equation*}
Then we have
\begin{enumerate}
\item $\phi_0\in H^2$.
\item Let $\psi^{\pm}_0=\left(\begin{array}{c} \phi_0 \\ \pm\frac{1}{i} \lambda
\phi_0\end{array}\right)$. Then
\begin{equation*}
L\psi^{\pm}_0=\pm\lambda\psi^{\pm}_0.
\end{equation*}
\end{enumerate}
\end{remark}
Let $V_-$ be the negative part of the potential. For $\mu^2>\Vert
V_-\Vert_{\infty}\,(\ge \lambda_0^2)$ we introduce the scalar product
\begin{equation*}
\langle u,v\rangle_{\mu}=\langle ({\mathcal A}+\mu^2)u_0,v_0\rangle+\langle u_1,v_1\rangle
\end{equation*}
where $\langle.,.\rangle$ is the usual scalar product on ${\mathcal H}=L^2({\mathbb R})$. We denote by
$\Vert.\Vert_{\mu}$ the corresponding norm. It is easy to check that
the norms $\Vert .\Vert_{\mu}$ and
$\Vert.\Vert_X$ are equivalent.
\begin{proposition}
$L$ is the generator of a $C^0$-semigroup $e^{tL}$ on $X$.
\end{proposition}
\proof
Let $\mu^2>\Vert V_-\Vert$ and
\begin{equation*}
L_{\mu}=\left(\begin{array}{cc} 0 & i\\ i({\mathcal A}+\mu^2) &
0 \end{array}\right),\quad B_{\mu}=\left(\begin{array}{cc} 0 & 0\\ -i\mu^2 &
0 \end{array}\right).
\end{equation*}
$iL_{\mu}$ is a selfadjoint operator on $(X, \langle.,.\rangle_{\mu})$ and in particular the
generator of a $C^0$-semigroup $e^{L_{\mu}t}$. We have
$L=L_{\mu}+B_{\mu}$. $B_{\mu}$ being bounded, we can apply
\cite[Theorem 3.1.1]{Pa} to see that $L$ is the generator of
a $C^0$-semigroup on $(X, \Vert .\Vert_{\mu})$ and thus on $(X,\Vert .\Vert_X)$.
$\Box$
Let now
\begin{equation*}
M_i=\left(\begin{array}{cc} \bbbone & \bbbone \\
\frac{\lambda_i}{i} & -\frac{\lambda_i}{i} \end{array} \right).
\end{equation*}
Note that $\det M_i=2i\lambda_i\neq 0$ and that $M_i$ is thus invertible. We define
$P_i=\bbbone_{\{-\lambda_i^2\}}({\mathcal A})M_i$ and $X_i=P_iX$. We also define
$X_{\infty}=\bbbone_{{\mathbb R}^+}({\mathcal A})\bbbone_2X.$ Here
$\bbbone_{\{-\lambda_i^2\}}({\mathcal A})$ and $\bbbone_{{\mathbb R}^+}({\mathcal A})$ are defined by
the spectral theorem. In particular $\bbbone_{\{-\lambda_i^2\}}({\mathcal A})$ is
the projection on the eigenspace of ${\mathcal A}$ associated to the eigenvalue
$-\lambda_i^2$.
\begin{lemma}
\begin{equation*}
X=\left(\oplus_{i\in {\mathcal N}}X_i\right)\oplus X_{\infty}.
\end{equation*}
\end{lemma}
\begin{remark}
Note that the sum is orthogonal with respect to the scalar product
$\langle.,.\rangle_{\mu}$.
\end{remark}
\proof
Let $(\phi,\psi)\in X$. We put
\begin{eqnarray*}
\phi_i&=&\bbbone_{\{-\lambda_i^2\}}({\mathcal A})\phi,\quad
\psi_i=\bbbone_{\{-\lambda_i^2\}}({\mathcal A})\psi,\quad
\left(\begin{array}{c} \tilde{\phi}_i \\ \tilde{\psi}_i\end{array}\right)=M_i^{-1}\left(\begin{array}{c} \phi_i \\ \psi_i\end{array}\right).
\end{eqnarray*}
Since ${\mathcal A}$ is self-adjoint, we can write
$$\phi = \sum_{i\in {\mathcal N}} \phi_i + \bbbone_{{\mathbb R}^+}({\mathcal A})\phi, \quad \psi = \sum_{i\in {\mathcal N}} \psi_i + \bbbone_{{\mathbb R}^+}({\mathcal A})\psi.$$
Then
\begin{equation*}
\left(\begin{array}{c} \phi \\
\psi\end{array}\right)=\sum_{i\in {\mathcal N}}M_i\left(\begin{array}{c}
\tilde{\phi}_i \\
\tilde{\psi}_i\end{array}\right)+\left(\begin{array}{c}
\bbbone_{{\mathbb R}^+}({\mathcal A})\phi \\ \bbbone_{{\mathbb R}^+}({\mathcal A})\psi \end{array}\right)
\end{equation*}
gives the required decomposition. For uniqueness, let
\begin{equation*}
\left(\begin{array}{c} \phi \\
\psi\end{array}\right)=\sum_{i\in{\mathcal N}}M_i\left(\begin{array}{c}
\tilde{\phi}_i \\
\tilde{\psi}_i\end{array}\right)+\left(\begin{array}{c}
\phi_{\infty} \\ \psi_{\infty} \end{array}\right).
\end{equation*}
Applying $\bbbone_{{\mathbb R}^+}({\mathcal A}),\, \bbbone_{\{-\lambda_i^2\}}({\mathcal A})$ to each line immediately gives
\begin{equation*}
\phi_{\infty}=\bbbone_{{\mathbb R}^+}({\mathcal A})\phi,\quad
\psi_{\infty}=\bbbone_{{\mathbb R}^+}({\mathcal A})\psi,\quad
\left(\begin{array}{c} \tilde{\phi}_i \\ \tilde{\psi}_i\end{array}\right)=M_i^{-1} \left(\begin{array}{c} \phi_i \\ \psi_i\end{array}\right),
\end{equation*}
where $\psi_i=\bbbone_{\{-\lambda_i^2\}}({\mathcal A})\psi,\,
\phi_i=\bbbone_{\{-\lambda_i^2\}}({\mathcal A})\phi$.
$\Box$
Let
\begin{equation*}
X_i^{\pm}=M_i\bbbone_{\{-\lambda_i^2\}}({\mathcal A})P_{\pm}X,
\end{equation*}
where $P_+(\phi,\psi)=(\phi,0),\, P_-(\phi,\psi)=(0,\psi)$. Clearly
$X_i=X_i^+\oplus X_i^-$ and thus
\begin{equation*}
X=\left(\bigoplus_{i\in {\mathcal N}}(X_i^+\oplus X_i^-)\right)\oplus X_{\infty}.
\end{equation*}
\begin{remark}
Let $(\phi_i,\psi_i) \in X_i^{\pm}$. Then $L (\phi_i,\psi_i)= \pm \lambda_i (\phi_i,\psi_i)$.
\end{remark}
\begin{remark}
On $X_i$ the norm $\Vert.\Vert_{{\sqrt{2}\lambda_i}}$ is equivalent to
the norm $\Vert.\Vert_X$, and $X_i^+,\, X_i^-$ are orthogonal
with respect to this scalar product. Indeed:
\end{remark}
\begin{equation*}
\left\langle\left(\begin{array}{c} \phi \\
\frac{\lambda_i}{i}\phi \end{array}\right),\left(\begin{array}{c}
\psi \\
-\frac{\lambda_i}{i}\psi \end{array}\right)\right\rangle_{\sqrt{2}\lambda_i}=\lambda_i^2\langle\phi,\psi\rangle-\lambda_i^2\langle\phi,\psi\rangle=0.
\end{equation*}
\begin{proposition}
\label{prop3}
\begin{enumerate}
\item The spaces $X_i,\, X_{\infty}$ are $e^{tL}$ invariant.
\item For all $\epsilon>0$ there exists $C_{\epsilon}>0$ such that for all $i\in {\mathcal N}$ and for
all $t\in {\mathbb R}$
\begin{equation*}
\Vert e^{tL}\vert_{X_i}\Vert_{X\rightarrow X}\le C_{\epsilon} e^{(\lambda_i+\epsilon)\vert t\vert}.
\end{equation*}
\item For all $\epsilon>0$ there exists $C_{\epsilon}>0$ such that for
all $t\in {\mathbb R}$
\begin{equation*}
\Vert e^{tL}\vert_{X_{\infty}}\Vert_{X\rightarrow X} \le C_{\epsilon} e^{\epsilon \vert
t\vert}.
\end{equation*}
\end{enumerate}
\end{proposition}
\proof
(1) We have
\begin{equation*}
e^{tL}M_i\bbbone_{\{-\lambda_i^2\}}({\mathcal A})\bbbone_2\left(\begin{array}{c}
\phi \\ \psi\end{array}\right)=M_i\bbbone_{\{-\lambda_i^2\}}({\mathcal A})\bbbone_2\left(\begin{array}{c}
e^{t\lambda_i} \phi \\ e^{-t\lambda_i}\psi \end{array}\right)
\end{equation*}
and thus $X_i$ is invariant under the evolution.
The fact that $X_{\infty}$ is invariant follows from the
fact that $\bbbone_{{\mathbb R}^+}({\mathcal A})$ commutes with $L$.
(2) Because of the equivalence of the norms it is sufficient to
estimate the $\Vert.\Vert_{\mu}$ norm. Let
\begin{equation*}
\left(\begin{array}{c} \phi_i\\ \psi_i\end{array}\right)\in X_i.
\end{equation*}
We compute
\begin{eqnarray*}
\left\Vert e^{tL}\left(\begin{array}{c} \phi_i \\ \psi_i\end{array}\right)\right\Vert_{\mu}^2
&=&\left\Vert \left(\begin{array}{cc} (\mu^2-\lambda_i^2)^{1/2}
& 0 \\ 0 & \bbbone \end{array}\right)M_i\left(\begin{array}{cc}
e^{t\lambda_i} & 0 \\ 0 & e^{-\lambda_i
t} \end{array}\right)M_i^{-1}\left(\begin{array}{c}
\phi_i \\ \psi_i\end{array}\right)\right\Vert^2_{{\mathcal H}\times
{\mathcal H}}\\
&\le&\Vert N_i\Vert^2_{{\mathbb R}^2\rightarrow {\mathbb R}^2}\left\Vert \left(\begin{array}{c}
\phi_i \\ \psi_i\end{array}\right)\right\Vert^2_{\mu},
\end{eqnarray*}
where
\begin{equation*}
N_i=\left(\begin{array}{cc} (\mu^2-\lambda_i^2)^{1/2}
& 0 \\ 0 & \bbbone \end{array}\right)M_i\left(\begin{array}{cc}
e^{t\lambda_i} & 0 \\ 0 & e^{-\lambda_i
t} \end{array}\right)M_i^{-1}\left(\begin{array}{cc} (\mu^2-\lambda_i^2)^{-1/2}
& 0 \\ 0 & \bbbone \end{array}\right).
\end{equation*}
We then estimate uniformly in $i\in {\mathcal N}$:
\begin{eqnarray*}
\Vert N_i\Vert^2_{{\mathbb R}^2\rightarrow {\mathbb R}^2}&\lesssim& \left\Vert
\frac{1}{2}\left(\begin{array}{cc} e^{t\lambda_i}+e^{-t\lambda_i} & \frac{1}{i\lambda_i}(e^{-t\lambda_i}-e^{t\lambda_i}) \\
\frac{\lambda_i}{i}(e^{t\lambda_i }-e^{-t\lambda_i}) &
e^{t\lambda_i}+e^{-\lambda_i t} \end{array}\right)\right\Vert_2^2.
\end{eqnarray*}
We have for $t\ge 0$
\begin{eqnarray*}
\frac{1}{\lambda_i}(e^{t\lambda_i}-e^{-t\lambda_i})&=&2\sum_{k=0}^{\infty}\frac{(t\lambda_i)^{2k+1}}{\lambda_i(2k+1)!}\\
&\le&2t\sum_{k=0}^{\infty}\frac{(t\lambda_i)^{2k}}{(2k)!}\le
t(e^{t\lambda_i }+e^{-t\lambda_i})\le \tilde{C}_{\epsilon} e^{(\lambda_i+\epsilon) t}.
\end{eqnarray*}
Using that $\lambda_i\le \lambda_0$ we find uniformly in $i\in {\mathcal N}$:
\begin{equation*}
\Vert N_i\Vert_{{\mathbb R}^2\rightarrow {\mathbb R}^2}\lesssim
e^{(\lambda_i+\epsilon)\vert t\vert}.
\end{equation*}
(3) We consider the case $t\ge 0$. First note that
\begin{equation*}
\Vert u\Vert_{X_{\epsilon}}^2=\langle {\mathcal A} u_0,u_0\rangle+\Vert u_1\Vert^2+\epsilon^2\Vert u_0\Vert^2
\end{equation*}
defines a norm on $X_{\infty}$. We estimate for $u(t)=e^{tL}u$
\begin{eqnarray*}
\frac{d}{dt}\Vert u\Vert_{X_{\epsilon}}^2&=&2\,\mathrm{Re}\left(\langle {\mathcal A} u_0,\dot{u}_0\rangle+\langle u_1,\dot{u}_1\rangle+\epsilon^2\langle u_0,\dot{u}_0\rangle\right)\\
&=&2\,\mathrm{Re}\, \epsilon^2\langle u_0,i u_1\rangle\\
&\le&2\epsilon^2\Vert u_0\Vert\Vert u_1\Vert\le \epsilon^3\Vert
u_0\Vert^2+\epsilon\Vert u_1\Vert^2\le \epsilon \Vert u\Vert_{X_{\epsilon}}^2.
\end{eqnarray*}
By the Gronwall lemma we obtain:
\begin{equation*}
\Vert u(t)\Vert_{X_{\epsilon}}^2\le \tilde{C}_{\epsilon} e^{\epsilon t}\Vert
u\Vert^2_{X_{\epsilon}}.
\end{equation*}
We now claim that on $X_{\infty}$ the $X$ and the $X_{\epsilon}$ norms are
equivalent. Indeed,
\begin{eqnarray*}
\langle {\mathcal A} u_0,u_0\rangle+\Vert u_1\Vert^2+\epsilon^2\Vert u_0\Vert^2\lesssim \Vert u_0\Vert_{H^1}^2+\Vert u_1\Vert^2.
\end{eqnarray*}
Also,
\begin{eqnarray*}
\Vert u_0\Vert_{H^1}^2+\Vert
u_1\Vert^2&=&\langle (-\partial_x^2+V)u_0,u_0\rangle-\langle Vu_0,u_0\rangle+\Vert
u_0\Vert^2+\Vert u_1\Vert^2\\
&\lesssim& \langle {\mathcal A} u_0,u_0\rangle+\Vert
u_0\Vert^2+\Vert u_1\Vert^2\lesssim \Vert u\Vert_{X_{\epsilon}}^2.
\end{eqnarray*}
Then we can estimate
\begin{eqnarray*}
\Vert u(t)\Vert_X\lesssim \Vert u(t)\Vert_{X_{\epsilon}}\lesssim e^{\epsilon
t}\Vert u\Vert_{X_{\epsilon}}\lesssim e^{\epsilon t}\Vert u\Vert_X.
\end{eqnarray*}
$\Box$
Let $Y=X_0^-\oplus\left(\bigoplus_{i=1}^{N}X_i\right)\oplus X_{\infty}.$ We have $X=X^+_0\oplus Y$ and both
spaces are invariant under $e^{tL}$.
\begin{corollary}
\label{cor1}
For all $\epsilon>0$ there exists $M_{L,\epsilon}>0$ such that for all $t\ge 0$ we have
\begin{equation*}
\Vert e^{tL}\vert_{Y}\Vert_{X\rightarrow X}\le M_{L,\epsilon} e^{(\lambda_1+\epsilon) t}.
\end{equation*}
\end{corollary}
\proof
Because of the equivalence of the norms $\Vert.\Vert_{X}$ and
$\Vert.\Vert_{\mu}\, (\mu^2>\Vert V_-\Vert_{\infty})$ it is
sufficient to show the estimate with respect to the norm $\Vert.\Vert_{\mu}$. We
choose $\epsilon<\lambda_1$ and apply Proposition \ref{prop3}. Let
\begin{equation*}
\phi=\phi_0^-+\sum_{i=1}^{N} \phi_i+\phi_{\infty}
\end{equation*}
with $\phi_0^-\in X_0^-,\, \phi_i\in X_i,\, \phi_{\infty}\in X_{\infty}$. We
have
\begin{eqnarray*}
\Vert e^{tL}\phi\Vert_{\mu}^2&=&e^{-2\lambda_0 t}\Vert \phi_0^-\Vert^2_{\mu}+\sum_{i=1}^{N}\Vert
e^{tL}\phi_i\Vert^2_{\mu}+\Vert
e^{tL}\phi_{\infty}\Vert^2_{\mu}\\
&\lesssim& e^{2(\lambda_1+\epsilon) t }(\Vert
\phi_0^-\Vert^2_{\mu}+\sum_{i=1}^{N} \Vert
\phi_i\Vert^2_{\mu}+\Vert
\phi_{\infty}\Vert^2_{\mu})=e^{2(\lambda_1+\epsilon) t}\Vert
\phi\Vert^2_{\mu}.
\end{eqnarray*}
$\Box$
Let
\begin{equation*}
E_0=\bbbone_{\{-\lambda_0^2\}}({\mathcal A})M_0P_+M_0^{-1},\quad E_1=\bbbone-E_0.
\end{equation*}
We easily check that
\begin{equation*}
\forall \psi \in X,\, E_0\psi\in X_0^+;\quad \forall \psi\in X,\,
E_1\psi\in Y;\quad E_0+E_1=\bbbone.
\end{equation*}
\subsubsection{The nonlinear equation}
The nonlinear equation now writes as a first order equation
\begin{equation}
\label{abstrequ}
\left\{\begin{array}{rcl} \partial_t\psi&=&L\psi+G(\psi),\\
\psi(0)&=&\psi_0 \end{array}\right.
\end{equation}
with
\begin{equation*}
G(\psi)=\left(\begin{array}{c} 0 \\ F(P_{+}(\psi)) \end{array}\right).
\end{equation*}
From hypothesis \eqref{HF} we directly obtain
\begin{equation}
\label{LipschitzG}
\Vert G(\psi)-G(\phi)\Vert_{X}\le M_F (\Vert \psi\Vert_X+\Vert
\phi\Vert_X)\Vert \psi-\phi\Vert_X
\end{equation}
for $\Vert \psi\Vert_X\le 1,\, \Vert \phi\Vert_X\le 1$. The
abstract theorem then writes:
\begin{theorem}
The zero solution of \eqref{abstrequ} is unstable. More precisely,
there exist $\epsilon_0>0$ and a sequence $\psi_0^m$ with $\Vert
\psi_0^m\Vert_X\rightarrow 0,\, m\rightarrow \infty$, but for all $m$
\begin{equation*}
\sup_{t\ge 0}\Vert\psi^m(t)\Vert_X\ge \epsilon_0>0.
\end{equation*}
Here $\psi^m(t)$ is the solution of \eqref{abstrequ} with initial data
$\psi_0^m$ and the supremum is taken over the maximal interval of
existence of $\psi^m$.
\end{theorem}
\langle s {\rm r}angleubsection{Proof of the abstract theorem}
We note $L_0$ the restriction of $L$ to $X^+_0$ and $L_1$ the
restriction of $L$ to $Y$. For $\psi_0{\rm i}n X^+_0$ with small norm we consider for a certain parameter ${\scriptscriptstyle\#}au>0$ the integral equation
{\beta}egin{equation}
\lambdabel{intequ}
\psi(t)=e^{L_0(t-{\scriptscriptstyle\#}au)}\psi_0+{\rm i}nt_{{\scriptscriptstyle\#}au}^te^{L_0(t-s)}E_0G(\psi)ds+{\rm i}nt_{-{\rm i}nfty}^te^{L_1(t-s)}E_1G(\psi)ds=:{\mathcal I}(\psi).
{{\rm r}m e}nd{equation}
We fix ${{\rm r}m e}psilon>0$ in Corollary {\rm r}ef{cor1} small enough such that
${\scriptscriptstyle\#}ilde{\lambdambda}_1:=\lambdambda_1+{{\rm r}m e}psilon<\lambdambda_0$. We will drop in the
following the index ${{\rm r}m e}psilon$ ($M_L=M_{L,{{\rm r}m e}psilon}$). We fix ${\beta}eta>0$ such that $\lambdambda_0>2{\beta}eta>{\scriptscriptstyle\#}ilde{\lambdambda}_1$. Let
{\beta}egin{equation*}
Z=\{\psi{\rm i}n C([0,{\scriptscriptstyle\#}au];X);\, \Vert\psi\Vert_X\le e^{{\beta}eta(t-{\scriptscriptstyle\#}au)}{\rm r}ho\}.
{{\rm r}m e}nd{equation*}
We equip $Z$ with the norm
{\beta}egin{equation*}
\Vert \psi\Vert_Z=\langle s {\rm r}angleup_{0\le t\le{\scriptscriptstyle\#}au}\Vert e^{-{\beta}eta(t-{\scriptscriptstyle\#}au)}\psi(t)\Vert_X.
{{\rm r}m e}nd{equation*}
Let $\psi_0$ such that $\Vert\psi_0\Vert_X=\frac{{\rm r}ho}{3}$. We
claim that for ${\rm r}ho$ small enough
{\beta}egin{equation*}
{\mathcal I}:{\beta}ar{B}_Z(0,{\rm r}ho){\rm r}ightarrow {\beta}ar{B}_Z(0,{\rm r}ho)
{{\rm r}m e}nd{equation*}
and that it is a contraction on that space. First note that
{\beta}egin{equation*}
{\mathcal I}(\psi)={\mathcal I}_0(\psi)+{\mathcal I}_1(\psi)+{\mathcal I}_2(\psi)
{{\rm r}m e}nd{equation*}
with
{\beta}egin{eqnarray*}
{\mathcal I}_0(\psi)&=&e^{L_0(t-{\scriptscriptstyle\#}au)}\psi_0,\\
{\mathcal I}_1(\psi)&=&-{\rm i}nt_{t}^{{\scriptscriptstyle\#}au}e^{L_0(u-{\scriptscriptstyle\#}au)}E_0G(\psi(t+{\scriptscriptstyle\#}au-u)) du,\\
{\mathcal I}_2(\psi)&=&{\rm i}nt_{-{\rm i}nfty}^te^{L_1(t-s)}E_1G(\psi(s))ds.
{{\rm r}m e}nd{eqnarray*}
We first estimate for $t\leq {\scriptscriptstyle\#}au$
{\beta}egin{equation*}
\Vert {\mathcal I}_0(\psi)\Vert_X= e^{\lambdambda_0(t-{\scriptscriptstyle\#}au)}\Vert \psi_0\Vert_X\le
1/3 e^{{\beta}eta(t-{\scriptscriptstyle\#}au)}{\rm r}ho.
{{\rm r}m e}nd{equation*}
We then estimate for $\psi{\rm i}n {\beta}ar{B}_Z(0,{\rm r}ho)$
{\beta}egin{eqnarray*}
\Vert{\mathcal I}_1(\psi)\Vert_X&\le&M_F\Vert
E_0\Vert{\rm i}nt_t^{{\scriptscriptstyle\#}au}e^{\lambdambda_0(u-{\scriptscriptstyle\#}au)}\Vert\psi\Vert_X^2(t+{\scriptscriptstyle\#}au-u)du\\
&\le&M_F\Vert
E_0\Vert{\rm i}nt_t^{{\scriptscriptstyle\#}au}e^{\lambdambda_0(u-{\scriptscriptstyle\#}au)}{\rm r}ho^2e^{2{\beta}eta(t-u)}du\\
&\le&M_F\Vert E_0\Vert e^{2{\beta}eta
t}e^{-\lambdambda_0{\scriptscriptstyle\#}au}{\rm r}ho^2{\rm i}nt_t^{{\scriptscriptstyle\#}au}e^{(\lambdambda_0-2{\beta}eta)u}du\\
&\le&M_F\Vert E_0\Vert{\rm r}ho^2e^{2{\beta}eta
t}e^{-\lambdambda_0{\scriptscriptstyle\#}au}\frac{1}{\lambdambda_0-2{\beta}eta}e^{(\lambdambda_0-2{\beta}eta){\scriptscriptstyle\#}au}\\
&=&\frac{M_F\Vert E_0\Vert{\rm r}ho^2}{\lambdambda_0-2{\beta}eta}
e^{2{\beta}eta(t-{\scriptscriptstyle\#}au)}\\
&\le&\frac{M_F\Vert E_0\Vert{\rm r}ho^2}{\lambdambda_0-2{\beta}eta}
e^{{\beta}eta(t-{\scriptscriptstyle\#}au)}\le 1/3{\rm r}ho e^{{\beta}eta(t-{\scriptscriptstyle\#}au)}
{{\rm r}m e}nd{eqnarray*}
for ${\rm r}ho$ small enough. We then estimate for $\psi{\rm i}n
{\beta}ar{B}_Z(0,{\rm r}ho)$ :
\begin{eqnarray*}
\Vert {\mathcal I}_2(\psi(t))\Vert_X&\le&M_LM_F\Vert
E_1\Vert\int_{-\infty}^te^{\tilde{\lambda}_1(t-s)}\rho^2e^{2\beta(s-\tau)}ds\\
&\le&\frac{M_LM_F\Vert E_1\Vert\rho^2}{2\beta-\tilde{\lambda}_1}e^{\tilde{\lambda}_1
t}e^{-2\beta\tau}e^{(2\beta-\tilde{\lambda}_1)t}\\
&=&\frac{M_LM_F\Vert
E_1\Vert\rho^2}{2\beta-\tilde{\lambda}_1}e^{2\beta(t-\tau)}\le 1/3\rho e^{\beta(t-\tau)}
\end{eqnarray*}
for ${\rm r}ho$ small enough. We have just proven ${\mathcal I} (\psi) {\rm i}n {\beta}ar{B}_Z(0,{\rm r}ho)$.
Let us now show that ${\mathcal I}$ is a contraction. We estimate
{\beta}egin{eqnarray*}
\Vert {\mathcal I}_1(\psi)-{\mathcal I}_1(\phi)\Vert_X&\le&2M_F\Vert
E_0\Vert{\rm i}nt_t^{{\scriptscriptstyle\#}au}e^{\lambdambda_0(u-{\scriptscriptstyle\#}au)}{\rm r}ho e^{{\beta}eta(t-u)}\Vert\psi-\phi\Vert_X(t+{\scriptscriptstyle\#}au-u)du\\
&\le&2M_F\Vert
E_0\Vert{\rm r}ho\Vert\psi-\phi\Vert_Z{\rm i}nt_t^{{\scriptscriptstyle\#}au}e^{\lambdambda_0(u-{\scriptscriptstyle\#}au)}e^{2{\beta}eta(t-u)}du\\
&=&2M_F\Vert E_0\Vert{\rm r}ho\Vert\psi-\phi\Vert_Ze^{2{\beta}eta
t}e^{-\lambdambda_0{\scriptscriptstyle\#}au}{\rm i}nt_t^{{\scriptscriptstyle\#}au}e^{(\lambdambda_0-2{\beta}eta)u}du\\
&\le&\frac{2M_F\Vert
E_0\Vert{\rm r}ho}{\lambdambda_0-2{\beta}eta}e^{2{\beta}eta(t-{\scriptscriptstyle\#}au)}\le 1/4 e^{{\beta}eta(t-{\scriptscriptstyle\#}au)}
{{\rm r}m e}nd{eqnarray*}
for ${\rm r}ho$ sufficiently small. We then estimate
{\beta}egin{eqnarray*}
\Vert {\mathcal I}_2(\psi)-{\mathcal I}_2(\phi)\Vert_X&\le& {\rm i}nt_{-{\rm i}nfty}^t2M_LM_F\Vert
E_1\Vert {\rm r}ho e^{{\scriptscriptstyle\#}ilde{\lambdambda}_1(t-s)}e^{{\beta}eta(s-{\scriptscriptstyle\#}au)}\Vert \psi-\phi\Vert_X ds\\
&\le&2M_LM_F\Vert E_1\Vert{\rm r}ho\Vert
\psi-\phi\Vert_Z{\rm i}nt_{-{\rm i}nfty}^te^{{\scriptscriptstyle\#}ilde{\lambdambda}_1(t-s)}e^{2{\beta}eta(s-{\scriptscriptstyle\#}au)}ds\\
&=&2M_LM_F\Vert E_1\Vert{\rm r}ho\Vert
\psi-\phi\Vert_Ze^{{\scriptscriptstyle\#}ilde{\lambdambda}_1t}e^{-2{\beta}eta
{\scriptscriptstyle\#}au}{\rm i}nt_{-{\rm i}nfty}^te^{(2{\beta}eta-{\scriptscriptstyle\#}ilde{\lambdambda}_1)s}ds\\
&\le&\frac{2M_LM_F\Vert
E_1\Vert{\rm r}ho}{2{\beta}eta-{\scriptscriptstyle\#}ilde{\lambdambda}_1}e^{2{\beta}eta(t-{\scriptscriptstyle\#}au)}\le 1/4e^{{\beta}eta(t-{\scriptscriptstyle\#}au)}
{{\rm r}m e}nd{eqnarray*}
for ${\rm r}ho$ sufficiently small.
It follows that for ${\rm r}ho$ sufficiently
small there exists a solution of {{\rm r}m e}qref{intequ} in
${\beta}ar{B}_Z(0,{\rm r}ho)$. We note this solution $\psi(t,{\scriptscriptstyle\#}au)$. We easily
check that $\psi(t,{\scriptscriptstyle\#}au)$ is also solution of {{\rm r}m e}qref{abstrequ} with
initial data satisfying
{\beta}egin{equation*}
\Vert \psi(0,{\scriptscriptstyle\#}au )\Vert_X\le {\rm r}ho e^{-{\beta}eta{\scriptscriptstyle\#}au}{\rm r}ightarrow 0,{\scriptscriptstyle\#}au
{\rm r}ightarrow {\rm i}nfty.
{{\rm r}m e}nd{equation*}
We also estimate
{\beta}egin{eqnarray*}
\Vert \psi({\scriptscriptstyle\#}au)\Vert_X&{\bf g}e&\Vert \psi_0\Vert_X-M_LM_F\Vert
E_1\Vert{\rm i}nt_{-{\rm i}nfty}^{{\scriptscriptstyle\#}au}e^{{\scriptscriptstyle\#}ilde{\lambdambda}_1({\scriptscriptstyle\#}au-s)}{\rm r}ho^2e^{2{\beta}eta(s-{\scriptscriptstyle\#}au)}ds\\
&=&{\rm r}ho/3-M_LM_F\Vert
E_1\Vert{\rm r}ho^2e^{({\scriptscriptstyle\#}ilde{\lambdambda}_1-2{\beta}eta){\scriptscriptstyle\#}au}{\rm i}nt_{-{\rm i}nfty}^{{\scriptscriptstyle\#}au}e^{(2{\beta}eta-{\scriptscriptstyle\#}ilde{\lambdambda}_1)s}ds\\
&{\bf g}e&{\rm r}ho/3-\frac{M_LM_F\Vert E_1\Vert{\rm r}ho^2}{2{\beta}eta-{\scriptscriptstyle\#}ilde{\lambdambda}_1}{\bf g}e{\rm r}ho/6
{{\rm r}m e}nd{eqnarray*}
for ${\rm r}ho$ small enough. It follows that $\psi^m(t)=\psi(t,m)$ does
the job.
$\Box$
{\beta}egin{remark}
The theorem is close to \cite[Theorem VII.2.3]{DaKr} and \cite[Theorem 5.1.3]{He}.
However both theorems do not apply directly. Whereas \cite[Theorem
VII.2.3]{DaKr} is restricted to bounded operators, \cite[Theorem
5.1.3]{He} only applies if the linear part is sectorial.
{{\rm r}m e}nd{remark}
\langle s {\rm r}angleection{Application of the abstract result to the Yang-Mills
equation}
\lambdabel{Sec3}
First note that if $W(t,r)$ is solution of the Yang-Mills equation
{{\rm r}m e}qref{YMSW} (written in the $r$ variable), then $ W(2m t,
2m r)$ is solution of the
same equation with $m=1/2$ and vice versa. We can therefore suppose in
the following $m=1/2$.
We linearize around $W=W_n$ and obtain for $v=W-W_n$:
\begin{equation*}
\ddot{v}-v''+P(3W_n^2-1)v+Pv^2(v+3W_n)=0.
\end{equation*}
The linear operator
{\beta}egin{equation*}
{\mathcal A}_n=-\partialrtial_x^2+P(3W_n^2-1)
{{\rm r}m e}nd{equation*}
depends on the stationary solution which we don't
know explicitly. We put
{\beta}egin{equation*}
V_n=P(3W_n^2-1).
{{\rm r}m e}nd{equation*}
We first want to apply our abstract result on $X=H^1{\scriptscriptstyle\#}imes L^2$. It is
easy to see that the nonlinear part fulfills the hypotheses of the
abstract theorem. Indeed we have
{\beta}egin{proposition}
\lambdabel{propnonllip}
We have for $\Vert v\Vert_{H^1}\le 1,\, \Vert u\Vert_{H^1}\le 1$:
{\beta}egin{equation*}
\Vert F(v)-F(u)\Vert_{L^2}\lesssimssim(\Vert v\Vert_{H^1}+\Vert u\Vert_{H^1})\Vert u-v\Vert_{H^1}.
{{\rm r}m e}nd{equation*}
{{\rm r}m e}nd{proposition}
\partialoof
We compute
{\beta}egin{equation*}
F(v)-F(u)=P(v^2+u^2+uv+3(W_nv+W_nu))(u-v).
{{\rm r}m e}nd{equation*}
Thus
{\beta}egin{eqnarray*}
\Vert F(v)-F(u)\Vert_{L^2}&\lesssimssim&(\Vert v^2\Vert_{L^2}+\Vert
u^2\Vert_{L^2})\Vert
u-v\Vert_{L^{{\rm i}nfty}}+(\Vert v\Vert_{L^{{\rm i}nfty}}+\Vert v\Vert_{L^{{\rm i}nfty}}\Vert
u\Vert_{L^{{\rm i}nfty}}+\Vert
u\Vert_{L^{{\rm i}nfty}})\Vert u-v\Vert_{L^2}\\
&\lesssimssim&(\Vert v\Vert^2_{L^4}+\Vert u\Vert_{L^4}^2)\Vert u-v\Vert_{H^1}+(\Vert
v\Vert_{H^1}+\Vert
v\Vert_{H^1}\Vert u\Vert_{H^1}+\Vert u\Vert_{H^1})\Vert v-u\Vert_{H^1}\\
&\lesssimssim&(\Vert v\Vert^2_{H^1}+\Vert
u\Vert_{H^1}^2+\Vert v\Vert_{H^1}+\Vert u\Vert_{H^1})\Vert
u-v\Vert_{H^1}\\
&\lesssimssim& (\Vert v\Vert_{H^1}+\Vert u\Vert_{H^1})\Vert
u-v\Vert_{H^1}
{{\rm r}m e}nd{eqnarray*}
for $\Vert u\Vert_{H^1}\le 1,\, \Vert v\Vert_{H^1}\le 1$. Here we have used the Gagliardo--Nirenberg inequality and the Sobolev
embedding $H^1\hookrightarrow L^{\infty}$.
$\Box$
In the next subsection we will show that
{\beta}egin{equation*}
{\rm i}nt _{{\mathbb R}}V_n(x)dx<0.
{{\rm r}m e}nd{equation*}
\langle s {\rm r}angleubsection{Study of the potential $V_n$}
Going back to the $r$ variable we see that the potential $W_n$
fulfills the following equation
{\beta}egin{equation}
\lambdabel{ym}\left(1-\frac{1}{r}{\rm r}ight)\p_r^2W_n+ \frac{1}{r^2}\p_rW_n+ \frac{1}{r^2}W_n(1-W_n^2)=0
{{\rm r}m e}nd{equation}
with initial data (or boundary condition) $W_n(1)=a_n$, for $0<a_n\leq \frac{1+\langle s {\rm r}angleqrt{3}}{5+3\langle s {\rm r}angleqrt{3}}$. We also have $\lim_{r{\rm r}ightarrow {\rm i}nfty}W_n(r)=(-1)^n$. We will drop the index $n$ in the rest of this subsection.
\langle s {\rm r}angleubsubsection{A bound on $W$}
{\beta}egin{lemma}
We have $-a\leq W\leq a$ for $1\leq r\leq 3$.
{{\rm r}m e}nd{lemma}
{\beta}egin{proof}
Since the initial data for $W$ are $W(1)=a$ and $W'(1)=-a(1-a^2)<0$, there exists $r_0>1$ such that for $1\leq r \leq r_0$ we have
$$-a\leq W(r)\leq a$$
Then Lemma {\rm r}ef{borne} implies that on this interval we have
$$-a \leq \p_rW(r)\leq a.$$
$W$ is initially decreasing and cannot have a local minimum in the
region $W>0$ (this is a consequence of the maximum principle, see Lemma \ref{max}). Consequently there exists $r_1>1$ such that $0\leq W\leq
a$ on $[1,r_1]$ and $W(r_1)=0$. Because of the bound on
the derivative we have $r_1\ge 2$. By the same bound we have $-a\leq W\leq a$ on $[r_1,r_1+1]$.
{{\rm r}m e}nd{proof}
Let $Q(r)=1-\frac{1}{r}-\frac{1}{2r^2}$.
{\beta}egin{proposition}\lambdabel{enc}
We have for $r{\bf g}eq 3$
$$-Q(r)\leq W(r)\leq Q(r)$$
{{\rm r}m e}nd{proposition}
Let
$$L(u,r)= \left(1-\frac{1}{r}{\rm r}ight)\p_r^2u+\frac{1}{r^2}\p_ru+\frac{1}{r^2}u(1-u^2).$$
Before proving Proposition {\rm r}ef{enc}, we need the following lemma
{\beta}egin{lemma}
For $r{\bf g}eq 3$ we have $L(Q,r)<0$ and $L(-Q,r)>0$.
{{\rm r}m e}nd{lemma}
{\beta}egin{proof}
Since $L$ is odd in $u$, it is sufficient to prove $L(Q,r)<0$. We calculate
{\beta}egin{align*}L(Q,r)=&\left(1-\frac{1}{r}{\rm r}ight)\left(-\frac{2}{r^3}-\frac{3}{r^4}{\rm r}ight)+\frac{1}{r^2}\left(\frac{1}{r^2}+ \frac{1}{r^3}{\rm r}ight)
+\frac{1}{r^2}\left(1-\frac{1}{r}-\frac{1}{2r^2}{\rm r}ight)\left(1-\left(1-\frac{1}{r}-\frac{1}{2r^2}{\rm r}ight)^2{\rm r}ight)\\
=&-\frac{2}{r^4}+\frac{2}{r^5}+ \frac{3}{4r^6}+ \frac{3}{4r^7}+ \frac{1}{8r^8}.
{{\rm r}m e}nd{align*}
Consequently, for $r{\bf g}eq 3$ we have
$$L(Q,r)\leq \frac{1}{r^4}\left(-2+\frac{2}{3}+\frac{3}{4*3^2}+\frac{3}{4*3^3 }+\frac{1}{8*3^4}{\rm r}ight) \leq -\frac{1}{r^4}<0.$$
{{\rm r}m e}nd{proof}
{\beta}egin{proof}[Proof of Proposition {\rm r}ef{enc}]
We have $-a\leq W(3) \leq a$ and
$$a<\frac{11}{18}=1-\frac{1}{3}-\frac{1}{2*9}=Q(3).$$
If the inequality of Proposition \ref{enc} is false, there exists $r_1<r_2$, where $r_2$ may be infinite, such that
$$W(r_1)=Q(r_1), \quad W(r_2)=Q(r_2)$$
and $W>Q$ on $]r_1,r_2[$ (the case $W<-Q$ is treated in a similar way). Consider $r_0$ such that $W-Q$ is maximum at
$r_0$. Note that such a maximum always exists
independently if $\lim_{r{\rm r}ightarrow {\rm i}nfty} W(r)=-1$ (in which case
$r_2<{\rm i}nfty$) or $\lim_{r{\rm r}ightarrow
{\rm i}nfty}W(r)=1=\lim_{r{\rm r}ightarrow {\rm i}nfty}Q(r)$. Then we have
$$L(W,r_0)-L(Q, r_0)=-L(Q, r_0)>0$$
so
$$\left(1-\frac{1}{r_0}{\rm r}ight) (\p_r^2W-\p_r^2Q)(r_0)+ \frac{1}{r_0^2}\left(W(1-W^2)-Q(1-Q^2){\rm r}ight)>0$$
Since
$$W(r_0)>Q(r_0){\bf g}eq Q(3)= \frac{11}{18} {\bf g}eq \frac{1}{\langle s {\rm r}angleqrt{3}}$$
and the function $x\mapsto x(1-x^2)$ is decreasing for $x{\bf g}eq \frac{1}{\langle s {\rm r}angleqrt{3}}$ we have
$$\left(W(1-W^2)-Q(1-Q^2){\rm r}ight) \leq 0$$ and consequently
$$\left(1-\frac{1}{r_0}{\rm r}ight) (\p_r^2W-\p_r^2Q)(r_0) >0$$
which is a contradiction with the fact that $W-Q$ is maximum at
$r_0$.
{{\rm r}m e}nd{proof}
\langle s {\rm r}angleubsubsection{A bound on the potential}
We now come back to the potential
$$V=P(3W^2-1)$$
{\beta}egin{proposition}
We have
$${\rm i}nt_{{\mathbb R}} V(x) dx <0.$$
{{\rm r}m e}nd{proposition}
{\beta}egin{proof}
First note that
{\beta}egin{equation*}
{\rm i}nt_{{\mathbb R}} V(x)dx={\rm i}nt_1^{{\rm i}nfty} \frac{3W^2-1}{r^2} dr.
{{\rm r}m e}nd{equation*}
We estimate
$${\rm i}nt_1^3 \frac{3W^2-1}{r^2} \leq {\rm i}nt_1^3 \frac{3a^2-1}{r^2}= \frac{2(3a^2-1)}{3}$$
and
{\beta}egin{eqnarray*}
{\rm i}nt_3^{\rm i}nfty \frac{3W^2-1}{r^2} &\leq& {\rm i}nt_3^{\rm i}nfty \frac{1}{r^2}\left( 3\left(1-\frac{1}{r}{\rm r}ight)^2-1{\rm r}ight)
\leq {\rm i}nt_3^{\rm i}nfty \frac{1}{r^2}\left(2-\frac{6}{r}+
\frac{3}{r^2}{\rm r}ight)=\left[-\frac{2}{r}+\frac{3}{r^2}-\frac{1}{r^3}
{\rm r}ight]_3^{\rm i}nfty\\
&=& \frac{1}{3}+ \frac{1}{27}= \frac{10}{27}
{{\rm r}m e}nd{eqnarray*}
Note that
$$\frac{2(3a^2-1)}{3}+\frac{10}{27}<0$$
because $a\le\frac{1+\langle s {\rm r}angleqrt{3}}{5+3\langle s {\rm r}angleqrt{3}}<
\frac{2}{3\langle s {\rm r}angleqrt{3}}$. Therefore we have
$${\rm i}nt_{{\mathbb R}} V(x) dx<0.$$
{{\rm r}m e}nd{proof}
\langle s {\rm r}angleubsection{Proof of Theorem {\rm r}ef{Mainth}}
The main theorem with $\mathcal{E}$ replaced by $X$ now follows from the
abstract result. In order to be able to replace $X$ by $\mathcal{E}$ we need
the following lemma. We will drop the index $n$.
{\beta}egin{lemma}
\lambdabel{lem2}
Let $\phi_0$ be an eigenfunction of ${\mathcal A}$ with eigenvalue $-\lambdambda^2$. Then we
have
{\beta}egin{eqnarray}
\lambdabel{lem2.1}
{\rm i}nt_{{\mathbb R}} P\vert\phi_0\vert^2{\bf g}e \lambdambda^2{\rm i}nt_{{\mathbb R}} \vert
\phi_0\vert^2.\\
\lambdabel{lem2.2}
-{\rm i}nt V\vert\phi_0\vert^2{\bf g}e 0.
{{\rm r}m e}nd{eqnarray}
{{\rm r}m e}nd{lemma}
\partialoof
Let us first show {{\rm r}m e}qref{lem2.1}. We have
{\beta}egin{equation*}
(-\partialrtial_x^2+V)\phi_0=-\lambdambda^2\phi_0.
{{\rm r}m e}nd{equation*}
Multiplication by $\phi_0$ and integration by parts gives
{\beta}egin{equation}
\lambdabel{lem2.3}
{\rm i}nt \vert \phi_0'\vert^2+{\rm i}nt V\vert \phi_0\vert^2+\lambdambda^2{\rm i}nt
\vert \phi_0\vert^2=0.
{{\rm r}m e}nd{equation}
Now recall that $V=P(3W^2-1)$, thus
{\beta}egin{equation*}
{\rm i}nt P\vert \phi_0\vert^2{\bf g}e \lambdambda^2{\rm i}nt \vert\phi_0\vert^2.
{{\rm r}m e}nd{equation*}
We now show {{\rm r}m e}qref{lem2.2}. From {{\rm r}m e}qref{lem2.3} we obtain :
{\beta}egin{equation*}
-{\rm i}nt V\vert \phi_0\vert^2={\rm i}nt \vert\phi_0'\vert^2+\lambdambda^2{\rm i}nt
\vert\phi_0\vert^2{\bf g}e 0.
{{\rm r}m e}nd{equation*}
$\Box$
Let ${\scriptscriptstyle\#}ilde{{\mathcal H}}^1$ the completion of $C_0^{{\rm i}nfty}$ for
the norm
{\beta}egin{equation*}
\Vert u\Vert_{{\scriptscriptstyle\#}ilde{{\mathcal H}}^1}^2=\Vert u\Vert_{\text{d}ot{H}^1}^2+\Vert
u\Vert_{L^2_P}^2
{{\rm r}m e}nd{equation*}
We put ${\scriptscriptstyle\#}ilde{\mathcal{E}}={\scriptscriptstyle\#}ilde{{\mathcal H}}^1{\scriptscriptstyle\#}imes L^2$.
{{\beta}f Proof of Theorem {\rm r}ef{Mainth}}
We continue using the notations of the abstract setting. We claim that it is sufficient to show the following :
{\beta}egin{equation}
{\scriptscriptstyle\#}ag{IM}\lambdabel{IM}
{\beta}egin{array}{c} \mbox{There exists ${{\rm r}m e}psilon_0>0$ and a sequence $\psi_0^m$ with $\Vert
\psi_0^m\Vert_X{\rm r}ightarrow 0,\, m{\rm r}ightarrow {\rm i}nfty$,}\\ \mbox{but for all $m$}\quad
\langle s {\rm r}angleup_{t{\bf g}e 0}\Vert \psi^m(t)\Vert_{{\scriptscriptstyle\#}ilde{\mathcal{E}}}{\bf g}e
{{\rm r}m e}psilon_0>0.{{\rm r}m e}nd{array}
{{\rm r}m e}nd{equation}
To see this we first note that
{\beta}egin{equation*}
\Vert \psi_0^m\Vert_{\mathcal{E}}\le \Vert \psi_0^m\Vert_X
{{\rm r}m e}nd{equation*}
because
{\beta}egin{equation*}
\left({\rm i}nt P\vert u\vert^4{\rm r}ight)^{1/4}\lesssimssim
\Vert u\Vert^{1/2}_{{\rm i}nfty}\Vert u\Vert_{L^2}^{1/2}\le \Vert u\Vert_{H^1}
{{\rm r}m e}nd{equation*}
by the Sobolev embedding $H^1\langle h \rangleookrightarrow L^{{\rm i}nfty}$. On the other hand
{\beta}egin{equation*}
\Vert u\Vert_{L^2_P}=\left({\rm i}nt P\vert u\vert^2{\rm r}ight)^{1/2}\le
\left({\rm i}nt P{\rm r}ight)^{1/4}\left({\rm i}nt P\vert
u\vert^4{\rm r}ight)^{1/4}\lesssimssim \Vert u\Vert_{L^4_P}
{{\rm r}m e}nd{equation*}
and thus
{\beta}egin{equation*}
\Vert \psi^m(t)\Vert_{\mathcal{E}}{\bf g}trsim \Vert \psi^m(t)\Vert_{{\scriptscriptstyle\#}ilde{\mathcal{E}}}.
{{\rm r}m e}nd{equation*}
Let us now show {{\rm r}m e}qref{IM}. We follow the proof of the main
theorem. We choose
{\beta}egin{equation*}
\psi_0=\left({\beta}egin{array}{c} \phi_0 \\ \frac{\lambdambda_0}{i}
\phi_0{{\rm r}m e}nd{array}{\rm r}ight),\, \phi_0{\rm i}n
\bbbone_{\{-\lambdambda_0^2\}}({\mathcal A}){\mathcal H},\, \Vert
\phi_0\Vert=\frac{1}{3(1+\Vert V_-\Vert_{{\rm i}nfty})^{1/2}}{\rm r}ho.
{{\rm r}m e}nd{equation*}
We estimate
{\beta}egin{eqnarray*}
\Vert
\psi_0\Vert_X^2&=&\langle(-\partialrtial_x^2+V)\phi_0,\phi_0\rangle-\langleV\phi_0,\phi_0\rangle+\Vert
\phi_0\Vert^2+\lambdambda_0^2\Vert \phi_0\Vert^2\\
&\le&(\Vert V_{-}\Vert_{{\rm i}nfty}+1)\Vert\phi_0\Vert^2=1/9{\rm r}ho^2.
{{\rm r}m e}nd{eqnarray*}
Thus the first part of the proof goes through without any changes. We
then have to estimate $\Vert \psi({\scriptscriptstyle\#}au)\Vert_{{\scriptscriptstyle\#}ilde{\mathcal{E}}}$. We estimate
{\beta}egin{eqnarray*}
\Vert
\psi_0\Vert^2_{{\scriptscriptstyle\#}ilde{\mathcal{E}}}&=&\langle{\mathcal A}\phi_0,\phi_0\rangle-\langleV\phi_0,\phi_0\rangle+{\rm i}nt
P\vert\phi_0\vert^2+\lambdambda_0^2\vert \phi_0\vert^2\\
&=&-\langleV\phi_0,\phi_0\rangle+{\rm i}nt P\vert\phi_0\vert^2\\
&{\bf g}e&{\rm i}nt P\vert \phi_0\vert^2{\bf g}e \lambdambda_0^2{\rm i}nt \vert\phi_0\vert^2\\
&=&\lambdambda_0^2\frac{1}{9(1+\Vert V_-\Vert_{{\rm i}nfty})}{\rm r}ho^2.
{{\rm r}m e}nd{eqnarray*}
Here we have used Lemma {\rm r}ef{lem2}. Using
{\beta}egin{equation*}
\Vert u\Vert_{{\scriptscriptstyle\#}ilde{
\mathcal{E}}}\le C_1\Vert u\Vert_X
{{\rm r}m e}nd{equation*}
we find
{\beta}egin{eqnarray*}
\Vert \psi({\scriptscriptstyle\#}au)\Vert_{{\scriptscriptstyle\#}ilde{\mathcal{E}}}{\bf g}e \frac{\lambdambda_0}{3(1+\Vert
V_-\Vert_{{\rm i}nfty})^{1/2}}{\rm r}ho-\frac{2C_1M_LM_F\Vert
E_1\Vert}{2{\beta}eta-\lambdambda_1}{\rm r}ho^2{\bf g}e \frac{\lambdambda_0}{6(1+\Vert V_-\Vert_{{\rm i}nfty})}{\rm r}ho
{{\rm r}m e}nd{eqnarray*}
for ${\rm r}ho$ small enough.
$\Box$
\langle s {\rm r}angleubsection{Proof of Corollary {\rm r}ef{corstat}}
We recall
$$E^{(\p_t)}(F(t))=
\mathcal{E}(W,\text{d}ot{W}).$$
We take the same sequence of data $W_{0,n}^m$ as in Theorem
{\rm r}ef{Mainth}. We first have to show that
{\beta}egin{equation*}
{\rm i}nt P ((W_{0,n}^m)^2-W_n^2)^2{\rm r}ightarrow 0,\quad m{\rm r}ightarrow {\rm i}nfty.
{{\rm r}m e}nd{equation*}
This follows from
{\beta}egin{eqnarray*}
{\rm i}nt P ((W_{0,n}^m)^2-W_n^2)^2&\lesssimssim&{\rm i}nt P (W^m_{0,n}-W_n)^4+{\rm i}nt
PW_n^2(W^m_{0,n}-W_n)^2\\
&\lesssimssim&{\rm i}nt P (W^m_{0,n}-W_n)^4+\left({\rm i}nt P (W^m_{0,n}-W_n)^4{\rm r}ight)^{1/2}{\rm r}ightarrow 0,\quad m{\rm r}ightarrow {\rm i}nfty
{{\rm r}m e}nd{eqnarray*}
by Theorem {\rm r}ef{Mainth}. In the first inequality we have used the estimate
$$(A^2-B^2)^2= (A-B)^2(A+B)^2= (A-B)^2(A-B+2B)^2 \leq 2(A-B)^4+ 8B^2(A-B)^2,$$
and the fact that $\Vert W_n \Vert_{L^{\rm i}nfty}\leq 1$.
Now we have to show that
{\beta}egin{equation}
\lambdabel{eqcor1}
\langle s {\rm r}angleup_{t{\bf g}e 0} {\rm i}nt (\text{d}ot{W}^m_n)^2+((W^m_n)'-W_n')^2+P
((W^m_n)^2-W_n^2)^2{\bf g}e {{\rm r}m e}psilon_1>0.
{{\rm r}m e}nd{equation}
We know by Theorem {\rm r}ef{Mainth} that
{\beta}egin{equation}
\lambdabel{eqcor2}
\langle s {\rm r}angleup_{t{\bf g}e 0}{\rm i}nt (\text{d}ot{W}^m_n)^2+((W^m_n)'-W_n')^2+P
(W^m_n-W_n)^4{\bf g}e {{\rm r}m e}psilon_0>0.
{{\rm r}m e}nd{equation}
We also know from the proof of Theorem {\rm r}ef{Mainth} that this supremum is achieved on the interval
$[0,m]$ and that on this interval
{\beta}egin{equation*}
\Vert W^m_n-W_n\Vert_{L^2}\le {\rm r}ho
{{\rm r}m e}nd{equation*}
for some ${\rm r}ho>0$. Now observe that for $u{\rm i}n H^1$ we have
{\beta}egin{equation}
\lambdabel{eqcor3}
{\rm i}nt Pu^2\le 2\left({\rm i}nt \frac{1}{r^2}
u^2{\rm r}ight)^{1/2}\left({\rm i}nt (u')^2{\rm r}ight)^{1/2}.
{{\rm r}m e}nd{equation}
Indeed by density we can suppose $u{\rm i}n C_0^{{\rm i}nfty}({\mathbb R})$ and then
compute
{\beta}egin{eqnarray*}
{\rm i}nt Pu^2={\rm i}nt \p_x(-\frac{1}{r})u^2=2{\rm i}nt \frac{1}{r}uu'\le 2 \left({\rm i}nt \frac{1}{r^2}
u^2{\rm r}ight)^{1/2}\left({\rm i}nt (u')^2{\rm r}ight)^{1/2}.
{{\rm r}m e}nd{eqnarray*}
Let us now show {{\rm r}m e}qref{eqcor1}. We can suppose that
{\beta}egin{equation*}
\langle s {\rm r}angleup_{t{\bf g}e 0} {\rm i}nt ((W_n^m)'-W_n')^2\le \frac{{{\rm r}m e}psilon^2_0}{4^4{\rm r}ho^2}
{{\rm r}m e}nd{equation*}
because otherwise there is nothing to show. Then we estimate
{\beta}egin{eqnarray*}
\lefteqn{{\rm i}nt \text{d}ot{W_n^m}^2+((W_n^m)'-W'_n)^2+P((W_n^m)^2-W_n^2)^2}\\
&{\bf g}e&{\rm i}nt \text{d}ot{W_n^m}^2+((W_n^m)'-W'_n)^2+\frac{1}{2}P(W_n^m-W_n)^4-4PW_n^2(W_n^m-W_n)^2\\
&{\bf g}e&{\rm i}nt\frac{1}{2}(\text{d}ot{W_n^m}^2+((W_n^m)'-W'_n)^2+P(W_n^m-W_n)^4)\\
&-&4\left({\rm i}nt((W_n^m)'-W_n')^2{\rm r}ight)^{1/2}\left({\rm i}nt\frac{(W_n^m-W_n)^2}{r^2}{\rm r}ight)^{1/2}\\
&{\bf g}e&\frac{1}{2}{\rm i}nt\text{d}ot{W_n^m}^2+((W_n^m)'-W'_n)^2+P(W_n^m-W_n)^4-{{\rm r}m e}psilon_0/4,
{{\rm r}m e}nd{eqnarray*}
where in the first inequality we have used the estimate
{\beta}egin{align*}(A^2-B^2)^2&= (A-B)^2(A-B+2B)^2 = (A-B)^2\left((A-B)^2 +4B(A-B) +4B^2{\rm r}ight) \\
&{\bf g}eq (A-B)^2((A-B)^2-\frac{1}{2}(A-B)^2-8B^2+4B^2)=
\frac{1}{2}(A-B)^4-4B^2(A-B)^2,
{{\rm r}m e}nd{align*}
and in the second inequality we have used {{\rm r}m e}qref{eqcor3} and the fact that $\Vert W_n \Vert_{L^{\rm i}nfty}\leq 1$.
The supremum over $t{\bf g}e 0$ of this expression is ${\bf g}e {{\rm r}m e}psilon_0/4$ by
{{\rm r}m e}qref{eqcor2}.
$\Box$
{\alpha}ppendix
\langle s {\rm r}angleection{Proof of Theorem {\rm r}ef{thstat}}
In this appendix we give an explicit proof of Theorem \ref{thstat}. We
adapt, in the simpler uncoupled case, the arguments of Smoller, Wasserman,
Yau, McLeod \cite{SWYM}; Smoller, Wasserman, Yau \cite{SWY}; and
Smoller, Wasserman \cite{SW} to show the existence of infinitely many
solutions. In this appendix we work with the $r$ variable and
\underline{we note $'=\p_r$}! Again we
can suppose that $m=1/2$. Recall that the
stationary equation writes
{\beta}egin{equation}
\lambdabel{eqA.1}
\left(1-\frac{1}{r}{\rm r}ight)W''+\frac{1}{r^2}W'+\frac{1}{r^2}W(1-W^2)=0.
{{\rm r}m e}nd{equation}
\langle s {\rm r}angleubsection{Local solutions}
{\beta}egin{proposition}
Let $0<{\alpha}lpha<1$ and $0\leq a \leq 1$. There exists $r_a>1$ and a unique solution $W {\rm i}n C^{2,{\alpha}lpha}([1,r_a])$ with boundary condition
$$W(1)=a, \quad W'(1)= b , \quad W''(1)=c$$
where
$$b= -a(1-a^2), \quad 2c=-b(1-3a^2)$$
{{\rm r}m e}nd{proposition}
{\beta}egin{proof}
We set $z= W'$ to write the equation as a first order system. We consider
$$X= \{(w,z){\rm i}n C^{(2,{\alpha}lpha) }([1,1+{{\rm r}m e}p]){\scriptscriptstyle\#}imes C^{(1,{\alpha}lpha) }([1,1+{{\rm r}m e}p]), w(1)=a,w'(1)=z(1)=b,w''(1)=z'(1)=c\}$$
and the map $T:(w,z) {\rm i}n X \mapsto (\wht w,\wht z)$ with
{\beta}egin{align*}
\wht w &= a+ {\rm i}nt_1^r z,\\
\wht z &= b- {\rm i}nt_1^r \frac{1}{{\rm r}ho({\rm r}ho-1)}(z+w(1-w^2)).
{{\rm r}m e}nd{align*}
We first show that $T$ preserves the boundary conditions.
We calculate
{\beta}egin{align*}\wht z'=& -\frac{1}{r(r-1)}(z+w(1-w^2))\\
=&-\frac{1}{r(r-1)}\left(z
+a(1-a^2) + {\rm i}nt_1^r w'(1-3w^2)
{\rm r}ight)\\
=&-\frac{1}{r(r-1)}(z-b) -\frac{1}{r(r-1)}{\rm i}nt_1^r w'(1-3w^2)
{{\rm r}m e}nd{align*}
so $\wht z'(r){\rm r}ightarrow -z'(1) -w'(1)(1-3w^2(1))=-c+2c=c$ when $r{\rm r}ightarrow 1$.
We now show that $T$ is a contraction in $B_X(0,A)$ for ${{\rm r}m e}p$ small enough. For this the only difficulty is to estimate
{\beta}egin{align*}
\frac{|\wht z'(r)-\wht z'(1)|}{|r-1|^{\alpha}lpha}
\le & \frac{1}{|r-1|^{\alpha}lpha}\left|-\frac{1}{r(r-1)}(z+w(1-w^2))-c{\rm r}ight|\\
\le&\frac{1}{|r-1|^{\alpha}lpha}\Big|-\frac{1}{r(r-1)}\Big(
b+{\rm i}nt_1^r (z'({\rm r}ho)-z'(1))d{\rm r}ho+c(r-1)\\
&+a(1-a^2)+{\rm i}nt_1^r
(w(1-w^2))'({\rm r}ho)-(w(1-w^2))'(1) d{\rm r}ho+b(1-3a^2)(r-1)\Big)-c\Big|
\\
\le &\frac{1}{|r-1|^{\alpha}lpha}\left|-\frac{1}{r(r-1)}
{\rm i}nt_1^r (z'({\rm r}ho)-z'(1))d{\rm r}ho{\rm r}ight|\\
+& \frac{1}{|r-1|^{\alpha}lpha}\left| -\frac{1}{r(r-1)} \left(-(r-1)c+ {\rm i}nt_1^r
(w(1-w^2))'({\rm r}ho)-(w(1-w^2))'(1) d{\rm r}ho{\rm r}ight)-c
{\rm r}ight|\\
\le & \frac{1}{r(r-1)^{1+{\alpha}lpha}}{\rm i}nt_1^r |z'({\rm r}ho)-z'(1)|
+c\frac{1}{(r-1)^{{\alpha}lpha}}\left(1-\frac{1}{r}{\rm r}ight)\\
+&\frac{1}{(r-1)^{1+{\alpha}lpha}}\|(w(1-w^2))'\|_{C^{1}} (r-1)^2\\
\le & \frac{1}{r(r-1)^{1+{\alpha}lpha}}\|z'\|_{C^{0,{\alpha}lpha}}{\rm i}nt_1^r |{\rm r}ho-1|^{\alpha}lpha + c{{\rm r}m e}p^{1-{\alpha}lpha}+ C(\|w\|_{C^2}) {{\rm r}m e}p^{1-{\alpha}lpha}\\
\le &\frac{1}{1+ {\alpha}lpha}\|z'\|_{C^{0,{\alpha}lpha}}
+c{{\rm r}m e}p^{1-{\alpha}lpha}+ C (\|w\|_{C^{2}}){{\rm r}m e}p^{1-{\alpha}lpha}.
{{\rm r}m e}nd{align*}
Consequently we can show that for ${{\rm r}m e}p$ small enough, $T$ is a contraction, with contracting constant $\frac{1}{1+{\alpha}lpha}+C(A){{\rm r}m e}p^{1-{\alpha}lpha}$, and consequently it has a unique fixed point.
{{\rm r}m e}nd{proof}
As a corollary of the proof of local existence we obtain the continuity of the family of solutions $W_a$ with respect to the initial data $a$.
\begin{corollary}\label{cont}
Let $\delta>0$. If $W_a$ is a solution on $[1,R]$ with $-1\leq W_a\leq1$ and $a'$ is sufficiently close to $a$, then $W_{a'}$ is defined on $[1,R]$ and we have
$$\|W_a-W_{a'}\|_{C^{2,\alpha}([1,R])} \leq \delta.$$
\end{corollary}
\langle s {\rm r}angleubsection{Basic facts}
{\beta}egin{lemma}\lambdabel{borne}
Let $0<B\leq 1$. As long as $W$ is a $C^2$ solution with $|W|\leq B$ we have $|W'|\leq B$.
{{\rm r}m e}nd{lemma}
{\beta}egin{proof}
Assume that in $[1,r_0]$ we have $|W|\leq B$.
Then
$$-\frac{B}{r^2}\leq (1-\frac{1}{r})W''+ \frac{1}{r^2}W'\leq \frac{B}{r^2}$$
and consequently
$$\left[\frac{B}{r}{\rm r}ight]_1^r \leq \left[(1-\frac{1}{r})W'{\rm r}ight]_1^r \leq \left[-\frac{B}{r}{\rm r}ight]_1^r$$
so
$$-B \leq W'(r)\leq B.$$
{{\rm r}m e}nd{proof}
{\beta}egin{corollary}
The solution $W$ exists and is $C^{2,{\alpha}lpha}$ as long as $|W|\leq 1$.
{{\rm r}m e}nd{corollary}
We now consider the solution $W$ on $[1,r_a[$ where $r_a$ is the smallest $r$ such that $|W|=1$ if it exists, and $r_a= \infty$ otherwise.
{\beta}egin{lemma}\lambdabel{max}
The solution $W$ cannot have a local minimum with $W>0$ nor a local maximum with $W<0$.
{{\rm r}m e}nd{lemma}
\begin{proof}
If $W$ has a positive local minimum at $r_0$ then
$$\left(1-\frac{1}{r_0}\right)W''(r_0)+\frac{1}{r_0^2}W(r_0)(1-W^2(r_0))=0$$
but $\frac{1}{r_0^2}W(r_0)(1-W^2(r_0))>0$ (the local minimum cannot be $1$), and $W''(r_0)\geq 0$, which is a contradiction.
\end{proof}
\begin{lemma}\label{lim}
The solution $W$ cannot have a limit $l \neq -1,0,1$.
\end{lemma}
{\beta}egin{proof}
Assume that $W{\rm r}ightarrow l$ with $0<l<1$. We can write for $r_1$ big enough and $r_n {\bf g}eq r_1$
$$\left[\frac{l(1-l^2)+{{\rm r}m e}p}{r}{\rm r}ight]_{r_1}^{r_n} \leq \left[(1-\frac{1}{r})W'{\rm r}ight]_{r_1}^{r_n} \leq \left[\frac{l(1-l^2)-{{\rm r}m e}p}{r}{\rm r}ight]_{r_1}^{r_n}.$$
Since $W {\rm r}ightarrow l$ there exists a sequence $r_n {\rm r}ightarrow {\rm i}nfty$ such that $W'(r_n) {\rm r}ightarrow 0$. Letting $n{\rm r}ightarrow {\rm i}nfty$ we obtain
$$ \frac{l(1-l^2)-{{\rm r}m e}p}{r_1} \leq W'(r_1)\left(1-\frac{1}{r_1}{\rm r}ight) \leq \frac{l(1-l^2)+{{\rm r}m e}p}{r_1}$$
so
$$ \frac{l(1-l^2)-{{\rm r}m e}p}{r_1-1} \leq W'(r_1)\leq \frac{l(1-l^2)+{{\rm r}m e}p}{r_1-1}$$
and there exists a constant $C$ such that for $r$ big enough
$$(l(1-l^2)-{{\rm r}m e}p)\ln(r-1) \leq W(r)-C$$
which is a contradiction.
{{\rm r}m e}nd{proof}
\langle s {\rm r}angleubsection{More technical facts}
{\beta}egin{proposition}\lambdabel{ham}
Let $0\leq a \leq 1$. There exists ${{\rm r}m e}p>0$ and $R>0$ such that if
there exists $R<r_0<r_a$ such that $W$ has a local extremum at $r_0$ with $1-{{\rm r}m e}p \leq |W(r_0)|<1$ then $r_a<{\rm i}nfty$ and $W$ has one and only one zero in $[r_0,r_a]$.
{{\rm r}m e}nd{proposition}
{\beta}egin{proof}
We consider the case $W(r_0)>0$. The other case can be treated similarly.
We consider
$$H= r^2\frac{(W')^2}{2} + \frac{W^2}{2}-\frac{W^4}{4}.$$
We calculate
{\beta}egin{align*}H'(r)=&r (W')^2 +r^2\frac{1}{r(1-r)}(W'+W(1-W^2))W' + WW'-W^3W'\\
= &(W')^2\left(r +\frac{r^2}{r(1-r)}{\rm r}ight)+ WW'(1-W^2)\left(1+ \frac{r^2}{r(1-r)}{\rm r}ight)\\
=&(W')^2\left(r +\frac{r^2}{r(1-r)}{\rm r}ight)+ WW'(1-W^2)\frac{1}{1-r}.
{{\rm r}m e}nd{align*}
Let $R$ be such that for $r{\bf g}eq R$ we have
$$\left(r +\frac{r^2}{r(1-r)}{\rm r}ight)>0$$
then for $r\geq R$ and $WW'\leq 0$ we have $H'(r)>0$. With our assumption on $r_0$ we can estimate
$$H(r_0) \geq \frac{(1-\ep)^2}{2}-\frac{1}{4}\geq \frac{1}{4\delta}$$
for a suitable $\delta$ which will be specified later, and $\ep$ small enough.
Since $W$ has a local maximum at $r_0$, there are two possibilities:
\begin{itemize}
\item We have $r_a= \infty$, $W$ is decreasing on $[r_0,+\infty[$ and $W\rightarrow 0$ at $\infty$.
\item There exists $r_1<r_a$ such that $W(r_1)= 0$ and $W$ is decreasing on $[r_0,r_1]$.
\end{itemize}
In the first case we obtain, for all $r\geq r_a$, $H'(r)>0$, so
\[ H(r) \geq \frac{1}{4\delta}, \]
and since $W\rightarrow 0$ the expression of $H$ yields the existence of $r_2$ such that for $r\geq r_2$
\[ W'(r)^2 \geq \frac{1}{(2\delta+1)r^2}, \]
so
$W'(r)\leq -\frac{1}{\sqrt{2\delta+1}\,r}$
and $W(r) \leq W(r_2)-\frac{1}{\sqrt{2\delta+1}}\ln(r)$, which is a contradiction.
Consequently we are in the second case.
We have $H(r_1)\geq H(r_0)$, so we can estimate $W'(r_1)$:
\[ W'(r_1)\leq -\frac{1}{\sqrt{2\delta}\,r_1}. \]
Moreover, when $-1\leq W\leq 1$ we have
$W(1-W^2)\leq \frac{2}{3\sqrt{3}}$, and consequently we can write for $r_a>r_2>r_1$
\[ \left[W'(r)\left(1-\frac{1}{r}\right)\right]_{r_1}^{r_2}
\leq \left[-\frac{2}{3\sqrt{3}\,r}\right]_{r_1}^{r_2}, \]
and consequently
\[ \left(1-\frac{1}{r_2}\right)W'(r_2)\leq
-\left(1-\frac{1}{r_1}\right)\frac{1}{\sqrt{2\delta}\,r_1}
+\frac{2}{3\sqrt{3}\,r_1}-\frac{2}{3\sqrt{3}\,r_2}. \]
For $r_1$ big enough (which is possible by choosing $R$ big enough) and $\delta$ close enough to $1$ (which is possible by choosing $\varepsilon$ small enough) we have
\[ -\left(1-\frac{1}{r_1}\right)\frac{1}{\sqrt{2\delta}\,r_1}
+\frac{2}{3\sqrt{3}\,r_1}\leq 0, \]
since $\frac{2}{3\sqrt{3}}<\frac{1}{\sqrt{2}}$,
and consequently
\[ W'(r_2)\leq -\frac{2}{3\sqrt{3}(r_2-1)}, \]
and therefore $r_a<\infty$, $W(r_a)=-1$ and $W'$ is decreasing on $[r_1,r_a]$. This concludes the proof of Proposition \ref{ham}.
\end{proof}
\begin{proposition}\label{zero}
Let $N>0$. Then for $a$ small enough, the solution $W$ has more than $N$ zeros on $[1,r_a]$.
\end{proposition}
\begin{proof}
To count the number of zeros of a solution $W$ we can introduce the continuous function $\theta$ such that
\[ \tan(\theta)= \frac{W'}{W} \]
and $-\frac{\pi}{2}<\theta(1)<\frac{\pi}{2}$. Then $W$ has $N$ zeros between $1$ and $r_0$ if and only if
\[ -\frac{\pi}{2}-N\pi<\theta(r_0)<\frac{\pi}{2}-N\pi. \]
It is entirely similar to count the number of zeros by means of the function $\psi$ defined by
\[ \tan(\psi)= \frac{rW'}{W}. \]
We estimate $\psi'$:
\begin{align*}\psi'(r)=&\frac{1}{1+\left(\frac{rW'}{W}\right)^2}\frac{W(W'+rW'')-r(W')^2}{W^2}\\
=&\frac{WW'-W\frac{1}{r-1}(W'+W(1-W^2)) -(rW')^2}{W^2+(rW')^2}\\
=&\frac{WW'\frac{r-2}{r-1}-\frac{1}{r-1}W^2(1-W^2) -(rW')^2}{W^2+(rW')^2}.
\end{align*}
We first estimate
\[ \frac{WW'\frac{r-2}{r-1}}{W^2+(rW')^2}\leq \frac{1}{r}\frac{|rWW'|}{W^2+(rW')^2}\leq \frac{1}{2r}. \]
We assume that $|W|\leq \delta$.
To estimate the other terms we consider three cases:
\begin{itemize}
\item $2|W|^2\leq |rW'|^2$. Then we have
\[ \psi'(r) \leq \frac{1}{2r} -\frac{r(W')^2}{W^2+ (rW')^2}
\leq \frac{1}{2r}-\frac{2}{3r}= -\frac{1}{6r}. \]
\item $2|rW'|^2\leq |W|^2$. Then we have
\[ \psi'(r) \leq \frac{1}{2r} -\frac{W^2(1-W^2)}{(r-1)(W^2+(rW')^2)}
\leq \frac{1}{2r}-\frac{2(1-\delta^2)}{3(r-1)}
\leq \frac{3-4(1-\delta^2)}{6r}. \]
\item $\frac{1}{2}|W|^2\leq|rW'|^2\leq 2|W|^2$. Then we have
\[ \psi'(r) \leq \frac{1}{2r} -\frac{r(W')^2}{W^2+ (rW')^2}
-\frac{W^2(1-W^2)}{(r-1)(W^2+(rW')^2)}
\leq \frac{1}{2r}-\frac{1}{3r}
-\frac{(1-\delta^2)}{3r}\leq \frac{3-4+2\delta^2}{6r}. \]
\end{itemize}
If we take $\delta$ small enough we then have
\[ \psi'(r)\leq -\frac{1}{12r}. \]
Let now $R$ be such that $-\frac{1}{12}\ln(R)\leq -N\pi$.
Thanks to Corollary \ref{cont}, we can find $a_0$ small enough such that for $0\leq a \leq a_0$ the solution exists on $[1,R]$ and satisfies $|W|\leq \delta$ on this interval. Then $\psi(R)-\psi(1)\leq -N\pi$, so $W$ has at least $N$ zeros on $[1,R]$. This concludes the proof of Proposition \ref{zero}.
\end{proof}
\begin{corollary}\label{limbis}
Let $W$ be a solution with $r_a= \infty$ and a finite number of zeros. Then $W \rightarrow \pm 1$.
\end{corollary}
\begin{proof}
Because of Lemma \ref{max}, a solution with a finite number of zeros has a finite limit. Because of Lemma \ref{lim}, this limit must be $0$ or $\pm 1$. If it were $0$, we could find $R_0$ such that $|W|\leq \delta$ for $r\geq R_0$, with $\delta$ defined in the proof of Proposition \ref{zero}. Then for $r\geq R_0$ we have
\[ \psi'(r)\leq -\frac{1}{12r}; \]
consequently $\psi$ is unbounded from below, so $W$ has an infinite number of zeros.
\end{proof}
\subsection{Proof of the Theorem}
\begin{lemma}
Let $X_n$ be the set of initial data $a$ such that the corresponding solution has $n$ zeros and satisfies $r_a<\infty$. Then $X_n$ is open, and if $\alpha$ is a limit point of $X_n$ the corresponding solution satisfies $r_\alpha= \infty$, has $m$ zeros, with $m=n$ or $m=n-1$, and tends to $(-1)^m$ at infinity.
\end{lemma}
\begin{proof}
The fact that $X_n$ is open is a direct consequence of Corollary \ref{cont}. Let $\alpha$ be a limit point and let $a_i \in X_n$ be such that $a_i \rightarrow \alpha$.
Assume first that $W_\alpha$ is such that $r_\alpha<\infty$. Then we can compare the solutions on the fixed interval $[1,r_\alpha]$, so Corollary \ref{cont} implies that $W_\alpha$ has exactly $n$ zeros, so $\alpha \in X_n$, which is a contradiction. Consequently $r_\alpha= \infty$. This also implies that the sequence of $r_{a_i}$ is not bounded.
Assume now that there exists $R$ such that $W_\alpha$ has strictly more than $n$ zeros before $R$. Once again Corollary \ref{cont} yields a contradiction.
Assume now that $W_{\alpha}$ has $m$ zeros with $m<n$. Then, thanks to Corollary \ref{limbis}, $W_{\alpha}$ tends to $(-1)^m$, and Proposition \ref{ham} implies that the $W_{a_i}$ have $m$ or $m+1$ zeros.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thstat}]
Let $\wht X_n$ be the set of initial data $a$ such that the corresponding solution has less than $n$ zeros.
Let $\alpha = \min(\wht X_n)$. Proposition \ref{zero} implies that $\alpha>0$.
There are two cases:
\begin{itemize}
\item If $\alpha \in \wht X_n$, then $W_{\alpha}$ is a solution with $m\leq n$ zeros and $r_{\alpha}= \infty$. Then Corollary \ref{cont} and Proposition \ref{ham} imply that for $a$ close to $\alpha$ the solutions either have $m$ zeros, or have $m+1$ zeros and $r_a<\infty$. Consequently we have $m=n$, and considering a sequence $a_i<\alpha$ converging to $\alpha$ we have shown that $X_{n+1}$ is non-empty.
\item If $\alpha \notin \wht X_n$, then $W_{\alpha}$ must be a solution with $r_{\alpha}= \infty$ and $k>n$ zeros. But we have shown in the previous point that in a neighborhood of such a solution we can only have solutions with $k$ or $k+1$ zeros, so this case cannot occur.
\end{itemize}
We start the iteration with the function
\[ W_1 = \frac{c-r}{r+3(c-1)}, \quad c=\frac{3+\sqrt{3}}{2}, \]
which is a special solution of \eqref{ym}, with only one zero (see
\cite{BCC}). Note also that
\[ W_1(1)= \frac{1+\sqrt{3}}{5+3\sqrt{3}}=a_1. \]
We then obtain at least one solution $-1\leq W_{a_n} \leq 1$ for each number of zeros $n$. This concludes the proof of Theorem \ref{thstat}.
\end{proof}
\begin{thebibliography}{99}
\bibitem{cbh}
P. Bizo\'n, \emph{Colored black holes}, Phys. Rev. Lett. 64 (1990),
no. 24, 2844-2847.
\bibitem{BCC} H. Boutaleb-Joutei, A. Chakrabarti, A. Comtet, \emph{Gauge field configurations in curved spacetimes. I}, Phys. Rev. D (3) 20 (1979), no. 8, 1884-1897.
\bibitem{BRZ}
P. Bizo\'n, A. Rostworowski, A. Zenginoglu, \emph{Saddle-point
dynamics of a Yang-Mills field on the exterior Schwarzschild
spacetime}, Classical Quantum Gravity \textbf{27} (2010), no. 17, 175003, 11 pp.
\bibitem{CS}
P. Chru\'sciel, J. Shatah, \emph{Global existence of solutions of the Yang-Mills equations on globally hyperbolic four-dimensional Lorentzian manifolds},
Asian J. Math. 1 (1997), no. 3, 530-548.
\bibitem{DaKr}
Ju. L. Daleckii, M. G. Krein, \emph{Stability of Solutions of Differential Equations in Banach Spaces}, Translations
of Mathematical Monographs 43, AMS, Providence, 1974.
\bibitem{EM1}
D. Eardley, V. Moncrief, \emph{The global existence of
Yang-Mills-Higgs fields in 4-dimensional Minkowski space. I. Local
existence and smoothness properties}, Comm. Math. Phys. 83 (1982), no. 2, 171-191.
\bibitem{EM2}
D. Eardley, V. Moncrief, \emph{The global existence of Yang-Mills-Higgs fields in 4-dimensional Minkowski space. II. Completion of proof},
Comm. Math. Phys. 83 (1982), no. 2, 193-212.
\bibitem{FM}
P. Forgacs, N. S. Manton, \emph{Space-Time Symmetries In Gauge Theories}, Comm. Math. Phys. \textbf{72}, 15 (1980).
\bibitem{G1}
S. Ghanem, \emph{The global non-blow up of Yang-Mills curvature on curved
space-times}, to appear in Journal of Hyperbolic Differential Equations, arXiv:1312.5476.
\bibitem{GH}
S. Ghanem, D. H\"afner, \emph{The decay of the SU(2) Yang-Mills fields
on the Schwarzschild black hole for spherically symmetric small
energy initial data}, arXiv:1604.04477.
\bibitem{GuHu}
C. Gu, H. Hu, \emph{On the spherically symmetric gauge fields}, Comm. Math. Phys. \textbf{79} (1981), 75-90.
\bibitem{HE}
S. W. Hawking \& G. F. R. Ellis,
\emph{The Large Scale Structure of Space-time}, Cambridge: Cambridge University Press, 1973.
\bibitem{He}
D. Henry, \emph{Geometric Theory of Semilinear Parabolic Equations},
Lecture Notes in Mathematics, Springer 1981.
\bibitem{SW}
J. Smoller, A. Wasserman, \emph{Existence of infinitely-many smooth,
static, global solutions of the Einstein/Yang-Mills equations},
Comm. Math. Phys. 151 (1993), 303-325.
\bibitem{SWY}
J. Smoller, A. Wasserman, S.-T. Yau, \emph{Existence of black hole
solutions for the Einstein-Yang/Mills equations},
Comm. Math. Phys. 154 (1993), 377-401.
\bibitem{SWYM}
J. Smoller, A. Wasserman, S.-T. Yau, J. B. McLeod, \emph{Smooth static solutions of the Einstein/Yang-Mills equations}, Comm. Math. Phys. 143 (1991), no. 1, 115-147.
\bibitem{Pa}
A. Pazy, \emph{Semigroups of linear operators and applications to
Partial Differential Equations}, Springer 1983.
\bibitem{W}
E. Witten, \emph{Some Exact Multipseudoparticle Solutions of Classical
Yang-Mills Theory}, Phys. Rev. Lett. \textbf{38} (1977), 121-124.
\end{thebibliography}
\end{document}
\begin{document}
\date{}
\title{Pancyclicity of Hamiltonian and highly connected graphs} % NOTE(review): original title line was truncated by extraction (`\title{space{-0.1cm}`); reconstructed from the paper's content — confirm against the published version
\begin{abstract}
A graph $G$ on $n$ vertices is {\em Hamiltonian} if it contains a cycle of length $n$
and {\em pancyclic} if it contains cycles of length $\ell$ for all $3 \le \ell \le n$.
Write $\alpha(G)$ for the {\em independence number} of $G$, i.e.\ the size of the largest
subset of the vertex set that does not contain an edge, and $\kappa(G)$ for the (vertex)
{\em connectivity}, i.e.\ the size of the smallest subset of the vertex set that can be deleted
to obtain a disconnected graph. A celebrated theorem of Chv\'atal and Erd\H{o}s says that
$G$ is Hamiltonian if $\kappa(G) \ge \alpha(G)$. Moreover, Bondy suggested that almost any non-trivial
conditions for Hamiltonicity of a graph should also imply pancyclicity. Motivated by this,
we prove that if $\kappa(G) \ge 600\alpha(G)$
then $G$ is pancyclic. This establishes a conjecture of Jackson and Ordaz up to a constant factor.
Moreover, we obtain the more general result that if $G$ is Hamiltonian with
minimum degree $\delta(G) \ge 600\alpha(G)$ then $G$ is pancyclic. Improving an old result of
Erd\H{o}s, we also show that $G$ is pancyclic if it is Hamiltonian and $n \ge 150\alpha(G)^3$.
Our arguments use the following theorem of independent interest
on cycle lengths in graphs: if $\delta(G) \ge 300\alpha(G)$ then
$G$ contains a cycle of length $\ell$ for all $3 \le \ell \le \delta(G)/81$.
\end{abstract}
\section{Introduction}
A {\em Hamilton cycle} is a spanning cycle in a graph, i.e.\ a cycle passing through all vertices.
A graph is called {\em Hamiltonian} if it contains such a cycle. Hamiltonicity is one of the most
fundamental notions in graph theory, tracing its origins to Sir William Rowan Hamilton in the 1850's.
Deciding whether a given graph contains a Hamilton cycle is NP-complete,
so we do not expect to have a simple characterisation for this property.
There is a vast literature in graph theory devoted to obtaining sufficient conditions
for Hamiltonicity. For more details we refer the interested reader
to the surveys of Gould \cite{G1,G2}. The classical result giving such a condition is
Dirac's theorem \cite{D}, which says that every graph $G$ with $n \geq 3$ vertices
and minimum degree at least $n/2$ is Hamiltonian.
This theorem was generalised by Bondy \cite{B1}, who showed that the same assumptions imply
that $G$ is {\em pancyclic}, i.e.\ contains cycles of length $\ell$ for all $3 \le \ell \le n$.
In \cite{B2} Bondy proposed the following `metaconjecture' which has had a considerable
influence on research on cycles in graphs.
\nib{Metaconjecture.} Almost any non-trivial condition on a graph which implies that the
graph is Hamiltonian also implies that the graph is pancyclic.
(There may be a simple family of exceptional graphs.)
Another classical condition for a graph to be Hamiltonian
is given by a theorem of Chv\'atal and Erd\H{o}s \cite{CE},
who showed that if a graph $G$ satisfies $\kappa(G) \ge \alpha(G)$ then it is Hamiltonian.
Here $\alpha(G)$ is the {\em independence number}, i.e.\ the size of the largest subset of the
vertex set that does not contain an edge, and $\kappa(G)$ is the (vertex) {\em connectivity},
i.e.\ the size of the smallest subset of the vertex set that can be deleted to obtain a disconnected
graph. Motivated by Bondy's metaconjecture, Amar, Fournier and Germa \cite{AFG}
obtained several results on the lengths of cycles in a graph $G$ that satisfies the
Chv\'atal-Erd\H{o}s condition $\kappa(G) \ge \alpha(G)$. They conjectured that if such a graph $G$
is not bipartite then $G$ contains cycles of length $\ell$ for all $4 \le \ell \le n$.
(The case when $G=C_5$ is a $5$-cycle needs to be excluded.)
Note that the balanced complete bipartite graph $K_{k,k}$ satisfies $\kappa(G)=\alpha(G)=k$
but is not pancyclic, indeed it has no odd cycles. They also made the weaker conjecture that
the same conclusion holds under the additional assumption that $G$ is triangle-free.
Lou \cite{L} proved the stronger statement that if $\kappa(G) \ge \alpha(G)$ and $G$ is
triangle-free then $G$ contains cycles of length $\ell$ for all $4 \le \ell \le n$, unless
$G=C_5$ or $G=K_{k,k}$ for some $k$. Note that the connectivity $\kappa(G)$ is bounded above by the
degree of any vertex. If $\kappa(G) > \alpha(G)$ then there is an edge inside any neighbourhood of $G$,
so in particular $G$ must contain triangles. Jackson and Ordaz \cite{JO} conjectured that
if $\kappa(G)> \alpha(G)$ then $G$ is pancyclic.
To approach these conjectures it is natural to try to prove pancyclicity under a
stronger connectivity assumption. A remarkable theorem of Erd\H{o}s \cite{E},
proving a conjecture of Zarins,
shows that instead of making a connectivity assumption,
it suffices to assume that $G$ is Hamiltonian and the number of vertices
is sufficiently large compared to the independence number.
He showed that if $G$ is a Hamiltonian graph on $n$ vertices with $\alpha(G)=k$
and $n>4k^4$ then $G$ is pancyclic. It then follows from \cite{CE} that
$\kappa(G)\ge 4(\alpha(G)+1)^4$ is sufficient for pancyclicity.
(Various considerably weaker results were subsequently obtained by Flandrin et al.,
e.g.\ \cite{FLMW}, who were presumably unaware of Erd\H{o}s' paper.)
Our main result improves this bound significantly: we show that a connectivity which is only linear in
the independence number suffices for pancyclicity. This establishes the conjecture of
Jackson and Ordaz mentioned above up to a constant factor.
Moreover, we prove that pancyclicity already follows from assuming that $G$ is Hamiltonian
with minimum degree $\delta(G)$ at least linear in the independence number.
\begin{theo}\label{pan-mindeg}
If $G$ is a Hamiltonian graph with $\delta(G) \ge 600\alpha(G)$ then $G$ is pancyclic.
In particular, if $G$ is any graph with $\kappa(G) \ge 600\alpha(G)$ then $G$ is pancyclic.
\end{theo}
Erd\H{o}s \cite{E} remarked that the bound $n>4k^4$ in his result is unlikely to be best possible. He also
noticed that a quadratic lower bound for $n$ in terms of $k$ is necessary
for Hamiltonicity to imply pancyclicity. Our next theorem improves Erd\H{o}s' result and
shows that a cubic dependence of $n$ on $k$ is already sufficient.
\begin{theo}\label{pan-n}
If $G$ is a Hamiltonian graph with $|V(G)| \ge 150\alpha(G)^3$ then $G$ is pancyclic.
\end{theo}
Our arguments will use a theorem of independent interest on cycle lengths in graphs.
\begin{theo} \label{short-cycles}
If $G$ is a graph with $\delta(G) \ge 300\alpha(G)$
then $G$ contains a cycle of length $\ell$ for all $3 \le \ell \le \delta(G)/81$.
\end{theo}
It is instructive to compare Theorem \ref{short-cycles} with a result of
Nikiforov and Schelp \cite{NS}, who showed that when the minimum degree $\delta(G)$ is linear
in the number of vertices then $G$ contains even cycles of all lengths between $4$ and $\delta(G)+1$
and, after excluding some exceptional cases, odd cycles of all lengths between a constant and $\delta(G)+1$.
We refer the reader to the chapter of Bondy in \cite{B3} for other results on cycle lengths
in graphs, and to \cite{GHS,V,SV} as examples of more recent related results.
Next we describe a simple example showing that Theorems \ref{pan-mindeg} and \ref{short-cycles}
are best possible up to the constant factors and that the lower bound for $|V(G)|$ in
Theorem \ref{pan-n} has to be at least quadratic in $\alpha(G)$. Suppose $k \ge 3$ and
let $G$ be the graph on $n=k(2k-2)$ vertices obtained by taking $k$ vertex-disjoint cliques
$X_1,\cdots,X_k$ of size $2k-2$ and adding a matching of size $k$ which has exactly one edge between
$X_i$ and $X_{i+1}$ for all $1 \le i \le k$ (where $X_{k+1}:=X_1$).
Then it is easy to check that $G$ is Hamiltonian, $\alpha(G)=k$ and $\delta(G)=2k-3$,
but $G$ does not contain a cycle of length $2k-1$, so is not pancyclic.
The organisation of this paper is as follows. In the next section we collect various
known results that we will use in our arguments. We present the proofs of our theorems
in Section 3. The final section contains some concluding remarks.
We systematically omit rounding signs for the sake of clarity of presentation.
We also do not make any serious attempt to optimise
absolute constants in our statements and proofs.
\nib{Notation.}
Suppose $G$ is a graph. For a vertex $v$ we let $N(v)$ denote its neighbourhood and
$d(v)=|N(v)|$ its degree. If $X$ is a set of vertices then $G[X]$ is the restriction
of $G$ to $X$, i.e.\ the graph with vertex set $X$ whose edges are edges of $G$ with
both endpoints in $X$. We write $e_G(X)=e(G[X])$ for the number of edges in
$X$. If $X$ and $Y$ are sets of vertices then $e_G(X,Y)$ is the number of edges with
one endpoint in $X$ and the other in $Y$. We omit the subscript $G$ if there is no
danger of ambiguity. A walk in $G$ is a sequence of vertices $W = x_0 \cdots x_t$
such that $x_i$ is adjacent to $x_{i+1}$ for $0 \le i \le t-1$. (The vertices need
not be distinct.) The length $\ell(W)=t$ of $W$ is the number of edges in $W$,
counting multiplicity of repeated edges. A path is a walk in which no vertices are repeated.
A cycle is a walk in which no vertices are repeated, except that the first and last vertices are equal.
\section{Preliminaries}
In this section we collect various results that will be used in our arguments.
We include the short proofs for the convenience of the reader.
\subsection{Degrees}
We start with two well-known propositions on degrees in graphs.
\begin{prop}\label{bip}
Suppose $G$ is a graph with minimum degree at least $d$.
Then $G$ has a bipartite subgraph $B$ with minimum degree at least $d/2$.
\end{prop}
\nib{Proof.} Consider a bipartite subgraph $B$ of $G$ with as many edges as possible.
Let $X$ and $Y$ be the two parts of the bipartition of $B$.
Then any vertex $v \in X$ has at least $d(v)/2$ neighbours in $Y$,
or we could improve the partition by moving $v$ to $Y$.
The same argument applies for $v \in Y$. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
\begin{prop}\label{avmindeg}
Suppose $G$ is a graph with average degree at least $d$.
Then $G$ has an induced subgraph with minimum degree at least $d/2$.
\end{prop}
\nib{Proof.} Suppose $G$ has $n$ vertices and construct a sequence of graphs
$G_n=G, G_{n-1}, \cdots$ where if $G_i$ has minimum degree less than $d/2$ we
construct $G_{i+1}$ by deleting a vertex of degree less than $d/2$ from $G_i$.
The number of edges deleted in this process is less than $nd/2 \le e(G)$
so it must terminate at some induced subgraph with minimum degree at least $d/2$. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
\subsection{Breadth first search trees}
Suppose that $B$ is a graph and $x$ is a vertex of $B$.
We construct a {\em breadth first search tree} $T$ in $B$ starting
at $x$ by the following iterative procedure.
We start with $T_0$ equal to the one-vertex tree on $x$.
Then at step $i \ge 1$, we let $N_i$ be the set of vertices not in $T_{i-1}$ that
have at least one neighbour in the tree $T_{i-1}$, and construct
$T_i$ on the vertex set $V(T_{i-1}) \cup N_i$ by adding an edge
from each vertex $v$ in $N_i$ to a neighbour of $v$ in $T_{i-1}$.
\begin{prop}\label{bfs}
Suppose $B$ is a bipartite graph and $T$ is a breadth first search
tree in $B$ starting from a vertex $x$. Let $N_i$ be the set of
vertices at distance $i$ from $x$ in $T$. Then any vertex in $N_i$
is at distance $i$ from $x$ in $B$. Also, each $N_i$ is an independent
set in $B$ and all edges of $B$ join $N_i$ to $N_{i+1}$ for some $i \ge 0$.
Now suppose also that $B$ has $n$ vertices and minimum degree $d \ge 5$.
Then there is some number $i \ge 0$ such that
$e_B(N_i,N_{i+1}) \ge \frac{d}{4}(|N_i|+|N_{i+1}|)$.
Furthermore, if $m \ge 0$ is the smallest number with
$e_B(N_m,N_{m+1}) \ge \frac{2d}{9}(|N_m|+|N_{m+1}|)$
then $m \ge 1$ and $|N_{i+1}| \ge 2|N_i|$ for $0 \le i \le m-1$.
\end{prop}
\nib{Proof.} Let $T_0, T_1, \cdots$ be the sequence of trees in the
breadth first search construction. For any $v$ in $N_i$, the neighbours in $B$
of $v$ within $V(T_{i-1})$ must lie in $N_{i-1}$, or we would have already added $v$
to $T_{i-1}$. We deduce that the distance in $B$ from $v$ to $x$ is $i$.
Next consider any $y$ and $z$ in $N_i$, let $P$ be the path between $y$ and $z$
in $T_i$ and let $w$ be the closest point to $x$ on $P$. If $w \in N_j$ then
the length of $P$ is $2(i-j)$, which is even, so $yz$ cannot be an edge,
since $B$ is bipartite. This shows that $N_i$ is independent and
all edges of $B$ join $N_i$ to $N_{i+1}$ for some $i \ge 0$.
Now suppose that $B$ has $n$ vertices and minimum degree at least $d$, so that $e(B) \ge dn/2$.
We cannot have $e_B(N_i,N_{i+1}) < \frac{d}{4}(|N_i|+|N_{i+1}|)$ for all $i \ge 0$,
as this would give the contradiction
$$dn/2 \le e(B) = \sum_{i \ge 0} e(N_i,N_{i+1}) < \frac{d}{4} \sum_{i \ge 0} (|N_i|+|N_{i+1}|)
= \frac{d}{4}(2n-1).$$
Consider the smallest $m \ge 0$ with $e_B(N_m,N_{m+1}) \ge \frac{2d}{9}(|N_m|+|N_{m+1}|)$.
Then $m \ge 1$, since $e_B(N_0,N_1)=|N_1|$ and $d \ge 5$,
so $\frac{2d}{9}(|N_0|+|N_{1}|) \ge |N_1|+1$.
We claim that $|N_{i+1}| \ge 2|N_i|$ for $0 \le i \le m-1$.
For suppose that this is not the case, and consider the smallest
$0 \leq i \leq m-1$ for which $|N_{i+1}| < 2|N_i|$. Then $i \ge 1$, since
$|N_0|=1$ and $|N_1| \ge d \ge 5$.
There are at least $d|N_i|$ edges incident to $N_i$,
so we must have $e_B(N_{i-1},N_i) \ge d|N_i|/3$ or $e_B(N_i,N_{i+1}) \ge 2d|N_i|/3$.
In the first case we have
$e_B(N_{i-1},N_i) \ge d|N_i|/3 \ge \frac{2d}{9}(|N_{i-1}|+|N_i|)$,
since $|N_i| \ge 2|N_{i-1}|$. In the other case we have
$e_B(N_i,N_{i+1}) \ge 2d|N_i|/3 \ge \frac{2d}{9}(|N_i|+|N_{i+1}|)$,
since $|N_{i+1}| < 2|N_i|$. Either way we have a contradiction to
the minimality of $m$, so the claim is proved. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
\subsection{Independence number}
Here we give some well-known relationships between degrees, chromatic number
and independence number.
\begin{prop} \label{indep}
Suppose $G$ is a graph on $n$ vertices with maximum degree at most $k$.
Then $G$ contains an independent set of size at least $n/(k+1)$.
\end{prop}
\nib{Proof.} Construct an independent set $S$ greedily by repeatedly choosing
any currently available vertex and then marking all its neighbours as unavailable.
At the end of this process every vertex of $G$ is either in $S$ or marked as
unavailable. At most $k|S|$ vertices have been marked unavailable,
so $n \le |S|+k|S|$, i.e.\ $|S| \ge n/(k+1)$. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
\begin{prop} \label{chromatic}
Suppose $G$ is a graph for which every induced subgraph has a vertex of degree at most $k$.
Then $G$ has chromatic number at most $k+1$.
\end{prop}
\nib{Proof.}
Define a sequence of induced subgraphs $G_n,\cdots,G_0$ starting from $G_n=G$,
where $G_{i-1}$ is obtained from $G_i$ by deleting a vertex $v_i$ of degree at most $k$.
Consider the vertices in the order $v_1,\cdots,v_n$ and greedily colour them using
$\{1,\cdots,k+1\}$. When we colour $v_i$ we have used at most $k$ colours on its neighbours
in $G_i$, so there is an available colour in $\{1,\cdots,k+1\}$. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
\begin{prop} \label{indep-mindeg}
Suppose $G$ is a graph with independence number $\alpha(G) \le k$
and $n \ge dk+1$ vertices. Then $G$ contains an induced subgraph $H$ with at most $dk+1$
vertices and minimum degree at least $d$.
\end{prop}
\nib{Proof.} Let $S$ be a set of $dk+1$ vertices of $G$.
The restriction $G[S]$ of $G$ to $S$ must have chromatic number at least $d+1$,
otherwise it would contain an independent set of size at least $|S|/d > k$,
contradicting our assumption on $G$. By Proposition \ref{chromatic} $G[S]$
contains an induced subgraph $H$ with minimum degree at least $d$. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
\subsection{Paths}
Now we give some simple tools for manipulating the lengths of paths
in cycles when there is a bound on the independence number.
Given any path or cycle $W$ in a graph $G$, we refer to any set of $t+1$ consecutive points
on $W$ as a {\em $t$-interval} (so $t$ is the length of the interval).
If $J$ is an interval of length at least $2$ such that the endpoints of $J$ are adjacent
then we call $J$ a {\em jump} of $W$ in $G$.
For a jump $J$ we write $\partial J$ for the edge joining the ends of $J$
and $J^o$ for the subinterval of internal points obtained by removing its ends.
\begin{prop}\label{jump}
Suppose $G$ is a graph with independence number $\alpha(G) \le k$,
$W$ is a path or cycle in $G$ and $I$ is an interval of length at least $2k$ on $W$.
Then $I$ contains a jump of $W$ in $G$ with length at most $2k$.
\end{prop}
\nib{Proof.} Starting at one end of $I$ consider the points with positions
$1,3,5,\cdots,2k+1$. This set of $k+1$ points must contain an edge, since
$\alpha(G) \le k$. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
\begin{prop}\label{shorten}
Suppose $G$ is a graph with independence number $\alpha(G) \le k$
and $P$ is a path of length $p$ in $G$ joining two vertices $x$ and $y$.
Then for any number $1 \leq q \leq p$ there is a path of some length $\ell$
joining $x$ and $y$ with $q \leq \ell \leq q+2k-2$.
\end{prop}
\nib{Proof.} We use induction on $p$. The statement is clearly true for $p \leq 2k-1$,
so suppose that $p \geq 2k$. By Proposition \ref{jump} there is a jump $J$ of $P$ in $G$
of some length $j$ with $2 \le j \le 2k$. Replacing the portion of $P$ along $J$ by the
edge $\partial J$ joining the ends of $J$ gives a path $P'$ of length $p-j+1$ joining $x$ and $y$.
Now for all $q>p-j+1$ we can use the original path $P$,
and for all $q \leq p-j+1$ we can apply the induction hypothesis to $P'$. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
We also need the following well-known proposition.
\begin{prop}\label{length}
Suppose $G$ is a graph with minimum degree at least $d$
and $x$ is a vertex of $G$.
Then $G$ contains a path of length at least $d$ starting at $x$.
Furthermore, if $G$ is bipartite then $G$ contains such
a path of length at least $2d-1$.
\end{prop}
\nib{Proof.} Let $P$ be a longest path in $G$ starting at $x$
and let $y$ be the last vertex of $P$. By the minimum degree condition $y$
has at least $d$ neighbours, and these all belong to $P$ by choice of a
longest path, so $P$ contains at least $d+1$ vertices.
Furthermore, if $G$ is bipartite, then $y$ is not adjacent to
any vertex at even distance from $y$ along $P$,
so $P$ contains at least $2d$ vertices. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
\subsection{Hamiltonicity}
Here we give two more substantial lemmas on Hamiltonian graphs which appeared implicitly in \cite{E}.
One facilitates absorption of a vertex to create a Hamiltonian graph
with one more vertex, the other deletion of a vertex to create a Hamiltonian
graph with one fewer vertex.
\begin{lemma} \label{add}
Suppose $G$ is a graph, $x$ is a vertex of degree at least $k+1$ in $G$ and
$H = G \setminus \{x\}$ is a Hamiltonian graph with independence number $\alpha(H) \le k$.
Then $G$ is Hamiltonian.
\end{lemma}
\nib{Proof.}
Suppose that $H$ has $n$ vertices. Label them with $[n]=\{1,\cdots,n\}$
such that $\{i,i+1\}$ is an edge for $1 \le i \le n$, where addition
is mod $n$, i.e.\ $n+1$ is identified with $1$. Let $A \subseteq [n]$ be the
neighbourhood of $x$ and let $A^+ = \{a+1: a \in A\}$.
Since $|A^+| \ge d(x) \ge k+1 > \alpha(H)$ there is an edge
$\{y,z\}$ in $A^+$, where without loss of generality $y<z$.
Now we can form a Hamilton cycle in $G$ by starting at $x$,
going to $z-1 \in A$, decreasing to $y$, using the edge $\{y,z\}$
to get to $z$, increasing to $n$, going to $1$, increasing to $y-1 \in A$,
then ending at $x$. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
We remark that the argument in Lemma \ref{add} is the main idea
in the proof of the Chv\'atal-Erd\H{o}s theorem.
\begin{lemma}\label{erdos}
Suppose $G$ is a Hamiltonian graph on $n \ge (2k+1)(k^2+k+1)$ vertices with
independence number $\alpha(G) \le k$. Then $G$ contains a cycle of length $n-1$.
\end{lemma}
\nib{Proof.} Choose a Hamilton cycle $C$ in $G$ and label the vertices as
$v_1,\cdots,v_n$ so that the edges of $C$ are $v_iv_{i+1}$ for $1 \le i \le n$.
(As above we use the convention $v_{n+1}=v_1$.)
Set $s=k^2+k+1$ and let $I_1,\cdots,I_s$ be disjoint $2k$-intervals in $C$.
Proposition \ref{jump} gives jumps $J_1,\cdots,J_s$,
where each $J_i$ is a subinterval of $I_i$ of length at least $2$
and the ends of $J_i$ are adjacent in $G$. We say that $J_i$ is {\em good}
if each internal vertex $v \in J_i^o$ has at least $k+1$ neighbours in $V(G) \setminus J_i^o$.
We claim that there is a good jump. For suppose to the contrary that we can
choose $v_i \in J_i^o$ such that $v_i$ has at most $k$ neighbours in $V(G) \setminus J_i^o$
for $1 \le i \le s$. Then $\{v_1,\cdots,v_s\}$ spans a subgraph of $G$ with
maximum degree at most $k$, so by Proposition \ref{indep} contains an independent
set of size bigger than $k$, contradicting our assumption on $G$.
Thus there is a good jump, say $J_1$. Now we construct a cycle of length $n-1$ as follows.
First we replace the portion of $C$ traversing the jump $J_1$ with the edge $\partial J_1$
between the endpoints of $J_1$. Then we use Lemma \ref{add} to put back the vertices
of $J_1^o$ one by one, increasing the length of the cycle until only one vertex has
not been replaced. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
\subsection{Cycles}
The following lemma of Erd\H{o}s, Faudree, Rousseau and Schelp \cite{EFRS}
will allow us to find a cycle of some particular length,
using a breadth first search tree and the independence assumption.
The proof of this lemma can be found in rather abbreviated form within
the proof of Theorem 1 in \cite{EFRS}. For the convenience of the reader
we include a proof here.
\begin{lemma} \label{efrs}
Suppose $G$ is a graph containing no cycle of length $\ell$,
$T$ is a tree in $G$, $v$ is a vertex of $T$, $h < \ell/2$,
and $Z$ is the set of vertices at distance $h$ in $T$ from $v$.
Then the restriction of $G$ to $Z$ is $(\ell-2)$-colourable,
and so $Z$ contains a subset of size at least
$|Z|/(\ell-2)$ that is independent in $G$.
\end{lemma}
\nib{Proof.}
Fix a plane drawing of $T$ such that in the $(x,y)$-coordinate system
$v$ is at the origin and for $i \ge 0$ points at distance $i$ from $v$ have $x$-coordinate $i$.
Say that a path $z_0z_1 \cdots z_t$ in $G$ using vertices of $Z$
is increasing if the $y$-coordinates of the vertices $z_0,z_1,\cdots,z_t$
form an increasing sequence.
Given any increasing path $P=z_0z_1 \cdots z_t$ we let $P'$ be the unique path in $T$
from $z_0$ to $z_t$ and we let $v_P$ be the closest point to $v$ on $P'$.
We claim that we can remove either $z_0$ or $z_t$ to obtain a path $Q$
such that $v_Q=v_P$ and $Q'$ has the same length as $P'$.
To see this, we observe that it can only fail if
the path in $T$ from $z_1$ to $v_P$ meets the path
from $z_t$ to $v_P$ before it reaches $v_P$
and the path in $T$ from $z_{t-1}$ to $v_P$ meets the path
from $z_0$ to $v_P$ before it reaches $v_P$.
But this would contradict our choice of a plane drawing of $T$, so the claim holds.
Now we claim that there is no increasing path of length $\ell-2$.
For suppose that $P=z_0z_1 \cdots z_{\ell-2}$ is an increasing path.
Let $\ell'$ be the length of the path $P'$ in $T$ from $z_0$ to $z_{\ell-2}$.
Then $2 \le \ell' \le 2h \le \ell-1$.
We construct a sequence of paths $P_0=P, P_1, \cdots, P_{\ell-3}$ where each
$P_{i+1}$ is obtained from $P_i$ by removing an endpoint in such a way
that $v_{P_{i+1}}=v_{P_i}$, so that for each $i$ we have $v_{P_i}=v_P$
and $P'_i$ has length $\ell'$. Since $P_i$ has length $\ell-2-i$,
$P_i \cup P'_i$ forms a cycle of length $\ell'+\ell-2-i$.
Setting $i=\ell'-2$ we obtain a cycle of length $\ell$,
which contradicts our assumption on $G$, so the claim holds.
Finally, we define a colouring $c:Z\to\{0,1,\cdots,\ell-3\}$
where $c(z)$ is the length of the longest increasing path starting at $z$.
This is a proper colouring of $G[Z]$, as if $z,z' \in Z$ with $z$ below $z'$ (say)
then we can add $zz'$ to any increasing path starting at $z'$,
so $c(z)>c(z')$. Since all colour classes of $c$ are independent
we have an independent set of size at least $|Z|/(\ell-2)$. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
\subsection{Probability}
Finally we record the standard Chernoff bounds for large deviations
of binomial random variables.
\begin{lemma} (Chernoff bounds, see \cite{AS} Appendix A)
Suppose $X$ is a binomial random variable with parameters $(n,p)$ and $a \ge 0$.
\begin{itemize}
\item[(i)] If $p=1/2$ then $\mathbb{P}(X-n/2>a)
= \mathbb{P}(X-n/2<-a) < e^{-2a^2/n}$.
\item[(ii)] $\mathbb{P}(X-np>a) < e^{-a^2/2pn+a^3/2(pn)^2}$.
\item[(iii)] $\mathbb{P}(X-np<-a) < e^{-a^2/2pn}$.
\end{itemize}
\end{lemma}
\section{Proofs}
In this section we present proofs of our three theorems. Throughout we will suppose that $G$ is
a graph with independence number $\alpha(G) \le k$. Also, we can suppose $k \ge 2$,
otherwise we have the trivial case when $G$ is a complete graph.
We start with a lemma that provides two vertices that are connected
by paths with every length in some interval.
\begin{lemma} \label{consecutive-paths}
Suppose $G$ is a graph on $n$ vertices with independence number $\alpha(G) \le k$
and $B$ is a bipartite subgraph of $G$ with minimum degree $\delta(B) = d > 9k/2$.
Suppose $x$ is a vertex of $B$ and let $N_i$ denote the set of vertices
at distance $i$ from $x$ in $B$. Let $m \ge 1$ be the smallest number with
$e_B(N_m,N_{m+1}) \ge \frac{2d}{9}(|N_m|+|N_{m+1}|)$. Then
\begin{itemize}
\item[(i)] $|N_m| \ge 2^{m-1}d$, $m \le \log_2 \left( \frac{n+d-1}{d} \right)$
and $G$ contains cycles of length $\ell$ for all $3 \le \ell \le |N_m|/k$,
\item[(ii)] there are sets $N'_m \subseteq N_m$ and $N'_{m+1} \subseteq N_{m+1}$
forming the parts of a bipartite subgraph $B'$ of $B$ with minimum degree at least $2d/9$,
\item[(iii)] there is a vertex $y$ in $B'$ such that there is a path between
$x$ and $y$ in $G$ of length $\ell$, for any $\ell$ with $m \le \ell \le m+4d/9-2$.
\end{itemize}
\end{lemma}
\nib{Proof.}
By Proposition \ref{bfs} we have $|N_{i+1}| \ge 2|N_i|$ for $0 \le i \le m-1$.
Since $|N_1| \ge \delta(B) = d$ we have $|N_i| \ge 2^{i-1}d$ for $1 \le i \le m$.
Applying Lemma \ref{efrs} to $Z=N_i$ for $1 \le i \le m$
we see that $G$ contains cycles of length $\ell$ for $2i+1 \le \ell \le |N_i|/k$.
Since $d > 9k/2$ and $|N_i| \ge 2^{i-1}d$, it is easy to check that $2i+2 \leq |N_i|/k$,
so the intervals $[2i+1, |N_i|/k], 1 \leq i \leq m$ together contain
all integers from $3$ to $|N_m|/k$. Also, $n \ge \sum_{i=0}^m |N_i| \ge 1+(2^m-1)d$
gives the required bound on $m$, so statement (i) holds.
Statement (ii) follows from Proposition \ref{avmindeg}. Indeed,
$N_m$ and $N_{m+1}$ form the parts of a bipartite
subgraph of $B$ with average degree at least $4d/9$. Thus it contains a subgraph $B'$ with parts
$N'_m \subseteq N_m$ and $N'_{m+1} \subseteq N_{m+1}$ with minimum degree at least $d'=2d/9$.
Since $d > 9k/2$ the minimum degree in $B'$ is at least $k+1$. In particular,
$|N'_m| \ge k+1$. Since $\alpha(G) \le k$ there is an edge $yz$ of $G$ in $N'_m$.
We claim that this choice of $y$ satisfies statement (iii).
To see this we give separate arguments for paths of length $m+2t$, $t \ge 0$
and paths of length $m+2t+1$, $t \ge 0$. By Proposition \ref{length},
for $0 \le 2t \le 2d'-2=4d/9-2$ there is a path of length $2t$ in $B'$ from $y$
to a vertex $w$ in $N'_m$, which can be combined with the path in $T$
from $w$ to $x$ to give a path of length $m+2t$ between $x$ and $y$.
Next, consider the bipartite graph $B' \setminus \{y\}$, which has minimum degree at least $d'-1$.
Then, again by Proposition \ref{length}, for $0 \le 2t \le 2d'-4$
we can find a path in $B'$ of length $2t$ from $z$ to a vertex $w \in N'_m$,
which can be combined with the edge $yz$ and the path in $T$
from $w$ to $x$ to give a path of length $m+2t+1$ between $x$ and $y$. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
Now we prove our first result, which states that
a graph $G$ on $n$ vertices with independence number
$\alpha(G) \le k$ and minimum degree $\delta(G) = d \ge 300k$
contains a cycle of length $\ell$ for all $3 \le \ell \le d/81$.
\noindent
{\bf Proof of Theorem \ref{short-cycles}.}\,
By Proposition \ref{bip} we can choose a bipartite subgraph $B$ of $G$
with minimum degree $\delta(B) \ge d/2$. Fix any vertex $x$ and let $N_i$
be the set of vertices at distance $i$ from $x$ in $B$.
By Lemma \ref{consecutive-paths}, for some $m \geq 1$ we have
cycles in $G$ of length $\ell$ for all $3 \le \ell \le |N_m|/k$,
where $|N_m| \ge 2^{m-1}(d/2)=2^{m-2}d$. We also have subsets $N'_m \subseteq N_m$ and $N'_{m+1} \subseteq N_{m+1}$
spanning a bipartite subgraph $B'$ of $B$ with minimum degree at least $\frac{2}{9}(d/2)=d/9$.
We can assume that $|N_m| < kd/81$, since otherwise we are done.
Also, by choosing $d/9$ neighbours in $N'_{m+1}$ for each vertex in $N'_m$
and deleting all other vertices of $N'_{m+1}$ we can assume that $|N'_{m+1}| < kd^2/729$.
Next we consider a partition $N'_m = P \cup Q$, where each vertex of $N'_m$
is randomly and independently placed in $P$ or $Q$ with probability $1/2$.
Since every vertex in $N'_{m+1}$ has degree at least $d/9$, by Chernoff bounds,
the probability that there is a vertex in $N'_{m+1}$ having
fewer than $d/36$ neighbours in either $P$ or $Q$ is at most
$2 \cdot (kd^2/729) \cdot e^{-d/72} < 1$, since $d \ge 300k \ge 600$.
Therefore we can choose a partition $N'_m = P \cup Q$ so that every vertex in $N'_{m+1}$
has at least $d/36$ neighbours in $P$ and at least $d/36$ neighbours in $Q$.
Consider the graph $G^*=G[P \cup N'_{m+1}]$ and its bipartite subgraph $B^*$ with parts
$P$ and $N'_{m+1}$, which has minimum degree $d^* \geq d/36 > 9k/2$. Fix any vertex
$x^*$ in $P$ and let $N^*_i$ denote the vertices at distance $i$ from $x^*$ in $B^*$.
By Lemma \ref{consecutive-paths}, we have some $m^* \geq 1$
such that $G^*$ contains cycles of length $\ell$ for all $3 \le \ell \le |N^*_{m^*}|/k$,
where $|N^*_{m^*}| \ge 2^{m^*-1}d^* \geq 2^{m^*}d/72$. We also have
a vertex $y$ such that there is a path between $x^*$ and $y$ in $G^*$
of length $\ell$, for all $\ell$ with $m^* \le \ell \le m^*+d/81-2 \leq m^*+4d^*/9-2$.
We let $y^*$ be either equal to $y$ if $y \in P$ or a neighbour of $y$
in $Q$ if $y \in N'_{m+1}$. In either case we have $y^* \in N'_m$
and there are paths between $x^*$ and $y^*$ in the bipartite subgraph $B_m$
of $G$ with parts $N_m$ and $N_{m+1}$ having any length $\ell$
with $m^*+1 \le \ell \le m^* + d/81 - 2$. Also, $x^*$ and $y^*$ both belong
to $N_m$, so are joined by a path $W$ of some length $\ell_W$ with $2 \le \ell_W \le 2m$,
where all internal vertices of $W$ lie in sets $N_i$ with $i<m$.
Combining $W$ with paths between $x^*$ and $y^*$ in $B_m$ gives cycles of
any length $\ell$ with $2m+m^*+1 \le \ell \le m^* + d/81$.
We already saw that $G$ contains cycles of length $\ell$ for all
$3 \le \ell \le \max\{|N_m|,|N^*_{m^*}|\}/k$.
Since $d \ge 300k$, we have $|N_m|/k \ge 2^{m-2}d/k>4m$ and $|N^*_{m^*}|/k \ge 2^{m^*}d/(72k)>4m^*$.
Therefore $\max\{|N_m|,|N^*_{m^*}|\}/k \ge 2m+m^*+1$, so $G$ contains cycles of
length $\ell$ for all $3 \le \ell \le d/81$. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
Next we need another lemma.
\begin{lemma}\label{paths+cycle}
Suppose $G$ is a graph with independence number $\alpha(G) \le k$
and $V(G)$ is partitioned into two parts $A$ and $B$ such that
\begin{itemize}
\item[(i)] $G[A]$ is Hamiltonian,
\item[(ii)] either $|B| \ge (9k+1)k+1$ or $G[B]$ has minimum degree at least $9k+1$,
and
\item[(iii)] every vertex in $B$ has at least $2$ neighbours in $A$.
\end{itemize}
Then $G$ contains a cycle of length $\ell$ for any
$2k+1+\lfloor\log_2(2k+1)\rfloor \le \ell \le |A|/2$.
\end{lemma}
\nib{Proof.}
First we note that $G[B]$ has an induced subgraph $H$
with minimum degree $d \ge 9k+1$ and at most $(9k+1)k+1$ vertices.
Indeed, if $|B| \le (9k+1)k+1$ just take $H=G[B]$,
otherwise apply Proposition \ref{indep-mindeg} to $G[B]$.
By Proposition \ref{bip}, $H$ contains a bipartite subgraph $H'$ with minimum degree $d/2> 9k/2$.
Applying Lemma \ref{consecutive-paths} to $H$ and $H'$, we find vertices $x$ and $y$ and a number
$m \le \log_2 \left( \frac{|V(H)|+d/2-1}{d/2} \right) \le \log_2(2k+1)$,
such that there is a path between $x$ and $y$ in $H$
of length $t$, for any $t$ with $m \le t \le m+2k-2 \leq m+\frac{4}{9}(d/2)-2$.
Since every $v \in B$ has at least $2$ neighbours in $A$ we can
choose neighbours $a$ of $x$ and $b$ of $y$ in $A$ with $a \ne b$.
Let $P$ be the path in $G[A]$ joining $a$ and $b$ obtained by taking the
longer arc of the Hamilton cycle, so that $P$ has length at least $|A|/2$.
We construct a cycle of any length $\ell$ with $2k+1+\log_2(2k+1) \le \ell \le |A|/2$ as follows.
Since $q=\ell-m-2k \geq 1$ we can apply Proposition \ref{shorten} to replace $P$ by a path $P'$
in $G[A]$ between $a$ and $b$ of some length $\ell'$ with $q \leq \ell' \leq q+2k-2$.
Then $m \leq \ell-2-\ell' \leq m+2k-2$, so we can complete $P'$ to a cycle
of length $\ell$ by adding the edges $ax$, $by$ and a path in $H$ of length $\ell-2-\ell'$
between $x$ and $y$. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
Using this lemma we prove that if $G$ is a Hamiltonian graph on $n \ge 150k^3$ vertices with
independence number $\alpha(G) \le k$ then $G$ is pancyclic.
\noindent
{\bf Proof of Theorem \ref{pan-n}.}\,
Starting from the graph $G=G_n$ we construct a sequence of subgraphs $G_n, G_{n-1}, \cdots, G_{n-20k^2}$,
where $G_i$ is a Hamiltonian graph on $i$ vertices. Also, for each removed
vertex $v \in V(G) \setminus V(G_i)$ we maintain a set of $2$ neighbours
$\{a_v,b_v\}$ of $v$ which we never delete, i.e.\ they will appear in each subgraph of the sequence.
To achieve this, consider the graph $G_i$, let $C_i$ be a Hamilton cycle in $G_i$
and let $N_i = \bigcup \big\{\{a_v,b_v\}: v \in V(G) \setminus V(G_i) \big\}$.
We claim that we can choose $s=k^2+k+1$ disjoint $2k$-intervals in $C_i$ that avoid $N_i$.
To see this, consider the partition of $C_i$ into intervals defined by consecutive points in $N_i$.
We disregard $N_i$ and any intervals of length less than $2k$, then note that we can cover at least
half of the remaining points by disjoint $2k$-intervals. Since $|N_i| \le 40k^2$ and $n \ge 150k^3$
the number of $2k$-intervals thus obtained is at least $\frac{n-20k^2-40k^2(2k+1)}{2(2k+1)} > s$.
Now, as in the proof of Lemma \ref{erdos}, we can find a good jump $J$ in one of these intervals,
and use it to construct a cycle of length $i-1$. Furthermore, the vertex $v$ removed in this step
has at least $k+1$ neighbours in $V(C_i)$, since it belongs to the good jump $J$,
so we can choose any $2$ of these to be $a_v$ and $b_v$.
This sequence terminates with a Hamiltonian graph $G'=G_{n-20k^2}$ and a set
$S = V(G) \setminus V(G')$ of size $20k^2>(9k+1)k+1$ such that every $v \in S$ has at least
$2$ neighbours in $V(G')$. By Lemma \ref{paths+cycle}
$G$ contains a cycle of length $\ell$ for any $2k+1+\log_2(2k+1) \le \ell \le |V(G')|/2=n/2-10k^2$.
To get cycles of length $\ell$ with $n/2-10k^2 \le \ell \le n$
we can just repeatedly apply Lemma \ref{erdos} starting from $G$.
To obtain the short cycles, note that $n \ge 150k^3 \ge (300k-1)k+1$, since $k \ge 2$,
so by Proposition \ref{indep-mindeg} $G$ has an induced subgraph $G^*$ with minimum degree
$d \ge 300k-1$. Since $(300k-1)/81 \ge 2k+1+\log_2(2k+1)$,
Theorem \ref{short-cycles} implies that $G^*$ contains cycles of length $\ell$
for all $3 \le \ell \le 2k+1+\log_2(2k+1)$. Therefore $G$ is pancyclic. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
Next we need the following lemma, which provides the long cycles
needed in the proof of Theorem \ref{pan-mindeg}.
\begin{lemma} \label{long}
Suppose $k \ge 3$ and $G$ is a Hamiltonian graph on $n \le 150k^3$ vertices
with minimum degree $\delta(G) \ge 600k$ and
independence number $\alpha(G) \le k$. Then $G$ contains
a cycle of length $\ell$ for any $n/12 \le \ell \le n$.
\end{lemma}
\nib{Proof.}
Consider a partition of the vertices of $G$ into sets $X$ and $Y$
where every vertex is placed randomly and independently in $X$ with probability $1/24$
or in $Y$ with probability $23/24$. By Chernoff bounds, the probability
that there is a vertex with less than $25k/2$ neighbours in $X$ is at most
$ne^{-25k/8}$ and the probability that $X$ has size more than $n/16$ is at most $e^{-n/384}$.
Since $k \ge 3$ and $600k \le n \le 150k^3$, both these probabilities are less than $0.4$,
so we can choose such a partition in which $|X| \le n/16$
and every vertex has at least $25k/2$ neighbours in $X$.
Starting from $G_n=G$ we construct a sequence of subgraphs
$G_n, G_{n-1}, \cdots, G_{n/12}$, where $G_i$ is a Hamiltonian graph
with $|V(G_i)|=i$ and $X \subseteq V(G_i)$.
To achieve this, suppose $n/12<i\le n$ and $C_i$ is a Hamilton cycle in $G_i$.
We claim that at least $3i/4$ of the vertices of $G_i$ are internal vertices
in some jump of length at most $8k$ in $C_i$. For if this is false, then by averaging
we could find an interval $I$ of length $8k$ and a set $S \subseteq I$ of size $2k+1$ such that
no vertex in $S$ is an internal vertex of a jump of length at most $8k$.
Consider a subset $S'$ of $S$ of size $k+1$ formed by taking every other vertex
(i.e.\ the first, the third, $\dots$, the $(2k+1)^{\text{st}}$).
Since $\alpha(G) \le k$ there must be an edge within $S'$.
This edge forms a jump of length at most $8k$ with at least one internal vertex in $S$,
giving a contradiction which proves the claim.
Since $i > n/12$ and $|X| \le n/16$ we have $|X| < 3i/4$. Thus we can choose a vertex $y \in Y$
and a jump $J$ of length at most $8k$ so that $y$ belongs to $J^o$ (the set of internal vertices of $J$).
Since every vertex has at least $25k/2$ neighbours in $X$ and $X \subseteq V(G_i)$,
every vertex in $J^o$ has at least $25k/2-8k>k+1$ neighbours in $V(G_i) \setminus J^o$.
We replace the portion of $C_i$ traversing $J$ with the edge $\partial J$
then we use Lemma \ref{add} to put back the vertices of $J^o$ one by one,
until only $y$ has not been put back. Then $G_{i-1} = G_i \setminus y$ is
Hamiltonian with $|V(G_{i-1})|=i-1$ and $X \subseteq V(G_{i-1})$, as required. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
Finally we give the proof of our third theorem, which states
that if $G$ is a Hamiltonian graph with minimum degree $\delta(G) \ge 600k$ and
independence number $\alpha(G) \le k$ then $G$ is pancyclic.
\noindent
{\bf Proof of Theorem \ref{pan-mindeg}.}\,
Let $n$ be the number of vertices in $G$. If $n \ge 150k^3$ then we are done
by Theorem \ref{pan-n} (without even using the minimum degree assumption)
so we can suppose that $n < 150k^3$. We also have $n > \delta(G) \ge 600k$, so $k \ge 3$.
Applying Theorem \ref{short-cycles} we see that $G$ has a cycle of length $\ell$
for all $3 \le \ell \le 7k <\delta(G)/81$. Also, by Lemma \ref{long} we have a cycle
of length $\ell$ for any $n/12 \le \ell \le n$.
For the remaining intermediate cycle lengths we consider a partition of the vertices
into two sets $X$ and $Y$, where vertices are randomly and independently placed
in $X$ with probability $1/2$ or in $Y$ with probability $1/2$.
By Chernoff bounds we can choose this partition so that $|X|, |Y| \ge n/3$
and each vertex has at least $200k$ neighbours in $X$ and at least $200k$ neighbours in $Y$.
Let $n'$ be the smallest number such that there is a subgraph $G'$ of $G$
on $n'$ vertices such that $G'$ is Hamiltonian and $X \subseteq V(G')$.
Let $C'$ be a Hamilton cycle in $G'$ and write $D = V(G) \setminus V(G')$.
Since $X \subseteq V(G')$ we have $n' \ge n/3$ and $D \subseteq Y$.
First we dispose of the case when $|V(G') \cap Y| \le 4k$.
Since every vertex has degree at least $200k$ in $Y$,
the restriction of $G$ to $D=Y \setminus V(G')$ has minimum degree at least $196k$.
Every vertex has at least $200k > 1$ neighbours in $X \subseteq V(G')$,
so applying Lemma \ref{paths+cycle} with $A = V(G')$ and $B=D$,
we obtain a cycle of length $\ell$ for any $2k+1+\log_2(2k+1) \le \ell \le n/6$.
Now we can suppose that $|V(G') \cap Y| \ge 4k+1$.
We can choose an interval $I$ of $C'$ that contains exactly $2k+1$ vertices of $Y$
and has length at most $n'/2$. Then we consider every other vertex of $Y$ in $I$
to obtain a set of size $k+1$, which must contain an edge, since $\alpha(G)\le k$.
This gives a jump $J$ of length at most $n'/2$ such that $1 \le |Y \cap J^o| \le 2k-1$.
Fix $y_0 \in Y \cap J^o$. We replace the portion of $C'$ traversing $J$ by $\partial J$,
and then use Lemma \ref{add} to put back vertices of $J^o \setminus \{y_0\}$ one by one,
while we can find such a vertex with at least $k+1$ neighbours in the current cycle.
By minimality of $n'$ this process terminates before all vertices of $J^o \setminus \{y_0\}$
have been replaced. Thus we obtain a non-empty subset $S$ of $J^o \setminus \{y_0\}$ such that
$G''=G' \setminus (S \cup \{y_0\})$ is Hamiltonian and every vertex in $S$ has at most $k$ neighbours in $V(G'')$.
Since $J$ has length at most $n'/2$ and $n' \geq n/3$ we have $n''=|V(G'')| \ge n/6$.
Also, $V(G'')$ contains $X \setminus S$ and every vertex of $S$
has at least $200k$ neighbours in $X$, of which at most $k$ are in $G''$,
so the restriction $G[S]$ has minimum degree $d \ge 199k$.
Choose a vertex $x \in S$ that is adjacent to a vertex $a$ of $G''$. Such an $x$ exists
since $G'=G''\cup S \cup \{y_0\}$ is Hamiltonian, and in particular $2$-connected.
By Proposition \ref{bip} we can choose a bipartite subgraph $B$ of $G[S]$
with minimum degree at least $d/2$.
Applying Lemma \ref{consecutive-paths} to $G[S]$ and $B$
we obtain a vertex $y \in S$ and a number $m$
such that there is a path between $x$ and $y$ in $G[S]$ of length $\ell$,
for any $\ell$ with $m \le \ell \le m+4k$ (say, since $2d/9-2 > 4k$),
where $m \le \log_2 \left( \frac{n'/2+d/2-1}{d/2} \right) < \log_2 (150k^2/199+1) < 2\log_2 k$.
Let $C''$ be a Hamilton cycle in $G''$. If $y$ has a neighbour $b \ne a$ in $G''$
then we complete the argument as before. We take $P$ to be the longer arc of $C''$
between $a$ and $b$, so that $P$ has length at least $n''/2 \ge n/12$. Then we construct
a cycle of any length $\ell$ with $2k+1+2\log_2 k \le \ell \le n/12$ as follows. Since
$q = \ell-m-2k \ge 1$ we can apply Proposition \ref{shorten} to replace $P$ by a path $P'$ in $G''$
between $a$ and $b$ with some length $\ell'$ with $q \le \ell' \le q+2k-2$.
Then $m \le \ell-\ell'-2 \le m+2k$, so we can complete $P'$ to a cycle of length $\ell$
by adding the edges $ax$, $by$ and a path in $G[S]$ of length $\ell-\ell'-2$ between $x$ and $y$.
Now suppose that $y$ does not have a neighbour $b \ne a$ in $G''$.
We will repeatedly use the following fact.
\noindent $(\star)$ \ \
Any vertex $z$ with at most one neighbour in $G''$ has at least $40k$ neighbours in $D$.
The proof of $(\star)$ is immediate from the fact that $z$ has
at least $200k$ neighbours in $Y$, but at most $|Y \cap J|+1 \le 2k+2$
of these are in $G'$, so $z$ easily has at least $40k$ neighbours in $D$.
Applying $(\star)$ to $z=y$ we can choose a neighbour $y'$ of $y$ in $D$.
Let $Z$ be the connected component of $G[D]$ containing $y'$.
If $Z$ has an induced subgraph $Z'$ with minimum degree at least $20k$ then applying
Lemma \ref{paths+cycle} with $A = V(G')$ and $B=V(Z')$
gives a cycle of length $\ell$ for any $2k+1+\log_2(2k+1) \le \ell \le n/6$.
Now suppose that $Z$ does not have any induced subgraph $Z'$ with minimum degree at least $20k$.
We claim that there is a path of length at most $k$ in $Z$ from $y'$
to a vertex $z$ in $Z$ with at least $2$ neighbours in $G''$.
To see this note first that $|V(Z)| \le 20k^2$ by Proposition \ref{indep-mindeg}
and $Z$ contains at least one vertex with at least $2$ neighbours in $G''$ by $(\star)$.
Now consider a breadth first search tree $T$ in $Z$ starting from $y'$,
and for $i \ge 0$ let $N_i$ be the set of vertices at distance $i$ from $y'$
and let $Z_i$ be the restriction of $Z$ to $\cup_{j=0}^i N_j$.
If every vertex in $Z_i$ has at most one neighbour in $G''$ then by $(\star)$
it has at least $40k$ neighbours in $D$. On the other hand, we assumed that
the minimum degree in $G[Z_i]$ is less than $20k$. Therefore $Z_i$ contains a vertex $z$
with at least $40k-20k=20k$ neighbours in $D \setminus
V(Z_i)$, implying $|N_{i+1}| \ge 20k$. Since $|V(Z)| \le 20k^2$ it follows that $Z_k$
contains a vertex $z$ with at least $2$ neighbours in $G''$, as
claimed.
Suppose that $z \in N_{i-1}$, where $1 \le i \le k+1$.
Combining the paths between $x$ and $y$ in $G[S]$ with the edge $yy'$
and the path of length $i-1$ in $T$ from $y'$ to $z$
we obtain paths between $x$ and $z$ in $G \setminus V(G'')$ of any length $\ell$
with $m+i \le \ell \le m+4k+i$, where we recall that $m < 2\log_2 k$.
Choose a neighbour $b \ne a$ of $z$ in $G''$ and let $P$ be the longer arc of $C''$
between $a$ and $b$, so that $P$ has length at least $n''/2 \ge n/12$. (See Figure 1.)
Now we construct a cycle of any length $\ell$ with $3k+2+2\log_2 k \le \ell \le n/12$ as follows.
Since $q=\ell-m-2k-i \ge 1$ we can apply Proposition \ref{shorten} to replace $P$ by a path
$P'$ in $G''$ between $a$ and $b$ with some length $\ell'$ with $q \le \ell' \le q+2k-2$.
Then $m+i \le \ell-\ell'-2 \le m+2k+i$, so we can complete $P'$ to a cycle of length $\ell$
by adding the edges $ax$, $bz$ and a path in $G[S]$ of length $\ell-\ell'-2$ between $x$ and $z$.
\begin{figure}
\caption{Constructing cycles of intermediate length}
\end{figure}
Since $2k+1+2\log_2k<3k+2+2\log_2k<7k$, in all cases we find cycles of length $\ell$ for $7k \le \ell \le n/12$.
Recall that we also have cycles of length $\ell$ when $3 \le \ell \le 7k$
and when $n/12 \le \ell \le n$. This implies pancyclicity of $G$. \ifvmode\mbox{ }\else\unskip\fi\hskip 1em plus 10fill$\Box$
\section{Concluding remarks}
We have answered the question of Jackson and Ordaz up to a constant factor.
Obviously it would be nice to obtain the exact bound, but perhaps one should
first attempt to prove an asymptotic version, i.e.\ that if
$\kappa(G) \ge (1+o(1))\alpha(G)$ then $G$ is pancyclic.
Also, it would be interesting to give the correct order of magnitude for
the minimum number $n$ of vertices such that any Hamiltonian graph $G$ on $n$
vertices with $\alpha(G)=k$ is pancyclic. We proved that this holds if $n=\Omega(k^3)$,
but it probably can be reduced to $n=\Omega(k^2)$. One way to attack this problem is
to improve the estimate in Lemma \ref{erdos}, which says that any Hamiltonian graph
with independence number $k$ and $n =\Omega(k^3)$ vertices contains a cycle of length $n-1$.
It would be extremely interesting to determine the correct dependence of $n$ on $k$
for this problem of just removing one vertex. Even the following question remains open.
\nib{Question.} Is there an absolute constant $C$ such that any Hamiltonian graph with
independence number $k$ and $n \geq Ck$ vertices contains a cycle of length $n-1$?
A positive answer would be tight up to a constant factor (clearly) and
in combination with Proposition \ref{indep-mindeg} and Theorem \ref{short-cycles}
would immediately imply that a quadratic dependence of $n$ on $k$ is sufficient
for Hamiltonicity to imply pancyclicity.
\end{document} |
\begin{document}
\begin{abstract}
We show that a simple separable unital nuclear nonelementary $C^*$-algebra whose tracial state space has a compact extreme boundary with finite covering dimension admits uniformly tracially large order zero maps from matrix algebras into its central sequence algebra. As a consequence, strict comparison implies $\mathcal Z$-stability for these algebras.
\end{abstract}
\title{$\mathcal{Z}$-stability and finite dimensional tracial boundaries}
\section{Introduction}
\noindent
The theme of tensorial absorption is prominent in the theory of operator algebras, particularly where the classification of these algebras is concerned. An important step in Connes' proof that the hyperfinite $\mathrm{II}_1$ factor $\mathcal{R}$ is the unique injective $\mathrm{II}_1$ factor with separable predual exemplifies this theme: such a factor, say $\mathcal{M}$, has the property that $\mathcal{M} \overline{\otimes} \mathcal{R} \cong \mathcal{M}$. Another example arises in the theory of Kirchberg algebras, where the fact, due to Kirchberg, that such algebras absorb the Cuntz algebra $\mathcal{O}_\infty$ tensorially (see \cite{KP:Crelle}) plays a key role in their eventual classification via $\mathrm{K}$-theory. For general nuclear separable $C^*$-algebras, the best tensorial absorption theorem that one can hope for is absorption of the Jiang-Su algebra $\mathcal{Z}$ ($\mathcal{Z}$-stability). $\mathcal{Z}$-stability is a powerful tool for the classification of simple nuclear separable $C^*$-algebras, but is generally difficult to establish. The property of strict comparison, on the other hand, is often easier to verify. Examples show that both properties, although by no means automatic, often occur at the same time, and are closely related to finite topological dimension. These and other considerations led the first and third named authors to conjecture the following connections between regularity properties for $C^*$-algebras:
\begin{conjecture}
Let $A$ be a simple nuclear separable unital infinite-dimensional $C^*$-algebra. The following conditions are equivalent:
\begin{enumerate}
\item $A$ has finite nuclear dimension;
\item $A$ is $\mathcal{Z}$-stable;
\item $A$ has strict comparison.
\end{enumerate}
\end{conjecture}
\noindent
The implications $(1) \Longrightarrow (2)$ and $(2) \Longrightarrow (3)$ have been established by the third named author and R\o rdam, respectively (see \cite{W:Invent2} and \cite{R:IJM}). The reversal of either of these implications is at present only partial; proving $(3) \Longrightarrow (2)$ becomes accessible if one additionally assumes certain local approximation and divisibility properties \cite{W:Invent2} but at least the former assumption should ultimately be unnecessary. In a recent breakthrough, Matui and Sato lifted the local approximation hypothesis, establishing $(3) \Longrightarrow (2)$ for algebras with finitely many extremal tracial states \cite{MS:Acta}. Subsequently it has been an urgent task to remove this restriction on the tracial state space and in this article we extend their result to algebras whose extremal tracial boundary is compact and of finite covering dimension. The problem has received substantial attention; this result has also been discovered by Kirchberg and R\o{}rdam \cite{KR:InPrep} and Sato \cite{S:Pre}.
A word on the idea of our proof is in order, as the details are necessarily technical. A loose but in our case profitable way of thinking of a simple unital separable nuclear $C^*$-algebra $A$ with nonempty tracial state space is as a collection of everywhere nonzero ``sections'', where $A$ is viewed as a kind of noncommutative bundle over its space $T(A)$ of tracial states. For a classical topological vector bundle $\xi$ over a space $X$ we have the property of local triviality: restriction of the bundle to a sufficiently small neighborhood is a Cartesian product of the neighbourhood with a (complex) vector space having the same rank as $\xi$. The complexity of $\xi$ is generated by the way in which these local trivialisations are patched together. The analog of local trivialisation in our case comes from approximately central order zero maps with finite-dimensional domain which are large in a small open neighbourhood in $T(A)$. Our objective is to construct an approximately central order zero map which is globally large over $T(A)$, and so we look to glue together the maps which work locally. To do this we use nuclearity, and in particular the existence of approximate diagonals, to prove that there exist positive approximately central contractions in $A$ which, at the level of traces, represent indicator functions for open subsets of the extreme tracial boundary. Indeed, any continuous strictly positive affine function on $T(A)$ is uniformly realised by positive elements in an approximately central manner (Lemma \ref{L:CT}). Using suitable functions arising from open covers of $\partial_eT(A)$ we can patch together the local trivialisations to arrive at an order zero map which is uniformly bounded away from zero in trace. The bound we obtain depends on the covering dimension of $\partial_eT(A)$ and it is here where finite dimensionality arises.
Throughout the entire paper we work with central sequence algebras. The importance of these to tensorial absorption results dates back to McDuff's characterisation of separably acting II$_1$ factors which absorb the hyperfinite II$_1$ factor \cite{McD:PLMS}. In the $C^*$-setting central sequence algebras are intimately connected with properties of stability under tensoring by $\mathcal O_\infty$ and $\mathcal Z$, \cite{K:Able}.
The hypothesis of a compact extreme tracial boundary with finite covering dimension arose in \cite{DT:JFA}, where it was shown that for a simple algebra with strict comparison satisfying the said hypothesis, the Cuntz semigroup is almost divisible. The main result in this paper can be viewed as a proof that this almost divisibility can occur in an almost central manner.
Our paper is organised as follows. In Section \ref{Sect2} we introduce notation and review the relevant background from \cite{MS:Acta}. The main technical result (Lemma \ref{L:Key}) is established in Section \ref{Sect3}, and we show how the case of zero dimensional compact extreme tracial boundaries follows directly from this lemma. In the fourth and last section we extend to higher dimensional boundaries. Our method for doing this differs from those in \cite{KR:InPrep,S:Pre} as we marry the ideas needed to extend \cite{MS:Acta} to the zero dimensional compact extremal case with the geometric sequence arguments developed by the third named author in \cite{W:Invent1,W:Invent2}.
\section{Uniformly tracially large order zero maps}\label{Sect2}
\noindent
Recall that a completely positive (cp) map $\phi:A\rightarrow B$ between $C^*$-algebras is said to be \emph{order zero} if it preserves orthogonality, i.e. if $e,f\in A_+$ have $ef=0$, then $\phi(e)\phi(f)=0$. The structure theory of these maps is developed in \cite{WZ:MJM}, which in particular establishes a functional calculus. Given a completely positive and contractive (cpc) order zero map $\phi:A\rightarrow B$, and a positive contractive function $f\in C_0(0,1]$, there exists a cpc order zero map $f(\phi):A\rightarrow B$. For projections $p\in A$, this map satisfies
\begin{equation}
f(\phi)(p)=f(\phi(p)).
\end{equation}
Secondly, given a cpc order zero map $A\rightarrow B$ and a tracial state $\tau:B\rightarrow\mathbb C$, the composition $\tau\circ\phi$ defines a positive tracial functional on $A$, \cite[Corollary 4.4]{WZ:MJM}. The other key fact we need is that order zero maps with finite dimensional domains are projective in the sense of Lemma \ref{L:Proj} below. This follows from the duality between order zero maps with domain $A$ and $^*$-homomorphisms from the cone $C(A)=C_0(0,1]\otimes A$ (see \cite{WZ:MJM}) and Loring's projectivity of cones over finite dimensional $C^*$-algebras \cite{L:Book}.
\begin{lemma}\label{L:Proj}
Let $A,B,F$ be $C^*$-algebras with $F$ finite dimensional and let $q:A\twoheadrightarrow B$ be a surjective $^*$-homomorphism. Given a cpc order zero map $\phi:F\rightarrow B$, there exists a cpc order zero map $\tilde{\phi}:F\rightarrow A$ with $q\circ\tilde{\phi}=\phi$.
\end{lemma}
Given a $C^*$-algebra $A$, we denote the quotient $C^*$-algebra $\ell^\infty(A)/c_0(A)$ by $A_\infty$ and refer to this as the \emph{sequence algebra of $A$}. We have a natural $^*$-homomorphism from $A$ into $\ell^\infty(A)$ obtained by regarding each element of $A$ as a constant sequence in $\ell^\infty(A)$. Following this with the quotient map from $\ell^\infty(A)$ into $A_\infty$, we obtain an embedding $A\hookrightarrow A_\infty$, and we use this to regard $A$ as a $C^*$-subalgebra of $A_\infty$ henceforth. In this way we can form the relative commutant $A_\infty\cap A'$, which is referred to as the \emph{central sequence algebra} of $A$. A bounded sequence $(x_n)_{n=1}^\infty$ in $A$ is said to be \emph{central} if its image $[(x_n)_{n=1}^\infty]$ in $A_\infty$ lies in $A_\infty\cap A'$.
We write $T(A)$ for the tracial state space of $A$. Given a sequence $(\tau_n)_{n=1}^\infty$ in $T(A)$ and a free ultrafilter $\omega\in\beta\mathbb N\setminus\mathbb N$, the trace
$$
(x_n)_{n=1}^\infty\mapsto\lim_{n\rightarrow\omega}\tau_n(x_n)
$$
on $\ell^\infty(A)$ induces a trace on $A_\infty$. We write $T_\infty(A)$ for the collection of all traces on $A_\infty$ arising in this fashion. When $\tau_n=\tau\in T(A)$ for all $n$, we write $\tau_\omega$ for the resulting trace in $T_\infty(A)$. We use the traces in $T_\infty(A)$ to define uniformly tracially large order zero maps into $A_\infty$.
\begin{definition}\label{D:TLM}
Let $A$ be a separable unital $C^*$-algebra with $T(A)\neq\emptyset$. A completely positive and contractive order zero map $\Phi:M_k\rightarrow A_\infty$ is \emph{uniformly tracially large} if $\tau(\Phi(1_k))=1$ for all $\tau\in T_\infty(A)$.
\end{definition}
By Lemma \ref{L:Proj}, every cpc order zero map $M_k\rightarrow A_\infty$ lifts to a cpc order zero map $M_k\rightarrow\ell^\infty(A)$ which consists of a sequence of cpc order zero maps $M_k\rightarrow A$. We can rephrase the uniformly tracially large condition in terms of these liftings and traces on $A$. Indeed, the definition of $T_\infty(A)$ is designed to make it easy to manipulate conditions of the form (\ref{L:TI1}).
\begin{lemma}\label{L:TI}
Let $A$ be a separable unital $C^*$-algebra with $T(A)\neq\emptyset$. Let $\Phi:M_k\rightarrow A_\infty$ be a cpc order zero map. Then $\Phi$ is uniformly tracially large if and only if any lifting $(\phi_n):M_k\rightarrow\ell^\infty(A)$ of $\Phi$ to a sequence of cpc order zero maps satisfies
\begin{equation}\label{L:TI1}
\lim_{n\rightarrow\infty}\min_{\tau\in T(A)}\tau(\phi_n(1_k))=1.
\end{equation}
\end{lemma}
\begin{proof}
That (\ref{L:TI1}) implies that $\Phi$ is uniformly tracially large is immediate. For the converse, suppose $\Phi:M_k\rightarrow A_\infty$ is a uniformly tracially large cpc order zero map but (\ref{L:TI1}) fails for some lifting $(\phi_n)$. Then, there exists $\varepsilon>0$, an increasing sequence $\{m_n\}_{n=1}^\infty$ in $\mathbb N$ and traces $\tau_n\in T(A)$ such that $\tau_n(\phi_{m_n}(1_k))\leq 1-\varepsilon$ for all $n\in\mathbb N$. Given a free ultrafilter $\omega$, the map $\rho:[(x_n)_{n=1}^\infty]\mapsto \lim_{n\rightarrow\omega}\tau_n(x_{m_n})$ defines a trace in $T_\infty(A)$, which has $\rho(\Phi(1_k))\leq 1-\varepsilon$, contrary to hypothesis.
\end{proof}
\begin{remark}\label{R:OT}
In a similar vein, for a fixed trace $\tau\in T(A)$ a cpc order zero map $\Phi:M_k\rightarrow A_\infty$ has $\tau_\omega(\Phi(1_k))=1$ for all $\omega\in\beta\mathbb N\setminus\mathbb N$ if and only if any lifting $(\phi_n)_n$ of $\Phi$ to a sequence of cpc order zero maps $M_k\rightarrow A$ has $\lim_{n\rightarrow\infty}\tau(\phi_n(1_k))=1$.
\end{remark}
Via functional calculus and a standard central sequence technique, uniformly tracially large cpc order zero maps $M_k\rightarrow A_\infty\cap A'$ give rise to the maps produced by \cite[Lemma 3.3]{MS:Acta}.
\begin{lemma}\label{L:MS33}
Let $A$ be a separable unital $C^*$-algebra with $T(A)\neq \emptyset$. Suppose that there exists a uniformly tracially large cpc order zero map $\Phi:M_k\rightarrow A_\infty\cap A'$. Then the conclusion of \cite[Lemma 3.3]{MS:Acta} holds for $A$. That is, there exists a cpc order zero map $\Psi:M_k\rightarrow A_\infty\cap A'$ and a central sequence $(c_n)_{n=1}^\infty$ of positive contractions in $A$ such that
\begin{equation}\label{L:MS33:1}
\lim_{n\rightarrow\infty}\max_{\tau\in T(A)}|\tau(c_n^m)-1/k|=0
\end{equation}
for any $m\in\mathbb N$ and $\Psi(e)=[(c_n)_{n=1}^\infty]$ for some minimal projection $e\in M_k$.
\end{lemma}
\begin{proof}
We need to produce a cpc order zero map $\Psi:M_k\rightarrow A_\infty\cap A'$ such that $\tau(\Psi^m(1_k))=1$ for each $m\in\mathbb N$ and $\tau\in T_\infty(A)$. Given such a map, fix a minimal projection $e\in M_k$ and take a lifting $(\psi_n)_{n=1}^\infty$ of $\Psi$ to a sequence of cpc order zero maps from $M_k$ to $A$. We can then set $c_n=\psi_n(e)$, so that $(c_n)_{n=1}^\infty$ is a central sequence. For each $m\in\mathbb N$ we have
$$
\lim_{n\rightarrow\infty}\min_{\tau\in T(A)}\tau(\psi_n^m(1_k))=1
$$
by Lemma \ref{L:TI}. For each $m,n\in\mathbb N$ and $\tau\in T(A)$, the map $\tau(\psi_n^m(\cdot))$ is a trace on $M_k$ (\cite[Corollary 4.4]{WZ:MJM}), so $\tau(c_n^m)=\tau(\psi_n^m(1_k))/k$. Hence (\ref{L:MS33:1}) holds.
To construct $\Psi$, fix a uniformly tracially large cpc order zero map $\Phi:M_k\rightarrow A_\infty\cap A'$. Then, for each $m\in\mathbb N$, the map $\Phi^{1/m}:M_k\rightarrow A_\infty\cap A'$ is a cpc order zero map. Lift each $\Phi^{1/m}$ to a sequence $(\phi^{(m)}_n)_{n=1}^\infty$ of cpc order zero maps $M_k\rightarrow A$. Fix a dense sequence $(x_r)_{r=1}^\infty$ in $A$ and for each $s\in\mathbb N$, we can find $r_s$ sufficiently large such that:
\begin{itemize}
\item $\|[\phi^{(s)}_{r_s}(y),x_i]\|\leq \frac{1}{s}\|y\|$, for all $y\in M_k$ and $i\in\{1,\dots,s\}$;
\item $\tau(\phi^{(s)}_{r_s}(1_k)^s)\geq 1-\frac{1}{s}$, for all $\tau\in T(A)$.
\end{itemize}
To obtain the second condition, note that $((\phi^{(s)}_n)^s)_{n=1}^\infty$ is a lifting of $\Phi$ and apply Lemma \ref{L:TI}. The order zero map $\Psi:M_k\rightarrow A_\infty\cap A'$ induced by $(\phi^{(s)}_{r_s})_{s=1}^\infty$ has $\tau(\Psi^m(1_k))=1$ for all $m\in\mathbb N$ and $\tau\in T_\infty(A)$, as required.
\end{proof}
The main result (Theorem 1.1) of \cite{MS:Acta} shows that for a simple separable unital nuclear nonelementary $C^*$-algebra $A$ with finitely many extremal traces and $T(A)\neq\emptyset$, the following properties are equivalent:
\begin{enumerate}[(i)]
\item $A$ is $\mathcal Z$-stable;
\item $A$ has strict comparison;
\item every completely positive map from $A$ to $A$ can be excised in small central sequences (see \cite[Definition 2.1]{MS:Acta} for the definition of this concept);
\item $A$ has property (SI) as defined in \cite[Definition 3.3]{S:JFA} (see \cite[Definition 4.1]{MS:CMP} for the equivalent formulation used in \cite{MS:Acta}).
\end{enumerate}
The implication (i)$\implies$(ii) is due to R\o{}rdam \cite{R:IJM} and holds only assuming that $A$ is unital, separable, simple and exact. The implication (iii)$\implies$(iv) is immediate from the definitions. The proof of the remaining implications (ii)$\implies$(iii) and (iv)$\implies$(i) is valid for any unital simple separable nuclear $C^*$-algebra with $T(A)\neq\emptyset$ for which the conclusion of \cite[Lemma 3.3]{MS:Acta} holds as this is the only place where the extremal trace hypothesis comes into play. For the implication (ii)$\implies$(iii) this is set out explicitly in the proof of \cite[Theorem 4.2]{MS:Acta}, and the proof of (iv)$\implies$(i) is readily seen to be a direct argument from property (SI) and the conclusion of \cite[Lemma 3.3]{MS:Acta}. Using Lemma \ref{L:MS33}, we can formulate this result as follows.
\begin{theorem}[Matui-Sato]\label{MS}
Let $A$ be a simple separable unital nuclear $C^*$-algebra with strict comparison. Suppose that for each $k\geq 2$, $A$ admits uniformly tracially large cpc order zero maps $M_k\rightarrow A_\infty\cap A'$. Then $A$ is $\mathcal Z$-stable.
\end{theorem}
We now turn to amenability and Matui-Sato's construction of uniformly tracially large order zero maps for simple separable unital $C^*$-algebras with finitely many extremal traces. Recall that in \cite{H:Invent}, Haagerup showed that nuclear $C^*$-algebras are amenable in the sense of \cite{J:MAMS}. Moreover \cite[Theorem 3.1]{H:Invent} gives additional information on the location of a virtual diagonal witnessing amenability. Combining this with Johnson's Hahn-Banach argument for extraction of an approximate diagonal from a virtual diagonal (\cite[Lemma 1.2]{J:AJM}) gives Lemma \ref{L.Amenable}, which is used in the proof of Sato's lemma below, as well as the construction of central sequences of positive elements with specified tracial behaviour in Section \ref{Sect3}.
\begin{lemma}[Haagerup]\label{L.Amenable}
Let $A$ be a unital nuclear $C^*$-algebra. Then for any finite subset $\mathcal F$ of $A$ and $\eta>0$, there exists $r\in\mathbb N$, contractions $a_1,\dots,a_r\in A$ and positive reals $\lambda_1,\dots,\lambda_r$ with $\sum_{i=1}^r\lambda_i=1$ such that:
\begin{enumerate}
\item\label{L.Amenable1} $\|\sum_{i=1}^r\lambda_i a_ia_i^*-1\|<\eta$;
\item\label{L.Amenable2} $\|\sum_{i=1}^r\lambda_i (xa_i\otimes a_i^*-a_i\otimes a_i^*x)\|_{A\,\widehat{\otimes}\, A}<\eta$ for all $x\in\mathcal F$,
\end{enumerate}
where $A\,\widehat{\otimes}\, A$ is the projective tensor product.
\end{lemma}
Let $N\subset\mathcal B(\Hs)$ be a von Neumann algebra acting on a separable Hilbert space $\Hs$. We define $N^\infty$ to be the quotient $C^*$-algebra $\ell^\infty(N)/J$, where $J$ denotes the norm-closed two sided ideal of all strong$^*$-null sequences in $\ell^\infty(N)$. Just as in the norm-closed setting, we can embed $N$ as a subalgebra of constant sequences in $N^\infty$ and so obtain the strong$^*$-central sequence algebra $N^\infty\cap N'$. With this notation we can now state the following result, which has been generalised to the nonnuclear setting in \cite{KR:InPrep}.
\begin{lemma}[Sato, {\cite[Lemma 2.1]{S:arXiv}}]\label{L:Sato}Let $A$ be a separable unital nuclear $C^*$-algebra. Suppose that $A\subset\mathcal B(\Hs)$ is a faithful unital representation of $A$ on a separable Hilbert space and write $N=A''$. Then the natural $^*$-homomorphism
$$
A_\infty\cap A'\rightarrow N^\infty\cap N'
$$
is surjective.
\end{lemma}
Sato's lemma is the key ingredient in \cite[Lemma 3.3]{MS:Acta}, which obtains uniformly tracially large order zero maps when $A$ is separable, simple, unital and nuclear with finitely many extremal traces. For use in Section \ref{Sect3}, we show how to deduce this from the previous lemma using projectivity of order zero maps in the context of a fixed extremal trace. When $A$ has only finitely many extremal traces, a similar argument using the trace obtained from averaging the extremal traces can be used to prove \cite[Lemma 3.3]{MS:Acta}. Recall that a II$_1$ factor $N$ is said to be \emph{McDuff} if it absorbs the hyperfinite II$_1$ factor $R$ tensorially, i.e. $N\cong N\,\overline{\otimes}\,R$. Every McDuff factor $N$ has an abundance of centralising sequences: for each $k\geq 2$, factorise $N\cong N\,\overline{\otimes}\,R$ and by regarding $R$ as the weak closure of the UHF-algebra $M_{k^\infty}$ we can consider the sequence of $n$-th tensor factor embeddings $M_k\rightarrow M_{k^\infty}$ to obtain a unital embedding $M_k\rightarrow N^\infty\cap N'$.
\begin{lemma}[cf. {\cite[Lemma 3.3]{MS:Acta}}]\label{L:OT}Let $A$ be a simple separable unital nuclear nonelementary $C^*$-algebra and let $\tau$ be an extremal tracial state on $A$. For $k\geq 2$, there exists a cpc order zero map $\Phi:M_k\rightarrow A_\infty\cap A'$ with $\tau_\omega(\Phi(1_k))=1$ for all $\omega\in \beta\mathbb N\setminus\mathbb N$.
\end{lemma}
\begin{proof}
Fix $k\geq 2$. Let $\pi_\tau$ denote the GNS-representation associated to $\tau$. As $\tau$ is an extremal trace, and $A$ is nuclear it follows that $\pi_\tau(A)''=N$ is an injective II$_1$ factor. By Connes' theorem \cite[Theorem 5.1]{C:Ann}, $N$ is McDuff. As such, there is a unital embedding $\iota:M_k\hookrightarrow N^\infty\cap N'$. By Lemma \ref{L:Sato} and the projectivity of order zero maps (Lemma \ref{L:Proj}), there exists an order zero map $\Phi:M_k\rightarrow A_\infty\cap A'$ lifting $\iota$. For each $\omega\in\beta\mathbb N\setminus\mathbb N$, the trace $\tau_\omega$ given by $\tau_\omega((x_n)_{n=1}^\infty)=\lim_{n\rightarrow\omega}\tau(x_n)$ is well defined on $N^\infty$ and has $\tau_\omega(\iota(1_k))=1$. Hence $\tau_\omega(\Phi(1_k))=\tau_\omega(\iota(1_k))=1$.
\end{proof}
\begin{remark}
Note that the map $\Phi$ of Lemma \ref{L:OT} already has $\tau_\omega(\Phi^m(1_k))=1$ for all $m\in\mathbb N$ and $\omega\in\beta\mathbb N\setminus\mathbb N$. We do not have to run the argument of Lemma \ref{L:MS33} to obtain this here.
\end{remark}
\section{Approximately central functions on the trace space}\label{Sect3}
\noindent
Recall that the tracial state space $T(A)$ of a separable unital $C^*$-algebra is a compact (in the weak$^*$-topology) convex subset of the state space of $A$, and so the Krein-Milman theorem shows that $T(A)$ is the closed convex hull of its extreme points $\partial_eT(A)$. Further, $T(A)$ forms a metrisable Choquet simplex: every point of $T(A)$ is the barycentre of a unique measure supported on $\partial_eT(A)$, \cite{A:Book}. If additionally $\partial_eT(A)$ is compact, then $T(A)$ is known as a Bauer simplex. In this case we have a natural identification of $\mathrm{Aff}(T(A))=\{f:T(A)\rightarrow \mathbb R \mid f\text{ is continuous and affine}\}$ with $C_{\mathbb R}(\partial_eT(A))$ given by restriction (see \cite{G:Book}). Our objective in this section is Lemma \ref{L:Key} which enables us to produce a finite collection of cpc order zero maps with large sum when $T(A)$ has compact extreme boundary of finite covering dimension.
The covering dimension of a compact Hausdorff space $X$ can be defined in a number of equivalent fashions (see \cite{P:Book}). We use the colouring formulation as follows. For $m\in\{0,1,\dots\}$, say that $\dim X\leq m$ if and only if every finite open cover $\mathcal U$ of $X$ admits an $(m+1)$-colourable refinement $\mathcal V$: that is $\mathcal V$ is an open cover of $X$ with the property that every $V\in\mathcal V$ is contained in some element of $\mathcal U$ (i.e. $\mathcal V$ refines $\mathcal U$) and there exists a function $c:\mathcal V\rightarrow\{0,1,\dots,m\}$ such that if $V,V'\in\mathcal V$ have $c(V)=c(V')$, then $V\cap V'=\emptyset$ (i.e. $\mathcal V$ can be $(m+1)$-coloured, in that each element of $\mathcal V$ can be assigned a colour such that two sets of the same colour are disjoint). We need a slight strengthening so that the sets in $\mathcal V$ form a closed cover of $X$. This is well known, but we include a proof for completeness.
\begin{lemma}\label{L:CD}
Let $X$ be a compact Hausdorff topological space with $\dim(X)\leq m$. Then for each finite open cover $\mathcal U$ of $X$, there exists a finite cover $\mathcal V$ consisting of closed sets refining $\mathcal U$ such that there is an $(m+1)$-colouring $c:\mathcal V\rightarrow\{0,1,\dots,m\}$ of $\mathcal V$ with the property that if $V,V'\in\mathcal V$ have $c(V)=c(V')$, then $V\cap V'=\emptyset$.
\end{lemma}
\begin{proof}
Given a finite open cover $\mathcal U$ of $X$, we can find an open cover $\tilde{\mathcal V}$ refining $\mathcal U$ which is $(m+1)$-colourable. Construct a partition of unity $(f_V)_{V\in\tilde{\mathcal V}}$ subordinate to $\tilde{\mathcal V}$, i.e. $0\leq f_V\leq 1$ for all $V$, $\sum_{V\in \tilde{\mathcal V}}f_V(x)=1$ for all $x\in X$ and the support of each $f_V$ is contained in $V$. Let $\mathcal V$ be the collection of the supports of the $f_V$. This consists of closed sets, and refines $\tilde{\mathcal V}$ so is $(m+1)$-colourable. As every point $x\in X$ lies in the support of some $f_V$ it follows that $\mathcal V$ covers $X$.
\end{proof}
The following lemma of Lin (\cite{L:JFA}, based on work of Cuntz and Pedersen \cite{CP:JFA}) enables us to realise strictly positive elements of $\mathrm{Aff}(T(A))$ via positive elements of $A$.
\begin{lemma}[Lin {\cite[Theorem 9.3]{L:JFA}}, following Cuntz, Pedersen {\cite{CP:JFA}}]\label{T.Lin}
Let $A$ be a simple unital $C^*$-algebra with non-empty tracial state space and let $f\in\mathrm{Aff}(T(A))$ be strictly positive. Then for any $\varepsilon>0$, there exists $x\in A_+$ with $f(\tau)=\tau(x)$ for all $\tau\in T(A)$ and $\|x\|\leq\|f\|+\varepsilon$.
\end{lemma}
Given any positive contraction $e$ in a nuclear $C^*$-algebra $A$ we can apply Haagerup's approximate diagonal to $e$ to produce a central sequence of positive contractions which has the same tracial behaviour as $e$. In particular, we can witness strictly positive elements of $\mathrm{Aff}(T(A))$ via central sequences of positive contractions.
\begin{lemma}\label{L:CT}
Let $A$ be a simple separable unital nuclear $C^*$-algebra with a non-empty trace space and let $f$ be a positive affine continuous function on $T(A)$ with $\|f\|\leq 1$. Then there exists a central sequence $(e_n)_{n=1}^\infty$ of positive contractions in $A$, representing an element of $A_\infty\cap A'$, with
\begin{equation}\label{L:CT:E1}
\lim_{n\rightarrow\infty}\sup_{\tau\in T(A)}|\tau(e_n)-f(\tau)|=0.
\end{equation}
\end{lemma}
\begin{proof}
Define a sequence $(f_n)_{n=1}^\infty$ of continuous affine strictly positive functions on $T(A)$ by defining
$$
f_n(\tau)=\frac{1}{3n}+\Big(1-\frac{2}{3n}\Big)f(\tau),
$$
for $\tau\in T(A)$. By construction each $f_n$ is strictly positive and has $\|f_n\|\leq 1-\frac{1}{3n}$ and $|f_n(\tau)-f(\tau)|\leq \frac{1}{n}$ for all $\tau\in T(A)$. For each $n\in\mathbb N$, take $\varepsilon=\frac{1}{3n}$ in Lemma \ref{T.Lin} to obtain $x_n\in A_+$ with $\|x_n\|\leq 1$ such that $\tau(x_n)=f_n(\tau)$ for all $\tau\in T(A)$. By Haagerup's theorem (Lemma \ref{L.Amenable}), we can find an approximate diagonal $(\sum_{i=1}^{l_n}\lambda^{(n)}_ia^{(n)}_i\otimes a^{(n)}_i{}^*)_{n=1}^\infty$ in $A\odot A$ such that each $\|a_i^{(n)}\|\leq 1$ and $\lambda^{(n)}_i$ are positive reals with $\sum_{i=1}^{l_n}\lambda^{(n)}_i=1$ for all $n$ and
\begin{align}
&\Big\|\sum_{i=1}^{l_n}\lambda^{(n)}_ia_i^{(n)}a_i^{(n)}{}^*-1_A \Big\|\stackrel{n\rightarrow\infty}{\rightarrow}0;\label{L:CT1}\\
&\Big\|\sum_{i=1}^{l_n}\lambda^{(n)}_iba_i^{(n)}\otimes a_i^{(n)}{}^*-\sum_{i=1}^{l_n}\lambda^{(n)}_ia_i^{(n)}\otimes a_i^{(n)}{}^*b\Big\|_{A\,\widehat{\otimes}\, A}\stackrel{n\rightarrow\infty}{\rightarrow}0,\quad b\in A.\label{L:CT2}
\end{align}
Define $e_n=\sum_{i=1}^{l_n}\lambda^{(n)}_ia_i^{(n)}x_na^{(n)}_i{}^*$. These are positive contractions in $A$. For each $n\in\mathbb N$, the map $y\otimes z\mapsto yx_nz$ is contractive with respect to the projective tensor norm, so condition (\ref{L:CT2}) ensures that $(e_n)$ is a central sequence. For $\tau\in T(A)$, we estimate
\begin{align*}
|\tau(x_n-e_n)|&=\Big|\tau\Big(x_n-\sum_{i=1}^{l_n}\lambda^{(n)}_ia_i^{(n)}x_na^{(n)}_i{}^*\Big)\Big|\\
&=\Big|\tau\Big(\Big(1_A-\sum_{i=1}^{l_n}\lambda^{(n)}_ia^{(n)}_i{}^*a_i^{(n)}\Big)x_n\Big)\Big|\\
&\leq\tau\Big(1_A-\sum_{i=1}^{l_n}\lambda^{(n)}_ia^{(n)}_i{}^*a_i^{(n)}\Big)\|x_n\|\\
&=\tau \Big(1_A-\sum_{i=1}^{l_n}\lambda^{(n)}_ia^{(n)}_ia_i^{(n)}{}^*\Big)\|x_n\|\\
&\leq \Big\|1_A-\sum_{i=1}^{l_n}\lambda^{(n)}_ia^{(n)}_ia_i^{(n)}{}^* \Big\|.
\end{align*}
Then (\ref{L:CT:E1}) follows from this estimate, (\ref{L:CT1}) and the fact that $|f(\tau)-\tau(x_n)|\leq \frac{1}{n}$ for all $\tau\in T(A)$.
\end{proof}
The next lemma enables us to convert central sequences which are tracially orthogonal to norm orthogonal sequences. The argument has its origins in Kishimoto's work \cite{K:JFA}, and our proof is based on \cite[Lemma 3.2]{MS:Acta}.
\begin{lemma}\label{L:KT}
Let $A$ be a separable unital $C^*$-algebra with non-empty trace space $T(A)$. Let $T_0\subset T(A)$ be non-empty and suppose $(e^{(1)}_n)_{n=1}^\infty,\ldots,(e^{(L)}_n)_{n=1}^\infty$ are sequences of positive contractions in $A$ representing elements of $A_\infty\cap A'$ such that
\begin{equation}\label{KT:E1}
\lim_{n\rightarrow\infty}\sup_{\tau\in T_0}|\tau(e_n^{(l)}e_n^{(l')})|=0,\quad l\neq l'.
\end{equation}
Then there exist positive elements $\tilde{e}^{(l)}_n\leq e^{(l)}_n$ so that:
\begin{enumerate}[(i)]
\item $(\tilde{e}_n^{(l)})_n$ represents an element of $A_\infty\cap A'$;\label{KT:1}
\item $\lim_{n\rightarrow\infty}\sup_{\tau\in T_0}|\tau(\tilde{e}^{(l)}_n-e_n^{(l)})|=0$ for all $l$;\label{KT:2}
\item $(\tilde{e}_n^{(l)})_n\perp (\tilde{e}_n^{(l')})_n$ in $A_\infty\cap A'$ for $l\neq l'$.\label{KT:3}
\end{enumerate}
\end{lemma}
\begin{proof}
For each $l\in\{1,\dots,L\}$ and $n\in\mathbb N$ define
$$
g^{(l)}_n=(e_n^{(l)})^{1/2}\Big(\sum_{l'\neq l}e_n^{(l')}\Big)(e_n^{(l)})^{1/2},
$$
so $(g^{(l)}_n)_{n=1}^\infty$ is a central sequence for each $l$. The hypothesis (\ref{KT:E1}) gives
$$
\sup_{\tau\in T_0}\tau(g_n^{(l)})\leq \sum_{l'\neq l}\sup_{\tau\in T_0}\tau(e_n^{(l)}e_n^{(l')})\stackrel{n\rightarrow\infty}{\rightarrow}0.
$$
For $r\in\mathbb N$, define the continuous function $f_r:[0,\infty)\rightarrow [0,1]$ by $f_r(t)=\min(1,rt)$ and note that
$$
\sup_{t\geq 0}(1-f_r(t))t\leq 1/r.
$$
For $l\in\{1,\dots,L\}$ and $n,r\in\mathbb N$, define positive contractions by
$$
x_{n,r}^{(l)}=(e_n^{(l)})^{1/2}\left(1-f_r(g_n^{(l)})\right)(e_n^{(l)})^{1/2}.
$$
These satisfy $x_{n,r}^{(l)}\leq e_n^{(l)}$ and for each $l$ and $r$, the sequence $(x_{n,r}^{(l)})_{n=1}^\infty$ represents an element of $A_\infty\cap A'$.
For each $s\in\mathbb N$ and $l\in\{1,\dots,L\}$, we have
$$
\sup_{\tau\in T_0}\tau((g_n^{(l)})^s)\leq\|g_n^{(l)}\|^{s-1}\sup_{\tau\in T_0}\tau(g_n^{(l)})\stackrel{n\rightarrow\infty}{\rightarrow}0.
$$
By choosing suitable polynomial approximations to $f_r(t)$ on $[0,L-1]$, it follows that
\begin{equation}\label{KT:E2}
\sup_{\tau\in T_0}\tau(e_n^{(l)}-x_{n,r}^{(l)})=\sup_{\tau\in T_0}\tau((e_n^{(l)})^{1/2}f_r(g_n^{(l)})(e_n^{(l)})^{1/2})\leq\|e^{(l)}_n\|\sup_{\tau\in T_0}\tau(f_r(g_n^{(l)}))\stackrel{n\rightarrow\infty}{\rightarrow}0,
\end{equation}
for each $l\in\{1,\dots,L\}$ and $r\in\mathbb N$.
For each $l$, we compute exactly as in \cite[Lemma 3.2]{MS:Acta}, to obtain
\begin{align}
\left\|x_{n,r}^{(l')}x_{n,r}^{(l)}\right\|^2&=\left\|x_{n,r}^{(l)}(x_{n,r}^{(l')})^2x_{n,r}^{(l)}\right\|\nonumber\\
&\leq\Big\|x_{n,r}^{(l)}\Big(\sum_{j\neq l}x_{n,r}^{(j)}\Big)x_{n,r}^{(l)}\Big\|\nonumber\\
&=\Big\|(e_n^{(l)})^{1/2}\left(1-f_r(g_n^{(l)})\right)(e_n^{(l)})^{1/2}\Big(\sum_{j\neq l}x_{n,r}^{(j)}\Big)(e_n^{(l)})^{1/2}\left(1-f_r(g_n^{(l)})\right)(e_n^{(l)})^{1/2}\Big\|\nonumber\\
&\leq\big\|(e_n^{(l)})^{1/2}\left(1-f_r(g_n^{(l)})\right)g_n^{(l)}\left(1-f_r(g_n^{(l)})\right)(e_n^{(l)})^{1/2}\big\|\nonumber\\
&\leq\left\|\left(1-f_r(g_n^{(l)})\right)g_n^{(l)}\right\|\leq 1/r,\label{KT:E3}
\end{align}
for every $n,r\in\mathbb N$ and $l'\neq l$.
Fix a countable dense sequence $(y_s)_{s=1}^\infty$ in $A$. For each $r\in\mathbb N$, use (\ref{KT:E2}) and the fact that for each $l\in\{1,\dots,L\}$, $(x_{n,r}^{(l)})_{n=1}^\infty$ is a central sequence to obtain $N_r\in\mathbb N$ such that
\begin{itemize}
\item $\|[x^{(l)}_{n,r},y_s]\|\leq 1/r$ for $s\in\{1,\dots,r\}$;
\item $\sup_{\tau\in T_0}\tau(e_{n}^{(l)}-x_{n,r}^{(l)})<1/r$
\end{itemize}
for $n\geq N_r$. We may assume that $N_r<N_{r+1}$ for all $r$. Set $N_0=0$. For $n\in\mathbb N$, let $r_n\in\mathbb N$ be such that $N_{r_n}<n\leq N_{r_{n+1}}$ so that $r_n\rightarrow\infty$ as $n\rightarrow\infty$ and define $\tilde{e}_n^{(l)}=x_{n,r_n}^{(l)}$. The two conditions above give conditions (\ref{KT:1}) and (\ref{KT:2}), while (\ref{KT:3}) is a consequence of (\ref{KT:E3}).
\end{proof}
We are now in position to give the main technical lemma which is already enough to handle the $0$-dimensional compact extreme boundary case.
\begin{lemma}\label{L:Key}
Let $m\geq 0$, $k\geq 2$ and let $A$ be a simple separable unital nuclear nonelementary $C^*$-algebra with $T(A)\neq\emptyset$ such that $\partial_eT(A)$ is compact with $\dim(\partial_eT(A))\leq m$. Then for each finite set $\mathcal F\subset A$ and $\varepsilon>0$, there exist cpc order zero maps $\phi^{(0)},\dots,\phi^{(m)}:M_k\rightarrow A$ such that
\begin{equation}\label{L:Key:1}
\|[\phi^{(i)}(x),y]\|\leq \varepsilon\|x\|,
\end{equation}
for all $i\in\{0,\dots,m\}$, $x\in M_k$, $y\in\mathcal F$ and such that for each $\tau\in\partial_eT(A)$, there exists $i(\tau)\in \{0,\dots,m\}$ such that $\tau(\phi^{(i(\tau))}(1_k))>1-\varepsilon$.
\end{lemma}
\begin{proof}
Fix $k\geq 2$, a finite subset $\mathcal F\subset A$ and $\varepsilon>0$. For each $\tau\in \partial_eT(A)$, use Lemma \ref{L:OT} to provide a cpc order zero map $\Phi_\tau:M_k\rightarrow A_\infty\cap A'$ with $\tau_\omega(\Phi_\tau(1_k))=1$ for all $\omega\in\beta\mathbb N\setminus\mathbb N$. By Remark \ref{R:OT}, we can go sufficiently far down a sequence of cpc order zero maps from $M_k$ into $A$ which lift $\Phi_\tau$ to find a cpc order zero map
$\phi_\tau:M_k\rightarrow A$ with $\tau(\phi_\tau(1_k))>1-\varepsilon$ and
\begin{equation}\label{L:Key:6}
\|[\phi_\tau(x),y]\|<\varepsilon\|x\|,
\end{equation}
for all $x\in M_k$, $y\in\mathcal F$, $\tau\in\partial_eT(A)$. Define an open neighbourhood of $\tau$ in $\partial_eT(A)$ by $U_\tau=\{\rho\in\partial_eT(A):\rho(\phi_\tau(1_k))>1-\varepsilon\}$. Then $\partial_eT(A)=\bigcup_{\tau\in\partial_eT(A)}U_\tau$, so by compactness there exist $\tau_1,\dots,\tau_L\in\partial_eT(A)$ such that $\partial_eT(A)=\bigcup_{l=1}^LU_{\tau_l}$. As $\dim(\partial_eT(A))\leq m$, Lemma \ref{L:CD} gives a finite cover $\mathcal V$ of $\partial_eT(A)$ consisting of closed sets such that $\mathcal V$ refines $\mathcal U=\{U_{\tau_1},\dots,U_{\tau_L}\}$ and a colouring $c:\mathcal V\rightarrow \{0,1,\dots,m\}$ such that if $c(V)=c(V')$, then $V$ and $V'$ are disjoint. For each $i\in\{0,\dots,m\}$, write $\mathcal V^{(i)}=c^{-1}(\{i\})=\{V^{(i)}_1,\dots,V^{(i)}_{L_i}\}$.
Fix $i\in\{0,\dots,m\}$. For each $j=1,\dots,L_i$, choose a continuous function $f^{(i)}_j:\partial_eT(A)\rightarrow [0,1]$ on $\partial_eT(A)$ with $f^{(i)}_j=1$ on $V^{(i)}_j$ and $f^{(i)}_j=0$ on $\bigcup_{j'\neq j}V^{(i)}_{j'}$. This is possible as elements of $\mathcal V^{(i)}$ are pairwise disjoint closed subsets of $\partial_eT(A)$. As $\partial_eT(A)$ is compact, we can extend $f^{(i)}_j$ to a continuous affine function on $T(A)$ also denoted $f^{(i)}_j$. Now apply Lemma \ref{L:CT} to obtain central sequences $(e_n^{(i,j)})_{n=1}^\infty$ of positive contractions such that
$$
\lim_{n\rightarrow\infty}\sup_{\tau\in T(A)}|\tau(e^{(i,j)}_n)-f^{(i)}_j(\tau)|=0,
$$
for each $j=1,\dots,L_i$. Thus
\begin{equation}\label{L:Key:3}
\lim_{n\rightarrow\infty}\inf_{\tau\in V^{(i)}_j}\tau(e^{(i,j)}_n)=1,\quad \lim_{n\rightarrow\infty}\sup_{\tau\in\bigcup_{j'\neq j}V^{(i)}_{j'}}\tau(e^{(i,j)}_n)=0.
\end{equation}
By construction
$$
\lim_{n\rightarrow\infty}\sup_{\tau\in\bigcup_{s=1}^{L_i}V_s^{(i)}}\tau(e_n^{(i,j)}e_n^{(i,j')})=0
$$
for all $j\neq j'$ in $\{1,\dots,L_i\}$. Therefore we can apply Lemma \ref{L:KT} with $T_0=\bigcup_{s=1}^{L_i}V^{(i)}_s$ to obtain central sequences $(\tilde{e}^{(i,j)}_n)_{n=1}^\infty$ of positive contractions with $\tilde{e}^{(i,j)}_n\leq e^{(i,j)}_n$,
\begin{equation}\label{L:Key:2}
\lim_{n\rightarrow\infty}\|\tilde{e}_n^{(i,j)}\tilde{e}_n^{(i,j')}\|=0
\end{equation}
for $j\neq j'$ and
\begin{equation}\label{L:Key:4}
\lim_{n\rightarrow\infty}\sup_{\tau\in\bigcup_{s=1}^{L_i}V_s^{(i)}}\tau(e^{(i,j)}_n-\tilde{e}^{(i,j)}_n)=0,
\end{equation}
for $j\in\{1,\dots,L_i\}$. In this way (\ref{L:Key:3}) and (\ref{L:Key:4}) give
\begin{equation}\label{L:Key:5}
\lim_{n\rightarrow\infty}\inf_{\tau\in V^{(i)}_j}\tau(\tilde{e}^{(i,j)}_n)=1.
\end{equation}
For $i\in\{0,\dots,m\}$ and $j\in\{1,\dots,L_i\}$, there exists $l(i,j)\in\{1,\dots,L\}$ such that $V^{(i)}_j\subset U_{\tau_{l(i,j)}}$. For $i\in\{0,\dots,m\}$ and $n\in\mathbb N$, define maps $\psi^{(i)}_n:M_k\rightarrow A$ by
\begin{equation}\label{L:Key:8}
\psi^{(i)}_n(x)=\sum_{j=1}^{L_i}\tilde{e}^{(i,j)}_n{}^{1/2}\phi_{\tau_{l(i,j)}}(x)\tilde{e}^{(i,j)}_n{}^{1/2},
\end{equation}
for $x\in M_k$. For each $i$, the sequences $(\psi^{(i)}_n)_{n=1}^\infty$ induce maps $\Psi^{(i)}:M_k\rightarrow A_\infty$. Further, by (\ref{L:Key:2}), we have $(\tilde{e}^{(i,j)}_n)\perp (\tilde{e}^{(i,j')}_n)$ in $A_\infty\cap A'$, so that $\Psi^{(i)}$ is a sum of $L_i$ pairwise orthogonal cpc order zero maps and so is cpc and order zero. The condition $(\tilde{e}^{(i,j)}_n)\perp (\tilde{e}^{(i,j')}_n)$ in $A_\infty\cap A'$ also allows us to use (\ref{L:Key:6}) to obtain
\begin{equation}\label{L:Key:9}
\|[\Psi^{(i)}(x),y]\|\leq \max_{j\in\{1,\dots,L_i\}}\|[\phi_{\tau_{l(i,j)}}(x),y]\|<\varepsilon\|x\|,
\end{equation}
for all $i\in\{0,\dots,m\}$, $x\in M_k$, $y\in\mathcal F$. For $\rho\in V^{(i)}_j$, we have $\rho(\phi_{\tau_{l(i,j)}}(1_k))>1-\varepsilon$ as $V^{(i)}_j\subset U_{\tau_{l(i,j)}}$, giving
\begin{align}
\rho(\psi^{(i)}_n(1_k))&\geq \rho(\tilde{e}^{(i,j)}_n{}^{1/2}\phi_{\tau_{l(i,j)}}(1_k)\tilde{e}^{(i,j)}_n{}^{1/2})\nonumber\\
&=\rho(\tilde{e}^{(i,j)}_n\phi_{\tau_{l(i,j)}}(1_k))\nonumber\\
&=\rho(\phi_{\tau_{l(i,j)}}(1_k))-\rho((1_A-\tilde{e}^{(i,j)}_n)\phi_{\tau_{l(i,j)}}(1_k))\nonumber\\
&>(1-\varepsilon)-\rho(1_A-\tilde{e}^{(i,j)}_n).\label{L:Key:7}
\end{align}
Combining (\ref{L:Key:7}) with (\ref{L:Key:5}) gives
\begin{equation}\label{L:Key:10}
\liminf_{n\rightarrow\infty}\inf_{\rho\in\bigcup_{j=1}^{L_i}V_j^{(i)}}\rho(\psi^{(i)}_n(1_k))>1-\varepsilon.
\end{equation}
For each $i\in\{0,\dots,m\}$, take a lifting $(\phi^{(i)}_n)_{n=1}^\infty$ of $\Psi^{(i)}$ to a sequence of cpc order zero maps $M_k\rightarrow A$. We claim that for $n$ sufficiently large, the maps $\phi^{(0)}_n,\dots,\phi^{(m)}_n$ satisfy the properties claimed in the statement of the lemma. Indeed, since
$$\sup_{\substack{x\in M_k\\\|x\|\leq 1}}\|\phi^{(i)}_n(x)-\psi^{(i)}_n(x)\|\rightarrow 0,$$
(\ref{L:Key:9}) gives
$$
\|[\phi^{(i)}_n(x),y]\|<\varepsilon\|x\|
$$
for all $n$ sufficiently large and for all $i\in\{0,\dots,m\}$, $x\in M_k$, $y\in\mathcal F$. By (\ref{L:Key:10}), we have
\begin{equation}\label{L:Key:11}
\liminf_{n\rightarrow\infty}\inf_{\rho\in\bigcup_{j=1}^{L_i}V_j^{(i)}}\rho(\phi^{(i)}_n(1_k))>1-\varepsilon,
\end{equation}
and so for all $n$ sufficiently large we have
$$
\rho(\phi^{(i)}_n(1_k))>1-\varepsilon,
$$
for all $i\in\{0,\dots,m\}$ and $\rho\in\bigcup_{j=1}^{L_i}V_j^{(i)}$. Since $\bigcup_{i=0}^m\bigcup_{j=1}^{L_i}V^{(i)}_j=\partial_eT(A)$, the result follows with $i(\rho)=\min\{i:\rho\in \bigcup_{j=1}^{L_i}V_j^{(i)}\}$.
\end{proof}
In the zero dimensional case, we immediately obtain uniformly tracially large order zero maps from the previous lemma.
\begin{theorem}\label{T:0D}
Let $A$ be a simple separable unital nuclear nonelementary $C^*$-algebra with $T(A)\neq\emptyset$ and $\partial_eT(A)$ compact and zero dimensional. Then for each $k\geq 2$, $A$ admits uniformly tracially large order zero maps $M_k\rightarrow A_\infty\cap A'$.
\end{theorem}
\begin{proof}
Fix $k\geq 2$. Take a nested sequence $(\mathcal F_n)_{n=1}^\infty$ of finite subsets of $A$ whose union is dense in $A$. For each $n$, Lemma \ref{L:Key} gives a cpc order zero map $\phi_n:M_k\rightarrow A$ with
$$
\|[\phi_n(x),y]\|\leq \frac{1}{n}\|x\|,
$$
for all $y\in\mathcal F_n$ and $x\in M_k$, and
\begin{equation}\label{T:0D:1}
\tau(\phi_n(1_k))>1-\frac{1}{n}
\end{equation}
for all $\tau\in\partial_eT(A)$ and all $n\in\mathbb N$. By convexity, (\ref{T:0D:1}) holds for all $\tau\in T(A)$ and $n\in\mathbb N$. Thus the sequence $(\phi_n)_{n=1}^\infty$ induces a uniformly tracially large cpc order zero map $\Phi:M_k\rightarrow A_\infty\cap A'$ by Lemma \ref{L:TI}.
\end{proof}
\begin{corollary}
Let $A$ be a simple separable unital nuclear nonelementary $C^*$-algebra with $T(A)\neq\emptyset$ and $\partial_eT(A)$ compact and zero dimensional. If $A$ has strict comparison, then $A$ is $\mathcal Z$-stable.
\end{corollary}
\begin{proof}
This follows immediately from Theorems \ref{MS} and \ref{T:0D}.
\end{proof}
\section{Higher dimensional compact extreme boundaries}\label{Sect4}
In this last section we extend the previous work to higher dimensional compact extreme boundaries. The argument is based on the techniques developed in \cite{W:Invent1,W:Invent2}. The starting point is to use the finite dimensional compact extreme boundary to obtain a finite collection of cpc order zero maps with a large tracial sum in the sense of Lemma \ref{L:S1} below. It is at this point in the argument where it is critical that we can obtain the family of order zero maps in Lemma \ref{L:Key} not just with large tracial sum but so that for each $\tau\in \partial_eT(A)$ one member of the family is large in $\tau$.
\begin{lemma}\label{L:S1}
Let $m\in\mathbb N$ and let $A$ be a simple separable unital nuclear nonelementary $C^*$-algebra with $T(A)\neq\emptyset$ and $\partial_eT(A)$ compact with $\dim(\partial_eT(A))\leq m$. Then, for $k\geq 2$ and any separable subspace $X\subset A_\infty$, there exist cpc order zero maps $\phi^{(0)},\dots,\phi^{(m)}:M_k\rightarrow A_\infty\cap A'\cap X'$ such that
\begin{equation}\label{L:S1:2}
\tau\Big(\sum_{i=0}^m\phi^{(i)}(1_k)b\Big)\geq\tau(b),
\end{equation}
for all $\tau\in T_\infty(A)$ and all $b\in (A_\infty)_+$.
\end{lemma}
\begin{proof}
Fix $k\geq 2$ and a separable subspace $X\subset A_\infty$. Let $(y^{(i)})_{i=1}^\infty$ be a countable dense subset of $C^*(A,X)\subset A_\infty$ and lift each $y^{(i)}$ to a sequence $(y^{(i)}_n)_{n=1}^\infty$ in $\ell^\infty(A)$. For $n\in\mathbb N$, define a finite subset of $A$ by $\mathcal F_n=\{y^{(i)}_m:1\leq i,m\leq n\}$. For each $n$, use Lemma \ref{L:Key} to obtain cpc order zero maps $\phi^{(0)}_n,\dots,\phi^{(m)}_n:M_k\rightarrow A$ with
$$
\|[\phi^{(i)}_n(x),y]\|\leq \frac{1}{n}\|x\|,
$$
for $i\in\{0,\dots,m\}$, $x\in M_k$, $y\in\mathcal F_n$ and $n\in\mathbb N$ and such that for each $\rho\in \partial_eT(A)$ and $n\in\mathbb N$, there exists $i(\rho,n)$ with $\rho(\phi^{(i(\rho,n))}_n(1_k))>1-1/n$. These maps induce cpc order zero maps $\phi^{(0)},\dots,\phi^{(m)}:M_k\rightarrow A_\infty\cap A'\cap X'$.
Consider $\rho\in \partial_eT(A)$. For $n\in\mathbb N$, $a\in A_+$ with $\|a\|\leq 1$, we have
\begin{align}
\rho\Big(\sum_{i=0}^m\phi^{(i)}_n(1_k)a\Big)&\geq\rho(\phi_n^{(i(\rho,n))}(1_k)a)\nonumber\\
&=\rho(a)-\rho\Big(\big(1_A-\phi_n^{(i(\rho,n))}(1_k)\big)a\Big)\nonumber\\
&\geq\rho(a)-\rho(1_A-\phi_n^{(i(\rho,n))}(1_k))\nonumber\\
&\geq\rho(a)-\frac{1}{n}.\label{L:S1:1}
\end{align}
By convexity, the estimate (\ref{L:S1:1}) holds for all $\rho\in T(A)$. Given a sequence $(\tau_n)_{n=1}^\infty$ in $T(A)$, a sequence $(b_n)_{n=1}^\infty$ of positive contractions in $A$ representing $b\in A_\infty$, and a free ultrafilter $\omega\in\beta\mathbb N\setminus\mathbb N$, taking limits in (\ref{L:S1:1}) gives
$$
\lim_{n\rightarrow\omega}\tau_n\Big(\sum_{i=0}^m\phi^{(i)}_n(1_k)b_n\Big)\geq\lim_{n\rightarrow\omega}\tau_n(b_n).
$$
That is
$$
\tau\Big(\sum_{i=0}^m\phi^{(i)}(1_k)b\Big)\geq\tau(b),
$$
for all $\tau\in T_\infty(A)$ and all $b\in (A_\infty)_+$, verifying (\ref{L:S1:2}).
\end{proof}
Before proceeding, we extract a standard central sequence argument from \cite{W:Invent2}. Writing $\beta=\frac{1}{\overline{k}+1}$ and following the proof of \cite[Proposition 4.6]{W:Invent2} verbatim from equation (38) through to the 4th displayed equation on page 288 of \cite{W:Invent2}, one obtains the following lemma.
\begin{lemma}\label{W:Extract}
Let $A$ be a separable unital $C^*$-algebra, let $X\subset A_\infty$ be a separable subspace and let $0<\beta<1$. Suppose that for each $\eta>0$, there exist orthogonal positive contractions $d^{(0)}_\eta,d^{(1)}_\eta$ in $A_\infty\cap A'\cap X'$ with
$$
\tau(d^{(i)}_\eta b)\geq \beta\tau(b)-\eta
$$
for $i\in\{0,1\}$, $\tau\in T_\infty(A)$ and contractions $b\in C^*(A,X)_+$. Then there exist orthogonal positive contractions $d^{(0)},d^{(1)}\in A_\infty\cap A'\cap X'$ with
$$
\tau(d^{(i)}b)\geq\beta\tau(b),
$$
for $i\in\{0,1\}$, $\tau\in T_\infty(A)$ and contractions $b\in C^*(A,X)_+$.
\end{lemma}
We can now use Lemma \ref{L:S1} to establish a version of \cite[Proposition 4.6]{W:Invent2}. Essentially the argument is the same as the deduction of (36) and (37) from (33) in \cite{W:Invent2}, but since the maps arising in our proof have slightly different domains, we give the details for completeness. For $0\leq\eta_1<\eta_2$, we denote by $g_{\eta_1,\eta_2}$ the continuous piecewise linear function on $\mathbb R$ given by
\begin{equation}\label{Defg}
g_{\eta_1,\eta_2}(t)=\begin{cases}1,&t\geq \eta_2;\\\frac{t-\eta_1}{\eta_2-\eta_1},&\eta_1<t<\eta_2;\\0,&t\leq\eta_1.\end{cases}
\end{equation}
\begin{lemma}\label{L:S2}
Given $m\geq 0$ and $k\geq 1$ there is $1\leq L_{m,k}\in\mathbb N$ such that, given a simple separable unital nuclear nonelementary $C^*$-algebra $A$ with $T(A)\neq\emptyset$ such that $\partial_eT(A)$ is compact with $\dim(\partial_eT(A))\leq m$, and a separable subspace $X\subset A_\infty$, there exist pairwise orthogonal contractions
$$
d^{(1)},\dots,d^{(k)}\in A_\infty\cap A'\cap X'
$$
such that
$$
\tau(d^{(i)}b)\geq \frac{1}{L_{m,k}}\tau(b)
$$
for all $i\in\{1,\dots,k\}$, $\tau\in T_\infty(A)$ and $b\in C^*(A,X)_+\subset A_\infty$.
\end{lemma}
\begin{proof}
When $k=1$, we can take $L_{m,k}=1$ and $d^{(1)}=1_{A_\infty}$. We prove the statement when $k=2$. Once the statement is established for $k=2$, the general case follows by induction using exactly the same argument as in the last two paragraphs of the proof of \cite[Proposition 4.6]{W:Invent2}.
Define $L_{m,2}=2(m+1)$ and fix a separable subspace $X\subset A_\infty$. By Lemma \ref{L:S1}, there exist cpc order zero maps $\phi^{(0)},\dots,\phi^{(m)}:M_{2(m+1)}\rightarrow A_\infty\cap A'\cap X'$ such that
\begin{equation}\label{L:S2:2}
\tau\Big(\sum_{i=0}^m\phi^{(i)}(1_{2(m+1)})b\Big)\geq\tau(b),
\end{equation}
for all $b\in (A_\infty)_+$ and $\tau\in T_\infty(A)$. For contractions $b\in C^*(A,X)_+$, the maps $\phi^{(i)}(\cdot)b$ are cpc and order zero, so $\tau(\phi^{(i)}(\cdot)b)$ is a trace on $M_{2(m+1)}$, \cite[Corollary 4.4]{WZ:MJM}. Thus
$$
\tau(\phi^{(i)}(e_{11})b)=\frac{1}{2(m+1)}\tau(\phi^{(i)}(1_{2(m+1)})b),
$$
for all $i\in\{0,\dots,m\}$, all contractions $b\in C^*(A,X)_+$ and all $\tau\in T_\infty(A)$. Summing over $i$, we have
\begin{equation}\label{L:S2:1}
\tau\Big(\sum_{i=0}^m\phi^{(i)}(e_{11})b\Big)=\frac{1}{2(m+1)}\tau\Big(\sum_{i=0}^m\phi^{(i)}(1_{2(m+1)})b\Big)\geq\frac{1}{2(m+1)}\tau(b),
\end{equation}
for all $b\in C^*(A,X)_+$ and all $\tau\in T_\infty(A)$.
For $\eta>0$, define
$$
d_\eta^{(0)}=g_{\eta,2\eta}\Big(\sum_{i=0}^m\phi^{(i)}(e_{11})\Big),\quad d_\eta^{(1)}=1_{A_\infty}-g_{0,\eta}\Big(\sum_{i=0}^m\phi^{(i)}(e_{11})\Big)
$$
so that $d^{(0)}_\eta$ and $d^{(1)}_\eta$ are pairwise orthogonal positive contractions in $A_\infty\cap A'\cap X'$. We have $d^{(0)}_\eta+\eta 1_{A_\infty}\geq \sum_{i=0}^m\phi^{(i)}(e_{11})$ so (\ref{L:S2:1}) gives
\begin{equation}\label{L:S2:3}
\tau(d^{(0)}_\eta b)\geq\frac{1}{2(m+1)}\tau(b)-\eta,
\end{equation}
for contractions $b\in C^*(A,X)_+$ and $\tau\in T_\infty(A)$. For a contraction $b\in C^*(A,X)_+$ and $\tau\in T_\infty(A)$ we have
\begin{align}
\tau((1_{A_\infty}-d^{(1)}_\eta)b)&=\tau\Big(g_{0,\eta}\Big(\sum_{i=0}^m\phi^{(i)}(e_{11})\Big)b\Big)\nonumber\\
&\leq\lim_{l\rightarrow\infty}\tau\Big(\Big(\sum_{i=0}^m\phi^{(i)}(e_{11})\Big)^{1/l} b\Big)\nonumber\\
&\leq\sum_{i=0}^m\lim_{l\rightarrow\infty}\tau\left((\phi^{(i)}(e_{11}))^{1/l}b\right)\label{L:S2:4}\\
&=\sum_{i=0}^m\lim_{l\rightarrow\infty}\tau\left((\phi^{(i)})^{1/l}(e_{11})b\right)\nonumber\\
&=\sum_{i=0}^m\lim_{l\rightarrow\infty}\frac{1}{2(m+1)}\tau\left((\phi^{(i)})^{1/l}(1_{2(m+1)})b\right)\label{L:S2:5}\\
&\leq\frac{m+1}{2(m+1)}\tau(b)=\frac{1}{2}\tau(b).\label{L:S2:6}
\end{align}
Here (\ref{L:S2:4}) uses the fact that $\langle \sum_{i=0}^m\phi^{(i)}(e_{11})\rangle\leq \sum_{i=0}^m\langle \phi^{(i)}(e_{11})\rangle$ in the Cuntz semigroup $\mathrm{Cu}(C^*(\phi^{(i)}(e_{11}):i=0,\dots,m))$ and $a\mapsto\lim_{l\rightarrow\infty}\tau(a^{1/l}b)$ is a dimension function on $C^*(\phi^{(i)}(e_{11}):i=0,\dots,m)\subset A_\infty\cap A'\cap X'$. The equality (\ref{L:S2:5}) follows as for each $i$ and $l$, the map $\tau((\phi^{(i)})^{1/l}(\cdot)b)$ is a trace on $M_{2(m+1)}$, \cite[Corollary 4.4]{WZ:MJM}. The estimate (\ref{L:S2:6}) gives
\begin{equation}\label{L:S2:7}
\tau(d^{(1)}_\eta b)\geq \Big(1-\frac{1}{2}\Big)\tau(b)=\frac{1}{2}\tau(b)
\end{equation}
for all $\tau\in T_\infty(A)$ and $b\in C^*(A,X)_+$.
Thus
$$
\tau(d^{(i)}_\eta b)\geq\frac{1}{2(m+1)}\tau(b)-\eta
$$
for $i=0,1$, $\tau\in T_\infty(A)$ and contractions $b\in C^*(A,X)_+$. As such the $k=2$ case of the lemma follows from Lemma \ref{W:Extract}.
\end{proof}
Combining the previous two lemmas we obtain order zero maps $M_k\rightarrow A_\infty\cap A'$ which are nowhere small in trace in the presence of compact finite dimensional extremal boundary.
\begin{proposition}\label{P:S3}
Given $m\geq 0$ there is $0<\alpha_{m}\leq 1$ such that, given $k\geq 2$, a simple separable unital nuclear nonelementary $C^*$-algebra $A$ such that $T(A)\neq\emptyset$ and $\partial_eT(A)$ is compact with $\dim(\partial_eT(A))\leq m$, and a separable subspace $X\subset A_\infty$, there exists a cpc order zero map $\Phi:M_k\rightarrow A_\infty\cap A'\cap X'$ such that
$$
\tau(\Phi(1_k)b)\geq \alpha_{m}\tau(b),
$$
for all $\tau\in T_\infty(A)$ and $b\in C^*(A,X)_+\subset A_\infty$.
\end{proposition}
\begin{proof}
Fix $m\geq 0$, $k\geq 2$ and a separable subspace $X\subset A_\infty$. Suppose that $A$ is simple separable unital nuclear with $T(A)\neq\emptyset$ and $\partial_eT(A)$ compact and with $\dim(\partial_eT(A))\leq m$. By Lemma \ref{L:S1}, there are cpc order zero maps $\phi^{(0)},\ldots,\phi^{(m)}:M_k\rightarrow A_\infty\cap A'\cap X'$ with
\begin{equation}\label{P:S3:1}
\tau\Big(\sum_{i=0}^m\phi^{(i)}(1_k)b\Big)\geq\tau(b),
\end{equation}
for all $\tau\in T_\infty(A)$ and all $b\in (A_\infty)_+$. By Lemma \ref{L:S2}, there exists $L_{m,m+1}\in\mathbb N$ and pairwise orthogonal contractions $d^{(0)},\dots,d^{(m)}$ in $A_\infty\cap A'\cap X'\cap (\mathrm{span}\{\phi^{(i)}(M_k):i=0,\dots,m\})'$ such that
\begin{equation}\label{P:S3:4}
\tau(d^{(i)}b)\geq \frac{1}{L_{m,m+1}}\tau(b),
\end{equation}
for all $i\in\{0,\dots,m\}$, $b\in C^*(A,X,\phi^{(j)}(M_k):j=0,\dots,m)_+$ and $\tau\in T_\infty(A)$.
Define $\Phi:M_k\rightarrow A_\infty\cap A'\cap X'$ by
$$
\Phi(x)=\sum_{i=0}^m\phi^{(i)}(x)d^{(i)}.
$$
This is cpc and order zero as the $d^{(i)}$'s are pairwise orthogonal and commute with the image of the $\phi^{(j)}$'s. Given a contraction $b\in C^*(A,X)_+$ and $\tau\in T_\infty(A)$, we have
\begin{align}
\tau(\Phi(1_k)b)&=\tau\Big(\sum_{i=0}^m\phi^{(i)}(1_k)d^{(i)}b\Big)\nonumber\\
&\geq \frac{1}{L_{m,m+1}}\tau\Big(\sum_{i=0}^m\phi^{(i)}(1_k)b\Big)\label{P:S3:2}\\
&\geq\frac{1}{L_{m,m+1}}\tau(b)\label{P:S3:3},
\end{align}
where (\ref{P:S3:2}) follows from (\ref{P:S3:4}) and (\ref{P:S3:3}) from (\ref{P:S3:1}). Thus we can take $\alpha_m=\frac{1}{L_{m,m+1}}$.
\end{proof}
A geometric sequence argument in the spirit of \cite{W:Invent1,W:Invent2} can be used to obtain uniformly tracially large order zero maps from Proposition \ref{P:S3}. The proof we give below uses the estimates from the geometric series argument in \cite[Lemma 5.11]{W:Invent2} but we simplify the calculations a little by taking a maximality approach. We work abstractly from the conclusion of Proposition \ref{P:S3} and so begin by noting that this implies the conclusion of Lemma \ref{L:S2}. We continue to use the functions $g_{\eta_1,\eta_2}$ defined in (\ref{Defg}).
\begin{lemma}\label{L:GS}
Let $k\geq 2$ and suppose $A$ is a separable unital $C^*$-algebra for which there exists $\alpha>0$ with the property that for all separable subspaces $X\subset A_\infty$, there exists a cpc order zero map $\Phi:M_k\rightarrow A_\infty\cap A'\cap X'$ such that
\begin{equation}\label{L:GS:1}
\tau(\Phi(1_k)b)\geq \alpha\tau(b),
\end{equation}
for all $\tau\in T_\infty(A)$ and $b\in C^*(A,X)_+\subset A_\infty$. Then $A$ admits uniformly tracially large cpc order zero maps $M_k\rightarrow A_\infty\cap A'$.
\end{lemma}
\begin{proof}
Fix $k\geq 2$. First note that the hypothesis gives that there exists some $\gamma>0$ with the property that for any separable subspace $X\subset A_\infty$, there exist pairwise orthogonal positive contractions $d^{(0)},d^{(1)}\in A_\infty\cap A'\cap X'$ such that
\begin{equation}\label{L:GS:2}
\tau(d^{(i)}b)\geq \gamma\tau(b)
\end{equation}
for $i\in\{0,1\}$, $\tau\in T_\infty(A)$ and $b\in C^*(A,X)_+\subset A_\infty$. Indeed, one can take a cpc order zero map $\Phi:M_k\rightarrow A_\infty\cap A'\cap X'$ satisfying (\ref{L:GS:1}). For a contraction $b\in C^*(A,X)_+$, $\Phi(\cdot)b$ defines a cpc order zero map on $M_k$ so that $\tau(\Phi(\cdot)b)$ is a trace on $M_k$ for each $\tau\in T_\infty(A)$ (\cite[Corollary 4.4]{WZ:MJM}). As such
$$
\tau(\Phi(e)b)=\frac{\mathrm{Tr}_{M_k}(e)}{\mathrm{Tr}_{M_k}(1_k)}\tau(\Phi(1_k)b)\geq\frac{\mathrm{Tr}_{M_k}(e)}{\mathrm{Tr}_{M_k}(1_k)}\alpha\tau(b)
$$
for all $e\in (M_k)_+$, $\tau\in T_\infty(A)$ and $b\in C^*(A,X)_+$. Taking $d^{(i)}=\Phi(e^{(i)})$ for a pair $e^{(0)},e^{(1)}$ of orthogonal projections in $M_k$ of normalized trace at least $1/3$ we can take $\gamma=\alpha/3$.
Let $\alpha_0>0$ be the supremum of all $\alpha\geq 0$ with the property that for each separable subspace $X\subset A_\infty$, there exists a cpc order zero map $\Phi:M_k\rightarrow A_\infty\cap A'\cap X'$ satisfying (\ref{L:GS:1}). We must prove that $\alpha_0=1$, so suppose to the contrary that $0<\alpha_0<1$. Fix a separable subspace $X\subset A_\infty$ and take $\varepsilon>0$ such that
\begin{equation}\label{L:GS:6}
\alpha_1=\Big(\big(1-(\alpha_0-\varepsilon)\gamma\big)(\alpha_0-3\varepsilon)+(\alpha_0-\varepsilon)\gamma\Big)>\alpha_0.
\end{equation}
Find a cpc order zero map $\Phi_0:M_k\rightarrow A_\infty\cap A'\cap X'$ such that
\begin{equation}\label{L:GS:4}
\tau(\Phi_0(1_k)b)\geq (\alpha_0-\varepsilon)\tau(b)
\end{equation}
for all $\tau\in T_\infty(A)$ and $b\in C^*(A,X)_+\subset A_\infty$. Find pairwise orthogonal contractions $d^{(0)},d^{(1)}\in A_\infty\cap A'\cap X'\cap \Phi_0(M_k)'$ such that
\begin{equation}\label{L:GS:8}
\tau(d^{(i)}f(\Phi_0)(1_k)b)\geq \gamma\tau(f(\Phi_0)(1_k)b)
\end{equation}
for $i\in\{0,1\}$, $f\in C_0(0,1]$, $\tau\in T_\infty(A)$ and $b\in C^*(A,X)_+$.
Define $$
\Phi_1(\cdot)=g_{2\varepsilon,3\varepsilon}(\Phi_0)(\cdot)+d^{(0)}\left(g_{\varepsilon,2\varepsilon}-g_{2\varepsilon,3\varepsilon}\right)(\Phi_0)(\cdot).
$$
This is certainly cpc. To see that $\Phi_1$ is order zero, note that for $x\in (M_k)_+$ we have
$$
\Phi_1(x)\leq g_{\varepsilon,2\varepsilon}(\Phi_0)(x)
$$
as $d^{(0)}$ commutes with $C^*(\Phi_0(M_k))$. Given $e,f\in (M_k)_+$ with $ef=0$, we have $$g_{\varepsilon,2\varepsilon}(\Phi_0)(e)^{1/2}g_{\varepsilon,2\varepsilon}(\Phi_0)(f)^{1/2}=0.$$ As $\Phi_1(e)^{1/2}\leq g_{\varepsilon,2\varepsilon}(\Phi_0)(e)^{1/2}$, there exists a sequence $(z_m)_{m=1}^\infty$ of contractions in $A$ with $z_mg_{\varepsilon,2\varepsilon}(\Phi_0)(e)^{1/2}\rightarrow \Phi_1(e)^{1/2}$ (see \cite[Lemma A-1]{H:MMJ}). Similarly there exists a sequence of contractions $(w_m)_{m=1}^\infty$ in $A$ with $g_{\varepsilon,2\varepsilon}(\Phi_0)(f)^{1/2}w_m\rightarrow \Phi_1(f)^{1/2}$. As such
$$
\Phi_1(e)\Phi_1(f)=\lim_{m\rightarrow\infty}\Big(\Phi_1(e)^{1/2}z_mg_{\varepsilon,2\varepsilon}(\Phi_0)(e)^{1/2}g_{\varepsilon,2\varepsilon}(\Phi_0)(f)^{1/2}w_m\Phi_1(f)^{1/2}\Big)=0.
$$
Define
$$
h=d^{(1)}\left(g_{0,\varepsilon}-g_{\varepsilon,2\varepsilon}\right)(\Phi_0)(1_k)+(1_{A_\infty}-g_{0,\varepsilon}(\Phi_0)(1_k)).
$$
We have $d^{(1)}\perp d^{(0)}$ and $\left(g_{0,\varepsilon}-g_{\varepsilon,2\varepsilon}\right)(\Phi_0)(1_k)\perp g_{2\varepsilon,3\varepsilon}(\Phi_0)(1_k)$ so that $$d^{(1)}\left(g_{0,\varepsilon}-g_{\varepsilon,2\varepsilon}\right)(\Phi_0)(1_k)\perp\Phi_1(1_k).$$ Also $(1_{A_\infty}-g_{0,\varepsilon}(\Phi_0)(1_k))\perp \Phi_1(1_k)$ so that $h\perp\Phi_1(1_k)$.
Now use the hypothesis again to find a cpc order zero map $\Phi_2:M_k\rightarrow A_\infty\cap A'\cap X'\cap\{h\}'$ such that
$$
\tau(\Phi_2(1_k)hb)\geq(\alpha_0-\varepsilon)\tau(hb)
$$
for all $b\in C^*(A,X)_+$ and $\tau\in T_\infty(A)$. The estimate (\ref{L:GS:8}) gives
\begin{eqnarray}
\tau(\Phi_2(1_k)hb)\geq (\alpha_0-\varepsilon)\tau(hb)&\geq &(\alpha_0-\varepsilon)\big(\gamma\tau\big((g_{0,\varepsilon}-g_{\varepsilon,2\varepsilon})(\Phi_0)(1_k)b\big) \nonumber \\
&& +\tau\big((1_{A_\infty}-g_{0,\varepsilon}(\Phi_0)(1_k))b\big)\big)\nonumber\\
&\geq &(\alpha_0-\varepsilon)\gamma\tau\big((1_{A_\infty}-g_{\varepsilon,2\varepsilon}(\Phi_0)(1_k))b\big)\label{L:GS:3}
\end{eqnarray}
as $\gamma\leq 1$.
Define $\Psi:M_k\rightarrow A_\infty\cap A'\cap X'$ by $\Psi(x)=\Phi_1(x)+\Phi_2(x)h$. This is cpc and order zero as $\Phi_1(\cdot)$ and $\Phi_2(\cdot)h$ are cpc and order zero with orthogonal ranges. Then for $b\in C^*(A,X)_+$, we have
\begin{align}
\tau(\Psi(1_k)b)&=\tau(\Phi_1(1_k)b)+\tau(\Phi_2(1_k)hb)\nonumber\\
&\geq \tau\Big(\big(g_{2\varepsilon,3\varepsilon}(\Phi_0)(1_k)+d^{(0)}(g_{\varepsilon,2\varepsilon}-g_{2\varepsilon,3\varepsilon})(\Phi_0)(1_k)\big)b\Big)\nonumber\\
&\quad+(\alpha_0-\varepsilon)\gamma\tau\big((1_{A_\infty}-g_{\varepsilon,2\varepsilon}(\Phi_0)(1_k))b\big)\nonumber\\
&\geq \tau\big(g_{2\varepsilon,3\varepsilon}(\Phi_0)(1_k)b\big)+(\alpha_0-\varepsilon)\gamma\tau\big((1_{A_\infty}-g_{2\varepsilon,3\varepsilon}(\Phi_0)(1_k))b\big)\nonumber\\
&=\big(1-(\alpha_0-\varepsilon)\gamma\big)\tau\big(g_{2\varepsilon,3\varepsilon}(\Phi_0)(1_k)b\big)+(\alpha_0-\varepsilon)\gamma\tau(b)\label{L:GS:5}
\end{align}
using (\ref{L:GS:8}), (\ref{L:GS:3}) and the crude estimate $(\alpha_0-\varepsilon)<1$. As $g_{2\varepsilon,3\varepsilon}(\Phi_0)(1_k)\geq \Phi_0(1_k)-2\varepsilon 1_A$, (\ref{L:GS:4}) gives
$$
\tau(g_{2\varepsilon,3\varepsilon}(\Phi_0)(1_k)b)\geq (\alpha_0-3\varepsilon)\tau(b)
$$
for all $b\in C^*(A,X)_+$. Combining this with (\ref{L:GS:5}) gives
$$
\tau(\Psi(1_k)b)\geq \Big(\big(1-(\alpha_0-\varepsilon)\gamma\big)(\alpha_0-3\varepsilon)+(\alpha_0-\varepsilon)\gamma\Big)\tau(b)=\alpha_1\tau(b)
$$
for all $b\in C^*(A,X)_+$. The choice of $\varepsilon$ in (\ref{L:GS:6}) ensures that $\alpha_1>\alpha_0$, giving the required contradiction.
\end{proof}
\begin{theorem}\label{T:UTL}
Let $A$ be a simple separable unital nuclear nonelementary $C^*$-algebra with $T(A)\neq\emptyset$ and $\partial_eT(A)$ compact and $\dim(\partial_eT(A))<\infty$. Then for each $k\geq 2$, $A$ admits uniformly tracially large cpc order zero maps $M_k\rightarrow A_\infty\cap A'$.
\end{theorem}
\begin{proof}
This follows from Proposition \ref{P:S3} and Lemma \ref{L:GS}.
\end{proof}
\begin{corollary}\label{MainCor}
Let $A$ be a simple separable unital nuclear nonelementary $C^*$-algebra with $T(A)\neq\emptyset$ and $\partial_eT(A)$ compact and $\dim(\partial_eT(A))<\infty$. If $A$ has strict comparison, then $A$ is $\mathcal Z$-stable.
\end{corollary}
\begin{proof}
This follows from Theorems \ref{MS} and \ref{T:UTL}.
\end{proof}
\end{document} |
\begin{document}
\title{Hybrid Bounds on Twisted L-Functions Associated to Modular Forms}
\author{Chan Ieong Kuan}
\date{\today}
\begin{abstract}
For $f$ a primitive holomorphic cusp form of even weight $k \geq 4$, level $N$, and $\chi$ a Dirichlet character mod $Q$ with $(Q,N) = 1$, we establish the following hybrid subconvex bound for $t \in \mathbb{R}$:
\[ L(\half + it, f_\chi) \ll Q^{\frac{3}{8} + \varepsilon} (1+|t|)^{\frac{1}{3-2\theta} + \varepsilon} \]
where $\theta$ is the best bound toward the Ramanujan-Petersson conjecture at the infinite place. The implied constant only depends on $f$ and $\varepsilon$. This is done via amplification and taking advantage of a shifted convolution sum of two variables as defined and analyzed in \cite{Jeff}.
\end{abstract}
\maketitle
\section{Introduction} \label{sec:intro}
\subsection{Hybrid Bounds}
The growth of $L$-functions on the critical line $\operatorname{Re} s = \half$ has been one of the most studied problems in analytic number theory. This paper is concerned with $L$-functions of a holomorphic cusp form $f$, twisted by a character $\chi$ of conductor $Q$. By using functional equation and Phragm\'en-Lindel\"of principle, one can obtain the convexity bound
\[ L(\half+it, f_\chi) \ll (Q(1+|t|))^{\half+\varepsilon}, \]
where we suppress the level and weight aspects here.
Throughout the years, there have been many attempts at lowering the exponents, most of which have focused on one chosen aspect. Since our result concerns $Q$- and $t$-aspects, we will state some known results in these directions.
In the $t$-aspect, Good showed in \cite{good1983square} that for $f$ a holomorphic cusp form of the full modular group,
\[ L(\half+it,f) \ll (1+|t|)^{\frac{1}{3} + \varepsilon} \]
Meurman showed the same result for $f$ a Maass form of the full modular group in \cite{meurman1990}. For number fields, subconvexity results were proved in Petridis and Sarnak \cite{PetridisSarnak2001} and Diaconu and Garrett \cite{Diaconu2010}.
In the $Q$-aspect, the first subconvexity result was obtained by Duke, Friedlander and Iwaniec \cite{Duke1993} for holomorphic cusp forms of full level. Later, Bykovskii showed in \cite{bykovskii1998trace} that for general level,
\[ L(\half+it, f_\chi) \ll_t Q^{\frac{3}{8} + \varepsilon}, \]
with a polynomial dependence in $(1+|t|)$, provided that the nebentypus of $f$ is trivial. This same bound without the nebentypus restriction is obtained in Hoffstein and Hulse \cite{Jeff}, and Blomer and Harcos \cite{blomer2008hybrid}. In \cite{blomer2008hybrid}, $f$ can also be taken as a Maass form.
Hybrid bounds in $Q$- and $t$-aspects have been worked on by Blomer and Harcos in \cite{blomer2008hybrid}, Munshi \cite{munshi2012circle} and Han Wu \cite{Wu2012Burgess}, the last of which, following the method of Michel and Venkatesh, uses amplification. The bound obtained is:
\[ L(\half+it,f_\chi) \ll (Q(1+|t|))^{\frac{3}{8} + \frac{\theta}{4} + \varepsilon}, \]
where no complementary series with parameter $>\theta$ appears as a component of a cuspidal automorphic representation of $GL_2(\mathbb{A})$.
One thing to note is that these hybrid bounds do not reach the best known exponents in the $t$-aspect. In this work, we partially resolve this situation by proving the following result.
\begin{theorem} \label{thm_main}
For $f$ a primitive holomorphic cusp form of even weight $k \geq 4$, level $N$, and $\chi$ a Dirichlet character mod $Q$, where $(Q,N) = 1$, we have
\[ L(\half + it, f_\chi) \ll (1+|t|)^{\frac{1}{3-2\theta} + \varepsilon} Q^{\frac{3}{8}+\frac{\theta}{4} + \varepsilon}, \]
where $\theta$ is a bound toward the Ramanujan-Petersson conjecture.
\end{theorem}
\begin{remark}
A bound of $\theta$ for congruence subgroups of $\operatorname{SL}_2(\mathbb{Z})$ is $\frac{7}{64}$ by Kim and Sarnak \cite{Kim:2003aa}. It should be noted that our theorem does not currently cover the case of Maass forms as the corresponding shifted convolution is not analyzed yet.
\end{remark}
Our work also uses amplification. The major difference between this work and \cite{Wu2012Burgess} is that we treat non-Archimedian and Archimedian places differently while Wu treated them uniformly. As such, we need more precise control on the $t$-aspect, which is achieved by relating the problem to the shifted convolution sum of two variables analyzed in \cite{Jeff}.
\subsection{Structure of this paper}
Our goal is to bound $L(\half + it, f_\chi)$ in the $Q$ and $t$-aspects. First, we quote relevant results from \cite{Jeff} in section \ref{sec:prep}. We then apply amplification methods in section \ref{sec:amp}, reducing the problem to understanding the growth in $Q$ and $t$ of the following expression, where $G$ and $\mathcal{L}$ are amplification parameters and $\alpha = \frac{1}{\log (Q(1+|t|))}$.
\begin{align*}
&\varphi(Q) G \sum_{\substack{l_1,l_2 \sim \mathcal{L} \\ l_1,l_2 \text{ prime}}} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{m_1,m_2 \geq 1 \\ m_1l_1 \equiv m_2l_2 (Q)}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} \notag \allowdisplaybreaks[0]\\
&\phantom{\varphi(Q) \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2)} \qquad \qquad \qquad \times V \left( \frac{m_1}{x} \right) V \left( \frac{m_2}{x} \right) e^{-G \left| \log \left(\tfrac{l_1m_1}{l_2m_2} \right) \right|}
\end{align*}
We then separate the analysis of this expression into the ``diagonal" portion ($m_1l_1 = m_2l_2$) and two ``off-diagonal" portions ($m_1l_1 = m_2l_2 + h_0Q$ and $m_2l_2 = m_1l_1 + h_1Q$, for $h_0, h_1 \geq 1$).
In section \ref{sec:diag}, we analyze the diagonal term with inverse Mellin transforms (propositions \ref{eq_S_d1_prop} and \ref{eq_S_d2_propo}).
For the off-diagonal portions, the first thing to note is they have the same contribution up to conjugation. Our analysis relies heavily on the shifted convolution sum of two variables $Z_Q(s,w)$ from \cite{Jeff}. By inverse Mellin transforms, we relate the off-diagonal term to a four-fold integral involving $Z_Q(s,w)$. This is done in section \ref{sec:off-d-setup} (proposition \ref{eq_S_o1_int}).
The analysis of the off-diagonal then splits into the discrete part and the continuous part, due to the fact that such a splitting exists for $Z_Q(s,w)$. The analysis of each part is done by moving lines of integration, with the primary goal of reducing the $x$-exponent as much as possible, and a secondary goal of reducing contribution of the $t$-aspect where possible. The results can be found in propositions \ref{eq_S_o1d_Zres_propo}, \ref{eq_S_o1d_Z_sp2}, and \ref{propo_total_cts}.
In the last section, we put the results of the previous sections together. Choosing $\mathcal{L}$ and $G$ optimally yields the theorem.
\section{Preparations}\label{sec:prep}
Throughout this paper, fix a holomorphic cusp form $f$ of even weight $k \geq 4$, level $N$:
\[ f(z) = \sumkinf{n}{1} A(n) n^{\frac{k-1}{2}} e(nz) = \sumkinf{n}{1} a(n) e(nz), e(z) = e^{2\pi iz} \]
\subsection{Shifted convolution of two variables}
The most crucial object of this paper is the shifted convolution of two variables $Z_Q(s,w)$, analyzed in \cite{Jeff}. We quote several of the results here for convenience. Fix $l_1,l_2$ to be primes relatively prime to $NQ$ and of size $\mathcal{L}$. The definition of $Z_Q(s,w)$ is as follows:
\begin{equation} \label{eq_Zq_defn2}
Z_Q(s,w) := \sum_{\substack{h_0,m_2 \geq 1 \\ m_1l_1 = m_2l_2 + h_0Q}} \frac{A(m_1) \conj{A(m_2)} \left( 1+\frac{h_0Q}{l_2m_2} \right)^{\frac{k-1}{2}}}{(l_2m_2)^s (h_0Q)^{w + \frac{k-1}{2}}}
\end{equation}
In \cite{Jeff}, it is shown that
\[ \lim_{\delta \to 0} Z_Q(s,u;\delta) = Z_Q(s,u) \]
and that $Z_Q(s,u;\delta)$ has the following spectral expansion
\begin{align}
&Z_Q(s,s'-s-\tfrac{k}{2} + 1;\delta) \notag\\ =& \frac{(4\pi)^k (l_1l_2)^{\frac{k-1}{2}} 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \Bigg( \sum_j L_Q(s',\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \notag \\
&\qquad + \mathcal{V}_{N[l_1,l_2]} \sum_{\mathfrak{a}} \invmellin{0} \zeta_{\mathfrak{a},Q}(s',-z) M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \,d z \Bigg) \label{eq_Z_decomp}
\end{align}
where the $\mathfrak{a}$-sum is over cusps of $\Gamma_0(N[l_1,l_2])$ and
\begin{align}
L_Q(s',\conj{u_j}) :&= \sum_{h \geq 1} \frac{\conj{\rho_j(-hQ)}}{(hQ)^{s'}}, \notag \\
\zeta_{\mathfrak{a},Q}(s',z) :&= \sum_{h \geq 1} \frac{\rho_\mathfrak{a}(-hQ,z)}{2(hQ)^{s'}}, \notag \\
U(z) :&= y^k f(l_1z) \conj{f(l_2z)}, \label{eq_L_kappa_U} \\
\mathcal{V}_{N} :&= \frac{\pi [\operatorname{SL}_2(\mathbb{Z}):\Gamma_0(N)]}{3} \notag
\end{align}
$\rho_j(n)$ being $n$-th Fourier coefficient of Maass form $u_j$ and $\rho_{\mathfrak{a}}(n,z)$ being $n$-th Fourier coefficient of Eisenstein series at cusp $\mathfrak{a}$ with holomorphic argument at $\half + z$. $[l_1,l_2]$ denotes the least common multiple of $l_1$ and $l_2$.
In \cite{Jeff}, it is also shown that in \eqref{eq_Z_decomp}, if we are to take the limit as $\delta$ goes to $0$, we actually require $\operatorname{Re} s < \half - \frac{k}{2}$ for the sum and integral there to be absolutely convergent.
\subsection{Some useful analytic information}
The properties of the $M$ and $Z$ functions that are relevant for this work are quoted in the following two propositions:
\begin{proposition} \label{eq_M_s_poles}
Let $z \in \mathbb{C} - \half \mathbb{Z}$. Then $M(s,\tfrac{z}{i};\delta)$ has simple poles at $s = \half \pm z - r$, for $r$ a nonnegative integer. We denote the following:
\[ \operatorname*{Res}_{s=\half \pm z - r} M(s,\tfrac{z}{i}; \delta) = c_r(\pm z; \delta), \]
where $c_r(\pm z,\delta)$ has the following explicit expression as $\delta \to 0$:
\[ \lim_{\delta \to 0} c_r(\pm z, \delta) = \frac{(-1)^r \sqrt{\pi} 2^{\mp z + r} \Gamma(\pm 2z - r) \Gamma(\half \mp z + r)}{r! \Gamma(\half + z) \Gamma(\half - z)} \]
And we have the following values at $z = \pm \half$, as $\delta \to 0$:
\begin{align}
c_r(-\half;\delta) &\to -\frac{2^{r+\half} \sqrt{\pi}}{2(r+1)!} \text{ for } r \geq 0 \\
c_0(\half; \delta) &\to \sqrt{\frac{\pi}{2}} \\
c_r(\half; \delta) &\to \frac{2^{r-\half} \sqrt{\pi}}{2r!} \text{ for } r \geq 1
\end{align}
Also for $\operatorname{Re} (s+z) \leq \half + \max(0, |\operatorname{Re} z|)$, with $s$ and $z$ at least a distance $\varepsilon$ away from poles,
\begin{equation} \label{eq_M_delta0}
\lim_{\delta \to 0} M(s,\tfrac{z}{i}; \delta) = \frac{\sqrt{\pi} 2^{\half - s} \Gamma(s-\half+z) \Gamma(s-\half-z) \Gamma(1-s)}{\Gamma(\half+z) \Gamma(\half - z)}
\end{equation}
\end{proposition}
\begin{proposition} \label{eq_Z_s_poles}
$Z(s,u;\delta)$ has simple poles at $s = \half \pm it_j - r$, where $r \in \mathbb{Z}_{\geq 0}$. Taking the residue at those points and $\delta \to 0$, we have the following:
\[ \lim_{\delta \to 0} \operatorname*{Res}_{s=\half + it_j - r} Z(s,s'-s-\tfrac{k}{2} + 1;\delta) = (l_1l_2)^{\frac{k-1}{2}} c_{r,j} L_Q(s'; \conj{u_j}), \]
where $c_{r,j}$ has growth, when $\widetilde{T} \gg 1$,
\begin{equation} \label{eq_crj_growth}
\sum_{|t_j| \sim \widetilde{T}} |c_{r,j}|^2 e^{\pi|t_j|} \ll \log (\widetilde{T}) (l_1l_2)^{-k} \widetilde{T}^{2r+1}
\end{equation}
\end{proposition}
We will also give an explicit expression for $\zeta_{\mathfrak{a},Q}(s',-z)$:
\begin{proposition} \label{eq_Zeta_explicit}
For cusp $\mathfrak{a}=\frac{b}{c}$ of $\Gamma_0(N)$, we write $c = c_0 c_1$, where $(c_0, \frac{N}{c}) = 1$. Then we have the following:
\begin{align}
&\zeta_{\mathfrak{a},Q}(s',-z) \notag \\
=& \frac{\pi^{\frac{1}{2}-z}}{\Gamma(\frac{1}{2}-z)} \left( \frac{(c,\frac{N}{c})}{cN}\right)^{\frac{1}{2}-z} \left( \frac{c}{(c,\frac{N}{c})} \right)^{1-s'-z} \frac{Q^{-(s'+z)}}{\varphi((c,\frac{N}{c}))} \\
&\qquad \qquad \times \sum_{\chi ((c,\frac{N}{c}))} \frac{\conj{\chi(-Qu)} L^{((c,\frac{N}{c}))}(s'+z,\conj{\chi}) L^{(\frac{N}{c})}(s'-z,\chi)}{L^{(N)}(1-2z,\chi^2)} \sum_{n | c_1^\infty} \frac{G_n(\chi)}{n^{s'+z}} \notag \\
&\qquad \qquad \times \prod_{p | c_0} (1-p^{-(1-z-s')}\chi(p)) \prod_{p^\gamma \| Q} (\sigma_{2z}^{(\chi^2)}(p^\gamma) - \conj{\chi(p)} p^{-(s'-z)} \sigma_{2z}^{(\chi^2)}(p^{\gamma-1})) \notag
\end{align}
where $n | c_1^\infty$ means that $n$ runs over all integers such that $n | c_1^k$ for sufficiently large integer $k$,
\begin{align*}
L^{(c)}(s,\chi) &= \prod_{p \nmid c} (1-\chi(p)p^{-s})^{-1} \\
\sigma_{z}^{\chi}(n) &= \sum_{d | n} \chi(d) d^z \\
G_n(\chi) &= \sum_{\substack{a \pmod{(c,\frac{N}{c})} \\ (a,c,\frac{N}{c}) = 1}} \chi(a) e \left(\frac{a n}{(c,\frac{N}{c})} \right), e(x) = e^{2\pi ix}
\end{align*}
For $\operatorname{Re} (s'+z) > 0$ and $\operatorname{Re} (s'-z) > 0$, the only poles come from the trivial character term.
\end{proposition}
\subsection{Some miscellaneous estimates and bounds}
We will also require the following estimate concerning $L$-functions:
\begin{proposition}
For $\operatorname{Re} s' \geq \half$ and $\theta$ a bound toward the Ramanujan--Petersson conjecture,
\begin{equation} \label{eq_L_growth}
\sum_{|t_j| \sim \widetilde{T}} |L_Q(s', \conj{u_j})|^2 e^{-\pi |t_j|} \ll Q^{-2s' + 2\theta} \mathcal{L}^{2+\varepsilon} (1+|s'|+ \widetilde{T})^{2+\varepsilon}
\end{equation}
\end{proposition}
Together with \eqref{eq_crj_growth} and the following fact
\[ \sum_{|t_j| \sim \widetilde{T}} |\langle U, u_j \rangle|^2 e^{\pi |t_j|} \ll \mathcal{L}^{-2k} \widetilde{T}^{2k} \log t,\]
we have the following proposition:
\begin{proposition} \label{propo_j_sums}
With the same notations as before,
\begin{align}
\sum_{|t_j| \sim \widetilde{T}} L_Q(s', \conj{u_j}) \conj{\langle U, u_j \rangle} &\ll Q^{-s'+\theta} \mathcal{L}^{1-k+\varepsilon} \widetilde{T}^{1+k+\varepsilon} \label{eq_L_inner} \\
\sum_{|t_j| \sim \widetilde{T}} L_Q(s', \conj{u_j}) c_{r,j} &\ll Q^{-s'+\theta} \mathcal{L}^{1-k+\varepsilon} \widetilde{T}^{\frac{3}{2}+r+\varepsilon} \label{eq_L_crj}
\end{align}
\end{proposition}
These estimates can be proved by Cauchy's inequality together with the facts quoted before the proposition.
We will finally note some analogous facts for Eisenstein series and a very particular functional equation involving the Eisenstein series at the $0$-cusp:
\begin{proposition} \label{inner_prod_props}
The inner product $\langle U, E_\mathfrak{a}(*,s) \rangle$ has the following properties:
\begin{enumerate}[leftmargin=1.5em]
\item $\displaystyle \operatorname{Res}_{s=1} \langle U, E_\mathfrak{a}(*,s) \rangle = \frac{(l_1l_2)^{-\frac{k-1}{2}}}{l_1 \mathcal{V}_{Nl_1}} \langle f,f \rangle$ if $l_1 = l_2$.
\item $\displaystyle \operatorname{Res}_{s=1} \langle U, E_\mathfrak{a}(*,s) \rangle = \frac{(l_1l_2)^{-\frac{k-1}{2}}}{l_1l_2 \mathcal{V}_{Nl_1l_2}} \langle f,f \rangle E_{l_1,l_2}(1)$ if $l_1 \neq l_2$.
\item $\displaystyle [l_1,l_2] \mathcal{V}_{N[l_1,l_2]} \int_{-T}^T \sum_{\mathfrak{a}} \zeta_{\mathfrak{a},Q}(\frac{1}{2},-it) \conj{\langle U, E_\mathfrak{a}(*,\frac{1}{2}+it) \rangle} \,dt \ll Q^{-s'} \mathcal{L}^{1-k+\varepsilon} T^{1+k+\varepsilon}$
\end{enumerate}
\end{proposition}
\begin{proposition} \label{FE_Eisen_0}
The Eisenstein series at $0$-cusp has the following functional equation:
\[ E_0(z,s) = \sum_{\mathfrak{a} = \frac{b}{c}} \frac{\sqrt{\pi} \Gamma(s-\frac{1}{2})}{\Gamma(s)} \frac{\varphi(\frac{N}{c})}{\varphi((c,\frac{N}{c}))} \left( \frac{(c,\frac{N}{c})}{N \cdot \frac{N}{c}} \right)^s \frac{\zeta^{(c)}(2s-1)}{\zeta^{(N)}(2s)} E_\mathfrak{a}(z,1-s) \]
\end{proposition}
\section{Amplifying both aspects} \label{sec:amp}
Our aim here is to understand the growth of $L(\half +it, f_\chi)$ in the $t$- and $Q$-aspects. Since it is sufficient to prove the result for eigenforms, we assume $f$ is an eigenform. We perform our investigation by averaging around $\half + it$ over a small interval as well as applying the amplification technique.
For this, we choose a rapidly decreasing function $V: \mathbb{R} \to \mathbb{R}$ such that its Mellin transform $v(s)$ is meromorphic between $-5 < \operatorname{Re} s < 5$. Moreover, $v(s)$ should only have a simple pole at $s=0$ with residue $1$ and exponential decay in $\operatorname{Im} s$ as $\operatorname{Im} s \to \infty$. An example of this is $v(s) = \frac{1}{5} \Gamma \left(\frac{s}{5} \right)$. Specifying $v(s)$ is enough, as:
\begin{equation}
V(x) = \invmellin{2} v(s) x^{-s} \,d s \label{eq_easy_mellin}
\end{equation}
We start by writing the $L$-function as a rapidly converging series:
\begin{lemma} \label{lemma_approx}
As $x \to \infty$,
\begin{equation} \label{eq_approx}
L\left( \half + it, f_\chi \right) = \sum_{n \geq 1} \frac{A(n)\chi(n)}{n^{\half + it}} V \left( \frac{n}{x} \right) + O(x^{-\varepsilon}).
\end{equation}
\end{lemma}
\begin{proof}
Consider the following inverse Mellin transform:
\begin{equation}
I_0 := \invmellin{2} L \left( \half + it + s, f_\chi \right) v(s) x^s \,d s
\end{equation}
On the one hand, since the argument of $L$-function in $I_0$ is in the region of absolute convergence, we have:
\begin{align*}
I_0= \invmellin{2} \sum_{n \geq 1} \frac{A(n) \chi(n)}{n^{\half + s + it}} v(s) x^s \,d s = \sum_{n \geq 1} \frac{A(n) \chi(n)}{n^{\half + it}} V \left( \frac{n}{x} \right)
\end{align*}
On the other hand, we can move the line of integration to $-\half - \varepsilon$, picking up the only simple pole at $s = 0$ and obtain:
\begin{equation*}
I_0 = L\left(\half + it, f_\chi \right) + O(x^{-\varepsilon})
\end{equation*}
Putting the two equivalent expressions of $I_0$ together proves the lemma.
\end{proof}
Our aim here is to get the bound on $L(\half + it, f_\chi)$. To this end, we first amplify the character and obtain:
\begin{equation} \label{eq_most_important}
|L(\half + it, f_\chi)|^2 |\sum_{l \sim \mathcal{L}} 1|^2 \leq \sum_{\psi (Q)} |L(\half+it,f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)}\psi(l)|^2 \end{equation}
where the first summation runs over all Dirichlet characters $\psi$ mod $Q$ and the $l$-sums are running over primes that are relatively prime to $QN$. The parameter $\mathcal{L}$ is to be chosen optimally later, subject to $\mathcal{L} < Q$.
Next we perform amplification on the $t$-aspect, with modified ideas based upon \cite{Hansen}. The result is the following:
\begin{lemma} \label{lemma_one_use}
\begin{align}
&|L(\half+it,f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2 \notag \\
\ll & \log^{4}(Q(1+|t|)) \!\!\!\!\! \int \limits_{|r| \leq A} \!\!\!\!\! \inttoinf{-\infty} \!\!\!\! |L(\half+i(t+r+r'),f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{\alpha - ir'} |^2 \,\frac{d r' d r}{\pi (1+\left( \frac{r'}{G} \right)^2)} \notag \\
& \qquad \qquad + O(\log^3 (Q(1+|t|))) + O(|\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2), \label{eq_amp_t_single}
\end{align}
where $A := \sqrt{10 \log (Q(1+|t|))}$, $\alpha := \frac{1}{\log (Q(1+|t|))}$ and $G \geq 2A$.
\end{lemma}
\begin{remark}
In the proof, we will see that the introduction of $G$ into the integral is via the positivity of the integrand. This leads to the desire to minimize $G$ subject to the constraint above.
\end{remark}
\begin{proof}
The proof relies on estimating $L(\sigma + it, f_\psi)$ by averaging $L(\sigma - \alpha + ir, f_\psi)$ over $r$ in a small interval centered around $t$. Each integral expression defined below is essentially illustrating this fact.
First we will show that $L(\half + it, f_\psi)$ is approximable by averaging the $L$-function over a small interval. To this end, consider the following integral:
\begin{equation} \label{eq_I1_defn}
I_1 := \invmellin{2} L(\half + it + s, f_\psi) \frac{e^{s^2}}{s} \,d s
\end{equation}
On the one hand, $I_1$ is $O(1)$ by bounding the $L$-function by a constant. On the other hand, if we move the line of integration down to $\operatorname{Re} s = -\alpha$, then we have:
\[ I_1 = L(\half+it, f_\psi) + \invmellin{-\alpha} L(\half+it+s,f_\psi) \frac{e^{s^2}}{s} \,d s. \]
When put together with \eqref{eq_I1_defn}, we derive that
\[ L(\half+it,f_\psi) = O(1) + \frac{1}{2\pi} \inttoinf{-\infty} L(\half-\alpha+it+ir, f_\psi) \frac{e^{(-\alpha+ir)^2}}{\alpha-ir} \,d r. \]
After taking absolute values and squaring both sides, one gets:
\begin{equation} \label{eq_critline}
| L(\half+it,f_\psi) |^2 \ll \left( \inttoinf{-\infty} | L(\half-\alpha+it+ir, f_\psi)| \frac{e^{\alpha^2-r^2}}{\sqrt{\alpha^2 + r^2}} \,d r \right)^2 + O(1)
\end{equation}
To continue our investigation, we will split the integral into two parts, $|r| \leq A$ and $|r| > A$, where $A = \sqrt{10 \log (Q(1+|t|))}$.
We start by examining the part of the integral with $|r| > A$, applying convexity for the $L$-function:
\begin{align*}
\int_{|r| > A} |L(\half-\alpha+it+ir, f_\psi)| \frac{e^{\alpha^2 - r^2}}{\sqrt{\alpha^2 + r^2}} \,d r &\ll \int_{|r| > A} (Q|t+r|)^{\half+\alpha} \frac{e^{\alpha^2 - r^2}}{r} \,d r \\
& \ll (Q(1+|t|))^{\half + \alpha} e^{\alpha^2 - A^2} \ll 1
\end{align*}
For the part $|r| \leq A$, we apply Cauchy's inequality and functional equation:
\begin{align*}
&\left( \int_{|r| \leq A} |L(\half-\alpha+it+ir, f_\psi)| \frac{e^{\alpha^2 - r^2}}{\sqrt{\alpha^2 + r^2}} \,d r \right)^2 \\
\leq &\int_{|r| \leq A} |L(\half-\alpha+it+ir, f_\psi)|^2 \,d r \cdot \int_{|r| \leq A} \frac{e^{2\alpha^2 - 2r^2}}{\alpha^2 + r^2} \,d r \\
\ll &\alpha^{-2} (Q(1+|t|))^{4\alpha} \int_{|r| \leq A} |L(\half+\alpha+it+ir, f_\psi)|^2 \,d r
\end{align*}
Putting these into \eqref{eq_critline}, recalling $A = \sqrt{10 \log (Q(1+|t|))}$ and $\alpha = \frac{1}{\log (Q(1+|t|))}$, we get
\begin{align}
|L(\half+it,f_\psi)|^2 &\ll \log^{2}(Q(1+|t|)) \int_{|r| \leq A} |L(\half+\alpha + i(t+r) ,f_\psi)|^2 \,d r + O(1) \notag
\end{align}
We will multiply both sides by $|\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2$, obtaining:
\begin{align}
&|L(\half+it,f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2 \notag \\
\ll &\log^{2}(Q(1+|t|)) \!\!\!\! \int \limits_{|r| \leq A} \!\!\!\! |L(\half+\alpha + i(t+r) ,f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2 \,d r \!+\! O(|\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2) \label{eq_critline_1}
\end{align}
Now, we approximate $L(\half + \alpha + it_2, f_\psi) \sum_{l \sim \mathcal{L}} \psi(l) \conj{\chi(l)}$ by an integral over a small interval on the critical line. To achieve this, we construct the following auxiliary integral:
\begin{equation} \label{eq_I2_defn}
I_2 := \invmellin{2} L(\half + \alpha + it_2 + s, f_\psi) \left(\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{-s} \right) \frac{e^{s^2}}{s} \,d s
\end{equation}
Doing the same analysis as before and combining expressions, we obtain:
\begin{align}
&|L(\half+it,f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2 \notag \\
\ll & \log^{4}(Q(1+|t|)) \int \limits_{|r| \leq A} \int \limits_{|r'| \leq A_r} |L(\half+i(t+r)+ir',f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{\alpha - ir'} |^2 \,d r' \,d r \notag \\
&\qquad \qquad + O(\log^2 (Q(1+|t|)) A) + O(|\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2) \label{eq_final_form_1}
\end{align}
where $A_r = \sqrt{10 \log(Q|t+r|)}$. Note that all inequalities are independent of $Q$, $\mathcal{L}$ and $t$, as long as $\mathcal{L} \ll Q$. Also note that we can make $A_r$ uniform by enlarging the region to $|r'| \leq \sqrt{40 \log(Q(1+|t|))} = 2A$, the inequality still holding due to positivity of the integrand.
Continuing to use the positivity of the integrand, for $G \geq 3A$, the integral is bounded as follows:
{
\allowdisplaybreaks
\begin{align}
& \int_{|r| \leq A} \int_{|r'| \leq 2A} |L(\half+i(t+r)+ir',f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{\alpha - ir'} |^2 \,d r' \,d r \notag \\
\ll & \int_{|r| \leq G} \! |L(\half+i(t+r),f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{\alpha - ir} |^2 \,\frac{d r}{\pi (1+\left( \frac{r}{G} \right)^2)} \notag \\
\ll & \int_{|r| \leq A} \inttoinf{-\infty} |L(\half+it+ir,f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{\alpha - ir} |^2 \,\frac{dr}{\pi (1+\left( \frac{r}{G} \right)^2)} \label{eq_final_form_2}
\end{align}
}
Putting this into \eqref{eq_final_form_1} gives the lemma.
\end{proof}
Putting this proposition together with \eqref{eq_most_important}, we derive the following:
\begin{proposition} \label{propo_amp}
With the same values of $A$, $\alpha$ and the same constraint on $G$ as in Lemma~\ref{lemma_one_use},
\begin{align}
&|L(\half + it, f_\chi)|^2 |\sum_{l \sim \mathcal{L}} 1|^2 \notag \\
\ll & \log^{4}(Q(1+|t|)) \inttoinf{-\infty} \sum_{\psi (Q)} |L(\half+i(t+r),f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{\alpha - ir} |^2 \,\frac{d r}{\pi (1+\left( \frac{r}{G} \right)^2)} \notag \\
&+ O(Q \log^3 (Q(1+|t|))) + O(Q\mathcal{L}) \label{eq_final_form_3}
\end{align}
\end{proposition}
\begin{proof}
The only part that requires a proof is the last error term. In particular, we should show that
\[ \sum_{\psi (Q)} |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2 \ll Q\mathcal{L} \]
Starting with the left-hand side, we have:
\begin{align*}
\sum_{\psi(Q)} |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2 &= \sum_{\psi(Q)} \sum_{l_1, l_2 \sim \mathcal{L}} \conj{\chi(l_1)} \psi(l_1) \chi(l_2) \conj{\psi(l_2)} = \varphi(Q) \sum_{l} 1\ll Q\mathcal{L}
\end{align*}
The second equality is obtained by summing over the characters, which implies that $l_1 = l_2$ since $l_1 \equiv l_2 (Q)$ and $\mathcal{L} < Q$.
\end{proof}
Our next immediate goal is to execute the character sum and the $r$-integral in \eqref{eq_final_form_3}. Replacing the $L$-series with \eqref{eq_approx}, up to $O(x^{-\varepsilon})$, one obtains:
\begin{equation} \label{eq_S_defn}
S := \inttoinf{-\infty} \sum_{\psi (Q)} \left| \sum_{m \geq 1} \sum_{l \sim \mathcal{L}} \frac{A(m)\psi(m)\psi(l) \conj{\chi(l)} l^{\alpha} }{m^{\half+it + ir} l^{ir}} V\left(\frac{m}{x} \right) \right|^2 \,\frac{d r}{\pi (1+\left( \frac{r}{G} \right)^2)}
\end{equation}
We apply Parseval here to obtain:
{\allowdisplaybreaks
\begin{align*}
S &= \inttoinf{-\infty} \sum_{\substack{a \text{ mod } Q \\ (a,Q) = 1}} \varphi(Q) \left| \sum_{m \geq 1} \sum_{\substack{l \sim \mathcal{L} \\ l \text{ prime} \\ ml \equiv a (Q)}} \frac{A(m) \conj{\chi(l)} l^{\alpha}}{m^{\half+it + ir} l^{ir}} V\left(\frac{m}{x} \right) \right|^2 \,\frac{d r}{\pi (1+\left( \frac{r}{G} \right)^2)} \\
&\leq \inttoinf{-\infty} \sum_{a \text{ mod } Q} \varphi(Q) \left| \sum_{m \geq 1} \sum_{\substack{l \sim \mathcal{L} \\ l \text{ prime} \\ ml \equiv a (Q)}} \frac{A(m) \conj{\chi(l)} l^{\alpha}}{m^{\half+it + ir} l^{ir}} V\left(\frac{m}{x} \right) \right|^2 \,\frac{d r}{\pi (1+\left( \frac{r}{G} \right)^2)} \\
&= \varphi(Q) \inttoinf{-\infty} \sum_{m_1, m_2 \geq 1} \sum_{\substack{l_1,l_2 \sim \mathcal{L} \\ l_1,l_2 \text{ prime} \\ m_1l_1 \equiv m_2l_2 (Q)}} \frac{A(m_1)\conj{A(m_2)} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\alpha}}{m_1^{\half+it+ir} m_2^{\half-it-ir} l_1^{ir} l_2^{-ir}} \allowdisplaybreaks[0] \\
&\phantom{=\varphi(Q)} \qqquad \qqquad \qquad \qquad \qquad \times V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) \,\frac{d r}{\pi (1+\left( \frac{r}{G} \right)^2)} \\
&=\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{m_1,m_2 \geq 1 \\ m_1l_1 \equiv m_2l_2 (Q)}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} \notag \allowdisplaybreaks[0]\\
&\phantom{=\varphi(Q) \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2)} \qquad \qquad \qquad \qquad \times V\left(\frac{m_1}{x} \right) V\left(\frac{m_2}{x} \right) e^{-G \left| \log \left(\tfrac{l_1m_1}{l_2m_2} \right) \right|} \\
&=: S_d + S_{o_1} + S_{o_2}
\end{align*}
}
where the diagonal portion $S_d$ and the off-diagonal portions, $S_{o_1}$ and $S_{o_2}$, are defined as follows:
{\allowdisplaybreaks
\begin{align}
S_d :&= \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{m_1,m_2 \geq 1 \\ m_1l_1 = m_2l_2}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) \label{eq_diag_defn} \\
S_{o_1} :&= \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{h,m_2 \geq 1 \\ m_1l_1 = m_2l_2 + hQ}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} \notag \allowdisplaybreaks[0] \\
&\phantom{=\varphi(Q) \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2)} \qquad \qquad \qquad \times V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) e^{-G \left| \log \left(\tfrac{l_1m_1}{l_2m_2} \right) \right|} \label{eq_o1_defn} \\
S_{o_2} :&= \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{h,m_1 \geq 1 \\ m_2l_2 = m_1l_1 + hQ}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} \notag \allowdisplaybreaks[0] \\
&\phantom{=\varphi(Q) \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2)} \qquad \qquad \qquad \times V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) e^{-G \left| \log \left(\tfrac{l_1m_1}{l_2m_2} \right) \right|} \label{eq_o2_defn}
\end{align}
}
\begin{remark}
$S_{o_2} = \conj{S_{o_1}}$.
\end{remark}
At this point, we have converted the problem into studying $S_d$ and $S_{o_1}$.
\section{The diagonal portion $S_d$} \label{sec:diag}
In this section, we focus on analyzing $S_d$. The analysis breaks $S_d$ into two sums, $S_{d_1}$ corresponding to $l_1 = l_2$ and $S_{d_2}$ corresponding to $l_1 \neq l_2$:
\[ S_d = S_{d_1} + S_{d_2}, \]
where
\begin{align}
S_{d_1} :&= \varphi(Q) G \sum_{l} l^{2\alpha} \sum_{m \geq 1} \frac{|A(m)|^2}{m} V\left(\frac{m}{x} \right)V\left(\frac{m}{x} \right) \label{eq_S_d1} \\
S_{d_2} :&= \varphi(Q) G \sum_{\substack{l_1,l_2 \\ l_1 \neq l_2}} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{m_1,m_2 \geq 1 \\ m_1l_1 = m_2l_2}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) \label{eq_d2_defn}
\end{align}
\subsection{The case of $l_1 = l_2$} \label{sec:diag_ez}
For $S_{d_1}$, note that the $m$-sum does not depend on $l$. The contribution of $S_{d_1}$ is as follows:
\begin{proposition} \label{eq_S_d1_prop}
As $x \to \infty$,
\begin{equation}
S_{d_1} = \varphi(Q) G \left| \sum_l l^{2\alpha} \right| \frac{(4\pi)^k}{\Gamma(k)}\langle f,f\rangle \log x + O(QG\mathcal{L}^{1 + 2\alpha}) + O(x^{-\varepsilon})
\end{equation}
\end{proposition}
\begin{proof}
Applying \eqref{eq_easy_mellin} twice to \eqref{eq_S_d1}, we obtain:
\begin{align}
S_{d_1} &= \varphi(Q) G \left| \sum_l l^{2\alpha} \right| \dblinvmellin{2}{2} \sum_{m \geq 1} \frac{|A(m)|^2}{m^{1+s+w}} x^{s+w} v(s) v(w) \,d s \,d w \notag \\
&= \varphi(Q) G \left| \sum_l l^{2\alpha} \right| \dblinvmellin{2}{4} \sum_{m \geq 1} \frac{|A(m)|^2}{m^{1+s}} x^s v(s-w)v(w) \,d s \,d w \label{eq_S_d1_int}
\end{align}
Moving the line of integration of $s$ down to $\operatorname{Re} s = -\frac{1}{3} - \varepsilon$, we pick up simple poles at $s = 0, s = w$, obtaining:
\begin{equation} \label{eq_S_d1_break}
S_{d_1} = \operatorname{Res}_{s=w} S_{d_1} + \operatorname{Res}_{s=0} S_{d_1} + O(x^{-\varepsilon})
\end{equation}
For the residue at $s = w$, by moving the line of integration $\operatorname{Re} w = 2$ down to $-\frac{1}{3}- \varepsilon$, we have:
\begin{align}
S_{d_1, \operatorname{Res}_{s=w}} &= \varphi(Q) G \left| \sum_l l^{2\alpha} \right| \left( \frac{(4\pi)^k}{\Gamma(k)}\langle f,f\rangle \log x + O(1) + O(x^{-\frac{1}{3} - \varepsilon}) \right). \label{eq_S_d1_sw}
\end{align}
We continue with the residue at $s=0$:
\begin{equation} \label{eq_S_d1_s0}
S_{d_1, \operatorname{Res}_{s=0}} = \varphi(Q) G \left| \sum_l l^{2\alpha} \right| \frac{(4\pi)^k}{\Gamma(k)} \langle f,f\rangle \left( \invmellin{2} v(-w)v(w) \,d w \right)
\end{equation}
This is just $O(QG\mathcal{L}^{1 + 2\alpha})$, upon noting the $l$-sum is $O(\mathcal{L}^{1+2\alpha})$ and the $w$-integral above is a constant.
Now plugging \eqref{eq_S_d1_sw} and \eqref{eq_S_d1_s0} into \eqref{eq_S_d1_break}, we have the proposition once we note that the $l$-sum is $O(\mathcal{L}^{1+2\alpha})$.
\end{proof}
\subsection{The case of $l_1 \neq l_2$} \label{sec:diag_nez}
In $S_{d_2}$, we have the condition $m_1l_1 = m_2l_2$. When $l_1 \neq l_2$, it implies $m_1 = l_2 m$, $m_2 = l_1m$ for some positive integer $m$. Hence, we have:
\begin{equation} \label{eq_S_d2}
S_{d_2} = \varphi(Q) G \sum_{\substack{l_1,l_2 \\ l_1 \neq l_2}} \frac{\conj{\chi(l_1)}\chi(l_2)}{l_2^{\half+it} l_1^{\half-it}} (l_1l_2)^{\alpha} \sum_{m \geq 1} \frac{A(l_2m)\conj{A(l_1m)}}{m} V\left(\frac{l_2m}{x} \right)V\left(\frac{l_1m}{x} \right)
\end{equation}
Using the same methods as in the proof of Proposition~\ref{eq_S_d1_prop}, we have the following:
\begin{proposition} \label{eq_S_d2_propo}
As $x \to \infty$,
\begin{align}
S_{d_2} = \varphi(Q) G \sum_{\substack{l_1,l_2\\ l_1 \neq l_2}} \frac{\conj{\chi(l_1)}\chi(l_2)}{l_2^{\half+it} l_1^{\half-it}} (l_1l_2)^{\alpha} \frac{(4\pi)^k}{\Gamma(k)}\langle f,f \rangle &E_{l_1,l_2}(1) \log (x/l_2) \notag \\
&+ O(QG\mathcal{L}^{1+2\alpha+\varepsilon}) + O(x^{-\varepsilon}),
\end{align}
where $E_{l_1,l_2}(s)$ is defined as follows:
\[ E_{l_1,l_2}(s) := \left( \sum_{m \geq 1} \frac{A(l_2m)\conj{A(l_1m)}}{m^s} \right) \left( \sum_{m \geq 1} \frac{|A(m)|^2}{m^s} \right)^{-1}, \]
which is essentially a product of ratios of Euler factors at the primes $l_1$ and $l_2$. $E_{l_1,l_2}(s)$ is analytic for $\operatorname{Re} s > 0$ and is bounded independent of $l_1,l_2$ in the region.
\end{proposition}
We have now obtained a complete understanding of the diagonal sum, so our next focus is to understand the off-diagonal sums $S_{o_1}$ and $S_{o_2}$. By the remark after \eqref{eq_o2_defn}, it is sufficient for us to understand $S_{o_1}$.
\section{Off-diagonal portion $S_{o_1}$, setting up the integrals} \label{sec:off-d-setup}
Recall from \eqref{eq_o1_defn},
\begin{align}
S_{o_1} &= \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{h,m_2 \geq 1 \\ m_1l_1 = m_2l_2 + hQ}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} \notag \allowdisplaybreaks[0] \\
&\phantom{=\varphi(Q) \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2)} \qquad \qquad \qquad \times V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) e^{-G \log \left(\tfrac{l_1m_1}{l_2m_2} \right)} \label{eq_S_o1_sum}
\end{align}
We will show that this object can be converted into studying a four-fold integral involving the $Z_Q(s,w)$ function:
\begin{proposition} \label{eq_S_o1_int}
As $G \to \infty$, we have
\begin{align}
S_{o_1} &= \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\quad \times \quadinvmellin{\gamma_1}{\gamma_2}{\gamma_3}{\gamma_4} Z_Q\left(s, s'-s-\tfrac{k}{2} + 1\right) \notag \\
&\phantom{\invmellin{c_1}} \times \betaF{s' - s + \half - \beta}{w+s+\beta + \frac{k}{2}-1-s'+it}{w+\frac{k-1}{2}+it} l_1^{w-\half} l_2^{s'-w} \notag\\
&\phantom{\invmellin{c_1}} \times x^{s' - \half} v(s'-w)v(w-\half) \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s' \,d s \,d w
\end{align}
where $\operatorname{Re} w = \gamma_1 = 1 + 2\theta + 2\varepsilon$, $\operatorname{Re} s = \gamma_2 = \frac{3}{4}$, $\operatorname{Re} s' = \gamma_3 = \frac{5}{4}$ and $\operatorname{Re} \beta = \gamma_4 = \frac{1}{2}+\theta+\varepsilon$.
\end{proposition}
\begin{remark}
As long as $G$ is chosen such that $G = (1+|t|)^{a} \log^b(Q)$ with $a > 0$ and $b > 4$, then $G \geq 3A = \sqrt{90 \log (Q(1+|t|))}$ for large $Q$ and $t$.
\end{remark}
\begin{proof}
We will focus on the innermost sum of $S_{o_1}$. For convenience, we define:
\[ T_{o_1} := \sum_{\substack{h_0,m_2 \geq 1\\ m_1l_1 = m_2l_2 + h_0Q}} \!\!\!\!\!\! \frac{A(m_1)\conj{A(m_2)}}{m_1^{\half+it}m_2^{\half-it}} V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) e^{- G \log \left( \frac{m_1l_1}{m_2l_2} \right)} \]
Starting with the definition of $T_{o_1}$, we will substitute in $m_1l_1 = m_2l_2 + h_0Q$ in several places:
\[ T_{o_1} = l_1^{\half+it} l_2^{\half-it} \sum_{m_2,h_0} \frac{A(m_1)\conj{A(m_2)}V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right)} {(m_2l_2+h_0Q)^{\half+it}(m_2l_2)^{\half-it}} e^{- G \log \left( 1 + \frac{h_0Q}{m_2l_2} \right)} \]
We apply \eqref{eq_easy_mellin} twice, resulting in:
\begin{align}
T_{o_1} = l_1^{\half+it} l_2^{\half-it} &\dblinvmellin{c_1}{c_2} \! \sum_{m_2,h_0} \!\! \frac{A(m_1) \conj{A(m_2)}}{(m_2l_2+h_0Q)^{\half+w+it} (m_2l_2)^{\half + s-it}} \notag \\
&\qquad \quad \times v(s)v(w) x^{s+w} l_2^s l_1^w \left( 1 + \frac{h_0Q}{m_2l_2} \right)^{-G} \,d s \,d w \notag
\end{align}
We quote this identity from \cite{GradsteynRhyzik}:
\begin{equation}
\invmellin{\gamma} \betaF{u}{\beta-u}{\beta} t^{-u} \,d u = (1+t)^{-\beta}, \label{eq_neg_bino}
\end{equation}
where $0 < \gamma < \operatorname{Re} \beta$.
Manipulating the expression from before and using \eqref{eq_neg_bino}, we end up introducing the $Z_Q$ function defined above:
{
\allowdisplaybreaks
\begin{align*}
T_{o_1} = &l_1^{\half+it} l_2^{\half-it} \triinvmellin{c_1}{c_2}{c_3} \! \sum_{m_2,h_0} \!\! \frac{A(m_1) \conj{A(m_2)} \left(1 + \frac{h_0Q}{m_2l_2}\right)^{\frac{k-1}{2}}}{(m_2l_2)^{s+w+1}} x^{s+w}\allowdisplaybreaks[0] \\
& \quad \times \left(1+\frac{h_0Q}{m_2l_2}\right)^{-(w+\frac{k}{2}+it)} \!\! v(s)v(w) l_2^s l_1^w \left( \frac{h_0Q}{ m_2l_2} \right)^{-\beta} \!\! \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d s \,d w \,d \beta \\
= &l_1^{\half+it} l_2^{\half-it} \! \quadinvmellin{c_1}{c_2}{c_3}{c_4} \!\!\!\! Z_Q\left(s+w+1-u-\beta, u+\beta-\tfrac{k-1}{2}\right) \allowdisplaybreaks[0] \\
& \times \betaF{u}{w+\frac{k}{2}-u+it}{w+\frac{k}{2}+it} v(s)v(w)x^{s+w} l_2^s l_1^w \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d u \,d s \,d w \,d \beta
\end{align*}
}
From here, we will do a series of change of variables. First we change $u \mapsto u - \beta$:
\begin{align*}
=&l_1^{\half+it} l_2^{\half-it} \quadinvmellin{c'_1}{c'_2}{c'_3}{c'_4} Z_Q(s+w+1-u, u-\tfrac{k-1}{2}) x^{s+w} l_2^s l_1^w \\
&\times \betaF{u-\beta}{w+\frac{k}{2}+\beta- u+it}{w+\frac{k}{2}+it} v(s)v(w) \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d u \,d s \,d w \,d \beta
\end{align*}
Now we do $s \mapsto s+u-1$:
\begin{align*}
=&l_1^{\half+it} l_2^{\half-it} \! \quadinvmellin{c''_1}{c''_2}{c''_3}{c''_4} \!\!\! Z_Q(s\!+\!w, u-\tfrac{k-1}{2}) x^{s-1+u+w} l_2^{s+u-1} l_1^w \\
&\times \betaF{u-\beta}{w+\frac{k}{2}+\beta - u+it}{w+\frac{k}{2}+it}v(s+u-1) v( w) \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d u \,d s \,d w \,d \beta
\end{align*}
Then we will change $s \mapsto s-w, u \mapsto u+\frac{k-1}{2}$:
\begin{align*}
=&l_1^{\half+it} l_2^{\half-it} \! \quadinvmellin{c'''_1}{c'''_2}{c'''_3}{c'''_4} \!\! Z_Q(s, u) x^{s+u+\frac{k-3}{2}} l_2^{s-w+u+\frac{k-3}{2}} l_1^w\\
&\times \betaF{u+\frac{k-1}{2}-\beta}{w-u+\half+\beta+it}{w+\frac{k}{2}+it}v(s-w+u+\frac{k-1}{2}-1) v( w) \\
& \times \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d u \,d s \,d w \,d \beta
\end{align*}
Finally we will get rid of $u$ and introduce $s' = s + u + \frac{k}{2} - 1$, while also doing $w \mapsto w-\half$:
\begin{align*}
=&l_1^{\half+it} l_2^{\half-it} \! \quadinvmellin{\gamma_1}{\gamma_2}{\gamma_3}{\gamma_4} \!\!\! Z_Q\left(s, s'-s-\tfrac{k}{2} + 1\right) x^{s' - \half} l_2^{s'-w} l_1^{w-\half} \\
&\qquad \qquad \qquad \qquad \qquad \times \betaF{s' - s + \half -\beta}{s + w+\tfrac{k}{2} - 1 - s' +\beta+it}{w+\tfrac{k-1}{2}+it}\\
&\qquad \qquad \qquad \qquad \qquad \times v(s'-w) v(w-\half) \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s' \,d s \,d w
\end{align*}
This ends the change of variables. We can take the following values for the $\gamma_i$'s: $\operatorname{Re} s = 2$, $\operatorname{Re} s' = 2 + \frac{k}{2} + \varepsilon$, $\operatorname{Re} w = 1 + 2\theta + 2\varepsilon$ and $\operatorname{Re} \beta = 2$.
We can move $\operatorname{Re} \beta$ down to $\frac{3}{4}$ without hitting poles. Next, we can move $\operatorname{Re} s'$ to $\frac{5}{2} + \varepsilon$ without hitting poles. Now we can move $\operatorname{Re} s$ down to $\frac{3}{4}$ without picking up poles. Then we will move $\operatorname{Re} s'$ to $\frac{5}{4}$, again without poles. Finally, moving $\operatorname{Re} \beta$ to $\frac{1}{2}+\theta+\varepsilon$ does not hit any pole. This proves the proposition.
\end{proof}
We will separate our analysis of $S_{o_1}$ into 2 parts, the first part corresponding to the discrete spectrum $S_{o_1}^d$ and the second to the continuous spectrum $S_{o_1}^c$. We will also replace $Z_Q(s,s'-s-\tfrac{k}{2} + 1)$ with $\lim_{\delta \to 0} Z_Q(s,s'-s-\tfrac{k}{2} + 1; \delta)$, taking the $\delta$ limit where it is convenient to do so. In our analysis, our top priority is to bring down the effects of $x$; the second priority is to bring down the $t$-contribution.
\section{Off-diagonal, discrete spectrum}
We will be looking at the growth of the discrete terms in this section:
\begin{align}
S_{o_1}^d = & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \quadinvmellin{\gamma_1}{\gamma_2}{\gamma_3}{\gamma_4} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} x^{s'-\half} l_2^{s'-w} l_1^{w-\half} \notag \\
&\times v( s'-w) v( w-\half) \betaF{s' - s + \half -\beta}{s + w+\beta - s' + \tfrac{k}{2} - 1+it}{w+ \tfrac{k-1}{2}+it} \notag \\
&\times\sum_{t_j} L_Q(s',\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s' \,d s \,d w \label{eq_S_o1d_5int}
\end{align}
Our goal here is twofold: to bring down the $x$-exponent as much as possible and to take the $\delta$-limit. As such, the natural thing to do is to bring $\operatorname{Re} s'$ down. Hence, we move $\operatorname{Re} s'$ down to $\operatorname{Re} s' = \frac{1}{2} - \varepsilon$, hitting simple poles at $s' = w$ and $s' = s + \beta - \frac{1}{2}$. We break our analysis into bounding these two residues and the moved integral.
\subsection{The residue at $s'=w$}
In this section, we will show the following:
\begin{proposition} \label{eq_S_o1d_Zres_propo}
For $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$ and $\mathcal{L} \ll Q$,
\[ \operatorname{Res}_{s'=w} S_{o_1}^d \ll G^{1+\varepsilon} Q^{\half+\theta+\varepsilon} \mathcal{L}^{3+2\alpha+\varepsilon} + x^{-\varepsilon}, \]
\end{proposition}
\begin{remark}
In the proof of this proposition, we also justify the reason for imposing this condition on $G$.
\end{remark}
\begin{proof}
Taking the residue at $s'=w$, we have:
\begin{align}
\operatorname{Res}_{s'=w} S_{o_1}^d = & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_1}{\gamma_2}{\gamma_4} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} (xl_1)^{w-\half} \notag \\
&\times v( w-\half) \betaF{w- s + \half -\beta}{s + \beta + \tfrac{k}{2} - 1+it}{w+ \tfrac{k-1}{2}+it} \notag \\
&\times\sum_{t_j} L_Q(w,\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \,d w \label{eq_S_o1d_leadsp}
\end{align}
This really suggests moving $\operatorname{Re} w$ to $\frac{1}{2}-\varepsilon$, which picks up simple poles at $w=s+\beta-\frac{1}{2}$ and $w=\frac{1}{2}$.
\subsubsection{The residue at $w=s+\beta-\frac{1}{2}$}
We first write down the residue:
\begin{align}
&\operatorname{Re}s{w=s+\beta-\frac{1}{2}}\operatorname{Re}s{s'=w} S_{o_1}^d \notag \\
= & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_2}{\gamma_4} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} (xl_1)^{s+\beta-1} v( s+\beta-1) \notag \\
&\times\sum_{t_j} L_Q(s+\beta-\frac{1}{2},\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \label{eq_S_o1d_leadswp}
\end{align}
Here, we move $\operatorname{Re} \beta$ down to $\frac{1}{4}-\varepsilon$, passing through a simple pole at $\beta = 1-s$. The residue is:
\begin{align}
&\operatorname{Re}s{\beta=1-s} \operatorname{Re}s{w=s+\beta-\frac{1}{2}}\operatorname{Re}s{s'=w} S_{o_1}^d \notag \\
= & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j}) \conj{\langle U, u_j \rangle} \notag \\
&\times \invmellin{\gamma_2} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} M(s,t_j,\delta) \frac{\Gamma(1-s)\Gamma(G-1+s)}{\Gamma(G)} \,ds \label{eq_S_o1d_lead}
\end{align}
Now to take the limit as $\delta \to 0$, it is necessary for us to move $\operatorname{Re} s$ to $\frac{1}{2}-\frac{k}{2}-\theta -\varepsilon$, which results in us picking up poles at $s = \frac{1}{2} \pm it_j - r$, for integers $0 \leq r \leq \frac{k}{2}$. For integer $r$, using the notation from Proposition~\ref{eq_Z_s_poles}, the residue is:
\begin{align}
&\sum_{\pm t_j} \operatorname{Re}s{s=\frac{1}{2}+it_j-r} \operatorname{Re}s{\beta=1-s} \operatorname{Re}s{w=s+\beta-\frac{1}{2}}\operatorname{Re}s{s'=w} S_{o_1}^d \notag \\
= & \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\qquad \qquad \times \sum_{t_j} (l_1l_2)^{\frac{k-1}{2}} c_{r,j} L_Q(\frac{1}{2},\conj{u_j}) \frac{\Gamma(\frac{1}{2}-it_j+r)\Gamma(G-\frac{1}{2}-it_j+r)}{\Gamma(G)} \label{eq_S_o1d_end1}
\end{align}
Using the fact that $\Gamma(G-\frac{1}{2}-it_j-r) \ll \Gamma(G-\frac{1}{2}+\theta-r)$ and \eqref{eq_L_crj}, we can see that the above expression is bounded by $O(Q^{\frac{1}{2}+\theta+\varepsilon} G^{\frac{1}{2}+\theta-r+\varepsilon} \mathcal{L}^{3+2\alpha +\varepsilon})$.
For the moved integral with $\operatorname{Re} s = \gamma_2' = \frac{1}{2}-\frac{k}{2}-\theta-\varepsilon$, we take $\delta \to 0$, obtaining:
\begin{align}
&\operatorname{Re}s{\beta=1-s} \operatorname{Re}s{w=s+\beta-\frac{1}{2}}\operatorname{Re}s{s'=w} S_{o_1}^d \notag \\
= & \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \invmellin{\gamma_2'} \frac{(4\pi)^k (l_1l_2)^{\frac{k-1}{2}}}{2\Gamma(s+k-1)} \notag \\
&\quad \times \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j}) \frac{\Gamma(1-s)^2\Gamma(s-\frac{1}{2}+it_j)\Gamma(s-\frac{1}{2}-it_j)}{\Gamma(\frac{1}{2}+it_j)\Gamma(\frac{1}{2}-it_j)} \conj{\langle U, u_j \rangle} \frac{\Gamma(G-1+s)}{\Gamma(G)} \,d s \label{eq_S_o1d_lead_moved}
\end{align}
By using Stirling's approximation, \eqref{eq_L_inner} and splitting the integral according to the relative sizes of $|t_j|$ and $|\operatorname{Im} s|$, one can derive the bound $O(Q^{\frac{1}{2}+\theta+\varepsilon} G^{\frac{1}{2}-\frac{k}{2}-\theta-\varepsilon} \mathcal{L}^{3+2\alpha +\varepsilon})$.
\subsubsection{Residue at $w=\frac{1}{2}$}
The residue is:
\begin{align}
&\operatorname{Re}s{w=\frac{1}{2}}\operatorname{Re}s{s'=w} S_{o_1}^d \notag \\
= & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_2}{\gamma_4} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \notag \\
&\times \betaF{1- s -\beta}{s + \beta + \tfrac{k}{2} - 1+it}{\tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \label{eq_S_o1d_2ndw}
\end{align}
Similar as before, we need to move $\operatorname{Re} s$ to $\frac{1}{2}-\frac{k}{2}-\theta-\varepsilon$ to take the limit. In doing so, we pick up simple poles at $s = 1-\beta$, $s=1-\beta-\frac{k}{2}-it$ and $s=\frac{1}{2}\pm it_j-r$ for $0 \leq r \leq \frac{k}{2}$.
Investigating the pole at $s=1-\beta$, the residue is:
\begin{align}
&\operatorname{Re}s{s=1-\beta} \operatorname{Re}s{w=\frac{1}{2}}\operatorname{Re}s{s'=w} S_{o_1}^d \notag \\
= &- \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j}) \conj{\langle U, u_j \rangle} \notag \\
&\times \invmellin{\gamma_4} \frac{(4\pi)^k 2^{\half-\beta} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(k-\beta) 2\sqrt{\pi}} M(1-\beta,t_j,\delta) \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d\beta \label{eq_S_o1d_2ndws}
\end{align}
Changing variable $\beta = 1-s$, we see that the contour line becomes $\operatorname{Re} s = \frac{1}{2}-\theta-\varepsilon$. This is pretty much the same term as \eqref{eq_S_o1d_lead}. Thus, the same bound applies.
Next up is the residue at $s=1-\beta-\frac{k}{2}-it$, for which we also take $\delta \to 0$:
\begin{align}
&\operatorname{Re}s{s=1-\beta-\frac{k}{2}-it} \operatorname{Re}s{w=\frac{1}{2}}\operatorname{Re}s{s'=w} S_{o_1}^d \notag \\
= & \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\qquad \qquad \times \invmellin{\gamma_4} \frac{(4\pi)^k(l_1l_2)^{\frac{k-1}{2}}}{2\Gamma(\frac{k}{2}-\beta-it) } \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j})\conj{\langle U, u_j \rangle}\notag \\
&\qquad \qquad \times \frac{\Gamma(\beta+\frac{k}{2}+it)\Gamma(\frac{1}{2}-\beta-\frac{k}{2}-it+it_j)\Gamma(\frac{1}{2}-\beta-\frac{k}{2}-it-it_j)}{\Gamma(\frac{1}{2}+it_j)\Gamma(\frac{1}{2}-it_j)} \,d \beta \label{eq_S_o1d_2ndwLs}
\end{align}
Changing variable $\beta \mapsto \beta - it$, the expression becomes:
\begin{align}
&\operatorname{Re}s{s=1-\beta-\frac{k}{2}-it} \operatorname{Re}s{w=\frac{1}{2}}\operatorname{Re}s{s'=w} S_{o_1}^d \notag \\
= & \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\qquad \qquad \times \invmellin{\gamma_4} \frac{(4\pi)^k(l_1l_2)^{\frac{k-1}{2}}}{2\Gamma(\frac{k}{2}-\beta-it) } \frac{\Gamma(\beta-it)\Gamma(G-\beta+it)}{\Gamma(G)} \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j})\conj{\langle U, u_j \rangle}\notag \\
&\qquad \qquad \times \frac{\Gamma(\beta+\frac{k}{2})\Gamma(\frac{1}{2}-\beta-\frac{k}{2}+it_j)\Gamma(\frac{1}{2}-\beta-\frac{k}{2}-it_j)}{\Gamma(\frac{1}{2}+it_j)\Gamma(\frac{1}{2}-it_j)} \,d \beta \label{eq_S_o1d_2ndwLs_break}
\end{align}
Note that there is enough exponential decay in $\beta$ even if we sum $t_j$ first. Hence, we can easily see that this term is bounded by $O(G^{\frac{1}{2}+\varepsilon} Q^{\frac{1}{2}+\theta+\varepsilon} \mathcal{L}^{3+2\alpha+\varepsilon})$.
We now keep track of the residues at $s=\frac{1}{2}+it_j-r$:
\begin{align}
&\sum_{\pm t_j} \operatorname{Re}s{s=\frac{1}{2}+it_j-r} \operatorname{Re}s{w=\frac{1}{2}}\operatorname{Re}s{s'=w} S_{o_1}^d \notag \\
= & \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \invmellin{\gamma_4} \sum_{t_j} (l_1l_2)^{\frac{k-1}{2}} c_{r,j} L_Q(\frac{1}{2},\conj{u_j}) \notag \\
&\times \betaF{\frac{1}{2}-it_j+r -\beta}{\beta + \tfrac{k-1}{2} +it_j-r+it}{\tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \label{eq_dis_res_rj}
\end{align}
Upon changing variable $\beta \mapsto \beta-it_j$, the expression above is related to the upcoming lemma.
\begin{lemma} \label{propo_crazy}
For $\operatorname{Re} \beta = a \geq \frac{1}{2}+r+\varepsilon$ and $\mathcal{L} \ll Q$,
\begin{align}
& G \sum_{t_j} L_Q(\half,\conj{u_j}) c_{r,j} \notag \\
&\qquad \times \invmellin{a} \!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j) \Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
\ll & G^{1-a + \theta} \left( (1+|t|)^{a-r-\half+\varepsilon} + (1+|t|)^{1 + r+ \varepsilon} \right) Q^{-\half+\theta} \mathcal{L}^{1-k+\varepsilon} \label{eq_dis_res_bound}
\end{align}
In fact, to minimize this, the optimal choices are $\operatorname{Re} \beta = a = \frac{3}{2} + 2r + \varepsilon$ and $G \asymp (1+|t|)^\frac{2}{3-2\theta} \log^5 Q$. With these choices, the bound is $O(G^{1+\varepsilon} Q^{-\half+\theta} \mathcal{L}^{1-k+\varepsilon})$.
\end{lemma}
This relates to \eqref{eq_dis_res_rj} by moving lines of integration and picking up relevant poles. We will delay the proof of this lemma to the end of this section.
Going back to the integral in \eqref{eq_dis_res_rj}, we will deal with the case $r=0$ first. We move the line of integration up to $\operatorname{Re} \beta = \frac{3}{2} + \varepsilon$, picking up a residue at $\beta = \frac{3}{2}$. Then by Lemma~\ref{propo_crazy} with the optimal choice of $G$, the moved integral is $O(G^{1+\varepsilon} Q^{\half+\theta} \mathcal{L}^{3 + 2\alpha + \varepsilon})$. The residue is:
\begin{align}
- \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}&l_2^{-it} (\tfrac{k}{2}+it) \notag \\
&\times \sum_{t_j} L_Q(\half,\conj{u_j}) c_{0,j} \frac{\Gamma(\tfrac{3}{2}-it_j) \Gamma(G-\tfrac{3}{2}+it_j)}{\Gamma(G)} \notag
\end{align}
With the same choice of $G$ as above, this residue is also $O(G^{1+\varepsilon} Q^{\half+\theta} \mathcal{L}^{3+2\alpha+\varepsilon})$. We assume the same choice of $G$ from now on.
For $1 \leq r \leq \frac{k}{2}$, we move the line of integration in \eqref{eq_dis_res_rj} up to $\operatorname{Re} \beta = \frac{3}{2} + 2r + \varepsilon$. The moved integral is $O(G^{1+\varepsilon} Q^{\half+\theta} \mathcal{L}^{3+2\alpha+\varepsilon})$ as shown in Lemma~\ref{propo_crazy}. The residues for a particular $r$ are as follows:
\begin{align}
&\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} L_Q(\half,\conj{u_j}) c_{r,j} \notag \\
&\times \sum_{m=0}^{r+1} \frac{(-1)^m}{m!} \frac{\Gamma(\tfrac{k}{2}+m+it)}{\Gamma(\tfrac{k}{2}+it)} \frac{\Gamma(\half+r+m-it_j)\Gamma(G-\frac{1}{2}-r-m+it_j)}{\Gamma(G)} \notag
\end{align}
This whole sum is $o(GQ^{\half+\theta}\mathcal{L}^{3+2\alpha+\varepsilon})$.
Last but not least, we bound the moved integral, where $\operatorname{Re} s = \gamma_2'=\frac{1}{2}-\frac{k}{2}-\theta-\varepsilon$:
\begin{align}
&\operatorname{Re}s{w=\frac{1}{2}}\operatorname{Re}s{s'=w} S_{o_1}^d \notag \\
= & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_2'}{\gamma_4} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \notag \\
&\times \betaF{1- s -\beta}{s + \beta + \tfrac{k}{2} - 1+it}{\tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \label{eq_dis_moved}
\end{align}
Note that for $\operatorname{Re} s < \half - \frac{k}{2}$, we note the following bound which is true regardless of relative sizes of $|t_j|$ and $|\operatorname{Im} s|$:
\[ \lim_{\delta \to 0} \frac{2^{s-\half}M(s,t_j,\delta)}{\Gamma(s+k-1) 2\sqrt{\pi}} \ll (1+|t_j|)^{2\operatorname{Re} s - 2} (1+|\operatorname{Im} s|)^{3-3\operatorname{Re} s - k} \]
Hence, at $\operatorname{Re} s = \half - \frac{k}{2} - \theta - \varepsilon$, using the above bound and \eqref{eq_L_inner}, we have:
\begin{align*}
\lim_{\delta \to 0} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} &\sum_j L_Q(\half,\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \notag \\
&\ll (1+|\operatorname{Im} s|)^{\frac{3}{2} +\frac{k}{2} + 3\theta +3\varepsilon} Q^{-\half+\theta} \mathcal{L}^{1-k+\varepsilon}
\end{align*}
Using the fact noted above and the methods of proving Lemma~\ref{propo_crazy}, we have the following auxiliary lemma:
\begin{lemma} \label{propo_crazy2}
For $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$ and $\operatorname{Re} \beta = a = \frac{5}{2} + k + 5\theta + 2\varepsilon$,
\begin{align}
&G \dblinvmellin{\gamma'_2}{a} \sum_{t_j} \lim_{\delta \to 0} \frac{(4\pi)^k 2^{s-\half}M(s,t_j,\delta)}{\Gamma(s+k-1) 2\sqrt{\pi}} L_Q(\half,\conj{u_j}) \conj{\langle U, u_j \rangle} \notag \\
& \qquad \qquad \qquad \qquad \quad \times \betaF{1 - s - \beta}{s+\beta + \tfrac{k}{2} - 1 +it}{\tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \notag \\
\ll & G^{1+\varepsilon} Q^{-\half+\theta} \mathcal{L}^{1-k+\varepsilon} \label{eq_dis_moved_bound}
\end{align}
\end{lemma}
Again in \eqref{eq_dis_moved}, we move $\operatorname{Re} \beta$ to $\frac{5}{2} + k + 5\theta + 2\varepsilon$, hitting poles at $\beta = 1 - s + \ell$, where $0 \leq \ell \leq \frac{k}{2} + 2$. Using the lemma above and $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$, the moved integral is $O(G^{1+\varepsilon} Q^{\half+\theta+\varepsilon} \mathcal{L}^{3+2\alpha+\varepsilon})$. The residue at $\beta = 1-s + \ell$ is:
\begin{align}
\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} &\conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \invmellin{\gamma'_2} \sum_j \frac{(4\pi)^k 2^{s-\half}M(s,t_j,\delta)}{\Gamma(s+k-1) 2\sqrt{\pi}} L_Q(\half,\conj{u_j}) \conj{\langle U, u_j \rangle} \notag \\
& \qquad \qquad \times \frac{(-1)^\ell}{\ell !} \frac{\Gamma(\tfrac{k}{2} + \ell +it)}{\Gamma(\tfrac{k}{2}+it)} \frac{\Gamma(1-s+\ell)\Gamma(G+s-1-\ell)}{\Gamma(G)} \,d s \notag
\end{align}
These residues are $o(G Q^{\half+\varepsilon} \mathcal{L}^{3+2\alpha+\varepsilon} )$. Putting all these together, we see the residue at $w=\frac{1}{2}$ is bounded by $O(G Q^{\half+\varepsilon} \mathcal{L}^{3+2\alpha+\varepsilon} )$.
\subsubsection{The moved integral at $w=\frac{1}{2}-\varepsilon$}
We see that the $x$-exponent is $O(x^{-\varepsilon})$. Moving the line of integration for $\operatorname{Re} s$ such that we can take the $\delta$-limit just as demonstrated before, we see that this term is $O(x^{-\varepsilon})$.
Putting all the cases so far together, we finish proving Proposition~\ref{eq_S_o1d_Zres_propo}.
\end{proof}
\subsection{Contribution from the pole $s'=s+\beta-\frac{1}{2}$}
\begin{proposition} \label{eq_S_o1d_Z_sp2}
For $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$,
\[ \operatorname*{Res}_{s'=s+\beta-\frac{1}{2}} S_{o_1}^d \ll x^{-\varepsilon}. \]
\end{proposition}
\begin{proof}
We start by writing down the residue at $s'=s+\beta-\frac{1}{2}$:
\begin{align}
&\operatorname{Re}s{s'=s+\beta-\frac{1}{2}} S_{o_1}^d \notag \\
= & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\!\!\times \triinvmellin{\gamma_1}{\gamma_2}{\gamma_4} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} x^{s+\beta-1} l_2^{s+\beta-\frac{1}{2}-w} l_1^{w-\half} v( w-\tfrac{1}{2}) \notag \\
&\!\!\times v( s+\beta-\tfrac{1}{2}-w) \sum_{t_j} L_Q(s+\beta-\tfrac{1}{2},\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \,d w \label{eq_S_o1d_sp2_first}
\end{align}
We move $\operatorname{Re} \beta$ down to $\frac{1}{4}-\varepsilon$, during which we do not encounter any poles. Hence, this term is $O(x^{-\varepsilon})$, after moving $\operatorname{Re} s$ to $\frac{1}{2}-\frac{k}{2}-\theta-\varepsilon$ to enable us taking $\delta \to 0$.
\end{proof}
\subsection{Contribution of the moved integral at $\operatorname{Re} s' = \frac{1}{2}-\varepsilon$}
We see that the $x$-exponent is negative. Moving $\operatorname{Re} s$ down to enable us taking the $\delta$-limit, this term is also $O(x^{-\varepsilon})$.
\subsection{Proof of Lemma~\ref{propo_crazy}}
For convenience, we write $\beta = a + it_\beta$, where $a, t_\beta \in \mathbb{R}$. We will split the object to be analyzed as follows:
\begin{align}
& G \sum_{t_j} L_Q(\half,\conj{u_j}) c_{r,j} \notag \\
&\qquad \times \invmellin{a} \!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
=& G \sum_{t_j} L_Q(\half,\conj{u_j}) c_{r,j} (P_1 + P_2 + P_3), \notag
\end{align}
where
\begin{align}
P_1 &= \frac{1}{2\pi i} \!\!\!\!\!\!\!\! \int \limits_{\substack{\beta = a + it_\beta \\ |t_\beta| \leq |t| - \log^4 |t|}} \!\!\!\!\!\!\!\!\!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
P_2 &= \frac{1}{2\pi i} \!\!\!\!\!\!\!\! \int \limits_{\substack{\beta = a + it_\beta \\ \left| |t_\beta| - |t| \right| \leq \log^4 |t|}} \!\!\!\!\!\!\!\!\!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
P_3 &= \frac{1}{2\pi i} \!\!\!\!\!\!\!\! \int \limits_{\substack{\beta = a + it_\beta \\ |t_\beta| \geq |t| + \log^4 |t|}} \!\!\!\!\!\!\!\!\!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag
\end{align}
In each part, we seek to bound the integrand using Stirling's formula.
\subsubsection{The case where $|t_\beta| \leq |t| - \log^4 |t|$: $P_1$}
The ratio of gammas is bounded by:
\begin{align}
&\betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \notag \\
\ll &G^{-a+\theta} (1 + |t|)^{a-r-\half} (1+|t_\beta|)^{r-a} (1+|t_\beta - t_j|)^{a+\theta-\half} e^{-\frac{\pi}{2} |t_\beta-t_j|} \notag
\end{align}
We further separate this case by the relative sizes of $|t_j|$ and $|t|$:
\begin{enumerate}[leftmargin=*]
\item If $|t_j| \geq |t|$, then we can further conclude that the integrand is bounded by:
\[ (1 + |t|)^{a-r-\half} (1+|t_\beta|)^{r-a} (1+|t_j|)^{a+\theta-\half} e^{-\frac{\pi}{2} |t_j|} e^{\frac{\pi}{2} |t_\beta|} G^{-a+\theta} \]
Executing the $\beta$-integral and then summing over such $|t_j|$'s using the bound in proposition \operatorname{Re}f{propo_j_sums}, we have that:
\[ G \sum_{|t_j| \geq |t|} L_Q(\half,\conj{u_j}) c_{r,j} P_1 \ll G^{1-a+\theta} (1 + |t|)^{a-r-\half+\varepsilon} Q^{-\half} L^{1-k+\varepsilon} \]
\item If $|t_j| \leq |t|$, then we separate the integral as follows:
\[ P_1 = P_{1,1} + P_{1,2} + P_{1,3}, \]
where
\begin{align*}
P_{1,1} &= \frac{1}{2\pi i} \!\!\!\!\!\!\!\! \int \limits_{\substack{\beta = a + it_\beta \\ |t_\beta| \leq |t_j| - \log^4 |t_j|}} \!\!\!\!\!\!\!\!\!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
P_{1,2} &= \frac{1}{2\pi i} \!\!\!\!\!\!\!\! \int \limits_{\substack{\beta = a + it_\beta \\ \left| |t_\beta| - |t_j| \right| \leq \log^4 |t_j|}} \!\!\!\!\!\!\!\!\!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
P_{1,3} &= \frac{1}{2\pi i} \!\!\!\!\!\!\!\! \int \limits_{\substack{\beta = a + it_\beta \\ |t_\beta| \geq |t_j| + \log^4 |t_j| \\ |t_\beta| \leq |t| - \log^4 |t|}} \!\!\!\!\!\!\!\!\!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag
\end{align*}
\begin{enumerate}[leftmargin=*]
\item In the subcase $|t_\beta| \leq |t_j| - \log^4 |t_j|$, this subcase has the same effect as case 1:
\[ G \sum_{|t_j| \leq |t|} L_Q(\half,\conj{u_j}) c_{r,j} P_{1,1} \ll G^{1-a+\theta} (1 + |t|)^{a-r-\half+\varepsilon} Q^{-\half} L^{1-k+\varepsilon} \]
\item In the subcase $\left| |t_\beta| - |t_j| \right| \leq \log^4 |t_j|$, the integrand is bounded by
\[ (1 + |t|)^{a-r-\half+\varepsilon} (1+|t_\beta|)^{r-a} G^{-a+\theta}. \]
Now executing the $\beta$-integral and then summing $|t_j|$, we obtain that
\[ G \sum_{|t_j| \leq |t|} L_Q(\half,\conj{u_j}) c_{r,j} P_{1,2} \ll G^{1-a+\theta} (1 + |t|)^{1+r+\varepsilon} Q^{-\half} L^{1-k+\varepsilon} \]
\item In the last subcase, $|t_j| + \log^4 |t_j| \leq |t_\beta| \leq |t| - \log^4 |t|$, the integrand is bounded by:
\[ (1 + |t|)^{a-r-\half} (1+|t_\beta|)^{r+\theta-\half} e^{-\frac{\pi}{2} |t_\beta|} e^{\frac{\pi}{2}|t_j|} G^{-a+\theta}.\]
Now executing the $\beta$-integral and then summing $|t_j|$, we obtain that
\[ G \sum_{|t_j| \leq |t|} L_Q(\half,\conj{u_j}) c_{r,j} P_{1,3} \ll G^{1-a+\theta} (1 + |t|)^{a-r-\half+\varepsilon} Q^{-\half} L^{1-k+\varepsilon} \]
\end{enumerate}
\end{enumerate}
Hence, in total:
\begin{align}
G \sum_{j} L_Q(\half,\conj{u_j}) &c_{r,j} P_1 \notag \\
&\ll G^{1-a+\theta} \left( (1 + |t|)^{1+r+\varepsilon} + (1+|t|)^{a-r-\half+\varepsilon} \right) Q^{-\half} L^{1-k+\varepsilon} \notag
\end{align}
\subsubsection{The case where $\left| |t_\beta| - |t| \right| \leq \log^4 |t|$: $P_2$}
The ratio of gammas is bounded by:
\begin{align}
&\betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)}\notag \\
&\qquad \qquad \ll G^{-a+\theta} (1 + |t|)^{r-a-\frac{k-1}{2}} (1+|t_\beta - t_j|)^{a+\theta-\half} e^{-\frac{\pi}{2} |t_\beta-t_j|} \notag
\end{align}
Separating cases by the relative sizes of $|t_j|$ and $|t|$, we can show that this term is completely overshadowed by $P_1$.
\subsubsection{The case where $|t_\beta| \geq |t| + \log^4 |t|$: $P_3$}
The ratio of gammas is bounded by:
\begin{align}
&\betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \notag \\
\ll &G^{-a+\theta} (1 + |t|)^{-\frac{k-1}{2}} (1+|t_\beta|)^{\frac{k}{2}-1} e^{-\pi|t_\beta|} e^{\pi|t|} (1+|t_\beta - t_j|)^{a+\theta-\half} e^{-\frac{\pi}{2} |t_\beta-t_j|} \notag
\end{align}
Executing the $\beta$-integral will show that the result will have decay in $|t|$ that is faster than every polynomial. Hence, this case is also negligible.
Putting the cases together, we realize that:
\begin{align}
& G \sum_{t_j} L_Q(\half,\conj{u_j}) c_{r,j} \notag \\
&\qquad \times \invmellin{a} \!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
\ll& G^{1-a+\theta} \left( (1 + |t|)^{1+r+\varepsilon} + (1+|t|)^{a-r-\half+\varepsilon} \right) Q^{-\half} L^{1-k+\varepsilon} \notag
\end{align}
Our goal is to minimize $G$. In order to do this, we want to increase $a$, looking at the term $(1+|t|)^{1+r+\varepsilon}$, which has a fixed exponent. However, the other term dominates if $a$ is too large, with increasingly worse behavior as $a$ increases. Hence, we set the exponents of the terms to equal each other. This gives $a = \frac{3}{2} + 2r + \varepsilon$ (the $\varepsilon$ is added in to avoid the pole on the line $\operatorname{Re} \beta = \frac{3}{2} + 2r$.)
Finally, we can proceed to get a lower bound on $G$ such that the above is $O(G^{1+\varepsilon} Q^{-\half} L^{1-k+\varepsilon})$, so that the contribution in the $(1+|t|)$-aspect is at most as much as in $S_d$. It turns out that $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$ when $r=0$. The $G$ required for larger values of $r$ has a smaller $(1+|t|)$-exponent. This proves the proposition.
\section{Off-diagonal, continuous spectrum}
Quoting the continuous spectrum from \eqref{eq_Z_decomp}, the object to analyze here is:
\begin{align}
S_{o_1}^c = &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \quadinvmellin{\gamma_1}{\gamma_2}{\gamma_3}{\gamma_4} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \invmellin{0} \zeta_{\mathfrak{a},Q}(s',-z) M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \notag \\
&\times \betaF{s' - s + \half - \beta}{s + w+ \beta - s' + \tfrac{k}{2} - 1+it}{w+ \tfrac{k-1}{2}+it} \notag \\
&\times v( s'-w) v( w-\half) x^{s'-\half} l_2^{s'-w} l_1^{w-\half} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d z \,d \beta \,d s' \,d s \,d w \label{eq_S_o1c_6int}
\end{align}
We will move $\operatorname{Re} z$ slightly to the right to a curve $C$, which has the property that if $z$ is any complex number between 0 and $C$, $\zeta^*(1-2z) \neq 0$.
For the most part, there will be a lot of similarities in how we analyze this 5-fold integral compared to how we analyzed the discrete spectrum expression in the last section. In particular, we start by moving $\operatorname{Re} s'$ down to $\frac{1}{2}-\varepsilon$, passing through simple poles at $s'=w$, $s'=1+z$, $s'=1-z$ and $s'=s+\beta-\frac{1}{2}$. As seen in the case of the discrete spectrum, the moved integral here is $O(x^{-\varepsilon})$.
\subsection{The residue at $s'=w$}
This is the pole that has most of the contribution from the continuous part.
\begin{proposition} \label{propo_total_cts}
For $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$,
\begin{align}
\operatorname{Re}s{s'=w} S_{o_1}^c =&-\frac{\varphi(Q) G}{2 \Gamma(k)} \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{1}{2}+\alpha} l_1^{it}l_2^{-it} (4\pi)^k \langle f,f \rangle \log x \cdot b_{l_1,l_2} \notag \\
&+ O(G^{1+\varepsilon} Q^{1+\varepsilon} \mathcal{L}^{1+2\alpha+\varepsilon}) + O(G^{1+\varepsilon} Q^{\half+\varepsilon} \mathcal{L}^{3 + 2\alpha + \varepsilon}) + O(x^{-\varepsilon}), \label{prop_total_cts}
\end{align}
where
\begin{align}
b_{l_1,l_2} = \begin{cases} l_1^{-1} & \text{ if $l_1 = l_2$} \\ (l_1l_2)^{-1} E_{l_1,l_2}(1) & \text{ if $l_1 \neq l_2$} \end{cases} \label{eq_defn_b_l1l2}
\end{align}
\end{proposition}
\begin{remark}
Note the sum of the $(\log x)$-terms above is exactly $-\half$ of the $(\log x)$-terms in $S_d$.
\end{remark}
\begin{proof}
We first write down the residue at $s'=w$:
\begin{align}
\operatorname{Re}s{s'=w} S_{o_1}^c
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \quadinvmellin{\gamma_1}{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \notag \\
&\times \mathcal{V}_{N[l_1,l_2]} M(s,\tfrac{z}{i},\delta) \betaF{w - s + \half - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{w+ \tfrac{k-1}{2}+it} \notag \\
&\times v( w-\half) (xl_1)^{w-\half} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(w,-z) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \,d z \,d \beta \,d s \,d w \label{eq_S_o1c_sp_lead}
\end{align}
At this point, it is natural to move $\operatorname{Re} w$ to $\frac{1}{2}-\varepsilon$, which yields residue terms at $w=1+z$, $w=1-z$, $w=s+\beta-\frac{1}{2}$ and $w=\frac{1}{2}$. The moved integral is $O(x^{-\varepsilon})$.
\subsubsection{The residue at $w=1+z$}
The residue here is:
\begin{align}
&\operatorname{Re}s{w=1+z} \operatorname{Re}s{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \frac{\pi^{\frac{1}{2}-z}}{\Gamma(\frac{1}{2}-z)}\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a=\frac{b}{c}} \left( \frac{(c,\frac{N}{c})}{N\frac{N}{c}}\right)^{\frac{1}{2}-z} \frac{\varphi(\frac{N}{c})}{ \varphi((c,\frac{N}{c}))} \frac{\zeta(1+2z)}{\zeta^{(N)}(1-2z)} \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \notag \\
&\times \betaF{\frac{3}{2}+z - s - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{z+ \tfrac{k+1}{2}+it} M(s,\tfrac{z}{i},\delta) \frac{\Gamma(\beta)\Gamma(G-\beta)}{N^{\frac{1}{2}+z}\Gamma(G)}\notag \\
&\times v( \tfrac{1}{2}+z) (xl_1)^{\half+z} \prod_{p | c} (1-p^{2z}) Q^{-(1+2z)} \prod_{p^\gamma \| Q} (\sigma_{2z}(p^\gamma) - p^{-1} \sigma_{2z}(p^{\gamma-1})) \,d z \,d \beta \,d s\label{eq_S_o1c_sw_lead}
\end{align}
Applying function equation to $\zeta(1+2z)$ and doing some simplifications, we obtain:
\begin{align}
&\operatorname{Re}s{w=1+z} \operatorname{Re}s{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}\pi^{\frac{1}{2}+z}Q^{-1}}{\Gamma(s+k-1) 2\sqrt{\pi}} \!\!\! \prod_{p^\gamma \| Q} \!\! (\sigma_{-2z}(p^\gamma) - p^{-1-2z} \sigma_{-2z}(p^{\gamma-1}))\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a=\frac{b}{c}}\frac{\sqrt{\pi}\Gamma(-z)}{\Gamma(\frac{1}{2}-z)} \left( \frac{(c,\frac{N}{c})}{N\frac{N}{c}}\right)^{\frac{1}{2}-z} \frac{\varphi(\frac{N}{c})}{ \varphi((c,\frac{N}{c}))} \frac{\zeta^{(c)}(-2z)}{\zeta^{(N)}(1-2z)} \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \notag \\
&\times \betaF{\frac{3}{2}+z - s - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{z+ \tfrac{k+1}{2}+it} M(s,\tfrac{z}{i},\delta) \frac{\Gamma(\beta)\Gamma(G-\beta)}{N^{\frac{1}{2}+z}\Gamma(G)\Gamma(\frac{1}{2}+z)}\notag \\
&\times v( \half+z) (xl_1)^{\half+z} \,d z \,d \beta \,d s\label{eq_S_o1c_sw_lead2}
\end{align}
At this point, we use functional equation of Eisenstein series at $0$-cusp \eqref{FE_Eisen_0}, simplifying this further to:
\begin{align}
&\operatorname{Re}s{w=1+z} \operatorname{Re}s{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}\pi^{\frac{1}{2}+z}Q^{-1}}{\Gamma(s+k-1) 2\sqrt{\pi}} \!\!\! \prod_{p^\gamma \| Q} \!\! (\sigma_{-2z}(p^\gamma) - p^{-1-2z} \sigma_{-2z}(p^{\gamma-1}))\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \conj{ \langle U,E_0(*,\half-z) \rangle} \betaF{\frac{3}{2}+z - s - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{z+ \tfrac{k+1}{2}+it} \notag \\
&\times M(s,\tfrac{z}{i},\delta) \frac{\Gamma(\beta)\Gamma(G-\beta)}{N^{\frac{1}{2}+z}\Gamma(G)\Gamma(\frac{1}{2}+z)} v( \half+z) (xl_1)^{\half+z} \,d z \,d \beta \,d s\label{eq_S_o1c_sw_sim}
\end{align}
We now move $\operatorname{Re} z$ to $-\frac{1}{2}-\varepsilon$, picking up poles at $z=s+\beta-\frac{3}{2}$ and $z=\frac{1}{2}-s$. There is technically also a pole at $z=-\frac{1}{2}$, but its residue becomes $0$ when $\delta \to 0$. The moved integral is $O(x^{-\varepsilon})$.
We investigate the residue at $z=s+\beta-\frac{3}{2}$:
\begin{align}
&\operatorname{Re}s{z=s+\beta-\frac{3}{2}} \operatorname{Re}s{w=1+z} \operatorname{Re}s{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \dblinvmellin{\gamma_2}{\gamma_4} \frac{(4\pi)^k 2^{s-\half}\pi^{s+\beta-1}}{\Gamma(s+k-1) 2\sqrt{\pi}} \notag \\
&\times Q^{-1} \prod_{p^\gamma \| Q} (\sigma_{3-2s-2\beta}(p^\gamma) - p^{2-2s-2\beta} \sigma_{3-2s-2\beta}(p^{\gamma-1}))M(s,\tfrac{s+\beta-\frac{3}{2}}{i},\delta)\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \conj{ \langle U,E_0(*,2-s-\beta) \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)v(s+\beta-1)}{N^{s+\beta-1}\Gamma(G)\Gamma(s+\beta-1)} (xl_1)^{s+\beta-1} \,d \beta \,d s\label{eq_S_o1c_key}
\end{align}
We move $\operatorname{Re} \beta$ to $\frac{1}{4}-\varepsilon$, hitting a pole from the $M$-function at $\beta = 2-2s$. Again, there is technically a pole at $\beta=1-s$, but the residue vanishes upon taking $\delta \to 0$. The moved integral is again $O(x^{-\varepsilon})$. Taking $\delta \to 0$, the residue at $\beta = 2-2s$ is:
\begin{align}
&\operatorname{Re}s{\beta=2-2s} \operatorname{Re}s{z=s+\beta-\frac{3}{2}} \operatorname{Re}s{w=1+z} \operatorname{Re}s{s'=w} S_{o_1}^c \notag \\
= &\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \invmellin{\gamma_2} \frac{(4\pi)^k \pi^{1-s}}{2\Gamma(s+k-1)} \notag \\
&\times Q^{-1} \prod_{p^\gamma \| Q} (\sigma_{2s-1}(p^\gamma) - p^{2s-2} \sigma_{2s-1}(p^{\gamma-1})) \frac{\Gamma(2s-1)}{\Gamma(s)}\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \conj{ \langle U,E_0(*,s) \rangle} \frac{\Gamma(2-2s)\Gamma(G-2+2s)v(1-s)}{N^{1-s}\Gamma(G)\Gamma(1-s)} (xl_1)^{1-s} \,d s\label{eq_S_o1c_key2}
\end{align}
We move $\operatorname{Re} s$ up to $1+\varepsilon$, encountering a double pole at $s=1$. The moved integral is $O(x^{-\varepsilon})$. The residue here is:
\begin{align}
&\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{1}{2}+\alpha} l_1^{it}l_2^{-it} \left( - \frac{(4\pi)^k}{4\Gamma(k)} \langle f, f\rangle b_{l_1,l_2} \log(xl_1) + c_1 \right),
\end{align}
where $b_{l_1,l_2}$ is as defined from \eqref{eq_defn_b_l1l2} and
\begin{align*}
c_1 = \operatorname*{Res}_{s=1} \Big(& \frac{ (4\pi)^k \pi^{1-s}}{2\Gamma(s+k-1)} Q^{-1} \prod_{p^\gamma \| Q} (\sigma_{2s-1}(p^\gamma) - p^{2s-2} \sigma_{2s-1}(p^{\gamma-1})) \frac{\Gamma(2s-1)}{\Gamma(s)} \\
&\qquad \qquad \times \mathcal{V}_{N[l_1,l_2]} \conj{ \langle U,E_0(*,s) \rangle} \frac{\Gamma(2-2s)\Gamma(G-2+2s)v(1-s)}{N^{1-s}\Gamma(G)\Gamma(1-s)} \Big)
\end{align*}
The one thing we need to know about $c_1$ is that it is $O(Q^{\varepsilon} G^{\varepsilon} \mathcal{L}^{1-k+\varepsilon})$.
Continuing the investigation of the residue terms, we look at the residue at $z=\frac{1}{2}-s$:
\begin{align}
&\operatorname*{Res}_{z=\frac{1}{2}-s} \operatorname*{Res}_{w=1+z} \operatorname*{Res}_{s'=w} S_{o_1}^c \notag \\
= &\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_2}{\gamma_4} \frac{(4\pi)^k \pi^{1-s}}{2\Gamma(s+k-1)} Q^{-1} \prod_{p^\gamma \| Q} (\sigma_{2s-1}(p^\gamma) - p^{2s-2} \sigma_{2s-1}(p^{\gamma-1}))\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \conj{ \langle U,E_0(*,s) \rangle} \betaF{2 - 2s - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{ \tfrac{k}{2}+1-s+it} \notag \\
&\times \frac{\Gamma(2s-1)}{\Gamma(s)}\frac{\Gamma(\beta)\Gamma(G-\beta)}{N^{1-s}\Gamma(G)\Gamma(1-s)} v(1-s) (xl_1)^{1-s}\,d \beta \,d s\label{eq_S_o1c_sw_2nd}
\end{align}
We move $\operatorname{Re} s$ up to $1+\varepsilon$, hitting a simple pole at $s=1$. The moved integral is $O(x^{-\varepsilon})$. The residue at $s=1$ is:
\begin{align}
&\operatorname*{Res}_{s=1} \operatorname*{Res}_{z=\frac{1}{2}-s} \operatorname*{Res}_{w=1+z} \operatorname*{Res}_{s'=w} S_{o_1}^c \notag \\
= &\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{1}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \invmellin{\gamma_4} \frac{(4\pi)^k}{2\Gamma(k)} \langle f, f\rangle b_{l_1,l_2} \betaF{- \beta}{\beta+ \tfrac{k}{2} +it}{ \tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \notag \\
\ll& Q \mathcal{L}^{1+2\alpha+\varepsilon} G^{1+\varepsilon} \label{eq_S_o1c_sw_2nd_d}
\end{align}
Putting together all the cases here, we have:
\begin{lemma}
For $G \asymp (1+|t|)^\frac{2}{3-2\theta} \log^5 Q$,
\begin{align}
& \operatorname*{Res}_{w=1+z}\operatorname*{Res}_{s' = w} S_{o_1}^c \notag \\
= &-\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{1}{2}+\alpha} l_1^{it}l_2^{-it} (4\pi)^k \langle f,f \rangle \frac{\log x}{4\Gamma(k)} \cdot b_{l_1,l_2} \notag \\
&+ O(G^{1+\varepsilon} Q^{1+\varepsilon} \mathcal{L}^{1+2\alpha+\varepsilon}) + O(x^{-\varepsilon}), \label{prop_w_+z}
\end{align}
where $b_{l_1,l_2}$ is defined as in \eqref{eq_defn_b_l1l2}.
\end{lemma}
\subsubsection{Residue at $w=1-z$}
Looking at the $\zeta_{\mathfrak{a},Q}(w,-z)$, it turns out that the pole only occurs at the $0$-cusp. Hence, the residue term here is:
\begin{align}
&\operatorname*{Res}_{w=1-z} \operatorname*{Res}_{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}Q^{-1}}{\Gamma(s+k-1) 2\sqrt{\pi}} \frac{\pi^{\frac{1}{2}-z}}{\Gamma(\frac{1}{2}-z)} \left( \frac{1}{N}\right)^{\frac{1}{2}-z} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)}\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_0(*,\half+z) \rangle} \prod_{p^\gamma \| Q} (\sigma_{2z}(p^\gamma) - p^{-(1-2z)} \sigma_{2z}(p^{\gamma-1})) \notag \\
&\times \betaF{\frac{3}{2}-z - s - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{-z+ \tfrac{k+1}{2}+it} v( \tfrac{1}{2}-z) (xl_1)^{\half-z} \,d z \,d \beta \,d s \label{eq_S_o1c_sp_lead}
\end{align}
Moving the $z$-line of integration to $-C$, and changing variable from $z \mapsto -z$, we see that this term is exactly the same as the residue at $w=1+z$.
\subsubsection{Residue at $w=s+\beta-\frac{1}{2}$}
\begin{align}
&\operatorname*{Res}_{w=s+\beta-\frac{1}{2}} \operatorname*{Res}_{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)}v( s+\beta-1)\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(s+\beta-\frac{1}{2},-z) M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} (xl_1)^{s+\beta-1} \,d z \,d \beta \,d s \label{eq_S_o1c_sp_be}
\end{align}
Here we move $\operatorname{Re} \beta$ to $\frac{1}{4}-\varepsilon$, picking up a simple pole at $\beta = 1-s$. The moved integral is again $O(x^{-\varepsilon})$. The residue is:
\begin{align}
&\operatorname*{Res}_{\beta=1-s} \operatorname*{Res}_{w=s+\beta-\frac{1}{2}} \operatorname*{Res}_{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_2}{C} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \frac{\Gamma(1-s)\Gamma(G-1+s)}{\Gamma(G)} \notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\frac{1}{2},-z) M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \,d z \,d s \label{eq_S_o1c_sp_be_p}
\end{align}
In order to take the $\delta$-limit, we move $\operatorname{Re} s$ down to $\frac{1}{2}-\frac{k}{2}-\varepsilon$, encountering poles at $s=\frac{1}{2} \pm z - r$, where $r$ is an integer such that $0 \leq r \leq \frac{k}{2}$. For $r$ being such an integer, taking limit as $\delta \to 0$, we have the residue:
\begin{align}
&\left( \operatorname*{Res}_{s=\frac{1}{2}+z-r} + \operatorname*{Res}_{s=\frac{1}{2}-z-r} \right) \operatorname*{Res}_{\beta=1-s} \operatorname*{Res}_{w=s+\beta-\frac{1}{2}} \operatorname*{Res}_{s'=w} S_{o_1}^c \notag \\
= & \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \invmellin{C} \frac{(-1)^r (4\pi)^k}{2r!} \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\frac{1}{2},-z) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \notag \\
&\times \Big( \frac{\Gamma(2z-r) \Gamma(\frac{1}{2}-z+r) \Gamma(\frac{1}{2}-z+r)\Gamma(G-\frac{1}{2}+z-r)}{\Gamma(\frac{1}{2}+z)\Gamma(\frac{1}{2}-z) \Gamma(k-\frac{1}{2}+z-r)\Gamma(G)} \notag \\
&\qquad + \frac{\Gamma(-2z-r) \Gamma(\frac{1}{2}+z+r) \Gamma(\frac{1}{2}+z+r)\Gamma(G-\frac{1}{2}-z-r)}{\Gamma(\frac{1}{2}+z)\Gamma(\frac{1}{2}-z) \Gamma(k-\frac{1}{2}-z-r)\Gamma(G)} \Big)\,d z \label{eq_S_o1c_sp_beps}
\end{align}
We see that there is enough exponential decay to guarantee convergence, and that this term is $O(G^{\frac{1}{2}-r+\varepsilon} Q^{\frac{1}{2}+\varepsilon} \mathcal{L}^{3+2\alpha+\varepsilon})$.
For the moved integral, it is treated in the same way as \eqref{eq_S_o1d_lead_moved}, and can be shown to be $O(Q^{\frac{1}{2}+\varepsilon} G^{\frac{1}{2}-\frac{k}{2}-\varepsilon} \mathcal{L}^{3+2\alpha +\varepsilon})$.
\subsubsection{The residue at $w=\frac{1}{2}$}
The residue is:
\begin{align}
&\operatorname*{Res}_{w=\frac{1}{2}} \operatorname*{Res}_{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}\mathcal{V}_{N[l_1,l_2]}}{\Gamma(s+k-1) 2\sqrt{\pi}} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\frac{1}{2},-z) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle}\notag \\
&\times M(s,\tfrac{z}{i},\delta) \betaF{1 - s - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{\tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d z \,d \beta \,d s \label{eq_S_o1c_sp_Lw}
\end{align}
Now we move $\operatorname{Re} s$ down to $\frac{1}{2}-\frac{k}{2}-\theta-\varepsilon$ to enable us to take the $\delta$-limit. In doing so, we pick up poles at $s=1-\beta$, $s=1-\frac{k}{2}-\beta-it$, and $s=\frac{1}{2}\pm z -r$, where $0 \leq r \leq \frac{k}{2}$ is an integer.
The residue at $s=1-\beta$ is:
\begin{align}
&\operatorname*{Res}_{s=1-\beta} \operatorname*{Res}_{w=\frac{1}{2}} \operatorname*{Res}_{s'=w} S_{o_1}^c \notag \\
= -&\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_4}{C} \frac{(4\pi)^k 2^{\half-\beta}}{\Gamma(k-\beta) 2\sqrt{\pi}} M(1-\beta,\tfrac{z}{i},\delta) \notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\frac{1}{2},-z) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d z \,d \beta \label{eq_S_o1c_sp_Lw_Hs}
\end{align}
Changing variable $\beta =1-s$, we see that the contour line becomes $\operatorname{Re} s = \frac{1}{2}-\theta-\varepsilon$. This is pretty much the same term as \eqref{eq_S_o1c_sp_be_p} and the bound there applies.
Taking the $\delta$-limit, the analysis of the residue at $s=1-\frac{k}{2}-\beta-it$ is very similar to that of \eqref{eq_S_o1d_2ndwLs}, yielding the bound $O(G^{\half+\varepsilon} Q^{\half + \varepsilon} \mathcal{L}^{3 + 2\alpha + \varepsilon})$.
The residues at $s=\frac{1}{2} \pm z -r $ are:
\begin{align}
&\left( \operatorname*{Res}_{s=\frac{1}{2}+z-r} + \operatorname*{Res}_{s=\frac{1}{2}-z-r} \right) \operatorname*{Res}_{w=\frac{1}{2}} \operatorname*{Res}_{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \frac{(4\pi)^k }{2\sqrt{\pi}} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_4}{C} \mathcal{V}_{N[l_1,l_2]}\sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\frac{1}{2},-z)\conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)}\notag \\
&\times \Big( \frac{c_r(z,\delta) 2^{z-r}}{\Gamma(k-\frac{1}{2}+z-r)}\betaF{\frac{1}{2}-z+r - \beta}{z-r + \beta+ \tfrac{k-1}{2}+it}{\tfrac{k}{2}+it} \notag \\
&\qquad + \frac{c_r(-z,\delta) 2^{-z-r}}{\Gamma(k-\frac{1}{2}-z-r)}\betaF{\frac{1}{2}+z+r - \beta}{-z-r + \beta+ \tfrac{k-1}{2}+it}{\tfrac{k}{2}+it} \Big) \,d z \,d \beta \label{eq_S_o1c_sp_Lws}
\end{align}
Similar to the previous section, we can prove the following proposition as in Lemma~\ref{propo_crazy} with slight modifications:
\begin{lemma} \label{propo_crazy3}
For $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$ and $\operatorname{Re} \beta = a = \frac{3}{2} + 2r + \varepsilon$,
\begin{align}
&\lim_{\delta \to 0} G \dblinvmellin{a}{C} \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\half,-z) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)}\notag \\
&\times \Big( \frac{2^{z-r}c_r(z,\delta)}{2\sqrt{\pi}\Gamma(k-\half + z -r)} \betaF{\half - z + r - \beta}{ \beta + \tfrac{k-1}{2} + z - r+it}{\tfrac{k}{2}+it} \notag \\
&\phantom{\times} + \frac{2^{-z-r}c_r(-z,\delta) }{2\sqrt{\pi}\Gamma(k-\half - z -r)} \betaF{\half + z + r -\beta}{ \beta + \tfrac{k-1}{2} - z - r+it}{\tfrac{k}{2}+it} \Big) \,d z \,d \beta \notag \\
\ll & G^{1+\varepsilon} \mathcal{L}^{1-k+\varepsilon} Q^{-\half + \varepsilon} \times [l_1,l_2]^{-\frac{1}{2}} \label{eq_cts_res_bound}
\end{align}
\end{lemma}
By the same arguments that we made after proving Lemma~\ref{propo_crazy}, we can conclude that the double integral \eqref{eq_S_o1c_sp_Lws} is $O(G^{1+\varepsilon} Q^{\half + \varepsilon} \mathcal{L}^{2 + 2\alpha + \varepsilon})$.
For the moved integral, we estimate with the following lemma, which can be proved in the same way as Lemma~\ref{propo_crazy}:
\begin{lemma} \label{propo_crazy4}
For $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$ and $\operatorname{Re} \beta = a = \frac{5}{2} + k + 2\varepsilon$,
\begin{align}
&\lim_{\delta \to 0} G \triinvmellin{\gamma'_2}{\gamma_4}{C} \frac{(4\pi)^k \mathcal{V}_{N[l_1,l_2]} 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \notag \\
&\qquad \times \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\half,-z) M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \notag \\
&\qquad \times \betaF{1 - s-\beta}{s +\beta+\tfrac{k}{2} - 1+it}{\tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d z \,d \beta \,d s \notag \\
\ll & G^{1+\varepsilon} \mathcal{L}^{1-k+\varepsilon} Q^{-\half+\varepsilon} [l_1,l_2]^{-\frac{1}{2}}
\end{align}
\end{lemma}
By the same arguments that we made after stating Lemma~\ref{propo_crazy2}, we can conclude that the moved triple integral \eqref{eq_S_o1c_sp_Lw} is $O(G^{1+\varepsilon} Q^{\half+\varepsilon} \mathcal{L}^{2+2\alpha+\varepsilon})$.
All these results combined together give Proposition~\ref{propo_total_cts}.
\end{proof}
\subsection{The residue at $s'=1 \pm z$}
Calculating the contribution of the residue at $s'=1+z$ is very similar to the subcase $w=1+z$ in the previous section, except that we do not have any poles coming from the $v$ functions. As such, we will omit the details and only state the result here:
\begin{lemma}
For $G \asymp (1+|t|)^\frac{2}{3-2\theta} \log^5 Q$,
\begin{align}
& \operatorname*{Res}_{s' = 1+z} S_{o_1}^c \ll G^{1+\varepsilon} Q^{1+\varepsilon} \mathcal{L}^{1+2\alpha+\varepsilon} + x^{-\varepsilon}, \label{prop_s_+z}
\end{align}
\end{lemma}
The residue at $s'=1-z$ can be shown to be the same as $s'=1+z$, and hence has the same contribution as above.
\subsection{The residue at $s'=s+\beta-\frac{1}{2}$}
The residue is:
\begin{align}
&\operatorname*{Res}_{s'=s+\beta-\frac{1}{2}} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \quadinvmellin{\gamma_1}{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(s+\beta-\frac{1}{2},-z) M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \notag \\
&\times v( s+\beta-\frac{1}{2}-w) v( w-\half) x^{s+\beta-1} l_2^{s+\beta-\frac{1}{2}-w} l_1^{w-\half} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d z \,d \beta \,d s \,d w \label{eq_S_o1c_Lsp}
\end{align}
Moving the line of integration of $\operatorname{Re} \beta$ to $\frac{1}{4}-\varepsilon$ does not pass through any poles, and hence this is $O(x^{-\varepsilon})$.
\section{Proof of Theorem \ref{thm_main}}
Putting the results of the sections together (in particular, Propositions~\ref{eq_S_d1_prop}, \ref{eq_S_d2_propo}, \ref{eq_S_o1d_Zres_propo}, \ref{eq_S_o1d_Z_sp2}, and \ref{propo_total_cts}), we obtain that $S$ as defined in \eqref{eq_S_defn} has the following bound:
\[ S = O(G^{1+\varepsilon} Q^{1+\varepsilon} \mathcal{L}^{1+2\alpha + \varepsilon}) + O(G^{1+\varepsilon} Q^{\half + \theta+\varepsilon} \mathcal{L}^{3+2\alpha+ \varepsilon}) + O(x^{-\varepsilon}), \]
where $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$ and $\alpha = \frac{1}{\log (Q(1+|t|))}$.
Plugging this into the right-hand side of Proposition~\ref{propo_amp}, we have:
\begin{align*}
&|L(\half + it, f_\chi)|^2 |\sum_{l \sim \mathcal{L}} 1|^2 \\
\ll & (1+|t|)^{\frac{2}{3-2\theta}+\varepsilon} \left( Q^{1+\varepsilon} \mathcal{L}^{1+2\alpha + \varepsilon} + Q^{\half +\theta+ \varepsilon} \mathcal{L}^{3+2\alpha+ \varepsilon} \right) + x^{-\varepsilon} + Q\mathcal{L} + (1+|t|)^\varepsilon Q^{1+\varepsilon}
\end{align*}
Taking $x \to \infty$, we can drop the $x$-term above. Note that
\[ \sum_{\substack{l \sim \mathcal{L} \\ l \text{ prime} \\ (l,QN) = 1}} 1 \asymp \frac{\mathcal{L}}{\log \mathcal{L}}. \]
Hence, we can conclude that:
\begin{align*}
|L(\half + it, f_\chi)|^2 \ll &(1+|t|)^{\frac{2}{3-2\theta}+\varepsilon} \left( Q^{1+\varepsilon} \mathcal{L}^{-1+2\alpha + \varepsilon} + Q^{\half +\theta+ \varepsilon} \mathcal{L}^{1+2\alpha+ \varepsilon} \right)
\end{align*}
In order to balance the effects of the two terms, we set $\mathcal{L} = Q^{\frac{1}{4} - \frac{\theta}{2} + \varepsilon}$. Recalling $\alpha = \frac{1}{\log (Q(1+|t|))}$, we have:
\[ |L(\half + it, f_\chi)|^2 \ll (1+|t|)^{\frac{2}{3-2\theta}+\varepsilon} Q^{\frac{3}{4}+\frac{\theta}{2}+\varepsilon} \]
Taking square roots of the above, this is the theorem.
\end{document} |
\begin{document}
\title{An Extreme Value Bayesian Lasso for the Conditional Left and Right Tails}
\begin{abstract}\footnotesize
We introduce a novel regression model for the conditional
{left and right tail} of a possibly heavy-tailed response. The proposed model can
be used to learn the effect of covariates on an extreme value setting
via a Lasso-type specification based on a Lagrangian restriction. Our
model can be used to track if some covariates are significant for the
{lower values}, but not for the {(right)} tail---and vice-versa; in addition to this, the
proposed model bypasses the need for conditional threshold selection
in an extreme value theory framework. We assess the finite-sample
performance of the proposed methods through a simulation study that
reveals that our method recovers the true conditional distribution
over a variety of simulation scenarios, along with being accurate on
variable selection. Rainfall data are used to showcase how the
proposed method can learn to distinguish between key drivers of
moderate rainfall, against those of extreme rainfall. \\
\noindent \textsc{key words:} Conditional tail; Extended Generalized Pareto
distribution; Heavy-tailed response; Lasso; $L_1$-Penalization;
Nonstationary extremes; Statistics of extremes; Variable selection.
\end{abstract}
\section{\large{\textsf{INTRODUCTION}}}\label{introduction}
Learning about {the drivers} of risk is key {in} a variety of
fields, including climatology, environmental sciences,
finance, forestry, and hydrology. Mainstream approaches for learning
about such drivers or predictors of risk include, for instance,
\cite{davison1990}, \cite{chavez-demoulin2005}, \cite{eastoe2009},
\cite{wang2009}, \cite{chavez-demoulin2016}, and \cite{huser2016}.
{The} main contribution of this paper rests on a Bayesian regression model
for the conditional {lower values (i.e.}{,}{~left tail)} and conditional {(right)} tail of a possibly heavy-tailed
response. Our model pioneers the development of regression methods in
an extreme value framework that allow for some covariates to be
significant for the {lower values} but not for the tail {(and vice-versa)}. Some
further comments on the proposed model are in order. First, the
proposed model bypasses the need for (conditional) threshold
selection; such selection is particularly challenging {in} a regression
framework as it entails selecting a function of the covariate (say,
$u_{\mathbf{x}}$) rather than a single scalar. Second, our method models both
the conditional {lower values} and the conditional tail, {offering} a full portrait of the conditional distribution, while still {being} able to extrapolate beyond observed data into the conditional
tail. Finally, our method is directly tailored for variable selection
in an extreme value framework; in particular, it can be used to track
what covariates are significant for the {lower values} and tails.
In an extreme value framework, interest focuses on modelling the most
extreme observations{,} disregarding the central part of the
distribution. Usually, {efforts center on modelling} {extreme values using the} {generalized extreme value distribution or the generalized Pareto distribution} \citep[][]{embrechts1997, coles2001,
beirlant2004}. {Many} observations are disregarded using
the latter approaches, and the choice of the block size or threshold
are far from straightforward. Moreover, in many situations of applied
interest{,} it would be desirable to model both the {lower values} of the data
along with the extreme values in a regression framework. Our model
builds over \cite{papas2013} and \cite{naveau2016} who proposed an
{extended generalized Pareto distribution} (EGPD) {to jointly model}
low, moderate and extreme observations---without the need of threshold
selection; other interesting options for modeling both the bulk and
the tail of a distribution, include extreme value mixture models
\citep[e.g.{,}][]{frigessi2002, behrens2004, carreau2009, cabras2011,
macdonald2011, nascimento2012} {as well as composition-based approaches \citep{stein2020, stein2021}.}
The proposed model can be regarded as a Bayesian Lasso-type model for
the {lower values} and tail of a {possibly heavy-tailed response supported on the positive real line}. The Bayesian Lasso was
introduced by \cite{park2008} as a Bayesian version of Tibshirani's
Lasso \citep{tibshirani1996}. Roughly speaking, the {frequentist}
Lasso is a regularization method, that shrinks some regression
coefficients, and sets others to {zero; as such it is naturally tailored for variable selection}. {The frequentist
Lasso solves a least squares type of optimization problem, where an}
{$L_1$-penalization} {is incorporated via a constraint.} {The
starting point for the Bayesian Lasso is the fact that the posterior
mode generated from a Laplace prior coincides with the solution to
the Lasso least squares optimization problem with} {$L_1$-penalization}
{\citep[][Section~4.2.3]{reich2019}. It should be noted however that
the Bayesian Lasso does not set coefficients to zero.}
{Shrinkage in a Bayesian context has been traditionally achieved
via priors with peaks at zero such as the Laplace.
Other examples of continuous shrinkage priors include the Horseshoe
\citep{carvalho2010}, Dirichlet--Laplace \citep{bhat2015}, and R2D2
\citep{zhang2020}.} {There are also} {discrete shrinkage priors
\citep{george1993} which are arguably more interpretable. Despite
all these considerable advances in Bayesian variable selection, the
Bayesian Lasso is simply one way to go, one that is associated}
{$L_1$-penalization}{, but other priors could be used to meet a similar
target, leading to other geometries for the corresponding
penalization.} For a recent review of Bayesian regularization methods{,} see
\cite{polson2019}.
As we clarify below (Section~\ref{first}), our
model has also {links} with quantile regression. Indeed, by
modeling both the conditional {lower values} and the conditional tail, our model
bridges quantile regression \citep{koenker1978} with
extremal quantile regression \citep{chernozhukov2005}. Finally, as a
byproduct, this paper contributes to the literature on Bayesian
inference for Pareto distributions
\citep{arnold1989, de2003, castellanos2007, de2010, villa2017}.
The rest of this paper unfolds as follows. In Section~\ref{methods} we
introduce the proposed methods. {We assess} the
performance of the proposed methods {on simulated examples} {in Section~\ref{simulation}}, and report the main findings of
our numerical studies. A data illustration is included in
Section~\ref{application}.
{\section{\large\textsf{EXTREME VALUE BAYESIAN LASSO}}\label{methods}}
To streamline the presentation, the proposed methods are introduced in
a step-by-step fashion, with the most flexible version of our model
being introduced in Section~\ref{extension}.
{\subsection{\large\textsf{THE EXTENDED GENERALIZED PARETO FAMILY}}\label{egpd}}
Our starting point for modeling is the so-called extended generalized
Pareto distribution (EGPD), as proposed in \cite{papas2013} and
\cite{naveau2016}{; the EGPD is a distribution over the positive real line, whose} cumulative distribution function {is}
$F(y) = {G\{H(y)\}}$,
{where $G: [0, 1] \to [0, 1]$ is a carrier function, {which tilts} the generalized Pareto distribution (GPD) function}
\begin{equation*}
H(y)=
1-\left( 1+\frac{\xi y}{\sigma}\right)
^{-1 / \xi},
\end{equation*}
defined on $\{y \in (0, \infty): 1 + \xi y / \sigma > 0\}$.
Here, $\sigma>0$ is a scale parameter, $\xi \in \mathbb{R}$ is the shape
parameter; the case $\xi = 0$ should be understood
by taking the limit $\xi \to 0$. The
shape parameter $\xi$ is {termed} extreme value index and it
controls the rate of decay of the tail
. Following
\cite{naveau2016}, we assume that the carrier function $G$ obeys the following conditions:
\begin{itemize}
\item[A.] $\underset{v \to 0^+}{\lim}{\{1 - G(
1-v)\}/v=a}$, with $a>0$.
\item[B.] $\underset{v \to 0^+}{\lim} {G\left\lbrace v\,w(v)\right\rbrace /G(v)=b}$, with $b>0$ and $w(v) > 0$ {such that} $w(v)=1+o(v)$ as $v\rightarrow 0^+$.
\item[C.] $\underset{v \to 0^+}{\lim}{G(v)/v^{\kappa}=c}$, with $c>0$.
\end{itemize}
Assumption~A ensures a Pareto-type tail, whereas Assumptions B--C
ensure that the {lower values are} driven by the carrier $G$. {In more detail,
Assumption~A implies that
\begin{equation*}
\lim_{y \to y^*} \frac{1 - F(y)}{1 - H(y)} = a,
\end{equation*}
and thus it can be understood as a tail-equivalence
condition \citep[Section~3.3]{embrechts1997}, where $y^* = \inf\{y:
F(y) < 1\}$ is the so-called right endpoint, here assumed to be positive.
Since tail-equivalence implies that both $F$ and $H$ are {in} the same
domain of attraction \citep[]{resnick1971}, it follows from Assumption~A
that $\xi$ can be literally interpreted as the extreme value index
of $F(y) = {G\{H(y)\}}$.} {We note further that Assumption~C implies that
small values follow a Weibull type GPD, and that the role and need of
Assumption B is in fact questionable \citep{tencaliec2019}; actually, the
latter paper makes no reference to it.}
For parsimony reasons{,} we focus on modeling $G$ using a parametric
class, so that $G(v) \equiv G_{\ensuremath{\bm\kappa}}(v)$, with
$\ensuremath{\bm\kappa} \in \mathbb{R}^q$. The canonical example of a parametric
carrier is $G_\kappa(v) = v^{\kappa}$, with $\kappa > 0$
{controlling} the shape of the lower tail, with a larger value of
$\kappa$ leading to less mass close to zero; we refer to the EGPD
distribution with the latter carrier as the canonical EGPD.
Below, we use the notation $Y \sim \text{EGPD}_G(\ensuremath{\bm\kappa}, \sigma, \xi)$ {to}
denote that $Y$ follows an EGPD with parameters $(\ensuremath{\bm\kappa}, \sigma, \xi)$
{and} carrier $G${, and $\mathscr{G}$ to} represent the
space of all carrier functions $G: (0, \infty) \to [0, 1]$ obeying
Assumptions~{A--C}.
\subsection{\large\textsf{A FIRST CONDITIONAL MODEL FOR THE {LOWER VALUES} AND TAIL OF A POSSIBLY HEAVY-TAILED RESPONSE}}\label{first}
The first version of our model {specifies} the conditional distribution function
\begin{equation}\label{model}
F(y \mid \mathbf{x}) = G_{\ensuremath{\bm\kappa}(\mathbf{x})}(H(y)),
\end{equation}
where $\mathbf{x} = (x_1, \dots, x_p) \in \mathbb{R}^p$ is a vector of covariates {and} $\ensuremath{\bm\kappa}(\mathbf{x})$ is a {vector-valued} function with components given by inverse link functions
\begin{equation}\label{links0}
\ensuremath{\bm\kappa}(\mathbf{x}) ={ [\kappa_1(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}}\ensuremath{\bm\beta}_1), \dots, \kappa_q(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}}\ensuremath{\bm\beta}_q)],}
\end{equation}
and $G_{\ensuremath{\bm\kappa}(\mathbf{x})} \in \mathscr{G}$, for every $\mathbf{x}
\in \mathcal{X} \subseteq \mathbb{R}^p$; here and below, $\ensuremath{\bm\beta}_j =
(\beta_{1, j}, \dots, \beta_{p, j}) \in \mathbb{R}^p$, for $j = 1,
\dots, q$.
For reasons that will become evident {later}, {we do not allow for now} $(\sigma, \xi)$ to depend on the {covariates} $\mathbf{x}$, but we will extend the specification in \eqref{model} {in Section}~\ref{extension}. {The next examples} {illustrate} {that different types of carrier functions exist, and {thus that} there exist different forms of tilting the GPD distribution function via a carrier function; the examples also highlight that, given the freedom to choose a $G$ obeying Assumptions~A--C, the model is quite flexible and that thus it would be challenging to numerically illustrate all its instances.}
\begin{example}[Power carrier, exponential link]\label{powerc}\normalfont
The canonical embodiment of the first version of our model in \eqref{model} is
obtained by specifying
\begin{equation}\label{Gx}
G_{\kappa(\mathbf{x})}(v) = v^{\kappa(\mathbf{x})}, \quad \kappa(\mathbf{x}) = \exp(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\beta}).
\end{equation}
\end{example}
\begin{example}[Beta carrier, exponential link]\label{betac}\normalfont
Another variant of \eqref{model} is obtained by specifying
\begin{equation}\label{Gx}
G_{\kappa(\mathbf{x})}(v) = 1 - Q_{\kappa(\mathbf{x})}(1 - v^{\kappa(\mathbf{x})}), \quad \kappa(\mathbf{x}) = \exp(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\beta}),
\end{equation}
where
\begin{equation*}
Q_{\kappa(\mathbf{x})}(v) = \frac{1 + \kappa(\mathbf{x})}{\kappa(\mathbf{x})} v^{1 / \kappa(\mathbf{x})} \bigg(1 - \frac{v}{1 + \kappa(\mathbf{x})} \bigg)
\end{equation*}
is the distribution function of a Beta distribution with parameters $(1 / \kappa(\mathbf{x}), 2)$.
\end{example}
\begin{example}[Power mixture carrier, exponential links]\label{powerm}\normalfont
Still another variant of {\eqref{model}} is obtained by specifying
\begin{equation*}
G_{\ensuremath{\bm\kappa}(\mathbf{x})}(v) = \pi v^{\kappa_1(\mathbf{x})} +
(1 - \pi) v^{\kappa_2(\mathbf{x})},
\end{equation*}
with $0 < \pi < 1$ and
\begin{equation*}
\kappa_1(\mathbf{x}) = \exp(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\beta}_1), \quad
\kappa_2(\mathbf{x}) = \exp(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\beta}_2).
\end{equation*}
The conditions on the intercepts $\beta_{1,1} > \beta_{1,2}$ and $\beta_{j,1} =
\beta_{j,2}$, for $j = 2, \dots, q$, lead to $\kappa_1(\mathbf{x}) >
\kappa_2(\mathbf{x})$ and are used for {identifiability} purposes.
\end{example}
\noindent A consequence of \eqref{model} is that
\begin{equation} \label{qr2}
F^{-1}(p \mid \mathbf{x}) =
\begin{cases}
\frac{\sigma}{\xi} [\{1 - G^{-1}_{\ensuremath{\bm\kappa}(\mathbf{x})}(p)\}^{-\xi} - 1], & \xi \neq 0,\\
- \sigma \log\{1 - G^{-1}_{\ensuremath{\bm\kappa}(\mathbf{x})}(p)\}, & \xi = 0,
\end{cases}
\end{equation}
where $F^{-1}(p \mid \mathbf{x}) = \inf\{y: F(y \mid \mathbf{x}) \geq p\}$, for $0 < p < 1$. Equation~\eqref{qr2} warrants some remarks on links with quantile regression. The first version of our model in \eqref{model} can be regarded as a model that bridges quantile regression \citep{koenker1978} with extremal quantile regression \citep{chernozhukov2005}, in the sense that it offers a way to model both moderate and high quantiles. Quantile regression \citep{koenker2005} allows for each $\tau$th conditional quantile to have its own slope $\ensuremath{\bm\beta}_\tau$, according to the following linear specification
\begin{equation}\label{qr}
F^{-1}(\tau \mid \mathbf{x}) = \mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}}\ensuremath{\bm\beta}_\tau,
\quad 0 < \tau < 1.
\end{equation}
In the same way that high empirical quantiles fail to
extrapolate into the tail of a distribution, the standard version of
quantile regression in \eqref{qr} is unable to extrapolate into the
tail of the conditional response.
{We describe next} Bayesian Lasso modeling and {tackle} inference for the first version of our model.
For parsimony, below we will focus on the version of the model that sets $q = 1$ in \eqref{links0}, so that $\kappa(\mathbf{x}) = {\kappa_1}(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\beta})$. {We underscore that what is studied in the next section applies with some minor modifications to the full model presented above; the focus on $q = 1$ (to which Examples 1--2 apply) in the next section eases however notation, and it obviates the need for discussing again the {identifiability} issues that we have alluded to already in Example~3.}
\subsection{\large\textsf{REGULARIZATION AND BAYESIAN INFERENCE}}\label{inference}
Let $\{(\mathbf{x}_i, y_i)\}_{i = 1}^n$ be a random sample from
$F(\mathbf{x}, y)$. We propose a Bayesian Lasso-type specification
for the model in \eqref{model} so {as} to regularize the
log likelihood. Specifically, let $G_{\kappa(\mathbf{x})} \in \mathscr{G}$, with $\kappa(\mathbf{x})$ being a function of $\mathbf{x}$, so that the likelihood becomes
\begin{equation*}
L(\ensuremath{\bm\beta}, \sigma, \xi) = \prod_{i = 1}^n h(y_i) g_{\ensuremath{\bm\kappa}(\mathbf{x}_i)}{\{H(y_i)\}},
\end{equation*}
where $\ensuremath{\bm\beta} = (\ensuremath{\bm\beta}_1, \dots, \ensuremath{\bm\beta}_q)$, $h(y) = {\sigma^{-1}} (1 + \xi y / \sigma)_{{+}}^{-1/\xi - 1}$ is the density of the generalized Pareto distribution, $g_{\ensuremath{\bm\kappa}(\mathbf{x})} = \ensuremath{\mathrm{d}} G_{\ensuremath{\bm\kappa}(\mathbf{x})} / \ensuremath{\mathrm{d}} y$, and {$(a)_{+} = \max(0, a)$ is the positive part function.}
In a Bayesian context{,} {the constrained optimization problem underlying the Lasso can be equivalently rewritten as the} posterior mode {of regression parameters, provided that one assumes that those} {are (a priori)} independent {with} identical Laplace priors (i.e.{,}~double exponential), that
is, {$\pi(\ensuremath{\bm\beta}) \propto \prod_{j=1}^{q} \text{e}^{-\lambda / 2 \, |\beta_j|}$}; see \cite{tibshirani1996} and \citet[][Section~4.2.3]{reich2019}. Following \cite{park2008}, we assume a Gamma prior on $\lambda^2$. The hierarchical representation of the first version of our Bayesian Lasso conditional EGPD for a heavy-tailed response is thus:
\begin{mdframed}
\textbf{Extreme Value Bayesian Lasso for the Conditional {Lower Values} and Tail}\\
(1st version)
\begin{enumerate}
\item \textbf{Likelihood}
\vspace{-0.4cm}
\begin{equation*}
y_i \mid \mathbf{x}_i,\ensuremath{\bm\beta},\sigma, \xi, \lambda^2 \sim {\text{EGPD}_G[\kappa(\mathbf{x}_i), \sigma, \xi]}, \quad i = 1, \dots, n,
\end{equation*}
\begin{equation}\label{links}
\kappa(\mathbf{x}) = k(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}}\ensuremath{\bm\beta}).
\end{equation}
\vspace{-1cm}
\item \textbf{Priors}
\vspace{-0.2cm}
\begin{equation*}
\begin{cases}
\begin{split}
{\ensuremath{\bm\beta}_j} &\mid \lambda \overset{\text{iid}}{\sim} \Lap(\lambda), \\
\lambda^2 &\sim \Ga(a_{\lambda},b_{\lambda}), \\
\sigma &\sim \Ga(a_\sigma,b_\sigma), \\
\xi &\sim \, \pi_\xi.
\end{split}
\end{cases}
\end{equation*}
\end{enumerate}
\end{mdframed}
{When the goal is to set a prior on the space of heavy-tailed distributions, then it may be sensible} to set
$\pi_\xi = \text{Gamma}(a_\xi, b_\xi)$, whereas $\pi_\xi = \text{N}(\mu_\xi, \sigma_\xi)$ may be sensible if all domains of attraction are equally likely a priori. Since the posterior has no closed{-}form expression{,} we resort to {Markov Chain
Monte Carlo (MCMC)} methods to sample {from the posterior. }
{A} shortcoming of the first version of the {model} is that it only allows for the effect
of covariates on the {lower values}, but not on the tail; this is a consequence
of the fact that only the part of the model that drives the {lower values},
$\kappa(\textbf{x})$, is indexed by a covariate. {This issue is
addressed in the next section.}
\subsection{\large\textsf{EXTENSIONS FOR COVARIATE-ADJUSTED TAIL}}\label{extension}
In practice some covariates can be significant for the {lower values} but not
for the tail---or the other way around. Thus, we extend the specification from
Sections~\ref{first}--\ref{inference} by also allowing parameters
underlying the tail {to depend on covariates}. Specifically, we
consider the following specification:
\begin{equation}\label{egpdx}
F(y \mid \mathbf{x})=G_{\kappa(\mathbf{x})}{\{H(y \mid \mathbf{x})\}},
\end{equation}
where $H(y \mid \mathbf{x})$ is a reparametrized conditional
generalized Pareto distribution, with parameters $\nu(\mathbf{x}) = \sigma(\mathbf{x}) \{1 + \xi(\mathbf{x})\}$ and $\xi(\mathbf{x})$, that is
\begin{equation*}
H(y \mid \mathbf{x})= 1-\left[ 1+\frac{\xi(\mathbf{x})\{1 + \xi(\mathbf{x})\} y}{\nu(\mathbf{x})}\right]^{-1 / \xi(\mathbf{x})}.
\end{equation*}
Here, $\kappa(\mathbf{x})$ is a function as in \eqref{links}
whereas $\nu(\mathbf{x}) = {\phi}(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}}\ensuremath{\bm\alpha})$ and $\xi(\mathbf{x}) =
\mu(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}}\ensuremath{\bm\gamma})$, with {$\phi$} and $\mu$ being inverse-link functions.
The canonical embodiment of the full version of our model is obtained by
specifying $G_{\kappa(\mathbf{x})}(v) = v^{\kappa(\mathbf{x})}$ along
with
\begin{equation*}\label{Gx2}
\kappa(\mathbf{x}) = \exp(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\beta}), \quad
\nu(\mathbf{x}) = \exp(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\alpha}), \quad
\xi(\mathbf{x}) = {\exp(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\gamma})},
\end{equation*}
\noindent {assuming $\xi(\mathbf{x}) > 0$.} The schematic representation below summarizes our model:
\begin{mdframed}
\textbf{Extreme Value Bayesian Lasso for the Conditional {Lower Values} and Tail}\\
(2nd version)
\begin{enumerate}
\item \textbf{Likelihood}
\vspace{-0.4cm}
\begin{equation*}
y_i \mid \mathbf{x}_i,\ensuremath{\bm\alpha}, \ensuremath{\bm\beta}, \ensuremath{\bm\gamma}, \lambda_\alpha \sim \text{EGPD}_G{[}\kappa(\mathbf{x}_i), \sigma(\mathbf{x}_i), \xi(\mathbf{x}_i){]}, \quad i = 1, \dots, n,
\end{equation*}
\begin{equation}\label{links2}
{
\kappa(\mathbf{x}) = k(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\beta}), \quad
\nu(\mathbf{x}) = \ell(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\alpha}), \quad
\xi(\mathbf{x}) = \mu(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\gamma}).\\
}
\end{equation}
\vspace{-1cm}
\item \textbf{Priors}
\begin{equation*}
{
\begin{cases}
\begin{split}
{\beta_i} &\mid {\lambda_\beta} \overset{\text{iid}}{\sim} \Lap({\lambda_\beta}), \quad {\lambda_\beta \sim \Ga(a_\beta, b_\beta)}, \\
{\alpha_i} &\mid {\lambda_\alpha} \overset{\text{iid}}{\sim} \Lap({\lambda_\alpha}), \quad {\lambda_\alpha \sim \Ga(a_\alpha, b_\alpha)}, \\
{\gamma_i} &\mid {\lambda_\gamma} \overset{\text{iid}}{\sim} \Lap({\lambda_\gamma}), \quad {\lambda_\gamma \sim \Ga(a_\gamma, b_\gamma)}.\\
\end{split}
\end{cases}
}
\end{equation*}
\end{enumerate}
\end{mdframed}
\noindent The specification in \eqref{egpdx} warrants some remarks:
\begin{enumerate}
\item \textit{The need for a reparametrization}: While it would seem
natural to simply index $(\sigma, \xi)$ with a covariate and to
proceed as in Sections~\ref{first}--\ref{inference}, similarly to
\cite{chavez-demoulin2005} and \cite{cabras2011}, who work in a GPD
setting, we found that approach to be computationally unstable; in
particular, in our case the latter parametrization leads to poor
mixing and to small effective sample sizes.
\item \textit{Any {potential} reparametrization should keep parameters
for the {lower values} and tails separated}: Since we aim to learn which covariates
are important for the {lower values} and tail, any reparametrization to be made
should not mix the parameters for the conditional {lower values} and tail, i.e., we look for
reparametrizations of the type
\begin{equation*}
(\kappa, \sigma, \xi) \mapsto {\{}\kappa, \nu(\sigma, \xi), \xi{\}}
\quad \text{or} \quad
(\kappa, \sigma, \xi) \mapsto {\{}\kappa, \sigma, \zeta(\sigma, \xi){\}}.
\end{equation*}
\item \textit{Fisher information}: Parameter orthogonality
\citep[][Section~9.2]{young2005} requires the computation of the
Fisher information matrix. {The} canonical EGPD
is a particular case of the so-called {generalized} Feller--Pareto distribution \citep{kleiber2003}, with $(a, b, p, q, r) = (1, \sigma / \xi, \kappa, 1, 1 / \xi)$, as
\begin{equation*}
h_{\text{GFP}}(y) = \frac{a r y^{a - 1}}{b^a B(p, q)} \left\{1 + \left(\frac{y}{b}\right)^a\right\}^{-r q - 1}
\left[1 - \left\{1 + \left(\frac{y}{b}\right)^a\right\}^{-r} \right]^{p - 1}, \quad y > 0,
\end{equation*}
where $B(p, q) = \int_0^1 t^{p - 1} (1 - t)^{q - 1} \, \ensuremath{\mathrm{d}} t$,
the entries of the Fisher information matrix follow from
\citet[][Section~3]{mahmoud2015}; yet the matrix is rather intricate
and thus we decided to look for a more sensible way to proceed than
attempting to achieve parameter orthogonality.
\mathrm{e}nd{enumerate}
The reparametrization $(\sigma, \xi) \mapsto (\sigma(1 + \xi), \xi)$
is orthogonal for the case of the GPD {for $\xi > - 1 / 2$} \citep{chavez-demoulin2005}, but it is only approximately
orthogonal for the EGPD in a neighborhood of $\kappa = 1$. Heuristically, this can
be seen by contrasting the likelihood of the canonical EGPD ($l$) with
that of the GPD ($l^*$); to streamline the argument, we concentrate on the single
observation case $(y)$ but the derivations below {hold} more generally. The
starting point is that
\begin{equation}\label{proxl}
l(\ensuremath{\bm\psi}) = l(\kappa, \ensuremath{\bm\theta}) = l^*(\ensuremath{\bm\theta}) + \log \kappa + (\kappa - 1) \log\{H(y)\},
\end{equation}
{where} $\ensuremath{\bm\psi}= (\sigma, \xi, \kappa)$ and $\ensuremath{\bm\theta} = (\sigma, \xi)$,
and thus it follows from \eqref{proxl} that ${l(\ensuremath{\bm\psi})} \approx l^*(\ensuremath{\bm\theta})$, in a neighborhood of $\kappa = 1$; a similar relation holds for the corresponding Fisher informations,
\begin{equation*}
I_{\ensuremath{\bm\psi}} = -\E_{\ensuremath{\bm\psi}} \left(\frac{\partial^2 l}{\partial \ensuremath{\bm\psi} \partial \ensuremath{\bm\psi}^{\ensuremath{\mathrm{\scriptscriptstyle T}}}} \right)
= \left(
\begin{matrix}
I_{\ensuremath{\bm\theta}} & I_{\ensuremath{\bm\theta}, \kappa} \\
I_{\kappa, \ensuremath{\bm\theta}} & I_{\kappa}
\end{matrix}
\right), \quad
I_{\ensuremath{\bm\theta}}^* = -\E_{\ensuremath{\bm\theta}} \left(\frac{\partial^2 l^*}{\partial \ensuremath{\bm\theta} \partial \ensuremath{\bm\theta}^{\ensuremath{\mathrm{\scriptscriptstyle T}}}} \right),
\end{equation*}
where $\E_{\ensuremath{\bm\theta}}(\cdot) = \int \cdot ~h_{\ensuremath{\bm\theta}}(y) \, \ensuremath{\mathrm{d}} y$ and $\E_{\ensuremath{\bm\psi}}(\cdot) = \int \cdot ~h_{\ensuremath{\bm\theta}}(y) g{\{}H_{\ensuremath{\bm\theta}}(y){\}}\, \ensuremath{\mathrm{d}} y$. {Indeed,} matrix calculus \citep[e.g.,][Chapter~9]{schott2016} can be used to show that
\begin{equation*}
I_{\ensuremath{\bm\theta}} = -\E_{\ensuremath{\bm\psi}}\left(\frac{\partial^2 l^*}{\partial \ensuremath{\bm\theta} \partial \ensuremath{\bm\theta}^{\ensuremath{\mathrm{\scriptscriptstyle T}}}}\right) - (\kappa - 1) \E_{\ensuremath{\bm\psi}}(M_{\ensuremath{\bm\theta}}),
\end{equation*}
where
\begin{equation*}
M_{\ensuremath{\bm\theta}} = \left(H \frac{\partial^2 H}{\partial \ensuremath{\bm\theta} \partial \ensuremath{\bm\theta}^{\ensuremath{\mathrm{\scriptscriptstyle T}}}} - \frac{\partial H}{\partial \ensuremath{\bm\theta}} \frac{\partial H}{\partial \ensuremath{\bm\theta}^{\ensuremath{\mathrm{\scriptscriptstyle T}}}}\right) {H^{-2}}, \quad H \equiv H(y),
\end{equation*}
thus suggesting that $I_{\ensuremath{\bm\theta}} \approx I_{\ensuremath{\bm\theta}}^*$ in a neighborhood
of $\kappa = 1$. Despite the fact the reparametrization is only
quasi-orthogonal for $(\sigma, \xi)$ in an EGPD framework,
{numerical studies} reported in Section~\ref{simulation} indicate very
good {performances in terms of accuracy of fitted
regression estimates and QQ-plots of randomized quantile residuals \citep{dunn1996}}.
\section{\large\textsf{SIMULATION STUDY}}\label{simulation}
\subsection{\large\textsf{SIMULATION SCENARIOS AND ONE-SHOT EXPERIMENTS}}\label{oneshot}
In this section{,} we describe the simulation scenarios and present one-shot experiments so {as} to illustrate the proposed method; a Monte Carlo study {is} presented in Section~\ref{mc}. For now, we focus on describing the setting over which we simulate the data and on discussing the numerical experiments. {Code for implementing our methods is available from the Supplementary Material; we used \texttt{jags} \citep{plummer2019} as we have more experience with it, but \texttt{stan} \citep{gelman2015} would look like another natural option that could potentially yield faster mixing and more effective sample sizes at little extra programming cost.} {Throughout we use uninformative Gamma priors, Gamma(0.1, 0.1), for the hyperparameters $\lambda_k$, $\lambda_\nu$, and $\lambda_{\xi}$.}
Data are simulated according to \eqref{egpdx}, with power carrier $G_{\kappa(\mathbf{x})}(v) = v^{\kappa(\mathbf{x})}$ and with link functions
\begin{equation*}
\kappa_{\mathbf{x}} = \exp(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\beta}), \quad
\nu_{\mathbf{x}} = \exp(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\alpha}), \quad
{
\xi_{\mathbf{x}} = \exp(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\gamma}).
}
\end{equation*}
\begin{figure}[H]\centering
\textbf{Scenario~1} \hspace{5.3cm} \textbf{Scenario~2} \hspace{1.5cm} \\
\footnotesize (light effects for {lower values}, light effects for tail) \hspace{0.5cm} (light effects for {lower values}, large effects for tail)\\
\includegraphics[width=0.465\textwidth]{conditional_density1_seed2_final_band250_lambdas-1.png}
\includegraphics[width=0.465\textwidth]{conditional_density2_seed2_final_band250_lambdas-1.png}\\
\textbf{Scenario~3} \hspace{5.3cm} \textbf{Scenario~4} \hspace{1.5cm} \\ \centering
(large effects for {lower values}, light effects for tail) \hspace{0.5cm} (large effects for {lower values}, large effects for tail)\\
\includegraphics[width=0.465\textwidth]{conditional_density3_seed2_final_band250_lambdas-1.png}
\includegraphics[width=0.465\textwidth]{conditional_density4_seed2_final_band250_lambdas-1.png}
\caption{\label{scenariosim} \footnotesize Cross sections of
{posterior mean conditional density} (solid) along with {pointwise} credible bands against true (dashed) for a one-shot experiment with $n = 500$; the cross sections result from
conditioning on $\mathbf{x} = (0.25, \dots, 0.25)$ (left) and $\mathbf{x} = (0.50,
\dots, 0.50)$ (right).
}
\end{figure}
\noindent {We consider four scenarios, described below, based on the following vectors for the regression coefficients:}
\begin{itemize}
\item \textbf{Scenario~1}---Light effects for {lower values}, light effects for tail: $\ensuremath{\bm\beta}=\mathbf{a}; \ensuremath{\bm\alpha}=\mathbf{b}; \ensuremath{\bm\gamma}=\mathbf{c}$.
\item \textbf{Scenario~2}---Light effects for {lower values}, large effects for tail: $\ensuremath{\bm\beta}=\mathbf{a}; \ensuremath{\bm\alpha}=2\mathbf{b}; \ensuremath{\bm\gamma}=2\mathbf{c}$
\item \textbf{Scenario~3}---Large effects for {lower values}, light effects for tail: $\ensuremath{\bm\beta}=2\mathbf{a}; \ensuremath{\bm\alpha}=\mathbf{b}; \ensuremath{\bm\gamma}=\mathbf{c}$
\item \textbf{Scenario~4}---Large effects for {lower values}, large effects for tail: $\ensuremath{\bm\beta}=2\mathbf{a}; \ensuremath{\bm\alpha}=2\mathbf{b}; \ensuremath{\bm\gamma}=2\mathbf{c}$
\end{itemize}
where {$\mathbf{a}$, $\mathbf{b}$, $\mathbf{c} \in \mathbb{R}^{10}$ have components }
{
\begin{equation*}
a_1 = a_3 = 0.3; a_6 = a_{10} = -0.3; \quad
b_2 = -0.3; b_5 = b_8 = 0.3; \quad
c_1 = c_4 = c_9 = 0.3; c_{10} = -0.3;
\end{equation*}}
{and zero otherwise}.
For each of the scenarios above, we {simulated} $n = 500$ observations, $\{(\mathbf{x}_i, y_i)\}_{i = 1}^n$, from a conditional EGPD with a canonical carrier function; this is about the same number of observations {available} in the data application {of} Section~\ref{application}. The covariates are simulated from independent standard uniform distributions. Figure~\ref{scenariosim} shows cross sections of the true and conditional densities for Scenarios~1--4 estimated using our {methods.}
As {can be} seen from these figures{,} the method recovers satisfactorily well the true conditional density---especially keeping in mind {the sample size}.
\begin{figure}
\begin{minipage}{0.48\linewidth}\hspace{1cm}
\textbf{Scenario~1}\\ \footnotesize
(light effects for {lower values}, light effects for tail) \\
\centering
\includegraphics[scale = .07]{results_s1_n250_m250_final_lambdas2.png}
\end{minipage}
\begin{minipage}{0.48\linewidth}\hspace{1cm}
\textbf{Scenario~2}\\ \footnotesize
(light effects for {lower values}, large effects for tail)\\
\centering
\includegraphics[scale = .07]{results_s2_n250_m250_final_lambdas2.png}
\end{minipage}\\
\begin{minipage}{0.48\linewidth}\hspace{1cm}
\textbf{Scenario~3}\\ \footnotesize
(large effects for {lower values}, light effects for tail)\\
\centering
\includegraphics[scale = .07]{results_s3_n250_m250_final_lambdas2_corrigido.png}
\end{minipage}
\begin{minipage}{0.48\linewidth}\hspace{1cm}
\textbf{Scenario~4}\\ \footnotesize
(large effects for {lower values}, large effects for tail)\\
\centering
\includegraphics[scale = .07]{results_s4_n250_m250_final_lambdas2.png}
\end{minipage}
\caption{\label{boxplots} \footnotesize Side-by-side boxplots with regression coefficient estimates for Monte Carlo simulation study ($n = 250$) plotted against the true values (\red{---}). The values 1--9 represent the coefficient indices.}
\end{figure}
\subsection{\large\textsf{MONTE CARLO SIMULATION STUDY}}\label{mc}
To assess the finite-sample performance of the proposed methods{,} we now present the results of a Monte Carlo simulation study, where we repeat 250 times the one-shot experiments from Section~\ref{oneshot}. We consider the {different} sample sizes {of} $n = 100$, $n = 250$, and $n = 500$.
{Figure~\ref{boxplots} presents side-by-side boxplots of the coefficient estimates for each scenario for the case $n = 250${, and we can see that} the estimates tend to be close to the true values thus suggesting that the proposed methods are able to learn what covariates are significant for the {lower values}, but not for the tail (and vice-versa); further {simulation} experiments (results not shown) indicate that performance may deteriorate if the number of common effects between {left and right tails} is large. The coefficient estimates for the cases $n = 100$ and $n = 500$ are reported in the Supplementary Material{;} as expected{,} the larger the sample size{,} the more accurate the estimates.} {The frequency of variable selection table presented in the Supplementary Material suggests a satisfactory performance of the method in terms of variable selection.}
\noindent We now move to the conditional density. To compare the fitted conditional density against the true as the sample size increases, we resort to the {mean integrated squared error} (MISE):
\begin{equation*}
\text{MISE} = \E\bigg[\int\int \{\widehat{f}(y \mid \mathbf{x}) - f(y \mid \mathbf{x})\}^2 \, \ensuremath{\mathrm{d}} \mathbf{x} \, \ensuremath{\mathrm{d}} y\bigg],
\end{equation*}
{where the expectation is taken so to summarize the average behavior of the randomness on the double integral that stems from the posterior mean estimate $\widehat{f}$ \citep[][Section~2.3]{wand1995}.}
Figure~\ref{mise} {shows} a boxplot {of} the MISE {of} each run of the simulation experiment {for Scenario~1}. {As can be} seen from Figure~\ref{mise}, MISE tends to decrease as the {sample size} increases, thus indicating that a better performance of the proposed methods is to be expected on larger samples; {the same holds for the remaining scenarios as can be seen from the boxplots of MISE available in the Supplementary Material}. {To examine the quality of each fit corresponding to a simulated dataset, $\{(\textbf{x}_i, y_i)\}_{i = 1}^n$, we use randomized quantile residuals \citep{dunn1996} adapted to our model, that is, $\{\varepsilon_i\}_{i = 1}^n = [\Phi^{-1}\{\widehat{F}(y_i|\mathbf{x}_i)\}]_{i = 1}^n$, where $\Phi^{-1}$ is the quantile function of the standard Normal distribution and $\widehat{F}$ is the posterior mean estimate of the conditional distribution function. Figure~\ref{residuals} depicts the
corresponding posterior mean QQ-plot of randomized quantile residuals against the theoretical standard
normal quantiles, and it evidences an acceptably good fit of the model for Scenarios~1--4. } {Finally, to supplement the analysis, we also report in the Supplementary Material numerical experiments that tend to support the claim that the performance of the proposed methods is relatively satisfactory at separating covariate effects for the lower values and tail (Section 2.1 of Supplementary Material), and that this also holds in a misspecified setting. Despite the overall good performance, in all fairness not everything is perfect; specifically, our supporting numerical experiments reveal that: i) the correspondence between ``non-constant'' effects on lower quantiles and coefficients of $\kappa(\mathbf{x})$ is weaker for Scenario~3; ii) bias appears on the lower conditional quantile function estimates, under misspecification.}
\begin{figure}[H]
\centering
\begin{minipage}{0.48\linewidth}\centering\hspace{.7cm}
\textbf{Scenario~1}\\
\footnotesize (light effects for {lower values}, light effects for tail)\\
\end{minipage}
\begin{minipage}{0.48\linewidth}\centering\hspace{.7cm}
\textbf{Scenario~2}\\
\footnotesize (light effects for {lower values}, large effects for tail)\\
\end{minipage}
\includegraphics[width=0.45\textwidth]{residuals2_lambdas2_s1_m250.pdf}
\includegraphics[width=0.45\textwidth]{residuals2_lambdas2_s2_m250.pdf}\\
\vspace{0.5cm}
\begin{minipage}{0.48\linewidth}\centering\hspace{.7cm}
\textbf{Scenario~3}\\
\footnotesize (large effects for {lower values}, light effects for tail)\\
\end{minipage}
\begin{minipage}{0.48\linewidth}\centering\hspace{.7cm}
\textbf{Scenario~4}\\
\footnotesize (large effects for {lower values}, large effects for tail)\\
\end{minipage}
\includegraphics[width=0.45\textwidth]{residuals2_lambdas2_s3_m250.pdf}
\includegraphics[width=0.45\textwidth]{residuals2_lambdas2_s4_m250.pdf}\\
\caption{\label{rqrmc} \footnotesize {QQ-plots of randomized quantile residuals for Monte Carlo simulation study; each trajectory corresponds to a posterior mean QQ-plot from each simulated dataset.}}
\end{figure}
\begin{figure}
\centering
\textbf{Scenario~1}\\
(light effects for {lower values}, light effects for tail)\\
\includegraphics[width=0.45\textwidth]{mise_m250_s1_lambdas2_log.pdf}
\caption{\label{mise} \footnotesize Side-by-side boxplots of MISE {on the log scale} for Monte Carlo simulation study {for Scenario~1}.}
\end{figure}
\section{\large\textsf{DRIVERS OF MODERATE AND EXTREME RAINFALL IN MADEIRA}}\label{application}
\subsection{\large\textsf{MOTIVATION, APPLIED CONTEXT, AND DATA DESCRIPTION}}
{We now showcase the proposed methodology with a climatological
illustration with data from Funchal, Madeira (Portugal); the island
of Madeira is an archipelago of volcanic origin located in the
Atlantic Ocean {about 900km} southwest of mainland Portugal. Prior to
fitting the proposed model, we start by providing some background on
the scientific problem of interest and by describing the data.
Madeira has suffered a variety of extreme rainfall events over the
last two centuries, including the flash floods of October 1803
(800--1000 casualties) and those of February 2010---the latter with
a death toll of 45 people \citep{fragoso2012, santos2017} and with
an estimated damage of 1.4 billion {Euros} \citep{baioni2011}. Such
violent rainfall events are often followed by damaging landslides
events, including debris, earth, and mud flows.}
{To analyze such rainfall events in Funchal, Madeira, we have
gathered data from the National Oceanic and Atmospheric Administration
(\url{www.noaa.gov}). Specifically, we have gathered total monthly
precipitation (.01 inches), as well as the following potential drivers
for extreme rainfall: Atlantic multi-decadal Oscillation (AMO), El
Ni\~no-Southern Oscillation (expressed by NINO34 index) (ENSO),
North Pacific Index (NP), Pacific Decadal Oscillation (PDO),
Southern Oscillation Index (SOI), and North Atlantic Oscillation
(NAO). The sample period covers January 1973 to June 2018, thus
including episodes such as the violent flash floods of 1979, 1993,
2001, and 2010 \citep{baioni2011}. After eliminating the dry events
(i.e.{,}~zero precipitation) and the missing precipitation data
(two observations), we are left with a total of 532 observations.}
{The potential drivers for extreme rainfall above have been
widely examined in the climatological literature, mainly on large
landmasses. In particular, it has been suggested that in North
America ENSO, PDO, and NAO play a key role governing the occurrence
of extreme rainfall events \citep{kenyon2010, zhang2010, whan2017};
yet for the UK, while NAO is believed to impact the occurrence of
extreme rainfall events, no influence of PDO nor AMO has been
detected \citep{brown2018}. The many peculiarities surrounding
Madeira climate (e.g.{,}~Azores {A}nticyclone, Canary {C}urrent, Gulf
{S}tream, \textit{etc}{.}), along with the negative impact that flash
floods and landslide events produce on the island, motivate us to
ask: i) whether such drivers are relevant for explaining extreme
rainfall episodes in Madeira; ii) whether such drivers are also
relevant for moderate rainfall events.}
\subsection{\large\textsf{TRACKING DRIVERS OF MODERATE AND EXTREME RAINFALL}}
{One of the goals of the analysis is to use the lenses of our model so
to learn what are the drivers of moderate and extreme rainfall in
Funchal. To conduct such inquiry, we use {the
full model from Section~\ref{extension} (see {Eq.~\eqref{links2}}), with power carrier $G_{\kappa(\mathbf{x})}(v) = v^{\kappa(\mathbf{x})}$ and with link functions
\begin{equation*}
\kappa(\mathbf{x}) = \exp(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}}\ensuremath{\bm\beta}), \quad
\nu(\mathbf{x}) = \exp(\mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}}\ensuremath{\bm\alpha}), \quad
\xi(\mathbf{x}) = \mathbf{x}^{\ensuremath{\mathrm{\scriptscriptstyle T}}} \ensuremath{\bm\gamma}.
\end{equation*}
Covariates have been standardized {and} {we used} a flat Normal prior, $\text{N}(0, 100^2)$, for the intercepts,
and {uninformative Gamma priors, $\Ga(0.1, 0.1)$}, for the
{hyperparameters $\lambda_k$, $\lambda_\nu$, $\lambda_\xi$; here an identity link was used for
$\xi(\mathbf{x})$ as fitting a GPD to the response via likelihood methods suggested no compelling evidence
in favor of an heavy-tailed response.}
After a burn-in period of {10\,000}
iterations, we collected a total of 20\,000 posterior samples. The
results of MCMC convergence diagnostics were satisfactory; in
particular, the effective sample sizes {were} acceptably high, ranging
from about {1000 to 5\,000}, and all values of Geweke's diagnostic {were}
satisfactory, ranging from about {$-3.2$ to $3.0$}. Figure~\ref{bands}
depicts the {credible intervals} for each regression coefficient.}
{To assess the fit of the proposed model we resort once more to randomized
quantile residuals \citep{dunn1996}. Figure~\ref{residuals} evidences an acceptably good fit of the model;} {while the pointwise bands in Figure \ref{residuals} are narrow, they result from acceptably high effective sample sizes as mentioned above.}\begin{figure}
\begin{minipage}{0.32\linewidth}
\centering
\includegraphics[scale = .22]{postbetak2_lambdas_prior.pdf}
\end{minipage}
\begin{minipage}{0.32\linewidth}
\centering
\includegraphics[scale = .22]{postbetanu2_lambdas_prior.pdf}
\end{minipage}
\begin{minipage}{0.32\linewidth}
\centering
\includegraphics[scale = .22]{postbetaxi2_lambdas_prior.pdf}
\end{minipage}
\caption{\label{bands} \footnotesize {Credible intervals} for regression coefficients
for inverse-link functions; the dots ($\bullet$) represent the
posterior means and the dashed line represents the reference line
$\beta = 0$.}
\end{figure}
\begin{figure}
\centering
\includegraphics[scale = .3]{residuals2_lambdas2_prior.pdf}
\caption{\label{residuals} \footnotesize {QQ-plot of} randomized quantile residuals; the dashed line represents the posterior mean plotted along with {pointwise} credible bands.}
\end{figure}
{{Figure~\ref{bands}} {(middle and right panels)} {suggests} that the key drivers for extreme
rainfall in Funchal are NAO and ENSO, along with a possible modest
influence of AMO on the magnitude of extreme rainfall events. To put
it differently, such results hint that a higher NAO, ENSO, and AMO
tend to be associated with extreme rainfall episodes in
Funchal. Such findings are thus reasonably in line with those of
\citet{kenyon2010}, \citet{zhang2010}, and \citet{whan2017} for North
America. Yet, similarly to \citet{brown2018}, we find no relevant
impact of PDO, {given the rest}, on extreme rainfall spells.} {Interestingly, of
all these potential drivers for extreme rainfall, {only ENSO}
seems to be statistically associated with moderate rainfall; see
Figure~\ref{bands} (left). Also, NP is the most relevant driver of
moderate rainfall, and yet the analysis suggests it plays no role on
extreme rainfall. Additionally, Figure~\ref{bands} (left) suggests
that an increase in NP leads {to a heavier} left tail (i.e.{,}~dryer
months), whereas an increase in ENSO leads to less mass close to
zero (i.e.{,}~rainy months). {Finally, to supplement the analysis,
we also present conditional quantiles in the Supplementary Material
so to directly assess how rainfall itself is impacted by all these
potential drivers.
}
\section{\large\textsf{CLOSING REMARKS}}\label{discussion}
In this paper we have introduced a Lasso-type model for the {lower values} and
tail of a possibly heavy-tailed response. The proposed model: i) bypasses
the need for threshold selection ($u_{\mathbf{x}}$); ii) models both the
conditional {lower values} and conditional tails; iii) it is naturally tailored
for variable selection. {In addition, contrarily to GPD-based approach of \cite{davison1990} the proposed model does not suffer from lack of threshold stability, as indeed our model does not depend on a threshold; the fact that the Davison--Smith approach does not retain the threshold stability property is known since \citep[][Section~2.2]{eastoe2009}, and it implies that selection of covariates in the parameters of the GPD is not invariant to threshold choice.} {Yet, other approaches that circumvent the threshold stability issue are available beyond our model, including an inhomogeneous point process formulation for extremes \citep[][Chapter 7]{coles2001}.} Interestingly, the proposed model can be regarded as a Lasso-type model for quantile
regression that is tailored for both the {lower values} and for extrapolating
into the tails. As a byproduct, our paper has {links} with the Bayesian
literature on Bayesian distributional regression \citep{umlauf2018},
and with the recent paper {of} \cite{groll2019}.
{Despite the considerations above on threshold selection there
is no `free lunch'; indeed, here the parameters of the model and the
choice of the carrier dictate the behavior of the quantiles in the
whole range of the distribution, while the impact of some parameters
is smaller in the lower or upper tail. Yet, competing
EGPD models that stem from different choices for the carrier function can be
formally compared and ranked via standard model selection criteria, such
as Log Pseudo Marginal Likelihood \citep[e.g.][]{geisser1979, gelfand1994}.}
Some final comments on future research are in order. A version of our
model that includes additionally a regression model for point masses
at zero would be natural for a variety of contexts, such as for
modeling long periods without rain or droughts. Semicontinuous
responses that consist of a mixture of {zeros} and a continuous positive
distribution are indeed common in practice
\citep[e.g.{,}][]{olsen2001}.
Another natural avenue for future research would endow the model
with further flexibility by resorting to a
generalized additive model, where the smooth function of each
covariate is modeled using B-spline basis functions. The latter
extension would however require a group lasso \citep{yuan2006},
shrinking groups of regression coefficients (per smooth function) towards
zero. While {we focus here} on modeling positive random variables, another
interesting extension of the proposed model would consider the left
{endpoint} itself as a parameter {rather} than fixing it at zero.
Finally, from an inference perspective it would also seem natural
defining a semiparametric Bayesian version of our model that would set
a prior directly on the covariate-specific carrier function $G_{\mathbf{x}}$ {rather than specifying a power carrier function in advance, }and to model the conditional density as $F(y \mid \mathbf{x})=G_{\mathbf{x}}{\{H(y \mid \mathbf{x})\}}$. This approach would however require setting a prior on the space $\mathscr{G}$ of all carrier functions so {as} to ensure that $G_{\mathbf{x}}$ obeys Assumptions~{A--C}, for all $\mathbf{x}$. While priors on
spaces of functions are an active field of research
\citep{ghosal2015}, definition of priors over $\mathscr{G}$ is to our
knowledge an open problem; {a natural line of attack to
construct such a prior would be via
random Bernstein polynomials \citep{petrone2002}, with weight
constraints derived from Lemma~1 of \cite{tencaliec2019}; {indeed, by modeling the carrier
function with a Bernstein polynomial basis a higher level of flexibility could be
achieved in comparison to that offered by Examples~\ref{powerc}--\ref{powerm}.
}}
\section*{\large\textsf{ACKNOWLEDGMENTS}} \label{acknowledgements}\footnotesize
We are grateful to the Editor, the Associate Editor, and two Referees for their insightful and constructive remarks on an earlier version of the paper. We thank participants of IWSM (International Workshop on Statistical Modeling) 2019 and of EVA 2019 for constructive comments and discussions. We also thank, without implicating, Vanda In\'acio de Carvalho, Ioannis Papastathopoulos, Philippe Naveau, Ant\'onia Turkman, and Feridun Turkman for helpful comments and fruitful discussions. This work was partially supported by FCT (Funda\c c\~ao para a Ci\^encia e a Tecnologia, Portugal), through the projects PTDC/MAT-STA/28649/2017 and UID/MAT/00006/2020.
\renewcommand\refname{\textsf{REFERENCES}}
\end{document}
\begin{document}
\title{Three routes to the exact asymptotics for the one-dimensional
quantum walk}
\begin{abstract}
We demonstrate an alternative method for calculating the asymptotic
behaviour of the discrete one-coin quantum walk on the infinite line, via
the Jacobi polynomials that arise in the path integral representation.
We calculate the asymptotics using a method that is significantly easier
to use than the Darboux method. It also provides a single integral
representation for the wavefunction that works over the full range of
positions, $n,$ including throughout the transitional range where the
behaviour changes from oscillatory to exponential. Previous analyses of
this system have run into difficulties in the transitional range, because
the approximations on which they were based break down here. The fact
that there are two different kinds of approach to this problem (Path
Integral vs. Schr\"{o}dinger wave mechanics) is ultimately a manifestation
of the equivalence between the path-integral formulation of quantum
mechanics and the original formulation developed in the 1920s. We also
discuss how and why our approach is related to the two methods that have
already been used to analyse these systems.
\end{abstract}
\section{Introduction}
The discrete quantum walk has been discussed in several recent papers
\cite{NayaknV,Ambainis01}.
The first authors to discuss the quantum random walk were
Y. Aharonov, Davidovich and Zagury, in \cite{earliest} where they
described a very simple realization of the quantum random walk in quantum
optics. Some further early results were due to Meyer, in \cite{Meyer96}.
He proved that for a discrete (unitary) quantum walk on the line to have
non-trivial behaviour, its motion must be assisted by an additional
``coin'' degree of freedom which is conventionally taken to be two
dimensional. This spin-like degree of freedom is sometimes called the
{\emph{chirality,}} and can take the values {\small{RIGHT}} and
{\small{LEFT}}, or a superposition of these. Meyer therefore considered
the wave function as a two component vector of amplitudes of the particle
being at point $n$ at time $t$.
Let
\begin{equation}\label{psidef}
\Psi(n,t)=
\begin{pmatrix}
\psi_{L}(n,t) \\
\psi_{R}(n,t)
\end{pmatrix}
\end{equation}
where we shall label the chirality of the top component {\small{LEFT}} and
the bottom {\small{RIGHT}}. This paper is concerned with the dynamics
of a test particle performing an unbiased quantum walk on the integer
points on the line. At each time step the chirality of the particle
evolves according to a unitary Hadamard transformation
\begin{align}\label{spinstep}
|\rm{R}\rangle \mapsto \frac{1}{\sqrt{2}}(|\rm{R}\rangle + |\rm{L}\rangle) \\
|\rm{L}\rangle \mapsto \frac{1}{\sqrt{2}}(|\rm{R}\rangle - |\rm{L}\rangle)
\end{align}
and then the particle moves according to its (new) chirality state.
Therefore, the particle obeys the recursion relations
\begin{align}\label{recursion}
\Psi_{\rm{L}}(n,t+1)&=-\frac{1}{\sqrt{2}}\Psi_{\rm{L}}(n+1,t)
+\frac{1}{\sqrt{2}}\Psi_{\rm{R}}(n-1,t) \\
\Psi_{\rm{R}}(n,t+1)&=\frac{1}{\sqrt{2}}\Psi_{\rm{L}}(n+1,t)
+\frac{1}{\sqrt{2}}\Psi_{\rm{R}}(n-1,t).
\end{align}
Meyer and subsequent authors have considered two approaches to the
Hadamard walk, the {\emph{Path Integral}} and {\emph{Schr\"{o}dinger}}
approaches, which reflect two complementary ways of formulating quantum
mechanics \cite{FeynmanHibbs}. We refer to the paper by Ambainis, Bach,
Nayak, Vishwanath and Watrous \cite{Ambainis01} for proper definitions and
references. We shall refine the asymptotic analysis of this paper.
The behaviour of the Hadamard walk is very different from the classical
random walk on the integer points on the real line. One way of understanding
this is as a result of quantum interference. Destructive interference
suppresses the probability amplitude in the vicinity of the origin, and
constructive interference reinforces it away from the origin. The net effect
of this is that the quantum walk spreads out much faster. Figure \ref{walk}
shows the discrete quantum walk on the infinite line at $t=100.$ We have only
plotted the distribution for even values of $n$ in Figure \ref{walk}. If a
walk's initial distribution has its support confined to a set of nodes which
all have the same parity (all $n$s either even or odd) the support of the
distribution will ``tick'' between different parities at each step.
\begin{figure}
\caption{The discrete quantum walk on the line. The probability
distribution is shown for a walk that started at the origin
             with its coin in the state $|\rm{R}\rangle.$}
\label{walk}
\end{figure}
The probability distribution for the quantum walk depends not only on the
evolution law in \eqref{spinstep}, but also on the initial conditions.
Throughout this paper we will only consider walks that start at $n=0$ with the
coin in the initial state $|\rm{R}\rangle.$ The convention that the walk
starts in the position $n=0$ is inherited from the motivation for studying
these systems as toy models of quantum algorithms: the computer is always
started with its registers in the state $|00\ldots\rangle.$
The choice of initial coin state was made because the Hadamard walk is an
unbiased walk \cite{NayaknV}. This means that even though some starting
conditions result in an asymmetric probability distribution, we can always
find some other starting state that will produce a walk with the opposite
bias.
The $|\rm{R}\rangle$ starting state produces a distribution that is maximally
biased to the right. Likewise, if we had chosen to start the walk in the
state $|\rm{L}\rangle,$ this would have produced a distribution that was
maximally biased to the left. This distribution is the exact mirror-image
of that produced by starting in the state $|\rm{R}\rangle.$ Thus, reversing
the starting condition just relabels $n$ to $-n.$ As the coin space is
two-dimensional, we can now invoke the linearity of quantum mechanics, and
note that we can obtain the behaviour for any initial condition
$|\Phi\rangle=a|\rm{R}\rangle+b|\rm{L}\rangle$ by forming the corresponding
linear combination of the evolutions for the initial condition basis states
$|\rm{R}\rangle,|\rm{L}\rangle.$
\section{Related Work}
We now very briefly describe the methods previous authors have used. They
have so far followed two approaches to determine the limiting behaviour of
the $\psi$-functions as $t \rightarrow \infty$. The translational invariance
of this problem means that it has a simple description in momentum space,
and the Schr\"{o}dinger approach relies on that fact. We will describe
this in more detail in the section below. Beginning with the recursion
relation \eqref{recursion}, Nayak and Vishwanath \cite{NayaknV} showed
that
\begin{equation}
\tilde{\Psi}(k,t)=\left(M_{k}\right)^{t} \tilde{\Psi}(k,0)
\end{equation}
where
\begin{equation}
\tilde{\Psi}(k,0)=\sum_{n}\Psi(n,0)e^{ikn}
\end{equation}
and $M_{k}$ is the matrix
\begin{equation}
M_{k}=\frac{1}{\sqrt{2}}
\begin{pmatrix}
-e^{-ik} & e^{-ik} \\
e^{ik} & e^{ik}
\end{pmatrix}.
\end{equation}
They diagonalize the matrix $M_{k}$, finding the eigenvalues
$\lambda_{k}^{1}=e^{i\omega_{k}}$ and
$\lambda_{k}^{2}=e^{i(\pi-\omega_{k})}$ where
$\omega_{k}=\arcsin\left(\frac{\sin k}{\sqrt{2}}\right), \quad
\omega_{k}\in [-\pi/2,\pi/2]$. They then write the $\psi$-functions
in terms of the eigenvalues $\left(\lambda_{k}^{1}\right)^{t}$
and $\left(\lambda_{k}^{2}\right)^{t}$ and their associated eigenvectors
and formally invert the original Fourier transform to obtain the closed
form integral representations for the wavefunction,
\begin{align}
\psi_{\rm{L}}(n,t) &= \frac{1+(-1)^{n+t}}{2}\int_{-\pi}^{\pi}
\frac{dk}{2\pi}
(1+\frac{\cos k}{\sqrt{1+\cos^2k}})e^{-i(\omega_kt+kn)} \\
\psi_{\rm{R}}(n,t) &= \frac{1+(-1)^{n+t}}{2}\int_{-\pi}^{\pi}
\frac{dk}{2\pi}
\frac{e^{ik}}{\sqrt{1+\cos^2k}}e^{-i(\omega_kt+kn)}.
\end{align}
They then approximate these, using a combination of the method of
stationary phase in one range, and integration by parts in the
other. (Note that the Left-Right labelling convention in \cite{NayaknV} is
the opposite to ours.)
There is another approach based on the {\emph{Path Integral}} formulation
of quantum mechanics, that Ambainis {\emph{et al.}} discuss. The
$\psi$-functions are expressed in terms of Jacobi polynomials (as in Lemma
\ref{second} below) of the form
\begin{equation}
J_{j}^{(aj+\gamma,bj+\beta)}(0), \; j=(t-n)/2-1=(1-\alpha)t/2 -1.
\end{equation}
One may then derive the asymptotic behaviour of the $\psi$-functions as
$t \rightarrow \infty$ by determining the asymptotic behaviour of these
Jacobi polynomials as $j \rightarrow \infty$. This has been done in two
ways. Ambainis {\emph{et al.}} use the approach due to Chen and Ismail
\cite{ChenIsmail}, which employs the {\emph{Darboux}} method. Here one
begins with the Srivastava-Singhal generating
function
\begin{equation}
\sum_{j=0}^{\infty} J_{j}^{(\gamma + aj,\beta + bj)}(w)z^{j}
=(1+{\bf{u}})^{-\gamma}(1+{\bf{v}})^{-\beta}[1+(1+a){\bf{u}}
+(1+b){\bf{v}}]^{-1}
\end{equation}
where ${\bf{u}}$ and ${\bf{v}}$ are defined to be power series in $w$ that
satisfy the equations
\begin{align}
{\bf{u}}&=-\frac{1}{2}(w+1)z(1+{\bf{u}})^{-\lambda}(1+{\bf{v}})^{-\mu-1} \\
{\bf{v}}&=-\frac{1}{2}(w-1)z(1+{\bf{u}})^{-\lambda-1}(1+{\bf{v}})^{-\mu}.
\end{align}
Chen and Ismail use the Darboux method to calculate the asymptotics of
these Jacobi polynomials. This method starts from the idea that if
$f(z)=\sum_{n \ge 0}a_{n}z^{n}$ then the radius of convergence is
$\mathcal{R}=\lim_{n \rightarrow \infty}
\left(\frac{1}{|a_{n}|}\right)^{1/n}$ when this limit exists. Suppose
there is a {\emph{comparison function}} $g(z)=\sum_{n \ge 0}b_{n}z^{n}$
such that $g(z)-f(z)$ has a larger radius of convergence than $f(z),$ then
$b_{n}-a_{n}=O(s^{-n})$ where $s > \mathcal{R}.$ If the asymptotic
behaviour of $b_{n}$ is known, then since $a_{n}\sim b_{n}$ then we know
the asymptotic behaviour of $a_{n}.$ Chen and Ismail use the
Srivastava-Singhal description of the generating function for Jacobi
polynomials to determine its singularities on the radius of convergence
and give comparison functions at each singularity to determine the
asymptotic behaviour of the Jacobi polynomials.
It is interesting to note that the reciprocals of these two singularities
(when normalized by dividing by $\left(\sqrt{2}\right)^{t}$) are the
eigenvalues that arise in the Schr\"{o}dinger method.
\section{Overview and Results}
The other way to obtain the asymptotic behaviour of these Jacobi
polynomials is to follow the method in Gawronkski and Shawyer's paper,
\cite{Gawyer}. This is the third way to analyse these systems, and will
be the way that we follow in much of this paper. It is a refinement of
the methods in the paper by Saff and Varga \cite{Saff}. This uses the
method of {\bf{steepest-descents}}. We will outline this method below but
for further details we recommend \cite{Arfken,Wong} and also the book by
Olver \cite{Olver} which describes both the steepest-descent method and the
method of Darboux very clearly.
The saddle-points that feature in this method are also related
to the eigenvalues that arise in the Schr\"{o}dinger method. We will
detail this relationship below. If $\zeta$ is a saddlepoint then
$\exp(h(\zeta))$ is an eigenvalue from the Schr\"{o}dinger method, for a
function $h(\zeta)$ that we define below.
We now describe our results. We shall see that it is possible to obtain
explicit asymptotic expansions that are uniformly convergent. The system
displays two types of behaviour, with the transitions between the
different behaviours governed by a parameter, $\alpha =n/t$. The behaviour
changes qualitatively over three ranges, which are respectively
$0 \le |\alpha| \le \frac{1}{\sqrt{2}}-\varepsilon,
\frac{1}{\sqrt{2}}-\varepsilon <|\alpha| <\frac{1}{\sqrt{2}}+\varepsilon,$
and $\frac{1}{\sqrt{2}} +\varepsilon \le |\alpha|<1-\varepsilon$, where
$\varepsilon$ is a positive number. Our methods give error terms of the
form $O(t^{-N-1/2})$ where $N$ is some positive integer, and the expansion
for each range holds uniformly. We stop at $O(t^{-5/2})$ since we have no
application for the more precise estimates.
The first range is arguably the most interesting. Here the
asymptotic behaviour of the $\psi$-functions is oscillatory as
$t\rightarrow \infty$. We will use a result by Gawronkski and Shawyer
\cite{Gawyer} who obtained the leading term and the factor $1+O(t^{-1})$
for the error term for Jacobi polynomials. When this result is applied to
the $\psi$-functions, we obtain a refinement of Theorem 2 of
\cite{Ambainis01} who found the leading term.
The second, boundary, range is treated using the method of coalescing
saddle-points as described in R. Wong's book \cite{Wong}. A uniform
asymptotic expansion is possible which involves the Airy function. This
is also interesting because the $\psi$-functions change from an oscillating,
polynomially bounded asymptotic behaviour to an exponentially small
behaviour as $|\alpha|$ changes from below $1/\sqrt{2}$ to above
$1/\sqrt{2}$. The calculation of the asymptotics for these polynomials is
novel to the best of the authors' knowledge. The third range is the
immediate vicinity of $|\alpha| = 1/\sqrt{2}.$ As the behaviour of our
integral representation is well understood, we can obtain uniformly
convergent asymptotics throughout this transitional range.
The main interest of our results lies not in the more accurate asymptotic
expansions for the $\psi$-functions in the first and third ranges of
$\alpha,$ although Ambainis {\emph{et al.}} did ask for a uniform method
to do the asymptotics for these ranges. In fact, Gawronkski and Shawyer
have already provided this method \cite{Gawyer}. The fact that the uniform
asymptotic behaviour for the Jacobi polynomials for the transition from
polynomially bounded oscillating behaviour to exponential decay can be
found in terms of the Airy function seems rather more interesting. It is
somewhat surprising that this has not (to our knowledge) been published
before for this family of polynomials: Wong's book \cite{Wong} shows how
to obtain the asymptotic behaviour in terms of Airy functions. Perhaps the
reason for this omission is simply the previous lack of an application,
which the quantum walk now provides.
\section{The Feynman Path Integral}
We begin with the Feynman path integral approach following Meyer. We will
represent the components of the vector-valued wavefunction as a normalized
sum over signed paths. Meyer \cite{Meyer96} proved
\begin{lemma}[Meyer \cite{Meyer96}]\label{start}
Let $-n\le t<n$. Define $l=\frac{t-n}{2}$. The amplitudes of position $n$
after $t$ steps of the Hadamard walk are:
\begin{align}
\psi_{R}(n,t)&=\frac{1}{\sqrt{2^{t}}}
\sum_{s} \binom{l-1}{s-1} \binom{t-l}{s}(-1)^{t-s} \\
\psi_{L}(n,t)&=\frac{1}{\sqrt{2^{t}}}
\sum_{s}\binom{l-1}{s} \binom{t-l}{s}(-1)^{t-s-1},
\end{align}
except for the endpoints where $t=n,$ which have to be handled separately.
\end{lemma}
(See \cite{Meyer96} for the endpoints. For a derivation of these
formulae, see Appendix A of \cite{Multicoin}.)
We will follow the approach used by Ambainis {\emph{et al.}}
\cite{Ambainis01} and Meyer \cite{Meyer96} but in greater detail to
obtain the following lemma. The standard notation in \cite{AnStegun} for
Jacobi polynomials is to write them as $J_{n}^{(\alpha,\beta)}(z),$ but to
avoid confusion with $\alpha=n/t$ we will write these as
$J_{r}^{(u,v)}(z).$ (Note that in the following lemma, we have reintroduced
the external phase that was omitted in \cite{Meyer96} and \cite{Ambainis01}.)
\begin{lemma}[Ambainis {\emph{et al.}}\cite{Ambainis01}]\label{second}
When $ n \equiv t$ mod 2 and $J_{r}^{(u,v)}(z)$ denotes a Jacobi
polynomial,
\begin{equation}
\psi_{R}(n,t)(-1)^{(t-n)/2} =
\begin{cases}
-(\frac{t+n}{t-n})2^{-n/2-1}J_{(t-n)/2-1}^{(1,n)}(0),
& \text{ when }0\le n<t \\
-2^{n/2-1}J_{(t+n)/2-1}^{(1,-n)}(0), & \text{ when }-t\le n<0.
\end{cases}
\end{equation}
Also,
\begin{equation}
\psi_{L}(n,t)(-1)^{(t-n)/2} =
\begin{cases}
(-1)^{n+1}2^{-n/2-1}J_{(t-n)/2-1}^{(0,n+1)}(0), & \text{ when }0 \le n <t \\
(-1)^{n+1}2^{n/2} J_{(t+n)/2}^{(0,-n-1)}(0), & \text{ when }-t \le n < 0.
\end{cases}
\end{equation}
\end{lemma}
{\bf{Remark:}} As Ambainis {\emph{et al.}} \cite{Ambainis01} point out
\begin{align}
&\psi_{L}(-n,t)=-\psi_{L}(n-2,t),
&\psi_{R}(-n,t)=\frac{t-n}{t+n}\psi_{R}(n,t).
\end{align}
Proof of Lemma~\ref{second}: We use two formulae from Abramowitz and
Stegun \cite{AnStegun} to prove these results. The first is the
Pfaff-Kummer transformation \cite{AskeyRoy} (15.3.4 of \cite{AnStegun})
\begin{equation}\label{hypergeomone}
\;_{2}F_{1}(a,b;c;z)
=(1-z)^{-a}\;_{2}F_{1}(a,c-b;c;\frac{z}{z-1}).
\end{equation}
The second is the representation of a Jacobi polynomial as a $\;_2F_1$,
see 15.4.6 of \cite{AnStegun}
\begin{equation}\label{hypergeomtwo}
\;_{2}F_{1}(-j,u+1+v+j;u+1;z) =
\frac{j!}{(u+1)_{j}} J_{j}^{(u,v)}(1-2z).
\end{equation}
Now the first sum, say $S,$ in Lemma~\ref{start} is
$\;_{2}F_{1}(1-l,-(t-l);1;-1)$ so by \eqref{hypergeomone}
\begin{equation}
S=2^{(t-n)/2-1} \;_{2}F_{1}(1-(t-n)/2,-1+(t-n)/2+n+1+1;1;1/2).
\end{equation}
Now by \eqref{hypergeomtwo} we obtain
\begin{equation}
S=2^{(t-n)/2-1}J_{(t-n)/2-1}^{(0,n+1)}(0).
\end{equation}
This proves the first part of Lemma~\ref{second} for $n\ge 0.$ To derive
the second part, for $n < 0$ we see
\begin{multline}
S = \;_{2}F_{1}(-(t+n)/2,1-1+(n-t)/2;1;-1) \\
= 2^{(t+n)/2} \;_{2}F_{1}(-(t+n)/2,1+(n+t)/2-n-1;1;1/2)
= 2^{(t+n)/2}J_{(t+n)/2}^{(0,-n-1)}(0).
\end{multline}
The results for the first sum are now proved.
The second sum, say $T$, can be treated in much the same way since
\begin{multline}
T=(l-t) \;_{2}F_{1}(1-l,l-t+1;2;-1) =2^{l-1}(l-t)
\;_{2}F_{1}(1-l,2-l+t-1;2;1/2)\\
=2^{(t-n)/2-1}(l-t) \;_{2}F_{1}(-(l-1),2+l-1+n;2;1/2) \\
=(l-t)2^{(t-n)/2-1}J_{(t-n)/2-1}^{(1,n)}(0)/l.
\end{multline}
The result for nonnegative $n$ now follows. If $n$ is negative
\begin{multline}
T= \;_{2}F_{1}(-(n+t)/2+1,1-l;2;-1)\frac{t+n}{2} \\
=\frac{t+n}{2}2^{(t+n)/2-1} \;_{2}F_{1}(-((t+n)/2-1),2+((n+t)/2-1)-t;2;1/2)\\
=\frac{t+n}{2}2^{(t+n)/2-1}
\;_{2}F_{1}(-((t+n)/2-1),2+((n+t)/2-1)-t;2;1/2)\\
=\frac{t+n}{t+n}2^{(t+n)/2-1}J_{(t+n)/2-1}^{(1,-n)}(0).
\end{multline}
This completes the proof of Lemma~\ref{second}.
We consider first of all $\psi_{L}(n,t),\; n \ge 0.$ (As was observed by
Ambainis {\emph{et al.,}} there is a symmetry between positive and
negative $n$.) We let
\begin{equation}
m=(1-\alpha)t/2-1
\end{equation}
so that
\begin{equation}
1+n=1+\alpha t=\frac{1+\alpha}{1-\alpha}+\frac{2\alpha}{1-\alpha} m
\end{equation}
and
\begin{equation}
J_{(1-\alpha)t/2-1}^{(0,n+1)}(0) =
J_{m}^{(0+0\cdot m, \frac{1+\alpha}{1-\alpha}+\frac{2\alpha m}{1-\alpha})}(0).
\end{equation}
\subsection{The oscillatory range: $|\alpha| < 1/\sqrt{2}-\varepsilon$}
For $0\le \alpha <2^{-1/2}-\varepsilon, \quad 0<\varepsilon \le 2^{-1/2},$
we may use either the Chen-Ismail \cite{ChenIsmail} results or the
Gawronkski-Shawyer \cite{Gawyer} results. We use the results by
Gawronkski and Shawyer since they have been proved to hold uniformly over
this range of $\alpha.$ For the sake of consistency of notation and ease
of reference we will state the Gawronkski and Shawyer result in a more
restricted form than they obtained in their paper, but it is sufficient
for our purposes.
Gawronkski and Shawyer write, using the integral representation in equation
(4.46) of Szeg{\"{o}}'s book \cite{Szego},
\begin{equation}
J_{m}^{(am+\gamma,bm+\beta)}(z) =\frac{1}{2\pi i}
\int_{\Gamma}e^{mh(\zeta)}g(\zeta) d\zeta
\end{equation}
where
\begin{align}
h(\zeta)&=\ln\left(\frac{\zeta^{2}-1}{2(\zeta-z)}\right)
+a\ln\left(\frac{1-\zeta}{1-z}\right)
+b\ln\left(\frac{1+\zeta}{1+z}\right) \\
g(\zeta)&=\left(\frac{1-\zeta}{1-z}\right)^{\gamma}
\left(\frac{1+\zeta}{1+z}\right)^{\beta}
\frac{1}{\zeta-z}
\end{align}
and $\Gamma$ is a contour circling the origin. These integrals are of the
form
\begin{equation}\label{steepdef1}
I(m)=\int_{\Gamma} g(\zeta)e^{-m h(\zeta)}\;d\zeta
\end{equation}
and as such can be approximated in the limit as $m \to \infty$ using the
method of steepest descents \cite{Arfken,Olver,Wong}. This relies on the fact
that in this limit (i.e., $t \to \infty$) the only parts of the integrand
that contribute significantly to the integral are those regions where the
function in the exponent, $h,$ has a maximum. This is because in the long
time limit the exponential term in the integrand behaves more and more like
$\delta$-function(s) centred on point(s) where $h$ is maximal. (Note that
a stationary point of $h$ is only a maximum along a given contour of
integration, $\Gamma.$ This is because the stationary points of an analytic
function can only be saddle-points, so whether they appear to be maxima or
minima depends on the path taken through them.)
In order to make use of this phenomenon, we need to be able to assume that the
imaginary part of $h$ is approximately constant in the vicinity of these
saddle-points, otherwise the integrand will oscillate unmanageably in the
asymptotic limit. (We don't care if it oscillates wildly elsewhere, as the
contribution from those regions will be negligibly small.) The way to
achieve this is to choose the contour so that it passes through these
saddle-points along the path of steepest descent for the real part of the
exponent.
We therefore choose the path $\Gamma$ so it goes through the two saddle-points
$\zeta^{+},\zeta^{-}$ determined by
\begin{equation}
h'(\zeta)=0 \quad (\text{or } (1+\alpha)\zeta^{2}-2\alpha \zeta +1-\alpha=0).
\end{equation}
Thus the imaginary part of $h(\zeta)$ is fixed which implies that
the real part of $h(\zeta)$ has a maximum at the saddle-point.
(The numbers $e^{h(\zeta^{+})}, e^{h(\zeta^{-})}$ are the reciprocals of
the singularities found by Chen and Ismail using Darboux's method.)
Gawronkski and Shawyer use a steepest descent contour which goes through
the saddle-points at $\zeta =\zeta^{+}$ or $\zeta^{-}$ and the
points $\zeta=\pm 1.$ The contour must be modified slightly near the
singularities at $\zeta =\pm 1.$ The contours are leaf-shapes defined by
\begin{equation}
\Im (h(\zeta))=\Im (h(\zeta^{+})).
\end{equation}
So (for example) when $\alpha =1/2$ this is \begin{equation}
\Im(\ln(\zeta^{2}-1)-\ln 2-\ln \zeta +2\ln(1+\zeta)) =\Im
(\ln((\zeta^{+})^{2}-1)-\ln 2 -\ln \zeta^{+}+2\ln(1+\zeta^{+}))
\end{equation}
where
\begin{equation}
\zeta^{+}=\frac{\alpha+i \sqrt{1-2\alpha^{2}}}{1+\alpha}
=\frac{1+i\sqrt{2}}{3}.
\end{equation}
Figure \ref{leaf} is an example of the steepest descent curve for the
oscillatory range of $\alpha.$
\begin{figure}
\caption{Steepest descent and ascent curves for the oscillatory range
of $\alpha.$ The descent curve is the leaf-shape, and the two
lines running off to $\pm \infty$ are the steepest ascent
curves.}
\label{leaf}
\end{figure}
The Gawronkski and Shawyer result is stated in terms of the parameters
$A,B,D,C_{1},C_{2},\theta,\rho(\theta)$ and $\xi(\theta)$ defined as
follows:
\begin{align}
&\cos \theta=-\frac{\alpha^{2}}{1-\alpha^{2}}\quad \quad
\text{ so } \quad \frac{\pi}{2} \le \theta <\pi, \\
&A=0, \quad \quad B=\alpha, \quad \quad D=1-\alpha^{2}, \\
&C_{1}=1, \quad C_{2}=\frac{1+\alpha}{1-\alpha}.
\end{align}
They then show that $\rho(\theta)$ increases monotonically
from $0$ to $\pi$ as $\theta$ increases from $0$ to $\pi$ or equivalently
as $|\alpha|$ increases from $0$ to $1/\sqrt{2}$. Furthermore
\begin{align}
\rho(\theta)&=\theta +\frac{2\alpha}{1-\alpha}\left(\frac{\theta}{2}-
\arctan\left(\frac{1+B^{2}-D}{2B}\tan \theta /2\right)\right), \\
\xi(\theta)&=\frac{\theta}{2}+\frac{\pi}{4}+\frac{1+\alpha}{1-\alpha}
\left(\frac{\theta}{2}-\arctan\left(\frac{1+B^{2}-D}{2B}
\tan \theta/2\right)\right).
\end{align}
Here $-\pi/2 < \arctan u <\pi/2$ for real $u$.
\begin{theorem}[Gawronkski and Shawyer \cite{Gawyer}]\label{GSthm1}
\begin{multline}\label{GS1}
J_{m}^{(0+0\cdot m,\frac{1+\alpha}{1-\alpha}
+\frac{2\alpha m}{1-\alpha})}(0)
=\left(\pi m(1+B)\sin \theta/2 \cos \theta /2\right)^{-1/2} \times \\
\left(C_{2}^{-1}(1+B)\left((C_{2}-1)^{2}/4+
C_{2}\cos^{2}\theta/2\right)^{1/2}\right)^{-\frac{2\alpha m}{1-\alpha}
-\frac{1+\alpha}{1-\alpha}} \left(\sin
(m\rho(\theta)+\xi(\theta))+O(1/m)\right)
\end{multline}
as $m$ (i.e., $t$) $\rightarrow \infty$.
\end{theorem}
Now, by direct substitution,
\begin{align}
\left(\pi m(1+B)\sin(\theta /2)\cos(\theta /2)\right)^{-1/2}
&=2\left(\frac{1}{\pi t \sqrt{1-2\alpha^{2}}}\right)^{1/2}\label{GS+2} \\
C_{2}^{-1}(1+B)&=1-\alpha\label{GS+3} \\
\left((C_{2}-1)^{2}/4+C_{2}\cos^{2}(\theta /2)\right)^{1/2}
&=\frac{1}{\sqrt{2}(1-\alpha)}\label{GS+4}
\end{align}
Thus from equations \eqref{GS1},\eqref{GS+2},\eqref{GS+3} and \eqref{GS+4},
\begin{multline}
J_{m}^{(0,2\alpha/(1-\alpha)+(1+\alpha)/(1-\alpha))}(0)=
2\left(\frac{1}{\pi t \sqrt{1-2\alpha^{2}}}\right)^{1/2} \times \\
2^{((2\alpha((1-\alpha)t-1))/(1-\alpha)+(1+\alpha)/(1-\alpha))/2}
\sin\left(((1-\alpha)t/2-1)\rho(\theta)+\xi(\theta)\right)(1+O(1/t)).
\end{multline}
We have that
\begin{equation}
\alpha t-\frac{\alpha}{1-\alpha}+\frac{1+\alpha}{2(1-\alpha)}
=\frac{\alpha t}{2}+\frac{1}{2}
\end{equation}
so
\begin{equation}
J_{m}^{(0,\frac{2\alpha m}{1-\alpha}+\frac{1+\alpha}{1-\alpha})}(0)
=\frac{2\sqrt{2}\;2^{n/2}}{(\pi t \sqrt{1-2\alpha^{2}})^{1/2}} \cdot
\sin(((1-\alpha)t/2-1)\rho(\theta)+\xi(\theta))(1+O(1/t)).
\end{equation}
Now it follows that for $-t \le n \le t,$
\begin{equation}
\psi_{L}(n,t)=\frac{\sqrt{2}}{(\pi t
\sqrt{1-2\alpha^{2}})^{1/2}}
\sin(((1-\alpha)t/2-1)\rho(\theta)+\xi_{L}(\theta))(1+O(1/t))
\end{equation}
where
\begin{align}
\rho(\theta)&=\frac{\theta}{1-\alpha}-\frac{2\alpha}{1-\alpha}
\arctan\left(\frac{\alpha}{\sqrt{1-\alpha^{2}}}\right)\label{rhothe1} \\
\xi_{L}(\theta)&=\frac{\theta}{1-\alpha}+\pi/4 -\frac{1+\alpha}{1-\alpha}
\arctan\left(\frac{\alpha}{\sqrt{1-\alpha^{2}}}\right).\label{xi1}
\end{align}
When we consider $\psi_{\rm{R}}(n,t),\; 0 \le n \le t$ we find that the
power of $2$ does not change. (Gawronkski and Shawyer use the symbol
$\alpha$ as a dummy variable in their Jacobi polynomials. To avoid
confusion, we will call this parameter $\kappa.$) For
$\psi_{\rm{L}}(n,t),\;\kappa$ was set to be zero. To do the calculation
for $\psi_{\rm{R}}(n,t)$ we need to set $\kappa=1.$ Note also that we
must reset $\tau$ (which they call $\beta$) to be
$\tau=b=2\alpha/(1-\alpha).$ Thus $\rho(\theta)$ does not change.
However, $\xi(\theta)$ does change as we will specify below. We (as
Gawronkski and Shawyer do) use
\begin{equation}
\frac{\pi}{2}=\lim_{A\rightarrow 0+}\left(\frac{1+A^{2}-B^{2}+D}{2A}
\tan\frac{\theta}{2}\right)
\end{equation}
to obtain
\begin{equation}\label{psiR1}
\xi_{\rm{R}}(\theta)=\xi_{\rm{L}}(\theta)-\frac{\pi}{2}+
\arctan\left(\frac{\alpha}{\sqrt{1-\alpha^{2}}}\right).
\end{equation}
Thus we have:
\begin{theorem}\label{thmone}
Let $\varepsilon > 0$ be any constant and $\alpha$ be in the
interval $(-1/\sqrt{2}+\varepsilon,1/\sqrt{2}-\varepsilon).$ Then as $t
\rightarrow \infty$ we have uniformly for $0\le
|\alpha|<1/\sqrt{2}-\varepsilon,$
\begin{align}
\psi_{\rm{R}}(n,t)&=\left(\frac{1+\alpha}{1-\alpha}\right)^{1/2}
\frac{\sqrt{2}}{(\pi t \sqrt{1-2\alpha^{2}})^{1/2}}
\sin\left((\frac{1-\alpha}{2}t-1)\rho(\theta)+
\xi_{\rm{R}}(\theta)\right)(1+O(1/t)) \\
\psi_{\rm{L}}(n,t)&=\frac{\sqrt{2}}{(\pi t \sqrt{1-2\alpha^{2}})^{1/2}}
\sin\left((\frac{1-\alpha}{2}t-1)\rho(\theta)+
\xi_{\rm{L}}(\theta)\right)(1+O(1/t))
\end{align}
with $\rho ,\xi_{\rm{L}}$ and $\xi_{\rm{R}}$ defined by equations
\eqref{rhothe1}, \eqref{xi1} and \eqref{psiR1}. Here
\begin{equation}
\theta =\arccos(-\alpha^{2}/(1-\alpha^{2})), \quad
\frac{\pi}{2} \le \theta < \pi.
\end{equation}
\end{theorem}
{\emph{Remark:}} The term of the form $1/\sqrt{1-2\alpha^2}$ gives us
forewarning that this term is going to become very large when
$|\alpha|=1/\sqrt{2}.$ This is consistent with the graph in figure
\ref{walk}. In fact this term actually diverges at this value of
$\alpha,$ but this is a symptom of the breakdown of this approximation,
which is why we included the $\varepsilon$ in the statement of the
theorem. In this transitional range, Theorem \ref{SVthm1} below is the
appropriate form to use.
Our Theorem \ref{thmone} agrees with Theorem 2 of Ambainis {\emph{et
al.,}} as expected, and it also gives an estimate for the error term. We
skip the proof that the answers are identical for the $\psi$-functions as
the probability function $p(n,t)$ defined by
\begin{equation}
p(n,t)=\psi_{\rm{L}}^{2}(n,t)+\psi_{\rm{R}}^{2}(n,t)
\end{equation}
is more interesting and we will show that for this function and its
moments, our answers are identical. We will use the identity
$\sin^{2}A=(1-\cos 2A)/2.$ Recall that $\alpha =n/t$, where we temporarily
think of $t$ as fixed and let $n$ vary. We may then use $B(n,t)$ to
denote a bounded function with bounded derivatives and $A(n,t)$ for a
function such that it and its derivatives are bounded away from $0,$ thus
for $\psi_{\rm{R}}(n,t)$ we have
\begin{align}
A(n,t) &= \frac{1-\alpha}{2}\rho(\theta)+
\frac{\xi_{\rm{R}}(\theta)-\rho(\theta)}{t} \\
B(n,t) &= \left(\frac{1+\alpha}{1-\alpha}\right)^{1/2}
\frac{\sqrt{2}}{(\pi t \sqrt{1-2\alpha^{2}})^{1/2}}
\end{align}
and for $\psi_{\rm{L}}(n,t)$ we have instead
\begin{align}
A(n,t) &= \frac{1-\alpha}{2}\rho(\theta)+
\frac{\xi_{\rm{L}}(\theta)-\rho(\theta)}{t} \\
B(n,t) &= \frac{\sqrt{2}}{(\pi t \sqrt{1-2\alpha^{2}})^{1/2}}.
\end{align}
This enables us to write:
\begin{multline}
\int_{\alpha_{1}}^{\alpha_{2}} B(n,t)\cos(A(n,t)t) \; d\alpha = \\
\int_{\alpha_1}^{\alpha_2} \left(B(n,t)
\frac{d}{d\alpha}\left(\frac{\sin(A(n,t)t)}{tA'(n,t)}\right)
+ B(n,t)\frac{\sin(A(n,t)t)A''(n,t)}{t (A'(n,t))^2} \right)\;d\alpha = \\
\int_{\alpha_1}^{\alpha_2} B(n,t)
\frac{d}{d\alpha}\left(\frac{\sin(A(n,t)t)}{tA'(n,t)}\right)
\;d \alpha + O(1/t) = \\
\left[\frac{B(n,t)\sin(A(n,t)t)}{A'(n,t)t}\right]_{n_{1},t_{1}}^{n_{2},t_{2}}
-\int_{n_{1},t_{1}}^{n_{2},t_{2}}
\frac{B'(n,t)\sin(A(n,t)t)}{A'(n,t)t}\; d\alpha +O(1/t) = O(1/t)
\end{multline}
by a simple integration by parts, where
$\alpha_1=n_1/t_1,\; \alpha_2=n_2/t_2.$ If we write $p(\alpha)=tp(n,t)$
(following Ambainis {\emph{et al.}}) and note that the $O$-term is uniform
for $-1/\sqrt{2}+\varepsilon <\alpha < 1/\sqrt{2}-\varepsilon,$ then that
observation and our Theorem \ref{thmone} give us that
\begin{equation}
\int_{\alpha_{1}}^{\alpha_{2}}p(\alpha) \; d\alpha
=\int_{\alpha_{1}}^{\alpha_{2}}\left(\frac{1}{\pi\sqrt{1-2\alpha^{2}}}
+\left(\frac{1+\alpha}{1-\alpha}\right)\frac{1}{\pi\sqrt{1-2\alpha^{2}}}
\right)\;d\alpha+O(1/t)
\end{equation}
provided $|\alpha|<1/\sqrt{2}-\varepsilon$. Note for the quantum walk
$p(\alpha)$ is 0 when $n$ and $t$ have unequal parity so for the quantum
walk we have
\begin{equation}
\int_{\alpha_{1}}^{\alpha_{2}}p(\alpha)\;d \alpha
=\frac{1}{\pi}\int_{\alpha_{1}}^{\alpha_{2}}
\frac{1}{(1-\alpha)\sqrt{1-2\alpha^{2}}} \;d\alpha +O(1/t)
\end{equation}
To confirm that we have a probability distribution, we must verify that
the function integrates to $1:$
\begin{align}
\int_{\varepsilon-1/\sqrt{2}}^{-\varepsilon+1/\sqrt{2}}p(\alpha)\;d\alpha
&\sim
\frac{1}{\pi}\int_{\varepsilon-1/\sqrt{2}}^{-\varepsilon+1/\sqrt{2}}
\frac{1}{(1-\alpha)\sqrt{1-2\alpha^{2}}}\;d\alpha \\
&\sim
\frac{1}{\pi}\int_{\varepsilon-1/\sqrt{2}}^{-\varepsilon+1/\sqrt{2}}
\frac{1}{(1-\alpha^{2})\sqrt{1-2\alpha^{2}}}\;d\alpha.
\end{align}
If we let $\cos k=-\alpha/\sqrt{1-\alpha^{2}},$ and
$\sin k = \sqrt{1-2\alpha^{2}}/\sqrt{1-\alpha^{2}},$ then we can write
\begin{equation}
\frac{dk}{d\alpha}=\frac{1}{(1-\alpha^{2})\sqrt{1-2\alpha^{2}}}
\end{equation}
so
\begin{equation}
\int_{\varepsilon-1/\sqrt{2}}^{-\varepsilon+1/\sqrt{2}}
p(\alpha)\;d\alpha
=\frac{1}{\pi} \int_{-\pi+\delta(\varepsilon)}^{\delta(-\varepsilon)}\;dk
=1-2\pi^{-1}\delta(\varepsilon)
\end{equation}
where ${\lim}_{\varepsilon \rightarrow 0} \;\delta(\varepsilon)=0,$ as
required. The correction term appears because we have only performed the
integration over the oscillatory range of the probability function, as
this is where it has almost all of its support.
We can now write down the $m$-th moment of the distribution:
\begin{equation}
\int_{-1/\sqrt{2}}^{1/\sqrt{2}}\alpha^{m}p(\alpha)\;d\alpha
\sim\frac{1}{\pi}\int_{-1/\sqrt{2}}^{1/\sqrt{2}}
\frac{\alpha^{m}}{(1-\alpha)\sqrt{1-2\alpha^{2}}}\;d\alpha.
\end{equation}
Thus the first moment is
\begin{multline}
\frac{1}{\pi} \int_{-1/\sqrt{2}}^{1/\sqrt{2}}
\frac{\alpha^{2}-1}{(1-\alpha^{2})\sqrt{1-2\alpha^{2}}}\;d\alpha
+\frac{1}{\pi}\int_{-1/\sqrt{2}}^{1/\sqrt{2}}\frac{1}{(1-\alpha)
\sqrt{1-2\alpha^{2}}}\;d\alpha \\
=-\frac{1}{\pi}\int_{-1/\sqrt{2}}^{1/\sqrt{2}}
\frac{1}{\sqrt{1-2\alpha^{2}}}\;d\alpha + 1=1-1/\sqrt{2}.
\end{multline}
The second moment can be seen to be also equal to $1-1/\sqrt{2}$.
\subsection{The exponential range: $|\alpha| > 1/\sqrt{2}+\varepsilon$}
We now consider the range $\alpha \ge 1/\sqrt{2}+\varepsilon$ where
$0<\varepsilon < 1-1/\sqrt{2}$. The Gawronski and Shawyer results are an
extension and refinement of the results of Saff and Varga \cite{Saff}. We
state the results as Saff and Varga do. They write
\begin{equation}
J_{m}^{(\gamma,2\alpha m/(1-\alpha)+\beta)}(0)=
\int_{\Gamma}e^{mh(\zeta)}g(\zeta)d\zeta
\end{equation}
where
\begin{align}
h(\zeta)&=\ln(\zeta^{2}-1)-\ln 2-\ln\zeta+
\frac{2\alpha}{1-\alpha}\ln(1+\zeta), \\
g(\zeta)&=\frac{1}{2\pi i}(1+\zeta)^{\beta}(1-\zeta)^{\gamma}
\frac{1}{\zeta}.
\end{align}
They choose $\zeta$ to be the saddle-point $\zeta^{-},$ where
\begin{equation}
\zeta^{-}=\frac{\alpha-\sqrt{2\alpha^{2}-1}}{1+\alpha}.
\end{equation}
Using the saddlepoint method they derive
\begin{theorem}[Saff and Varga \cite{Saff}]\label{SVthm1}
\begin{equation}
J_{m}^{(\gamma,2\alpha m/(1-\alpha)+\beta)}(0)\sim i\exp[mh(\zeta^{-})]
g(\zeta^{-})\left(\frac{2\pi}{mh''(\zeta^{-})}\right)^{1/2}
\end{equation}
where
\begin{equation}\label{hdprime1}
h^{''}(\zeta)=-2\frac{\zeta^{2}+1}{(\zeta^{2}-1)^{2}}
+\frac{1}{\zeta^{2}}-\frac{2\alpha}{(1-\alpha)(1+\zeta)^{2}}.
\end{equation}
\end{theorem}
Now
\begin{align}
\zeta-\zeta^{-1}&=\frac{\alpha-\sqrt{2\alpha^{2}-1}}{1+\alpha}
-\frac{\alpha+\sqrt{2\alpha^{2}-1}}{1-\alpha}
=-2\frac{\alpha^{2}+\sqrt{2\alpha^{2}-1}}{1-\alpha^{2}},\\
1+\zeta &=\frac{1+2\alpha-\sqrt{2\alpha^{2}-1}}{1+\alpha}.
\end{align}
Then according to Saff and Varga,
\begin{multline}
J_{m}^{(\gamma,2\alpha m/(1-\alpha)+\beta)}(0)
\sim \left(\frac{\alpha^{2}+\sqrt{2\alpha^{2}-1}}
{1-\alpha^{2}}\right)^{m}\left(\frac{1+2\alpha
-\sqrt{2\alpha^{2}-1}}{1+\alpha}\right)^{2\alpha m/(1-\alpha)}
\times\\
\frac{1}{2\pi}
\left(\frac{1+2\alpha-\sqrt{2\alpha^{2}-1}}{1+\alpha}\right)^{\beta}
\left(\frac{1+\sqrt{2\alpha^2 -1}}{1+\alpha} \right)^{\gamma}
\frac{1+\alpha}{\alpha-\sqrt{2\alpha^{2}-1}}
\left(\frac{2 \pi}{mh^{''}(\zeta)}\right)^{1/2}
\end{multline}
where $h^{''}(\zeta)$ is defined in \eqref{hdprime1}.
Gawronski and Shawyer show that the $\sim$ symbol can be replaced by
$1+O(1/t)$ and that this expansion holds uniformly for $\alpha \in
[1/\sqrt{2}+\varepsilon,1-\varepsilon].$
Note that when $\alpha \rightarrow 1/\sqrt{2},$
\begin{equation}
\left|J_{m}^{(\gamma,2\alpha m/(1-\alpha)+\beta)}(0)\right|^{1/m}
\rightarrow \left(\frac{1+\sqrt{2}}{1+1/\sqrt{2}}\right)^
{(\frac{2\alpha}{1-\alpha})(\frac{1-\alpha}{2}t)}
=2^{\alpha t/2}=2^{n/2}
\end{equation}
so the asymptotic estimate above metamorphoses into the asymptotic
estimate in Theorem \ref{thmone}. We shall state our results for positive
$\alpha$ as those for negative $\alpha$ follow immediately. These results
refine the estimates of Ambainis {\emph{et al.}} \cite{Ambainis01}.
\begin{theorem}\label{thmtwo}
If $\alpha \ge 1/\sqrt{2}+\varepsilon$, then uniformly for
$\alpha \in [1/\sqrt{2} + \varepsilon,1-\varepsilon],$
\begin{align}
\psi_{\rm{R}}(n,t)&=C_{\rm{R}}\frac{1}{t^{1/2}}B^{t}(\alpha)(1+O(1/t))\\
\psi_{\rm{L}}(n,t)&=C_{\rm{L}}\frac{1}{t^{1/2}}B^{t}(\alpha)(1+O(1/t))
\end{align}
where $B(\alpha), \; C_{\rm{L}}, \; C_{\rm{R}}$ are defined in the
asymptotic expansion of
\begin{equation}
2^{-n/2-1}J_{m}^{(\gamma,2\alpha m/(1-\alpha)+\beta)}(0)
\end{equation}
following from the above. (Note that for $C_{\rm{L}}, \; \gamma=0$ and
$\beta=(1+\alpha)/(1-\alpha)$ while for $C_{\rm{R}}, \; \gamma=1, \;
\beta=2\alpha/(1-\alpha)$ in the above.) Thus
\begin{equation}
B(\alpha) = \left(\frac{1+2\alpha - \sqrt{2\alpha^2 -1}}{1+\alpha}
\right)^{\alpha}
\left(\frac{\alpha^2+\sqrt{2\alpha^2-1}}{1-\alpha^2}
\right)^{\frac{1-\alpha}{2}}
\end{equation}
and
\begin{equation}
C_{\rm{R}}=\left(\frac{1+\sqrt{2\alpha^2-1}}{1+2\alpha-\sqrt{2\alpha^2-1}}
\right) \times C_{\rm{L}}(\alpha)
\end{equation}
where
\begin{multline}
C_{\rm{L}}(\alpha) = \frac{1}{\sqrt{2\pi}}
\left(\frac{1+2\alpha-\sqrt{2\alpha^2-1}}{1+\alpha}
\right)^{\frac{1+\alpha}{1-\alpha}}
\left(\frac{1+\alpha}{\alpha-\sqrt{2\alpha^2-1}}\right)
\times \frac{2}{1-\alpha} \times \\
\left(\left(\frac{\alpha+\sqrt{2\alpha^2-1}}{1-\alpha}\right)^2
-\frac{(1+\alpha)^2}{(1-\alpha)(2\alpha+1-\sqrt{2\alpha^2-1})}\right. \\
\left.- \frac{\alpha(\sqrt{2\alpha^2-1} - (1+2\alpha))(1+\alpha)^2}
{(1+\alpha(1-\alpha+\sqrt{2\alpha^2-1}))^2}
\right)^{-1/2}
\end{multline}
to reconstruct the complete wavefunction.
The asymptotics for $\alpha$ in the range
$-1+\varepsilon \le \alpha \le -1/\sqrt{2}-\varepsilon$ follow from the spatial
symmetry between $-n$ and $n$.
\end{theorem}
Figure \ref{evanesce} shows the steepest descent curves for this
range.
\begin{figure}
\caption{Steepest descent and ascent curves for $|\alpha|>1/\sqrt{2}$.}
\label{evanesce}
\end{figure}
{\emph{Remark:}}
Saff and Varga show that $B(\alpha)$ is a decreasing function of
$\alpha$ for $\alpha \in [1/\sqrt{2}+\varepsilon,1-\varepsilon].$ Thus the
$\psi$-functions decrease exponentially in $t$ in this range.
\subsection{The transitional range: $2^{-1/2}-\varepsilon < \alpha <
2^{-1/2} + \varepsilon$}
We now consider the range $1/\sqrt{2}-\varepsilon<\alpha <1/\sqrt{2}+
\varepsilon$. Theorems \ref{thmone} and \ref{thmtwo} exhibit an
oscillating sine term times $t^{-1/2}$ for
$|\alpha|<1/\sqrt{2}-\varepsilon$ and an exponentially small estimate for
$|\alpha|>1/\sqrt{2}+\varepsilon.$ Saff and Varga \cite{Saff} show that if
$\alpha =1/\sqrt{2}$ then
\begin{equation}
J_{m}^{(\gamma,2\alpha m/(1-\alpha)+\beta)}(0)
\sim\frac{g(2\alpha-1)}{3}\left(\frac{6}{mh^{(3)}(2\alpha-1)}\right)^{1/3}
e^{i\pi /3}\Gamma(1/3)
\end{equation}
(See also Ambainis {\emph{et al.}} \cite{Ambainis01}.) The asymptotic behaviour is
therefore qualitatively different in the three cases
$|\alpha|<1/\sqrt{2}-\varepsilon, \quad \alpha=1/\sqrt{2},$ and $\quad
|\alpha|>1/\sqrt{2}+\varepsilon.$
We now apply the analysis for coalescing saddle-points as described in R.
Wong's book \cite{Wong}, to get a uniform asymptotic expansion for a range
of $\alpha$ extending a positive distance on each side of
$\alpha=1/\sqrt{2}$. The asymptotic behaviour is described in terms
of the Airy function, $Ai(x).$ These functions cannot be written out
explicitly, but (like Bessel functions) they can be written in terms
of integral representations that are well understood. It suffices for
us to know that as $x \rightarrow -\infty$ the behaviour of these
functions is oscillatory,
\begin{align}
Ai(-x) &\sim \frac{1}{\sqrt{\pi}}\cos(\frac{2}{3}x^{3/2}-\pi/4)/x^{1/4}, \\
Ai'(-x)&\sim \frac{x^{1/4}}{\sqrt{\pi}}\sin(\frac{2}{3}x^{3/2}-\pi/4)
\end{align}
but that when $x \rightarrow \infty$ they behave like
\begin{align}
Ai(x) &\sim \frac{1}{2\sqrt{\pi}} x^{-1/4}
\exp\left(-\frac{2}{3}x^{3/2}\right), \\
Ai'(x) &\sim -\frac{1}{2\sqrt{\pi}}x^{1/4}
\exp\left(-\frac{2}{3}x^{3/2}\right)
\end{align}
which decreases faster than any power of $x$.
Recall that we have the two saddle-points
$\zeta^{-},\zeta^{+}=(\alpha \mp \sqrt{2\alpha^{2}-1})/(1+\alpha)$.
Following Wong we define the variables $\zeta$ and $\eta$ by
\begin{align}
\zeta^{3/2}&=\frac{3}{4}(h(\zeta^{+})-h(\zeta^{-})) \\
\text{ or }\quad
2\zeta^{1/2}=\zeta^{+}-\zeta^{-}&=2i\sqrt{1-2\alpha^{2}}/(1+\alpha),\\
\eta&=\frac{1}{2}(h(\zeta^{-})+h(\zeta^{+}))
\end{align}
We must choose $\zeta^{-},\zeta^{+}$ as we have because we want the range
of $\alpha > 1/\sqrt{2}$ to give positive $\zeta$ so that there is
exponential decay as $\zeta \rightarrow \infty$. Note also (as Saff and
Varga point out) that if $\alpha =1/\sqrt{2}$ then
\begin{align}
h^{'}(\alpha)&=h^{''}(\alpha)=0 \\
h^{'''}(\alpha)&=-\frac{1+\alpha}{24\alpha^{2}(1-\alpha)^{3}}.
\end{align}
Thus
\begin{equation}
h(1/\sqrt{2}+\delta)
=-\frac{1+\alpha}{24\alpha^{2}(1-\alpha)^{3}}\delta^{3}+O(\delta^{4})
\end{equation}
so
\begin{equation}
mh(\alpha)=o(1) \quad \text{ if } \quad \delta =o(m^{-1/3}).
\end{equation}
Also $h(\zeta^{+})\sim h(\zeta^{-})$ so $\eta \sim h(\zeta^{-})$
for this range of $\alpha$.
Suppose for now that equation 4.31 in chapter VII of Wong \cite{Wong}
holds, which in our notation becomes
\begin{equation}
J_{m}^{(\gamma,2\alpha m/(1-\alpha)+\beta)}(0)
=2\pi i e^{-m \eta}\left(\frac{Ai(m^{2/3}\zeta)}{m^{1/3}}
a_{0}+\frac{Ai^{'}(m^{2/3}\zeta)}{m^{2/3}}
b_{0}\right)(1+O(m^{-1/3}))
\end{equation}
where $a_{0}$ and $b_{0}$ are independent of $m.$ (In Wong's book, our $m$
is his $\lambda$ and our $\zeta$ is his $t.$ Our Jacobi polynomial is his
$I(\lambda;\alpha),$ but his $\alpha$ is something else.) To obtain an
expression for $\psi_{\rm{L}}$ we need to set $\gamma=0$ and
\begin{equation}
\beta= \frac{1+\alpha}{1-\alpha}
\end{equation}
as in Theorem
\ref{thmone}.
Likewise, for $\psi_{\rm{R}},$ we will need to set $\gamma=1$ and
\begin{equation}
\beta=\frac{2\alpha}{1-\alpha}.
\end{equation}
The previous argument shows that the term $m\eta =o(1)$ if $\delta
=o(m^{-1/3})$ and so the $e^{m\eta}$ term is asymptotic to $1.$ Note
furthermore that if $\alpha = 1/\sqrt{2}+\delta$ then the definition of
$\zeta$ implies that
\begin{equation}
\zeta=-\frac{1-2\alpha^{2}}{(1+\alpha)^{2}}
=\frac{4\sqrt{2}}{(1+\sqrt{2})^{2}}\delta+O(\delta^{2}).
\end{equation}
Thus
\begin{equation}
m^{2/3}\zeta
=\frac{4\sqrt{2}}{(1+\sqrt{2})^{2}}m^{2/3}\delta+O(m^{2/3}\delta^{2}).
\end{equation}
When we use this estimate in the asymptotic behaviour of $Ai(m^{2/3}\zeta)$
and $Ai^{'}(m^{2/3}\zeta)$ we see that the Airy functions give terms
superpolynomially small if $\delta > m^{-2/3+\epsilon}.$ If $\delta
=m^{-\eta},\; \eta < 2/3$ however we see that the $\psi$-functions are
only polynomially small. A similar argument shows that if $|\alpha| <
2^{-1/2}-m^{-2/3+\epsilon}$ then the behaviour of the $\psi$
is oscillatory.
We can therefore write that the transition from oscillatory behaviour
bounded below by a power of $m$ or $t$ to bounded above by a
superpolynomially small function occurs when $\alpha$ varies
by $O(t^{-2/3})$ from $1/\sqrt{2}$.
We may use equation 4.31 in chapter VII of Wong \cite{Wong} if the
transformation
\begin{equation}
h(\zeta)=u^{3}/3-\zeta u+\eta
\end{equation}
is single-valued on the contour of integration. We may use the
Gawronski--Shawyer contour of integration (which Saff and Varga also use).
The transformation will be one-to-one if on the path of integration,
\begin{equation}
\frac{d\zeta}{du}\ne 0 \text{ and } \frac{du}{d\zeta}\ne 0.
\end{equation}
Now
\begin{equation}
\frac{d\zeta}{du}=\frac{u^{2}-\zeta}{h^{'}(\zeta)}
\end{equation}
and the bottom derivative is only zero at the saddle-points. The only
saddle-points are at $\zeta^{-}$ and $\zeta^{+}$. Wong shows that the
choice of $\zeta^{-},\zeta^{+}$ we used implies that $\frac{d\zeta}{du}\ne
0.$ The only place the numerator is zero is at $\zeta^{-},\zeta^{+}$ and
the denominator is analytic and therefore bounded on the contour of
integration. We find from Wong's book that
\begin{theorem}\label{thmthree}
There is a positive $\varepsilon$ so that uniformly for some $a_{0}$ and
$b_{0}$ (defined below)
\begin{equation}
J_{m}^{(\gamma,2\alpha m/(1-\alpha)+\beta)}(0) =2\pi i e^{-m\eta}\left(
\frac{Ai(m^{2/3}\zeta)}{m^{1/3}}a_{0}+\frac{Ai'(m^{2/3}\zeta)}{m^{2/3}}
b_{0}\right)(1+O(m^{-1/3})).
\end{equation}
When $ n \equiv t$ mod 2 and $J_{r}^{(u,v)}(z)$ denotes a Jacobi
polynomial,
\begin{equation}
\psi_{L}(n,t) = 2^{-n/2-1}
J_{m}^{(0,\frac{2\alpha m}{1-\alpha}+\frac{1+\alpha}{1-\alpha})}(0),
\quad \text{ when }0 \le n < t,
\end{equation}
where we can use the Remark following lemma \ref{second} to
obtain the wavefunction for negative $n.$
Also,
\begin{equation}
\psi_{R}(n,t) = (\frac{1+\alpha}{1-\alpha})2^{-n/2-1}
J_{m}^{(1,\frac{2\alpha m}{1-\alpha}+\frac{2\alpha}{1-\alpha})}(0),
\quad \text{ when }0\le n <t,
\end{equation}
and use the symmetry properties to obtain the other half as before.
\end{theorem}
So this integral representation is valid for all values of $-1+\varepsilon
\leq \alpha \leq 1-\varepsilon.$ In this theorem,
\begin{equation}
\phi_{0}(u)=g(\zeta)\frac{d\zeta}{du}
\end{equation}
and
\begin{align}
a_{0}&=\frac{1}{2}[\phi_{0}(\zeta^{1/2})+\phi_{0}(-\zeta^{1/2})], \\
b_{0}&=\frac{1}{2}[\phi_{0}(\zeta^{1/2})-\phi_{0}(-\zeta^{1/2})].
\end{align}
{\emph{Remark:}}
Theorem \ref{thmthree} gives the asymptotic behaviour in a rather
convoluted way. It is really only useful for $|\alpha|$ very near to
$1/\sqrt{2},$ where the behaviour undergoes a qualitative change from
oscillatory to exponential decay. For other values of $\alpha,$ Theorems
\ref{thmone} and \ref{thmtwo} give much simpler expressions for the
wavefunction.
Figure \ref{drop} shows the steepest descent curve when the two
saddle-points coalesce.
\begin{figure}
\caption{Steepest descent and ascent curves when the saddle-points
coalesce. The tear-drop shape and the line from $\zeta^{\pm}$ are shown.}
\label{drop}
\end{figure}
\subsection{Error bounds for the method of steepest descents.}
We now discuss very briefly the error-terms in our results so far. We
restrict our attention to $\psi_{L}(n,t)$ and Theorem \ref{thmone} above,
but similar comments apply to the other results in this paper and
$\psi_{R}(n,t)$. In order to apply Theorem 7.1 of Olver we expand
$h(\zeta)$ and $g(\zeta)$ in powers of $\zeta-\zeta^{+}$:
\begin{align}
e^{mh(\zeta)}&= e^{mh(\zeta^{+})
+\sum_{r=3}^{\infty}b_{r}(\zeta-\zeta^{+})^{r}} \\
g(\zeta)&=g(\zeta^{+})+\sum_{r=1}c_{r}(\zeta-\zeta^{+})^{r}
\end{align}
and write
\begin{equation}
e^{\sum_{r=3}^{\infty}b_{r}(\zeta-\zeta^{+})^{r}}
\left(g(\zeta^{+})+\sum_{r=1}^{\infty}c_{r}(\zeta-\zeta^{+})^{r}\right)
=\sum_{s=0}^{\infty}a_{s}(\zeta-\zeta^{+})^{s}.
\end{equation}
The steepest descent contour that Gawronski and Shawyer use naturally
separates into two pieces, a piece $\Gamma_{1}$ above the real axis and a
piece $\Gamma_{2}$ below the real axis. Notice that $\Gamma_{1}$ is the
mirror image of $\Gamma_{2}$ in the real axis. With this notation, Theorem
7.1 in Olver's book \cite{Olver} says
\begin{equation}
\int_{\Gamma_{1}}e^{-mh(\zeta)}g(\zeta)\;d\zeta
\sim 2e^{-mh(\zeta^{+})}\sum_{s=0}^{\infty}
\Gamma(s+1/2)\frac{a_{2s}}{m^{s+1/2}}.
\end{equation}
Here
\begin{align}
a_{0}&=\frac{g}{(2h'')^{1/2}} \\
a_{2}&=\left\{2g''-\frac{2h'''g'}{h''}
+\left(\frac{5(h''')^{2}}{6(h'')^{2}}-\frac{h''''}{2h''}\right)g
\right\}\frac{1}{(2h'')^{1/2}}
\end{align}
when $g,h$ and their derivatives are evaluated at $\zeta=\zeta^{+}.$ The
asymptotic expansion of the integral over $\Gamma_{2}$ is the complex
conjugate of the integral over $\Gamma_{1}$. It is therefore possible to
derive complete asymptotic expansions for $\psi_{\rm{L}}(n,t)$ and
$\psi_{\rm{R}}(n,t)$ and certainly Gawronkski and Shawyer were aware of
this. The $s=0$ term is the asymptotic formula of Theorem \ref{thmone}
above. Since we do not need more precision than in Theorem \ref{thmone}
for this application, we do not pursue this further.
Olver then explains how to derive numerical estimates for the error term.
This is somewhat complicated so we refer the reader to section 10 of
chapter 4 of Olver's book. One needs to compute the maximum of certain
quantities on the contour of integration. Since we do not have
applications of such bounds we will not pursue that here. Olver also
shows how to derive explicit numerical error bounds when the asymptotic
expansion immediately above is truncated at any value of $s$.
\section{The Schr\"{o}dinger Approach}
Nayak and Vishwanath \cite{NayaknV} start from the recursion relations in
\eqref{recursion} and use the Fourier transform
\begin{equation}
\tilde\Psi(k,t)=\sum_{n}\Psi(n,t)e^{ikn}
\end{equation}
where $\Psi(n,t)$ is defined by \eqref{psidef} and obtain
\begin{equation} \tilde\Psi(k,t+1)=M_{k}\tilde\Psi(k,t)
\end{equation}
when
\begin{equation}
\sqrt{2}M_{k}=
\begin{pmatrix}
-e^{-ik} & e^{-ik} \\
e^{ik} & e^{ik}
\end{pmatrix}.
\end{equation}
The eigenvalues of $M_{k}$ are
\begin{align}
&\lambda^{1}=e^{i\omega_{k}}, &\lambda^{2}=e^{i(\pi-\omega_{k})}
\end{align}
where $\omega_{k}\in[-\pi/2,\pi/2]$ and satisfies $\sin \omega_{k} =\sin
k/\sqrt{2}.$
It follows that
\begin{equation}
\tilde\Psi_{k}(t)=M_{k}^{t}\tilde\Psi(k,0).
\end{equation}
They deduce from this that
\begin{align}
\tilde\Psi_{\rm{L}}(k,t)&=\frac{1}{2}
\left(1+\frac{\cos k}{\sqrt{1+\cos^{2}k}}\right)
e^{i\omega_{k}t} + \frac{(-1)^{t}}{2}
\left(1-\frac{\cos k}{\sqrt{1+\cos^{2}k}}\right) e^{-i\omega_{k}t}\\
\tilde\Psi_{\rm{R}}(k,t)&=\frac{e^{-ik}}{2\sqrt{1+\cos^{2}k}}
\left(e^{i\omega_{k}t}-(-1)^{t}e^{-i\omega_{k}t}\right).
\end{align}
Formally inverting the original Fourier transform (using Cauchy's integral
formula) and some ingenious manipulations produce
\begin{align}\label{novel}
\Psi_{\rm{L}}(n,t)&=\int_{-\pi}^{\pi} \frac{dk}{2\pi}
\frac{-ie^{ik}}{\sqrt{1+\cos^{2}k}}e^{-i(\omega_{k}t-kn)} \\
\Psi_{\rm{R}}(n,t)&=\int_{-\pi}^{\pi} \frac{dk}{2\pi}
\left(1+\frac{\cos k}{\sqrt{1+\cos^{2}k}}\right)e^{-i(\omega_{k}t-kn)}
\end{align}
where $\omega_{k}=\arcsin\left(\frac{\sin k}{\sqrt{2}}\right) \in
[-\pi/2,\pi/2]$.
They then apply the method of stationary phase to obtain a weaker version
of Theorem \ref{thmone} above, and integration by parts to show that the
wavefunction decays superpolynomially fast for $|\alpha|>1/\sqrt{2},$
which gives them a much weaker version of Theorem \ref{thmtwo} (Ambainis
{\emph{et al.}} show that this decay is exponential, but they were unable
to obtain uniform asymptotics). Of course both approaches consider the
same functions $\Psi_{\rm{L}}(n,t)$ and $\Psi_{\rm{R}}(n,t)$, the
differences are just the representation of the generating functions and
the choice of contour of integration, as we will discuss below.
If $|\alpha| < \frac{1}{\sqrt{2}}$ each eigenvalue is minus the complex
conjugate of the other, so the $\psi$-functions have an oscillating
factor. When we find the stationary points of the phase, we obtain an
equation for $k$ at the critical points, $k_{\alpha}.$ This is
\begin{equation}
\cos k_{\alpha} = \frac{-\alpha}{\sqrt{1-\alpha^2}}.
\end{equation}
When $|\alpha| < 1/\sqrt{2}$ this has solutions which are real and
distinct. The two solutions merge at $\alpha=1/\sqrt{2},$ and then become
complex. When $\alpha$ is outside the range $|\alpha| \leq 1/\sqrt{2}$
the phase has no stationary point on the real axis. We have been unable
to find a method for approximating these integrals. It is worth noting that
\eqref{novel} are themselves integral representations of Jacobi polynomials
as a function of its parameters.
The exponentially decaying solutions are counter-intuitive in other ways.
As we mentioned above, for this case $k_{\alpha}$ is complex, so instead
of seeing the oscillatory behaviour we might be expecting, instead the
wavefunction decays within an exponential envelope. This is rather like
the phenomenon of evanescent waves. These can also occur classically:
consider an electromagnetic wave incident on the surface of a conductor.
These waves cannot propagate in conductors, as the latter will not sustain
charge gradients. However, the wave does impinge a finite distance into
the conductor (the ``skin depth'') over which its amplitude decays
exponentially. Mathematically, this is equivalent to a complex wave-number.
Evanescence can also occur in quantum mechanics, typically in regions that
are classically forbidden to the particle. Strangely, both these scenarios
involve the presence of some kind of barrier, but no such barrier is present
for the quantum walk. These regions are not classically forbidden to the
particle, it's just very unlikely to be there.
\subsection{The relationship between the two approaches}
In the paper by McClure and Wong \cite{Wongpaper} the authors
show that the methods of stationary phase can be reduced to the method of
steepest descent under quite general conditions, as the same results can be
obtained from either method, with exactly the same convergence
properties. We can see this intuitively as follows. Using steepest
descents, we have the integral
\begin{equation}\label{steepdef2}
\frac{1}{2\pi i} \int_{\Gamma} g (\zeta) e^{-mh(\zeta)} \; d\zeta.
\end{equation}
If the contour $\Gamma$ goes through the saddlepoint $\zeta^+$ we can
deform $\Gamma$ to the curve $|\zeta|=|\zeta^+|=r$ or $\zeta=re^{i\theta},$
for some dummy variable $\theta.$ This produces an integral
\begin{equation}
\frac{1}{2\pi}\int_{-\pi}^{\pi} e^{-mh(re^{i\theta})} g(re^{i\theta})
re^{i\theta} \; d\theta.
\end{equation}
If we separate $h$ into its real and imaginary parts, then we obtain
\begin{equation}
\frac{1}{2\pi}\int_{-\pi}^{\pi}e^{-m(\Re(h(re^{i\theta}))
+i\Im(h(re^{i\theta})))}g(re^{i\theta})re^{i\theta}\;d\theta.
\end{equation}
For a quantum system undergoing unitary evolution, $\Re(h)=0$ and since
$r$ is a constant, this integral is of the form required for the
stationary phase approximation. So we will write
$\mu(\theta)=\Im(h(re^{i\theta})).$
The stationary points are those $\theta$ for which $\mu'(\theta)=0$ but
since
\begin{equation}
\frac{d\mu(\theta)}{d\theta} = h'(\zeta) \frac{d \zeta}{d \theta}
\end{equation}
we see that the stationary points are identical to the saddle-points.
With the method of steepest descents the integrand has a very small
absolute value away from the saddle-point. By contrast, in the method of
stationary phase the oscillations of the kernel $\mu$ become arbitrarily
rapid away from the stationary point, and so self-cancel so long as
$g(re^{i\theta})$ is sufficiently smooth. (Readers requiring further
details are referred to the lucid exposition in \cite{Wongpaper}.)
\section{Conclusion}
We have developed a new way of analysing the discrete quantum walk on the
infinite line in terms of Airy functions, which has the advantage of being
able to handle the dramatic changes in the asymptotic behaviour of this
system in a uniform manner. We have also probed the mathematical
relationship between the path-integral and Schr\"{o}dinger approaches to
solving this problem. Previous authors have found the methods of
integration by parts and stationary phase to be problematic over some
parts of the range of $\alpha.$ By contrast, the method of steepest descents
yields a unified treatment of the system.
\subsection*{Acknowledgments}
We are grateful to Rod Wong for drawing our attention to reference
\cite{Wongpaper} and Nico Temme for kindly generating the steepest
descent curves. HAC was supported by MITACS, The Fields Institute, and
the NSERC CRO project ``Quantum Information and Algorithms.'' The research
of MEHI was partially supported by NSF grant DMS 99-70865. The research
of LBR was partially supported by an NSERC operating grant.
We are also grateful to an anonymous referee for a very thorough reading
of this paper which helped us to clarify the notation considerably.
\end{document} |
\begin{document}
\title{Deterministic Distribution of Orbital Angular Momentum Multiplexed Continuous-variable Entanglement and Quantum Steering}
\maketitle
\section{Introduction}
Einstein-Podolsky-Rosen (EPR) entanglement plays a crucial role in quantum information processing, such as quantum communication, quantum computation, and quantum precision measurement \cite{EPREntanglement,BraunsteinRMP,KimbleQuanInt,WeedbrookRMP,PhysicsReport}. Besides entanglement, quantum steering, which stands between entanglement \cite{EPREntanglement} and Bell nonlocality \cite{Bell} in the hierarchy of quantum correlations \cite{SteeringRMP}, has been identified as a useful quantum resource. Different from entanglement and Bell nonlocality, quantum steering shows unique asymmetry or even one-way characteristics \cite{WisemanPRL,OneWayNatPhot,ANUexp,OneWayPryde,OneWayGuo,OneWayQin,XiaoY2017,cvdv,WangPRL2020}, and thus allows asymmetric quantum information processing. For example, quantum steering enables one-side device independent quantum key distribution \cite{SteeringQKD,SchnabelOneSidedQKD,PKLamOneSidedQKD}.
Multiplexing provides an efficient method to enhance the data-carrying capability in both classical and quantum communication systems by combining multiple channels into a single channel. By utilizing different degrees of freedom (DOFs) of light, such as wavelength \cite{WaveMultiplexing1,WaveMultiplexing2}, polarization \cite{PolarMultiplexing}, temporal \cite{TimeMultiplexing1,TimeMultiplexing2,TimeMultiplexing3} or spatial \cite{SpatialMultiplexing1,SpatialMultiplexing2} modes, different types of multiplexing can be realized. Orbital angular momentum (OAM) of light \cite{AllenOAM} has also been found to be an attractive DOF to realize multiplexing due to its infinite range of possibly achievable topological charges \cite{OAMMultiplexing1,OAMMultiplexing2}. OAM has found applications in discrete-variable quantum information processing, such as high dimensional OAM entanglement generation \cite{ZeilingerPNAS}, and 18-qubit entanglement with six photons’ three DOFs including OAM \cite{Lu18modes}.
\begin{figure*}
\caption{(a) Experimental setup for the generation and distribution of OAM multiplexed CV quantum entanglement and steering in a lossy or noisy channel. Pr: probe beam; Conj: conjugate beam; LO\(_{-l, P}\).}
\end{figure*}
Four-wave mixing (FWM) process in warm alkali vapor cell has found a wide range of applications \cite{QinLight,QinPRL,EntangledImages,JingHexaEnt,OAMFWM2008,SU11,PooserOptica}. Especially, spatial-multi-mode advantage of the FWM process, attributed to its cavity-free configuration, makes it an ideal optical parametric amplifier to generate entangled images \cite{EntangledImages} and reconfigurable multipartite entanglement \cite{JingHexaEnt}. Quantum correlated twin beams carrying OAM were generated based on the FWM process in rubidium vapor \cite{OAMFWM2008}. OAM multiplexed bipartite and multipartite CV entangled states have been generated based on the FWM process \cite{JingBiOAM,JingTriOAM,JingHexaOAM}. Furthermore, OAM multiplexed deterministic all-optical quantum teleportation has also been demonstrated by utilizing OAM multiplexed bipartite CV entangled state generated from the FWM process \cite{JingQuanTele}. To enhance the data-carrying capacity in quantum communication based on OAM multiplexed CV entangled states, it is essential to distribute them in lossy and noisy quantum channels towards practical applications. The distribution of weak coherent field and single photons carrying OAM in fiber, free space, and underwater have been experimentally investigated \cite{OAMDistribution1,OAMDistribution2,OAMDistribution3}. However, it remains unclear whether the quantum entanglement and steering of OAM multiplexed CV entangled states are more sensitive to loss and noise than commonly used Gaussian mode with $l=0$.
Here, we present the deterministic distribution of OAM multiplexed CV quantum entanglement and steering in lossy and noisy channels. In the experiment, the OAM multiplexed entangled fields are generated deterministically based on the FWM process in warm cesium vapor and distributed deterministically in quantum channels. We show that the CV entangled states carrying topological charges $l=1$ and $l=2$ are as robust against loss as Gaussian mode with $l=0$. Sudden death of entanglement and quantum steering of high-order OAM multiplexed CV entangled state is observed in the presence of noise. Our results pave the way for applying OAM multiplexed CV entanglement and quantum steering in high data-carrying capacity quantum communication.
\section{The principle and experimental setup}
Figure 1(a) shows the schematic of experimental setup, and Fig. 1(b) shows the double-$\Lambda$ energy level structure used for the FWM process, which is formed from the $^{133}$Cs $D_{1}$ line with an excited level ($6P_{1/2}, F'=4$) and two ground levels ($6S_{1/2}, F=3$ and $F=4$). The pump beam is about 1.6 GHz blue detuned from $6S_{1/2}, F=3\rightarrow6P_{1/2}, F'=4$ transition, and the probe beam is 9.2 GHz red shifted relative to the pump beam. The pump and probe beams are combined by a Glan-laser (GL) polarizer and then cross each other in the center of the cesium vapor cell at an angle of 6 mrad \cite{MaOL}. The gain of the FWM process is around 3 with a pump power of 240 mW and a probe power of 3 $\mu$W. By injecting the probe beam carrying topological charge $l$ of OAM mode, conjugate beam carrying topological charge $-l$ of OAM mode is generated on the other side of the pump, which satisfies OAM conservation in the FWM process. The topological charge of OAM mode $l=1$ or $l=2$ is added to the probe beam by passing it through a vortex phase plate (VPP). The pump beam is filtered out by using a Glan-Thompson (GT) polarizer with an extinction ratio of $10^{5}$:1 after the vapor cell.
The Conj field is kept by Alice, while the Pr field is distributed to a remote quantum node owned by Bob through a lossy or noisy channel. The lossy channel is simulated by a half-wave plate (HWP) and a polarization beam splitter (PBS). The noisy channel is modeled by combining the Pr field with an auxiliary beam at a PBS followed by a HWP and a PBS. The auxiliary beam carries the same frequency and topological charge with the Pr field, and is modulated by an amplitude modulator and a phase modulator with white noise \cite{WangPRL2020}. To characterize the OAM multiplexed CV entangled state, its covariance matrix (CM) is experimentally measured by utilizing two sets of balanced homodyne detectors (BHDs). In order to extract the CV quadrature information carried by the OAM mode with a topological charge $l$, local oscillator (LO) with opposite topological charge $-l$ is required. In our experiment, the spatially mode-matched LO beams used in the BHDs are obtained from a second set of FWM process which is around 5 mm above the first set of FWM process in the same vapor cell \cite{EntangledImages}. More details of the experimental parameters can be found in Appendix B.
\begin{figure*}
\caption{\label{Loss}(a), (b) Beam patterns of the Pr and Conj fields of the quantum states \(\ket{\Psi}_{1}\) and \(\ket{\Psi}_{2}\) transmitted through a lossy channel (top rows), and the corresponding patterns after a tilted lens used to infer the topological charges (bottom rows). (c) Dependence of the PPT values of the CV bipartite entangled states carrying topological charges \(l=0\), \(l=1\), and \(l=2\) on the transmission efficiency of the Pr field.}
\end{figure*}
The Hamiltonian of the OAM multiplexed FWM process can be expressed as \cite{JingBiOAM}:
\begin{equation}
\hat{H}=\sum_{l}i\hbar\gamma_{l}\hat{a}^{\dagger}_{l,P}\hat{a}^{\dagger}_{-l,C}+h.c.
\end{equation}
where \(\gamma_{l}\), which is defined as the interaction strength of each OAM pair, is proportional to the pump power. \(\hat{a}^{\dagger}_{l,P}\) and \(\hat{a}^{\dagger}_{-l,C}\) are the creation operators related to OAM modes of the Pr and Conj fields, respectively. Since the pump beam does not carry OAM (\(l=0\)), the topological charges of the Pr and Conj fields are opposite due to OAM conservation in the FWM process. The output state of the OAM multiplexed FWM process is:
\begin{equation}
\ket{\Psi}_{out}=\ket{\Psi}_{-l}\otimes\cdots\otimes\ket{\Psi}_{0}\otimes\cdots\otimes\ket{\Psi}_{l}
\end{equation}
where \(\ket{\Psi}_{l}=\ket{\psi_{l,P},\psi_{-l,C}}\) represents a series of independent OAM multiplexed CV EPR entangled states generated in the FWM process. $\ket{\psi_{l,P}}$ and $\ket{\psi_{-l,C}}$ represent the Pr field carrying topological charge $l$ and the Conj field carrying topological charge $-l$, respectively.
All Gaussian properties of the CV entangled state \(\ket{\Psi}_{l}\) can be determined by the covariance matrix $\sigma_{AB}$ with the matrix element $\sigma _{ij}=\langle \hat{\xi}_{i}\hat{\xi}_{j}+\hat{\xi}_{j}\hat{\xi}_{i}\rangle /2-\langle \hat{\xi}_{i}\rangle \langle \hat{\xi}_{j}\rangle $, where $\hat{\xi}\equiv (\hat{X}_{-l,C}, \hat{Y}_{-l,C}, \hat{X}_{l,P}, \hat{Y}_{l,P})^{T}$, $\hat{X}=\hat{a}+\hat{a}^{\dag}$ and $\hat{Y}=(\hat{a}-\hat{a}^{\dag})/i$ represent amplitude and phase quadratures of the Conj and Pr fields, respectively. The covariance matrix of the OAM multiplexed entangled state after distribution in a lossy or noisy channel is as following
\begin{equation}
\sigma_{AB,\delta,\eta}=\left(\begin{array}{cccc}
V_{a} & 0 & V_{c} & 0 \\
0 & V_{a} & 0 & -V_{c} \\
V_{c} & 0 & V_{b} & 0 \\
0 & -V_{c} & 0 & V_{b}
\end{array}
\right)
\end{equation}
with $V_{a}=\frac{V+V^{\prime}}{2}$, $V_{b}=\eta\frac{V+V^{\prime}}{2}+(1-\eta)(1+\delta)$ and $V_{c}=\sqrt{\eta} \frac{V^{\prime}-V}{2}$. $V$ and $V^{\prime}$ represent the variances of squeezed and anti-squeezed quadratures of the optical mode, respectively. The derivation of the covariance matrix can be found in Appendix A. For pure squeezed states, $VV^{\prime}=1$. While $VV^{\prime}>1$ after pure squeezed states suffer from loss or noise. $\eta$ and $\delta$ represent transmission efficiency and excess noise of the quantum channel, respectively. We have $\delta=0$ in a lossy channel, while we have $\delta>0$ in a noisy channel. The submatrices $\sigma _{A}=V_{a} I$ and $\sigma _{B}=V_{b} I$, with $I$ as the identity matrix, are the covariance matrices corresponding to the states of Alice's and Bob's subsystems, respectively. The CV entangled state is a symmetric state and an asymmetric state when $\sigma _{A}=\sigma _{B}$ and $\sigma _{A}\neq \sigma _{B}$, respectively.
\begin{figure}
\caption{\label{Noise}Dependence of the PPT values of the CV bipartite entangled states carrying different topological charges on the transmission efficiency of the Pr field in noisy channels with different levels of excess noise.}
\end{figure}
\begin{figure*}
\caption{\label{Steering}Dependence of the steerabilities $\mathcal{G}^{A\rightarrow B}$ and $\mathcal{G}^{B\rightarrow A}$ on the transmission efficiency $\eta$ and topological charge $l$ in (a) lossy and (b) noisy channels.}
\end{figure*}
The Peres-Horodecki criterion of positivity under partial transpose (PPT criterion) is a sufficient and necessary criterion to characterize the entanglement of CV bipartite states \cite{PPTCriterion}. If the smallest symplectic eigenvalue \(\nu\) of the partially transposed covariance matrix is smaller than 1, bipartite entanglement exists. Otherwise, the state is separable. Furthermore, a smaller \(\nu\) represents stronger entanglement.
Quantum steering for bipartite Gaussian states of CV systems
can be quantified by \cite{QuantificationPRL}
\begin{equation}
\mathcal{G}^{A\rightarrow B}(\sigma _{AB})=
\mbox{$\max\big\{0,\,
\frac12 \ln {\frac{\det \sigma_{A}}{\det \sigma_{AB}}}\big\}$},
\end{equation}
where $\mathcal{G}
^{A\rightarrow B}(\sigma _{AB})>0$ represents that Alice has the ability to
steer Bob's state. Similarly, we have
\begin{equation}
\mathcal{G}^{B\rightarrow A}(\sigma _{AB})=
\mbox{$\max\big\{0,\,
\frac12 \ln {\frac{\det \sigma_{B}}{\det \sigma_{AB}}}\big\}$},
\end{equation}
which represents Bob's ability to steer Alice's state. From the expressions of $\mathcal{G}
^{A\rightarrow B}(\sigma_{AB})$ and $\mathcal{G}^{B\rightarrow A}(\sigma_{AB})$, it can be seen that Alice and Bob have the same steerability if $\det \sigma _{A}=\det \sigma _{B}$ is satisfied; i.e., the bipartite Gaussian state is a symmetric state. If the state is an asymmetric state, the steerabilities of Alice and Bob will be different.
\section{Results}
To verify the OAM property of the optical fields, we measure the spatial beam patterns of quantum states \(\ket{\Psi}_{1}\) and \(\ket{\Psi}_{2}\) transmitted through a lossy channel, which are shown in the top rows of Fig. \ref{Loss}(a) and \ref{Loss}(b), respectively. It is obvious that the Pr and Conj fields are both Laguerre-Gaussian beams. To infer their topological charges, they are passed through a tilted lens and imaged on a camera. As shown in the bottom rows of Fig. \ref{Loss}(a) and \ref{Loss}(b), the number of dark stripes gives the number of the topological charge and the direction gives its sign \cite{TopologicalCharge}. As the transmission efficiency of the Pr field decreases, its optical intensity also decreases, while its topological charge remains unchanged. Additional beam patterns of the Pr and Conj fields can be found in Appendix D.
The covariance matrices of the OAM multiplexed entangled states are reconstructed by measuring the noise variances of the amplitude and phase quadratures of the Conj and Pr fields $\Delta ^{2}\hat{X}_{-l,C}$, $\Delta ^{2}\hat{Y}_{-l,C}$, $\Delta ^{2}\hat{X}_{l,P}$, and $\Delta ^{2}\hat{Y}_{l,P}$, as well as their correlation variances of amplitude and phase quadratures $\Delta ^{2}(\hat{X}_{l,P}-\hat{X}_{-l,C})$, and $\Delta ^{2}(\hat{Y}_{l,P}+\hat{Y}_{-l,C})$, respectively. Details about the measurement of the covariance matrices can be found in Appendix C. Based on the covariance matrix of each OAM multiplexed entangled state at different loss and noise levels, its quantum entanglement and quantum steering characteristics are evaluated experimentally.
Fig. \ref{Loss}(c) shows the dependence of PPT values of the CV bipartite entangled state carrying different topological charges on the transmission efficiency of the Pr field. The correlation and anti-correlation levels of the initial CV entangled states carrying topological charges \(l=0\), \(l=1\), and \(l=2\) are all around $-3.3$ dB and 6.1 dB, which correspond to $V=0.47$ and $V^{\prime}=4.11$, respectively. The entanglement between the Pr and Conj fields degrades as the transmission efficiency decreases. However, the entanglement is robust against loss, i.e., it always exists until the transmission efficiency reaches 0. It is obvious that the CV bipartite entangled state carrying topological charges \(l=1\), \(l=2\) are as robust to loss as their Gaussian counterpart \(l=0\).
Figure \ref{Noise} shows the dependence of PPT values of the CV bipartite entangled state carrying different topological charges in noisy channels. Compared with the results in Fig. \ref{Loss}(c), the entanglement disappears at a certain transmission efficiency of the Pr field in the presence of excess noise, which demonstrates the sudden death of CV quantum entanglement. Furthermore, the higher the excess noise is, the sooner entanglement disappears. The transmission efficiencies where entanglement starts to disappear are $\eta=0.10, 0.28$ and 0.44, respectively, for the excess noise $\delta=0.15, 0.5$ and 1 in the units of shot-noise level (SNL). We show that OAM multiplexed CV entangled states carrying high order topological charges \(l=1\), \(l=2\) exhibit the same decoherence tendency as their Gaussian counterpart \(l=0\) in noisy channels.
The dependence of the steerabilities $\mathcal{G}^{A\rightarrow B}$ and $\mathcal{G}^{B\rightarrow A}$ on the transmission efficiency $\eta$ and topological charge $l$ in lossy and noisy channels is shown in Fig. \ref{Steering}(a) and Fig. \ref{Steering}(b), respectively. In a lossy channel, the steerabilities for both directions always decrease when the transmission efficiency decreases. One-way steering is observed in the region of $0<\eta <0.72$ for OAM multiplexed CV entangled states carrying different topological charges \(l=0\), \(l=1\), and \(l=2\). In a noisy channel, where the excess noise \(\delta=0.15\) (in the units of SNL) exists, the steerabilities $\mathcal{G}^{A\rightarrow B}$ and $\mathcal{G}^{B\rightarrow A}$ are lower than those in the lossy channel \cite{OneWayQin,npjDeng}. Furthermore, Alice loses her steerability in the region of $0<\eta <0.45$, while Bob loses his steerability in the region of $0<\eta <0.75$, which confirms sudden death of quantum steering in a noisy channel \cite{npjDeng}. It is worth noting that the CV entangled states carrying topological charges \(l=1\), \(l=2\) have the same steerabilities as their counterpart \(l=0\).
\section{Conclusion}
The distribution of OAM multiplexed CV entanglement and quantum steering in quantum channels with homogeneous loss and noise, such as fiber channels, are experimentally simulated in our work. There are also other quantum channels with inhomogeneous loss and noise, such as atmospheric turbulence and diffraction. Recently, it has been shown that other optical fields carrying OAM, such as vector beams, are turbulence-resilient in atmospheric turbulence \cite{VectorBeam}. Thus it is worthwhile to investigate the turbulence-resilient characteristics of OAM multiplexed CV quantum entanglement and steering, which have the potential to substantially improve the quantum communication distance and fidelity.
In summary, we experimentally demonstrate quantum steering of OAM multiplexed optical fields and investigate the distribution of OAM multiplexed CV entanglement and quantum steering in quantum channels. We show that the decoherence property of CV entanglement and quantum steering of the OAM multiplexed optical fields carrying topological charges $l=1$ and $l=2$ are the same as that of the counterpart Gaussian mode with $l=0$ in lossy and noisy channels. The sudden death of entanglement and quantum steering of high-order OAM multiplexed optical fields is observed in the presence of excess noise. Our results demonstrate the feasibility to improve the quantum communication capacity in practical quantum channels by utilizing OAM multiplexed CV entanglement and quantum steering.
\begin{figure*}
\caption{Detailed experimental schematic for distributing OAM multiplexed CV entanglement in a noisy channel. The lossy channel is realized by blocking the auxiliary beam. D-shaped mirrors are utilized to combine or separate light beams with small distances. HWP: half-wave plate; PBS: polarization beam splitter; EOM: electro-optic modulator; VPP: vortex phase plate; GL: Glan-laser polarizer; GT: Glan-Thompson polarizer; Pr: probe beam; Conj: conjugate beam; AM: amplitude modulator; PM: phase modulator; M: mirror; DM: D-shaped mirror; PZT: piezoelectric ceramics; BS: 50:50 beam splitter; BHD: balanced homodyne detector; SA: spectrum analyzer.}
\end{figure*}
\begin{figure*}
\caption{The measured quantum correlation noises for initially generated OAM multiplexed CV entangled states carrying topological charge \(l=0\) (a), \(l=1\) (b) and \(l=2\) (c), respectively. Brown curve at 0 dB shows the SNL. Other six curves show the noise variances of $\Delta ^{2}\hat{X}_{-l,C}$, $\Delta ^{2}\hat{Y}_{-l,C}$, $\Delta ^{2}\hat{X}_{l,P}$, $\Delta ^{2}\hat{Y}_{l,P}$, $\Delta ^{2}(\hat{X}_{l,P}-\hat{X}_{-l,C})$, and $\Delta ^{2}(\hat{Y}_{l,P}+\hat{Y}_{-l,C})$, respectively.}
\end{figure*}
\begin{figure*}
\caption{The images of OAM modes of the Pr beam and Conj beam generated from FWM process. (a) \(l=0\); (b) \(l=1\); (c) \(l=2\). From left to right: Conj beam, leaked pump beam, and Pr beam.}
\end{figure*}
\section*{APPENDIX A: Theoretical model}
The Hamiltonian of the orbital-angular momentum (OAM) multiplexed four-wave mixing (FWM) process can be expressed as:
\begin{equation}
\hat{H}=\sum_{l}i\hbar k_{l}\hat{a}_{0,Pump}\hat{a}_{0,Pump}\hat{a}^{\dagger}_{l,P}\hat{a}^{\dagger}_{-l,C}e^{i\theta}+h.c.
\end{equation}
where $\hat{a}_{0,Pump}$, $\hat{a}^{\dagger}_{l,P}$, and $\hat{a}^{\dagger}_{-l,C}$ are the annihilation operator of the pump field, the creation operators related to OAM modes of the Pr and Conj fields, respectively. $\theta$ is the phase of the pump field. The first term represents the process in which two pump photons are converted into a Pr photon and a Conj photon. $k_{l}$ is interaction strength of each OAM pair. Since the pump beam does not carry OAM (\(l=0\)), the topological charges of the Pr and Conj fields are opposite due to OAM conservation in the FWM process. The second term h.c. is the Hermitian conjugate of the first term, and represents the reversed process in which a Pr photon and a Conj photon are converted to two pump photons.
The pump field is much stronger than the Pr and Conj fields in the FWM process, so it can be regarded as a classical field. By combining the intensity of the pump field $|\alpha_{Pump}|^{2}$ with $k_{l}$, i.e. $|\alpha_{Pump}|^{2}k_{l}=\gamma_{l}$, and taking $\theta=0$, the Hamiltonian can be simplified as \cite{JingBiOAM}:
\begin{equation}
\hat{H}=\sum_{l}i\hbar\gamma_{l}\hat{a}^{\dagger}_{l,P}\hat{a}^{\dagger}_{-l,C}+h.c.
\end{equation}
The output state of the OAM multiplexed FWM process is as following:
\begin{equation}
\ket{\Psi}_{out}=\ket{\Psi}_{-l}\otimes\cdots\otimes\ket{\Psi}_{0}\otimes\cdots\otimes\ket{\Psi}_{l}
\end{equation}
where $\ket{\Psi}_{l}=\hat{S}(\gamma_{l})\ket{vac}=\ket{\psi_{l,P},\psi_{-l,C}}$ denotes the EPR entangled state with Pr and Conj fields carrying topological charge $l$ and $-l$, respectively. $\ket{vac}$ represents the vacuum state, and $\hat{S}(\gamma_{l})=e^{\gamma_{l}(\hat{a}^{\dagger}_{l,P}\hat{a}^{\dagger}_{-l,C}-\hat{a}_{l,P}\hat{a}_{-l,C})}$ represents the two-mode squeezing operator for the vacuum state carrying OAM topological charge $l$. It is obvious that a series of CV EPR entangled states carrying independent topological charges are generated in the FWM process, i.e., OAM multiplexing is realized.
All Gaussian properties of the CV Gaussian entangled state \(\ket{\Psi}_{l}\) can be determined by its covariance matrix $\sigma_{AB}$, with the matrix element $\sigma _{ij}=\langle \hat{\xi}_{i}\hat{\xi}_{j}+\hat{\xi
}_{j}\hat{\xi}_{i}\rangle /2-\langle \hat{\xi}_{i}\rangle \langle \hat{\xi}
_{j}\rangle $, where $\hat{\xi}\equiv (\hat{X}_{-l,C}, \hat{Y}_{-l,C}, \hat{X}_{l,P}, \hat{Y}_{l,P})^{T}$, $\hat{X}=\hat{a}+\hat{a}^{\dag}$ and $\hat{Y}=(\hat{a}-\hat{a}^{\dag})/i$ represent amplitude and phase quadratures of the Conj and Pr fields.
The covariance matrix of CV bipartite entangled state can be written as:
\begin{equation}
\sigma_{AB}=\left(\begin{array}{cccc}
\frac{V+V^{\prime}}{2} & 0 & \frac{V^{\prime}-V}{2} & 0 \\
0 & \frac{V+V^{\prime}}{2} & 0 & \frac{V-V^{\prime}}{2} \\
\frac{V^{\prime}-V}{2} & 0 & \frac{V+V^{\prime}}{2} & 0 \\
0 & \frac{V-V^{\prime}}{2} & 0 & \frac{V+V^{\prime}}{2}
\end{array}
\right) =\left(
\begin{array}{cc}
\frac{V+V^{\prime}}{2} I & \frac{V^{\prime}-V}{2} Z \\
\frac{V^{\prime}-V}{2} Z & \frac{V+V^{\prime}}{2} I
\end{array}
\right)
\end{equation}
where $V$ and $V^{\prime}$ represent the variances of squeezed and anti-squeezed quadratures of the optical mode, respectively. $I$ and $Z$ are
the Pauli matrices:
\begin{equation}
I=\left(
\begin{array}{cc}
1 & 0 \\
0 & 1
\end{array}
\right) ,\quad Z=\left(
\begin{array}{cc}
1 & 0 \\
0 & -1
\end{array}
\right)
\end{equation}
Then we consider the distribution of CV entangled state \(\ket{\Psi}_{l}\) in a lossy and noisy channel. Let $\hat{a}_{l, P}$ and $\hat{a}_{-l, C}$ represent the annihilation operators of the Pr and Conj fields, respectively. After the Pr field $\hat{a}_{l, P}$ is distributed in a lossy channel, it becomes $\hat{a}^{\prime}_{l, P}=\sqrt{\eta }\hat{a}_{l, P}+\sqrt{1-\eta }\hat{\mu}$, where $\hat{\mu}$ represents vacuum state with variance of 1. Similarly, the Pr field becomes $\hat{a}^{\prime}_{l, P}=\sqrt{\eta }\hat{a}_{l, P}+\sqrt{1-\eta }(\hat{\epsilon}+\hat{\mu})$ after it is distributed in a noisy channel with excess noise $\Delta ^{2}(\hat{X}_{\epsilon})=\Delta ^{2}(\hat{Y}_{\epsilon})=\delta$ \cite{OneWayQin}. $\delta=0$ means that there is no excess noise, and only loss exists in the channel. $\delta>0$ means that there exists excess noise in the channel. So the covariance matrix of the CV entangled state \(\ket{\Psi}_{l}\) after distribution in the lossy or noisy channel is as following:
\begin{equation}
\sigma_{AB,\delta,\eta}=\left(\begin{array}{cccc}
V_{a} & 0 & V_{c} & 0 \\
0 & V_{a} & 0 & -V_{c} \\
V_{c} & 0 & V_{b} & 0 \\
0 & -V_{c} & 0 & V_{b}
\end{array}
\right) =\left(
\begin{array}{cc}
V_{a} I & V_{c} Z \\
V_{c} Z & V_{b} I
\end{array}
\right)
\end{equation}
with $V_{a}=\frac{V+V^{\prime}}{2}$, $V_{b}=\eta\frac{V+V^{\prime}}{2}+(1-\eta)(1+\delta)$ and $V_{c}=\sqrt{\eta} \frac{V^{\prime}-V}{2}$.
\section*{APPENDIX B: Details of the Experiment}
The Ti:sapphire laser (Coherent MBR-110) is about 1.6 GHz blue detuned from $^{133}$Cs D1 line $6S_{1/2}, F=3\rightarrow6P_{1/2}, F'=4$ transition with a total power of 1.2 W. As shown in Fig. 5, the laser beam is split into two beams by a polarization beam splitter (PBS$_{1}$). The horizontally polarized beam is split into two beams by PBS$_{2}$ and they serve as the pump beams for two sets of four-wave mixing (FWM) processes in the same cesium vapor cell. The vertically polarized beam, with a power of 30 mW, passes through a resonance electro-optic modulator (EOM, Qubig GmbH PM - Cs) and three successive temperature-stabilized etalons to realize 9.2 GHz frequency red shift \cite{MaOL}. The OAM of topological charge $l=1$ or $l=2$ is added to the frequency red-shifted beam by passing it through a vortex phase plate (VPP, RPC Photonics). Then the frequency red-shifted beam is split into three beams, among which the first two beams serve as the probe beams of two FWM processes, and the third beam serves as the auxiliary beam for noisy channel. The pump and probe beams are combined in a Glan-laser (GL) polarizer and then cross each other in the center of the cesium vapor cell (Traid Technology Inc.) at an angle of 6 mrad. The vapor cell is 25 mm long and its temperature is stabilized at $103^{\circ}$C.
The two sets of FWM processes are constructed in the same cesium vapor cell with a height difference of 5 mm. The bottom FWM process is used to generate OAM multiplexed CV entangled state, while the top FWM process is used to generate spatially matched local oscillators (LOs) with the Pr and Conj fields. The pump power in the bottom FWM process for generating OAM multiplexed CV entanglement is 240 mW. The probe gain of the bottom FWM process is around 3, and the degree of initially generated CV entanglement is around $-$3.3$\pm$0.1 dB. The pump power and seed probe power of the top FWM processes are 450 mW and 100 $\mu$W, respectively, so that the shot-noise level (SNL) is around 10 dB higher than the electronic noise of the homodyne detector. The bottom FWM process is weakly seeded with a probe power of around 3 $\mu$W for relative phase locking of the Pr/Conj fields and their LOs in the balanced homodyne detections.
The lossy channel is simulated by a half-wave plate (HWP$_{10}$) and PBS$_{6}$. The noisy channel is modeled by combining the vertically polarized Pr field with a horizontally polarized auxiliary beam at PBS$_{5}$ followed by HWP$_{10}$ and PBS$_{6}$. The auxiliary beam carries the same frequency and topological charge as the Pr field, and is modulated by an amplitude modulator (AM) and a phase modulator (PM) with excess noise at 1.2 MHz. The amount of excess noise is adjusted by tuning the amplitude of the signal applied to the AM and PM, and evaluated in the units of SNL. For example, excess noise \(\delta=1\) corresponds to a noise level that is 3 dB higher than the SNL. By tuning HWP$_{10}$, the lower the transmission efficiency of the Pr field, the higher the excess noise coupled to the Pr field. In practical quantum communication protocols, higher excess noise is coupled to quantum entangled state as the communication distance increases, accompanying lower transmission efficiency. Therefore, our experimental setting is similar to the realistic scenarios in practical noisy quantum channels. To characterize the OAM multiplexed CV entangled state, its covariance matrix is experimentally obtained by utilizing two sets of balanced homodyne detectors (BHDs, Thorlabs PDB450A). The interference visibilities for the two sets of BHDs are both around 99$\%$. The electrical gains of these two BHDs are both $10^5$ V/A. The original photodiodes are replaced by high quantum efficiency (QE) photodiodes with QE=98$\%$ at 895 nm (First sensor). To measure amplitude quadrature $\hat{X}$ or phase quadrature $\hat{Y}$ of the Pr/Conj fields, the relative phases between them and their LOs are locked by applying feedback signal from proportional–integral–derivative circuits and high-voltage amplifiers to piezoelectric ceramics (PZT).
\section*{APPENDIX C: Measurement of the Covariance matrix }
To reconstruct covariance matrix of the CV quantum entangled state, we perform 6 different measurements on the output optical modes. These measurements include the variances of the amplitude and phase quadratures of the Conj field and Pr field $\Delta ^{2}\hat{X}_{-l,C}$, $\Delta ^{2}\hat{Y}_{-l,C}$, $\Delta ^{2}\hat{X}_{l,P}$, $\Delta ^{2}\hat{Y}_{l,P}$, as well as noise variances of their joint amplitude or phase quadrature $\Delta ^{2}(\hat{X}_{l,P}-\hat{X}_{-l,C}) $, and $\Delta ^{2}(\hat{Y}_{l,P}+\hat{Y}_{-l,C}) $, respectively.
$\Delta ^{2}\hat{X}_{-l,C}$ and $\Delta ^{2}\hat{Y}_{-l,C}$ ($\Delta ^{2}\hat{X}_{l,P}$ and $\Delta ^{2}\hat{Y}_{l,P}$) are experimentally measured by locking the relative phase of Conj (Pr) field and its corresponding local oscillator of BHD$_{1}$ (BHD$_{2}$) at amplitude quadrature or phase quadrature. $\Delta ^{2}(\hat{X}_{l,P}-\hat{X}_{-l,C})$ and $\Delta ^{2}(\hat{Y}_{l,P}+\hat{Y}_{-l,C}) $ are experimentally measured by locking the relative phases of BHD$_{1}$ and BHD$_{2}$ at amplitude quadrature or phase quadrature, and then subtracting or adding the photocurrents with a radio-frequency subtractor or adder. The SNL is achieved by blocking the Pr and/or Conj field, so that only the noise of vacuum is measured. The settings of the spectrum analyzer (Agilent E4411B) are 30 kHz resolution bandwidth, 100 Hz video bandwidth, and zero span at 1.2 MHz.
Fig. 6 shows the measured 6 noise variance levels of the CV entanglement of OAM multiplexed CV entangled state before being distributed in lossy and noisy channels ($\delta=0,\eta=1$). As shown in Fig. 6, the noise levels of amplitude and phase quadratures are $\Delta ^{2}\hat{X}_{-l,C}=\Delta ^{2}\hat{Y}_{-l,C}=\Delta ^{2}\hat{X}_{l,P}=\Delta ^{2}\hat{Y}_{l,P}$=3.6$\pm$0.1 dB, and the noise levels of correlated quadratures are $\Delta ^{2}(\hat{X}_{l,P}-\hat{X}_{-l,C}) $=$\Delta ^{2}(\hat{Y}_{l,P}+\hat{Y}_{-l,C}) $=$-$3.3$\pm$0.1 dB. Our FWM process works in the amplification regime, so amplitude quadratures and phase quadratures of the Pr/Conj fields show strong correlations and anti-correlations, respectively. It is clear that the degrees of entanglement for CV entangled states carrying topological charge \(l=1\), \(l=2\) are close to that of their spatially Gaussian counterpart \(l=0\).
With the measured 6 noise variances, the cross correlation matrix elements are calculated via
\begin{align}
Cov( \hat{\xi}_{i},\hat{\xi}_{j}) & =\frac{1}{2}\left[ \Delta
^{2}\left( \hat{\xi}_{i}+\hat{\xi}_{j}\right) -\Delta ^{2}\hat{\xi}_{i}-\Delta ^{2}
\hat{\xi}_{j}\right] , \\
Cov( \hat{\xi}_{i},\hat{\xi}_{j}) & =-\frac{1}{2}\left[ \Delta
^{2}\left( \hat{\xi}_{i}-\hat{\xi}_{j}\right) -\Delta ^{2}\hat{\xi}_{i}-\Delta ^{2}
\hat{\xi}_{j}\right] . \notag
\end{align}
In the experiment, we obtain all the covariance matrices of quantum states with different transmission efficiencies and excess noise, and then calculate the smallest symplectic eigenvalue $\nu$ of the partially transposed covariance matrix, $\mathcal{G}^{A\rightarrow B}(\sigma _{AB})$ and $\mathcal{G}^{B\rightarrow A}(\sigma _{AB})$ to verify whether quantum entanglement and quantum steering exist.
\section*{APPENDIX D: Supplemental beam patterns}
Supplemental beam patterns of the OAM modes of the Pr and Conj beams generated from the FWM process are shown in Fig. 7. Pr and Conj beams carrying different topological charges $l=0$, $l=1$, and $l=2$ are generated by removing or switching different VPPs, and their beam patterns are taken after the GT polarizer with a camera. It is obvious that the dark hollow patterns of the Laguerre-Gaussian beams are enlarged as the topological charge increases. Furthermore, the beam patterns of the Pr and Conj fields are symmetric with respect to the pump beam.
\noindent\textbf{Funding} National Natural Science Foundation of China (NSFC) (Grants No. 11974227, No. 11834010, No. 61905135, and No. 62005149); Fund for Shanxi ``1331 Project" Key Subjects Construction; Research Project Supported by Shanxi Scholarship Council of China (2021-003).
\noindent\textbf{Disclosures.} The authors declare no conflicts of interest.
$^{\dag }$These authors contributed equally to this Letter.
\end{document} |
\begin{document}
\title{On cubics and quartics through a canonical curve}
\author{Christian Pauly}
\maketitle
\begin{abstract}
We construct families of quartic and cubic hypersurfaces through a
canonical curve, which are parametrized by an open subset in a
Grassmannian and a Flag variety respectively. Using
G. Kempf's cohomological obstruction theory, we show that
these families cut out the
canonical curve and that the quartics are birational (via a
blowing-up of a linear subspace) to quadric bundles over the
projective plane, whose Steinerian curve equals the canonical curve.
\end{abstract}
\section{Introduction}
Let $C$ be a smooth nonhyperelliptic curve of genus $g \geq 4$, which
we consider as an embedded curve $\iota_\omega :
C \hookrightarrow \mathbb{P}^{g-1}$ by its canonical linear series $|\omega|$.
Let $I = \bigoplus_{n \geq 2} I(n)$ be
the graded ideal of the canonical curve. It was classically known
(Noether-Enriques-Petri theorem, see e.g. \cite{acgh} p. 124) that the
ideal $I$ is generated by its elements of degree $2$, unless $C$ is trigonal
or a plane quintic.
It was also classically known how to construct some distinguished
quadrics in $I(2)$. We consider a double point of the
theta divisor $\Theta \subset \mathrm{Pic}^{g-1}(C)$, which corresponds by
Riemann's singularity theorem to a
degree $g-1$ line bundle $L$ satisfying $\mathrm{dim} \: |L| = \mathrm{dim} \:
|\omega L^{-1}| = 1$ and we observe that the morphism
$\iota_L \times \iota_{\omega L^{-1}} : C \longrightarrow C' \subset |L|^* \times
|\omega L^{-1}|^* = \mathbb{P}^1 \times \mathbb{P}^1$ (here $C'$ denotes the image curve)
followed by the Segre embedding
into $\mathbb{P}^3$ factorizes through the canonical space
$|\omega|^*$, i.e.,
$$
\begin{array}{ccc}
C & \hookrightarrow & |\omega|^* \\
\downarrow & & \downarrow^{\pi} \\
\mathbb{P}^1 \times \mathbb{P}^1 & \hookrightarrow & \mathbb{P}^3,
\end{array}
$$
where $\pi$ is projection from a $(g-5)$-dimensional vertex $\mathbb{P} V^\perp$
in $|\omega|^*$.
We then define the quadric $Q_L := \pi^{-1}(\mathbb{P}^1 \times \mathbb{P}^1)$,
which is a rank
$\leq 4$ quadric in $I(2)$ and coincides with the projectivized tangent
cone at the double point $[L] \in \Theta$ under the identification
of $H^0(C,\omega)^*$ with the tangent space $T_{[L]} \mathrm{Pic}^{g-1}(C)$.
The main result, due to M. Green \cite{green}, asserts that the set of
quadrics $\{ Q_L \}$, when $L$ varies over the double points of $\Theta$,
linearly spans $I(2)$. From this result one infers a constructive Torelli
theorem by intersecting all quadrics $Q_L$ --- at least for $C$ general
enough.
The geometry of the theta divisor $\Theta$ at a double point $[L]$
can also be exploited to produce higher degree elements in the ideal
$I$ as follows: we expand in a suitable set of coordinates a local
equation $\theta$ of $\Theta$ near $[L]$ as $\theta = \theta_2 +
\theta_3 + \ldots$, where $\theta_i$ are homogeneous forms of degree $i$.
Having seen that $Q_L = \mathrm{Zeros}(\theta_2)$, we denote by $S_L$ the
cubic $\mathrm{Zeros}(\theta_3) \subset |\omega|^*$,
the osculating cone of $\Theta$ at $[L]$. The cubic $S_L$ has many nice
geometric properties: under the blowing-up of the vertex $\mathbb{P} V^\perp
\subset S_L$, the cubic $S_L$ is transformed into a quadric bundle
$\tilde{S}_L$ over $\mathbb{P}^1 \times \mathbb{P}^1$ and it was shown by
G. Kempf and F.-O. Schreyer \cite{ks} that the Hessian and
Steinerian curves of $\tilde{S}_L$ are $C' \subset \mathbb{P}^1 \times \mathbb{P}^1$ and
$C \subset |\omega|^*$ respectively,
which gives another proof of Torelli's theorem.
In this paper we construct and study distinguished cubics and
quartics in the ideal $I$ by adapting the methods of \cite{ks} to
rank-$2$ vector bundles over $C$. Our construction basically goes as
follows (section 2): we consider a general $3$-plane $W \subset
H^0(C, \omega)$ and define the rank-$2$ vector bundle $E_W$ as
the dual of the kernel of the evaluation map in $\omega$ of
sections of $W$. The bundle $E_W$ is stable and admits a theta divisor
$D(E_W)$ in the Jacobian $JC$. Since $D(E_W)$ contains the origin
${\mathcal O} \in JC$ with multiplicity $4$, the projectivized tangent cone to
$D(E_W)$ at ${\mathcal O}$ is a quartic hypersurface in $\mathbb{P} T_{\mathcal O} JC = |\omega|^*$,
denoted by $F_W$ and which contains the canonical curve. We
therefore obtain a rational map from the Grassmannian $\mathrm{Gr}(3,H^0(\omega))$
to the ideal of quartics $|I(4)|$
\begin{equation}\label{fmap4}
\mathbf{F}_4 \ : \ \mathrm{Gr}(3,H^0(\omega)) \dashrightarrow |I(4)|, \qquad W \mapsto F_W.
\end{equation}
Our main tool to study the tangent cones $F_W$ is G. Kempf's
cohomological obstruction
theory \cite{kempf1},\cite{kempf2},\cite{ks} which in our set-up leads
to a simple criterion (Proposition \ref{simpl})
for $b \in \mathbb{P} T_{\mathcal O} JC = |\omega|^*$ to belong to $F_W$.
We deduce in particular from this criterion
that the cubic polar $P_x(F_W)$ of $F_W$ with respect to a point
$x \in W^\perp$ also contains the canonical curve. Here
$W^\perp$ denotes the annihilator of $W \subset H^0(\omega)$.
We therefore obtain a
rational map from the flag variety $\mathfrak{m}athrm{Fl}(3,g-1,H^0(\omega))$
parametrizing pairs $(W,x)$ to the ideal of cubics $|I(3)|$
\begin{equation}\leftarrowbel{fmap3}
\mathfrak{m}athbf{F}_3 \ : \ \mathfrak{m}athrm{Fl}(3,g-1,H^0(\omega)) \dashrightarrow |I(3)|,
\qquad (W,x) \mathfrak{m}apsto P_x(F_W).
\end{equation}
Our two main results can be stated as follows.
{\bf (1)} Like the cubic osculating cones $S_L$, the quartic
tangent cones $F_W$ transform under the blowing-up of the vertex
$\mathbb{P} W^\perp \subset F_W$ into a quadric bundle $\tilde{F}_W \rightarrow
\mathbb{P} W^* = \mathbb{P}^2$. Their Hessian and Steinerian curves are the
plane curve $\Gamma$, image under the projection with center $\mathbb{P}
W^\perp$, $\pi: C \rightarrow \Gamma \subset \mathbb{P} W^*$, and the canonical
curve $C \subset |\omega|^*$ (Theorem \ref{mainthm}). This surprising
analogy with the osculating cones $S_L$ remains however
unexplained.
{\bf (2)} Let us denote by $|F_4| \subset |I(4)|$ and $|F_3| \subset
|I(3)|$ the linear subsystems spanned by the quartics $F_W$ and the cubics
$P_x(F_W)$ respectively. Then we show
(Theorem \ref{mainthm2}) that both base loci of $|F_4|$ and $|F_3|$ coincide
with $C \subset |\omega|^*$, i.e., the quartics $F_W$ (resp. the
cubics $P_x(F_W)$) cut out the canonical curve.
The starting point of our investigations was the question asked by
B. van Geemen and G. van der Geer (\cite{vgvg} page 629) about ``these
mysterious quartics'' which arise as tangent cones to $2\theta$-divisors
in the Jacobian having multiplicity $\geq 4$ at the origin. In that
paper the authors implicitly conjectured that the base locus
of $|F_4|$ equals $C$, which was subsequently proved by G. Welters
\cite{welt}. Our proof follows from the fact that $|F_4|$ contains
all squares of quadrics in $|I(2)|$.
This paper leaves many questions unanswered (section 7), like e.g.
finding explicit equations of the quartics $F_W$, their syzygies,
the dimensions of $|F_3|$ and $|F_4|$. The techniques used here
also apply when replacing $|\omega|^*$ by Prym-canonical space
$|\omega \alpha|^*$, and generalizing rank-$2$ vector bundles to
symplectic bundles.
{\bf Acknowledgements:} Many results contained in this paper arose from
discussions with Bert van Geemen, whose influence on this work is
considerable. I would like to thank him for these enjoyable and
valuable discussions.
\section{Some constructions for rank-$2$ vector bundles with canonical
determinant}
In this section we briefly recall some known results from \cite{bv},
\cite{vgi} and \cite{pp} on rank-$2$ vector bundles over $C$.
\subsection{Bundles $E$ with $\mathrm{dim} \: H^0(C,E) \geq 3$}
Let $W \subset H^0(C,\omega)$ be a $3$-plane. We denote by $[W] \in
\mathrm{Gr}(3,H^0(\omega))$ the corresponding point in the Grassmannian and
by $\mathcal{B} \subset \mathrm{Gr}(3,H^0(\omega))$ the codimension $2$ subvariety
consisting of $[W]$ such that the net $\mathbb{P} W \subset |\omega|$
has a base point. For $[W] \notin \mathcal{B}$ we consider (see \cite{vgi}
section 4) the rank-$2$ vector bundle $E_W$ defined by the exact sequence
\begin{equation} \label{esw}
0 \longrightarrow E^*_W \longrightarrow {\mathcal O}_C \otimes W \xrightarrow{ev} \omega \longrightarrow 0.
\end{equation}
Here $E^*_W$ denotes the dual bundle of $E_W$. We have $\mathrm{det} \: E_W =
\omega$ and $W^* \subset H^0(C,E_W)$. We denote by $\mathcal{D}$ the
effective divisor in $|{\mathcal O}_{\mathrm{Gr}}(g-2)|$ defined by the condition
$$ [W] \in \mathcal{D} \iff \mathrm{dim} \: H^0(C,E_W) \geq 4.$$
Moreover $\mathcal{B} \subset \mathcal{D}$ and $E_W$ is stable if $[W] \notin \mathcal{D}$.
Let $W^\perp \subset H^0(\omega)^* = H^1({\mathcal O})$ denote the
annihilator of $W \subset H^0(\omega)$. We call the projective
subspace $\mathbb{P} W^\perp \subset |\omega|^*$ the {\em vertex} and
denote by
$$ \pi: |\omega|^* \dashrightarrow \mathbb{P} W^*, \qquad \pi:
C \rightarrow \Gamma \subset \mathbb{P} W^*,$$
the projection with center $\mathbb{P} W^\perp$. If $[W] \notin \mathcal{B}$,
then $C \cap \mathbb{P} W^\perp = \emptyset$ and $\pi$ restricts to
a morphism $C \rightarrow \mathbb{P} W^*$. Its image is a plane curve $\Gamma$
of degree $2g-2$. We note that
$E_W = \pi^* (T(-1))$, where $T$ is the tangent bundle of
$\mathbb{P} W^* = \mathbb{P}^2$.
Conversely any bundle $E$ with $\mathrm{det} \: E = \omega$ and $\mathrm{dim} \:
H^0(C,E) \geq 3$ is of the form $E_W$.
\subsection{Bundles $E$ with $\mathrm{dim} \: H^0(C,E) \geq 4$}
Following \cite{bv} (see also \cite{pp} section 5.2) we associate
to a bundle $E$ with $\mathrm{dim} \: H^0(C,E) = 4$ a rank $\leq 6$ quadric
$Q_E \in |I(2)|$, which is defined as the inverse image of the
Klein quadric under the dual $\mu^*$ of the exterior product map
$$ \mu^*: |\omega|^* \longrightarrow \mathbb{P} (\Lambda^2 H^0(E)^*) \supset
\mathrm{Gr}(2, H^0(E)^*), \qquad Q_E:=(\mu^*)^{-1} \left( \mathrm{Gr} \right).$$
Composing with the previous construction, we obtain a rational
map
$$ \alpha : \mathcal{D} \dashrightarrow |I(2)|, \qquad \alpha([W]) = Q_{E_W}.$$
Moreover given a $Q \in |I(2)|$ with $\mathrm{rk} \ Q \leq 6$ and
$\mathrm{Sing} \ Q \cap C = \emptyset$, it is easily shown
that
$$ \alpha^{-1}(Q) = \{ [W] \in \mathcal{D} \ | \ \mathbb{P} W^\perp \subset Q \}.$$
If $\mathrm{rk} \ Q = 6$, then $\alpha^{-1}(Q)$ has two connected
components, which are isomorphic to $\mathbb{P}^3$.
\begin{lem} \label{quaw}
We have $[W] \notin \mathcal{D}$ if and only if the linear map induced by restricting
quadrics to the vertex $\mathbb{P} W^\perp$
$$\mathrm{res}: I(2) \longrightarrow H^0(\mathbb{P} W^\perp,{\mathcal O}(2))$$
is an isomorphism.
\end{lem}
\begin{proof}
It is enough to observe that the two spaces have the
same dimension and that a nonzero
element in $\mathrm{ker} \: \mathrm{res}$ corresponds to a $Q \in |I(2)|$ with
$\mathrm{rk} \: Q \leq 6$.
\end{proof}
\subsection{Definition of the quartic $F_W$}
We will now define the main object of this paper. Given $[W]
\notin \mathcal{B}$, we consider the $2\theta$-divisor $D(E_W) \subset
JC$ (see e.g. \cite{bv},\cite{vgi},\cite{pp}), whose
set-theoretical support equals
$$ D(E_W) = \{ \xi \in JC \ | \ \mathrm{dim} \: H^0(C, \xi \otimes E_W) > 0 \}.$$
Since $\mathrm{mult}_{\mathcal O} D(E_W) \geq \mathrm{dim} \: H^0(C, E_W) \geq 3$ and since
any $2\theta$-divisor is symmetric, the first nonzero term of the Taylor
expansion of a local equation of $D(E_W)$ at the origin ${\mathcal O}$ is a
homogeneous polynomial $F_W$ of degree $4$. The hypersurface
in $|\omega|^* = \mathbb{P} T_{\mathcal O} JC$ associated to $F_W$ is also denoted by $F_W$.
Here we restrict
attention to the case $\mathrm{dim} \: H^0(C,E_W) = 3 \ \text{or} \ 4$.
We have
$$ F_W := \mathrm{Cone}_{\mathcal O} (D(E_W)) \subset |\omega|^*.$$
The study of the quartics $F_W$ for $[W] \in \mathrm{Gr}(3,H^0(\omega)) \setminus
\mathcal{D}$ is the main purpose of this paper. If $[W] \in \mathcal{D}$, the quartics
$F_W$ have already been described in \cite{pp} Proposition 5.12.
\begin{prop}
If $\mathrm{dim} \: H^0(C,E_W) = 4$, then $F_W$ is a double quadric
$$ F_W = Q^2_{E_W}.$$
\end{prop}
Since $|I(2)|$ is linearly spanned by rank $\leq 6$ quadrics (see \cite{pp}
section 5), we obtain the following fact, which will be used in section 6.
\begin{prop} \label{f4sq}
The linear subsystem $|F_4|$ contains all squares of quadrics in $|I(2)|$.
\end{prop}
Although we will not use that fact, we mention that the rational map
\eqref{fmap4} is given by a linear subsystem $\mathbb{P} \Gamma \subset
|\mathcal{J}_\mathcal{B}(g-1)|$, where $\mathcal{J}_\mathcal{B}$ is the ideal sheaf of
the subvariety $\mathcal{B}$. If $g=4$, the inclusion is an equality (see \cite{opp}
section 6). If $g>4$, a description of $\mathbb{P} \Gamma$ is not known.
\section{Kempf's cohomological obstruction theory}
In this section we outline Kempf's deformation theory \cite{kempf1}
and apply it to the study of the tangent cones $F_W$ of the divisors
$D(E_W)$.
\subsection{Variation of cohomology}
Let $\mathcal{E}$ be a vector bundle over the
product $C \times S$, where $S = \mathrm{Spec}(A)$ is an affine neighbourhood of the
origin of $JC$.
We restrict attention to the case
$$\mathcal{E} = \pi_C^* E_W \otimes \mathcal{L},$$
for some $3$-plane $W$,
and recall that Kempf's deformation theory was applied \cite{kempf1},
\cite{kempf2}, \cite{ks} to the case $\mathcal{E} = \pi_C^* M \otimes \mathcal{L}$,
for a line bundle $M$ over $C$. The line bundle $\mathcal{L}$ denotes
the restriction of a Poincar\'e line bundle over $C \times JC$ to the
neighbourhood $C \times S$. The fundamental idea to study the
variation of cohomology, i.e. the two upper-semicontinuous
functions on $S$
$$ s \mapsto h^0(C\times \{s\}, \mathcal{E} \otimes_A \mathbb{C}_s), \qquad
s \mapsto h^1(C\times \{s\}, \mathcal{E} \otimes_A \mathbb{C}_s), $$
where $\mathbb{C}_s = A/\mathfrak{m}_s$ and $\mathfrak{m}_s$ is the maximal ideal of $s \in S$, is
based on the existence of an approximating homomorphism.
\begin{thm}[Grothendieck, \cite{kempf1} section 7] \label{gro}
Given a family $\mathcal{E}$ of vector bundles over $C \times S$,
there exist two flat $A$-modules $F$ and $G$ of finite type and
an $A$-homomorphism $\alpha : F \rightarrow G$ such that for all $A$-modules $M$,
we have isomorphisms
$$ H^0(C\times S , \mathcal{E} \otimes_A M) \cong \mathrm{ker} \:(\alpha \otimes_A id_M),
\qquad
H^1(C\times S , \mathcal{E} \otimes_A M) \cong \mathrm{coker} \:(\alpha \otimes_A id_M).$$
\end{thm}
By considering a smaller neighbourhood of the origin, we may assume
the $A$-modules $F$ and $G$ to be locally free (Nakayama's lemma). Moreover
(\cite{kempf1} Lemma 10.2) by restricting further the neighbourhood,
we may find an approximating homomorphism $\alpha : F \rightarrow G$ such that
$\alpha \otimes \mathbb{C}_0 : F \otimes_A A/\mathfrak{m}_0 \rightarrow G \otimes_A A/\mathfrak{m}_0$ is the
zero homomorphism.
We apply this theorem to the family $\mathcal{E} = \pi^*_C E_W \otimes \mathcal{L}$, for
$[W] \notin \mathcal{D}$.
Since by Riemann-Roch $\chi(\mathcal{E} \otimes \mathbb{C}_s) = \chi(E_W \otimes \mathcal{L}_s)
= 0$, $\forall s \in S$, and since $h^0(C,E_W) = 3$, the local equation
$f$ of the divisor
$$ D(E_W)_{|S} = \{ s \in S \: | \: h^0(C \times \{s\}, E_W \otimes \mathcal{L}_s) > 0
\} $$
is given at the origin ${\mathcal O}$ by the determinant of
a $3 \times 3$ matrix of regular functions
$f_{ij}$ on $S$, with $1 \leq i,j \leq 3$, which vanish at ${\mathcal O}$, i.e.,
the $A$-modules $F$ and $G$ are free and of rank $3$. Hence
$$ f = \mathrm{det} \: (f_{ij}). $$
The linear part of the regular functions $f_{ij}$ is related to the
cup-product as follows (\cite{kempf1} Lemma 10.3 and Lemma 10.6): let
$\mathfrak{m} = \mathfrak{m}_0$ be the maximal ideal of the origin ${\mathcal O} \in S$ and consider
the exact sequence of $A$-modules
$$ 0 \longrightarrow \mathfrak{m}/\mathfrak{m}^2 \longrightarrow A/\mathfrak{m}^2 \longrightarrow A/\mathfrak{m} \longrightarrow 0. $$
After tensoring with $\mathcal{E}$ over $C \times S$ and taking cohomology,
we obtain a coboundary map
$$H^0(C,E_W) = H^0(C \times \{s \} , \mathcal{E} \otimes_A A/\mathfrak{m} )
\xrightarrow{\delta} H^1(C \times \{s \} , \mathcal{E} \otimes_A \mathfrak{m}/\mathfrak{m}^2 ) =
H^1(C,E_W) \otimes \mathfrak{m}/\mathfrak{m}^2, $$
where $\mathfrak{m}/\mathfrak{m}^2$ is the Zariski cotangent space at ${\mathcal O}$ to $JC$.
Note that we have a canonical isomorphism $(\mathfrak{m}/\mathfrak{m}^2)^* \cong H^1({\mathcal O})$
and that a tangent vector $b \in H^1({\mathcal O})$ gives, by composing with the
linear form $l_b : \mathfrak{m}/\mathfrak{m}^2 \rightarrow \mathbb{C}$, a linear map $\delta_b : H^0(E_W) \rightarrow
H^1(E_W)$. As in the line bundle case \cite{kempf1}, one proves
\begin{lem}
For any nonzero $b \in H^1({\mathcal O}) = T_{{\mathcal O}} JC$, we have
\begin{enumerate}
\item
The linear map $\delta_b: H^0(E_W) \rightarrow H^1(E_W)$ coincides with
the cup-product $(\cup b)$ with the class $b$, and is {\em
skew-symmetric} after identifying $H^1(E_W)$ with $H^0(E_W)^*$
(Serre duality).
\item
The coboundary map $\delta : H^0(E_W) \rightarrow H^1(E_W) \otimes \mathfrak{m}/\mathfrak{m}^2$ is
described by a skew-symmetric $3 \times 3$ matrix $(x_{ij})$, with
$x_{ij} \in H^1({\mathcal O})^*$. Moreover the linear form $x_{ij}$
coincides with the differential $(df_{ij})_0$ of $f_{ij}$ at the
origin ${\mathcal O}$.
\end{enumerate}
\end{lem}
The coboundary map $\delta$ induces a linear map
$$ \Delta : H^1({\mathcal O}) \longrightarrow \Lambda^2 H^0(E_W)^*, \qquad
b \longmapsto \delta_b,$$
which coincides with the dual of the multiplication map of global
sections of $E_W$. Moreover
$$\mathrm{ker} \: \Delta = W^\perp = \{ x_{12} = x_{13} = x_{23} = 0 \}. $$
Using a flat structure \cite{kempf2} we can write the power
series expansion of the regular functions $f_{ij}$ around ${\mathcal O}$
$$ f_{ij} = x_{ij} + q_{ij} + \cdots, $$
where $x_{ij}$ and $q_{ij}$ are linear and quadratic polynomials
respectively. We easily calculate the expansion of $f$: by
skew-symmetry its cubic term is zero, and its quartic
term equals
$$ F_W = q_{11} x^2_{23} + q_{22} x^2_{13} + q_{33} x^2_{12} + x_{12}x_{23}
(q_{13} + q_{31}) - x_{13} x_{23} (q_{12} + q_{21}) - x_{12} x_{13}
(q_{23} + q_{32}). $$
We straightforwardly deduce from this equation the following properties
of $F_W$.
\begin{prop} \label{singver}
\begin{enumerate}
\item The quartic $F_W$ is singular along the vertex $\mathbb{P} W^\perp$.
\item For any $x \in W^\perp$, the cubic polar $P_x(F_W)$ is singular
along the vertex $\mathbb{P} W^\perp$.
\end{enumerate}
\end{prop}
\subsection{Infinitesimal deformations of global sections of $E_W$}
We first recall some elementary facts on principal parts. Let
$V$ be an arbitrary vector bundle over $C$ and let $\mathrm{Rat}(V)$ be the
space of rational sections of $V$ and $p$ be a point of $C$.
The space of principal parts of $V$ at $p$ is the quotient
$$ \mathrm{Prin}_p(V) = \mathrm{Rat}(V)/\mathrm{Rat}_p(V), $$
where $\mathrm{Rat}_p(V)$ denotes the space of rational sections of $V$ which are
regular at $p$. Since a rational section of $V$ has only finitely
many poles, we have a natural mapping
\begin{equation}\label{prinpart}
\mathrm{pp} : \mathrm{Rat}(V) \longrightarrow \mathrm{Prin}(V) := \bigoplus_{p \in C} \mathrm{Prin}_p(V),
\qquad
s \longmapsto \left( s \ \mathrm{mod} \ \mathrm{Rat}_p(V) \right)_{p \in C}.
\end{equation}
Exactly as in the line bundle case (\cite{kempf1} Lemma 3.3), one proves
\begin{lem}
There are isomorphisms
$$ \mathrm{ker} \: \mathrm{pp} \cong H^0(C, V), \qquad \mathrm{coker} \: \mathrm{pp}
\cong H^1(C, V). $$
\end{lem}
In the particular case $V = {\mathcal O}$, we see that a tangent vector
$b \in H^1({\mathcal O}) = T_{{\mathcal O}} JC$ can be represented
by a collection $\beta = \left( \beta_p \right)_{p \in I}$
of rational functions $\beta_p \in \mathrm{Rat}({\mathcal O})$, where $p$
varies over a finite set of points $I \subset C$. We then define
$\mathrm{pp}(\beta) = \left( \omega_p \right)_{p \in I}
\in \mathrm{Prin}({\mathcal O})$, where $\omega_p$ is the principal part of $\beta_p$ at $p$.
We denote by $[\beta] = b$ its cohomology class
in $H^1({\mathcal O})$. Note that we can define powers of $\beta$ by
$\beta^k := \left( \beta_p^k \right)_{p \in I}$.
\noindent
For $i \geq 1$, let $D_i$ be the infinitesimal
scheme $\mathrm{Spec}(A_i)$, where $A_i$ is
the Artinian ring $\mathbb{C}[\epsilon]/\epsilon^{i+1}$.
As explained in \cite{kempf2} section 2, a tangent vector $b \in H^1({\mathcal O})$
determines a morphism
$$ \mathrm{exp}_{i,b} : D_i \longrightarrow JC,$$
with $\mathrm{exp}_{i,b}(x_0) = {\mathcal O}$, where $x_0$ is the closed point of $D_i$. Let
$\mathbb{L}_{i+1}(b)$ denote the pull-back of the Poincar\'e
sheaf $\mathcal{L}$ under the morphism $\mathrm{exp}_{i,b} \times id_C$.
Note that we have the following
exact sequences
\begin{equation} \label{esext1}
D_1 \times C : \qquad 0 \longrightarrow \epsilon {\mathcal O} \longrightarrow \mathbb{L}_2(b) \longrightarrow {\mathcal O} \longrightarrow 0,
\end{equation}
\begin{equation} \label{esext2}
D_2 \times C : \qquad 0 \longrightarrow \epsilon^2 {\mathcal O} \longrightarrow \mathbb{L}_3(b) \longrightarrow
\mathbb{L}_2(b) \longrightarrow 0.
\end{equation}
The second arrows in each sequence correspond to the restriction to
the subschemes $\{x_0 \} \times C \subset D_1 \times C$ and
$D_1 \times C \subset D_2 \times C$ respectively.
As above we choose a representative $\beta$ of $b$. Following
\cite{kempf2} section 2, one shows that the space
of global sections $H^0(C \times D_i, \mathbb{L}_{i+1}(b) \otimes E)$,
with $E = E_W$ and $[W] \notin \mathcal{D}$, is isomorphic to the
$A_i$-module
\begin{equation} \label{glosecllbeta}
V_i(\beta) = \{
f = f_0 + \cdots + f_i \epsilon^i \in \mathrm{Rat}(E) \otimes A_i \ \
\text{such that}
\
f \mathrm{exp} (\epsilon \beta) \ \text{is regular} \ \forall p \in C \}.
\end{equation}
An element $f \in V_i(\beta)$ is called an $i$-th order deformation
of the global section $f_0 \in H^0(E)$.
In the case $i=2$, the condition $f \in V_i(\beta)$ is equivalent
to the following three elements,
\begin{equation} \label{expdef}
f_0, \qquad f_1 + f_0 \beta, \qquad
f_2 + f_1 \beta + f_0 \frac{\beta^2}{2},
\end{equation}
being regular at all points $p \in C$ --- for $i=1$, we consider the
first two elements. Alternatively this means
that their classes in $\mathrm{Prin}(E)$ are zero. We note that,
given two representatives $\beta = \left( \beta_p \right)_{p \in I}$
and $\beta' = \left( \beta'_p \right)_{p \in I'}$ with $[\beta] =
[\beta']$, the two subspaces $V_i(\beta)$ and $V_i(\beta')$ of
$\mathrm{Rat}(E) \otimes A_i$ are different and that any rational
function $\varphi \in \mathrm{Rat}({\mathcal O})$
satisfying $\mathrm{pp} (\varphi) = \mathrm{pp}
(\beta' - \beta)$ induces an isomorphism
$V_i(\beta) \cong V_i(\beta')$.
\noindent
We consider a class $b \in H^1({\mathcal O}) \setminus W^\perp$ and
a representative $\beta$ such that $[\beta] = b$. By taking
cohomology of \eqref{esext1} tensored with $E$, we observe that
a first order deformation of $f_0$, i.e., a global section
$f = f_0 + f_1 \epsilon \in V_1(\beta) \cong
H^0(C \times D_1, \mathbb{L}_2(b) \otimes E)$
always exists. Since $\mathrm{rk} \: (\cup b) = 2$, the global section
$f_0$ is uniquely determined up to a scalar
$$ f_0 \cdot \mathbb{C} = \mathrm{ker} \: \left( \cup b : H^0(E) \longrightarrow H^1(E) \right).$$
Moreover any two first order deformations of $f_0$ differ
by an element in $\epsilon H^0(E)$.
\noindent
We now state a criterion for a tangent vector
$b = [\beta]$ to lie on the quartic tangent cone $F_W$ in terms
of a second order deformation of $f_0 \in H^0(E)$.
\begin{lem} \label{criterion}
A cohomology class $b = [\beta] \in H^1({\mathcal O}) \setminus W^\perp$ is contained
in the cone over the quartic $F_W$ if and only if there exists a
global section
$$f = f_0 + f_1 \epsilon + f_2 \epsilon^2 \in V_2(\beta) \cong
H^0(C \times D_2 , \mathbb{L}_3(b) \otimes E).$$
\end{lem}
\begin{proof}
The proof is similar to \cite{ks} Lemma 4. We work over the Artinian
ring $A_4$, i.e., $\epsilon^5 = 0$.
By Theorem \ref{gro} applied to the family $\mathbb{L}_5(b) \otimes E$ over
$C \times D_4$, there exists an
approximating homomorphism of $A_4$-modules
\begin{equation} \label{approxhomo}
A_4^{\oplus 3} \xrightarrow{\varphi} A_4^{\oplus 3},
\end{equation}
such that $\mathrm{ker} \: \varphi_{|D_2} \cong H^0(C \times D_2, \mathbb{L}_3(b) \otimes E)$,
$\mathrm{coker} \: \varphi_{|D_2} \cong H^1(C \times D_2, \mathbb{L}_3(b) \otimes E)$,
and $\varphi \otimes \mathbb{C}_0 = 0$. We denote by $\varphi_{|D_2}$ the
homomorphism obtained from \eqref{approxhomo} by projecting to $A_2$.
Note that any $A_4$-module is free. The matrix $\varphi$ is
equivalent to a matrix
$$ M:= \left( \begin{array}{ccc}
\epsilon^u & 0 & 0 \\
0 & \epsilon^v & 0 \\
0 & 0 & \epsilon^w
\end{array}
\right).
$$
Since $\varphi \otimes \mathbb{C}_0 = 0$, we have $u,v,w \geq 1$. Moreover
we can order the exponents so that $1 \leq u \leq v \leq w$. It follows
from the definition of $D(E_W)$ as a determinant divisor that the
pull-back of $D(E_W)$ by $\mathrm{exp}_4: D_4 \longrightarrow JC$ is given by the
equation (in $A_4$)
$$ \mathrm{det} \: M = \epsilon^{u+v+w}.$$
We immediately see that $b \in F_W$ if and only if $u+v+w \geq 5$.
Let us now restrict $\varphi$ to $D_1$, i.e., we project \eqref{approxhomo}
to $A_1$. Since we assume $b \notin W^\perp = \mathrm{ker} \: \Delta$, the
restriction $\varphi_{|D_1}$ is nonzero and, by skew-symmetry, of rank $2$,
i.e., $u=v=1$ and $w \geq 2$. Hence $b \in F_W$ if and only if
$w \geq 3$.
On the other hand the $A_2$-module $\mathrm{ker} \: \varphi_{|D_2} \cong
H^0(C \times D_2, \mathbb{L}_3(b) \otimes E)$ has length $2+w$. Let $\mu$
be the multiplication by $\epsilon^2$ on this $A_2$-module. Then
by \eqref{glosecllbeta} the $A_2$-module $\mathrm{ker} \: \mu$ is
isomorphic to the $A_1$-module $H^0(C \times D_1, \mathbb{L}_2(b) \otimes
E)$, which is of length $4$, provided $b \notin W^\perp$. Hence
we obtain that $w \geq 3$ if and only if there exists an $f \in
H^0(C \times D_2, \mathbb{L}_3(b) \otimes E)$ such that $\mu(f) = \epsilon^2
f_0$. This proves the lemma.
\end{proof}
\section{Study of the quartic $F_W$}
In this section we prove geometric properties of the quartic $F_W$.
\subsection{Criteria for $b \in F_W$}
We now show that the criterion of Lemma \ref{criterion} simplifies
to a criterion involving only a first order deformation $f = f_0 +
f_1 \epsilon \in V_1(\beta)$ of $f_0$. As above we assume $b \notin
W^\perp$.
First we observe that the rational differential form $f_1 \wedge f_0$
is independent of the choice of the representative
$\beta$, i.e., $f_1 \wedge f_0$
only depends on the cohomology class $b =[\beta]$: suppose we take
$\beta' = \left( \beta_p \cdot \varphi \right)_{p \in I}$,
where $\varphi \in \mathrm{Rat}(\omega)$. Then $f_0$ and $f_1$
transform into $f'_0 = f_0$ and $f'_1 = f_1 + \varphi f_0$,
from which it is clear that $f'_1 \wedge f'_0 = f_1 \wedge f_0$.
Secondly one easily sees that $f_0 = \pi(b)$ (section 2.1) and that,
under the canonical identification $\Lambda^2 W^* = \Lambda^2 H^0(E) =
W$, the $2$-plane $H^0(E) \wedge f_0$ coincides with the intersection
$V_b := H_b \cap W$, where $H_b$ denotes the hyperplane determined by $b \in
H^1({\mathcal O})$.
It follows from these two remarks that, given $b$ and $W$, the
form $f_1 \wedge f_0$ is well-defined up to a regular differential
form in $V_b \subset W$.
\begin{prop} \label{simpl}
We have the following equivalence
$$ b \in F_W \qquad \iff \qquad f_1 \wedge f_0 \in H_b.$$
\end{prop}
\begin{proof}
Since $f_1 \wedge f_0$ does not depend on $\beta$, we may choose a
$\beta$ with simple poles at the points $p \in I$. By Lemma \ref{criterion}
and relation \eqref{expdef} we see that $b \in F_W$ if and only if
the cohomology class $[f_1 \beta + f_0 \frac{\beta^2}{2}]$ is zero
in $H^1(E) / \mathrm{im} \: (\cup b)$ --- we recall that $f_1$ is defined up
to $H^0(E)$.
First we will prove that $[f_0 \frac{\beta^2}{2}] \in
\mathrm{im} \:(\cup b)$. The commutativity of the upper right triangle of the
diagram (see e.g. \cite{kempf1})
$$
\begin{array}{ccccccc}
& & & & H^0(E) & & \\
& & & & & & \\
& & & & \downarrow \cdot \frac{\beta^2}{2} & \searrow \ \cup
[\frac{\beta^2}{2}] & \\
& & & & & & \\
H^0(E) & \longrightarrow & H^0(E(2I)) & \longrightarrow & E(2I)_{|2I} & \longrightarrow & H^1(E) \\
& & & & & & \\
& & \cap & & \cap & \nearrow & \\
& & & & & & \\
& & \mathrm{Rat}(E) & \xrightarrow{\mathrm{pp}} & \mathrm{Prin}(E) & &
\end{array}
$$
implies that $[f_0 \frac{\beta^2}{2}] = f_0 \cup [\frac{\beta^2}{2}]$.
Moreover the skew-symmetric cup-product map
$\cup b$
$$\cup b = \wedge \overline{b}: \ H^0(E) = W^*
\longrightarrow H^1(E) = W = \Lambda^2 W^*$$
identifies with the exterior product $\wedge \overline{b}$, where
$\overline{b}= \pi(b) \in W^*$. It is clear that
$\mathrm{im} \: (\cup b) = \mathrm{im} \: (\wedge \overline{b}) =
\mathrm{ker} \: (\wedge \overline{b})$, where $\wedge \overline{b}$ also
denotes the linear form
\begin{equation} \label{wedgef0}
\wedge \overline{b}: \ \Lambda^2 W^* \longrightarrow \Lambda^3 W^* \cong \mathbb{C}.
\end{equation}
As already observed, we have $f_0 = \overline{b}$. Denoting by
$c \in W^*$ the class $\pi([\frac{\beta^2}{2}])$, we see that the relation
$(f_0 \wedge c) \wedge \overline{b} =
\overline{b} \wedge c \wedge \overline{b} = 0$ implies that
$f_0 \cup [\frac{\beta^2}{2}] \in \mathrm{ker} \: (\wedge \overline{b}) =
\mathrm{im} \: (\cup b)$.
Therefore the previous condition simplifies to
$[f_1 \beta] \in \mathrm{im} \:(\cup b)$. We next observe that the linear form
$\wedge \overline{b}$
on $H^1(E)$ \eqref{wedgef0} identifies with the exterior product map
$$ H^1(E) \xrightarrow{\wedge f_0} H^1(\omega) \cong \mathbb{C}.$$
Since we have a commutative diagram
$$
\begin{array}{rccccc}
f_1 \in & H^0(E(I)) & \xrightarrow{\cdot \beta} & \mathrm{Prin}(E) & \longrightarrow & H^1(E) \\
& & & & & \\
& & & \downarrow \ \wedge f_0 & & \downarrow
\ \wedge f_0 \\
& & & & & \\
f_1 \wedge f_0 \in & H^0(\omega) & \xrightarrow{\cdot \beta} & \mathrm{Prin}(\omega) &
\longrightarrow & H^1(\omega),
\end{array}
$$
and since $f_1 \wedge f_0 \in H^0(\omega) \subset \mathrm{Rat}(\omega)$,
we easily see that the condition $[f_1 \beta] \in \mathrm{im} \:(\cup b)$ is
equivalent to $f_1 \wedge f_0 \in H_b =
\mathrm{ker} \: \left( \cup b : H^0(\omega) \longrightarrow H^1(\omega) \right).$
\end{proof}
In the following proposition we give more details on the element
$f_1 \wedge f_0 \in H^0(\omega)$. We additionally assume that
$\pi(b) \notin \Gamma$, which implies that the
global section $f_0 \in H^0(E)$
does not vanish at any point and hence determines an exact sequence
\begin{equation} \label{extes}
0 \longrightarrow {\mathcal O} \xrightarrow{f_0} E \xrightarrow{\wedge f_0} \omega \longrightarrow 0.
\end{equation}
The coboundary map of the associated long exact sequence
\begin{equation} \label{extcl}
\cdots \longrightarrow H^0(\omega) \xrightarrow{\cup e} H^1({\mathcal O}) \longrightarrow \cdots
\end{equation}
is symmetric and coincides (e.g. \cite{kempf1} Corollary 6.8)
with cup-product $\cup e$ with
the extension class $e \in \mathbb{P} H^1(\omega^{-1}) = |\omega^2|^*$. Moreover
$\cup e$ is the image of $e$ under the dual of the multiplication map
\begin{equation} \label{mapext}
H^1(\omega^{-1}) = H^0(\omega^2)^* \hookrightarrow
\mathrm{Sym}^2 H^0(\omega)^*, \qquad e \longmapsto \cup e.
\end{equation}
We note that $\mathrm{corank}(\cup e) = 2$ and that $\mathrm{ker} \: (\cup e) =
V_b$. Hence $(f_1 \wedge f_0) \cup e$ is well-defined.
\begin{prop} \label{quad}
If $\pi(b) \notin \Gamma$, then $f_1 \wedge f_0 \notin \mathrm{ker} \: (\cup e)$ and
we have (up to a nonzero scalar)
$$(f_1 \wedge f_0) \cup e = b \in H^1({\mathcal O}).$$
\end{prop>
\begin{proof}
We keep the notation of the previous proof. The condition
$f_1 \wedge f_0 \in V_b$ implies that $f_1$ is a regular section
and, by \eqref{expdef}, that $f_0$ vanishes at the support of $b$, i.e.,
$\pi(b) \in \Gamma$. As for the equality of the proposition,
we introduce the rank-$2$ vector bundle $\hat{E}$ which
is obtained from $E$ by
(positive) elementary transformations at the points $p \in I$ and
with respect to the line in $E_p$ spanned by the nonzero
vector $f_0(p)$. Then we have $E \subset \hat{E} \subset E(I)$ and
$\hat{E}$ fits into the exact sequence
$$ 0 \longrightarrow E \longrightarrow \hat{E} \longrightarrow {\mathfrak{m}athcal O}_I \longrightarrow 0. $$
Moreover $f_1 \in H^0(\hat{E})$, which follows from condition
\eqref{expdef}. We also have the following exact sequences
$$
\begin{array}{ccccccccccc}
0 & \longrightarrow & {\mathcal O}(I) & \longrightarrow & \hat{E} & \xrightarrow{\wedge f_0} & \omega & \longrightarrow & 0
& \qquad & (\hat{e}) \\
& & & & & & & & & & \\
& & \cup & & \cup & & \Vert & & & & \\
& & & & & & & & & & \\
0 & \longrightarrow & {\mathcal O} & \xrightarrow{f_0} & E & \xrightarrow{\wedge f_0} & \omega & \longrightarrow
& 0 & \qquad & (e),
\end{array}
$$
and the extension class $\hat{e} \in H^1(\omega^{-1}(I))$ is obtained
from $e$ by the canonical projection $H^1(\omega^{-1}) \rightarrow
H^1(\omega^{-1}(I))$. Taking the associated long exact sequences, we
obtain
$$
\begin{array}{cccccc}
f_1 \in & H^0(\hat{E}) & \xrightarrow{\wedge f_0} & H^0(\omega) & \xrightarrow{\cup \hat{e}} &
H^1({\mathcal O}(I)) \\
& & & & & \\
& \cup & & \Vert & & \uparrow \ \pi_I \\
& & & & & \\
& H^0(E) & \xrightarrow{\wedge f_0} & H^0(\omega) & \xrightarrow{\cup e} & H^1({\mathcal O}),
\end{array}
$$
where the two squares commute. This means that
$$ \pi_I \left( (f_1 \wedge f_0) \cup e \right) = (f_1 \wedge f_0) \cup
\hat{e} = 0. $$
Since $f_1 \wedge f_0$ does not depend on $\beta$ (nor on $I$), the
latter relation holds for any $I$ with $I = \mathrm{supp} \: \beta$. Hence,
denoting by $\langle I \rangle$ the linear span in $|\omega|^*$ of the
support $I$ of $\beta$, we obtain
$$ (f_1 \wedge f_0) \cup e \in \bigcap_{I = \mathrm{supp} \: \beta} \mathrm{ker} \: \ \pi_I
= \bigcap_{b \in \langle I \rangle} \langle I \rangle = b.$$
\end{proof}
\subsection{Geometric properties of $F_W$}
\begin{prop} \label{geomprop}
For any $[W] \notin \mathcal{D}$ we have the following
\begin{enumerate}
\item The quartic $F_W$ contains the canonical curve $C$, i.e.,
$F_W \in |I(4)|$.
\item The quartic $F_W$ contains the secant line $\overline{pq}$,
with $p \not= q$,
if and only if $\overline{pq} \cap \mathbb{P} W^\perp \not= \emptyset$ or
$\mathrm{dim} \: W \cap H^0(\omega(-2p-2q)) >0$.
\item Let $\Sigma$ be the set of points $p$ at which the tangent line
$\mathbb{T}_p(C)$ intersects the vertex $\mathbb{P} W^\perp$. Then $\Sigma$ is empty for
general $[W]$ and finite for any $[W]$. Moreover any point $p \in
C \setminus \Sigma$ is smooth on $F_W$ and the embedded tangent space
$\mathbb{T}_p(F_W)$ is the linear span of $\mathbb{T}_p(C)$ and $\mathbb{P} W^\perp$.
\end{enumerate}
\end{prop}
\begin{proof}
All statements are easily deduced from Proposition \ref{simpl}. Given a
point $p \in C$ we denote by $\mathfrak{p}_p \in \mathrm{Prin}_p({\mathcal O})$
the principal part supported at $p$ of a rational function with a
simple pole at $p$. Then the class
$[\mathfrak{p}_p] \in H^1({\mathcal O})$ is proportional to $i_\omega(p) \in |\omega|^*
= \mathbb{P} H^1({\mathcal O})$ and the section $f_0$ vanishes at $p$. Hence
$f_0 \mathfrak{p}_p \in \mathrm{Prin}(E)$ is everywhere regular and we may choose
$f_1 =0$. This proves part 1. See also \cite{pp}.
As for part 2, we introduce $\beta_{\lambda,\mu} = \lambda \mathfrak{p}_p + \mu \mathfrak{p}_q
\in \mathrm{Prin}({\mathcal O})$ for $\lambda, \mu \in \mathbb{C}$ and denote by $s_p$ and
$s_q$ the global sections $\pi([\mathfrak{p}_p])$ and $\pi([\mathfrak{p}_q])$, which
vanish at $p$ and $q$ respectively. Then one checks that $f_0 =
\lambda s_p + \mu s_q \in \mathrm{ker} \: (\cup [\beta_{\lambda,\mu}])$ and
$\mathrm{pp}(f_1) = \lambda \mu (s_q \mathfrak{p}_p + s_p \mathfrak{p}_q) \in \mathrm{Prin}(E)$.
With this notation the condition of Proposition \ref{simpl}
transforms into
\begin{equation} \label{resfw}
0 = l_{\lambda, \mu}(f_0 \wedge f_1) = \lambda \mu (\lambda^2
\gamma_p + \mu^2 \gamma_q),
\end{equation}
where $l_{\lambda, \mu}$ is the linear form defined by
$[\beta_{\lambda,\mu}] \in H^1({\mathcal O})$.
The scalars $\gamma_p$ and $\gamma_q$ are the
values of the section $s_p \wedge s_q \in W \cap
H^0(\omega(-p-q))$ at $p$ and $q$ respectively. We now conclude noting that
$s_p \wedge s_q = 0$ if and only if $\overline{pq} \cap \mathbb{P} W^\perp
\not= \emptyset$.
As for part 3, we first observe that the assumption $\Sigma = C$ implies
that the restriction $\pi_{|C} : C \rightarrow \mathbb{P} W^*$ contracts $C$ to a point,
which is impossible. Next we consider the tangent vector $t_q$ at $p$
given by the direction $q$. By putting $\lambda = 1$ and $\mu = \epsilon$,
with $\epsilon^2 = 0$, into equation \eqref{resfw} we obtain that
$t_q \in \mathbb{T}_p(F_W)$ if and only if $\epsilon \gamma_p = 0$, i.e.,
$\pi(q) \in \mathbb{T}_{\pi(p)}(\Gamma)$. Hence $\mathbb{T}_p(F_W) =
\pi^{-1}(\mathbb{T}_{\pi(p)}(\Gamma))$,
which proves part 3.
\end{proof}
\subsection{The cubic polar $P_x(F_W)$}
Firstly we deduce from Propositions \ref{simpl} and \ref{quad} a
criterion for $b \in P_x(F_W)$, with $x \in W^\perp$. Let $H_x$ be
the hyperplane determined by $x \in H^1({\mathcal O})$. As above we
assume $b \notin W^\perp$ and $\pi(b) \notin \Gamma$, i.e.,
the pencil $V = V_b$ is base-point-free.
\begin{prop} \label{cricub}
We have the following equivalence
$$ b \in P_x(F_W) \qquad \iff \qquad f_1 \wedge f_0 \in H_x.$$
\end{prop}
\begin{proof}
We recall from section 4.1 that $\cup e$ induces a symmetric
isomorphism $\cup e : (V^\perp)^* \xrightarrow{\sim} V^\perp$ and we
denote by $Q^* \subset \mathbb{P} (V^\perp)^*$ and $Q \subset \mathbb{P} V^\perp$
the two associated smooth quadrics. Note that $Q$ and $Q^*$ are
dual to each other. Combining Propositions \ref{simpl}, \ref{quad}
and \ref{singver} (1) we see that the restriction of the quartic
$F_W$ to the linear subspace $\mathbb{P} V^\perp \subset |\omega|^*$
splits into a sum of divisors
$$ \left(F_W \right)_{|\mathbb{P} V^\perp} = 2 \mathbb{P} W^\perp + Q.$$
We also observe that $Q$ only depends on $V$ (and on $W$) and not on $b$.
Taking the polar with respect to $x \in W^\perp$, we obtain
$$ \left(P_x(F_W) \right)_{|\mathbb{P} V^\perp} = 2 \mathbb{P} W^\perp + P_x(Q).$$
Finally we see that the condition $b \in P_x(Q)$ is equivalent
to $f_0 \wedge f_1 = (\cup e)^{-1} (b) \in H_x$.
\end{proof}
We easily deduce from this criterion some properties of $P_x(F_W)$.
\begin{prop} \label{cubcc}
The cubic $P_x(F_W)$ contains the canonical
curve $C$, i.e., $P_x(F_W) \in |I(3)|$.
\end{prop}
\begin{proof}
We first observe that the two closed conditions of Proposition
\ref{cricub} are equivalent outside $\pi^{-1}(\Gamma)$. Hence they
coincide as well on $\pi^{-1}(\Gamma)$ and we can drop the
assumption $\pi(b) \notin \Gamma$. Now, as in the proof of
Proposition \ref{geomprop}(1), we may choose $f_1 =0$.
\end{proof}
\begin{prop}
We have the following properties
$$ \bigcap_{x \in W^\perp} P_x(F_W) = S_W \cup \mathbb{P} W^\perp \cup
\bigcup_{n \geq 2} \Lambda_n,$$
$$ F_W \cap S_W = C \cup \Lambda_1, \qquad \text{and} \qquad
\bigcup_{n \geq 0} \Lambda_n \subset F_W,$$
where $S_W$ is an irreducible surface. For $n \geq 0$, we denote by
$\Lambda_n$ the union of $(n+1)$-secant $\mathbb{P}^n$'s to the
canonical curve $C$, which intersect the vertex $\mathbb{P}
W^\perp$ along a $\mathbb{P}^{n-1}$. If $W$ is general, then $\Lambda_n =
\emptyset$ for $n \geq 2$ and $\Lambda_1$ is the union of
$2(g-1)(g-3)$ secant lines.
\end{prop}
\begin{proof}
We consider $b$ in the intersection of all $P_x(F_W)$ and we first
suppose that $\pi(b) \notin \Gamma$. Then by Propositions
\ref{simpl} and \ref{cricub} we have
$$ f_0 \wedge f_1 \in \bigcap_{x \in W^\perp} H_x = W.$$
Hence we obtain that $\mathbb{P} V^\perp \cap \bigcap_{x \in W^\perp}
P_x(F_W)$ is reduced to the point $(\cup e)(W) \in \mathbb{P} V^\perp$.
On the other hand a standard computation shows that $S_W$ is the
image of $\mathbb{P}^2$ under the linear system of the adjoint curves of
$\Gamma$. Hence $S_W$ is irreducible.
If $\pi(b) \in \Gamma$, we denote by $p_1,\ldots,p_{n+1} \in C$
the points such that $\pi(p_i) = \pi(b)$. Then $f_0$ vanishes at
$p_1,\ldots,p_{n+1}$. Since $f_1 \wedge f_0$ does not depend on
the support of $b$, we can choose $\mathrm{supp} \: b$ such that $p_i \notin
\mathrm{supp} \: b$. Then $f_1$ is regular at $p_i$ and we deduce that $f_1
\wedge f_0 \in H^0(\omega(-\sum p_i)) \cap W = V_b.$ Now any
rational $f_1$ satisfying $f_1 \wedge f_0 \in V_b = \mathrm{im} \: (\wedge
f_0)$ is regular everywhere, which can only happen when $f_0$
vanishes at the support of $b$. By uniqueness we have $\mathrm{supp} \: b
\subset \{ p_1,\ldots,p_{n+1} \}$ and $b \in \Lambda_n$. Note that
$\Lambda_0 = C$. This proves the first equality.
If $b \in F_W \cap S_W$, we have $f_1 \wedge f_0 \in W \cap H_b =
V_b$ and we conclude as above. Note that $\Lambda_1$ is contained
in $S_W$ and is mapped by $\pi$ to the set of ordinary double
points of $\Gamma$.
\end{proof}
For any $[W] \in \mathrm{Gr}(3,H^0(\omega)) \setminus \mathcal{D}$ we introduce the subspace
of $I(3)$
$$ L_W = \{ R \in I(3) \ | \ R \ \ \text{is singular along the vertex} \ \
\mathbb{P} W^\perp \}. $$ Then Propositions \ref{cubcc} and
\ref{singver}(2) imply that $P_x(F_W) \in L_W$. More precisely,
we have
\begin{prop}
The restriction of the polar map of the quartic $F_W$ to its vertex
$\mathbb{P} W^\perp$
$$ \mathbf{P}: \ \ W^\perp \longrightarrow L_W, \qquad x \longmapsto P_x(F_W),$$
is an isomorphism.
\end{prop}
\begin{proof}
First we show that $\mathrm{dim} \: L_W = g-3$. We choose a complementary
subspace $A$ to $W^\perp$, i.e., $H^0(\omega)^* = W^\perp \oplus A$,
and a set of coordinates $x_1,\ldots,x_{g-3}$ on $W^\perp$ and
$a_1,a_2,a_3$ on $A$. This enables us to expand a cubic $F \in
S^3 H^0(\omega)$
$$ F=F_3(x) + F_2(x)G_1(a) + F_1(x)G_2(a) + G_3(a), \qquad
F_i \in \mathbb{C}[x_1,\ldots,x_{g-3}], \ G_i \in \mathbb{C}[a_1,a_2,a_3],$$
with $\mathrm{deg} \: F_i = \mathrm{deg} \: G_i = i$. Let $\mathcal{S}_A$ denote the
subspace of cubics singular along $\mathbb{P} A$, i.e., $G_2 = G_3 = 0$.
We consider the linear map
$$ \alpha : I(3) \longrightarrow \mathcal{S}_A, \qquad F \longmapsto F_3(x) + F_2(x)
G_1(a).$$
Since by Lemma \ref{quaw} any monomial $x_ix_j \in H^0(\mathbb{P} W^\perp, {\mathcal O}(2))$
lifts to a quadric $Q_{ij} \in I(2)$, we observe that the monomials
$x_ix_jx_k$ and $x_ix_ja_l$, which generate $\mathcal{S}_A$, also
lift e.g. to $Q_{ij}x_k$ and $Q_{ij}a_l$ in $I(3)$. Hence
$\alpha$ is surjective and $\mathrm{dim} \: L_W = \mathrm{dim} \: \mathrm{ker} \: \alpha$ is easily
calculated. One also checks that this computation does not depend on $A$.
In order to conclude, it will be enough to show that $\mathbf{P}$
is injective. Suppose that the contrary holds, i.e., there exists
a point $x \in W^\perp$ with $P_x(F_W) = 0$. Given any base-point-free
pencil $V\subset W$ and any $b \in V^\perp$, we obtain by Proposition
\ref{cricub} that $f_0 \wedge f_1 \in H_x$. Since $\cup e:
(V^\perp)^* \xrightarrow{\sim} V^\perp$ is an isomorphism, we see that for
$b \notin (\cup e)^{-1}(H_x)$ the element $f_0 \wedge f_1$ must be
zero. This implies that $b \in \Lambda$ and since $b$ varies in
an open subset of $|\omega|^*$, we obtain $\Lambda = |\omega|^*$,
a contradiction.
\end{proof}
\subsection{The quadric bundle associated to $F_W$}
Let $\tilde{\mathbb{P}}^{g-1}_W \rightarrow |\omega|^*$ denote the blowing-up
of $|\omega|^*$ along the vertex $\mathbb{P} W^\perp \subset |\omega|^*$.
The rational projection $\pi : |\omega|^* \dashrightarrow \mathbb{P}^2 =
\mathbb{P} W^*$ resolves into a morphism $\tilde{\pi} :
\tilde{\mathbb{P}}^{g-1}_W \rightarrow \mathbb{P}^2$. Since $F_W$ is
singular along $\mathbb{P} W^\perp$ (Proposition \ref{singver} (2)), the
proper transform $\tilde{F}_W \subset \tilde{\mathbb{P}}^{g-1}_W$ admits
a structure of a quadric bundle $\tilde{\pi} : \tilde{F}_W \rightarrow
\mathbb{P}^2$.
The contents of Propositions \ref{geomprop} and \ref{cubcc} can be
reformulated in a more geometrical way.
\begin{thm} \label{mainthm}
For any $[W] \in \mathrm{Gr}(3,H^0(\omega)) \setminus \mathcal{D}$, the quadric bundle
$\tilde{\pi}: \tilde{F}_W \rightarrow \mathbb{P}^2$ has the following properties
\begin{enumerate}
\item Its {\em Hessian} curve is $\Gamma \subset \mathbb{P}^2$.
\item Its {\em Steinerian} curve is the
(proper transform of the) canonical curve $C \subset
|\omega|^*$.
\item The rational {\em Steinerian map} $\mathrm{St} : \Gamma
\dashrightarrow C$, which associates to a singular quadric
its singular point, coincides with the adjoint map $\mathrm{ad}$
of the plane curve $\Gamma$. Moreover the closure of the image
$\mathrm{ad}(\mathbb{P}^2)$ equals $S_W$.
\end{enumerate}
\end{thm}
\begin{rem}
We note that Theorem \ref{mainthm} is analogous to the main
result of \cite{ks} (replace $\mathbb{P}^2$ with $\mathbb{P}^1 \times \mathbb{P}^1$).
In spite of this striking similarity and the relation between
the two parameter spaces $\sing$ % NOTE(review): macro garbled in source; original symbol uncertain -- confirm against the published paper
and $\mathrm{Gr}(3,H^0(\omega))$
(see \cite{pp}), we were unable to find a common frame for both
constructions.
\end{rem}
\section{The cubic hypersurface $\Psi_V \subset \mathbb{P}^{g-3}$ associated to a
base-point-free pencil $\mathbb{P} V \subset |\omega|$}
In this section we show that the symmetric cup-product maps
$\cup e \in \mathrm{Sym}^2 H^0(\omega)^*$ (see \eqref{extcl}) arise as
polar quadrics of a cubic hypersurface $\Psi_V$, which will
be used in the proof of Theorem \ref{mainthm2}.
Let $V$ denote a base-point-free pencil of $H^0(\omega)$. We consider the
exact sequence given by evaluation of sections of $V$
\begin{equation} \label{esv}
0 \longrightarrow \omega^{-1} \longrightarrow {\mathcal O}_C \otimes V \xrightarrow{ev} \omega \longrightarrow 0.
\end{equation}
Its extension class $v \in \mathrm{Ext}^1(\omega,\omega^{-1}) \cong
H^1(\omega^{-2}) \cong H^0(\omega^3)^*$ corresponds to the
hyperplane in $H^0(\omega^3)$, which is the image of the
multiplication map
\begin{equation} \label{hyp}
\mathrm{im} \: \left(V \otimes H^0(\omega^2) \longrightarrow H^0(\omega^3) \right).
\end{equation}
We consider the cubic form $\Psi_V$ defined by
$$ \Psi_V : \mathrm{Sym}^3 H^0(\omega) \xrightarrow{\mu} H^0(\omega^3) \xrightarrow{\bar{v}} \mathbb{C},$$
where $\mu$ is the multiplication map and $\bar{v}$ the linear form
defined by the extension class $v$. It follows from the description
\eqref{hyp} that $\Psi_V$ factorizes through the quotient
$$ \Psi_V : \mathrm{Sym}^3 \mathcal{V} \longrightarrow \mathbb{C},$$
where $\mathcal{V} := H^0(\omega)/V$. We also denote by $\Psi_V \subset
\mathbb{P} \mathcal{V}$ its associated cubic hypersurface.
A $3$-plane $W \supset V$ determines a nonzero vector $w$ in
the quotient $\mathcal{V} = H^0(\omega)/V$ and a general $w$ determines
an extension \eqref{extes} --- recall that $W^* \cong H^0(E)$. Hence
we obtain an injective linear map $\mathcal{V} \hookrightarrow H^1(\omega^{-1}),
w \mapsto e$, which we compose with \eqref{mapext}
$$\Phi: \mathcal{V} \hookrightarrow H^1(\omega^{-1}) = H^0(\omega^2)^*
\hookrightarrow \mathrm{Sym}^2 H^0(\omega)^*, \qquad w \mapsto e \mapsto \cup e.$$
Since $V \subset \mathrm{ker} \: (\cup e)$, we note that $\mathrm{im} \: \Phi \subset \mathrm{Sym}^2
\mathcal{V}^*$.
We can now state the main result of this section.
\begin{prop}
The linear map $\Phi: \mathcal{V} \rightarrow \mathrm{Sym}^2 \mathcal{V}^*$ coincides with the polar map
of the cubic form $\Psi_V$, i.e.,
$$ \forall w \in \mathcal{V}, \qquad \Phi(w) = P_w(\Psi_V).$$
\end{prop}
\begin{proof}
This is straightforwardly read from the diagram
obtained by relating the exact sequences \eqref{esv} and \eqref{esw}
via the inclusion $V \subset W$. We leave the details to the reader.
\end{proof}
We also observe that, by definition of the Hessian hypersurface (see
e.g. \cite{dk} section 3), we have an equality among degree
$g-2$ hypersurfaces of $\mathbb{P} \mathcal{V} = \mathbb{P}^{g-3}$
\begin{equation} \label{hesspsi}
\mathrm{Hess}(\Psi_V) = \mathcal{D} \cap \mathbb{P} \mathcal{V},
\end{equation}
where we use the inclusion $\mathbb{P} \mathcal{V} \subset \mathrm{Gr}(3,H^0(\omega))$.
\begin{rem}
We recall (see \cite{dk} (5.2.1)) that the Hessian and Steinerian of a cubic
hypersurface coincide and that the Steinerian map is a rational involution
$i$. In the case of the cubic $\Psi_V$, the involution
$$ i: \mathrm{Hess}(\Psi_V) \dashrightarrow \mathrm{Hess}(\Psi_V)$$
corresponds to the involution of \cite{bv} Propositions 1.18 and
1.19, i.e., $\forall w \in
\mathcal{D} \cap \mathbb{P} \mathcal{V}$, the bundles $E_w$ and $E_{i(w)}$ are related
by the exact sequence
$$ 0 \longrightarrow E_{i(w)}^* \longrightarrow {\mathcal O}_C \otimes H^0(E_w) \xrightarrow{ev} E_w \longrightarrow 0.$$
Since we will not use that result, we leave its proof to the reader.
\end{rem}
\section{Base loci of $|F_3|$ and $|F_4|$}
Let us denote by $|F_3| \subset |I(3)|$ and $|F_4| \subset |I(4)|$
the linear subsystems spanned by the image of the rational maps
$\mathbf{F}_3$ and $\mathbf{F}_4$ respectively. Then we have the following
\begin{thm} \label{mainthm2}
The base loci of $|F_3|$ and $|F_4|$ coincide with the canonical curve
$C \subset |\omega|^*$.
\end{thm}
\begin{proof}
Let $b \in \mathrm{Bs} \: |F_3|$ and let us suppose that $b \notin C$. We consider
a base-point-free pencil $V \subset H_b$. With the notation of
section 5, we introduce the rational map
$$ r_b : \mathbb{P} \mathcal{V} \dashrightarrow \mathbb{P} \mathcal{V}, \qquad w \mapsto
r_b(w) = w', \qquad \text{with} \ \tilde{\Psi}_V (w,w', \cdot) = b,$$
where $\tilde{\Psi}_V$ is the symmetric trilinear form of $\Psi_V$.
We note (Proposition \ref{quad}) that, for $w \notin \mathbb{P}(H_b/V)$, the
element $r_b(w)$ is collinear with the nonzero
element $f_0 \wedge f_1 \ \mod \ V$ and that $r_b$ is defined
away from the hypersurface $\mathrm{Hess}(\Psi_V)$, which we assume to be
nonzero. Since $b \in \mathrm{Bs} \:|F_3|$ we obtain by Proposition \ref{cricub}
that
$$r_b(w) = \left( \bigcap_{x \in W^{\perp}} H_x \right) \ \text{mod}
\ V = W \ \text{mod} \ V = w.$$
Hence $r_b$ is the identity map (away from $\mathrm{Hess}(\Psi_V)$).
This implies that $\tilde{\Psi}_V(w,w,\cdot) = b$
for any $w \in \mathbb{P} \mathcal{V}$, hence $\Psi_V = x_0^3$, where $x_0$ is the
equation of the hyperplane $\mathbb{P}(H_b/V) \subset \mathbb{P} \mathcal{V}$. This in
turn implies that $\mathrm{Hess}(\Psi_V) = 0$, i.e., $\mathbb{P} \mathcal{V} \subset \mathcal{D}$.
Since for a general $[W] \in \mathrm{Gr}(3,H^0(\omega))$ the pencil $V = W
\cap H_b$ is base-point-free, we obtain that a general $[W]$ lies on the
divisor $\mathcal{D}$, which is a contradiction.
As for $|F_4|$, we recall that the fact $\mathrm{Bs} \: |F_4| = C$ follows from
\cite{welt}. Alternatively, it can also be deduced by noticing
(see Proposition \ref{f4sq}) that $\mathrm{Bs} \: |F_4| \subset \mathrm{Bs} \: |I(2)|$. Hence, if
$C$ is neither trigonal nor a plane quintic, we are done. In the other
cases, the result can be deduced from Proposition \ref{geomprop} ---
we leave the details to the reader.
\end{proof}
\section{Open questions}
\subsection{Dimensions}
The projective dimensions of the linear systems $|F_3|$ and $|F_4|$ are
not known for general $g$. The known values of $\mathrm{dim} \: |F_4|$ for a general curve
$C$ are given as follows (see \cite{pp}).
$$
\begin{array}{|c|c|c|c|c|}
\hline
g & 4 & 5 & 6 & 7 \\
\hline
\mathrm{dim} \: |F_4| & 4 & 15 & 40 & 88 \\
\hline
\end{array}
$$
The examples of \cite{pp} section 6 show that $\mathrm{dim} \: |F_4|$
depends on the gonality of $C$. Moreover it can be shown that
$|F_4| \not= |I(4)|$.
\subsection{Prym-canonical spaces and symplectic bundles}
The construction of the quartic hypersurfaces $F_W$ admits various
analogues and generalizations, which we briefly outline.
{\bf{(1)}}
Let $P_\alpha := \mathrm{Prym}(C_\alpha/C)$ denote the Prym
variety of the \'etale double cover $C_\alpha \rightarrow C$ associated
to the nonzero $2$-torsion
point $\alpha \in JC$. Given a general $3$-plane $Z \subset
H^0(C, \omega \alpha)$, we associate the rank-$2$ vector bundle
$E_Z$ defined by
$$ 0 \longrightarrow E_Z^* \longrightarrow {\mathcal O}_C \otimes Z \xrightarrow{ev} \omega \alpha \longrightarrow 0.$$
By \cite{ip} Proposition 4.1 we can associate to $E_Z$ the
divisor $\Delta(E_Z) \in |2\Xi|$, where $\Xi$ is a symmetric
principal polarization on $P_\alpha$. Its projectivized tangent
cone at the origin $0 \in P_\alpha$ is a quartic hypersurface
$F_Z$ in the Prym-canonical space $\mathbb{P} T_0P_\alpha \cong |\omega
\alpha|^*$. Kempf's obstruction theory equally applies to the
quartics $F_Z$. We note that $F_Z$ contains the Prym-canonical
curve $i_{\omega \alpha}(C) \subset |\omega \alpha|^*$.
{\bf{(2)}} Let $W$ be a vector space of dimension $2n+1$, for $n \geq 1$.
We consider a {\em general} linear map
$$ \Phi : \Lambda^2 W^* \longrightarrow H^0(C,\omega).$$
By taking the $n$-th symmetric power $\mathrm{Sym}^n \Phi$ and using the
canonical maps $\mathrm{Sym}^n (\Lambda^2 W^*) \rightarrow \Lambda^{2n} W^* \cong W$ and
$\mathrm{Sym}^n H^0(\omega) \rightarrow H^0(\omega^{\otimes n})$, we obtain a
linear map
$$ \alpha: W \longrightarrow H^0(\omega^{\otimes n}),$$
which we assume to be injective. We then define the rank $2n$ vector
bundle $E_\Phi$ by
$$ 0 \longrightarrow E^*_\Phi \longrightarrow {\mathcal O}_C \otimes W \xrightarrow{ev} \omega^{\otimes n} \longrightarrow 0.$$
The bundle $E_\Phi$ carries an $\omega$-valued symplectic form and
the projectivized tangent cone at ${\mathcal O} \in JC$
to the divisor $D(E_\Phi)$ is a hypersurface $F_\Phi$ in $|\omega|^*$
of degree $2n+2$. Moreover $F_\Phi \in |I(2n+2)|$.
\flushleft{Christian Pauly \\
Laboratoire J.-A. Dieudonn\'e \\
Universit\'e de Nice-Sophia-Antipolis \\
Parc Valrose \\
06108 Nice Cedex 2 \\
France \\
e-mail: [email protected]}
\end{document} |
\begin{document}
\preprint{APS/123-QED}
\title{Polarization-orbital angular momentum duality assisted entanglement observation for indistinguishable photons}
\author{Nijil Lal}
\email{[email protected]}
\affiliation{Physical Research Laboratory, Ahmedabad 380009, India}
\author{Sarika Mishra}
\affiliation{Physical Research Laboratory, Ahmedabad 380009, India}
\affiliation{Indian Institute of Technology, Gandhinagar 382355, India}
\author{Anju Rani}
\affiliation{Physical Research Laboratory, Ahmedabad 380009, India}
\affiliation{Indian Institute of Technology, Gandhinagar 382355, India}
\author{Anindya Banerji}
\affiliation{Physical Research Laboratory, Ahmedabad 380009, India}
\author{ Chithrabhanu Perumangattu}
\affiliation{Centre for Quantum Technologies, National University of Singapore, 3 Science Drive 2, S117543, Singapore}
\author{R. P. Singh}
\affiliation{Physical Research Laboratory, Ahmedabad 380009, India}
\date{\today}
\begin{abstract}
Duality in the entanglement of identical particles manifests that entanglement in only one variable can be revealed at a time. We demonstrate this using polarization and orbital angular momentum (OAM) variables of indistinguishable photons generated from parametric down conversion. We show polarization entanglement by sorting photons in even and odd OAM basis, while sorting them in two orthogonal polarization modes reveals the OAM entanglement. The duality assisted observation of entanglement can be used as a verification for the preservation of quantum indistinguishability over communication channels. Indistinguishable photons entangled in complementary variables could also evoke interest in distributed quantum sensing protocols and remote entanglement generation.
\end{abstract}
\maketitle
\section{\label{sec:level1}Introduction}
\maketitle
Complementarity is a unique manifestation of quantum mechanics, like entanglement. Introduced as a concept by Niels Bohr \cite{bohrcompli} and developed through further rigorous scientific discussion, complementarity broadly states that objects possess mutually exclusive properties such that the full knowledge of one property precludes full knowledge of the conjugate one. The wave-particle duality \cite{WZineq} and Heisenberg's uncertainty principle \cite{Kraus} are closely associated with the complementarity principle. \textit{Welcher Weg} experiments demonstrate the complementarity between distinguishability and interference visibility implying that quantum interference will take place only if the measurement does not distinguish between the interfering pathways \cite{greenberger88,qeraser,pittman,whichwayAKpati}. In other words, indistinguishability leads to quantum interference. It is interesting to note that interference is related to coherence, which is a wave property, while distinguishability is associated with localized variables, which is a particle-like property \cite{Mandel91,QDuality}. Indistinguishability of photons evokes great interest in quantum information protocols \cite{indis_1,indis_2}. In fact, many studies have aimed towards reducing the distinguishability in entangled systems \cite{branning,torresindist,killoran2014} to achieve higher visibility of quantum interference.
Indistinguishability requires perfect overlap in spatio-temporal position, energy, polarization etc. which can be demonstrated through two-photon interference experiments. Indistinguishable photons could be generated in the output of a Hong-Ou-Mandel interferometer \cite{hom1987}. A source of highly indistinguishable and entangled photon pairs is crucial to various quantum information applications \cite{sc_indist1,sc_indist2, lo_indist3}. However, it is not possible to observe the entanglement between such indistinguishable photons unless we sort and separate them in terms of a physical variable, such as their position, momentum, polarization, orbital angular momentum (OAM) etc. For a general case of degenerate, non-collinear type-II spontaneous parametric down-conversion (SPDC) output, we can write the identical photons in terms of their different degrees of freedom as,
\begin{equation}
\left|\Psi\right\rangle=\frac{1}{\sqrt{2}}\left(\left|H, \mathbf{k}_{i}\right\rangle\left|V, \mathbf{k}_{s}\right\rangle+\left|V, \mathbf{k}_{i}\right\rangle\left|H, \mathbf{k}_{s}\right\rangle\right)
\end{equation}
where the state is written in terms of polarization and linear momentum. This can also be written as a polarization entangled state,
\begin{equation}
\left|\Psi\right\rangle=\frac{1}{\sqrt{2}}\left(\left|H\right\rangle_{k_{i}}\left|V\right\rangle_{k_{s}}+\left|V\right\rangle_{k_{i}}\left|H\right\rangle_{k_{s}}\right)
\end{equation}
using their linear momentum as a label to differentiate the subsystems. It is also possible to express the same state as,
\begin{equation}
\left|\Psi\right\rangle=\frac{1}{\sqrt{2}}\left(\left|k_{i}\right\rangle_{H}\left|k_{s}\right\rangle_{V}+\left|k_{s}\right\rangle_{H}\left|k_{i}\right\rangle_{V}\right),
\end{equation}
which is entangled in linear momentum and the individual subsystems are labelled by their polarization. This complementary behaviour of two independent degrees of freedom of identical particles is called duality in entanglement \cite{bose2013duality,bhattigsa,moreva2015bell}. To demonstrate this, we use another degree of freedom, namely orbital angular momentum. Considering a type-II SPDC process, the generated photon pairs are independently entangled in polarization and OAM, following the birefringence properties of the crystal and OAM conservation ($l_{p} = l_{1} + l_{2}$) \cite{mair2001,ali_NJP,nijil_JMO}. The corresponding state can be expressed as,
\begin{equation}
\begin{aligned}
|\Psi\rangle= &\frac{1}{2}( |H, l_{1}, \boldsymbol{k}_{i}\rangle|V, l_{2}, \boldsymbol{k}_{s}\rangle+|H, l_{2}, \boldsymbol{k}_{i}\rangle|V, l_{1}, \boldsymbol{k}_{s}\rangle \\
& + |V, l_{1}, \boldsymbol{k}_{i}\rangle|H, l_{2}, \boldsymbol{k}_{s}\rangle+|V, l_{2}, \boldsymbol{k}_{i}\rangle|H, l_{1}, \boldsymbol{k}_{s}\rangle ).
\end{aligned}
\label{eq:klabel}
\end{equation}
For a collinear output, the linear momentum labelling given in Eq. (\ref{eq:klabel}) becomes unavailable since $k_i = k_s = k$ and it becomes impossible to observe the entanglement. However, one can write,
\begin{equation}
\begin{aligned}
|\Psi\rangle &=\frac{1}{\sqrt{2}}\left(|H\rangle_{l_{1}}|V\rangle_{l_{2}}+|V\rangle_{l_{1}}|H\rangle_{l_{2}}\right) \otimes|\boldsymbol{k}\rangle \\
&\equiv \frac{1}{\sqrt{2}}\left(|l_{1}\rangle_{H} |l_{2}\rangle_{V}+|l_{2}\rangle_{H} |l_{1}\rangle_{V}\right) \otimes|\boldsymbol{k}\rangle .
\end{aligned}
\end{equation}
In most protocols involving the entanglement of orbital angular momentum of photons, the infinite dimensional OAM spectrum in the output of SPDC is restricted to a two-dimensional basis by the post-selection of the twin-photons. Due to this post-selection, a large amount of generated photons which belong to the other states in the infinite dimensional OAM basis are lost. A method to avoid this loss is to use an alternate basis defined by the even and odd states of OAM \cite{cbhanuevenodd, cbhanuhyper}.
In this work, we propose that the OAM of twisted photons defined in their even-odd basis can be used to separate the otherwise completely indistinguishable photons in the collinear output. For a pump beam carrying an odd OAM value, the SPDC photons will be generated in pairs of even and odd OAM states, following the conservation of OAM. For pump OAM, $l_p = 1$, in a collinear type-II SPDC process where the idler-signal pairs are generated in orthogonal polarization states, the output OAM state can be written as,
\begin{equation}
\begin{aligned}
|\Psi\rangle_{SPDC} &= \sum_{m=-\infty}^{+\infty} c_{m,1-m}|m\rangle_{H}|1-m\rangle_{V}\\
&= c_{0,1}|0\rangle_{H}|1\rangle_{V}+c_{1,0}|1\rangle_{H}|0\rangle_{V}\\ &\hspace{0.3cm} + c_{2,-1}|2\rangle_{H}|-1\rangle_{V}+c_{-1,2}|-1\rangle_{H}|2\rangle_{V} + ...\\
&= \frac{1}{\sqrt{2}}(|E\rangle_{H}|O\rangle_{V}+|O\rangle_{H}|E\rangle_{V}).
\label{eq:evenoddHV}
\end{aligned}
\end{equation}
Here, the twin photons are indistinguishable in every other degree of freedom, including their spatial position, except for their polarization and OAM. However, it is not possible to make individual measurements on these photons unless we separate them under some label. We use their polarization state to label these individual photons and observe the entanglement in the even-odd basis of the OAM. In the same way, the state in Eq. (\ref{eq:evenoddHV}) can be written by labelling them as even and odd OAM states,
\begin{equation}
|\Psi\rangle_{SPDC} = \frac{1}{\sqrt{2}}(|H\rangle_{E}|V\rangle_{O}+|V\rangle_{E}|H\rangle_{O}).
\end{equation}
Hence, depending upon whether the polarization or OAM has been used for the labelling, we can observe the entanglement in the other degree of freedom. It can be seen that the distinguishability of the associated particles reveals the entanglement. Experimentally, this means that the method that we use to distinguish signal and idler photons dictates the degree of freedom in which the photons are entangled.
Sorting of photons based on the polarization can be achieved using a simple polarizing beam splitter which separates H and V, revealing the entanglement in even-odd OAM states. To observe the polarization entanglement, we use an even-odd OAM sorter. The conventional and commonly used method for selecting and measuring the OAM component of photons is the phase-flattening technique. The spiral phase corresponding to a desired OAM state is flattened using spiral phase plates or holograms displayed on spatial light modulators. The resulting fundamental Gaussian mode is then coupled to a single mode fiber for measurement. However, such projective measurements based on phase-flattening have shortcomings in terms of efficiency and dependence on pump characteristics \cite{pflimitns}. Moreover phase flattening projects the photon into one of the OAM states and hence cannot be used in even-odd sorting. Using the two-dimensional even-odd basis for the twin-photon OAM states, the efficiency of entanglement protocols could be increased.
Figure \ref{fig:sorter_MZ_a} illustrates a basic setup for an even-odd OAM sorter that involves a Mach-Zehnder interferometer with a Dove prism in each arm \cite{leachint,sorterCNOT}. A Dove prism flips the OAM from $+l$ to $-l$ during the internal reflection. When two Dove prisms are kept in the two arms of an interferometer, they introduce an OAM dependent relative phase $2l\alpha$ where $\alpha$ is the relative rotation of the Dove prisms. The two Dove prisms are oriented perpendicular to one another and hence $\alpha = \pi/2$. This introduces a phase $l\pi$ between the two arms of the interferometer.
\begin{figure}
\caption{Interferometric sorter for even and odd OAM states of light using Dove prism in (a) Mach-Zehnder arrangement and (b) Folded Mach-Zehnder arrangement. In both figures, the orientation of the two Dove prisms are perpendicular to each other.}
\label{fig:sorter_MZ_a}
\label{fig:sorter_MZ_b}
\label{fig:sorter_MZ}
\end{figure}
The relative phase difference would turn out to be odd multiples of $\pi$ for all odd OAM orders and even multiples of $\pi$ for even OAM orders. As a result, the constructive interference will take place in different output ports for even and odd OAM values. To overcome the stability concerns, the Mach-Zehnder arrangement in Fig. \ref{fig:sorter_MZ_a} could be reconfigured as a more robust polarizing Sagnac interferometer \cite{cbhanu_sagnacsorter,slussarenkosorter}. However, a polarizing interferometer destroys the indistinguishability in polarization. In this experiment, we adopt a folded Mach-Zehnder arrangement [Fig. \ref{fig:sorter_MZ_b}] to set up a robust interferometer without affecting the indistinguishability as well as the duality.
\section{Methodology}
We take a periodically-poled type-II KTP (ppKTP) crystal to observe the duality in entanglement of polarization and OAM of twin photons generated in SPDC. In type-II SPDC, the idler and signal photons will have perpendicular polarizations along ordinary and extra-ordinary axes of the crystal. The output polarization state can be written as,
\begin{equation}
\ket{\Psi} = \frac{1}{\sqrt{2}} \left( \ket{H}_i \ket{V}_s \pm \ket{V}_i \ket{H}_s \right).
\end{equation}
Before going for the collinear indistinguishable photons, we consider the non-collinear generation of photon-pairs where the two photons are emitted along directions following the phase-matching condition,
\begin{equation*}
{\bold{k}_i} + {\bold{k}_s} \approx {\bold{k}_p}
\end{equation*}
where $\bold{k}$ is the linear momentum vector. The down-converted output forms a cone of correlated signal-idler photon pairs following the non-collinear phase matching condition.
\begin{figure}
\caption{Experimental setup to observe the entanglement in a non-collinear type-II SPDC from a ppKTP crystal ($\chi^{(2)}$ nonlinear medium).}
\label{fig:noncoll_setup}
\end{figure}
The experimental schematic is given in Fig. \ref{fig:noncoll_setup}. We use a Toptica TopMode (405 nm) laser as the pump which is loosely focused within the crystal using the lens, $\mathrm{L_1}$ (f=50 cm) such that the paraxial approximation for OAM conservation is valid. A band-pass filter (BF, 810 $\pm$ 5 nm) blocks the residual pump while transmitting the down-converted output.
\begin{figure}
\caption{The collimated SPDC output images obtained using an EMCCD kept at different distances from the crystal plane. A 10 cm lens placed after the SPDC output collimates the diverging cone of photon pairs.}
\label{fig:collimNC}
\end{figure}
With this spectral filtering, we consider our source to be giving degenerate photon pairs in the setups discussed in this work. The diverging cone of SPDC photons is collimated using lens, $\mathrm{L_2}$ (f=10 cm), for the overall length of the experiment [Fig. \ref{fig:collimNC}]. The low intensity photon distributions are imaged using an EMCCD (Andor iXon3). Signal and idler photons are then coupled to single mode fibers (SMF) through fiber coupling systems, FC (Thorlabs CFC-5X-B), consisting of a fiber launcher and an aspherical lens ($\mathrm{L_{FC}}$, f = 4.6 mm). These photons are detected at single photon counting modules, SPCM (Excelitas AQRH-16-FC), whose output is given to a coincidence counter, CC (ID Quantique ID800 TDC), to obtain the coincidence counts. To observe maximum entanglement, maximum overlap between the two polarization modes has to be ensured.
\begin{figure}
\caption{The crystal position and tilt are adjusted such that the SPDC outputs corresponding to both polarizations are spatially overlapping. Diametrically opposite regions (in red circles) correspond to the entangled photons.}
\label{fig:OverlapHV}
\end{figure}
The crystal position and tilt for pump incidence are adjusted such that good spatial overlap between the H-polarized cone and V-polarized cone is achieved [Fig. \ref{fig:OverlapHV}]. The correlated photons will be falling along diametrically opposite points following the phase matching conditions. Two regions, marked in red circles, are selected and coupled to the detector system.
The polarization projections are done using the combination of half wave plate (HWP) and a polarizer (P) kept in each arm of the SPDC output [Fig. \ref{fig:noncoll_setup}].
\begin{figure}
\caption{Polarization correlations corresponding to the spatially separated photons in the non-collinear down conversion pair. Error bars indicate statistical uncertainty of one standard deviation.}
\label{fig:vis_noncoll}
\end{figure}
The half wave plate, $\mathrm{HWP_1}$, in the idler arm is kept at fixed angles of rotation, $\theta_1$ = 0$\degree$, 45$\degree$, 90$\degree$ and 135$\degree$ and the second half wave plate kept in the signal arm ($\mathrm{HWP_2}$) is rotated as $\theta_2$ to observe the polarization correlation. The experimentally observed polarization correlations, corresponding to the projections in HV basis as well as DA basis, are given as the visibility curves in Fig. \ref{fig:vis_noncoll}. The normalized coincidences are plotted along y-axis with the variation of $\theta_2$ for (blue) $\theta_1$ = 0$\degree$, (red) $\theta_1$ = 45$\degree$, (green) $\theta_1$ = 90$\degree$ and (purple) $\theta_1$ = 135$\degree$ where $\theta_1$ and $\theta_2$ correspond to the rotation of $\mathrm{HWP_1}$ and $\mathrm{HWP_2}$ respectively in Fig. \ref{fig:noncoll_setup}. The observed visibilities in two bases are 96.4$\pm$0.2\% (HV basis) and 94.1$\pm$0.3\% (DA basis) respectively. The Bell parameter is estimated to be, S = 2.69 $\pm$ 0.03.
The non-collinear down-converted photon pairs discussed above are labelled by their spatial positions, and hence distinguishable. A collinear output can be assumed to give indistinguishable photon pairs since they are not separated in spatial modes.
\begin{figure}
\caption{Variation of the spatial distribution of the SPDC output from a type-II ppKTP crystal with different temperature values for quasi-phase matching. We observe that collinear output is obtained for a temperature, 40 $\degree$C.}
\label{fig:tempPM}
\end{figure}
For a periodically poled crystal, the phase matching will be governed by the temperature of the crystal \cite{ppKTP_1,ppKTP_2} and hence the crystal is mounted on a temperature controlling oven. By varying the temperature, one can achieve collinear phase matching condition as shown in Fig. \ref{fig:tempPM}. For our crystal, the collinear output is obtained at 40 $\degree$C.
\subsection{Observation of polarization entanglement through OAM sorting}
We use a double Mach-Zehnder OAM sorting interferometer to separate the photons on the basis of their OAM and reveal the entanglement in polarization. A double Mach-Zehnder type interferometer could be understood as a normal Mach-Zehnder interferometer, folded back such that the input and output beam splitters become the same. Such a configuration will have the stability of a common path interferometer, since both the arms see same optical components, and the ease of inserting independent components in the interfering arms as in a Mach-Zehnder interferometer.
The collinear correlated pairs of photons having even and odd OAM orders are sent to a double Mach-Zehnder interferometer containing two Dove prisms which are kept in the individual paths as given in Fig. \ref{fig:OAMsortPOLent}. The Dove prisms are kept such that their relative orientation is perpendicular to each other. The individual photons undergo single photon interference within the interferometric sorter. Photons carrying an odd OAM will constructively interfere in the odd port (O) whereas photons having even OAM will show up in the even port (E). The polarization projections are done using the combination of a half wave plate and a polarizer kept in each output port.
\begin{figure}
\caption{Schematic to sort the even-odd states of OAM from a collinear SPDC with pump carrying OAM ($l_{p} = 1$).}
\label{fig:OAMsortPOLent}
\end{figure}
At first, an alignment laser beam (810 nm, Thorlabs) is used to verify the sorting of even and odd OAM modes. The collinear output is then sent along the same path. Coincidences are maximized in the detectors kept in ports E and O, and polarization projection measurements are carried out to observe the entanglement visibility.
\subsection{Observation of OAM entanglement through polarization sorting}
The entanglement in the even-odd basis of orbital angular momentum can be observed by separating the indistinguishable photons with their polarization as a label.
\begin{figure}
\caption{Schematic to observe entanglement in the even-odd basis of OAM by sorting photons in polarization. Indistinguishable photons in the type-II down-converted pairs are sorted in polarization using a polarizing beam splitter (PBS).}
\label{fig:POLsortOAMent}
\end{figure}
A polarizing beam splitter (PBS) separates the H and V polarized photons in the transmitted and reflected ports respectively as given in Fig. \ref{fig:POLsortOAMent}. OAM projections are done using a spatial light modulator (SLM) through phase-flattening and coupling to single mode fibers. Additional lenses are used to image the crystal plane to the SLM, as well as the SLM plane to the coupling fibre tip for effective coupling. The combination of two lenses, $\mathrm{L_2}$ and $\mathrm{L_3}$ in each arm, image the modes generated in the crystal plane onto the SLM planes. The lens after the SLM, $\mathrm{L_4}$ along with the aspheric lens within the fiber coupler, $\mathrm{L_{FC}}$, image the modes generated after phase-flattening at the SLM onto the fiber tip. The lenses are chosen such that the spatial mode sizes match the mode field diameter of the fiber coupling system. OAM projections in the even-odd basis are carried out with the help of identical spatial light modulators kept in the two arms. The holograms corresponding to superpositions of even orders and odd orders act as the counterparts of D and A projections in the HV basis.
\section{Results and Discussion}
While passing through the OAM sorting setup given in Fig. \ref{fig:OAMsortPOLent}, the down converted photons alternately choose between the even and odd ports depending upon their OAM value. Before making polarization entanglement measurements on these photons, the action of even-odd sorting within our setup needs to be verified.
\begin{figure}
\caption{Verification of sorting of even and odd OAM states in the folded Mach-Zehnder sorter. The top two rows correspond to the outputs in the even and odd ports when pumped with a Gaussian ($l_p$ =0) and the bottom two rows correspond to that for a pump carrying $l_p$ =1 OAM.}
\label{fig:sorted}
\end{figure}
The OAM state of the photons in the output ports of the sorter is measured using the standard technique involving phase-flattening through SLM and coupling to single mode fibers. The singles in each port corresponding to different OAM values are represented as a grayscale chart in Fig. \ref{fig:sorted}. The values are normalized with respect to the number of photons in the Gaussian mode when pumped with a Gaussian beam, being the largest among all. When pumped with a Gaussian beam, photons are down converted in pairs of odd-odd or even-even pairs, following the conservation of OAM, and thus the photon pairs end up in the same port. It can be easily seen from the chart that photons carrying even and odd OAM values line up in the corresponding ports and their intensities are defined by the OAM spectrum of the SPDC output. For a pump carrying OAM, $l_p = 1$, the pairs are generated in even-odd pairs and they go to different ports. This is evident from how the corresponding intensity values are distributed between the two ports. For example, 0 in even port and 1 in odd port show similar intensity since they are generated together and so on. Moreover, the stark complementary behaviour in the intensity corresponding to even and odd ports for a particular OAM value shows the effective sorting in the setup.
Figure \ref{fig:polviscoll} shows the polarization correlations between the even and odd output ports of the sorter [Fig. \ref{fig:OAMsortPOLent}]. The indistinguishable photons are efficiently sorted under the label of their orbital angular momentum and polarization correlations are observed in both HV as well as DA basis.
\begin{figure}
\caption{Polarization correlations corresponding to projections in the output ports of the even-odd sorted collinear SPDC output. Error bars indicate statistical uncertainty of one standard deviation.}
\label{fig:polviscoll}
\end{figure}
The normalized coincidences are plotted along y-axis with the variation of $\theta_2$ for (blue) $\theta_1$ = 0$\degree$, (red) $\theta_1$ = 45$\degree$, (green) $\theta_1$ = 90$\degree$ and (purple) $\theta_1$ = 135$\degree$. The observed visibilities are 77.5$\pm$0.3\% (HV basis) and 71.6$\pm$0.3\% (DA basis).
The Bell parameter is estimated to be, S = 2.11 $\pm$ 0.03. It can be seen in the plot that the minima corresponding to different visibility profiles are not going completely to zero. This is due to the possible leakage of even OAM modes into the odd port (and vice versa). The reduced visibility can be understood as a manifestation of the imperfections in the sorting interferometer. Hence, with an improved interferometric sorter, it is possible to obtain near unity visibility.
The OAM correlations between the H and V output ports of the polarizing beam splitter [Fig. \ref{fig:POLsortOAMent}] in the even-odd basis of OAM is given in Fig. \ref{fig:oamviscoll}. The indistinguishable photons are efficiently sorted under the label of their polarization and OAM visibility is observed in both EO as well as $\mathrm{D_{EO}A_{EO}}$ basis. The normalized coincidences are plotted along y-axis with the variation of $\theta_2$ for (blue) $\theta_1$ = 0$\degree$, (red) $\theta_1$ = 45$\degree$, (green) $\theta_1$ = 90$\degree$ and (purple) $\theta_1$ = 135$\degree$.
\begin{figure}
\caption{OAM correlations corresponding to two orthogonal polarization projections in the collinear SPDC output. Error bars indicate statistical uncertainty of one standard deviation.}
\label{fig:oamviscoll}
\end{figure}
A calculation of visibility gives 92.7$\pm$0.3\% (EO basis) and 80.9$\pm$0.3\% ($\mathrm{D_{EO}A_{EO}}$ basis). The Bell parameter is estimated to be, S = 2.46 $\pm$ 0.08.
While one is required to align multiple interferometers along with sorters to undertake a general set of projective measurements in the true linear and diagonal even-odd basis \cite{cbhanuevenodd}, we have carried out the measurements through OAM projections using SLM. The use of SLM however introduces efficiency constraints and limitations in exploiting all the available OAM modes. Since our aim is to demonstrate the duality of entanglement in our setup, we have made the measurements in a basis defined by $l \in \{1,2\}$ where 1 and 2 represent the odd and even OAM states respectively. However, for practical applications, the projective measurements in the even-odd basis need to be done as given in Ref. \cite{cbhanuevenodd} in order to explore all the available photons. The difference between the visibility of E and O curves in Fig. \ref{fig:oamviscoll} can be easily understood as the result of our choice of a reduced OAM basis consisting of only 1 and 2. Equal visibility could be achieved by doing the measurements in the full even-odd basis as mentioned above.
\section{Conclusions}
In this paper, we demonstrate the duality in entanglement of a collinear, indistinguishable pair of photons generated in a spontaneous parametric down conversion process. We show polarization entanglement for indistinguishable photons by sorting the photon OAM using a double Mach-Zehnder even-odd sorter. This method can increase the availability of entangled photons since we are not eliminating any photon from the generated output in contrast to the case of limiting them to two-dimensional OAM bases such as ($+l,-l$) or ($0,l$). All the down-converted photons are sorted using an even-odd sorter in order to observe the polarization entanglement of otherwise indistinguishable collinear photons. Similarly, we demonstrate OAM entanglement by sorting photons using a simple polarizing beam splitter and executing OAM projections on the photon pairs in the even-odd basis.
The entanglement studies of systems that display duality can give identical results in both the variables. This could evoke great interest in studying entanglement unaffected by the mutual interaction of particles. By further improving the efficiency of the sorter and incorporating all available OAM modes in even-odd projections, the entanglement measures estimated in the paper could be improved, yielding closer agreement between the two degrees of freedom. In addition, duality assisted observation of entanglement can be used as a test for verification of indistinguishability of photons in quantum information processing. The measurement of entanglement in the complementary variables could also reveal any change in the indistinguishability of photons over communication channels which could have arisen due to possible eavesdropping. The indistinguishable photons entangled in complementary variables may also find applications in distributed quantum sensing through phase estimation as well as remote entanglement generation in a quantum network.
\end{document} |
\begin{document}
\chapter*{Stability properties of the ENO method}
\textbf{\large
Ulrik Skre Fjordholm\footnote{Department of Mathematical Sciences, NTNU, 7491 Trondheim, Norway. \newline Email: [email protected]}$^,$\footnote{Research supported in part by the grant \textit{Waves and Nonlinear Phenomena} (WaNP) from the Research Council of Norway.}
}
{\small
\textbf{Abstract}
We review the currently available stability properties of the ENO reconstruction procedure, such as its monotonicity and non-oscillatory properties, the sign property, upper bounds on cell interface jumps and a total variation-type bound. We also outline how these properties can be applied to derive stability and convergence of high-order accurate schemes for conservation laws.
}
\section{Introduction}
The ENO (Essentially Non-Oscillatory) reconstruction method is a method of recovering---to a high degree of accuracy---a function $v$, given only discrete information such as a finite number of point values $v_i = v(x_i)$ or local averages
\[
\avg{v}_i = \Xint-_{I_i} v(x)\ dx, \qquad i\in\mathbb{Z}
\]
over \textit{cells} $I_i = [x_{i-\hf}, x_{i+\hf})$. (Here and below we will denote $\Xint-_I = \frac{1}{|I|}\int$.) The method was first developed as a means of increasing the order of accuracy of numerical methods for hyperbolic conservation laws. Solutions of these types of PDEs are at best \textit{piecewise} smooth, and can have large jump discontinuities. The ENO method accomplishes the feat of approximating $v$ to a high degree of accuracy in smooth parts, while avoiding ``Gibbs-like'' oscillations near the discontinuities. The purpose of this paper is to review the currently known stability properties of the ENO method, and the application of these to numerical methods for hyperbolic conservation laws.
The ENO reconstruction method was originally developed by Ami Harten~\cite{Har86} and further developed and analyzed by Harten, Osher, Engquist and Chakravarthy in a series of papers \cite{HO87,HEOC86,HEOC87}. Since then it has been generalized and applied to a number of areas. In this paper we will concentrate on the one-dimensional version of the ENO reconstruction method, and its application to approximate one-dimensional scalar conservation laws. Thus, we leave out a large body of work on multi-dimensional generalizations of ENO, related ``ENO-type'' reconstruction methods, and applications of ENO to systems of conservation laws, as well as other fields such as data compression/representation and image analysis/reconstruction. Multi-dimensional ENO methods were introduced by Shu and Osher on Cartesian (tensor-product) meshes \cite{SO88}, and generalized to unstructured (triangular) meshes by Harten and Chakravarthy \cite{HC91} and by Abgrall and Lafon \cite{AL93} (see also \cite{Abg94}). Related ``ENO-type'' methods include the highly successful Weighted ENO method \cite{LOC94,JS96}, biased ENO \cite{Shu90}, ENO-SR (subcell resolution) \cite{Har89} and its multi-dimensional generalization GENO (geometric ENO) \cite{SKS97}, and ENO-EA (edge adapted) \cite{ACDDM08}. For applications of ENO apart from conservation laws we mention in particular Harten's work on multiresolution methods \cite{Har96}; see also \cite{AD00}.
Here follows an outline of the rest of the paper. In Section \ref{sec:enorecon} we motivate and describe the ENO reconstruction method. In Section \ref{sec:conslaws} we briefly describe the application of the ENO method to (scalar) conservation laws; we show that the resulting second-order accurate scheme is convergent; and we derive a list of \textit{a priori} bounds that imply convergence of (one class of) higher-order ENO-type schemes. Section \ref{sec:enostab} is the main section of the paper. We start by listing some immediate stability properties of the ENO method, and move on to describing some of the more non-trivial properties such as the sign property, upper bounds on jumps and the ``essentially non-oscillatory'' property.
We have attempted to make this paper as self-contained as possible. In particular, Sections \ref{sec:enorecon} and \ref{sec:enostab} should be accessible also to readers without a background in PDEs.
\section{The ENO reconstruction method}\label{sec:enorecon}
For the sake of completeness we describe here the ENO reconstruction method. We refer to the review article by Shu and Zhang \cite{SZ16} for further details.
Let us fix a partition $(I_i)_{i\in\mathbb{Z}}$ of the real line, where each \textit{cell} $I_i$ is an interval $I_i = [x_{i-\hf}, x_{i+\hf})$ of length $\mathcal{D}x_i = x_{i+\hf}-x_{i-\hf}$, bounded from above by $\mathcal{D}x=\max_i\mathcal{D}x_i$. Let $(\avg{v}_i)_{i\in\mathbb{Z}}\subset\mathbb{R}$ be a given collection of numbers, which we interpret as the cell averages of some unknown function $v$,
\begin{equation}\label{eq:avgvdef}
\avg{v}_i = \Xint-_{I_i} v(x)\ dx.
\end{equation}
The ENO (Essentially Non-Oscillatory) reconstruction method \cite{Har86,HEOC87} aims to \emph{reconstruct} $v$ by producing a collection of $(k-1)$th order polynomials $p_i = p_i(x)$ which approximate $v$ to $k$th order:
\begin{equation}\label{eq:kthorderapprox}
p_i(x) = v(x) + e(x)\mathcal{D}x_i^{k} \qquad \forall\ x\in I_i,
\end{equation}
where $e(x)$ denotes the leading-order error term. The reconstruction is required to conserve mass, in the sense that
\begin{equation}
\Xint-_{I_i} p_i(x)\ dx = \avg{v}_i \qquad \forall\ i\in\mathbb{Z},
\end{equation}
and is required to be as ``non-oscillatory'' as possible.
The properties of accuracy and mass conservation are automatically satisfied if $p_i$ interpolates the cell average values $\avg{v}_j$ over any of the $k$ \emph{stencils}
\[
\{s,\dots,s+k-1\}, \qquad i-k+1 \leqslant s \leqslant i.
\]
Thus, $p_i$ is defined as the unique $(k-1)$th order polynomial which satisfies
\begin{equation}\label{eq:cellavginterp}
\Xint-_{I_j} p_i(x)\ dx = \avg{v}_j \qquad \text{for } j=s,\dots,s+k-1,
\end{equation}
for some integer $s = s_i\in\{i-k+1,\dots,i\}$ called the \emph{stencil index}. The problem of finding $p_i$ satisfying \eqref{eq:cellavginterp} is a somewhat nonstandard interpolation problem, and Harten \cite{Har86} suggested two approaches.
In the \textit{reconstruction via deconvolution} (RD) approach, it is observed that \eqref{eq:avgvdef} is a convolution of $v$ with the indicator function over $I_i$. Taylor expanding $v$ and comparing with \eqref{eq:cellavginterp} results in an upper triangular linear system for $p_i$.
In the \textit{reconstruction via primitive function} (RP) approach we define the primitive of $v$,
\begin{equation}\label{eq:primitive}
V(x) = \int_{-\infty}^x v(y)\ dy
\end{equation}
(the lower limit of this integral is irrelevant), and observe that $V$ is precisely known at every cell interface,
\[
V(x_{i+\hf}) = \sum_{j\leqslant i} \mathcal{D}x_j\avg{v}_j.
\]
If we let $P_i$ be the unique $k$th order polynomial which interpolates $V$ over the points $\{x_{s-\hf}, \dots, x_{s+k-\hf}\}$, then the $(k-1)$th order polynomial $p_i(x) = \frac{d}{dx}P_i(x)$ satisfies \eqref{eq:cellavginterp}.
The RD approach requires a uniform mesh (i.e.\ $\mathcal{D}x\equiv$ const.), whereas the RP approach works for any (one-dimensional) mesh. Even on a uniform mesh, the RD and RP approaches are \textit{not} equivalent, i.e.\ they yield distinct reconstructions $p_i$. We are unaware of any further work on the RD methodology beyond the original papers by Harten et al.\ \cite{HO87,HEOC86,HEOC87}, and we will concentrate on RP for the remainder of this paper. (See also Remark \ref{rem:rdsignprop}.)
\subsubsection*{Choosing the stencil index}
The algorithm to select the stencil index $s_i$ is what characterizes the ENO reconstruction procedure. A naive choice of the stencil index could be the all-upwind or all-downwind stencils $s_i\equiv i-k+1$ or $s_i\equiv i$; however, given the possible non-smoothness or discontinuity of $v$, these choices would lead to ``Gibbs phenomena''---large oscillations in non-smooth regions.
Harten \cite{Har86} proposed an iterative, data-dependent algorithm to compute $s_i$. The algorithm is based upon the divided differences of $V$, defined as
\[
\begin{cases}
V[x_{i+\hf}] = V(x_{i+\hf}) \\
V[x_{i-\hf}, \dots, x_{j+\hf}] = \frac{V[x_{i+\hf}, \dots, x_{j+\hf}] - V[x_{i-\hf}, \dots, x_{j-\hf}]}{x_{j+\hf} - x_{i-\hf}} & \forall\ i<j.
\end{cases}
\]
Starting with the stencil $\{x_{i-\hf}, x_{i+\hf}\}$, the ENO stencil selection procedure adds either the left or right point $x_{i-\thf}$ or $x_{i+\thf}$, depending on which of the divided differences $V[x_{i-\thf},x_{i-\hf},x_{i+\hf}]$ or $V[x_{i-\hf},x_{i+\hf},x_{i+\thf}]$ is smaller in absolute value. This process is then iterated, ending up with an interpolation stencil $\{x_{s_i-\hf}, \dots, x_{s_i+k-\hf}\}$ for some $s_i\in\{i-k+1,\dots,i\}$. Recalling that the $k$th divided difference of $V$ is an approximation of the $(k-1)$th derivative of $v$,
\[
V[x_{i-\hf}, \dots, x_{i+k-\hf}] = \frac{V^{(k)}(\xi)}{k!} = \frac{v^{(k-1)}(\xi)}{k!}, \qquad \xi\in[x_{i-\hf}, x_{i+k-\hf}],
\]
we see that the ENO procedure iteratively adds a new point to the interpolation stencil ``in the direction of smoothness''.
Since $V[x_{i-\hf},x_{i+\hf}] = \avg{v}_i$, we can write
\[
V[x_{i-\hf}, \dots, x_{j+\hf}] = [\avg{v}_i,\dots,\avg{v}_j] \qquad \forall\ i\leqslant j
\]
where the ``cell-average divided differences'' are defined as
\begin{equation}\label{eq:avgdivdiff}
\begin{cases}
[\avg{v}_{i}] = \avg{v}_i \\
[\avg{v}_{i},\dots,\avg{v}_{j}] = \frac{[\avg{v}_{i+1},\dots,\avg{v}_{j}] - [\avg{v}_{i},\dots,\avg{v}_{j-1}]}{x_{j+\hf}-x_{i-\hf}} & \forall\ i<j.
\end{cases}
\end{equation}
We summarize the ENO procedure using this notation.
\begin{framed}
\begin{algorithm}\label{alg:eno}
\textbf{(ENO Stencil Selection Procedure)}
\begin{algorithmic}
\State{$s_i^1 = i$}
\For{$\ell=1,\dots,k-1$}
\If{$\left|\bigl[\avg{v}_{s_i^\ell-1}, \dots, \avg{v}_{s_i^\ell+\ell-1}\bigr]\right| < \left|\bigl[\avg{v}_{s_i^\ell}, \dots, \avg{v}_{s_i^\ell+\ell}\bigr]\right|$}
\State{$s_i^{\ell+1} = s_i^\ell-1$}
\Else
\State{$s_i^{\ell+1} = s_i^\ell$}
\EndIf
\EndFor
\State{$s_i = s_i^k$}
\State {Let $P_i$ interpolate $V$ over $\{x_{s_i-\hf},\dots,x_{s_i+k-\hf}\}$}
\State {Define $p_i(x) = \frac{d}{dx}P_i(x)$}
\end{algorithmic}
\end{algorithm}
\end{framed}
The implications of the ENO stencil selection procedure are easiest to see with the Newton form of the interpolating polynomial $P_i$. It is straightforward to show by induction that the Newton form of $P_i$ can be expressed as
\[
P_i(x) = \sum_{\ell=0}^k V[x_{s_i^\ell-\hf}, \dots, x_{s_i^\ell+\ell-\hf}] \prod_{m=0}^{\ell-1}(x-x_{s_i^{\ell-1}+m-\hf}),
\]
where we have defined $s_i^{-1}=s_i^0 = i$. After differentiating and using the notation \eqref{eq:avgdivdiff} we get
\begin{equation}\label{eq:enoexpression}
\begin{split}
p_i(x) &= \sum_{\ell=1}^k V[x_{s_i^\ell-\hf}, \dots, x_{s_i^\ell+\ell-\hf}] \sum_{n=0}^{\ell-1}\prod_{\substack{m=0\\m\neq n}}^{\ell-1}(x-x_{s_i^{\ell-1}+m-\hf}) \\*
&= \sum_{\ell=1}^{k} [\avg{v}_{s_i^{\ell}}, \dots, \avg{v}_{s_i^{\ell}+\ell-1}] \sum_{n=0}^{\ell-1}\prod_{\substack{m=0\\m\neq n}}^{\ell-1}(x-x_{s_i^{\ell-1}+m-\hf})
\end{split}
\end{equation}
(see also \cite[p.\ 81]{Har86}). Thus, the ENO stencil selection procedure chooses each index $s_i^\ell$ so that the above coefficients $[\avg{v}_{s_i^{\ell}}, \dots, \avg{v}_{s_i^{\ell}+\ell-1}]$ are as small as possible, thereby obtaining the least oscillatory polynomial possible.
Note that both the ENO stencil selection procedure and the formula for $p_i$ can be written completely in terms of the divided differences of $\avg{v}$. Thus, it is not necessary to compute the primitive $V$ or its divided differences.
\begin{remark}\label{rem:pointeno}
There is also a point-value version of the ENO reconstruction method. Given the point-values $v_i=v(x_i)$ of some function $v$, this method employs a similar algorithm to obtain a reconstruction $p_i(x) = v(x) + O(\Delta x_i^k)$. The reconstruction $p_i$ is given by the $(k-1)$th order polynomial interpolating $(v_j)_{j\in\mathbb{Z}}$ over the points $x_{s_i},\dots,x_{s_i+k-1}$, where $s_i$ is obtained by replacing every occurrence of $\avg{v}_j$ in Algorithm \ref{alg:eno} by $v_j$. See \cite{SO88} for further details and \cite{Fjo13,FMT12a} for a stability analysis.
\end{remark}
\section{Application to conservation laws}\label{sec:conslaws}
The ENO method was originally developed as a means of increasing the order of accuracy of finite volume schemes for hyperbolic conservation laws. We consider here only one-dimensional, scalar conservation laws
\begin{equation}\label{eq:cl}
\begin{split}
\partial_t u + \partial_x f(u) = 0 \\
u(x,0) = u_0(x).
\end{split}
\end{equation}
To establish the notation and some useful identities, we briefly review this setting in Section \ref{sec:fvm}. We refer to the article by Shu and Zhang \cite{SZ16} for further details. In Section \ref{sec:tvdeno} we see that the second-order ENO method results in a TVD, convergent finite volume scheme for scalar conservation laws. In Section \ref{sec:convhighorder} we review one approach to obtaining convergent higher-order accurate schemes.
Below we use the notation from Section \ref{sec:enorecon}. Furthermore, we denote
\begin{equation}
\jmp{\avg{v}}_{i+\hf} = \avg{v}_{i+1}-\avg{v}_i, \qquad \mean{\avg{v}}_{i+\hf} = \frac{\avg{v}_i+\avg{v}_{i+1}}{2}.
\end{equation}
\subsection{Finite volume methods}\label{sec:fvm}
A (semi-discrete) finite volume method for \eqref{eq:cl} aims to compute an approximation of the cell averages
\[
\avg{v}_i(t) \approx \Xint-_{I_i} u(x,t)\,dx \qquad \forall\ t\geqslant0
\]
of the exact (entropy) solution of \eqref{eq:cl}. A consistent, conservative finite volume method for \eqref{eq:cl} is then of the form
\begin{equation}\label{eq:fvmsd}
\frac{d}{dt}\avg{v}_i(t) = - \frac{1}{\Delta x_i}\bigl(F_{i+\hf} - F_{i-\hf}\bigr)
\end{equation}
for some $F_{i+\hf} = F\bigl(\avg{v}_{i-m+1},\dots,\avg{v}_{i+m}\bigr)$, and $F$ is a numerical flux function such as the Godunov, Lax--Friedrichs or Engquist--Osher fluxes. One class of (formally) high-order accurate schemes is obtained by letting
\begin{equation}\label{eq:musclflux}
F_{i+\hf} = F\bigl(v_{i+\hf}^-,\, v_{i+\hf}^+\bigr)
\end{equation}
for some monotone flux $F$. Here, $v_{i+\hf}^\pm$ are the reconstructed cell interface values
\begin{equation}\label{eq:reconcellinterface}
v_{i+\hf}^- = p_i(x_{i+\hf},t), \quad v_{i+\hf}^+ = p_{i+1}(x_{i+\hf},t), \quad p(x,t) = \mathcal{R}(\avg{v}(t))(x)
\end{equation}
for some reconstruction operator $\mathcal{R}$ such as ENO.
To obtain a fully discrete method, we discretize the temporal domain $t\in[0,\infty)$ into discrete points $t^n = n\Delta t$ for some $\Delta t>0$ (which we for simplicity assume is constant), and the aim is to approximate
\[
\avg{v}_i^n \approx \Xint-_{I_i} u(x,t^n)\,dx \qquad \forall\ i\in\mathbb{Z}.
\]
An explicit, fully discrete finite volume method for \eqref{eq:cl} is then of the form
\begin{equation}\label{eq:fvm}
\avg{v}_i^{n+1} = \avg{v}_i^n - \frac{\Delta t}{\Delta x_i}\bigl(F_{i+\hf}^n - F_{i-\hf}^n\bigr)
\end{equation}
for some $F_{i+\hf}^n = F\bigl(\avg{v}_{i-m+1}^n,\dots,\avg{v}_{i+m}^n\bigr)$. This scheme is \emph{total variation diminishing} (TVD) if
\begin{equation}\label{eq:tvd}
{\rm TV}(\avg{v}^{n+1}) \leqslant {\rm TV}(\avg{v}^n),
\end{equation}
so-called after Harten \cite{Har83}.
The scheme \eqref{eq:fvm} can be viewed as a (first-order accurate) Forward Euler discretization of \eqref{eq:fvmsd} (see \cite[Section II.3.3]{GR91} for a rigorous derivation; cf.\ also \cite[p.\ 352]{HEOC86}). Higher-order accurate methods can be obtained using multi-step methods or Strong Stability Preserving (SSP) Runge--Kutta methods \cite{GST01}, which consist of convex combinations of \eqref{eq:fvm}.
\subsection{TVD ENO schemes}\label{sec:tvdeno}
Consider now the (formally) second-order accurate scheme \eqref{eq:fvm} with a flux \eqref{eq:musclflux} using a second-order reconstruction method. Any second-order reconstruction $(p_i)_{i\in\mathbb{Z}}$ of cell averages $(\avg{v}_i)_{i\in\mathbb{Z}}$ must necessarily be of the form
\begin{equation}\label{eq:lininterp}
p_i(x) = \avg{v}_i + \sigma_i(x-x_i)
\end{equation}
where $\sigma_i\in\mathbb{R}$ is the slope of $p_i$. This slope is commonly written in the \emph{slope limited} form
\begin{equation}\label{eq:slopedef}
\sigma_i = \varphi(\theta_i^+)\jmp{\avg{v}}_{i+\hf}, \qquad \theta_i^+ = \frac{\jmp{\avg{v}}_{i-\hf}}{\jmp{\avg{v}}_{i+\hf}}
\end{equation}
for some $\varphi : \mathbb{R}\to\mathbb{R}$ called a \emph{slope limiter}.
Using Harten's work \cite{Har83}, Sweby \cite{Swe84} showed that if the slope limiter satisfies
\begin{equation}\label{eq:swebycondition}
\left|\varphi(\theta_1) - \frac{\varphi(\theta_2)}{\theta_2}\right| \leqslant 2 \qquad \forall\ \theta_1,\theta_2\in\mathbb{R}
\end{equation}
then the explicit discretization \eqref{eq:fvm} is both total variation diminishing (TVD) and uniformly bounded, so the computed solution satisfies
\[
{\rm TV}(\avg{v}^n) \leqslant {\rm TV}(\avg{v}^0), \qquad \|\avg{v}^n\|_{L^\infty} \leqslant \|\avg{v}^0\|_{L^\infty} \qquad \forall\ n\in\mathbb{N}.
\]
As a consequence, there is a subsequence $\Delta t_m, \Delta x_m \to 0$ for which the computed solutions converge towards a weak solution.
It is not hard to see that the second-order ENO reconstruction can be written as \eqref{eq:lininterp}, \eqref{eq:slopedef} with the slope limiter
\begin{equation}\label{eq:enolimiter}
\varphi(\theta) = \begin{cases}
\theta & \text{if } |\theta|<1 \\
1 & \text{if } |\theta|\geqslant1.
\end{cases}
\end{equation}
Although this limiter does not lie in the ``TVD region'' introduced by Sweby \cite{Swe84}, it \textit{does} satisfy \eqref{eq:swebycondition}. Therefore, the scheme \eqref{eq:fvm}, \eqref{eq:musclflux} using second-order ENO reconstruction is both TVD and uniformly bounded, and hence converges (subsequentially) towards a weak solution.
\subsection{Convergence of high-order schemes}\label{sec:convhighorder}
A uniform bound on the total variation of a sequence of approximate solutions---such as the bound \eqref{eq:tvd} provided by TVD schemes---prevents the buildup of high-frequency oscillations, a necessary requirement for the strong convergence of the sequence. However, it is well-known that any TVD scheme for \eqref{eq:cl} is at most second-order accurate when measured in $L^1$. Thus, any proof of stability or convergence of higher (than second) order accurate schemes must necessarily relax the TVD requirement, while still preventing high-frequency oscillations.
We present here one class of convergent, high-order accurate schemes, the so-called TECNO schemes \cite{Fjo13,FMT12b}. As a motivation we first derive the necessary \textit{a priori} bounds for a parabolic regularization of \eqref{eq:cl}, which can be thought of as the effective (modified) equation of the numerical scheme. We then perform the analogous computations for the TECNO schemes.
\subsubsection{Motivation}
Consider the following regularization of \eqref{eq:cl}:
\begin{equation}\label{eq:clreg}
\begin{split}
\partial_t v^\eps + \partial_x f(v^\eps) &= \eps \partial_{xx}v^\eps \\
v^\eps(x,0) &= v_0^\eps(x)
\end{split}
\end{equation}
(where $v_0^\eps$ converges to $u_0$ as $\eps\to0$). The term $\eps\partial_{xx}v^\eps$ can be thought of as the numerical viscosity of a numerical scheme, and $\eps \sim \Delta x^k$, where $k$ is the order of accuracy of the method. Multiplying \eqref{eq:clreg} by $2v^\eps$ we obtain
\begin{equation}\label{eq:energyregular}
\partial_t (v^\eps)^2 + \partial_x q(v^\eps) = \eps\partial_{xx}(v^\eps)^2 - 2\eps(\partial_xv^\eps)^2,
\end{equation}
where $q$ satisfies $q'(u)=2uf'(u)$ for all $u\in\mathbb{R}$. Integrating \eqref{eq:energyregular} over $x\in\mathbb{R}$, $t\in[0,T]$ gives
\begin{equation}\label{eq:energyregintegrated}
\int_\mathbb{R} v^\eps(x,T)^2\ dx = \int_\mathbb{R} v_0^\eps(x)^2\ dx - 2\eps \int_0^T\int_\mathbb{R}(\partial_x v^\eps)^2\ dxdt.
\end{equation}
Thus, we have the two bounds
\begin{subequations}\label{eq:aprioribounds}
\begin{equation}
\|v^\eps(T)\|_{L^2(\mathbb{R})} \leqslant \|u_0\|_{L^2(\mathbb{R})}
\end{equation}
\begin{equation}\label{eq:regwtvbound}
2\eps\int_0^T\int_\mathbb{R}(\partial_x v^\eps)^2\ dxdt \leqslant \|u_0\|_{L^2(\mathbb{R})}^2
\end{equation}
\end{subequations}
for all $\eps>0$, i.e., a uniform $L^2$ bound and a ``weak TV bound''. From these, compensated compactness techniques can be used to show that a subsequence $v^{\eps'}$ converges to a weak solution of \eqref{eq:cl} as $\eps'\to0$. Since the second term on the right-hand side of \eqref{eq:energyregular} is non-positive, we find that any strong limit $u=\lim_{\eps'\to0} v^{\eps'}$ satisfies the entropy condition
\begin{equation}\label{eq:entrcond}
\partial_t u^2 + \partial_x q(u) \leqslant 0.
\end{equation}
We conclude that the \textit{whole} sequence $(v^\eps)_{\eps>0}$ converges strongly to the (unique) entropy solution of \eqref{eq:cl}.
\subsubsection{TECNO schemes}\label{sec:tecno}
We consider now the semi-discrete finite volume method \eqref{eq:fvmsd} with a numerical flux function of the form
\begin{equation}\label{eq:entrstabflux}
F_{i+\hf} = F^*_{i+\hf} - c_{i+\hf}\jmpr{v}_{i+\hf}.
\end{equation}
Here, $\jmpr{v}_{i+\hf} = v_{i+\hf}^+ - v_{i+\hf}^-$ is the cell interface jump in the reconstructed values (cf.\ \eqref{eq:reconcellinterface}) for some reconstruction operator $\mathcal{R}$, to be determined. The diffusion constant $c_{i+\hf}$ is some number satisfying $c_{\max}\geqslant c_{i+\hf}\geqslant c_{\min}>0$, and $F^*$ is a Lipschitz continuous numerical flux, to be determined. Note that if the reconstructed values satisfy, say,
\begin{equation}\label{eq:upperjmpbound}
|\jmpr{v}_{i+\hf}| \leqslant C|\jmp{\avg{v}}_{i+\hf}|
\end{equation}
for some $C>0$ independent of $v$, then $F$ is Lipschitz continuous---a natural assumption in the convergence analysis of finite volume schemes.
Multiplying \eqref{eq:fvmsd} by $2\avg{v}_i(t)$ we obtain
\begin{align*}
\frac{d}{dt}\avg{v}_i^2 + 2\avg{v}_i\frac{F^*_{i+\hf} - F^*_{i-\hf}}{\Delta x_i}
&= 2\avg{v}_i\frac{c_{i+\hf}\jmpr{v}_{i+\hf} - c_{i-\hf}\jmpr{v}_{i-\hf}}{\Delta x_i} \\
&= 2\frac{c_{i+\hf}\mean{\avg{v}}_{i+\hf}\jmpr{v}_{i+\hf} - c_{i-\hf}\mean{\avg{v}}_{i-\hf}\jmpr{v}_{i-\hf}}{\Delta x_i} \\
&\quad - \frac{c_{i+\hf}\jmp{\avg{v}}_{i+\hf}\jmpr{v}_{i+\hf} + c_{i-\hf}\jmp{\avg{v}}_{i-\hf}\jmpr{v}_{i-\hf}}{\Delta x_i}.
\end{align*}
\textit{Assuming} that we can write $2\avg{v}_i(F^*_{i+\hf} - F^*_{i-\hf}) = (Q^*_{i+\hf} - Q^*_{i-\hf})$ (as in the step from \eqref{eq:clreg} to \eqref{eq:energyregular}) for some ``numerical entropy flux $Q^*$'', we can define $Q_{i+\hf} = Q^*_{i+\hf} - 2c_{i+\hf}\mean{\avg{v}}_{i+\hf}\jmpr{v}_{i+\hf}$ and obtain
\begin{equation}\label{eq:tecnoenergyestimate}
\frac{d}{dt}\avg{v}_i^2 + \frac{Q_{i+\hf} - Q_{i-\hf}}{\Delta x_i} = -\frac{c_{i+\hf}\jmp{\avg{v}}_{i+\hf}\jmpr{v}_{i+\hf} + c_{i-\hf}\jmp{\avg{v}}_{i-\hf}\jmpr{v}_{i-\hf}}{\Delta x_i}.
\end{equation}
Summing over $i\in\mathbb{Z}$ and integrating over $t\in[0,T]$, we get
\begin{equation}
\sum_{i\in\mathbb{Z}} \avg{v}_i(T)^2\Delta x_i = \sum_{i\in\mathbb{Z}}\avg{v}_i(0)^2\Delta x_i - 2\int_0^T\sum_{i\in\mathbb{Z}}c_{i+\hf}\jmp{\avg{v}}_{i+\hf}\jmpr{v}_{i+\hf}\, dt
\end{equation}
(compare with \eqref{eq:energyregintegrated}). Assuming now that
\begin{equation}\label{eq:signprop}
\jmp{\avg{v}}_{i+\hf}\jmpr{v}_{i+\hf} \geqslant 0 \qquad \forall\ i\in\mathbb{Z},
\end{equation}
i.e.\ that the jumps $\avg{v}_{i+1}-\avg{v}_i$ and $v_{i+\hf}^+ - v_{i+\hf}^-$ have the same sign, we can conclude that
\begin{subequations}
\begin{equation}
\|v_{\Delta x}(T)\|_{L^2(\mathbb{R})} \leqslant \|v_{\Delta x}(0)\|_{L^2(\mathbb{R})},
\end{equation}
\begin{equation}\label{eq:wrecontvbound}
2\int_0^T\sum_{i\in\mathbb{Z}}c_{i+\hf}\jmp{\avg{v}}_{i+\hf}\jmpr{v}_{i+\hf}\,dt \leqslant \|v_{\Delta x}(0)\|_{L^2(\mathbb{R})}^2
\end{equation}
\end{subequations}
(compare with \eqref{eq:aprioribounds}). The property \eqref{eq:signprop} also ensures that the right-hand side of \eqref{eq:tecnoenergyestimate} is non-positive, so that
\[
\frac{d}{dt}\avg{v}_i^2 + \frac{Q_{i+\hf} - Q_{i-\hf}}{\Delta x_i} \leqslant 0
\]
(compare with \eqref{eq:entrcond}), i.e.\ a discrete entropy inequality is satisfied.
The bound \eqref{eq:wrecontvbound} is not quite a weak TV bound like \eqref{eq:regwtvbound}---for this we would need a bound of the form
\begin{equation}\label{eq:wtvbound}
\int_0^T\sum_{i\in\mathbb{Z}}|\jmp{\avg{v}}_{i+\hf}|^p\,dt \leqslant C
\end{equation}
for some $p\geqslant1$ and $C>0$ independent of $\Delta x$.
We have thus arrived at a list of properties which enable a convergence proof of the finite volume method \eqref{eq:fvm}: The upper bound on reconstructed jumps \eqref{eq:upperjmpbound}, the \textit{sign property} \eqref{eq:signprop}, and the ``weak TV bound'' \eqref{eq:wtvbound}.
The sign property and the upper bound have been proven for the ENO reconstruction method and are discussed in Sections \ref{sec:signprop} and \ref{sec:upperjmpbound}, respectively. For $k=2$ it has been proven---and conjectured for $k>2$---that the ``reconstructed TV bound'' \eqref{eq:wrecontvbound} implies the ``weak TV bound'' \eqref{eq:wtvbound}. This is discussed in Section \ref{sec:enoconjecture}. We refer to this conjecture as the \textit{ENO TV conjecture}.
In \cite{Fjo13,FMT12b} the authors constructed schemes of the form \eqref{eq:fvmsd}, \eqref{eq:entrstabflux} which uses the ENO reconstruction method---the so-called \textit{TECNO schemes}. We summarize the main convergence theorem here and refer to \cite{Fjo13} for the proof.
\begin{theorem}
For every $k$ for which the ENO TV conjecture holds, we have the following. If the approximate solution computed by the $k$th order TECNO method is $L^\infty$-bounded, then the sequence of approximate solutions converges to the entropy solution of \eqref{eq:cl} as $\Delta x\to0$.
\end{theorem}
\begin{remark}
With some extra effort, the above computation can be generalized from the square entropy $v^2$ to arbitrary entropies $\eta(v)$. See the review article by Tadmor \cite{Tad16} (see also \cite{Tad03,FMT12b}) for more information on so-called entropy stable methods.
\end{remark}
\section{ENO stability properties}\label{sec:enostab}
In this section we review the currently known stability properties of the ENO reconstruction method. In Section \ref{sec:easyprops} we summarize some immediate (but nevertheless useful) properties of the ENO reconstruction. In Section \ref{sec:signprop} we prove the \textit{sign property} of the ENO method, and in Section \ref{sec:upperjmpbound} we prove an upper bound on the jump $\jmpr{v}=v_{i+\hf}^+-v_{i+\hf}^-$. We discuss the ENO TV conjecture in Section \ref{sec:enoconjecture}. Recall from Section \ref{sec:convhighorder} that all of these properties are essential for the convergence of the high-order TECNO schemes.
In Section \ref{sec:enomeshdep} we prove some well-known \textit{mesh dependent} properties of ENO. As it turns out, the sign property is a necessary ingredient in a rigorous proof of some of these properties. We conclude in Section \ref{sec:enodeficiency} by mentioning some deficiencies of ENO.
\subsection{Immediate properties}\label{sec:easyprops}
\subsubsection{Mesh invariance and linearity}\label{sec:enolinear}
Under the mapping $x \to a+bx$ for any $a\in\mathbb{R}$ and $b>0$, the reconstructed polynomial is $p_i(\frac{x-a}{b})$.
If $(\avg{v}_i)_{i\in\mathbb{Z}}$ is replaced by $(\alpha\avg{v}_i + \beta)_{i\in\mathbb{Z}}$ for any $\alpha,\beta\in\mathbb{R}$, then the ENO reconstruction $p_i(x)$ is replaced by $\alpha p_i(x)+\beta$.
\subsubsection{Discontinuity across cell edges}\label{sec:disccelledges}
As a rule of thumb, the ENO reconstruction $p=\mathcal{R}(\avg{v})$ is discontinuous \textit{at least} at every $k$th cell interface $x_{i+\hf}$. To see this, note that neighboring cells with the same stencil index $s_i=s_{i+1}$ have the same reconstruction $p_i=p_{i+1}$ (and are thus continuous at $x_{i+\hf}$), whereas if $s_i<s_{i+1}$ then $p_i\neq p_{i+1}$, and hence $p_i(x_{i+\hf})\neq p_{i+1}(x_{i+\hf})$ (except in very rare cases, such as when $v$ is itself a $(k-1)$th order polynomial). Since $s_i$ must change at least at every $k$th index $i$, this yields a discontinuity in $p$.
At points of discontinuity $x_{i+\hf}$, the size of the jump $p_{i+1}(x_{i+\hf})-p_i(x_{i+\hf})$ is $O(\Delta x^k)$ (see Section \ref{sec:signprop}). Note that the cell interface jump $p_{i+1}(x_{i+\hf})-p_i(x_{i+\hf})$ can---and often will---be zero even when $\avg{v}_{i+1}-\avg{v}_i\neq 0$.
\subsubsection{Uniform $k$th order accuracy}\label{sec:uniformacc}
Let $v\in C^\infty(\mathbb{R})$ with primitive $V(x)$ defined in \eqref{eq:primitive}. Through a Taylor expansion of $V$ it is easy to see that the ENO reconstruction $p=\mathcal{R}(\avg{v})$ of $(\avg{v}_i)_{i\in\mathbb{Z}}$ is a $k$th order approximation of $v$. More specifically, $p_i$ satisfies the relation \eqref{eq:kthorderapprox} with an error term $|e(x)| \leqslant C\|\frac{d^k v}{dx^k}\|_{L^\infty}$ for some $C = C_k$. In each cell $I_i$, the error term $e(x)$ is continuous (but not Lipschitz continuous) with at least one zero. It is discontinuous only at those cell interfaces $x_{i+\hf}$ where $p$ is discontinuous (see Section \ref{sec:disccelledges}).
\subsection{The sign property}\label{sec:signprop}
Consider a reconstruction procedure $\mathcal{R}$, mapping a collection of cell averages $(\avg{v}_i)_{i\in\mathbb{Z}}$ to a piecewise polynomial function $\sum_i p_i\mathbbm{1}_{I_i}$. As before, define the cell interface values $v_{i+\hf}^- = p_i(x_{i+\hf})$ and $v_{i+\hf}^+ = p_{i+1}(x_{i+\hf})$ and the jump $\jmpr{v}_{i+\hf} = v_{i+\hf}^+-v_{i+\hf}^-$. We say that $\mathcal{R}$ satisfies the \emph{sign property} if for every $i\in\mathbb{Z}$,
\begin{equation}\label{eq:signpropexact}
\begin{split}
\text{if}\quad \jmp{\avg{v}}_{i+\hf} > 0\quad & \text{then}\quad \jmpr{v}_{i+\hf} \geqslant 0 \\
\text{if}\quad \jmp{\avg{v}}_{i+\hf} < 0\quad & \text{then}\quad \jmpr{v}_{i+\hf} \leqslant 0 \\
\text{if}\quad \jmp{\avg{v}}_{i+\hf} = 0\quad & \text{then}\quad \jmpr{v}_{i+\hf} = 0.
\end{split}
\end{equation}
As we have seen in Section \ref{sec:convhighorder}, the sign property implies that the diffusion coefficient in finite volume schemes of the form \eqref{eq:fvm}, \eqref{eq:entrstabflux} has the right sign.
\begin{figure}
\caption{ENO reconstruction of randomly chosen cell averages. Black lines: cell averages. Red curves: reconstruction. Squares: cell interface values.}
\label{fig:eno}
\end{figure}
The sign property is illustrated in Figure \ref{fig:eno}, which shows a third-, fourth- and fifth-order ENO reconstruction of randomly chosen cell averages. Even though the reconstructed polynomial may have large variations within each cell, its jumps at cell interfaces always have the same sign as the jumps of the cell averages.
In \cite{Fjo13,FMT12a} it was shown that the $k$th order ENO reconstruction satisfies the sign property, for any $k\in\mathbb{N}$ and for any mesh $(x_{i+\hf})_{i\in\mathbb{Z}}$. We provide here a sketch of the proof.
\begin{proof}[Proof of ENO sign property (sketch)]
The first step is to derive the following expression for the jump in reconstructed values:
\begin{equation}\label{eq:jmpexpr}
\jmpr{v}_{i+\hf} = \sum_{s=s_i}^{s_{i+1}-1} [\avg{v}_s,\dots,\avg{v}_{s+k}] X_{i,s}
\end{equation}
where
\[
X_{i,s} := (x_{s+k+\hf}-x_{s-\hf}) \prod_{\substack{m=0\\m\neq i-s}}^{k-1}(x_{i+\hf} - x_{s+m+\hf}).
\]
When $s_i=s_{i+1}$, i.e.\ the neighboring stencils are the same, then \eqref{eq:jmpexpr} yields $\jmpr{v}_{i+\hf}=0$ and the reconstruction is continuous across $x_{i+\hf}$. Observe that \eqref{eq:jmpexpr} expresses $\jmpr{v}_{i+\hf}$ in terms of only $k$th order divided differences of $\avg{v}$, instead of divided differences of order $1, \dots, k-1$, as one might expect from \eqref{eq:enoexpression}. In particular, when $k=1$ we get $\jmpr{v}_{i+\hf} = [\avg{v}_i,\avg{v}_{i+1}](x_{i+\thf}-x_{i-\hf}) = \jmp{\avg{v}}_{i+\hf}$, as expected.
The proof of \eqref{eq:jmpexpr} amounts to a simple manipulation of Newton polynomials, but the idea is quite clear: Both $v_{i+\hf}^-$ and $v_{i+\hf}^+$ are $k$th order approximations of $v(x_{i+\hf})$, with truncation terms of the form $[\avg{v}_s,\dots,\avg{v}_{s+k}] = \frac{1}{k!}\frac{d^k v}{dx^k}(\xi)$.
The next step is to show that each summand in \eqref{eq:jmpexpr} has the same sign as $\jmp{\avg{v}}_{i+\hf}$. Because ${\rm sgn}( X_{i,s}) = (-1)^{s+k+1}$, we need only to show that
\begin{equation}\label{eq:signdivdiff}
\jmp{\avg{v}}_{i+\hf} [\avg{v}_s,\dots,\avg{v}_{s+k}] (-1)^{s+k+1} \geqslant 0 \qquad \forall\ s=s_i^k,\dots,s_{i+1}^k-1.
\end{equation}
The proof of \eqref{eq:signdivdiff} is obvious for $k=1$. Assume that \eqref{eq:signdivdiff} holds for some $k\geqslant1$. It suffices to consider the case $\jmp{\avg{v}}_{i+\hf} > 0$, so we have
\[
[\avg{v}_s,\dots,\avg{v}_{s+k}] (-1)^{s+k+1} \geqslant 0 \qquad \text{for } s = s_i^k,\dots,s_{i+1}^k-1.
\]
The fact that $[\avg{v}_s,\dots,\avg{v}_{s+k+1}] (-1)^{s+k+2} \geqslant 0$ for $s = s_i^{k+1},\dots,s_{i+1}^{k+1}-1$ then follows by writing out the definition of these $(k+1)$th divided differences in terms of $k$th divided difference and using the induction hypothesis and the ENO choice of $s^{k+1}$. We refer to \cite{Fjo09,FMT12a} for the full proof.
\end{proof}
We emphasize that the sign property is mesh independent, in the sense that it holds for \textit{any} mesh $(x_{i+\hf})_{i\in\mathbb{Z}}$, regardless of the mesh width $\Delta x_i$.
\begin{remark}
The ``point-value version'' of ENO (see Remark \ref{rem:pointeno}) also satisfies the sign property \eqref{eq:signpropexact}; see \cite{Fjo13,FMT12a}.
\end{remark}
\begin{remark}\label{rem:rdsignprop}
It is easy to confirm by numerical experiments that the ``RD'' (reconstruction with deconvolution) ENO method does \emph{not} satisfy the sign property. Indeed, Figure 3b of \cite{HEOC87}, which shows a fourth order RD ENO reconstruction, clearly violates the sign property at the fifth cell interface from the left.
\end{remark}
\subsection{Upper bound on jumps}\label{sec:upperjmpbound}
In \cite{Fjo13,FMT12a} it was shown that the ENO reconstruction procedure satisfies---in addition to the sign property---an upper bound on the jumps in the reconstructed polynomial. More precisely, for every $k\in\mathbb{N}$, the $k$th order ENO reconstruction satisfies
\begin{equation}\label{eq:upperbound}
0 \leqslant \frac{\jmpr{v}_{i+\hf}}{\jmp{\avg{v}}_{i+\hf}} \leqslant C_{k,i} \qquad \forall\ i\in\mathbb{Z},
\end{equation}
where $C_{k,i}$ depends only on $k$ and on the ratios $|I_j|/|I_\ell|$ of neighboring cell sizes. (Note that the first inequality in \eqref{eq:upperbound} is merely a restatement of the sign property \eqref{eq:signpropexact}.) Recall from Section \ref{sec:tecno} that this bound ensures Lipschitz continuity of the numerical flux \eqref{eq:entrstabflux}.
In the case of a uniform mesh, $|I_i|\equiv$ const., the constant $C_{k,i}\equiv C_k$ can be computed explicitly; see Table \ref{tab:upperBound}.
\begin{table}
\begin{center}
\begin{tabular}{|c|c|}
\hline $k$ & Upper bound $C_k$ \\
\hline 1 & 1 \\
\hline 2 & 2 \\
\hline 3 & $10/3 = 3.333\dots$ \\
\hline 4 & $16/3 = 5.333\dots$ \\
\hline 5 & $128/15 = 8.533\dots$ \\
\hline 6 & $208/15 = 13.866\dots$ \\
\hline
\end{tabular}
\end{center}
\caption{The upper bound in \eqref{eq:upperbound} for a uniform mesh.}
\label{tab:upperBound}
\end{table}
By way of an example, it was also found that the upper bound \eqref{eq:upperbound} is sharp. Indeed, if
\[
\avg{v}_i = \begin{cases}
0 & \text{if $i$ is odd} \\
1 & \text{if $i$ is even and $i\leqslant 4$} \\
1-\eps & \text{if $i$ is even and $i > 4$.}
\end{cases}
\]
for any $\eps>0$, then the upper bound in \eqref{eq:upperbound} is attained in the limit $\eps\to0$. Figure \ref{fig:worstcase} shows these worst-case scenarios for $k=2,3,4,5$ and $\eps=10^{-10}$.
\begin{figure}
\caption{Worst case cell interface jumps for $k=2, 3, 4, 5$.}
\label{fig:worstcase}
\end{figure}
\subsection{The ENO TV conjecture}\label{sec:enoconjecture}
Any compactness argument for numerical approximations of the conservation law \eqref{eq:cl} requires some ``weak TV bound'' of the form \eqref{eq:wtvbound}. To conclude such a bound on the basis of the available ``weak reconstructed TV bound'' \eqref{eq:wrecontvbound}, it would seem that a lower bound of the form $|\jmpr{v}_{i+\hf}| \geqslant |\jmp{\avg{v}}_{i+\hf}|$ for all $i$ is required. However, such a bound is impossible due to the possibility that $\jmpr{v}_{i+\hf}=0$ even when $\jmp{\avg{v}}_{i+\hf} \neq 0$ (see Section \ref{sec:disccelledges}).
In \cite{Fjo13} the following inequality was conjectured for the $k$th order ENO reconstruction method:
\begin{equation}\label{eq:tecnodiffusionbound}
\sum_{i\in\mathbb{Z}}|\jmp{\avg{v}}_{i+\hf}|^{k+1} \leqslant C\|\avg{v}\|_{L^\infty}^{k-1}\sum_{i\in\mathbb{Z}}\jmp{\avg{v}}_{i+\hf}\jmpr{v}_{i+\hf}
\end{equation}
for some $C>0$ independent of $\avg{v}$ and $\Delta x$. Clearly, if this were to hold then the ``weak reconstructed TV bound'' \eqref{eq:wrecontvbound}, together with an $L^\infty$ bound on $\avg{v}$, would imply \eqref{eq:wtvbound}. The only case for which \eqref{eq:tecnodiffusionbound} has been proven is for $k=2$, and we include the proof here. For the sake of simplicity we assume that the mesh is uniform.
\begin{proof}[Proof of \eqref{eq:tecnodiffusionbound} for $k=2$]
Denote $\Delta \avg{v}_i = \avg{v}_{i+1}-\avg{v}_i$, and iteratively $\Delta^k \avg{v}_i = \Delta^{k-1} (\Delta \avg{v})_i$. The formula \eqref{eq:jmpexpr} yields
\begin{align*}
\sum_{i\in\mathbb{Z}}\jmp{\avg{v}}_{i+\hf}\jmpr{v}_{i+\hf} &= \sum_{i\in\mathbb{Z}}|\Delta \avg{v}_i| \sum_{j=s_i^2}^{s_{i+1}^2-1} a_i|\Delta^2 \avg{v}_j| \\*
&\geqslant a \sum_{i\in\mathbb{Z}}|\Delta \avg{v}_i| \sum_{j=s_i^2}^{s_{i+1}^2-1}|\Delta^2 \avg{v}_j|
\end{align*}
for constants $a_i \geqslant a > 0$ only dependent on $i$ and $s_i^2$. For every $j\in\mathbb{Z}$ there is precisely one index $i\in\mathbb{Z}$ such that $j\in \{s_i^2,\dots,s_{i+1}^2-1\}$, and we denote this index $i$ by $i=\iota_j^2$. Thus, we can write
\[
\sum_{i\in\mathbb{Z}}|\Delta \avg{v}_i| \sum_{j=s_i^2}^{s_{i+1}^2-1}|\Delta^2 \avg{v}_j| = \sum_{j\in\mathbb{Z}} |\Delta \avg{v}_{\iota_j^2}| |\Delta^2 \avg{v}_j|.
\]
It is straightforward to show that for $k=2$, the index $\iota$ is given by
\begin{equation}
\iota_j^2 = \begin{cases}
j & \text{if } |\Delta \avg{v}_j| > |\Delta \avg{v}_{j+1}| \\
j+1 & \text{if } |\Delta \avg{v}_j| \leqslant |\Delta \avg{v}_{j+1}|,
\end{cases}
\end{equation}
and as a consequence,
\begin{equation}\label{eq:iota2}
|\Delta \avg{v}_{\iota_j^2}| = \max\left(|\Delta \avg{v}_j|, |\Delta \avg{v}_{j+1}|\right).
\end{equation}
Starting with the left-hand side of \eqref{eq:tecnodiffusionbound} with $k=2$, we get
\begin{align*}
\sum_{i\in\mathbb{Z}} |\Delta \avg{v}_i|^3 &= \sum_{i\in\mathbb{Z}} |\Delta \avg{v}_i| \Delta \avg{v}_i \Delta \avg{v}_i \\
\text{\textit{(summation-by-parts)}}\qquad\qquad &= -\sum_{i\in\mathbb{Z}} \avg{v}_{i+1} \Delta\left(|\Delta \avg{v}_i|\Delta \avg{v}_i\right) \\
&= -\sum_{i\in\mathbb{Z}} \avg{v}_{i+1} \left(\left(\Delta|\Delta \avg{v}_i|\right)\Delta \avg{v}_i + |\Delta \avg{v}_i|\Delta^2 \avg{v}_i\right) \\
&\leqslant 2\sum_{i\in\mathbb{Z}} |\avg{v}_{i+1}||\Delta^2 \avg{v}_i||\Delta \avg{v}_i| \\
\text{\textit{(relabeling $i\mapsto j$ and using \eqref{eq:iota2})}}\qquad &\leqslant 2\|\avg{v}\|_{L^\infty} \sum_{j\in\mathbb{Z}} |\Delta^2 \avg{v}_j||\Delta \avg{v}_{\iota_j^2}|.
\end{align*}
This completes the proof.
\end{proof}
\subsection{Mesh dependent properties}\label{sec:enomeshdep}
The ``mesh dependent properties'' of ENO are those properties which are satisfied asymptotically as $\Delta x\to0$. In other words, for a \emph{fixed} underlying function $v(x)$, these are properties of ENO that are satisfied on \emph{sufficiently fine} meshes. Although these properties function as a proof-of-concept of the ENO reconstruction method, they are of limited value in applications to numerical methods for conservation laws \eqref{eq:cl} because for such applications, the cell averages in question will themselves depend (nonlinearly) on the mesh. As such, these properties cannot be used in a proof of stability or convergence of numerical schemes for \eqref{eq:cl}.
Below, we use the term ``shock'' to refer to any jump discontinuity of the underlying function $v$. For simplicity we will assume that $\Delta x_i\equiv$ const.
\subsubsection{Uniform $k$th order accuracy up to discontinuities}\label{sec:uniformaccshock}
If $v$ is a {piecewise} $C^\infty$ function with finitely many jump discontinuities (``shocks''), then for \emph{sufficiently small $\Delta x$}, the ENO reconstruction is a $k$th order approximation of $v$ in all cells not containing a shock \cite{HEOC86}. Indeed, if $\Delta x$ is sufficiently small then there are at least $k$ cells in-between the shocked cells. Moreover, the $\ell$th divided difference $[\avg{v}_s,\dots,\avg{v}_{s+\ell}]$ over any stencil containing a shocked cell behaves as $O(\Delta x^{-\ell})$. Thus, if $\Delta x$ is small enough then in every non-shocked cell, the ENO stencil selection procedure can, and will, select an ENO stencil $\{s_i,\dots,s_i+k-1\}$ \textit{not} containing a shock. The property of uniform $k$th order accuracy then follows as in Section \ref{sec:uniformacc}.
\subsubsection{Monotonicity in shocked cells}\label{sec:monotonicity}
Harten et al.\ proved in \cite{HEOC86} that the primitive $P_i$ of the ENO reconstruction $p_i$ will be \emph{monotone} in every cell containing a discontinuity of $V$. This property is of limited value since \textit{(a)} the primitive $V$ is always continuous, and \textit{(b)} we are primarily interested in $p_i$, not $P_i$. However, it turns out that the same property in fact holds for the ENO reconstruction $p_i$ (see Figure \ref{fig:enoMonotone}).
\begin{figure}
\caption{Monotonicity of fourth-order ENO reconstruction in a shocked cell.}
\label{fig:enoMonotone}
\end{figure}
\begin{proposition}\label{prop:monotonicity}
Let $v$ be a piecewise $C^\infty$ function with finitely many shocks. Then for sufficiently small $\Delta x$, the ENO reconstruction of $v$ is monotone in every cell containing a jump discontinuity---more precisely, it is strictly increasing at positive jumps and strictly decreasing at negative jumps.
\end{proposition}
\begin{proof}
\newcommand{i}{i}
The proof is similar in spirit to the proof of \cite[Theorem 4.1]{HEOC86}.
By choosing $\Delta x$ sufficiently small, we may assume that shocked cells are at least $k$ cells from one another, and hence it suffices to consider the case $v=w+H$, where $w$ is Lipschitz continuous and $H$ is piecewise constant with a single jump discontinuity at $x=\bar{x}\in(x_{i-\hf},x_{i+\hf})$, for some index $i\in\mathbb{Z}$. By the linearity of the ENO method (see Section \ref{sec:enolinear}), we may assume that
\[
H(x) = \begin{cases}
0 & \text{if } x<\bar{x}\\
1 & \text{if } x>\bar{x}.
\end{cases}
\]
Moreover, we may assume that $k\geqslant3$ since the cases $k=1$ (piecewise constant reconstruction) and $k=2$ (piecewise linear reconstruction) are immediate.
Let $S = \{s_i^k, \dots, s_i^k+k-1\}$ denote the ENO reconstruction stencil in cell $i$ and let $I=\bigcup_{j\in S} I_j$. We can write $p_i = q + G$, where $q$ and $G$ are $(k-1)$th order polynomials which interpolate $(\avg{w}_j)_{j\in S}$ and $(\avg{H}_j)_{j\in S}$, respectively. Since $w$ is Lipschitz continuous we have $|[\avg{w}_j,\dots,\avg{w}_{j+\ell}]|\leqslant C \Delta x^{1-\ell}$ for all $j,\ell$, so from \eqref{eq:enoexpression} we get
\begin{equation}\label{eq:qlipschitz}
\left\|\frac{dq}{dx}\right\|_{L^\infty(I)} \leqslant C
\end{equation}
for some $C$ independent of $\Delta x$.
Since $G$ interpolates $(\avg{H}_j)_{j\in S}$, there is at least one point $y_j \in I_j$ for every $j\in S$, $j\neq i$ such that
\[
G(y_j) = H(y_j) = \begin{cases}
0 & \text{if } j<i \\
1 & \text{if } j>i.
\end{cases}
\]
If there is more than one such root in cell $I_j$ we select the root $y_j$ which is closest to $\bar{x}$. By Rolle's theorem, the function $\frac{dG}{dx}$ has a zero in every interval of the form
\begin{equation}\label{eq:rolleintervals}
\begin{split}
(y_{j-1},y_j) \qquad &\text{for } s_i < j < i \\
(y_j, y_{j+1}) \qquad &\text{for } i < j < s_i+k-1.
\end{split}
\end{equation}
Note that cell $I_i$ intersects none of the above intervals. We will show that $\frac{dG}{dx}$ cannot have a zero in $(y_{i-1},y_{i+1})\supset I_i$. Choosing $\Delta x$ small enough and using \eqref{eq:qlipschitz}, we can then conclude that also $p_i=q+G$ must be monotone in $I_i$.
We divide into two cases:
\noindent
\textbf{Case 1:} $s_i \in \{i, i-k+1\}$, i.e.\ there are no cells in the stencil either to the left or to the right of $I_i$. In this case there are exactly $k-2$ intervals of the form \eqref{eq:rolleintervals}. Since the $(k-2)$th order polynomial $\frac{dG}{dx}$ can have at most $k-2$ zeros, it cannot have another zero in $I_i$.
\noindent
\textbf{Case 2:} $s_i \notin \{i, i-k+1\}$. In this case there are exactly $k-3$ intervals of the form \eqref{eq:rolleintervals}. From the jump expression \eqref{eq:jmpexpr} and the sign property (see Section \ref{sec:signprop}), we get
\begin{align*}
(p_{i+1}-p_i)(x_{i+\hf}) &= \sum_{s=s_i}^{s_{i+1}-1}[\avg{v}_s,\dots,\avg{v}_{s+k}]X_{i,s} \\*
&\geqslant \sum_{s=s_i}^{s_{i+1}-1}|[\avg{v}_s,\dots,\avg{v}_{s+k}]|\Delta x^k \\*
&\geqslant b_{i+\hf}
\end{align*}
for some $b_{i+\hf} > 0$ independent of $\Delta x$. (Here, we have used the fact that $[\avg{v}_s,\dots,\avg{v}_{s+k}] \sim \Delta x^{-k}$ for all $s\in\{i-k,\dots,i\}$). Similarly, we get
\[
(p_i-p_{i-1})(x_{i-\hf}) \geqslant b_{i-\hf}
\]
for some $b_{i-\hf} > 0$ independent of $\Delta x$. Thus,
\[
\begin{cases}
G(x_{i-\hf}) = (p_i-q)(x_{i-\hf}) \geqslant b_{i-\hf} + (p_{i-1}-q)(x_{i-\hf}) = b_{i-\hf} + O(\Delta x), \\
G(x_{i+\hf}) = (p_i-q)(x_{i+\hf}) \leqslant -b_{i+\hf} + (p_{i+1}-q)(x_{i+\hf}) = 1-b_{i+\hf} + O(\Delta x).
\end{cases}
\]
Choosing $\Delta x$ small enough that the ``$O(\Delta x)$'' terms are smaller than $b_{i\pm\hf}$, we find that
\[
G(y_{i-1}) = 0, \qquad G(x_{i-\hf})>0, \qquad G(x_{i+\hf})<1, \qquad G(y_{i+1})=1,
\]
and hence,
\[
\frac{dG}{dx}(y_{i-1}) \geqslant 0, \qquad \frac{dG}{dx}(y_{i+1}) \geqslant 0.
\]
Thus, if $\frac{dG}{dx}$ has a zero in $(y_{i-1},y_{i+1})$, there must be at least two of them (or one zero with multiplicity at least 2). But the $(k-2)$th order polynomial $\frac{dG}{dx}$ already has $k-3$ zeros in the intervals \eqref{eq:rolleintervals}, so it cannot have any zeros in $(y_{i-1},y_{i+1})$.
\end{proof}
\subsubsection{Essentially non-oscillatory}
The ``essentially non-oscillatory'' property, from which ENO derives its name, can be roughly stated as follows: Up to a term of order $\Delta x^{k}$, the total variation of the ENO reconstruction $p$ is less than that of $v$. As with the monotonicity property, Harten et al.\ \cite{HEOC86} proved this only for the primitives $P$, $V$, not for the reconstruction $p$ itself. However, with Proposition \ref{prop:monotonicity} in place we can establish this result also for $p$.
\begin{theorem}
Assume that $v$ is piecewise $C^\infty$ with finitely many jump discontinuities. Then for sufficiently small $\Delta x$, there exists a function $z=z(x)$ such that
\[
z(x) = p(x) + O(\Delta x^{k})\ \forall\ x, \qquad {\rm TV}(z) \leqslant {\rm TV}(v),
\]
where $p=\mathcal{R}(\avg{v})$ is the ENO reconstruction of $v$.
\end{theorem}
\begin{proof}
Let $\Delta x$ be sufficiently small that $p(x) = v(x) + O(\Delta x^k)$ in all non-shocked cells (see Section \ref{sec:uniformaccshock}). Decrease $\Delta x$ further such that $p$ is monotone in all shocked cells (see Section \ref{sec:monotonicity}). We choose $z(x)=v(x)$ in non-shocked cells, and $z(x)=p(x)$ in shocked cells. After an $O(\Delta x^{k})$ modification near the interfaces $x_{i+\hf}$ between shocked and non-shocked cells, the sign property implies that the total variation does not increase at these points.
\end{proof}
\begin{remark}
Although the above theorem says nothing about ${\rm TV}(p)$, it may be shown that ${\rm TV}(p) \leqslant {\rm TV}(v) + O(\Delta x^k)$ for sufficiently small $\Delta x$.
\end{remark}
\subsection{ENO deficiencies}\label{sec:enodeficiency}
Despite satisfying numerous stability properties, the ENO reconstruction method suffers from some deficiencies which makes it less attractive for certain applications such as numerical methods for linear conservation laws.
\subsubsection{$\mathcal{R}$ is discontinuous}
The ENO reconstruction $\mathcal{R} : (\avg{v}_i)_{i\in\mathbb{Z}} \to p$ is discontinuous, in the sense that a small change in $\avg{v}_i$ (such as round-off errors) can change the switch in the ENO stencil selection procedure, thus producing a different reconstruction $p_j$. Although this stencil switching might not be a problem in practice, the discontinuous nature of ENO-based methods makes their analysis significantly more difficult.
\subsubsection{Inefficient use of information}
Although the final ENO reconstruction $p_i$ in a cell only relies on $k$ values, the ENO stencil selection procedure depends on all $2k-1$ neighboring points. This is an inefficient use of information; using all $2k-1$ points would potentially give up to $(2k-1)$th order accuracy in smooth parts of the solution. This situation is exacerbated in multiple dimensions.
The WENO method uses a much more compact interpolation stencil and might therefore be more suitable for multi-dimensional problems.
\subsubsection{Instabilities in linear problems}
\begin{figure}
\caption{Fourth-order divided differences (top row) and stencil offset $r_i$ (bottom row) at $t=0$ (left), $t\approx 0.02$ (middle) and $t\approx 0.04$ (right).}
\label{fig:sin4}
\end{figure}
Rogerson and Meiburg \cite{RM90} reported on a series of numerical experiments with an ENO-based fourth-order finite difference scheme for the periodic linear advection equation
\[
\partial_t u + \partial_x u = 0, \qquad x\in[-\pi,\pi).
\]
They observed the expected fourth-order convergence rate with $u_0(x) = \sin(x)$, but with $u_0(x)=\sin(x)^4$ they observed a decay in the convergence rate at moderately high values of $N$, the number of meshpoints.
We approximate the above initial value problem using a Godunov-type finite volume scheme with fourth-order ENO reconstruction and a fourth-order Runge-Kutta time integrator. (Rogerson and Meiburg computed with the so-called ENO-Roe method \cite{SO89}, but the problem persists in other variants of ENO method and hence seems to be inherent to the ENO reconstruction procedure.) Figure \ref{fig:sin4} (top row) shows the fourth-order divided difference $[\avg{v}_i,\dots,\avg{v}_{i+4}]$ at various times. High-frequency oscillations appear quickly at the critical points $x=0$, $x=\pm\frac{\pi}{2}$, $x=\pm\pi$, and over time these oscillations propagate into the lower-order divided differences, finally polluting the solution $\avg{v}_i$. The oscillations near $x=\pm\frac{\pi}{2}$ stay bounded, whereas the oscillations near $x=0$, $x=\pm\pi$ grow unboundedly.
Figure \ref{fig:sin4} (bottom row) shows the \textit{stencil offset} $r_i^k = i -s_i^k \in \{0,\dots,k-1\}$. (Recall from Section \ref{sec:disccelledges} that every interface $x_{i+\hf}$ where $r_{i+1}\geqslant r_i$ will have a discontinuity in the reconstruction, which might lead to larger truncation errors.) Near the oscillatory points $x=0$, $x=\pm\pi$, the ENO method selects the stencils $r_i^3=0$ and $r_i^3 = 3$. Rogerson and Meiburg \cite{RM90} call these stencils \textit{linearly unstable}: setting $r_i^3 \equiv 0$ or $\equiv3$ for all $i$ will give an unconditionally unstable, divergent scheme, whereas $r_i\equiv1$ or $\equiv2$ gives a stable, convergent scheme. Although this heuristic explanation might very well be the root of the problem, the nonlinear nature of ENO makes this problem very hard to analyze rigorously. Further discussion can be found in \cite[Section 5]{AL93}, \cite{Har87b} and \cite{Shu90}. We mention in closing that the WENO method does not exhibit these instabilities for this particular problem \cite{Shu97}.
\section{Summary}
The ENO method has been enormously influential in the numerics community for hyperbolic conservation laws. Despite its highly nonlinear (even discontinuous) nature, it yields expressions and formulas which are rather easy to analyze, and enjoys several surprising properties such as the non-oscillatory property, the sign property and upper bounds on discontinuities. As discussed in Section \ref{sec:enodeficiency}, certain ENO-based finite volume methods suffer from instabilities which prevent convergence. A rigorous analysis of this problem would be highly interesting (not to mention difficult), and might lead to provably stable ENO-type methods.
\end{document} |
\begin{document}
\title{One-dimensional symmetry for solutions of Allen Cahn fully nonlinear equations.}
\begin{abstract}
This article presents some qualitative results for solutions of the fully nonlinear elliptic equation
$F(\nabla u, D^2 u) + f(u)=0$ in ${\rm I}\!{\rm R}^N$. Precisely under some additional assumptions on $f$, if $-1\leq u\leq 1$ and $\lim _{x_1\rightarrow \pm \infty} u(x_1, x^\prime) = \pm 1$ uniformly with respect to $x^\prime$, then the solution depends only on $x_1$. \end{abstract}
\section{Introduction}
The sliding method was crystalized in \cite{BN} by Berestycki and Nirenberg in order to prove monotonicity of solutions of
\begin{equation}\label{eqq1} \Delta u +f(u)=0\quad \mbox{ in } \Omega\subset{\rm I}\!{\rm R}^N.
\end{equation}
This powerful method uses two features of the Laplacian, comparison principle and invariance with respect to translation. The idea is: Fix a direction;
first slide in that direction enough for the intersection of the slided domain with $\Omega$ to be small enough or ``narrow enough'' for the maximum principle to hold. This allows one to compare the value of the solution at different points of the domain. Then continue ``sliding'' until reaching a critical position.
Coupling simplicity with ductility, the sliding method of \cite{BN} has been incredibly influential; it is possible to count over two hundred citations of the work (e.g. through google scholar). We shall here only recall the work by Berestycki, Hamel and Monneau \cite{BHM} where the technique is used to prove the so-called Gibbons conjecture. This was simultaneously and independently solved by Barlow, Bass and Gui \cite{BBG} and Farina \cite{F}. Precisely in \cite{BHM}, they prove that if $f$ is a $C^1([-1,1])$ function decreasing near $-1$ and $1$, with $f(-1)=f(1)=0$ (typically, $f(u)=u-u^3$) then the solutions of
(\ref{eqq1}) in ${\rm I}\!{\rm R}^N$ that converge uniformly to $1$ or $-1$ at infinity in some fixed direction, say $x_1$, are in fact one-dimensional, i.e.\ functions of $x_1$ alone.
In \cite{BHM}, the sliding method is coupled with a maximum principle (comparison principle) in unbounded domains contained in some cone.
As is well known the Gibbons conjecture is a weak form of the famous De Giorgi's conjecture which states that for $f(u)=u-u^3$, the level sets of monotone, entire solutions of (\ref{eqq1}) are hyperplanes for $N\leq 8$. This result has been proved in dimension 2 and 3 respectively by Ghoussoub and Gui \cite{GG} and by Ambrosio, Cabr\'e \cite{AC}, while Del Pino, Kowalczyk and Wei \cite{DKW} have proved that it does not hold for $N>8$ by constructing a counterexample.
Savin has proved the case $4\leq N\leq 8$, with the further condition that
the limit be $\pm 1$ in a direction at infinity, in that case this condition is not assumed to be uniform with respect to the other variables. See also \cite{VSS} for analogous results concerning the $p$-Laplacian.
In the present note we extend Gibbons conjecture to fully nonlinear operators. Precisely, we consider entire bounded solutions of
\begin{equation}\label{eq1}
F(\nabla u, D^2 u) + f(u)=0\quad \mbox{in}\quad {\rm I}\!{\rm R}^N,
\end{equation}
where $F(\nabla u, D^2 u):=|\nabla u|^\alpha \tilde F(D^2u)$ with $\alpha>-1$ and $\tilde F$ is uniformly elliptic.
With the same conditions on the nonlinearities of $f$ as in \cite{BHM}, we prove that for any solution such that
$\lim _{x_1\rightarrow \pm \infty} u(x_1, x^\prime) = \pm 1$ uniformly with respect to $x^\prime$ and such that $|\nabla u |>0$ in ${\rm I}\!{\rm R}^N$, then $\partial_{x_1} u\geq 0$ and
$u$ is a function of $x_1$ alone.
Many remarks are in order. Let us note that in the case $\alpha\leq 0$, some recent regularity results \cite{BD9} prove that locally Lipschitz solutions are in fact ${\cal C}^{1, \beta}$ for some $\beta <1$, and this regularity is sufficient to prove the results enclosed here. For $\alpha>0$ the ${\cal C}^{1}$ regularity is a consequence of the hypothesis on the positivity of the norm of the gradient.
A key ingredient in the proof of this result, which is of independent interest, is the following, strong comparison principle.
\begin{prop}\label{strict0}
Suppose that $\Omega$ is some open set, and $x_o, r$ such that $B(x_o, r) \subset \Omega$.
Suppose that $f$ is ${\cal C}^1$ on ${\rm I}\!{\rm R}$, and that $u$ and $v$ are, respectively, ${\cal C}^1$ bounded sub- and super-solutions of
$$F(\nabla w, D^2 w)+ f(w) =0\quad \mbox{in}\quad \Omega$$
such that $u\geq v$ and $\nabla v \neq 0$ (or $\nabla u\neq 0$) in $B(x_o, r)$, then,
either $u> v$ or $u\equiv v$ in $B(x_o, r)$.
\end{prop}
Observe that the condition that the gradient needs to be different from zero cannot be removed. Indeed, for any $m,k\in Z$ with $k\leq m$ the functions
$$u_{k,m}(x)=\left\{\begin{array}{lc}
1 & \mbox{for}\ x_1\geq (2m+2)\pi\\
\cos x_1 & \mbox{for}\ (2k+1)\pi\leq x_1\leq (2m+2)\pi\\
-1 & \mbox{for}\ x_1\leq (2k+1)\pi
\end{array}
\right.
$$
are viscosity solutions of
$$|\nabla u|^2(\Delta u)+(u-u^3)=0,$$ and they are ${\cal C}^{1,\beta}$ for all $\beta <1$.
Observe that e.g. $u_{0,0}\geq u_{0,i}$ for all $i\geq 1$ and $u_{0,0}(2\pi,y)=u_{0,i}(2\pi,y)$ but the functions don't coincide.
This example suggests that there may be solutions that are not one dimensional if the condition on the gradient is removed.
When $\alpha=0$, De Silva and Savin in \cite{DSS} have proved the analogue of De Giorgi's conjecture for uniformly elliptic operators in dimension 2. With $f$ as above, they prove that if there exists a one-dimensional monotone solution, i.e.\ $g:{\rm I}\!{\rm R}\mapsto [-1,1]$ such that $u(x)=g(\eta\cdot x)$ is a solution of
\begin{equation}\label{eqq2}
\tilde F(D^2u)+f(u)=0\quad\mbox{in}\quad{\rm I}\!{\rm R}^2
\end{equation}
satisfying $\lim_{t\rightarrow\pm\infty}g(t)=\pm1$
then, all monotone bounded solutions of (\ref{eqq2}) are one dimensional, i.e. their level sets are straight lines.
Let us mention that without any further assumptions on $f$ solutions may not exist. Indeed, let $\tilde F(D^2u)= {\cal M}^+_{a,A} (D^2 u)$ where for any symmetric matrix $M$ with eigenvalues $e_i$,
$${\cal M}^+_{a,A}(M)=a\sum_{e_i<0} e_i +A\sum_{e_i>0} e_i.$$
Then, as shown in the last section, for $a<A$ there are no one dimensional solutions of
$$ {\cal M}^+_{a,A} (D^2 u)+u-u^3=0,$$
that satisfy the asymptotic conditions.
In that section we study conditions on $f$ that guarantee existence of solutions of the ODE
$$ |u^\prime |^\alpha {\cal M}^+_{a,A} (u^{\prime\prime})+f(u)=0$$
that satisfy $\lim_{x\rightarrow\pm\infty}u(x)=\pm 1$.
While completing this work, we have received a paper by Farina and Valdinoci, \cite{FV}, who treat Gibbons conjecture in a very general setting that includes the case $\alpha = 0$.
\section{Assumptions and known results}
In the whole paper we shall suppose the following hypotheses on the operator $F$.
Let $S$ be the set of $N\times N$ symmetric matrices, and let $\alpha >-1$. Then $F$ is defined on ${\rm I}\!{\rm R}^N\setminus\{0\}\times S $ by
\begin{equation}\label{deff}
F( p, M) = |p|^\alpha \tilde F( M),
\end{equation}
where $\tilde F$ satisfies
$$\tilde F(tM)=t\tilde F(M)\quad\mbox{ for any }\quad t\in {\rm I}\!{\rm R}^+, M\in S,$$
and there exist $A\geq a>0$ such that for any $M$ and any $N\in S$ such that $N\geq 0$
\begin{equation}\label{eqaAF}
a tr(N)\leq \tilde F(M+N)-\tilde F(M) \leq A tr(N).
\end{equation}
\begin{exe}
1) Let $0< a < A$ and ${\cal M}_{a,A}^+(M)$ be the Pucci's operator
${\cal M}_{a,A}^+ (M) = Atr(M^+)-a tr(M^-)$ where $M^\pm$ are the positive and negative part of $M$, and
${\cal M}_{a,A}^-(M)=- {\cal M}_{a,A}^+ (-M)$.
Then $F$ defined as
$$F( p,M) = |p|^\alpha {\cal M}_{a,A}^\pm (M) $$
satisfies the assumptions.
2) Let $B$ be a symmetric positive definite matrix then $F( p,M) = |p|^\alpha(tr(BM) )$, is another example of operator satisfying the assumptions.
\end{exe}
We now recall what we mean by viscosity solutions in our context :
\begin{defi}\label{def1}
Let $\Omega$ be a bounded domain in
${\rm I}\!{\rm R}^N$, let $g$ be a continuous function on $\Omega\times {\rm I}\!{\rm R}$, then
$v$, continuous on $\overline{\Omega}$ is called a viscosity super-solution (respectively sub-solution)
of
$F(\nabla u,D^2u)=g(x,u)$ if for all $x_0\in \Omega$,
-Either there exists an open ball $B(x_0,\delta)$, $\delta>0$ in $\Omega$
on which
$v$ is a constant $ c
$ and
$0\leq g(x,c)$, for all $x\in B(x_0,\delta)$
(respectively
$0\geq g(x, c)$ for all $x\in B(x_0,\delta)$)
-Or
$\forall \varphi\in {\mathcal C}^2(\Omega)$, such that
$v-\varphi$ has a local minimum (respectively local maximum) at $x_0$ and $\nabla\varphi(x_0)\neq
0$, one has
$$
F( \nabla\varphi(x_0),
D^2\varphi(x_0))\leq g(x_0,v(x_0)).
$$
(respectively
$$F( \nabla\varphi(x_0),
D^2\varphi(x_0))\geq g(x_0,v(x_0))).$$
A viscosity solution is a function which is both a super-solution and a sub-solution.
\end{defi}
\begin{rema}
When $F$ is continuous in $p$, and $F(0,0)=0$, this definition is equivalent to the classical definition
of viscosity solutions, as in the User's guide \cite{CIL}.
\end{rema}
We now give a definition that will be needed in the statement of our main theorem.
\begin{defi}\label{ddd} We shall say that $|\nabla u|\geq m>0$ in $\Omega$ in the viscosity sense, if for all $ \varphi\in {\mathcal C}^2(\Omega)$, such that
$u-\varphi$ has a local minimum or a local maximum at some $x_0\in\Omega$,
$$|\nabla\varphi(x_0)|\geq m.$$
\end{defi}
In our context, since the solutions considered have their gradient different from zero everywhere, the viscosity solutions can be understood in the classical sense.
We begin to recall some of the results obtained in \cite{BD5} which will be needed in this article.
\begin{theo}\label{thcomp1}
Suppose that $c$ is a continuous and bounded function satisfying $c\leq 0$.
Suppose that $f_1$ and $f_2$ are continuous and bounded and that
$u$ and $v$ satisfy
\begin{eqnarray*}
F( \nabla u, D^2 u)+c(x)|u|^\alpha u& \geq & f_1\quad
\mbox{in}\quad \Omega, \\
F( \nabla v,D^2 v)
+ c(x) |v|^{\alpha}v & \leq & f_2 \quad \mbox{in}\quad
\Omega , \\
u \leq v && \quad \mbox{on}\quad \partial\Omega.
\end{eqnarray*}
If $f_2< f_1$ then $u \leq v$ in $\Omega$.
Furthermore, if $c<0$ in $\Omega$ and $f_2\leq f_1$ then $u \leq v$ in $\Omega$.
\end{theo}
\begin{prop}\label{remhopf}
Suppose that ${\cal O}$ is a smooth bounded domain. Let
$u$ be a solution of
\begin{equation}\label{123}
F( \nabla u, D^2 u) \leq 0
\quad \mbox{in }\quad {\cal O}. \end{equation}
If there exists some constant $c_o$, such that $u>c_o$ inside ${\cal O}$ and $u(\bar x)=c_o$ with $\bar x\in \partial {\cal O}$, then
$$\liminf_{t\rightarrow 0^+} \frac{u(\bar x-t\vec n)-u(\bar x)}{t}>0,$$
where $\vec n$ is the outer normal to $\partial {\cal O}$ at $\bar x$.
\end{prop}
\begin{rema} \label{remrem} In particular Proposition \ref{remhopf} implies that a non constant super-solution of (\ref{123}) in a domain $\Omega$ has no interior minimum.
If $c_o = 0$, the result can be extended in the following manner: Suppose that $\beta\geq \alpha $, that $c$ is continuous and bounded, and $u$ is a nonnegative solution of
$$F(\nabla u, D^2 u) + c(x) u^{1+\beta} \leq 0$$
then either $u\equiv 0$ or $u>0$ in $\Omega$. In that last case, if $u = 0$ on some point $x_o\in \partial \Omega$, then $\partial_{\vec n} u(x_o)>0$.
\end{rema}
We now recall the regularity results obtained in \cite{BD9}.
\begin{theo}\label{reg}
Suppose that $\Omega$ is a bounded ${\cal C}^2$ domain and
$\alpha \leq 0$. Suppose that $ g$ is continuous on $\Omega \times {\rm I}\!{\rm R}$. Then the bounded solutions of
\begin{equation} \label{012}\left\{ \begin{array}{lc}
F(\nabla u, D^2 u) = g(x, u(x)) &\mbox{ in }\ \Omega,\\
u = 0 &\ \mbox{ on } \ \partial \Omega,
\end{array}\right.
\end{equation}
satisfy
$u\in {\cal C}^{1,\beta}(\overline{\Omega})$, for some $\beta \in (0, 1)$.
Furthermore if $\Omega$ is a domain (possibly unbounded) of ${\rm I}\!{\rm R}^N$ and if $u$ is bounded and locally Lipschitz then $u\in {\cal C}_{loc}^{1,\beta}(\Omega)$ for some $\beta\in (0,1)$.
\end{theo}
When $\alpha>0$, ${\cal C}^1$ regularity results are not known except for the one dimensional case or the radial case, however here, since the solutions that we consider have the gradient bounded away from zero, this regularity is just a consequence of classical results and a priori estimates.
Indeed next theorem is just an application of Theorem 1.2 of \cite{CDV}, which in turn is the extension of Caffarelli's classical result:
\begin{theo}\label{IA}
Suppose that $\Omega$ is a (possibly unbounded) domain, and that $g$ is ${\cal C}^1$ and bounded. Let $u$ be a bounded solution of
\begin{equation}\label{1a}
F(\nabla u, D^2 u) = g(u)\ \mbox{ in }\ \Omega.
\end{equation}
If $|\nabla u|\geq m>0$ in $\Omega$ in the sense of Definition \ref{ddd}, there exists $\beta\in (0,1)$ and $C=C(a,A,N,|g(u)|_\infty, m)$ such that
if $B(y, \rho) \subset \Omega$,
\begin{equation} \label{eqholdloc} \|u\|_{{\cal C}^{1,\beta}(B(y, \frac{\rho}{2}))}\leq C \sup_ {B(y, \rho)}|u|.
\end{equation}
\end{theo}
{\em Proof.} We introduce the operator:
$$G(v,\nabla v, D^2v):=\tilde F(D^2v)-g(v)\sup\left(|\nabla v|,\frac{m}{2}\right)^{-\alpha}.$$
If $u$ is a solution of (\ref{1a}) such that in the viscosity sense $|\nabla u|\geq m>0$, then it is a solution of
$$ G(u,\nabla u, D^2 u)=0\quad \mbox{in}\quad \Omega.$$
Indeed, e.g. if $\varphi\in {\cal C}^2$ is such that
$(u-\varphi)(x)\geq (u-\varphi)(\bar x)$ for some $\bar x\in\Omega$, then $|\nabla \varphi |(\bar x)\geq m$ and
$$|\nabla \varphi|^\alpha(\bar x) \tilde F(D^2\varphi(\bar x))\geq g(u(\bar x))\Rightarrow \tilde F(D^2\varphi (\bar x))- |\nabla \varphi (\bar x)|^{-\alpha} g(u(\bar x))\geq 0.$$
In order to apply Theorem 1.2 of \cite{CDV}, it is enough to remark that, $G$ does not depend on $x$ and therefore the condition on the modulus of continuity is automatically satisfied.
Furthermore,
the dependence on the gradient is Lipschitz, where the Lipschitz constant depends on $m$ and $|g(u)|_\infty$.
Applying Theorem 1.2 of \cite{CDV} we have obtained the above estimate and
$u\in C^{1,\beta}(\Omega)$. This ends the proof.
\section{Comparison principles}
As mentioned in the introduction, we begin by proving a strong comparison principle, that extends the one obtained in \cite{BD9}.
\begin{prop}\label{strict}
Suppose that $\Omega$ is some open subset of ${\rm I}\!{\rm R}^N$,
$f$ is ${\cal C}^1$ on ${\rm I}\!{\rm R}$. Let $u$ and $v$ be ${\cal C}^1$ bounded sub-solution and super-solution of
$$F(\nabla u, D^2 u)+ f(u) =0\quad \mbox{in}\ \Omega.$$
Suppose that ${\cal O}$ is some connected subset of $\Omega$, with $u\geq v$ and $\nabla v \neq 0$ (or $\nabla u\neq 0$) on ${\cal O}$, then
either $u> v$ or $u\equiv v$ in ${\cal O}$.
\end{prop}
\begin{rema} Of course when $\alpha=0$ the strong comparison principle is classical and holds without requiring that the gradient be different from zero.
\end{rema}
\noindent{\em Proof of Proposition \ref{strict}.}
We write the proof in the case $\alpha< 0$, the changes to bring when $\alpha>0$ being obvious.
We argue as in \cite {BD9}. Suppose that $x_o$ is some point where $u(x_o)>v(x_o)$ (if such point doesn't exist we have nothing to prove).
Suppose by contradiction that there exists some point $x_1$ such that $u(x_1) = v(x_1)$. It is clear that it can be chosen in such a way that, for $R = |x_1-x_o|$,
$u>v$ in $B(x_o, R)$ and $x_1$ is the only point in the closure of that ball on which $u$ and $v$ coincide.
Without loss of generality, one can assume that $B(x_o, {3R\over 2}) \subset {\cal O}$.
We can assume without loss of generality that $v$ is the function whose gradient is bounded away from zero.
Let then $L_1 = \inf_{B(x_o, {3R\over 2})} |\nabla v|>0$, $L_2 = \sup_{B(x_o, {3R\over 2})} |\nabla v|$.
We will prove that there exist two constants $c>0$ and $\delta >0 $ such that
$$ u \geq v + \delta ( e^{-c|x-x_o|}- e^{-{3cR\over 2}})\equiv v+ w\quad \mbox{in}\quad {R\over 2}\leq |x-x_o| = r\leq {3R\over 2}.$$
This will contradict the fact that $u(x_1) = v(x_1)$.
Let $\delta \leq \displaystyle \min_{|x-x_o|= {R\over 2} }(u-v)$, so that
$$u\geq v+w\quad \mbox{on} \quad
\partial\left(B(x_o, {3R\over 2})\setminus \overline{B(x_o, {R\over 2}})\right).$$
Define
$$\gamma(x) = \left\{ \begin{array}{lc}
{f(u(x))-f(v(x))\over u(x)-v(x)} &\mbox{ if } \ u(x) \neq v(x)\\
f^\prime (u(x)) & \mbox{ if } \ u(x) = v(x).
\end{array}\right.$$
Since $f$ is ${\cal C}^1$ and the functions $u$ and $v$ are bounded, $\gamma$ is continuous and bounded.
We write
$$
f(u) =\gamma (x) (u-v) + f(v), $$
$$ F(\nabla u, D^2 u) -(|\gamma |_\infty+1) (u-v) = -f(v)+ (-\gamma-|\gamma|_\infty-1) (u-v)\leq F(\nabla v, D^2 v). $$
We shall prove that, for $c$ chosen conveniently,
$$F(\nabla v, D^2 v) < F(\nabla (v+ w), D^2(v+ w)) -(|\gamma|_\infty+1)
w,$$
this will imply that
$$F(\nabla u, D^2 u)-(|\gamma|_\infty+1) u \leq F(\nabla (v+ w), D^2(v+ w)) -(|\gamma|_\infty+1) (v+ w).$$
Let $\varphi$ be some test function for $v$ from above,
a simple calculation on $w$ implies that, if $c \geq {1\over a}\left({4A(N-1) \over R}\right)$ then
\begin{eqnarray*}
|\nabla \varphi+ \nabla w|^\alpha &\cdot&\tilde F (x, D^2 \varphi+ D^2 w)-(|\gamma|_\infty+1) w\\
&\geq& |\nabla \varphi+ \nabla w|^\alpha \tilde F (x,D^2 \varphi) + |\nabla \varphi+ \nabla w|^\alpha {\cal M}^- (D^2 w)-(|\gamma|_\infty+1) w \\
&\geq& |\nabla \varphi+ \nabla w|^\alpha { F(\nabla \varphi, D^2\varphi)\over |\nabla \varphi|^\alpha} +\\
&& + |\nabla \varphi+ \nabla w|^\alpha\frac{ac^2}{2} \delta e^{-cr} -(|\gamma|_\infty+1) \delta e^{-cr}.
\end{eqnarray*}
We also impose $\delta < {R L_1e\over 16 }$ so that $|\nabla w|\leq {|\nabla \varphi|\over 8}$; then
the inequalities
$$||\nabla \varphi+\nabla w|^\alpha-|\nabla \varphi|^\alpha |\leq |\alpha | |\nabla w|| \nabla \varphi|^{\alpha-1}\left( {1\over 2}\right)^{\alpha-1}\leq {|\nabla \varphi|^\alpha \over 2}$$
imply that
$$
|\nabla \varphi+ \nabla w|^\alpha \left(\tilde F(x,D^2 \varphi+ D^2 w)\right)
\geq -f(v) -| f(v)|_\infty|\nabla\varphi|^{-1}|\alpha| 2^{1-\alpha}c\delta e^{-cr} +L_2^\alpha {ac^2\over 4} \delta e^{-cr}.
$$
It is now enough to choose
$$c\geq {4A(N-1) \over R} + {|\alpha | |f(v)|_\infty 2^{2-\alpha} \over a L_2^{1+\alpha}}+ \left({16(|\gamma |_\infty+1)\over a L_2^\alpha} \right)^{1\over 2}$$
to finally obtain
$$
|\nabla \varphi+ \nabla w|^\alpha \tilde F(x,D^2 \varphi+ D^2 w)-(|\gamma|_\infty+1 ) w\geq -f(v)+{a c^2\delta L_2^\alpha e^{-cr}\over 8}-(|\gamma|_\infty+1) \delta e^{-cr}
$$
i.e.
$$F(x, \nabla (v+ w), D^2 (v+ w)) -(|\gamma|_\infty+1) w> F(x,\nabla v,D^2 v).$$
Hence the comparison principle, Theorem \ref{thcomp1}, gives that
$$ u\geq v+w\quad\mbox{in} \ B(x_o,\frac{3R}{2})\setminus \overline{ B(x_o,\frac{R}{2})},$$
the desired contradiction. This ends the proof of Proposition \ref{strict}.
From now $f$ will denote a ${\cal C}^{1}$ function defined on $[-1,1]$, such that $f(-1) = f(1) =0$,
and nonincreasing on the set $[-1, -1+\delta]\cup [1-\delta, 1]$ for some $\delta\in ]0,1[$.
Next is a comparison principle in unbounded domains that are ``strip''-like.
\begin{prop}\label{propcomp}
Suppose that $u$ and $v$ are ${\cal C}^1$, have values in $[-1,1]$ and are respectively sub and super solutions of
$$F(\nabla w,D^2 w) + f(w) = 0\ \mbox{ in } \ {\rm I}\!{\rm R}^N$$
with $F(\nabla u,D^2 u) \in L^\infty$, $F(\nabla v,D^2 v)\in L^\infty$.
If $b, c\in {\rm I}\!{\rm R}$ are such that $b < c$, $\Omega = [b,c]\times {\rm I}\!{\rm R}^{N-1}$,
$|\nabla u|$ and $|\nabla v|\geq m>0$ and
either $u\leq -1+\delta$ or $v\geq 1-\delta$ in $\Omega$,
then
$$u-v\leq \sup_{\partial \Omega } (u-v)^+.$$
\end{prop}
Proof of Proposition \ref{propcomp}.
Without loss of generality $f$ can be extended outside of $[-1,1]$ in order that $f$ be still ${\cal C}^1$, bounded, and nonincreasing after $1-\delta$ and before $-1+ \delta$. Suppose, to fix the ideas, that $v\geq 1-\delta$ in $\Omega$.
We can also assume that $u\leq v$ on $\partial \Omega$. Indeed, since
$f$ is decreasing after $1-\delta$, $w= v+ \sup_{\partial \Omega } (u-v)^+$ is a super-solution which satisfies $F(\nabla w ,D^2 w) \in L^\infty$. Suppose by contradiction that $\sup_{\Omega} (u-v) = \lambda$
for some $\lambda >0$.
By definition of the supremum, there exists some sequence $(x^k)_k$ such that $ (u-v) (x^k)\rightarrow \lambda$. Eventually extracting from $(x^k)_k$ a subsequence, still denoted $(x^k)_k$, we have $x_1^k\rightarrow \bar x_1\in [b,c]$. For any $x=(x_1,x^\prime)$ let
$$u^k(x_1, x^\prime) = u(x_1, x^\prime + (x^\prime)^k)$$
and
$$v^k(x_1, x^\prime) = v(x_1, x^\prime + (x^\prime)^k).$$
By the uniform estimates \ref{eqholdloc} in Theorem \ref{IA} one can extract from $(u^k)_k$ and $(v^k)_k$ some subsequences, denoted in the same way, such that $u^k\rightarrow \bar u$ and $v^k\rightarrow \bar v$ uniformly on every compact set of $[b,c ]\times {\rm I}\!{\rm R}^{N-1}$ and $\bar u$ and $\bar v+ \lambda$ are solutions of
$$F(\nabla \bar u, D^2 \bar u) \geq -f(\bar u),$$
$$F(\nabla(\bar v+\lambda), D^2 (\bar v+\lambda)) \leq -f(\bar v) \leq -f(\bar v+\lambda).$$
Furthermore, $\bar u\leq \bar v+ \lambda$, and
through the uniform convergence on the compact set $[b, c] \times \{0\}^{N-1}$, $\lim_k u^k(\bar x_1, 0) = \lim_k u^k (x_1^k, 0)$ and $\lim_k v^k(\bar x_1, 0) = \lim_k v^k (x_1^k, 0)$. This implies that
\begin{eqnarray*}
\bar u(\bar x_1, 0) &=& \lim_k u(x_1^k, 0+ {x^\prime}^k)\\&=&
\lim_k v( x_1^k, 0+ {x^\prime}^k)+ \lambda =\bar v(\bar x_1, 0)+ \lambda.
\end{eqnarray*}
Now using the fact that $|\nabla u|> m$ and $|\nabla v|> m$ on $[b,c]\times {\rm I}\!{\rm R}^{N-1}$, by passing to the limit one gets that $|\nabla \bar u|\geq m>0$ and $|\nabla \bar v| \geq m$ on that strip, and the strong comparison principle in Proposition \ref{strict}, implies that $\bar u \equiv \bar v+ \lambda$.
On the other hand,
$$u(b, x^\prime + {x^\prime}^k)\leq v(b, x^\prime + {x^\prime}^k)$$ implies, by passing to the limit that
$$\bar u(b, x^\prime )\leq \bar v(b, x^\prime )$$
a contradiction.
\section{Proof of the one dimensionality.}
We now state precisely and prove the main result of this paper:
\begin{theo}\label{th1}
Let $f$ be defined on $[-1,1]$, ${\cal C}^1$ and such that $f$ is nonincreasing near $-1$ and $1$, with $f(-1) = f(1) = 0$.
Let $u$ be a viscosity solution of
$$F(\nabla u, D^2 u) + f(u) = 0\ \mbox{ in } \ {\rm I}\!{\rm R}^N,$$
with values in $[-1,1]$.
Suppose that
$\displaystyle\lim_{x_1\rightarrow
\pm \infty} u(x_1, x^\prime) = \pm 1$, uniformly with respect to $x^\prime $,
and, if $\alpha\neq 0$, suppose that for any $b<c$ there exists $m>0$ such that
$|\nabla u(x)|\geq m>0$ in $[b, c]\times {\rm I}\!{\rm R}^{N-1}$ in the viscosity sense.
\noindent Then $u$ does not depend on $x^\prime$ i.e. $u(x_1, x^\prime ) = v(x_1)$ where
\begin{equation}\label{dim1wholespace}
\left\{ \begin{array}{lc}
F( v^\prime e_1 , v^{\prime\prime}e_1\otimes e_1) + f(v) = 0& \ \mbox{ in } \ {\rm I}\!{\rm R},\\
|v|\leq 1, \ \displaystyle \lim_{x\rightarrow \pm \infty} v = \pm1 & \end{array} \right.
\end{equation}
and $v$ is increasing.
\end{theo}
{\em Proof of Theorem \ref{th1}.} We proceed analogously to the proof given in \cite{BHM}.
First observe that by Theorem \ref{IA} the solution $u$ is in ${\cal C}_{loc}^{1,\beta}({\rm I}\!{\rm R}^N)$, so that the condition on the gradient is pointwise and not only in the viscosity sense.
Let $\delta $ be such that $f$ is nonincreasing on $[-1, -1+\delta]\cup [1-\delta, 1]$. Define
$$\Sigma_M^+:=\{x=(x_1,x')\in{\rm I}\!{\rm R}^N,\ x_1\geq M\}\quad\mbox{
and}\quad \Sigma_M^-:=\{x=(x_1,x')\in{\rm I}\!{\rm R}^N,\ x_1\leq M\}.$$
By the uniform behavior of the solution in the $x_1$ direction, there exists $M_1>0$ such that
$$u(x) \geq 1-\delta \quad \mbox{in}\quad \Sigma^+_{M_1},\quad u(x)\leq -1+\delta \quad \mbox{in}\quad \Sigma^-_{(-M_1)}.$$
Fix any $\nu=(\nu_1,\dots,\nu_N)$ such that $\nu_1>0$ and let
$u_t (x) := u(x+t\vec \nu)$.
\noindent {\bf Claim 1} : For $t$ large enough, $u_t\geq u$ in ${\rm I}\!{\rm R}^N$.
For $x\in\Sigma^+_{(-M_1)}$ and for $t$ large enough, say $t > {2M_1 \over \nu_1}$,
$$ u(x+t \vec\nu ) \geq 1-\delta\quad\mbox{ and }\quad u_t \geq u\quad\mbox{on}\ x_1 = -M_1.
$$
We begin to prove that $u_t\geq u$ in $\Sigma^+_{(-M_1)}$.
\noindent Suppose by contradiction that $\sup_{\Sigma^+_{(-M_1)}}(u-u_t)=m_o>0$.
\noindent Observe that since $\displaystyle \lim_{x_1\rightarrow +\infty} u = \lim_{x_1\rightarrow +\infty} u_t= 1$ uniformly, there exists $M_2$ such that for
$x_1> M_2\geq -M_1$, $|u_t-u|< {m_o\over 2}$. Then $\sup_{\Sigma^+_{(-M_1)}}(u-u_t)=m_o$ is achieved inside
$[-M_1, M_2] \times {\rm I}\!{\rm R}^{N-1}$.
On that strip, by hypothesis, there exists $m>0$ such that $|\nabla u|, |\nabla u_t |\geq m$, and also $u_t\geq 1-\delta$. Then one can apply the strong comparison principle in Proposition \ref{propcomp} with $b = -M_1$ and $c = M_2$ and obtain that
$$u-u_t \leq \sup_{\{x_1 = -M_1\}\cup \{ x_1 = M_2\}} (u-u_t)^+< {m_o\over 2},$$ a contradiction. Finally we have $u\leq u_t$ in $\Sigma^+_{(-M_1)}$.
\noindent We can do the same in $\Sigma^-_{(-M_1)}$ by observing that, in that case, $u\leq -1+\delta$.
This ends the proof of Claim 1.
Let $ \tau = \inf\{ t>0,\ \mbox{such that} \ u_t\geq u\ \mbox{in}\ {\rm I}\!{\rm R}^N\}$; by Claim 1, $\tau$ is finite.
\noindent {\bf Claim 2:} $\tau=0$.
To prove this claim, we argue by contradiction, assuming that it is positive.
\noindent We suppose first that
$$\eta := \inf _{ [-M_1, M_1] \times {\rm I}\!{\rm R}^{N-1}} (u_\tau-u)>0,$$ and we prove then that there exists $\varepsilon >0$ such that $u_{\tau-\varepsilon} \geq u$ in ${\rm I}\!{\rm R}^N$. This will contradict the definition of $\tau$.
By the estimate (\ref{eqholdloc}) in Theorem \ref{IA}, there exists some constant $c>0$ such that for all $\varepsilon >0$
$$ |u_\tau-u_{\tau-\varepsilon}|\leq \varepsilon c. $$
Choosing $\varepsilon$ small enough in order that $ \varepsilon c\leq {\eta\over 2}$ and $\varepsilon < \tau$, one gets that $u_{\tau-\varepsilon}-u \geq 0$ on $\{x_1 = M_1\}$. The same procedure as in Claim 1 proves that the inequality holds in the whole space ${\rm I}\!{\rm R}^N$, a contradiction with the definition of $\tau$.
\noindent Hence
$ \eta = 0$ and there exists a sequence $(x_j)_j\in \left([-M_1, M_1] \times {\rm I}\!{\rm R}^{N-1}\right)^{{\bf N}}$ such that
$$(u-u_\tau) (x_j) \rightarrow 0.$$
Let $v_j (x) = u(x+ x_j)$ and $v_{j, \tau} (x) = u_\tau (x+ x_j)$;
these are sequences of bounded solutions, by uniform elliptic estimates (consequence of Theorem \ref{IA}), one can extract subsequences, denoted in the same way, such that
$$v_j\rightarrow \bar v\quad\mbox{ and }\quad v_{j, \tau} \rightarrow \bar v_\tau$$
uniformly on every compact set of ${\rm I}\!{\rm R}^N$.
Moreover, $v_j$ and $v_{j,\tau}$ are solutions of the same equation and passing to the limit, $\bar v \geq \bar v_\tau$.
Furthermore $\bar v (0) = \lim_{j\rightarrow +\infty} u(x_j)= \lim_{j\rightarrow +\infty} u_\tau (x_j) = \bar v_\tau (0)$ and
$$|\nabla \bar v|(0) = \lim_{j\rightarrow +\infty} |\nabla u(x_j)| \geq m$$
by the assumption on $\nabla u$.
Since $|\nabla \bar v| >0$ everywhere, by the strong comparison principle in Proposition \ref{strict},
$\bar v_\tau = \bar v$ on any neighborhood of $0$. This would imply that $\bar v$ is $\tau$-periodic.
By our choice of $M_1$,
$\forall x\in \Sigma^+_{2M_1}$,
$v_j(x) = u(x+ x_j) \geq 1-\delta$ and
$\forall x\in \Sigma^-_{(-2M_1)}$,
$v_j(x) = u(x+ x_j) \leq -1+\delta$.
This contradicts the periodicity. Hence $\tau = 0$ and this ends the proof of Claim 2.
This implies that $\partial_{\vec \nu} u(x) \geq 0$, for all $x \in {\rm I}\!{\rm R}^{N}$ since for all $t>0$, $u(x+t\vec \nu)\geq u(x)$ as long as $\nu_1>0$.
Take a sequence $\vec{\nu_n}=(\nu_{1,n}, \nu^\prime)$ such that $0<\nu_{1,n}$ and $\nu_{1,n}\rightarrow 0$. Since $ u$ is ${\cal C}^1$, by passing to the limit,
$$\partial_{\vec{\nu^\prime}} u(x) \geq 0.$$
This is also true by changing $\vec \nu^\prime$ in $-\vec\nu^\prime$ , so finally $\partial_{\vec \nu^\prime} u(x) = 0$.
This ends the proof of Theorem \ref{th1}.
\section{Existence results for the ODE.}
We prove in this section that the one dimensional problem (\ref{dim1wholespace}), under additional assumptions on $f$, admits a solution and that, when $\alpha \leq 0$, the solution is unique up to translation.
We consider the model Cauchy problem
\begin{equation}\label{cauchydel}\left\{ \begin{array}{lc}
-{\cal M}^+_{a,A} (u^{\prime\prime}) |u^\prime|^\alpha = f(u), & \mbox{in}\quad {\rm I}\!{\rm R}\\
u(0) = 0, u^\prime (0) = \delta &
\end{array} \right.
\end{equation}
where ${\cal M}^+_{a,A}$ is one of the Pucci operators.
With $f$ such that
$f(-1) =f(0 ) = f(1)= 0$, $f$ is positive in $]0 ,1[$, negative in $]-1, 0[$, $f$ is ${\cal C}^1([-1, 1])$.
\noindent We introduce the function $f_{a,A}(t)=\left\{\begin{array}{lc} \frac{f(t)}{a} & \mbox{if}\ f(t)>0\\
\frac{f(t)}{A} & \mbox{if}\ f(t)<0
\end{array}
\right.
,$
so that equation (\ref{cauchydel}) can be written in the following way
\begin{equation}\label{cauchydelta}\left\{ \begin{array}{lc}
-u^{\prime\prime} |u^\prime|^\alpha = f_{a,A}(u), & \mbox{in}\quad{\rm I}\!{\rm R}\\
u(0) = 0, u^\prime (0) = \delta. &
\end{array} \right.
\end{equation}
We also assume on $f$:
\begin{enumerate}
\item $f^\prime(\pm1)<0$,
\item $\displaystyle\int_{-1}^{1} f_{a,A}(s) ds =0$,
\item for all $t\in (-1, 0]$, $\int_t^{1 } f_{a,A}(s)ds >0$.
\end{enumerate}
$\delta_1$ will denote the positive real
\begin{equation}\label{delta1}
\delta_1 = \left((2+\alpha ) \int_0 ^{1} {f(s)\over a} ds \right)^{1\over 2+\alpha}.
\end{equation}
Without loss of generality $f$ is extended outside of $[-1,1]$
so that $f\in {\cal C}^{0,1} ({\rm I}\!{\rm R})$, $f\geq 0$ on $(-\infty, -1)$, $f\leq 0$ on $[1, +\infty)$. Then $ f$ satisfies also for all $t\in {\rm I}\!{\rm R}\setminus\{\pm1\}$
$$\int_t^{1 } f_{a,A}(s) ds >0.$$
According to the Cauchy--Lipschitz theorem, as soon as $u^\prime (0) \neq 0$ there exists a unique local solution. Moreover the Cauchy--Peano theorem establishes a global existence result.
We establish existence and uniqueness (in the case $\alpha \leq 0$) of weak solutions and their equivalence with viscosity solutions.
\begin{defi}
A weak solution for (\ref{cauchydelta}) is a ${\cal C}^1$ function which satisfies in the distribution sense
\begin{equation}\label{weak}\left\{ \begin{array}{lc}
-{d\over dx} (|u^\prime|^\alpha u^\prime) = (1+\alpha) f_{a,A}(u)&\mbox{ in } \ {\rm I}\!{\rm R}\\
u(\theta) =0, \ u^\prime(\theta) = \delta.& \
\end{array}\right.
\end{equation}
Without loss of generality we can suppose that $\theta=0$. \end{defi}
Remark that we are interested in solutions that are in $[-1,1]$ so we shall suppose that $u_o\in (-1,1)$.
\begin{rema}
Let us note that the condition 2 on $f$ is necessary for the existence of weak solutions which satisfy $\lim_{x\rightarrow +\infty} u(x) = 1$, $\lim_{x\rightarrow -\infty} u(x) = -1$. Indeed by continuity $u$ has a zero and without loss of generality we can suppose that it is in 0. Since the solution $u$ is ${\cal C}^1$, and bounded, the limit of $u^\prime$ at infinity is $0$.
In particular, multiplying the equation (\ref{weak}) by $u^\prime$ and integrating in $[0,+\infty)$
$$|u^\prime (0 )|^{2+\alpha}= (2+\alpha) \int_0^{1} {f(s)\over a} ds$$
and in $]-\infty,0]$,
similarly
$$ |u^\prime (0 )|^{2+\alpha} =(2+\alpha) \int_{0}^{-1} {f(s)\over A} ds= -(2+\alpha) \int_{-1}^0 {f(s)\over A} ds .$$
This implies 2.
\end{rema}
\begin{prop} For $\alpha>-1$ there exists a solution of (\ref{weak}), and for $\alpha\leq 0$ this solution is unique.\end{prop}
Proof.
To prove existence and uniqueness observe that both the equations (\ref{cauchydelta}) and (\ref{weak}) can be written, with $u=X$ and $Y= |u^\prime|^\alpha u^\prime$, under the following form
\begin{equation}\label{eqcauhlip} \left(\begin{array}{c} X^\prime\\
Y^\prime\end{array} \right) = \left( \begin{array}{c}
|Y|^{\frac{1}{\alpha+1}-1}Y\\
-(1+\alpha)f_{a,A}(X) \end{array}\right)
\end{equation}
with the initial conditions $X(0) = 0$, $Y(0) =| \delta|^\alpha \delta $ and the map $(X, Y) \mapsto \left( \begin{array}{c}
|Y|^{\frac{1}{\alpha+1}-1}Y\\
-(1+\alpha)f_{a,A}(X)
\end{array}\right)$
is continuous. When $\alpha\leq 0$ it is Lipschitz continuous; and when $\alpha>0$ it is Lipschitz continuous for $Y(0)\neq 0$.
Now the result is just an application of the classical Cauchy Peano's Theorem,
and the Cauchy Lipschitz theorem.
It is immediate to see that weak solutions and the solutions of (\ref{eqcauhlip}) are the same.
This ends the proof.
Observe that weak solutions are viscosity solutions. Indeed, it is clear that $|u^\prime |^\alpha u^\prime$ is ${\cal C}^1$, hence if $u^\prime \neq 0$, $u^\prime$ is ${\cal C}^1$. Finally $u$ is ${\cal C}^2$ on each point where the derivative is different from zero and on such a point the equation is $-|u^\prime |^\alpha u^{\prime\prime} = f(u(x))$ so $u$ is a viscosity solution.
We now consider the case where $u$ is locally constant on $]x_1-r, x_1+r[$ for some $r >0$: the ``weak equation'' gives $f(u(x_1))= 0$, hence $u(x_1) = 0$, $1$ or $-1$, and $u$ is a viscosity solution.
We now assume that $\alpha \leq 0$ and recall that according to the regularity results in \cite{BD10} applied in the one dimensional case, the solutions are ${\cal C}^2$. We now prove that the viscosity solutions are weak solutions.
When $u^\prime(x)\neq 0$ or when $u$ is locally constant, it is immediate that $u$ is a weak solution in a neighborhood of that point.
So, without loss of generality, we suppose that,
$u^\prime (x_1) = 0$, $1>u(x_1) >0$ and hence $u$ is not locally constant.
Then, by continuity of $u$ and the equation, there exists $r>0$ such that
$$u^{\prime\prime}\leq 0 \quad\mbox{in}\quad (x_1-r,x_1+r).$$
Furthermore there exists $(x_n)_n$, such that $x_n\in (x_1-r,x_1)$, $x_n\rightarrow x_1$ and
$u^\prime(x_n)\neq 0$;
by the equation we obtain that
$$u^{\prime\prime}(x_n)<0.$$
Finally, $u^\prime(x)=\int_{x_1}^x u^{\prime\prime}(t)dt>0$ for $x\in (x_1-r,x_1)$.
Similarly $u^\prime(x)<0$ for $x\in (x_1,x_1+r)$.
By uniqueness of the weak solutions , $u$ satisfies in a neighborhood of $x_1$:
$$-{d\over dx} (|u^\prime|^\alpha u^\prime) = {(1+\alpha)f(u(x))\over a}.$$
This proves that $u$ is a weak solution.
\begin{prop}\label{propcauchy}
Suppose that $\alpha \leq 0$. Let $u_\delta$ be the unique solution of (\ref{cauchydel}). Then for $\delta_1$ defined in (\ref{delta1}),
\noindent 1) If $\delta > \delta_1$, $|u_\delta (x)| \geq C|x|$ for $C=\delta^{2+\alpha}-\delta_1^{2+\alpha}$.
In particular $\displaystyle\lim_{x\rightarrow \pm \infty}u_\delta(x) = \pm \infty$ and $u^\prime_\delta >0$.
\noindent 2) If $\delta = \delta_1$, $u^\prime _\delta >0$ in ${\rm I}\!{\rm R}$ and $\displaystyle \lim_{x\rightarrow +\infty} u_\delta (x) = 1 $, $\displaystyle\lim_{x\rightarrow -\infty}u_\delta (x) = -1$.
\noindent 3) If $-\delta_1\leq \delta < \delta_1$ then $ |u_\delta(x)| < 1$ for any $x\in{\rm I}\!{\rm R}$. The solution can oscillate.
\noindent 4) If $\delta < -\delta_1$, $u_\delta $ is decreasing on ${\rm I}\!{\rm R}$, hence $u_\delta<0$ on ${\rm I}\!{\rm R}^+$, $u_\delta >0$ on ${\rm I}\!{\rm R}^-$.
\end{prop}
\begin{rema}
The case 2) in Proposition \ref{propcauchy} is clearly false in the case $\alpha >0$, as one can see with the example:
$\alpha = 2$, $f(u) = u-u^3$, $u(x) = \sin x$; $u$ satisfies $u^\prime (0) = \delta_1 = \left(4 \int_0^1 f(s) ds\right)^{1\over 4}$, $u({\pi\over 2})=1$, and it oscillates.
\noindent However the conclusion in the other cases holds for any $\alpha$.
\end{rema}
Proof of Proposition \ref{propcauchy}.
1 \& 4) To fix the ideas we suppose that $\delta> \delta_1$, the proof is identical in the case $\delta<-\delta_1$. For $x>0$, since $u_\delta>0$ one has
\begin{eqnarray*} |u^\prime_\delta| ^{2+\alpha} (x) &=& \delta^{2+\alpha} - (2+\alpha) \int_0^{u_\delta(x)} {f(s)\over a} ds \\
&=& \delta^{2+\alpha} -\delta_1^{2+\alpha} + (2+\alpha)\int_{u_\delta (x)}^{1} {f(s)\over a} ds \\
&\geq & \delta^{2+\alpha} -\delta_1^{2+\alpha}:=C.
\end{eqnarray*}
This proves, in particular, that $u_\delta ^\prime (x) \neq 0$ for all $x$ and the Cauchy--Lipschitz theorem ensures the local existence and uniqueness at every point, hence also the global existence. From this, we also derive that $u^\prime _\delta >0$ and for $x>0$, $u_\delta (x ) \geq Cx$, and symmetric estimates for $x<0$ give $u_\delta (x) \leq C x$.
2) If $\delta = \delta_1$ then $|u^\prime_\delta|^{2+\alpha}(x) = (2+\alpha )\int_{u_\delta (x)} ^{1} {f(s )\over a} ds > 0$. Suppose that there exists some point $\bar x$ such that $u_\delta(\bar x) = 1$ then $u_\delta^\prime(\bar x ) = 0$.
By the uniqueness of the solution $u_\delta(x)\equiv 1$ which contradicts the fact that $u_\delta^\prime (0) = \delta_1\neq 0$.
We have obtained that $u_\delta (x) < 1$ everywhere. Moreover $u_\delta $ is increasing and bounded then $\lim_{x\rightarrow+ \infty} u_\delta^\prime = 0$. By hypothesis 3. on $f$, this implies that $\lim_{x\rightarrow + \infty} u_\delta (x) = 1$.
3) Suppose that $0 < \delta < \delta_1$, and let $\theta^+$ be such that $(2+\alpha) \int_0^{\theta^+} \frac{f(x)}{a}dx = \delta^{2+\alpha}$, which exists by the mean value theorem. Either $u_\delta < \theta^+ $ for all $x$, or there exists $x_1$ such that $u_\delta (x_1) = \theta^+$, and then $u^\prime_\delta (x_1) = 0$. Let us note that $ u = \theta^+ $ on a neighborhood of $x_1$ is not a solution since $f(\theta^+) \neq 0$.
So $u_\delta$ is not locally constant and in particular, in a right neighborhood of $x_1$:
$$ \exists \varepsilon_o, \ u^{\prime\prime}_\delta (x) \leq 0, \ u^{\prime\prime}_\delta\not\equiv 0$$
for all $x\in (x_1, x_1+\varepsilon_o)$, hence
$u^\prime_\delta (x) <0$ in $(x_1, x_1+\varepsilon_o)$.
So $u$ is decreasing until it reaches a point where $u^\prime_\delta (x_2) = 0$. Observe that by the equation
$$ 0 = |u^\prime_\delta| ^{2+\alpha} (x_2) = - (2+\alpha) \int_{\theta^+}^{u_\delta(x_2)} f_{a,A}(s) ds .$$
Hence $u(x_2) = \theta^-\in (-1,0)$.
We can reason as above and obtain that $u$ oscillates between $\theta^-$ and $\theta^+$.
\end{document} |
\begin{document}
\title{Balanced power diagrams for redistricting}
\author{Vincent Cohen-Addad\thanks{CNRS, UPMC Paris}\and
Philip N. Klein\thanks{Brown University, Research supported by
National Science Foundation Grant CCF-1409520.} \and Neal
E. Young\thanks{University of California, Riverside. Research
supported by NSF Grant IIS-1619463}}
\date{January 6, 2018}
\maketitle
\begin{abstract}
We explore a method for \emph{redistricting}, decomposing a
geographical area into subareas, called \emph{districts}, so that the
populations of the districts are as close as possible and the
districts are compact and contiguous. Each district is the
intersection of a polygon with the geographical area. The polygons
are convex and the average number of sides per polygon is less than
six. The polygons tend to be quite compact. With each polygon is
associated a \emph{center}. The center is the centroid of the
locations of the residents associated with the polygon. The algorithm
can be viewed as a heuristic for finding centers and a balanced assignment of
residents to centers so as to minimize the sum of squared distances of
residents to centers; hence the solution can be said to have low
dispersion.
\end{abstract}
\section{Introduction}\label{sec: intro}
\paragraph{Redistricting.}
\emph{Redistricting},
in the context of elections
refers to decomposing a geographical area into subareas such that all
subareas have the same population. The subareas are called
\emph{districts}. In most US states, districts are supposed to be
\emph{contiguous} to the extent that is possible.
Contiguous can reasonably be interpreted to mean
\emph{connected}.
In most states, districts are also supposed to be
\emph{compact}. This is not precisely defined in law.
Some measures of
compactness are based on boundaries; a district is preferred if its
boundaries are simpler rather than contorted. Some measures are based
on \emph{dispersion}, ``the degree to which the district spreads from a
central core''~\cite{Levitt}.
Idaho directs its redistricting commission
to ``avoid drawing districts that are oddly shaped.'' Other states
loosely address the meaning of compactness: ``Arizona and
Colorado focus on contorted boundaries; California, Michigan, and
Montana focus on dispersion; and Iowa embraces both''~\cite{Levitt}.
\paragraph{Balanced centroidal power diagrams}\label{sec: intro def}
The goal of this paper is to explore a particular approach to redistricting:
\emph{balanced centroidal power diagrams}. Given the locations
of a state's $m$ residents and given the desired number $k$ of
districts, a balanced centroidal power diagram
partitions the state into $k$ districts with the following desirable properties:
\begin{description}
\item[(P1)] each district is the intersection of the state with a convex polygon,\label{prop:polygon}
\item[(P2)] the average number of sides per polygon is less than six, and
\item[(P3)] the populations of the districts differ by at most one.\label{prop:population}
\end{description}
A balanced centroidal power diagram is a particular kind of
(not necessarily optimal) solution to an optimization problem called
\emph{balanced $k$-means clustering}: given a set $P$ of $m$ points (the \emph{residents})
and the desired number $k$ of clusters, a
solution (not necessarily of minimum cost) consists of a sequence $C$ of $k$ points
(the \emph{centers}) and an assignment $f$ of
residents to centers that is \emph{balanced}:
it assigns $\lfloor m/k\rfloor$ residents to each of the first $i$ centers,
and $\lceil m/k\rceil$ residents to each of the remaining $k-i$ centers
(for the $i$ such that $i \lfloor m/k\rfloor + (k-i) \lceil m/k\rceil = m$).
The \emph{cost} of a
solution $(C,f)$ is the sum, over the residents,
of the square of the Euclidean distance between the resident's location and assigned center.
(This is a natural measure of dispersion.)
In \emph{balanced $k$-means clustering}, one seeks a solution of minimum cost.
This problem is NP-hard~\cite{mahajan2009planar}.
A balanced centroidal power diagram arises from a solution to balanced
$k$-means clustering that is not necessarily of minimum cost.
Instead, the solution $(C, f)$ only needs to be a \emph{local minimum},
meaning that it is not possible to lower the cost by just varying $f$
(leaving $C$ fixed), or just varying $C$ (leaving $f$ fixed).
Local minima tend to have low cost, so tend to have low dispersion.
Section~\ref{sec: history} reviews the meaning of the
terms \emph{centroidal} and \emph{power diagram},
and discusses how any such local minimum yields districts
(with each district containing the residents assigned to one center)
for which the desirable properties~(P1)--(P3)
are mathematically guaranteed.
Convex polygons with few sides are arguably well shaped,
and their boundaries are arguably not contorted.
The idea and its application to redistricting are not novel. Spann et
al.~\cite{spann_electoral_2007} describes a method to find a
centroidal power diagram that is nearly balanced (to within 2\%).
Their solutions are thus not exactly balanced in the sense we have
defined. We discuss this and other related work
in Section~\ref{sec: history}.
Figures~\ref{fig:FL} to~\ref{fig:LI}
show proposed districts corresponding to balanced centroidal power diagrams
for the six most populous states in the U.S.,
based on population data from the 2010 census
(locating each resident at the centroid of that resident's census
block). We will also show such diagrams at a web site, \url{district.cs.brown.edu}.
We computed these diagrams efficiently using a variant of Lloyd's algorithm:
start with a random set $C$ of centers,\footnote
{The probability distribution we used for the initial set of centers is from~\cite{ArthurV07}.}
then repeat the following steps until an equilibrium is reached:
(1) given the current set $C$ of centers, compute a balanced assignment $f$ that minimizes the cost;
(2) given that assignment $f$, change the locations of the centers in $C$ so as to minimize the cost.
\newcommand{\imagefigure}[3]
{\begin{figure}
\caption{#2}
\label{#3}
\end{figure}}
\imagefigure{florida.pdf}{Florida (27 districts)}{fig:FL}
\imagefigure{california.pdf}{California (53 districts).}{fig:CA}
\imagefigure{gnuplot__5_06SF.pdf}{Bay Area (detail of \emph{California}).}{fig:bay}
\imagefigure{texas.pdf}{Texas (36 districts).}{fig:TX}
\imagefigure{alabama.pdf}{Alabama (7 districts).}{fig:AL}
\imagefigure{illinois.pdf}{Illinois (18 districts).}{fig:IL}
\imagefigure{new-york.pdf}{New York (27 districts).}{fig:NY}
\imagefigure{gnuplot__5_36NY.pdf}{Long Island, New York and Manhattan
(detail from \emph{New York}).}{fig:LI}
Some might object that the method does not provide the scope for
achieving some other goals, e.g.\ creating competitive districts. A
counterargument is that one should \emph{avoid} providing politically
motivated legislators the scope to select boundaries of districts so
as to advance political goals. According to this argument, the less
freedom to influence the district boundaries, the better. This method
does not guarantee fairness in outcome; the fairness is in the
process. This point was made, e.g., by Miller~\cite{miller_problem_2007}.
Note that in real applications of redistricting, the locations of
people are not given precisely. Rather, there are regions, called
\emph{census blocks}, and each such region's population is specified.
\section{Balanced centroidal power diagrams}\label{sec: history}
The use of optimization, generally, for redistricting has been
proposed starting at least as far back as 1965 and has continued up to the
present~\cite{hess,garfinkel_optimal_1970,eppstein_defining_2017}.
See~\cite{altman2010promise,olson_rangevoting.org_2011} for additional references.
In what follows, we focus specifically on the use of balanced centroidal power diagrams.
Next is a summary of the relevant history, interspersed with necessary definitions.
Throughout,
$P$ (the \emph{population}) denotes a set of $m$ \emph{residents} (points in a Euclidean space),
$C$ denotes a sequence of $k$ \emph{centers} (points in the same space),
$f:P\rightarrow C$ denotes an assignment of residents to centers,
and $d(y, x)$ denotes the distance from $y\in P$ to $x\in C$.
We generally consider the parameters $P$ and $k$ to be fixed throughout,
while $C$ and $f$ vary.
\newcommand{\power}[2]{{\cal P}(#1, #2)}
\newcommand{\Power}[3]{{\cal P}(#1, #2, #3)}
\newcommand{\voronoi}[1]{{\cal V}(#1)}
\newcommand{\Voronoi}[2]{{\cal V}(#1, #2)}
\paragraph{The power diagram of $(C, w)$.}
Given any sequence $C$ of centers, and a weight $w_x\in\mathbb{R}$ for each center $x\in C$,
the \emph{power diagram} of $(C, w)$, denoted $\power C w$, is defined as follows.
For any center $x\in C$,
the \emph{weighted squared distance} from any point $y$ to $x$ is $d^2(y,x) - w_x$.
The \emph{power region} $C_x$ associated with $x$ consists of all points
whose weighted squared distance to $x$ is no more than the weighted squared distance to any other center.
The power diagram $\power C w$ is the collection of these power regions.
An assignment $f: P \rightarrow C$ is \emph{consistent} with $\power C w$
if every resident assigned to center $x$ belongs to the corresponding region $C_x$.
(Residents in the interior of $C_x$ are necessarily assigned to $x$.)
$\Power C w f$ denotes the power diagram $\power C w$ augmented with such an assignment.
Power diagrams are well-studied~\cite{aurenhammer_power_1987}.
If the Euclidean space is $\mathbb{R}^2$, it is known that each power region $C_x$ is necessarily a (possibly infinite) convex polygon.
If each weight $w_x$ is zero, the power diagram is also called a \emph{Voronoi diagram},
and denoted $\voronoi C$.
Likewise $\Voronoi C f$ denotes the Voronoi diagram extended with a consistent assignment $f$
(which simply assigns each resident to a nearest center).
\paragraph{Centroidal power diagrams.}
A \emph{centroidal power diagram} is an augmented power diagram $\Power C w f$
such that the assignment $f$ is \emph{centroidal}:
each center $x\in C$ is the centroid (center of mass) of its assigned residents, $\{y\in P: x = f(y)\}$.
\paragraph{Centroidal Voronoi diagrams.}
Centroidal Voronoi diagrams (a special case of centroidal power diagrams)
have many applications~\cite{du_centroidal_1999}.
One canonical application from graphics is downsampling a given image,
by partitioning the image into regions, then selecting a single pixel from each region to represent the region.
Centroidal Voronoi diagrams are preferred over arbitrary Voronoi diagrams
because the regions in centroidal Voronoi diagrams tend to be more compact.
\emph{Lloyd's method} is a standard way to compute a centroidal Voronoi diagram $\Voronoi C f$,
given $P$ and the desired number of centers, $k$~\cite
[\S\,5.2]{du_centroidal_1999}.
Starting with a sequence $C$ of $k$ randomly chosen centers,
the method repeats the following steps until the steps do not cause a
change in $f$ or $C$:
\begin{enumerate}
\item Given $C$, let $f$ be any assignment assigning each resident to a nearest center in $C$.
\item Move each center $x\in C$ to the centroid of the residents that $f$ assigns to $x$.
\end{enumerate}
Recall that the \emph{cost} is $\sum_{y\in P} d^2(y, f(y))$.
Step (1) chooses an $f$ of minimum cost, given $C$.
Step (2) moves the centers to minimize the cost, given $f$.
Each iteration except the last reduces the cost, so the algorithm terminates
and, at termination, $(C,f)$ is a \emph{local minimum} in the following sense:
by just moving centers in $C$, or just changing $f$, it is not possible to reduce the cost.
In the last iteration,
Step (1) computes $f$ that is consistent with $\voronoi C$,
and Step (2) does not change $C$, so $f$ is centroidal.
So, at termination, $\Voronoi C f$ is the desired centroidal Voronoi diagram.
Miller~\cite{miller_problem_2007} and Kleiner et al.\xspace~\cite{kleiner_political_2013}
explore the use of centroidal Voronoi diagrams specifically for \emph{redistricting}.
The resulting districts (regions) are guaranteed to be polygonal,
and tend to be compact, but their populations can be far from balanced.
To address this,
consider instead \emph{balanced} centroidal power diagrams, described next,
{which can be computed using} a capacitated variant of Lloyd's method.
\paragraph{Balanced power diagrams.}
A \emph{balanced} power diagram is an augmented power diagram $\Power C w f$
such that the assignment $f$ is balanced (as defined in the introduction).
Hence, the numbers of residents in the regions of $\power C w$ differ by at most 1.
Such regions are desirable in many applications.
Aurenhammer et al.\xspace~\cite[Theorem~1]{aurenhammer_minkowski-type_1998}
give an algorithm that, in the case of a Euclidean metric, given $P$ and $C$,
computes weights $w$ and an assignment $f$
such that $\Power C w f$ is a balanced power diagram,
and $f$ has minimum cost among all balanced assignments of $P$ to $C$.
We observe in Section~\ref{duality} that, given $P$, $C$,
there exist weights $w$ such that $\Power C w f$ is a balanced power diagram
for \emph{any} minimum-cost balanced assignment $f$ and any metric.
Such an argument was previously presented by Spann et al.~\cite{spann_electoral_2007}.
\paragraph{Computing a balanced centroidal power diagram for $P$.}
A \emph{balanced centroidal power diagram} is an augmented power diagram $\Power C w f$
such that $f$ is both balanced and centroidal.
We
{implement} the following capacitated variant of Lloyd's method to compute such a diagram,
given $P$ and the desired number $k$ of centers.
Starting with a sequence $C$ of $k$ randomly chosen centers,
repeat the following steps until Step (2) doesn't change $C$:
\begin{enumerate}
\item Given $C$, compute a minimum-cost balanced assignment $f:P\rightarrow C$.
\item Move each center $x\in C$ to the centroid of the residents that $f$ assigns to it.
\end{enumerate}
As in the analysis of the uncapacitated method,
each iteration except the last reduces the cost, $\sum_{y\in P} d^2(y, f(y))$,
and at termination, the pair $(C, f)$ is a local minimum in the following sense:
by just moving the centers in $C$,
or just changing $f$ (while respecting the balance constraint), it is not possible to reduce the cost.
The problem in Step (1) can be solved via Aurenhammer et al.\xspace's algorithm, described previously.
Instead, as described in Section~\ref{duality},
we solve it by reducing it to minimum-cost flow;
yielding both the stipulated $f$
and (via the dual variables) weights $w$ such that $\Power C w f$ is a
balanced power diagram. Note that the solution obtained by
minimum-cost flow assigns each person to a single district.
In the last iteration,
Step (2) does not change $C$, so $f$ is also centroidal,
and at termination $\Power C w f$ is a balanced centroidal power
diagram, as desired.
In previous work, Spann et al.\xspace~\cite{spann_electoral_2007} proposed a
similar iterative method to find a centroidal power diagram. They did
not seem to be aware of the work of Aurenhammer
\cite{aurenhammer_minkowski-type_1998} but used a duality argument to
derive power weights. It is
not clear from their paper precisely how their method carries out
Step~(1). They state that their implementation starts by allowing a
20\% deviation from balance, and iteratively reduces the allowed
deviation over a series of iterations, adjusting the target
populations per district, and terminates when the deviation is within
2\% of balanced. We believe that the additional complexity and the failure
to achieve perfect balance is a result of the authors' effort to
ensure that census blocks are not split.
Hess et al.~\cite{hess} had previously given a similar method. Like
that of Spann et al., this method ensures that census enumeration
districts (analogous to census blocks) were not split, and as a
consequence did not achieve perfect balance. Unlike the method of
Spann et al., the method of Hess et
al. did not compute power weights and did not output a power diagram;
presumably each output district is defined as the union of census
enumeration districts and is therefore not guaranteed to be connected.
Balzer et al.\xspace~\cite{balzer_capacity-constrained_2008,balzer_capacity-constrained_2009}
proposed an algorithm equivalent to {the iterative algorithm above},
except that a local-exchange heuristic (updating $f$ by swapping pairs of residents) was proposed to carry out Step (1).
That heuristic does not guarantee that $f$ has minimum cost (given $C$),
so does not in fact guarantee that the assignment is consistent with a balanced power diagram (see Figure~\ref{fig: Balzer incorrect}).
{Other} previously published algorithms~\cite
{balzer_capacity-constrained_2008,balzer_capacity-constrained_2009,li_fast_2010,de_goes_blue_2012,xin_centroidal_2016}
for balanced centroidal power diagrams address applications (e.g.~in graphs) that have very large instances,
and for which it is not crucial that the power diagrams be exactly centroidal or exactly balanced.
{This class of algorithms prioritizes speed, and none are}
guaranteed to find a local minimum $(C, f)$, nor a balanced centroidal power diagram.
Helbig, Orr, and Roediger~\cite{helbig1972political} proposed a somewhat similar
redistricting algorithm. Like {the algorithm above}, their algorithm
initializes the center locations randomly and then alternates between
(1) using mathematical programming to find an assignment of residents
to centers and (2) replacing each center with the centroid of the
residents assigned to it.
But their
assignment of residents to centers is chosen in each iteration to
minimize the sum of distances, not the sum of squared distances. This
means that the partition does not correspond to a power diagram.
Indeed, Helbig et al.\xspace acknowledge the possibility that noncontiguous
districts could result although they did not observe this occurring.
Their mathematical program for the assignment also
treats each ``population unit'' (e.g.~census block) atomically,
rather than treating each individual that way. Thus their method never splits a population unit into two districts.
While a solution with this property might be desirable, imposing this
requirement means that a solution might not exist that achieves
population balance. Moreover, their
mathematical program constrains the \emph{number} of population units
assigned to a center to be a certain number, rather than constraining
the population to be a certain number. Since different population
units have different populations, this might not achieve population balance.
Helbig et al.\xspace address this issue by iteratively modifying the number
of population units to be assigned to each center using a heuristic.
This does not guarantee convergence, so they allow their algorithm to stop
before reaching a true local minimum.
{For an excellent survey of the redistricting algorithms,
including additional discussion of the Spann et al.\xspace algorithm
and extensions to districts lying on the sphere,
see the online survey by Olson and Smith~\cite{olson_rangevoting.org_2011}.}
\begin{figure}
\caption{\small A counter-example to the swap-based balanced assignment algorithm of Balzer
et al.\xspace~\cite{balzer_capacity-constrained_2008,balzer_capacity-constrained_2009}
\label{fig: Balzer incorrect}}
\end{figure}
\section{An Implementation}\label{sec: details}
\subsection{Minimum-cost flow}\label{duality}
Aurenhammer et al.~\cite{aurenhammer_minkowski-type_1998} provide an
algorithm that, given the set $P$ of locations of residents and the
sequence $C$ of centers, and given a target population for each
center (where the targets sum to the total population), finds a
minimum-cost assignment $f$ of residents to centers
subject to the constraint that the number of residents assigned to
each center equals the center's target population. Their algorithm
also outputs weights $w$ for the centers such that the assignment $f$
is consistent with $\power C w$. Their algorithm can be used to find
a minimum-cost balanced assignment by using appropriate targets.
In the implementation here, we take a different approach to computing the
minimum-cost balanced assignment: we use an algorithm for minimum-cost flow.
Aurenhammer et al.~\cite{aurenhammer_minkowski-type_1998} acknowledge
that a minimum-cost flow algorithm can be used but argue that their
method is more computationally efficient. As we observe below,
the necessary weights $w$ can be computed from the values of the variables of the
linear-programming dual to minimum-cost flow.
The goal is to find a balanced assignment $f:P\rightarrow C$ of minimum cost, $\sum_{y\in P} d^2(y, f(y))$.
Let $\mu_x\in\{\lfloor m/k\rfloor, \lceil m/k\rceil\}$ be the number of residents
that $f$ must assign to center $x\in C$.
Consider the following linear program and dual:
\begin{center}
\begin{tabular}{|@{ } l @{ } | @{ } l @{ } |} \hline
\adjustbox{valign=t}{
\parbox{0.4\textwidth}{
\begin{align*}
& \text{minimize}_a~ \lefteqn{ \textstyle\sum_{y\in P, x\in C} d^2(y, x)\, a_{yx} } \\
& \text{subject to } & \textstyle\sum_{y\in P} a_{yx} & {} = \mu_x & (x\in C) \\
& & \textstyle\sum_{x\in C} a_{yx}& {} = 1 & (y\in P) \\
& & a_{yx} & {} \ge 0 & (x\in C, y\in P)
\end{align*}
}}
&
\adjustbox{valign=t}{
\parbox{0.5\textwidth}{
\begin{align*}
& \text{maximize}_{w,z}~ \lefteqn{ \textstyle\sum_{x\in C} \mu_x \, w_x + \sum_{y\in P} z_y } \\
& \text{subject to } & z_y & \le d^2(y,x) - w_x & (x\in C, y\in P)
\end{align*}
}}
\\ \hline
\end{tabular}
\end{center}
This linear program models the standard \emph{transshipment} problem.
As the capacities $\mu_x$ are integers with $\sum_x \mu_x = |P|$,
it is well-known that the basic feasible solutions to the linear program are 0/1 solutions ($a_{yx} \in \{0,1\}$),
and that the (optimal) solutions $a$ correspond to the (minimum-cost) balanced assignments $f:P\rightarrow C$
such that $a_{yx} = 1$ if $f(y) = x$ and $a_{yx} = 0$ otherwise.
{The implementation here} solves the linear program and dual by
using Goldberg's minimum-cost flow solver~\cite{Goldberg97}
to obtain a minimum-cost balanced assignment $f^*$ and an optimal dual solution $(w^*,z^*)$.
For any minimum-cost balanced assignment $f$ (such as $f^*$)
the resulting weight vector $w^*$ gives a balanced power diagram $\Power C {w^*} f$:
\begin{lemma}[{{see also~\cite{spann_electoral_2007}}}]
Let $(w^*,z^*)$ be any optimal solution to the dual linear program above.
Let $f$ be any balanced assignment.
Then $\Power C {w^*} f$ is a balanced power diagram
if and only if $f$ is a minimum-cost balanced assignment.
\end{lemma}
\begin{proof}
Let $a$ be the linear-program solution corresponding to $f$.
\noindent\emph{(If.)}
Assume that $f$ has minimum cost among balanced assignments.
Consider any resident $y\in P$.
By complementary slackness, for $x'=f(y)$, the dual constraint for $(x', y)$ is tight,
that is, \(z^*_y = d^2(y, f(y)) - w^*_{f(y)}. \)
Combining this with the dual constraint for $y$ and any other $x\in C$ gives
\[ d^2(y, f(y)) - w^*_{f(y)} \,=\, z^*_y \,\le\, d^2(y, x) - w^*_{x}.\]
That is, from $y$, the weighted squared distance to $f(y)$
is no more than the weighted squared distance to any other center $x\in C$.
So, $y$ is in the power region $C_{f(y)}$ of its assigned center $f(y)$.
Hence, $f$ is consistent with $\power C {w^*}$, and $\Power C {w^*} f$ is a balanced power diagram.
\noindent\emph{(Only if.)}
Assume that $f$ is consistent with $\power C {w^*}$.
That is, the weighted squared distance from $y$ to $f(y)$
is no more than the weighted squared distance to any other center $x\in C$.
That is, defining $z'_y = d^2(y, f(y)) - w^*_{f(y)}$,
\[ z'_y \,=\, d^2(y, f(y)) - w^*_{f(y)}\,\le\, d^2(y, x) - w^*_{x}.\]
Thus, $(w^*, z')$ is a feasible dual solution.
Furthermore, the complementary slackness conditions hold for $a$ and $(w^*,z')$.
That is, $a_{yx} > 0 \implies f(y) = x \implies z'_y = d^2(y, x) - w^*_x$.
Hence, $a$ and $(w^*, z')$ are optimal.
Since $a$ is optimal, $f$ has minimum cost.
\end{proof}
\subsection{Experiments}
We ran the implementation on various
instances of the redistricting problem.
We considered the following US states: Alabama, California,
Florida, Illinois, New York, and Texas. Note that this list
of states contains the biggest states in terms of population
and number of representatives, and so our algorithm is usually
faster on smaller states.
For each of these states, we used the data provided by
the US Census Bureau~\cite{USCB}, namely the population and
housing unit count by block from the 2010 census. Hence, the input
for our algorithm was a weighted set of points in the plane where
each point represents a block and its weight represents
the number of people living in the block.
For each state, we defined the number of clusters to be
the number of representatives prescribed for the state. See
Table~\ref{T:data} for more details.
\begin{table}[h!]
\centering
\begin{tabular}{|l|c|c|r|}
\hline
State & Number of representatives & Population
& Number of iterations to converge\\
\hline
\hline
Alabama & 7 & 4779736 & 28\\
California & 53 & 37253956 & 49\\
Florida & 27 & 18801310 & 51\\
Illinois & 18 & 12830632 & 72\\
New York & 27 & 19378102 & 65\\
Texas & 36 & 25145561 & 42\\
\hline
\end{tabular}
\caption{The states considered in our experiments together
with the number of clusters (i.e.:\ number of representatives)
and number of clients (i.e.\ population of the state).}\label{T:data}
\end{table}
We note that in all cases the
algorithm converged to a local optimum.
\subsection{Technical details and implementation}
{The implementation is} available at \url{https://bitbucket.org/pnklein/district}.
It is written mostly in \texttt{C++}.
Our implementation makes use of a slightly adapted version of a
min-cost flow implementation, \texttt{cs2} due to Andrew Goldberg and
Boris Cherkassky and described in~\cite{Goldberg97}. The copyright on
\texttt{cs2} is owned by IG Systems, Inc., who grant permission to use for evaluation
purposes provided that proper acknowledgments are given. If there is
interest, we will write a min-cost flow implementation that is unencumbered.
We also provide Python-3 scripts for reading census-block data,
reading state boundary data, finding the boundaries of the power
regions, and generating \texttt{gnuplot} files to produce the figures
shown in the paper. These figures superimposed the boundaries of the
power regions and the boundaries of states (obtained from~\cite{USCB2}).
For our experiments, the programs were compiled using \texttt{g++-7}
and run on a laptop with processor \texttt{Intel Core i7--6600U CPU, 2.60GHz} and total virtual
memory of 8GB. The system was \texttt{Debian buster/sid}.
The total running time was less than fifteen minutes for all instances
except California, which took about an hour.
\section{Concluding remarks}
The method explored in this paper outputs districts that are convex
polygons with few sides on average and that are balanced with respect
to population, i.e. where the populations in two districts differ by
at most one. However, such balance cannot be guaranteed under a
requirement that certain geographical regions, e.g. census blocks or
counties, remain intact. Since the locations of people within census
blocks are not known, the requirement is sensible.
One possible way to address the requirement is to first compute districts while
disregarding the requirement, then use dynamic programming to modify
the solution to obey that requirement while minimizing the resulting
imbalance.
We have focused in this paper on the Euclidean plane. This ensures
that each district is the intersection of the geographical region
(e.g.~state) with a polygon. However, in view of the fact that the
method
{explored here} might generate a district that includes residents
separated by water, mountains, etc.,
one might want to consider
a different metric, e.g.~to take travel time into account. Suppose,
for example, the metric is that of an undirected graph with
edge-lengths. One can use essentially the same algorithm for
finding a balanced centroidal power diagram. Computing a minimum-cost
balanced assignment (Step~1) and the associated weights can still be
done using an algorithm for minimum-cost
flow as described in Section~\ref{duality}. In Step~2, the algorithm
must move each center to the location that minimizes the sum of
squared distances from the assigned residents to the new center
location. In a graph, we limit the candidate locations to the
vertices and possibly locations along the edges. Under such a limit,
it is not hard to compute the best locations.
\end{document} |
\begin{document}
\title{Algorithmic Decision Processes}
\author{C. Baldassi\quad F. Maccheroni\quad M. Marinacci\quad M. Pirazzini}
\maketitle
\begin{abstract}
We develop a full-fledged analysis of an algorithmic decision process that, in
a multialternative choice problem, produces computable choice probabilities
and expected decision times.
\end{abstract}
\section{Introduction}
\paragraph{An algorithmic decision procedure}
In a multialternative choice problem, decision units aim to find the best
alternatives within a finite choice set $A$ of available ones. Had they
unlimited resources allowing them to make an unconstrained number of exact
judgments between alternatives, they could proceed by standard revision. This
brute force comparison-and-elimination algorithm sequentially analyzes pairs
of alternatives and permanently discards the inferior ones. If the unit
preferences are complete and transitive, after $\left\vert A\right\vert -1$
comparisons the incumbent solution of this algorithm is an optimal choice.
Implicit in traditional choice theory is an underlying algorithm of this kind.
Yet, decision units' resources are typically too limited to implement a
standard revision procedure. Indeed, binary comparisons are typically costly,
time-consuming and subject to error (so inexact). This happens because the
decision unit may only imperfectly know the relative desirability of the
competing alternatives. As a result, deliberation over them consumes resources
(economic or physiological), requires time and is subject to
error.\footnote{Here we are abstracting from inescapable non-decision times.}
In choice episodes involving the same pair of alternatives, different decision
times and choices may be then observed. Binary choice behavior can be
described only in probabilistic terms, with stochastic decision times and
choice probabilities.
The well-known limits of working memory suggest a sequential structure for the
multialternative choice problem. Costly, time-consuming and inexact binary
comparisons unfold one after the other through a stochastic exploration
process, with alternatives playing the roles of proposals and incumbents in
each binary comparison. An iterative choice process then operates. The
decision unit limited resources constrain this process by limiting the number
of executable binary comparisons, with a (possibly random) cap on the number
of affordable iterations. When the process is terminated, an alternative is
selected. The inexact nature of comparisons and the stochasticity of the
exploration process make this selection random.
We formalize this schema through a decision procedure, the Neural Metropolis
Algorithm, that parsimoniously adapts standard revision by building on
sequential binary comparisons between proposals and incumbents, explicitly
modelled as time-consuming and subject to errors (so stochastic), that unfold
through a Markovian exploration of the choice set $A$. A stopping number,
determined by the decision unit resources, terminates the iterations of the
algorithm and thus makes the algorithm select an alternative. Different
iterations of the algorithm may result in different selections of alternatives
because of the stochastic nature of binary comparisons and of Markovian exploration.
To the best of our knowledge, this is the first full-fledged analysis of an
algorithmic decision process over a choice set. We are able to derive both the
choice probabilities and expected decision times that the Neural Metropolis
Algorithm generates, with closed forms in some noteworthy cases (Section
\ref{sect:nma}). We are also able to provide a value representation for the
algorithm, which proceeds as if governed by some underlying utility judgements
over alternatives (Section \ref{sect:rev}). In so doing, we generalize and
extend to pairs of choice probabilities and decision times some basic
results of traditional stochastic choice.
This value foundation makes it possible to interpret our algorithmic decision
unit as the neural system of a decision maker that confronts a decision
problem. In particular, traditional choice analysis can be implemented by the
Neural Metropolis Algorithm when the resource constraint is relaxed, as is
the case in the traditional choice analysis sketched at the outset. On the
other hand, our algorithm may incorporate neuroscience binary choice models,
like the Drift Diffusion Model (DDM), which thus get embedded in a sequential
multialternative decision process.
\paragraph{Outline of the analysis}
We begin the analysis by generalizing traditional stochastic choice by
introducing binary choice probabilities $\rho\left( i\mid j\right) $ to
model binary comparisons that, in a sequential setting, involve alternatives
$i$ and $j$ playing the distinct roles of proposals and incumbents,
respectively (a distinction that stochastic choice does not make). In our
first main result, Theorem \ref{thm:value}, we show that binary choice
probabilities have a value representation through a Fechnerian scaling
$v:A\rightarrow\mathbb{R}$ when they satisfy a basic transitivity property,
thus extending to our setting classic results of traditional deterministic
choice theory and of traditional stochastic choice theory. Indeed, our
analysis includes as special cases both deterministic traditional choice,
where $\rho$ is $0$-$1$ valued, and traditional stochastic choice, where
$\rho$ is strictly positive.
Besides choice probabilities, the other key element of the analysis are the
expected decision times $\tau\left( i\mid j\right) $ that account for the
average duration of comparisons between proposal $i$ and incumbent $j$. We
introduce them formally and consider pairs $\left( \rho,\tau\right) $ to
study their interplay with binary choice probabilities. We propose a value
representation also for these pairs. Such a representation is behaviorally
characterized in our Theorem \ref{prop:chrono}. Theorem \ref{prop:psycho}
captures the special case of symmetric expected decision times which result
from a classical speed/accuracy relation: faster decisions corresponding to
smaller error rates.
With this, we move to the analysis of the Neural Metropolis Algorithm. It
sequentially compares pairs of alternatives, playing the roles of incumbents
and proposals. These comparisons use a binary choice model $\left(
\mathrm{C},\mathrm{RT}\right) $ consisting of choice variables and response
times that determine the frequency $\rho_{\mathrm{C}}\left( i\mid j\right) $
with which proposal $i$ is accepted over incumbent $j$ and the mean response
time $\tau_{\mathrm{RT}}\left( i\mid j\right) $ required by the comparison.
To the pair $\left( \rho_{\mathrm{C}},\tau_{\mathrm{RT}}\right) $ we can
apply the value analysis previously developed, with a Fechnerian scaling
$v_{\mathrm{C}}:A\rightarrow\mathbb{R}$ for the stochastic binary comparisons
(featuring a positive $\rho_{\mathrm{C}}$) and a Paretian utility function
$w_{\mathrm{C}}:A\rightarrow\mathbb{R}$ for the deterministic ones (featuring,
instead, a $0$-$1$ valued $\rho$). An initial random condition $\mu$ and an
exploration matrix $Q$ complete the description of the constituent elements of
the Neural Metropolis Algorithm. A stopping time $N$ terminates the algorithm,
which thus selects an alternative.
The algorithm thus generates a choice probability $p_{N}$ over alternatives,
where $p_{N}\left( i,A\right) $ is the probability that the algorithm
selects alternative $i$ from the choice set $A$, as well as a mean response
time $\tau_{N}\geq0$, the average time that the algorithm takes to select an
alternative from $A$. We obtain closed forms for both $p_{N}$ and $\tau_{N}$
in the important case of negative binomial stopping times, which includes the
geometric ones.
Our value analysis shows that, as the stopping number allows more and more
iterations, the Neural Metropolis Algorithm has noteworthy optimality
properties. It selects optimal alternatives when all binary comparisons are
deterministic, thus implementing traditional choice behavior. When, instead,
deterministic and stochastic binary comparisons coexist, the algorithm first
selects alternatives $i$ that are best across the deterministic comparisons,
so belong to $\arg\max_{A}w_{\mathrm{C}}$, and then chooses over them according
to a logit rule
\[
\frac{e^{v_{\mathrm{C}}\left( i\right) }}{\sum_{j\in\arg\max_{A}
w_{\mathrm{C}}}e^{v_{\mathrm{C}}\left( j\right) }}
\]
where $v_{\mathrm{C}}$ and $w_{\mathrm{C}}$ are the Fechnerian scaling and
Paretian utility previously mentioned.
\paragraph{Limitations of the analysis}
In our analysis the stopping number, which accounts for the decision unit
limited resources, is exogenous. It is a convenient assumption in a first
analysis of an algorithmic decision process, but a topic for future research
is the study of a decision problem that would endogenously deliver it.
Relatedly, we take the Markovian stochastic exploration as exogenous, though
the decision unit may want to adjust it as exploration progresses. The study
of more sophisticated exploration strategies is another topic for future research.
The sequential structure of the Neural Metropolis Algorithm appears to be
natural in view of the limitations of working memory. It would be interesting,
however, to understand its optimality status by making explicit the working
memory constraints that impede the parallel, rather than sequential,
consideration of all competing alternatives in the choice set.
\paragraph{Related literature}
The Neural Metropolis Algorithm has the Metropolis DDM Algorithm of Baldassi
et al. (2020) and Cerreia-Vioglio et al. (2022) as special cases in which
binary comparisons are performed according to the DDM of Ratcliff (1978), as
adapted by Krajbich et al. (2010) and Milosavljevic et al. (2010) to
value-based binary choices. The generalization is significant, moving from a
specific binary comparison model to virtually all of them. Moreover, our
results are novel even when binary comparisons are DDM based.
The Neural Metropolis Algorithm differs from most neuro-computational models
of neuroscience that typically consider simultaneous evidence accumulation for
all the alternatives in the choice set $A$. See, e.g., Roe et al. (2001),
Anderson et al. (2004), McMillen and Holmes (2006), Bogacz et al. (2007),
Ditterich (2010), and Krajbich and Rangel (2011). This simultaneity
assumption, although efficient per se, is at odds with the known limits of
attention and working memory, as previously mentioned.
An important exception is Reutskaja et al. (2011), who present three two-stage
models in which subjects randomly search through the feasible set during an
initial search phase and, when this phase is concluded, select the best item
that was encountered during the search (up to some noise). This approach can
be called quasi-exhaustive search in that time pressure may terminate the
search phase before all alternatives have been evaluated and introduces an
error probability.
Although different from the models considered by Krajbich and Rangel (2011)
and Reutskaja et al. (2011), our model is consistent with some of their
experimental findings about the exploration process of choice sets and with
the conclusions of the seminal eye fixation study of Russo and Rosen (1975).
\section{Preliminaries}
\subsection{Mathematics}
\paragraph{Stochastic matrices}
A square matrix $B$ is (\emph{left}) \emph{stochastic} if the sum of the
entries of each column is $1$. Its powers are the stochastic matrices
$B^{0}=I$ and $B^{n}=BB^{n-1}$, with entry $b_{ij}^{(n)}$ for each $i,j$ in
the index set of $B$. A stochastic matrix $B$ is:
\begin{enumerate}
\item[(i)] \emph{positive} if its entries are strictly positive;
\item[(ii)] \emph{quasi-positive} if its off diagonal entries are strictly positive;
\item[(iii)] \emph{primitive} if there exists $n\geq1$ such that $B^{n}$ is positive;
\item[(iv)] \emph{irreducible} if, for each $i,j$ in its index set, there
exists $n\geq1$ such that $b_{ij}^{(n)}>0$;
\item[(v)] \emph{non-traceless} if it has at least one strictly positive
element on its diagonal;
\item[(vi)] \emph{nice} if it is symmetric and quasi-positive;
\item[(vii)] \emph{reversible} if there exists a probability vector
$p\gg\mathbf{0}$ such that
\begin{equation}
b_{ij}p_{j}=b_{ji}p_{i}\label{eq:balance-pre}
\end{equation}
for each off diagonal entry $b_{ij}$.
\end{enumerate}
This terminology is standard, except (vi). Clearly, a positive matrix is
quasi-positive, a quasi-positive matrix is primitive if it is at least of order $3$.
An irreducible and non-traceless matrix is primitive. Given a stochastic
matrix $B$, the matrix $I-\zeta B$ is invertible when $\zeta\in\left(
-1,1\right) $ because $\left\Vert \zeta B\right\Vert _{1}=\left\vert
\zeta\right\vert <1$. Instead, the matrix $I-B$ is not invertible because, by
Markov's Theorem, there exists a probability vector $p$ such that
\[
Bp=p
\]
Such a vector is called a \emph{stationary distribution} of $B$.
\paragraph{Stopping number}
A \emph{stopping number} (or \emph{rule}) is an $\mathbb{N}$-valued random
variable with finite mean $\mathbb{E}\left[ N\right] $ defined on an
underlying probability space featuring a probability measure $\mathbb{P}$. Two
important functions are associated with a stopping number $N$. The
\emph{probability generating function} $f_{N}:\left[ 0,1\right]
\rightarrow\mathbb{R}$ is defined by
\[
f_{N}\left( z\right) =
{\displaystyle\sum_{n=0}^{\infty}}
\mathbb{P}\left[ N=n\right] z^{n}
\]
while the \emph{survival generating function} $g_{N}:\left[ 0,1\right]
\rightarrow\mathbb{R}$ is defined by
\[
g_{N}\left( z\right) =
{\displaystyle\sum\limits_{n=0}^{\infty}}
\mathbb{P}\left[ N>n\right] z^{n}
\]
These two functions are related as follows (Feller, 1968, p. 265):
\[
g_{N}\left( z\right) =\dfrac{1-f_{N}\left( z\right) }{1-z}\qquad\forall
z\in\left[ 0,1\right]
\]
under the limit convention
\begin{equation}
g_{N}\left( 1\right) =\lim_{z\rightarrow1^{-}}\dfrac{1-f_{N}\left(
z\right) }{1-z}=\mathbb{E}\left[ N\right] \label{eq:limit-bis}
\end{equation}
Probability generating functions are widely used and several formulas are
available for them (see, e.g., Johnson et al. 2005).
Let $\mathcal{B}$ be the collection of all stochastic matrices. Given a
stochastic matrix $B\in\mathcal{B}$, we denote by $f_{N}\left( B\right) $
and $g_{N}\left( B\right) $ the square matrices of the same order of $B$
defined by
\begin{equation}
f_{N}\left( B\right) =
{\displaystyle\sum_{n=0}^{\infty}}
\mathbb{P}\left[ N=n\right] B^{n}\quad\text{and\quad}g_{N}\left( B\right)
=
{\displaystyle\sum\limits_{n=0}^{\infty}}
\mathbb{P}\left[ N>n\right] B^{n} \label{eq:matrix-gen-fct}
\end{equation}
It is easy to check that the matrix power series on the r.h.s. converges entry
by entry, and so the matrix $f_{N}\left( B\right) $ is well
defined.\footnote{See Section \ref{sect:more-stoch-matrices} for more
details.} As $B\in\mathcal{B}$ varies, via (\ref{eq:matrix-gen-fct}) one
defines the matrix generating function on $\mathcal{B}$, still denoted by
$f_{N}$, induced by a probability generating function $f_{N}$.\footnote{On
matrix functions see, e.g., Rinehart (1955) and Higham (2008).}
There is a natural partial order on stopping numbers: we say that stopping
number $N$ \emph{stochastically dominates} stopping number $N^{\prime}$,
written $N\geq N^{\prime}$, if
\[
\mathbb{P}\left[ N>n\right] \geq\mathbb{P}\left[ N^{\prime}>n\right]
\qquad\forall n\geq0
\]
Intuitively, $N$ is a less tight stopping number than $N^{\prime}$. At the
limit, we say that a sequence $N_{k}$ of stopping numbers \emph{diverges},
written $N_{k}\rightarrow\infty$, if
\[
\lim_{k\rightarrow\infty}\mathbb{P}\left( N_{k}>n\right) =1\qquad\forall
n\geq0
\]
This means, as easily checked, that the probability of stopping at any finite
$n$ vanishes as $k\rightarrow\infty$, that is, $\lim_{k\rightarrow\infty
}\mathbb{P}\left( N_{k}=n\right) =0$ for each $n\geq0$.
\subsection{Stochastic choice}
Let $A$ be a finite choice set, with at least three alternatives,\footnote{In
this paper we do not carry out comparative statics exercises across menus. For
this reason, we develop the analysis in terms of an arbitrarily fixed menu
$A$.} called \emph{menu}. Its typical elements are $i$, $j$, $h$ and $k$. We
denote by $\Delta\left( A\right) $ the set of all probability distributions
on $A$, viewed as $\left\vert A\right\vert $-dimensional vectors. In other
words, $\Delta\left( A\right) $ is the standard simplex in the Euclidean
space $\mathbb{R}^{\left\vert A\right\vert }$.
A \emph{choice probability} $p\left( \cdot,A\right) \in\Delta\left(
A\right) $ assigns to each alternative $i$ the probability $p\left(
i,A\right) $ that the decision unit chooses $i$ within $A$. Formally,
$p\left( i,A\right) $ is the component $i$ of the $\left\vert A\right\vert
$-dimensional vector $p\left( \cdot,A\right) $.
\section{Binary choice\label{sect:bin-choice}}
As discussed in the Introduction, our algorithmic decision process considers a
sequence of binary choices between an incumbent and a proposal. To model these
binary choices, in this section we generalize traditional stochastic choice to
account for the distinction between the roles of incumbents and proposals that
alternatives may play in binary choices. In the next section we will apply
this generalized framework to observed binary choice behavior.
\subsection{Binary choice probabilities}
A neural system, our decision unit,\footnote{Throughout we use the terms
\textquotedblleft decision unit\textquotedblright\ and \textquotedblleft
neural system\textquotedblright\ interchangeably.} compares two alternatives
$i$ and $j$ in a menu $A$ of alternatives through a \emph{probability kernel},
a function $\rho:A^{2}\rightarrow\left[ 0,1\right] $. For distinct $i$ and
$j$,
\[
\rho\left( i\mid j\right)
\]
denotes the probability with which \emph{proposal} $i$ is accepted when $j$ is
the \emph{incumbent} (or \emph{status quo}). So, $1-\rho\left( i\mid
j\right) $ is the probability with which the proposal is rejected and the
incumbent maintained. Next we introduce the class of kernels that we will study.
\begin{definition}
A probability kernel $\rho:A^{2}\rightarrow\left[ 0,1\right] $ is a
\emph{binary choice probability }if
\begin{equation}
\rho\left( i\mid j\right) =1\Longleftrightarrow\rho\left( j\mid i\right)
=0 \label{eq:pcb}
\end{equation}
with the convention $\rho\left( i\mid i\right) =\varepsilon>0$.
\end{definition}
We thus assume throughout that when an alternative is chosen for sure over
another alternative, this happens regardless of the roles that they play. With
this, we now introduce some basic properties.
\begin{definition}
A binary choice probability $\rho$ is:
\begin{itemize}
\item \emph{(status-quo) unbiased} if
\[
\underset{\text{prob. }i\text{ if proposal}}{\underbrace{\rho\left( i\mid
j\right) }}=\underset{\text{prob. }i\text{ if incumbent}}{\underbrace
{1-\rho\left( j\mid i\right) }}
\]
for all distinct alternatives $i\ $and $j$;
\item \emph{positive} if $\rho\left( i\mid j\right) >0$ for all distinct
alternatives $i\ $and $j$;
\item \emph{Dirac} if $\rho\left( i\mid j\right) \in\left\{ 0,1\right\} $
for all distinct alternatives $i\ $and $j$.
\end{itemize}
\end{definition}
These properties have a simple interpretation: a binary choice probability is
unbiased when it gives the incumbent alternative no special status, it is
positive when it selects either alternative with strictly positive
probability, and it is Dirac when it selects either alternative
deterministically.\footnote{If
a binary choice probability is positive, then for all distinct $i$ and $j$,
$\rho\left( i\mid j\right) >0$ and $\rho\left( j\mid i\right) >0$, then
neither of them can be $1$ (otherwise the other would be $0$). Thus we have
$0<\rho\left( i\mid j\right) <1$ for all $i$ and \thinspace$j$.}
Traditional stochastic choice usually considers unbiased (and often positive)
binary choice probabilities, where $\rho\left( i\mid j\right) =p\left(
i,\left\{ i,j\right\} \right) $ describes the probability of choosing $i$
from the doubleton $\left\{ i,j\right\} $. General, possibly biased, binary
choice probabilities account for the distinct roles of incumbent and proposal
that are peculiar to a sequential analysis.
\begin{definition}
A binary choice probability $\rho$ is \emph{transitive} if
\begin{equation}
\rho\left( j\mid i\right) \rho\left( k\mid j\right) \rho\left( i\mid
k\right) =\rho\left( k\mid i\right) \rho\left( j\mid k\right) \rho\left(
i\mid j\right) \label{eq:trans-ddm-bis}
\end{equation}
for all distinct alternatives $i$, $j$ and $k$.
\end{definition}
In words, a binary choice probability is transitive when violations of
transitivity in the choices that it determines are due only to the presence of
noise.\footnote{Cf. Luce and Suppes (1965) p. 341. Also note that if two
alternatives are not distinct, then condition (\ref{eq:trans-ddm-bis}) is
automatically satisfied.} Indeed, condition (\ref{eq:trans-ddm-bis}) amounts
to requiring the intransitive cycles
\[
i\rightarrow j\rightarrow k\rightarrow i\text{\quad and\quad}i\rightarrow
k\rightarrow j\rightarrow i
\]
to be equally likely (over independent choices). Transitivity ensures that
systematic intransitivities, a violation of a basic rationality tenet, cannot
occur. We expect that a viable neural system satisfies this property.
\subsection{Binary value analysis}
The binary choice probability $\rho$ induces two binary relations $\succ
^{\ast}$ and $\succsim$ defined by
\[
i\succ^{\ast}j\Longleftrightarrow\rho\left( i\mid j\right) =1\quad
\text{and\quad}i\succsim j\Longleftrightarrow\rho\left( i\mid j\right)
\geq\rho\left( j\mid i\right)
\]
for all alternatives $i$ and $j$. We interpret $\succ^{\ast}$ as a clear-cut,
deterministic, strict preference over the alternatives that the decision unit
is able to perfectly discriminate in value. It is a standard notion in
traditional (non-stochastic) utility theory.\footnote{See Fishburn (1970) and
Kreps (1988).} Since
\begin{equation}
i\succ^{\ast}j\Longleftrightarrow\rho\left( i\mid j\right)
=1\Longleftrightarrow\rho\left( j\mid i\right) =0 \label{eq:sstr}
\end{equation}
a strict preference holds irrespective of the alternatives' roles as
incumbents or proposals.\footnote{To further elaborate, $\rho\left( i\mid
j\right) =1$ means that $i$ is accepted for sure when proposed, while
$\rho\left( j\mid i\right) =0$, that is, $1-\rho\left( j\mid i\right) =1$,
means that $i$ is maintained for sure when it is the incumbent.} We write
$i\parallel^{\ast}j$ when there is no strict preference over the two
alternatives, i.e.,
\[
i\parallel^{\ast}j\Longleftrightarrow i\not \succ ^{\ast}j\text{ and
}j\not \succ ^{\ast}i
\]
By (\ref{eq:sstr}), this is equivalent to $\rho\left( i\mid j\right)
\in\left( 0,1\right) $, and also to $\rho\left( j\mid i\right) \in\left(
0,1\right) $. This is the case in which choice is truly stochastic (again
irrespective of the alternatives' roles).
In contrast, we interpret $\succsim$ as a weak notion of preference that
extends the strict preference $\succ^{\ast}$ by allowing for the stochastic
rankings that occur over alternatives that the decision unit only imperfectly
discriminates in value. The consistency property
\begin{equation}
i\succ^{\ast}j\Longrightarrow i\succ j \label{eq:cons}
\end{equation}
shows that, as is natural, a strict preference is preserved by $\succ$ (the
asymmetric part of $\succsim$). Finally, the binary relation $\succsim^{
{{}^\circ}
}$ defined by
\[
i\succsim^{
{{}^\circ}
}j\Longleftrightarrow i\succsim j\text{ and }i\parallel^{\ast}j
\]
describes the rankings that are stochastic. Indeed,
\[
i\succsim^{
{{}^\circ}
}j\Longleftrightarrow1>\rho\left( i\mid j\right) \geq\rho\left( j\mid
i\right) >0
\]
The next lemma substantiates our interpretations.
\begin{lemma}
\label{lm:bcp-prop}If the binary choice probability $\rho$ is transitive, then
\begin{enumerate}
\item[(i)] $\succ^{\ast}$ is asymmetric and negatively transitive;
\item[(ii)] $\parallel^{\ast}$ is an equivalence relation;
\item[(iii)] $\succsim$ is complete and transitive;
\item[(iv)] $\succsim^{
{{}^\circ}
}$ is reflexive and transitive as well as complete on each equivalence class
of $\parallel^{\ast}$.
\end{enumerate}
\end{lemma}
The two preferences $\succsim^{
{{}^\circ}
}$ and $\succ^{\ast}$ complement each other by accounting for the stochastic
and deterministic comparisons that occur, respectively, with imperfect and
perfect discrimination in value. Jointly, they rank all pairs of
alternatives.\footnote{Set-theoretically, we have $\succ^{\ast}\cap\succsim^{
{{}^\circ}
}=\emptyset$ and $\succ^{\ast}\cup\succsim^{
{{}^\circ}
}=\succsim$.} In view of this, we focus our analysis on them.
\begin{definition}
A binary choice probability $\rho:A^{2}\rightarrow\left[ 0,1\right] $ has a
\emph{binary} \emph{value} \emph{representation} if there exist
$v,w:A\rightarrow\mathbb{R}$ and a symmetric $s:A^{2}\rightarrow\left(
0,\infty\right) $ such that
\begin{equation}
\rho\left( i\mid j\right) =\left\{
\begin{array}
[c]{ll}
1
& \qquad\text{if }w\left( i\right) >w\left( j\right) \\
s\left( i,j\right) \dfrac{e^{v\left( i\right) }}{e^{v\left( i\right)
}+e^{v\left( j\right) }}
& \qquad\text{if }w\left( i\right)
=w\left( j\right) \\
0 & \qquad\text{if }w\left( i\right) <w\left( j\right)
\end{array}
\right. \label{eq:stoch-utt}
\end{equation}
for all $i$ and $j$.
\end{definition}
It is readily seen that, in this case
\begin{equation}
i\succ^{\ast}j\Longleftrightarrow w\left( i\right) >w\left( j\right)
\quad\text{;\quad}i\parallel^{\ast}j\Longleftrightarrow w\left( i\right)
=w\left( j\right) \label{eq:stoch-ut-pre}
\end{equation}
and, when $w\left( i\right) =w\left( j\right) $,
\begin{equation}
i\succsim^{
{{}^\circ}
}j\Longleftrightarrow v\left( i\right) \geq v\left( j\right)
\label{eq:stoch-ut}
\end{equation}
Thus, we interpret $w$ as a utility function for $\succ^{\ast}$ and $v$ as a
utility function for $\succsim^{
{{}^\circ}
}$. Moreover, we interpret $s$ as a status quo bias index. These
interpretations are corroborated by the next result. In reading it, keep in
mind that $v$ and $s$ are relevant only for stochastic rankings (as identified
by the equivalence classes of $\parallel^{\ast}$, so by the level sets of $w$).
\begin{lemma}
\label{prop:vb}If a binary choice probability admits a binary value
representation, then,
\begin{enumerate}
\item[(i)] the utility function $w$ is unique up to a strictly increasing transformation;
\item[(ii)] the utility function $v$ is, on each level set of $w$, unique up
to an additive constant;
\item[(iii)] the status quo bias index $s$ is, on each level set of $w$,
unique with
\begin{align}
\rho\left( i\mid j\right) & <1-\rho\left( j\mid i\right)
\Longleftrightarrow s\left( i,j\right) <1\nonumber\\
\rho\left( i\mid j\right) & =1-\rho\left( j\mid i\right)
\Longleftrightarrow s\left( i,j\right) =1\label{eq:propvb}\\
\rho\left( i\mid j\right) & >1-\rho\left( j\mid i\right)
\Longleftrightarrow s\left( i,j\right) >1\nonumber
\end{align}
\end{enumerate}
\end{lemma}
The relations in the last point of the lemma clarify the interpretation of $s$
as a status quo bias index for the comparison of proposal $i$ and incumbent
$j$. In particular, bias favors the incumbent when $s\left( i,j\right) <1$,
the proposal when $s\left( i,j\right) >1$, and it is absent otherwise. Thus,
the binary choice probability $\rho$ is unbiased if and only if $s$ is
constant to $1$.
The utility $w$ is a traditional Paretian utility function that, by ranking
alternatives in an ordinal manner, represents the strict preference
$\succ^{\ast}$. This Paretian utility is constant, so irrelevant in
(\ref{eq:stoch-utt}), if and only if $\rho$ is positive, i.e., when all
rankings are stochastic. When $\rho$ is both positive and unbiased, the binary
value representation (\ref{eq:stoch-utt}) reduces to
\[
\rho\left( i\mid j\right) =\dfrac{e^{v\left( i\right) }}{e^{v\left(
i\right) }+e^{v\left( j\right) }}
\]
This is the strict utility\ representation of Marschak (1960) and Luce and
Suppes (1965),\footnote{Luce (1959) and Block and Marschak (1960) study a
stronger non-binary version of strict utility.} which our binary value
representation thus extends to general, possibly biased and partly
deterministic, binary choice probabilities. As well-known, using the logistic
function $\xi$ we can write:
\begin{equation}
\dfrac{e^{v\left( i\right) }}{e^{v\left( i\right) }+e^{v\left( j\right)
}}
=\xi\left( v\left( i\right) -v\left( j\right) \right)
\label{eq:fech-diff}
\end{equation}
The binary choice probability $\rho\left( i\mid j\right) $ thus depends, in
a Fechnerian way, on the utility difference $v\left( i\right) -v\left(
j\right) $.\footnote{Cf. Luce and Suppes (1965) p. 334. The logistic function
$\xi:\mathbb{R}\rightarrow\mathbb{R}$ is given by $\xi\left( x\right)
=1/\left( 1+e^{-x}\right) $.}
In our extension, $v\ $continues to be a \emph{bona fide} utility function on
the level sets of $w$, as (\ref{eq:stoch-ut}) shows. We call it a
\emph{Fechnerian utility} \emph{function}. When $w\left( i\right) =w\left(
j\right) $, it holds
\begin{equation}
\rho\left( i\mid j\right) \geq\rho\left( j\mid i\right)
\Longleftrightarrow v\left( i\right) \geq v\left( j\right) \iff
1-\rho\left( j\mid i\right) \geq1-\rho\left( i\mid j\right)
\label{eq:ut-stoch}
\end{equation}
Alternatives with a higher Fechnerian utility thus have a higher probability
to be selected, regardless of their roles as proposals or incumbents. When
$\rho$ is both positive and unbiased, (\ref{eq:ut-stoch}) takes the form
\[
v\left( i\right) \geq v\left( j\right) \Longleftrightarrow\rho\left(
i\mid j\right) \geq\frac{1}{2}
\]
familiar from traditional stochastic choice. The Fechnerian utility function
is immaterial when $\rho$ is Dirac, i.e., when all rankings of distinct
alternatives are deterministic.
\begin{lemma}
\label{lm:dirac}A binary choice probability $\rho$ is Dirac and transitive if
and only if $\succ^{\ast}$ is weakly complete and transitive.\footnote{The
strict preference $\succ^{\ast}$ is \emph{weakly complete} if, for each $i\neq
j$, either $i\succ^{\ast}j$ or $j\succ^{\ast}i$ (cf. Fishburn, 1970). Under
weak completeness, we do not have to worry about indifferences, a notoriously
delicate issue.}
\end{lemma}
The transitivity of a binary choice probability thus generalizes the
transitivity of a strict preference of traditional utility theory. In this
case, the binary value representation (\ref{eq:stoch-utt}) reduces to
\[
\rho\left( i\mid j\right) =\left\{
\begin{array}
[c]{ll}
1
& \qquad\text{if }w\left( i\right) >w\left( j\right) \\
\dfrac{1}{2}
& \qquad\text{if }w\left( i\right) =w\left( j\right)
\\
0 & \qquad\text{if }w\left( i\right) <w\left( j\right)
\end{array}
\right.
\]
The next representation theorem, our first main result, shows that
transitivity characterizes the binary choice probabilities having a binary
value representation.
\begin{theorem}
\label{thm:value}A binary choice probability has a binary value
representation\emph{\ }if and only if it is transitive.
\end{theorem}
This theorem generalizes standard utility representations in stochastic choice
(e.g., Luce and Suppes, 1965, p. 350) as well as, in view of Lemma
\ref{lm:dirac}, in traditional utility theory.
We conclude by observing that the preference $\succsim$ has, in terms of the
binary value representation (\ref{eq:stoch-utt}), a lexicographic
representation via the Fechnerian utility $v$ and the Paretian utility $w$.
Indeed, it is easy to see that, for each $i$ and $j$,
\[
i\succsim j\Longleftrightarrow\left( w\left( i\right) ,v\left( i\right)
\right) \geq_{lex}\left( w\left( j\right) ,v\left( j\right) \right)
\]
where $\geq_{lex}$ is the lexicographic order on the plane.
\subsection{Expected response times}
Besides the choice probability $\rho\left( i\mid j\right) $, the other
quantity featured in sequential binary choice is the expected time
\[
\tau\left( i\mid j\right)
\]
that the decision unit takes to choose between distinct proposal $i$ and
incumbent $j$. We represent it with a function $\tau:A^{2}\rightarrow\left[
0,\infty\right) $.
\begin{definition}
A pair $\left( \rho,\tau\right) $ of a binary choice probability and an
expected response time forms a \emph{tandem} if, for each $i\neq j$,
\begin{equation}
\rho\left( i\mid j\right) \in\left\{ 0,1\right\} \Longleftrightarrow
\tau\left( i\mid j\right) =0 \label{eq:tao-rho}
\end{equation}
and
\begin{equation}
\tau\left( i\mid j\right) =\tau\left( j\mid i\right) \Longrightarrow
\rho\left( i\mid j\right) =1-\rho\left( j\mid i\right) \label{eq:tao-rho2}
\end{equation}
\end{definition}
A tandem provides a thorough description of the binary choices of our decision
unit in the menu $A$. In such a description, the consistency condition
(\ref{eq:tao-rho}) ensures that deterministic choices are the ones that take
no time (so we abstract from non-decision times). In particular, since $\rho$
is a binary choice probability,
\[
\tau\left( i\mid j\right) =0\iff\tau\left( j\mid i\right) =0
\]
The consistency condition (\ref{eq:tao-rho2}), instead, requires that the
absence of a status quo bias manifests itself primarily in the symmetry of
response times.
\begin{definition}
A tandem $\left( \rho,\tau\right) $ has a \emph{binary} \emph{value}
\emph{representation} if there exist $v,w:A\rightarrow\mathbb{R}$, a symmetric
$s:A^{2}\rightarrow\left( 0,\infty\right) $, and a strictly quasiconcave and
unimodal $\varphi:\mathbb{R}\rightarrow\left( 0,\infty\right) $ such that
(\ref{eq:stoch-utt}) holds and
\begin{equation}
\tau\left( i\mid j\right) =\left\{
\begin{array}
[c]{ll}
0
& \qquad\text{if }w\left( i\right) \neq w\left( j\right) \\
\varphi\left( v\left( i\right) -v\left( j\right) \right) &
\qquad\text{if }w\left( i\right) =w\left( j\right)
\end{array}
\right. \label{eq:response-time-bis}
\end{equation}
for all $i$ and $j$.
\end{definition}
A strictly quasiconcave and unimodal $\varphi:\mathbb{R}\rightarrow\left(
0,\infty\right) $ is a function that first strictly increases to a strong
maximum and then strictly decreases. This pattern is motivated by the standard
psychophysical assumption that the stimulus strength determines response times
with stronger stimuli inducing faster responses. Since here the stimulus
strength corresponds to the preference intensity represented by utility
differences $v\left( i\right) -v\left( j\right) $, this standard
assumption requires that large (positive) differences and small (negative)
differences command short response times. This is exactly what is captured by
the shape of $\varphi$.
To give an observable counterpart of this standard assumption we need to
introduce the following observables:
\[
\ell_{ij}=\ln\frac{\rho\left( i\mid j\right) }{\rho\left( j\mid i\right) }
\]
Using these log-odds we can introduce a class of tandems:
\begin{definition}
A tandem $\left( \rho,\tau\right) $ is \emph{chronometric} if $\rho$ is
transitive and there exists a threshold $l$ such that,
\begin{align}
\left. \ell_{ij}=\ell_{hk}\right. & \implies\tau\left( i\mid j\right)
=\tau\left( h\mid k\right) \label{eq:qe}\\
\left. l\leq\ell_{ij}<\ell_{hk}\right. & \implies\tau\left( i\mid
j\right) >\tau\left( h\mid k\right) \label{eq:qi}\\
\left. \ell_{ij}<\ell_{hk}\leq l\right. & \implies\tau\left( i\mid
j\right) <\tau\left( h\mid k\right) \label{eq:qo}
\end{align}
for all pairs of alternatives $i,j$ and $h,k$ with nonzero response times.
\end{definition}
We can now state our second representation theorem that characterizes tandems
having a binary value representation.
\begin{theorem}
\label{prop:chrono}A tandem has a binary value representation\emph{\ }if and
only if it is chronometric.
\end{theorem}
Another standard psychophysical assumption is that stimulus strength
determines error rates, with stronger stimuli inducing lower error rates.
Consistency of this assumption with the previous one requires shorter response
times to correspond to lower error rates. Surprisingly this leads to unbiased tandems.
Specifically, observe that when comparing a proposal $i$ and an incumbent $j$
(or \emph{vice versa}) we may make errors of two types: we may reject a superior proposal or accept
an inferior one. In analogy with standard terminology, we call them
\emph{first-type }and\emph{\ second-type errors}, respectively. Their
probabilities are
\begin{equation}
\mathrm{ER}_{i,j}^{\mathrm{I}}=\min\left\{ 1-\rho\left( i\mid j\right)
,1-\rho\left( j\mid i\right) \right\} \quad\text{;}\quad\mathrm{ER}
_{i,j}^{\mathrm{II}}=\min\left\{ \rho\left( i\mid j\right) ,\rho\left(
j\mid i\right) \right\} \label{eq:error-prob}
\end{equation}
Next we introduce a basic error-monotonicity property.
\begin{definition}
A tandem $\left( \rho,\tau\right) $ is \emph{psychometric} if $\rho$ is
transitive and
\[
\tau\left( i\mid j\right) <\tau\left( h\mid k\right) \Longrightarrow
\mathrm{ER}_{i,j}^{\mathrm{I}}<\mathrm{ER}_{h,k}^{\mathrm{I}}\quad
\text{and\quad}\mathrm{ER}_{i,j}^{\mathrm{II}}<\mathrm{ER}_{h,k}^{\mathrm{II}}
\]
and
\[
\tau\left( i\mid j\right) \leq\tau\left( h\mid k\right) \Longrightarrow
\mathrm{ER}_{i,j}^{\mathrm{I}}\leq\mathrm{ER}_{h,k}^{\mathrm{I}}
\quad\text{and\quad}\mathrm{ER}_{i,j}^{\mathrm{II}}\leq\mathrm{ER}
_{h,k}^{\mathrm{II}}
\]
for all pairs of alternatives $i,j$ and $h,k$ with nonzero response times.
\end{definition}
In words, shorter binary expected response times correspond to lower errors of
both types, a property reflecting that choices between alternatives with
larger utility differences are easier to make. The next representation theorem
shows that psychometricity characterizes chronometric tandems with symmetric
expected response times, thus featuring no status quo biases.
\begin{theorem}
\label{prop:psycho}A tandem has a binary value representation\emph{\ }with an
even $\varphi$ if and only if it is psychometric.
\end{theorem}
It is noteworthy that psychometricity implies chronometricity since the two
definitions are not obviously related. With this theorem, our third main
result, we conclude the general analysis of binary choices.
\section{An algorithmic decision process\label{sect:algo}}
\subsection{Binary choice behavior\label{sect:bin}}
It is time to turn to observed binary choice behavior and apply to it the
general binary choice framework just introduced.
\begin{definition}
A \emph{binary choice model }(\emph{BCM}) is a pair of random matrices
$\left( \mathrm{C},\mathrm{RT}\right) $ where:
\begin{enumerate}
\item[(i)] $\mathrm{C}=\left[ \mathrm{C}_{i,j}\right] $ consists of the
random \emph{choice variables} $\mathrm{C}_{i,j}$ that describe the random
outcome of the comparison between proposal $i$ and status quo $j$, with
\[
\mathrm{C}_{i,j}=\left\{
\begin{array}
[c]{ll}
i & \text{if }i\text{ accepted}
\\
j & \text{if }i\text{ rejected}
\end{array}
\right.
\]
\item[(ii)] $\mathrm{RT}=\left[ \mathrm{RT}_{i,j}\right] $ consists of
random \emph{response times} $\mathrm{RT}_{i,j}$ required by the
comparison.\footnote{Throughout we assume that random response times (say
measured in seconds) have finite mean and variance.}
\end{enumerate}
\end{definition}
The distributions of $\mathrm{C}$ and $\mathrm{RT}$ are, in principle, both
observable in choice behavior. By equating probabilities and frequencies, they
induce a pair $\left( \rho_{\mathrm{C}},\tau_{\mathrm{RT}}\right) $ where
\[
\rho_{\mathrm{C}}\left( i\mid j\right) =\mathbb{P}\left[ \mathrm{C}
_{i,j}=i\right]
\]
is the frequency with which proposal $i$ is accepted over incumbent $j$, and
\[
\tau_{\mathrm{RT}}\left( i\mid j\right) =\mathbb{E}\left[ \mathrm{RT}
_{i,j}\right]
\]
is the mean response time required by the comparison.
When $\left( \rho_{\mathrm{C}},\tau_{\mathrm{RT}}\right) $ has a binary
value representation, we denote by
\[
\left( s_{\mathrm{C}},v_{\mathrm{C}},w_{\mathrm{C}},\varphi_{\mathrm{RT}
}\right)
\]
its elements. The most basic example of a BCM $\left( \mathrm{C}
,\mathrm{RT}\right) $ occurs in traditional utility theory when the choices
of the decision unit are deterministic. In this case, the pair $\left(
\rho_{\mathrm{C}},\tau_{\mathrm{RT}}\right) $ has the binary value
representation of the Dirac form:
\[
\rho_{\mathrm{C}}\left( i\mid j\right) =\left\{
\begin{array}
[c]{ll}
1
& \qquad\text{if }w_{\mathrm{C}}\left( i\right) >w_{\mathrm{C}
}\left( j\right) \\
\dfrac{1}{2}
& \qquad\text{if }w_{\mathrm{C}}\left( i\right)
=w_{\mathrm{C}}\left( j\right) \\
0 & \qquad\text{if }w_{\mathrm{C}}\left( i\right) <w_{\mathrm{C}}\left(
j\right)
\end{array}
\right.
\]
and $\tau_{\mathrm{RT}}$ is typically undefined.
A popular stochastic binary choice model is the \emph{Drift Diffusion Model
}(\emph{DDM}) introduced by Ratcliff (1978). In its value version, developed
by Krajbich et al. (2010) and Milosavljevic et al. (2010), the comparison of
two alternatives $i$ and $j$ is governed by their \emph{neural utilities }
$\nu\left( i\right) $ and $\nu\left( j\right) $ about which the decision
unit learns, for instance via memory retrieval, during the deliberation that
precedes the choice between the two alternatives.\footnote{To ease the
analysis, we assume that the neural utility $\nu:A\rightarrow\mathbb{R}$ is
injective.} Evidence accumulation in favor of either alternative is
represented by the two Brownian motions with drift $\mathrm{V}_{i}\left(
t\right) =\nu\left( i\right) t+\mathrm{W}_{i}\left( t\right) $ and
$\mathrm{V}_{j}\left( t\right) =\nu\left( j\right) t+\mathrm{W}_{j}\left(
t\right) $. Each accumulation experiences independent white noise
fluctuations modeled by the uncorrelated Wiener processes $\mathrm{W}_{i}$ and
$\mathrm{W}_{j}$. With this,
\begin{itemize}
\item the net\ evidence in favor of $i$ over $j$ is given, at each $t>0$, by
the difference
\begin{equation}
\mathrm{Z}_{i,j}\left( t\right) =\mathrm{V}_{i}\left( t\right)
-\mathrm{V}_{j}\left( t\right) =\left[ \nu\left( i\right) -\nu\left(
j\right) \right] t+\sqrt{2}~\mathrm{W}\left( t\right) \label{eq:ddm}
\end{equation}
where $\mathrm{W}$ is the Wiener difference process $\left( \mathrm{W}
_{i}-\mathrm{W}_{j}\right) /\sqrt{2}$;
\item comparison ends when $\mathrm{Z}_{i,j}\left( t\right) $ reaches either
the barrier $\lambda>0$ or $-\beta<0$; so the response time is
\[
\mathrm{RT}_{i,j}=\min\left\{ t:\mathrm{Z}_{i,j}\left( t\right)
=\lambda\text{ or }\mathrm{Z}_{i,j}\left( t\right) =-\beta\right\}
\]
\item proposal $i$ is accepted when the upper barrier $\lambda$ is reached,
while incumbent $j$ is maintained (so proposal $i$ is rejected) when the lower
barrier $-\beta$ is reached; so the choice variable is
\[
\mathrm{C}_{i,j}=\left\{
\begin{array}
[c]{ll}
i & \qquad\text{if }\mathrm{Z}_{i,j}\left( \mathrm{RT}_{i,j}\right)
=\lambda
\\
j & \qquad\text{if }\mathrm{Z}_{i,j}\left( \mathrm{RT}_{i,j}\right) =-\beta
\end{array}
\right.
\]
\end{itemize}
The asymmetry between the barriers $\lambda$ and $\beta$ accounts for the
different roles of alternatives as proposal and status quo. A DDM is pinned down by its
elements $\nu$, $\lambda$ and $\beta$. We thus write it as DDM $\left(
\nu,\lambda,\beta\right) $. When $\lambda=\beta$ we say that the DDM is
\emph{symmetric}.
\begin{proposition}
\label{prop:ddm_bcm}The pair $\left( \rho_{\mathrm{C}},\tau_{\mathrm{RT}
}\right) $ generated by a DDM $\left( \nu,\lambda,\beta\right) $ is a
chronometric tandem, with $\rho_{\mathrm{C}}$ positive (and transitive). It
has a binary value representation with
\begin{equation}
v_{\mathrm{C}}=\lambda\nu\quad\text{;}\quad s_{\mathrm{C}}\left( i,j\right)
=1+\dfrac{e^{\lambda\left\vert \nu\left( i\right) -\nu\left( j\right)
\right\vert }-e^{\beta\left\vert \nu\left( i\right) -\nu\left( j\right)
\right\vert }}{1-e^{\left( \lambda+\beta\right) \left\vert \nu\left(
i\right) -\nu\left( j\right) \right\vert }}\quad\text{;}\quad
w_{\mathrm{C}}\text{ constant} \label{eq:bcp-uno}
\end{equation}
and
\begin{equation}
\varphi_{\mathrm{RT}}\left( x\right) =\frac{\lambda^{2}}{x}\left[
\frac{1-e^{\frac{\beta}{\lambda}x}}{e^{-x}-e^{\frac{\beta}{\lambda}x}}\left(
1+\frac{\beta}{\lambda}\right) -\frac{\beta}{\lambda}\right]
\label{eq:bcp-due}
\end{equation}
for all $x\in\mathbb{R}$.
\end{proposition}
Thus, in the DDM case the pair $\left( \rho_{\mathrm{C}},\tau_{\mathrm{RT}
}\right) $ is a tandem with binary value representation
\[
\rho_{\mathrm{C}}\left( i\mid j\right) =s_{\mathrm{C}}\left( i,j\right)
\xi\left( v_{\mathrm{C}}\left( i\right) -v_{\mathrm{C}}\left( j\right)
\right) =s_{\mathrm{C}}\left( i,j\right) \frac{e^{v_{\mathrm{C}}\left(
i\right) }}{e^{v_{\mathrm{C}}\left( i\right) }+e^{v_{\mathrm{C}}\left(
j\right) }}
\]
and
\[
\tau_{\mathrm{RT}}\left( i\mid j\right) =\varphi_{\mathrm{RT}}\left(
v_{\mathrm{C}}\left( i\right) -v_{\mathrm{C}}\left( j\right) \right)
=\lambda\frac{\lambda\rho_{\mathrm{C}}\left( i\mid j\right) -\beta\left(
1-\rho_{\mathrm{C}}\left( i\mid j\right) \right) }{v_{\mathrm{C}}\left(
i\right) -v_{\mathrm{C}}\left( j\right) }
\]
In particular, we have a decomposition of the Fechnerian utility
$v_{\mathrm{C}}=\lambda\nu$ in terms of neural utility function $\nu$ and
acceptance threshold $\lambda$. Accordingly,
\[
v_{\mathrm{C}}\left( i\right) -v_{\mathrm{C}}\left( j\right)
=\lambda\left( \nu\left( i\right) -\nu\left( j\right) \right)
\]
The Fechnerian utility difference is decomposed in the neural utility
difference $\nu\left( i\right) -\nu\left( j\right) $ weighted by the
coefficient $\lambda$. The higher the neural utility difference, the higher
the intensity of the neural value for $i$ over $j$. The higher $\lambda$, the
higher the decision unit's ability to perceive this value difference, and so
to discriminate the alternatives' subjective values. In other words, $\lambda$
acts as a magnification lens for neural utility differences.
The next result gives a sharp empirical content to the DDM case. It is
convenient to state it using the log-odds
\[
\ell_{ij}=\log\frac{\rho_{\mathrm{C}}\left( i\mid j\right) }{\rho
_{\mathrm{C}}\left( j\mid i\right) }\quad\text{and\quad}\bar{\ell}_{ij}
=\log\frac{1-\rho_{\mathrm{C}}\left( j\mid i\right) }{1-\rho_{\mathrm{C}
}\left( i\mid j\right) }
\]
\begin{proposition}
\label{prop:ddm-bcm}The elements of a DDM $\left( \nu,\lambda,\beta\right) $
are uniquely identified by the tandem $\left( \rho_{\mathrm{C}}
,\tau_{\mathrm{RT}}\right) $ that it generates. In particular, if $\ell
_{ij}\neq0$,
\[
\lambda=\left\vert \ell_{ij}\right\vert \sqrt{\frac{\tau_{ij}}{\ell_{ij}
\rho_{ij}+\bar{\ell}_{ij}\left( \rho_{ij}-1\right) }}
\]
and
\[
\beta=\lambda\frac{\bar{\ell}_{ij}}{\ell_{ij}}\quad;\quad\nu\left( i\right)
=\frac{1}{\lambda}\log r_{\mathrm{C}}\left( i,j^{\ast}\right)
\]
under the normalization $\nu\left( j^{\ast}\right) =0$ for some alternative
$j^{\ast}$.
\end{proposition}
Finally, we characterize symmetric DDMs by showing that symmetry is equivalent
to an unbiased $\rho_{\mathrm{C}}$ as well as to a symmetric $\tau
_{\mathrm{RT}}$.
\begin{proposition}
\label{prop:ddm-error}For a tandem $\left( \rho_{\mathrm{C}},\tau
_{\mathrm{RT}}\right) $ generated by a DDM $\left( \nu,\lambda,\beta\right)
$, the following conditions are equivalent:
\begin{enumerate}
\item[(i)] the tandem is psychometric;
\item[(ii)] $\beta=\lambda$;
\item[(iii)] $\tau_{\mathrm{RT}}\left( i\mid j\right) =\tau_{\mathrm{RT}
}\left( j\mid i\right) $ for some (all) $i\neq j$;
\item[(iv)] $\rho_{\mathrm{C}}\left( i\mid j\right) =1-\rho_{\mathrm{C}
}\left( j\mid i\right) $ for some (all) $i\neq j$.
\end{enumerate}
In this case,
\[
\varphi_{\mathrm{RT}}\left( x\right) =\frac{\lambda^{2}}{x}\tanh\frac{x}{2}
\]
for all $x\in\mathbb{R}$.
\end{proposition}
We conclude this section by observing that a broad family of BCMs is given by
\emph{evidence threshold models}. They encompass integration models, like the
DDM just studied, as well as the extrema detection models discussed by Stine
et al. (2020). In Appendix \ref{sect:evid-mod} we discuss this family of BCMs
in some detail.
\subsection{Neural Metropolis Algorithm\label{sect:nma}}
The protagonist of our analysis is an algorithmic decision process that a
neural system might implement when facing a multialternative menu $A$. This
process consists of a sequence of pairwise comparisons conducted via a BCM,
whose contestants are selected by a Markovian mechanism in the sense of
Metropolis et al. (1953). This sequential structure is motivated, as discussed
in the Introduction, by the well-known limits of working memory and is
supported by classic and recent eye-tracking studies.\footnote{See Russo and
Rosen (1975), Krajbich and Rangel (2011) and Reutskaja et al. (2011) as well
as the discussion in Cerreia-Vioglio et al. (2022).}
In broad strokes, this algorithmic decision process:
\begin{enumerate}
\item starts from an arbitrary element $j$ of the menu, the\textbf{
}\emph{incumbent};
\item selects a candidate alternative $i$ in the menu, the \emph{proposal};
\item compares them via a BCM and makes the winner the new incumbent;
\item repeats steps 1-3 until deliberation time comes, with the last incumbent
being the chosen alternative in the menu.
\end{enumerate}
More in detail, the algorithm starts by selecting a first incumbent $j$
according to an initial distribution $\mu\in\Delta\left( A\right) $ that,
for example, may describe the \textquotedblleft first
fixation\textquotedblright\ of the decision unit. It proceeds through an
\emph{exploration }(\emph{stochastic})\emph{ matrix}
\[
Q=\left[ Q\left( i\mid j\right) :i,j\in A\right]
\]
of order $\left\vert A\right\vert $ that describes how the algorithm navigates
through alternatives. In particular, given the incumbent $j$, a proposal $i$
is selected with probability $Q\left( i\mid j\right) $. Incumbent and
proposal are then compared via a BCM $\left( \mathrm{C},\mathrm{RT}\right)
$. After $\mathrm{RT}_{i,j}$ seconds, the new incumbent is $j^{\prime
}=\mathrm{C}_{i,j}$; a new proposal $i^{\prime}$ is then selected with
probability $Q\left( i^{\prime}\mid j^{\prime}\right) $, and so forth.
The algorithm terminates according to a posited random \emph{stopping}
\emph{number }$N$ that limits the number of allowed iterations because of
exogenously constrained computational resources (for instance, this number may
have a cost, say economic or physiological, for the decision unit). The last
incumbent is the algorithm output, so what the algorithm chooses from menu $A$.
After this preliminary discussion, next we formalize the \emph{Neural
Metropolis Algorithm}, our algorithmic decision process. Its constitutive
elements are a BCM $\left( \mathrm{C},\mathrm{RT}\right) $ and an
exploration strategy $\left( \mu,Q\right) $, summarized in the quartet
\begin{equation}
\left( \mathrm{C},\mathrm{RT},\mu,Q\right) \label{eq:algo-elms}
\end{equation}
For mathematical convenience we start the algorithm at time $-1$.
\begin{center}
\rule{18cm}{0.04cm}
\textbf{Neural Metropolis Algorithm}
\rule{18cm}{0.04cm}
\end{center}
\noindent\textbf{Input:}$\ $\emph{Given a stopping number }$N$\emph{.}
\noindent\textbf{Start: }\emph{Draw }$i\ $\emph{from}$\emph{\ }A$\emph{
according to }$\mu$ \emph{and}$
$
$\bullet$ \emph{set }$t_{-1}=0$\emph{,
}
$\bullet$\emph{ set }$j_{-1}=i$\emph{.
}
\noindent\textbf{Repeat: }\emph{Draw }$i_{n}\ \emph{from\ }A$\emph{ according
to }$Q\left( \cdot\mid j_{n-1}\right) $\emph{ and compare it to }$j_{n-1}
$\emph{:
}
$\bullet$ \emph{set }$t_{n}=t_{n-1}+\mathrm{RT}_{i_{n},j_{n-1}}$
\emph{,
}
$\bullet$\emph{ set }$j_{n}=\mathrm{C}_{i_{n},j_{n-1}}$\emph{;
}
\noindent\textbf{until }$n=N$\emph{.}
\noindent\textbf{Stop: }\emph{Set }$k=j_{n-1}$\emph{.
}
\noindent\textbf{Output: }\emph{Choose }$k$\emph{ from }$A$\emph{.\vspace
{-20pt}}
\begin{center}
\rule{18cm}{0.04cm}
\end{center}
Along with stopping number $N$, the Neural Metropolis Algorithm
(\ref{eq:algo-elms}) selects alternative $j_{n-1}$ when $N=n$, where $n$ is
the iteration at which the decision process is interrupted by the stopping
number. The Neural Metropolis Algorithm generalizes the Metropolis-DDM
Algorithm of Cerreia-Vioglio et al. (2022), which is the special case when the
underlying BCM is generated by a DDM, the exploration matrix $Q\left( i\mid
j\right) $ is inversely proportional to the mean of $\mathrm{RT}_{i,j}$ and a
hard deadline is given.
\subsection{Algorithmic properties}
The Neural Metropolis Algorithm (\ref{eq:algo-elms}) generates a Markov chain
of incumbents
\begin{equation}
J=\left\{ J_{-1},J_{0},J_{1},...\right\} \label{eq:marko}
\end{equation}
with $\mathbb{P}\left[ J_{-1}=j\right] =\mu\left( j\right) $ for all
alternatives $j$ in $A$ and, for each $n\geq0$,
\[
\mathbb{P}\left[ J_{n}=i\mid J_{n-1}=j\right] =\underset{\text{prob.
}i\text{ proposed}}{\underbrace{Q\left( i\mid j\right) }}\times
\underset{\text{prob. }i\text{ accepted}}{\underbrace{\rho_{\mathrm{C}}\left(
i\mid j\right) }}=:M\left( i\mid j\right)
\]
for all distinct alternatives $i$ and $j$ in $A$. The stochastic matrix $M$ is
the \emph{transition matrix} of the incumbents' Markov chain $J$. In
particular, we say that the Neural Metropolis Algorithm is \emph{reversible}
when its transition matrix is reversible and so the incumbents' Markov chain
(\ref{eq:marko}) is reversible.
The Neural Metropolis Algorithm induces, for each stopping number $N$, a:
\begin{enumerate}
\item[(i)] \emph{choice probability} $p_{N}\in\Delta\left( A\right)
$,\footnote{Recall that $p_{N}$ is an $\left\vert A\right\vert $-dimensional
vector.} where $p_{N}\left( i,A\right) $ is the probability that alternative
$i$ is selected from menu $A$ by the algorithm;
\item[(ii)] \emph{mean response time} $\tau_{N}\in\left[ 0,\infty\right) $,
the average time that the algorithm takes to select an alternative from $A$.
\end{enumerate}
The possibility of computing these quantities in explicit form is what makes
the Neural Metropolis Algorithm empirically relevant. To this end, next we
introduce a class of stopping numbers amenable to computations.
\begin{definition}
A stopping number $N$ is \emph{simple} within a Neural Metropolis Algorithm
(\ref{eq:algo-elms}) if it is independent of the realizations of incumbents,
proposals and response times.
\end{definition}
Next we compute the choice probabilities and mean response times for a Neural
Metropolis Algorithm with a simple stopping number. A piece of notation: we
denote by
\begin{equation}
\bar{\tau}_{j}=
{\displaystyle\sum\limits_{i\in A}}
Q\left( i\mid j\right) \tau_{\mathrm{RT}}\left( i\mid j\right)
\label{eq:tao-simple}
\end{equation}
the average duration of an iteration when $j$ is the incumbent.
\begin{proposition}
\label{lem:comp}For a Neural Metropolis Algorithm (\ref{eq:algo-elms}) with a
simple stopping number $N$,\footnote{The r.h.s. of both formulas in
(\ref{eq:uella}) involve standard matrix-vector multiplications: $\mu$ and
$\bar{\tau}$ are $\left\vert A\right\vert $-dimensional vectors, while
$f_{N}\left( M\right) $ and $g_{N}\left( M\right) $ are the square
matrices of order $\left\vert A\right\vert $ defined by
(\ref{eq:matrix-gen-fct}).}
\begin{equation}
p_{N}=f_{N}\left( M\right) \mu\quad\text{and\quad}\tau_{N}=\bar{\tau}\cdot
g_{N}\left( M\right) \mu\label{eq:uella}
\end{equation}
\end{proposition}
Using the definitions of probability and survival generating functions, we can
rewrite the choice probabilities and mean response times (\ref{eq:uella}) as
\begin{equation}
p_{N}=\left(
{\displaystyle\sum\limits_{n=0}^{\infty}}
\mathbb{P}\left[ N=n\right] M^{n}\right) \mu\quad\text{and\quad}\tau
_{N}=\bar{\tau}\cdot\left(
{\displaystyle\sum\limits_{n=0}^{\infty}}
\mathbb{P}\left[ N>n\right] M^{n}\right) \mu\label{eq:uella-bis}
\end{equation}
An immediate consequence of this rewriting is that
\[
N\geq N^{\prime}\Longrightarrow\tau_{N}\geq\tau_{N^{\prime}}
\]
A less tight stopping number results, as natural, in a longer mean decision time.
In the following important case we can compute the choice probabilities and
mean response times in closed form.
\begin{definition}
Given two coefficients $\zeta\in\left( 0,1\right) $ and $r\geq1$, the
\emph{negative binomial stopping number} $N_{r}\left( \zeta\right) $ is
defined by
\[
\mathbb{P}\left[ N=n\right] =\binom{n+r-1}{r-1}\zeta^{n}\left(
1-\zeta\right) ^{r}\qquad\forall n\geq0
\]
\end{definition}
Under this distribution, the decision unit receives a \textquotedblleft
search\textquotedblright\ signal with probability $\zeta$ and a
\textquotedblleft stop\textquotedblright\ signal with probability $1-\zeta$;
it then proceeds to compare the alternatives when a search signal is received,
while it stops searching after $r$ stop signals.\footnote{For the first $r-1$
stop signals it just freezes, restarting in the next round.} When $r=1$, it
reduces to a \emph{geometric stopping number}
\[
\mathbb{P}\left[ N_{1}\left( \zeta\right) =n\right] =\zeta^{n}\left(
1-\zeta\right) \qquad\forall n\geq0
\]
Now, the decision unit stops as soon as it receives the first stop signal.
\begin{proposition}
\label{prop:neg-bin}It holds
\[
f_{N_{r}\left( \zeta\right) }\left( M\right) =\left( 1-\zeta\right)
^{r}\left( 1-\zeta M\right) ^{-r}
\]
and
\begin{equation}
g_{N_{r}\left( \zeta\right) }\left( M\right) =-\left( \sum_{k=0}
^{r}\binom{r}{k}\left( -\zeta\right) ^{k}\sum_{j=0}^{k-1}M^{j}\right)
\left( 1-\zeta M\right) ^{-r} \label{eq:gnb}
\end{equation}
\end{proposition}
By Proposition \ref{lem:comp}, for a simple negative binomial stopping
number\emph{ }we thus have
\[
p_{N_{r}\left( \zeta\right) }=\left( 1-\zeta\right) ^{r}\left( 1-\zeta
M\right) ^{-r}\mu\quad\text{and\quad}\tau_{N_{r}\left( \zeta\right) }
=-\bar{\tau}\cdot\left( \sum_{k=0}^{r}\binom{r}{k}\left( -\zeta\right)
^{k}\sum_{j=0}^{k-1}M^{j}\right) \left( 1-\zeta M\right) ^{-r}\mu
\]
In particular, in the geometric case $r=1$ we get
\[
p_{N_{1}\left( \zeta\right) }=\left( 1-\zeta\right) \left( 1-\zeta
M\right) ^{-1}\mu\qquad\text{and}\qquad\tau_{N_{1}\left( \zeta\right)
}=\bar{\tau}\cdot\zeta\left( 1-\zeta M\right) ^{-1}\mu
\]
The formula for $p_{N_{1}\left( \zeta\right) }$ was first proved in
Valkanova (2020); all other formulas appear to be novel.
\subsection{Algorithmic value analysis\label{sect:rev}{}}
Earlier in the paper we discussed the value underpinning of binary choice
probabilities. Next we consider a similar concept for Neural Metropolis Algorithms.
\begin{definition}
A Neural Metropolis Algorithm (\ref{eq:algo-elms}) is \emph{value based} if
its binary choice probability $\rho_{\mathrm{C}}$ has a binary value
representation $\left( s_{\mathrm{C}},v_{\mathrm{C}},w_{\mathrm{C}}\right) $.
\end{definition}
This notion is the algorithmic counterpart of the binary value representation
of a binary choice probability. By Theorem \ref{thm:value}, value-based Neural
Metropolis Algorithms are characterized by transitive binary choice probabilities.
\begin{theorem}
\label{prop:value-bis}If a value-based Neural Metropolis Algorithm has a nice
exploration matrix, then
\begin{equation}
\lim_{n\rightarrow\infty}\Pr\left[ J_{n}=i\right] =\lim_{N_{k}
\rightarrow\infty}p_{N_{k}}\left( i,A\right) =\left\{
\begin{array}
[c]{ll}
\frac{e^{v_{\mathrm{C}}\left( i\right) }}{\sum_{j\in\arg\max
_{A}w_{\mathrm{C}}}e^{v_{\mathrm{C}}\left( j\right) }} & \qquad\text{if
}i\in\arg\max_{A}w_{\mathrm{C}}\\
0 & \qquad\text{else}
\end{array}
\right. \label{eq:algo-value}
\end{equation}
for all sequences of divergent simple stopping numbers $N_{k}$.
\end{theorem}
This result clarifies the nature of a value-based Neural Metropolis Algorithm.
To appreciate it, observe that $\Pr\left[ J_{n}=i\right] $ is the
probability that, unstopped, the algorithm chooses alternative $i$ after $n$
iterations, while $\arg\max_{A}w_{\mathrm{C}}$ is the set of alternatives that
are maximal under $\succ^{\ast}$. Thus,
\[
\lim_{n\rightarrow\infty}\Pr\left[ J_{n}=i\right]
\]
indicates the inherent tendency of the Neural Metropolis Algorithm to choose a
maximal alternative $i$, regardless of the exogenously posited stopping
number. As a result, it can be seen as representing the underlying value of
alternative $i$. When the algorithm satisfies (\ref{eq:algo-value}), we have,
for alternatives $i$ and $j$ that are maximal under $\succ^{\ast}$,
\[
v_{\mathrm{C}}\left( i\right) \geq v_{\mathrm{C}}\left( j\right)
\Longleftrightarrow\lim_{n\rightarrow\infty}\Pr\left[ J_{n}=i\right]
\geq\lim_{n\rightarrow\infty}\Pr\left[ J_{n}=j\right]
\]
The inherent tendency of the algorithm is thus consistent with the Fechnerian
utility function $v_{\mathrm{C}}$, which in the limit governs the choices
between maximal alternatives (be they incumbents or proposals). The equality
\[
\lim_{N_{k}\rightarrow\infty}p_{N_{k}}\left( i,A\right) =\lim_{n\rightarrow
\infty}\Pr\left[ J_{n}=i\right]
\]
shows that this limit behavior occurs when the stopping number is less and
less tight. This means, \emph{inter alia}, that the limit behavior is
unaffected by status quo biases, so $s_{\mathrm{C}}$ plays no role. Implicit
here is the view that these biases arise under external pressure, here
embodied by the posited stopping number, so they vanish when this pressure relaxes.
Finally, formula (\ref{eq:algo-value}) ensures that
\[
\lim_{n\rightarrow\infty}\Pr\left[ J_{n}=i\right] =\lim_{N_{k}
\rightarrow\infty}p_{N_{k}}\left( i,A\right) =0
\]
for all alternatives $i$ in $A$ that are not maximal under $\succ^{\ast}$. In
other words, at the limit these alternatives have no chance to be selected --
as $\lim_{N_{k}\rightarrow\infty}p_{N_{k}}\left( i,A\right) =0$ -- and in
any event the algorithm has no tendency to select them -- as $\lim
_{n\rightarrow\infty}\Pr\left[ J_{n}=i\right] =0$. This optimality property
ensures that, as stopping numbers are less and less tight, the algorithm
selects alternatives that are maximal under $\succ^{\ast}$. Among them,
stochastic comparisons are then governed by the Fechnerian utility. In sum, at
the limit the Neural Metropolis Algorithm hard-maximizes $w_{\mathrm{C}}$ and
soft-maximizes $v_{\mathrm{C}}$.
A first important consequence of the previous theorem concerns the case in
which $\succ^{\ast}$ features a single maximal element.
\begin{corollary}
\label{prop:value-ter}A value-based Neural Metropolis Algorithm
(\ref{eq:algo-elms}), with nice exploration matrix $Q$, satisfies
\[
\lim_{n\rightarrow\infty}\Pr\left[ J_{n}=i\right] =\lim_{N_{k}
\rightarrow\infty}p_{N_{k}}\left( i,A\right) =\left\{
\begin{array}
[c]{ll}
1 & \qquad\text{if }i\in\arg\max_{A}w_{\mathrm{C}}\\
0 & \qquad\text{else}
\end{array}
\right.
\]
if and only if $\arg\max_{A}w_{\mathrm{C}}$ is a singleton.
\end{corollary}
For instance, in the deterministic case of a transitive Dirac binary choice
probability the Neural Metropolis Algorithm selects, at the limit, the best
alternative.\footnote{Recall that a Dirac and transitive $\rho_{\mathrm{C}}$
corresponds to a weakly complete and transitive $\succ^{\ast}$ (cf. Lemma
\ref{lm:dirac}). So, $\arg\max_{A}w_{\mathrm{C}}$ is a singleton consisting of
the best alternative under $\succ^{\ast}$.} This limit analysis is much in
line with the traditional assumption of unconstrained computational resources.
Traditional choice behavior is thus implemented computationally by the Neural
Metropolis Algorithm.
In the traditional case just considered, $\arg\max_{A}w_{\mathrm{C}}$ is a
singleton in $A$. In contrast, $\arg\max_{A}w_{\mathrm{C}}$ coincides with the
whole set $A$ when, like in the DDM case, the binary choice probability
$\rho_{\mathrm{C}}$ is positive. In this case, $w_{\mathrm{C}}$ is constant,
so all alternatives are maximal under $\succ^{\ast}$. We thus have a second
noteworthy special case of the last representation theorem.
\begin{corollary}
\label{prop:value-quater}A value-based Neural Metropolis Algorithm
(\ref{eq:algo-elms}), with nice exploration matrix $Q$, satisfies
\begin{equation}
\lim_{n\rightarrow\infty}\Pr\left[ J_{n}=i\right] =\lim_{N_{k}
\rightarrow\infty}p_{N_{k}}\left( i,A\right) =\frac{e^{v_{\mathrm{C}}\left(
i\right) }}{\sum_{j\in A}e^{v_{\mathrm{C}}\left( j\right) }}\qquad\forall
i\in A \label{eq:acqua}
\end{equation}
if and only if its binary choice probability $\rho_{\mathrm{C}}$ is positive.
\end{corollary}
By Proposition \ref{prop:ddm-bcm}, in the DDM special case we have
$v_{\mathrm{C}}=\lambda\nu$ in (\ref{eq:acqua}) and so multinomial logit
behavior
\begin{equation}
\frac{e^{\lambda\nu\left( i\right) }}{\sum_{j\in A}e^{\lambda\nu\left(
j\right) }} \label{eq:acqua-bis}
\end{equation}
emerges at the limit, like in Baldassi et al. (2020) and Cerreia-Vioglio et
al. (2022), even though here the assumptions on the stopping numbers are different.
In the positive $\rho_{\mathrm{C}}$ case, value-based Neural Metropolis
Algorithms have a remarkable computational property, as the next theorem, our
last main result, shows.
\begin{theorem}
\label{thm:value-bis}A Neural Metropolis Algorithm (\ref{eq:algo-elms}) with
positive $\rho_{\mathrm{C}}$ and nice exploration matrix $Q$, is value based
if and only if its transition matrix $M$ is reversible.\footnote{If and only
if $\rho_{\mathrm{C}}$ is transitive.}
\end{theorem}
At a computational level, reversibility ensures that the transition matrix $M$
is diagonalizable with real eigenvalues. Therefore,
\[
M=U\operatorname*{diag}\left( \lambda_{1},\lambda_{2},...,\lambda_{\left\vert
A\right\vert }\right) U^{-1}
\]
where $\operatorname*{diag}\left( \cdot\right) $ is the diagonal matrix of
the eigenvalues $\lambda_{i}$ of $M$, each repeated according to its
multiplicity, and the columns of $U$ form a basis of the respective
eigenvectors. In turn, this readily implies
\begin{equation}
f_{N}\left( M\right) =U\operatorname*{diag}\left( f_{N}\left( \lambda
_{1}\right) ,f_{N}\left( \lambda_{2}\right) ,...,f_{N}\left(
\lambda_{\left\vert A\right\vert }\right) \right) U^{-1} \label{eq:frev}
\end{equation}
and
\begin{equation}
g_{N}\left( M\right) =U\operatorname*{diag}\left( \dfrac{1-f_{N}\left(
\lambda_{1}\right) }{1-\lambda_{1}},\dfrac{1-f_{N}\left( \lambda_{2}\right)
}{1-\lambda_{2}},...,\dfrac{1-f_{N}\left( \lambda_{\left\vert A\right\vert
}\right) }{1-\lambda_{\left\vert A\right\vert }}\right) U^{-1}
\label{eq:grev}
\end{equation}
with the limit convention (\ref{eq:limit-bis}). These formulas make it
possible to compute choice probabilities and mean response times for simple stopping
numbers, as in formulas (\ref{eq:uella-bis}). This computational achievement
concludes the analysis of value-based Neural Metropolis Algorithms.
\section{Discussion: temporal constraints\label{sect:icemia}}
In our analysis we considered constrained resources as modelled by a stopping
number on iterations, which may have an economic or physiological cost for the
decision unit. For perspective, in this final section we consider a different
type of constraint, namely, a temporal constraint in the form of a hard time
constraint $t$. This deadline induces a stopping number $N_{t}$ with $N_{t}=n$
if $t\in\left[ t_{n-1},t_{n}\right) $. In words, the decision unit cannot
conclude the $n$-th comparison when the duration $t_{n}$ of that comparison
exceeds the deadline $t$. To see how this stopping number affects the
analysis, observe that, if unstopped, a Neural Metropolis Algorithm realizes a
stochastic process
\begin{equation}
\left( J,I,T\right) =\left( J_{-1},I_{0},T_{0},J_{0},I_{1},T_{1}
,...,J_{n-1},I_{n},T_{n},J_{n},I_{n+1},...\right) \label{eq:story}
\end{equation}
where the realization $j_{n-1}$ of $J_{n-1}$ is the incumbent at the end of
iteration $n-1$, the realization $i_{n}$ of $I_{n}$ is the proposal at
iteration $n$, and the realization $t_{n}$ of $T_{n}$ is the duration of
iteration $n$. The stopping number $N_{t}$ acts as follows:
\[
N_{t}=n\iff T_{-1}+T_{0}+\cdots+T_{n-1}\leq t<T_{-1}+T_{0}+\cdots
+T_{n-1}+T_{n}
\]
where $T_{-1}=0$. In this case, a closed form representation of $p_{N_{t}}$ is
not achievable in general and, by definition, $\tau_{N_{t}}=t$. Yet, we can
give a limit result, in the spirit of Theorem \ref{prop:value-bis}, under the
following assumption.
\noindent\textbf{Regularity} A binary choice model $\left( \mathrm{C}
,\mathrm{RT}\right) $ is \emph{regular} if $\rho_{\mathrm{C}}$ is
positive\ and transitive, $\tau_{\mathrm{RT}}$ is positive and $\mathrm{RT}
=\left[ \mathrm{RT}_{i,j}\right] $ consists of random response times
$\mathrm{RT}_{i,j}$ with a continuous distribution at $0$ and with no singular
part.\footnote{These two conditions on the distributions of response times are
automatically satisfied when they all admit density.}
We can now state the limit result.\footnote{In this discussion section we
focus on the positive case, leaving the more general case to an in-depth
future analysis.}
\begin{proposition}
\label{thm:value-ter}If a Neural Metropolis Algorithm (\ref{eq:algo-elms})
with irreducible exploration matrix $Q$ is based on a regular BCM $\left(
\mathrm{C},\mathrm{RT}\right) $, then
\[
\lim_{t\rightarrow\infty}p_{N_{t}}\left( i,A\right) =\frac{e^{v_{\mathrm{C}
}\left( i\right) }\bar{\tau}_{i}}{\sum_{j\in A}e^{v_{\mathrm{C}}\left(
j\right) }\bar{\tau}_{j}}\qquad\forall i\in A
\]
\end{proposition}
As time pressure diminishes, the limit probability of choosing alternative $i$
becomes proportional to the limit probability with which $i$ is an incumbent
times the average duration of the comparisons in which $i$ is the incumbent.
The intuition is natural: the longer the time spent in comparing an
alternative with the other alternatives, the higher the probability of
choosing that alternative at the deadline $t$.
In the DDM special case, we get
\begin{equation}
\lim_{t\rightarrow\infty}p_{N_{t}}\left( i\right) =\frac{e^{\lambda
\nu\left( i\right) }\bar{\tau}_{i}}{
{\displaystyle\sum\limits_{j\in A}}
e^{\lambda\nu\left( j\right) }\bar{\tau}_{j}}=\frac{e^{\lambda\nu\left(
i\right) +\alpha\left( i\right) }}{
{\displaystyle\sum\limits_{j\in A}}
e^{\lambda\nu\left( j\right) +\alpha\left( j\right) }}\qquad\forall i\in A
\label{eq:alpha}
\end{equation}
Thus, the limit probability is softmax with neural utility $\nu$ and
alternative specific bias
\[
\alpha\left( i\right) =\log\bar{\tau}_{i}\qquad\forall i\in A
\]
If, in addition, the DDM is symmetric and the off-diagonal entries of the
exploration matrix $Q$ are inversely proportional to mean response times (as
in Cerreia-Vioglio et al. 2022, Section 2), then the $\bar{\tau}_{i}$'s are
approximately constant and multinomial logit behavior (\ref{eq:acqua-bis})
emerges.\footnote{See Appendix \ref{app:endicectomia} for details.}
\appendix
\section{Appendix: Evidence threshold models\label{sect:evid-mod}}
As it will soon become clear, evidence threshold models are best introduced in
discrete time. For each pair $i,j$ of alternatives in $A$, let $\left\{
\mathrm{Z}_{i,j}\left( t\right) \right\} _{t=0}^{\infty}$ be a
discrete-time stochastic process in which each variable $\mathrm{Z}
_{i,j}\left( t\right) $ represents the net evidence -- accumulated or
instantaneous -- in favor of $i$ over $j$ that the neural system takes into
account at time $t$. Given two evidence thresholds $\lambda,\beta>0$, a
decision is taken when either the evidence in favor of $i$ reaches level
$\lambda$ or the evidence in favor of $j$ reaches level $\beta$. This happens
at (stochastic) time
\begin{equation}
\mathrm{RT}_{i,j}=\min\left\{ t:\mathrm{Z}_{i,j}\left( t\right) \geq
\lambda\text{ or }\mathrm{Z}_{i,j}\left( t\right) \leq-\beta\right\}
\label{BCM1}
\end{equation}
With this, the choice variable is
\begin{equation}
\mathrm{C}_{i,j}=\left\{
\begin{array}
[c]{ll}
i & \qquad\text{if }\mathrm{Z}_{i,j}\left( \mathrm{RT}_{i,j}\right)
\geq\lambda
\\
j & \qquad\text{if }\mathrm{Z}_{i,j}\left( \mathrm{RT}_{i,j}\right)
\leq-\beta
\end{array}
\right. \label{BCM2}
\end{equation}
Evidence threshold models encompass integration models, like a discrete-time
version of the DDM, as well as the extrema detection models discussed by Stine
et al. (2020). To see why, consider the discrete-time Ornstein-Uhlenbeck
process
\begin{equation}
\mathrm{Z}_{i,j}\left( t\right) =\underset{\text{past evidence}}
{\underbrace{\left( 1-\eta\right) \mathrm{Z}_{i,j}\left( t-1\right) }
}+\underset{\text{new evidence}}{\underbrace{\zeta_{i,j}\left( t\right) }
}\qquad\forall t\geq1\nonumber
\end{equation}
with initial condition $\mathrm{Z}_{i,j}\left( 0\right) =0$. The scalar
$\eta\in\left[ 0,1\right] $ captures past evidence deterioration and the
variable
\begin{equation}
\zeta_{i,j}\left( t\right) =\left[ \nu\left( i\right) -\nu\left(
j\right) \right] \mu\left( t-1\right) +\sigma\varepsilon\left( t\right)
\qquad\forall t\geq1 \label{eq:ou-ter}
\end{equation}
is the instantaneous\ noisy evidence gathered at time $t$ in favor of either
alternative.\footnote{Evidence is in favor of $i$ over $j$ when $\zeta
_{i,j}\left( t\right) \geq0$ and in favor of $j$ over $i$ when $\zeta
_{i,j}\left( t\right) \leq0$. The possible dependence of $\mu$ on $t-1$
allows for urgency signals.} The shock $\varepsilon$ is a Gaussian white noise
process -- i.e., it consists of i.i.d. Gaussian random variables
$\varepsilon\left( t\right) \sim N\left( 0,1\right) $; like in the DDM,
$\nu\left( i\right) $ is the value of alternative $i$. When
\begin{equation}
\eta=0\quad\text{,\quad}\mu=1\quad\text{and\quad}\sigma=\sqrt{2}
\label{eq:ddm-discrete}
\end{equation}
process (\ref{eq:ou-ter}) reduces to the following discrete-time version of
the DDM
\[
\mathrm{Z}_{i,j}\left( t\right) -\mathrm{Z}_{i,j}\left( t-1\right)
=\left[ \nu\left( i\right) -\nu\left( j\right) \right] +\sqrt
{2}\,\varepsilon\left( t\right) \qquad\forall t\geq1
\]
Through the discrete-time Wiener process $w\left( t\right) =\sum_{s=1}
^{t}\varepsilon\left( s\right) $, it is immediate to see that $\mathrm{Z}
_{i,j}\left( t\right) $ represents accumulated noisy evidence:
\[
\mathrm{Z}_{i,j}\left( t\right) =\sum_{s=1}^{t}\zeta_{i,j}\left( s\right)
=\left[ \nu\left( i\right) -\nu\left( j\right) \right] t+\sqrt
{2}w\left( t\right)
\]
In contrast, when $\eta=1$ the process (\ref{eq:ou-ter}) takes the
\emph{extrema detection} form
\begin{equation}
\mathrm{Z}_{i,j}\left( t\right) =\zeta_{i,j}\left( t\right) =\left[
\nu\left( i\right) -\nu\left( j\right) \right] \mu\left( t-1\right)
+\sigma\varepsilon\left( t\right) \qquad\forall t\geq1
\label{eq:extrema-det}
\end{equation}
Now $\mathrm{Z}_{i,j}\left( t\right) $ represents instantaneous noisy
evidence, as opposed to the DDM accumulated one.
In continuous time, the Ornstein-Uhlenbeck process becomes
\[
\mathrm{dZ}_{i,j}\left( t\right) =-\eta\mathrm{Z}_{i,j}\left( t\right)
\mathrm{d}t+\left[ \nu\left( i\right) -\nu\left( j\right) \right]
\mu\left( t\right) \mathrm{d}t+\sigma\mathrm{dW}
\]
with solution
\begin{equation}
\mathrm{Z}_{i,j}\left( t\right) =\left[ \nu\left( i\right) -\nu\left(
j\right) \right] \mu\left( t\right) \frac{1-e^{-\eta t}}{\eta}+\int
_{0}^{t}e^{-\eta\left( t-s\right) }\sigma\mathrm{dW}\left( s\right)
\label{eq:ou-cts}
\end{equation}
The DDM (\ref{eq:ddm}) is still the special case (\ref{eq:ddm-discrete}). More
difficult is to identify the continuous counterpart of the extrema detection
model (\ref{eq:extrema-det}) because of the technical issues that arise with
continuous time white noise. As these issues do not appear to have a
substantive neural underpinning, we introduced evidence threshold models in
discrete time.\footnote{The accumulated evidence used by integration models
like the DDM is properly formalized by Wiener processes. The instantaneous
evidence featured by extrema detection models would rely on a notion of
\textquotedblleft derivative\textquotedblright\ for Wiener processes, a
notoriously subtle issue as their paths are nowhere differentiable.}
Be that as it may, Bogacz et al. (2006) report formulas for the continuous
time Ornstein-Uhlenbeck process that generalize the DDM ones upon which
Proposition \ref{prop:ddm-bcm} is based.\ It is unclear, however, whether these
generalized formulas deliver a sharp Ornstein-Uhlenbeck extension of this
proposition. Nevertheless, (\ref{eq:ou-cts}) is a significant generalization
of the DDM that, via the obvious continuous time versions of (\ref{BCM1}) and
(\ref{BCM2}), can play the role of a BCM.\pagebreak
\section{Appendix: Proofs and related analysis}
\subsection{Section \ref{sect:bin-choice}}
In this section it is sometimes convenient to use the exponential
transformation $u=e^{v}$ of the Fechnerian utility $v$. We call $u$ \emph{strict
utility}.
\subsubsection{Proof of Lemma \ref{lm:bcp-prop}}
(i) Asymmetry is easily checked. Assume \emph{per contra} that $\succ^{\ast}$
is not negatively transitive.$\ $Then, there exist $i$, $j$ and $k$ such that
$i\nsucc^{\ast}k\nsucc^{\ast}j$ but $i\succ^{\ast}j$. Alternatives $i$, $k$
and $j$ must be distinct: $i\succ^{\ast}j$ implies $i\neq j$, while $i=k$
would imply $i\nsucc^{\ast}j$ and so would $k=j$. Moreover,
\begin{enumerate}
\item[(a)] $i\nsucc^{\ast}k$ implies $\rho\left( i\mid k\right) <1$ and
$\rho\left( k\mid i\right) >0$,
\item[(b)] $k\nsucc^{\ast}j$ implies $\rho\left( k\mid j\right) <1$ and
$\rho\left( j\mid k\right) >0$,
\item[(c)] $i\succ^{\ast}j$ implies $\rho\left( i\mid j\right) =1$
and$\ \rho\left( j\mid i\right) =0$.
\end{enumerate}
Therefore,
\[
\rho\left( j\mid i\right) \rho\left( k\mid j\right) \rho\left( i\mid
k\right) =0\quad\text{and\quad}\rho\left( k\mid i\right) \rho\left( j\mid
k\right) \rho\left( i\mid j\right) \neq0
\]
which contradicts the transitivity of $\rho$. We conclude that $\succ^{\ast}$
is negatively transitive.
(ii) In view of (i), it follows from Fishburn (1970) p. 13.
(iii) Completeness is easily established. Assume \emph{per contra} that
$\succsim$ is not transitive.$\ $Then there exist $i$, $j$ and $k$ such that
$i\succsim k\succsim j$ but $i\not \succsim j$. Alternatives $i$, $k$ and $j$
must be distinct: $i\not \succsim j$ implies $i\neq j$, while $i=k$ would
imply $i\succsim j$, and so would $k=j$. Moreover,
\begin{enumerate}
\item[(a)] $i\succsim k$ implies $\rho\left( i\mid k\right) \geq\rho\left(
k\mid i\right) $,
\item[(b)] $k\succsim j$ implies $\rho\left( k\mid j\right) \geq\rho\left(
j\mid k\right) $,
\item[(c)] $i\not \succsim j$ implies $\rho\left( j\mid i\right)
>\rho\left( i\mid j\right) $.
\end{enumerate}
It holds $\rho\left( i\mid k\right) >0$. Indeed, $\rho\left( i\mid
k\right) =0$ would imply $\rho\left( k\mid i\right) =1$, contradicting (a).
Similarly, $\rho\left( k\mid j\right) >0$. Then,
\[
\rho\left( i\mid k\right) \rho\left( k\mid j\right) \geq\rho\left( k\mid
i\right) \rho\left( j\mid k\right) \quad\text{;\quad}\rho\left( j\mid
i\right) >\rho\left( i\mid j\right) \quad\text{;\quad}\rho\left( i\mid
k\right) \rho\left( k\mid j\right) >0
\]
If $\rho\left( i\mid k\right) \rho\left( k\mid j\right) =\rho\left( k\mid
i\right) \rho\left( j\mid k\right) $, then both terms are strictly
positive, and
\[
\rho\left( i\mid k\right) \rho\left( k\mid j\right) \rho\left( j\mid
i\right) >\rho\left( k\mid i\right) \rho\left( j\mid k\right) \rho\left(
i\mid j\right)
\]
Else $\rho\left( i\mid k\right) \rho\left( k\mid j\right) >\rho\left(
k\mid i\right) \rho\left( j\mid k\right) $, and then
\[
\rho\left( i\mid k\right) \rho\left( k\mid j\right) \rho\left( j\mid
i\right) >\rho\left( k\mid i\right) \rho\left( j\mid k\right) \rho\left(
j\mid i\right) \geq\rho\left( k\mid i\right) \rho\left( j\mid k\right)
\rho\left( i\mid j\right)
\]
In both cases the transitivity of $\rho$ is contradicted. We conclude that
$\succsim$ is transitive.
(iv) Reflexivity of $\succsim^{\circ}$ is obvious. Let $i\succsim^{\circ}j$
and $j\succsim^{\circ}k$. By definition, $i\succsim j$ and $i\parallel^{\ast
}j$ as well as $j\succsim k$ and $j\parallel^{\ast}k$. As both $\succsim$ and
$\parallel^{\ast}$ are transitive, it follows that $i\succsim k$ and
$i\parallel^{\ast}k$, that is, $i\succsim^{\circ}k$. We conclude that
$\succsim^{\circ}$ is transitive. Finally, assume $i\parallel^{\ast}j$, and
note that also $j\parallel^{\ast}i$. Since $\succsim$ is complete, then either
$i\succsim j$ or $j\succsim i$, thus either $i\succsim^{\circ}j$ or
$j\succsim^{\circ}i$.
$\blacksquare$
\subsubsection{Proof of Lemma \ref{prop:vb}}
Point (i) is obvious.
(ii) Let $i$ and $j$ be any two alternatives with $w\left( i\right)
=w\left( j\right) $, and let $\tilde{u}:A\rightarrow\left( 0,\infty\right)
$ be such that
\[
\rho\left( i\mid j\right) =s\left( i,j\right) \dfrac{\tilde{u}\left(
i\right) }{\tilde{u}\left( i\right) +\tilde{u}\left( j\right) }
\]
We have
\[
\frac{\rho\left( i\mid j\right) }{\rho\left( j\mid i\right) }
=\frac{s\left( i,j\right) \dfrac{\tilde{u}\left( i\right) }{\tilde
{u}\left( i\right) +\tilde{u}\left( j\right) }}{s\left( j,i\right)
\dfrac{\tilde{u}\left( j\right) }{\tilde{u}\left( i\right) +\tilde
{u}\left( j\right) }}=\dfrac{\tilde{u}\left( i\right) }{\tilde{u}\left(
j\right) }
\]
Similarly,
\[
\frac{\rho\left( i\mid j\right) }{\rho\left( j\mid i\right) }
=\frac{s\left( i,j\right) \dfrac{u\left( i\right) }{u\left( i\right)
+u\left( j\right) }}{s\left( j,i\right) \dfrac{u\left( j\right)
}{u\left( i\right) +u\left( j\right) }}=\dfrac{u\left( i\right)
}{u\left( j\right) }
\]
Therefore, for any $j^{\ast}\in A$,
\[
\tilde{u}\left( i\right) =\frac{\tilde{u}\left( j^{\ast}\right) }{u\left(
j^{\ast}\right) }u\left( i\right)
\]
for all $i\in A$. We conclude that $u$ is unique up to a positive scalar multiple.
(iii) Let $i$ and $j$ be any two alternatives with $w\left( i\right)
=w\left( j\right) $. By the symmetry of $s$,
\begin{equation}
\rho\left( i\mid j\right) +\rho\left( j\mid i\right) =s\left( i,j\right)
\dfrac{u\left( i\right) }{u\left( i\right) +u\left( j\right) }+s\left(
j,i\right) \dfrac{u\left( j\right) }{u\left( i\right) +u\left( j\right)
}=s\left( i,j\right) \label{eq:fig}
\end{equation}
Then $s$ is unique on the level set of $w\left( i\right) $. The relations in
(\ref{eq:propvb}) follow.
$\blacksquare$
\subsubsection{Proof of Lemma \ref{lm:dirac}}
Let $\rho$ be a binary choice probability. Suppose that $\rho$ is Dirac and
transitive. Let $i\neq j$. As $\rho$ is Dirac, $\rho\left( i\mid j\right)
\in\left\{ 0,1\right\} $. If $\rho\left( i\mid j\right) =1$, then
$i\succ^{\ast}j$. If $\rho\left( i\mid j\right) =0$, then $\rho\left( j\mid
i\right) =1$ and so $j\succ^{\ast}i$. We conclude that $\succ^{\ast}$ is
weakly complete.
Let $i\succ^{\ast}j$ and $j\succ^{\ast}k$. Hence, we have that $j\not \succ
^{\ast}i$, $k\not \succ ^{\ast}j$, and $k\neq i$. As $\rho$ is transitive,
$\succ^{\ast}$ is negatively transitive by Lemma \ref{lm:bcp-prop}. Hence,
$k\not \succ ^{\ast}i$ thus $\rho\left( i\mid k\right) \neq0$. By the
definition of Dirac and since $i\neq k$, we have that $\rho\left( i\mid
k\right) =1$, thus $i\succ^{\ast}k$. We conclude that $\succ^{\ast}$ is transitive.
As to the converse, let $\succ^{\ast}$ be weakly complete and transitive. Let
$i\neq j$. As $\succ^{\ast}$ is weakly complete, either $i\succ^{\ast}j$ or
$j\succ^{\ast}i$. If $i\succ^{\ast}j$, then $\rho\left( i\mid j\right) =1$;
if $j\succ^{\ast}i$, then $\rho\left( j\mid i\right) =1$ and so $\rho\left(
i\mid j\right) =0$. We conclude that $\rho\left( i\mid j\right) \in\left\{
0,1\right\} $. This proves that $\rho$ is Dirac. Suppose, \emph{per contra},
that $\rho$ is not transitive. Then, there exist three distinct alternatives
$i$, $j$ and $k$ such that
\[
\rho\left( j\mid i\right) \rho\left( k\mid j\right) \rho\left( i\mid
k\right) \neq\rho\left( k\mid i\right) \rho\left( j\mid k\right)
\rho\left( i\mid j\right)
\]
It is impossible that both sides contain a zero factor. Since $\rho$ is Dirac,
then either
\[
\rho\left( j\mid i\right) \rho\left( k\mid j\right) \rho\left( i\mid
k\right) =1
\]
or
\[
\rho\left( k\mid i\right) \rho\left( j\mid k\right) \rho\left( i\mid
j\right) =1
\]
In the former case, $i\succ^{\ast}k\succ^{\ast}j\succ^{\ast}i$ and so
$\succ^{\ast}$ is not transitive. In the latter case, $i\succ^{\ast}
j\succ^{\ast}k\succ^{\ast}i$ and so, again, $\succ^{\ast}$ is not transitive.
We conclude that $\rho$ must be transitive.
$\blacksquare$
\subsubsection{Theorem \ref{thm:value}}
We prove a more general result that provides a utility representation
$\bar{u}:A\rightarrow\mathbb{R}$ for the preference $\succsim$.
\begin{theorem}
Given a binary choice probability $\rho$, the following conditions are equivalent:
\begin{enumerate}
\item[(i)] $\rho$ is transitive;
\item[(ii)] there exist $w,u:A\rightarrow\left( 0,\infty\right) $ and a
symmetric $s:A^{2}\rightarrow\left( 0,\infty\right) $ such that
\[
\rho\left( i\mid j\right) =\left\{
\begin{array}
[c]{ll}
1
& \qquad\text{if }w\left( i\right) >w\left( j\right) \\
s\left( i,j\right) \dfrac{u\left( i\right) }{u\left( i\right) +u\left(
j\right) }
& \qquad\text{if }w\left( i\right) =w\left( j\right) \\
0 & \qquad\text{if }w\left( i\right) <w\left( j\right)
\end{array}
\right.
\]
for all $i,j\in A$;
\item[(iii)] there exist $\bar{u}:A\rightarrow\left( 0,\infty\right) $,
$f:\operatorname{Im}\bar{u}\rightarrow\left( 0,\infty\right) $ increasing,
and a symmetric $s:A^{2}\rightarrow\left( 0,\infty\right) $ such that
\[
\rho\left( i\mid j\right) =\left\{
\begin{array}
[c]{ll}
1
& \qquad\text{if }f\left( \bar{u}\left( i\right) \right)
>f\left( \bar{u}\left( j\right) \right) \\
s\left( i,j\right) \dfrac{\bar{u}\left( i\right) }{\bar{u}\left(
i\right) +\bar{u}\left( j\right) }
& \qquad\text{if }f\left(
\bar{u}\left( i\right) \right) =f\left( \bar{u}\left( j\right) \right)
\\
0 & \qquad\text{if }f\left( \bar{u}\left( i\right) \right) <f\left(
\bar{u}\left( j\right) \right)
\end{array}
\right.
\]
for all $i,j\in A$.
\end{enumerate}
\end{theorem}
By setting $v=\log u$ we recover Theorem \ref{thm:value} (note that since $w$
is ordinally unique we can always assume it to be strictly positive).
\noindent\textbf{Proof} (i) implies (iii). Since $A$ is finite there exists
$w:A\rightarrow\left( 0,\infty\right) $ that represents $\succ^{\ast}$ in
the sense of (\ref{eq:stoch-ut-pre}). Then,
\begin{align*}
w\left( i\right) & >w\left( j\right) \iff i\succ^{\ast}j\iff\rho\left(
i\mid j\right) =1\\
w\left( i\right) & <w\left( j\right) \iff j\succ^{\ast}i\iff\rho\left(
i\mid j\right) =0\\
w\left( i\right) & =w\left( j\right) \iff i\parallel^{\ast}j\iff
\rho\left( i\mid j\right) \in\left( 0,1\right)
\end{align*}
By Lemma \ref{lm:bcp-prop}, $\parallel^{\ast}$ is an equivalence relation on
$A$. Since $w$ is unique up to a strictly increasing transformation, if
$\left\vert \operatorname{Im}w\right\vert =m$ we can assume $\operatorname{Im}
w=\left\{ 1,2,...,m\right\} $. For all $h=1,2,...,m$ we can choose
$i_{h}^{\ast}\in w^{-1}\left( h\right) $. With this, $\left[ i_{1}^{\ast
}\right] ,...,\left[ i_{m}^{\ast}\right] $ is the partition of $A$ induced
by $\parallel^{\ast}$. For each $h=1,...,m$, set
\[
u_{h}^{\ast}\left( j\right) =\dfrac{\rho\left( j\mid i_{h}^{\ast}\right)
}{\rho\left( i_{h}^{\ast}\mid j\right) }\qquad\forall j\in\left[
i_{h}^{\ast}\right]
\]
The ratio is well defined because $w\left( j\right) =w\left( i_{h}^{\ast
}\right) $ implies $\rho\left( j\mid i_{h}^{\ast}\right) ,\rho\left(
i_{h}^{\ast}\mid j\right) \in\left( 0,1\right) $. With this,
\[
\frac{u_{h}^{\ast}\left( j\right) }{u_{h}^{\ast}\left( k\right) }
=\frac{\dfrac{\rho\left( j\mid i_{h}^{\ast}\right) }{\rho\left( i_{h}
^{\ast}\mid j\right) }}{\dfrac{\rho\left( k\mid i_{h}^{\ast}\right) }
{\rho\left( i_{h}^{\ast}\mid k\right) }}=\dfrac{\rho\left( j\mid
i_{h}^{\ast}\right) }{\rho\left( i_{h}^{\ast}\mid j\right) }\dfrac
{\rho\left( i_{h}^{\ast}\mid k\right) }{\rho\left( k\mid i_{h}^{\ast
}\right) }\qquad\forall j,k\in\left[ i_{h}^{\ast}\right]
\]
By transitivity, we have that
\[
\rho\left( j\mid i\right) \rho\left( k\mid j\right) \rho\left( i\mid
k\right) =\rho\left( k\mid i\right) \rho\left( j\mid k\right) \rho\left(
i\mid j\right)
\]
for all $i,$ $j,$ $k\in A$,\footnote{As previously observed, transitivity
implies the above \textquotedblleft product rule\textquotedblright\ for all
triplets of alternatives in $A$ and not only triplets of distinct ones.} and
\[
\dfrac{\rho\left( j\mid i_{h}^{\ast}\right) }{\rho\left( i_{h}^{\ast}\mid
j\right) }\dfrac{\rho\left( i_{h}^{\ast}\mid k\right) }{\rho\left( k\mid
i_{h}^{\ast}\right) }=\frac{\rho\left( j\mid k\right) }{\rho\left( k\mid
j\right) }
\]
for all $j,k\in\left[ i_{h}^{\ast}\right] $. Therefore,
\[
\frac{u_{h}^{\ast}\left( j\right) }{u_{h}^{\ast}\left( k\right) }
=\dfrac{\rho\left( j\mid i_{h}^{\ast}\right) }{\rho\left( i_{h}^{\ast}\mid
j\right) }\dfrac{\rho\left( i_{h}^{\ast}\mid k\right) }{\rho\left( k\mid
i_{h}^{\ast}\right) }=\frac{\rho\left( j\mid k\right) }{\rho\left( k\mid
j\right) }\qquad\forall j,k\in\left[ i_{h}^{\ast}\right]
\]
for all $h=1,...,m$.
Set $\sigma_{1}=1$ and for each $h=2,...,m$, choose a strictly positive
constant $\sigma_{h}$ such that
\[
\max_{j\in\left[ i_{h-1}^{\ast}\right] }\sigma_{h-1}u_{h-1}^{\ast}\left(
j\right) <\min_{j\in\left[ i_{h}^{\ast}\right] }\sigma_{h}u_{h}^{\ast
}\left( j\right)
\]
that is,
\[
\sigma_{h}>\sigma_{h-1}\frac{\max_{j\in\left[ i_{h-1}^{\ast}\right] }
u_{h-1}^{\ast}\left( j\right) }{\min_{j\in\left[ i_{h}^{\ast}\right]
}u_{h}^{\ast}\left( j\right) }
\]
Define
\[
\bar{u}\left( j\right) =\sigma_{h}u_{h}^{\ast}\left( j\right)
\qquad\forall j\in\left[ i_{h}^{\ast}\right] ,\forall h=1,...,m
\]
Note that for all $h=2,...,m$, all $j_{h-1}\in\left[ i_{h-1}^{\ast}\right] $
and all $j_{h}\in\left[ i_{h}^{\ast}\right] $
\[
\bar{u}\left( j_{h-1}\right) <\bar{u}\left( j_{h}\right)
\]
that is,
\[
\bar{u}\left( j_{1}\right) <\bar{u}\left( j_{2}\right) <\cdot\cdot
\cdot<\bar{u}\left( j_{m}\right)
\]
whenever $j_{h}\in\left[ i_{h}^{\ast}\right] $ for all $h=1,2,...,m$. Then,
if $\bar{u}\left( k\right) \geq\bar{u}\left( j\right) $, with
$k\in\lbrack i_{h_{k}}^{\ast}]$ and $j\in\lbrack i_{h_{j}}^{\ast}]$, it cannot
be the case that
\[
h_{j}>h_{k}
\]
Thus, $h_{k}\geq h_{j}$, $w\left( i_{h_{k}}^{\ast}\right) \geq w\left(
i_{h_{j}}^{\ast}\right) $, and $w\left( k\right) \geq w\left( j\right) $.
Therefore there exists $f:\bar{u}\left( A\right) \rightarrow\left(
0,\infty\right) $ increasing and such that
\[
f\circ\bar{u}=w
\]
Thus,
\begin{align*}
f\left( \bar{u}\left( i\right) \right) & >f\left( \bar{u}\left(
j\right) \right) \iff\rho\left( i\mid j\right) =1\quad\text{;\quad
}f\left( \bar{u}\left( i\right) \right) =f\left( \bar{u}\left(
j\right) \right) \iff\rho\left( i\mid j\right) \in\left( 0,1\right) \\
f\left( \bar{u}\left( i\right) \right) & <f\left( \bar{u}\left(
j\right) \right) \iff\rho\left( i\mid j\right) =0
\end{align*}
For all $j\neq k$ in $A$ such that $f\left( \bar{u}\left( j\right)
\right) =f\left( \bar{u}\left( k\right) \right) $, we have $\rho\left(
j\mid k\right) \in\left( 0,1\right) $, and so $j\parallel^{\ast}k$, then
there exists $h=1,...,m$ such that, for each $j,k\in\left[ i_{h}^{\ast
}\right] $,
\[
\frac{\bar{u}\left( j\right) }{\bar{u}\left( j\right) +\bar{u}\left(
k\right) }=\frac{1}{1+\frac{\bar{u}\left( k\right) }{\bar{u}\left(
j\right) }}=\frac{1}{1+\frac{\sigma_{h}u_{h}^{\ast}\left( k\right) }{\sigma
_{h}u_{h}^{\ast}\left( j\right) }}=\frac{1}{1+\frac{\rho\left( k\mid j\right)
}{\rho\left( j\mid k\right) }}=\frac{\rho\left( j\mid k\right) }
{\rho\left( j\mid k\right) +\rho\left( k\mid j\right) }
\]
and
\[
\rho\left( j\mid k\right) =\,\underset{=s\left( j,k\right) }
{\underbrace{(\rho\left( j\mid k\right) +\rho\left( k\mid j\right) )}
}\frac{\bar{u}\left( j\right) }{\bar{u}\left( j\right) +\bar{u}\left(
k\right) }
\]
By setting $s\left( j,k\right) =1$ if $f\left( \bar{u}\left( j\right)
\right) \neq f\left( \bar{u}\left( k\right) \right) $ we conclude the argument.
Since (iii) trivially implies (ii), it remains to prove that (ii) implies (i).
Let $u$, $w$ and $s$ represent $\rho$ as in (ii). We have already observed
that $w$ represents $\succ^{\ast}$.
For any triplet $i,j,k$ of distinct elements of $A$, consider the two products
\[
\rho\left( j\mid i\right) \rho\left( k\mid j\right) \rho\left( i\mid
k\right) \qquad\text{and}\qquad\rho\left( k\mid i\right) \rho\left( j\mid
k\right) \rho\left( i\mid j\right)
\]
Suppose first that $i,j,k$ do not belong to the same level set of $w$. Without
loss of generality, we can then set $\rho\left( j\mid i\right) =0$. Hence,
$\rho\left( i\mid j\right) =1$ and so $i\succ^{\ast}j$, that is, $w\left(
i\right) >w\left( j\right) $. There are two cases to consider.
\begin{enumerate}
\item[(1)] If $w\left( k\right) \geq w\left( i\right) $, then $w\left(
k\right) >w\left( j\right) $ and so $\rho\left( j\mid k\right) =0$.
\item[(2)] Else $w\left( i\right) >w\left( k\right) $, then $\rho\left(
k\mid i\right) =0$.
\end{enumerate}
In both cases, the two products are null, so equal. Next suppose that $i,j,k$
belong to the same level set of $w$. Then,
\begin{align*}
\rho\left( j\mid i\right) \rho\left( k\mid j\right) \rho\left( i\mid
k\right) & =s\left( j,i\right) \dfrac{u\left( j\right) }{u\left(
j\right) +u\left( i\right) }s\left( k,j\right) \dfrac{u\left( k\right)
}{u\left( k\right) +u\left( j\right) }s\left( i,k\right) \dfrac{u\left(
i\right) }{u\left( i\right) +u\left( k\right) }\\
& =s\left( k,i\right) \dfrac{u\left( i\right) }{u\left( k\right)
+u\left( i\right) }s\left( j,k\right) \dfrac{u\left( k\right) }{u\left(
j\right) +u\left( k\right) }s\left( i,j\right) \dfrac{u\left( j\right)
}{u\left( i\right) +u\left( j\right) }\\
& =s\left( k,i\right) \dfrac{u\left( k\right) }{u\left( k\right)
+u\left( i\right) }s\left( j,k\right) \dfrac{u\left( j\right) }{u\left(
j\right) +u\left( k\right) }s\left( i,j\right) \dfrac{u\left( i\right)
}{u\left( i\right) +u\left( j\right) }\\
& =\rho\left( k\mid i\right) \rho\left( j\mid k\right) \rho\left( i\mid
j\right)
\end{align*}
We conclude that $\rho$ is transitive.
$\blacksquare$
\subsubsection{Theorem \ref{prop:chrono}}
\textquotedblleft Only if.\textquotedblright\ If a tandem has a binary value
representation $\left( v,w,s,\varphi\right) $, then $\rho$ is transitive
(see Theorem \ref{thm:value}). Consider the set
\begin{align*}
\mathbb{D} & =\left\{ \left( i,j\right) :\tau\left( i\mid j\right)
\neq0\right\} =\left\{ \left( i,j\right) :\rho\left( i\mid j\right)
\in\left( 0,1\right) \right\} \\
& =\left\{ \left( i,j\right) :i\parallel^{\ast}j\right\} =\left\{
\left( i,j\right) :w\left( i\right) =w\left( j\right) \right\}
\end{align*}
Note that for all $\left( i,j\right) \in\mathbb{D}$,
\[
\ell_{ij}=\ln\frac{\rho\left( i\mid j\right) }{\rho\left( j\mid i\right)
}=v\left( i\right) -v\left( j\right)
\]
thus (\ref{eq:response-time-bis}) delivers
\[
\ell_{ij}=\ell_{hk}\implies v\left( i\right) -v\left( j\right) =v\left(
h\right) -v\left( k\right) \implies\tau\left( i\mid j\right) =\tau\left(
h\mid k\right)
\]
for all $\left( i,j\right) ,\left( h,k\right) \in\mathbb{D}$. Thus
(\ref{eq:qe}) is satisfied.
Since $\varphi$ is strictly quasiconcave and unimodal, then there exists a
unique $l\in\mathbb{R}$ such that $\varphi$ is strictly increasing on $\left(
-\infty,l\right] $ and strictly decreasing on $\left[ l,\infty\right) $,
then
\begin{align*}
l & \leq\ell_{ij}<\ell_{hk}\implies l\leq v\left( i\right) -v\left(
j\right) <v\left( h\right) -v\left( k\right) \\
& \implies\varphi\left( v\left( i\right) -v\left( j\right) \right)
>\varphi\left( v\left( h\right) -v\left( k\right) \right) \implies
\tau\left( i\mid j\right) >\tau\left( h\mid k\right)
\end{align*}
and
\begin{align*}
\ell_{ij} & <\ell_{hk}\leq l\implies v\left( i\right) -v\left( j\right)
<v\left( h\right) -v\left( k\right) \leq l\\
& \implies\varphi\left( v\left( i\right) -v\left( j\right) \right)
<\varphi\left( v\left( h\right) -v\left( k\right) \right) \implies
\tau\left( i\mid j\right) <\tau\left( h\mid k\right)
\end{align*}
for all $\left( i,j\right) ,\left( h,k\right) \in\mathbb{D}$. Thus
(\ref{eq:qi}) and (\ref{eq:qo}) are satisfied, as desired.
\textquotedblleft If.\textquotedblright\ If a tandem is chronometric then
$\rho$ is transitive and there exist $v,w:A\rightarrow\mathbb{R}$ and a
symmetric $s:A^{2}\rightarrow\left( 0,\infty\right) $ such that
(\ref{eq:stoch-utt}) holds. Consider the set
\begin{align*}
\mathbb{D} & =\left\{ \left( i,j\right) :\tau\left( i\mid j\right)
\neq0\right\} =\left\{ \left( i,j\right) :\rho\left( i\mid j\right)
\in\left( 0,1\right) \right\} \\
& =\left\{ \left( i,j\right) :i\parallel^{\ast}j\right\} =\left\{
\left( i,j\right) :w\left( i\right) =w\left( j\right) \right\}
\end{align*}
Note that for all $\left( i,j\right) \in\mathbb{D}$,
\[
\ell_{ij}=\ln\frac{\rho\left( i\mid j\right) }{\rho\left( j\mid i\right)
}=v\left( i\right) -v\left( j\right)
\]
and set $L=\left\{ \ell_{ij}:\left( i,j\right) \in\mathbb{D}\right\}
=\left\{ v\left( i\right) -v\left( j\right) :\left( i,j\right)
\in\mathbb{D}\right\} $. With this, (\ref{eq:qe}) implies that there exists
$\psi:L\rightarrow\left( 0,\infty\right) $ such that
\[
\tau\left( i\mid j\right) =\psi\left( \ell_{ij}\right) =\psi\left(
v\left( i\right) -v\left( j\right) \right)
\]
for all $\left( i,j\right) \in\mathbb{D}$. Moreover, by (\ref{eq:qi}), if
$x,y\in L$ are such that $l\leq x<y$, taking $\left( i,j\right) $ and
$\left( h,k\right) \ $in $\mathbb{D}$ such that $x=\ell_{ij}$ and
$y=\ell_{hk}$, it follows that
\[
l\leq\ell_{ij}<\ell_{hk}\implies\tau\left( i\mid j\right) >\tau\left( h\mid
k\right) \implies\psi\left( \ell_{ij}\right) >\psi\left( \ell_{hk}\right)
\implies\psi\left( x\right) >\psi\left( y\right)
\]
Analogously, by (\ref{eq:qo}), if $x,y\in L$ are such that $x<y\leq l$, taking
$\left( i,j\right) $ and $\left( h,k\right) \ $in $\mathbb{D}$ such that
$x=\ell_{ij}$ and $y=\ell_{hk}$, it follows that
\[
\ell_{ij}<\ell_{hk}\leq l\implies\tau\left( i\mid j\right) <\tau\left(
h\mid k\right) \implies\psi\left( \ell_{ij}\right) <\psi\left( \ell
_{hk}\right) \implies\psi\left( x\right) <\psi\left( y\right)
\]
Summing up, $L$ is a finite subset of $\mathbb{R}$ and $\psi:L\rightarrow
\left( 0,\infty\right) $ is such that there exists $l\in\mathbb{R}$ for
which
\begin{align*}
l & \leq x<y\implies\psi\left( x\right) >\psi\left( y\right) \\
x & <y\leq l\implies\psi\left( x\right) <\psi\left( y\right)
\end{align*}
for all $x,y\in L$.
This allows us to extend $\psi$ to a function $\varphi:\mathbb{R}\rightarrow
\left( 0,\infty\right) $ such that $\varphi$ is strictly increasing on
$\left( -\infty,l\right] $ and strictly decreasing on $\left[
l,\infty\right) $. Thus there exists a strictly quasiconcave and unimodal
$\varphi:\mathbb{R}\rightarrow\left( 0,\infty\right) $ such that
\[
\tau\left( i\mid j\right) =\varphi\left( v\left( i\right) -v\left(
j\right) \right)
\]
if $w\left( i\right) =w\left( j\right) $.
Finally, if $w\left( i\right) \neq w\left( j\right) $, by
(\ref{eq:stoch-utt}), $\rho\left( i\mid j\right) \in\left\{ 0,1\right\} $
and by definition of tandem $\tau\left( i\mid j\right) =0$.
$\blacksquare$
\subsubsection{Theorem \ref{prop:psycho}}
Note that if either $\left( \rho,\tau\right) $ has a binary value
representation or it is psychometric, then $\rho$ is transitive and there
exist $v,w:A\rightarrow\mathbb{R}$ and a symmetric $s:A^{2}\rightarrow\left(
0,\infty\right) $ such that (\ref{eq:stoch-utt}) holds. Therefore the set of
pairs of alternatives with nonzero response time is
\begin{align*}
\mathbb{D} & =\left\{ \left( i,j\right) :\tau\left( i\mid j\right)
\neq0\right\} =\left\{ \left( i,j\right) :\rho\left( i\mid j\right)
\in\left( 0,1\right) \right\} \\
& =\left\{ \left( i,j\right) :i\parallel^{\ast}j\right\} =\left\{
\left( i,j\right) :w\left( i\right) =w\left( j\right) \right\}
\end{align*}
Arbitrarily choose $\left( i,j\right) \in\mathbb{D}$. Since $i\parallel
^{\ast}j$, by Lemma \ref{lm:bcp-prop} we can assume, without loss of
generality, that $i\succsim^{\circ}j$, thus
\[
\rho\left( i\mid j\right) \geq\rho\left( j\mid i\right) \quad
\text{and\quad}1-\rho\left( j\mid i\right) \geq1-\rho\left( i\mid j\right)
\]
A second-type error is the probability of accepting an inferior proposal, that
is,
\[
\mathrm{ER}_{i,j}^{\mathrm{II}}=\rho\left( j\mid i\right) =\min\left\{
\rho\left( i\mid j\right) ,\rho\left( j\mid i\right) \right\}
\]
A first-type error is the probability of rejecting a superior proposal, that
is,
\[
\mathrm{ER}_{i,j}^{\mathrm{I}}=1-\rho\left( i\mid j\right) =\min\left\{
1-\rho\left( i\mid j\right) ,1-\rho\left( j\mid i\right) \right\}
\]
Since (\ref{eq:stoch-utt}) holds, $i\succsim^{\circ}j$ if and only if
$v\left( i\right) \geq v\left( j\right) $. Therefore:
\begin{align*}
\mathrm{ER}_{i,j}^{\mathrm{II}} & =\rho\left( j\mid i\right) =s\left(
j,i\right) \frac{1}{1+e^{-\left( v\left( j\right) -v\left( i\right)
\right) }}=s\left( j,i\right) \frac{1}{1+e^{\left\vert v\left( j\right)
-v\left( i\right) \right\vert }}\\
& =s\left( i,j\right) \frac{1}{1+e^{\left\vert v\left( i\right) -v\left(
j\right) \right\vert }}\\
\mathrm{ER}_{i,j}^{\mathrm{I}} & =1-\rho\left( i\mid j\right) =1-s\left(
i,j\right) \frac{1}{1+e^{-\left( v\left( i\right) -v\left( j\right)
\right) }}=1-s\left( i,j\right) \frac{1}{1+e^{-\left\vert v\left(
i\right) -v\left( j\right) \right\vert }}\\
& =1-\mathrm{ER}_{i,j}^{\mathrm{II}}\frac{1+e^{\left\vert v\left( i\right)
-v\left( j\right) \right\vert }}{1+e^{-\left\vert v\left( i\right)
-v\left( j\right) \right\vert }}
\end{align*}
Summing up, for each $\left( i,j\right) \in\mathbb{D}$,
\begin{align*}
\mathrm{ER}_{i,j}^{\mathrm{II}} & =s\left( i,j\right) \frac{1}
{1+e^{\left\vert v\left( i\right) -v\left( j\right) \right\vert }}\\
\mathrm{ER}_{i,j}^{\mathrm{I}} & =1-\mathrm{ER}_{i,j}^{\mathrm{II}}
\frac{1+e^{\left\vert v\left( i\right) -v\left( j\right) \right\vert }
}{1+e^{-\left\vert v\left( i\right) -v\left( j\right) \right\vert }}
\end{align*}
But then, for all $\left( i,j\right) $ and $\left( h,k\right) $ in
$\mathbb{D}$ such that $\mathrm{ER}_{i,j}^{\mathrm{I}}<\mathrm{ER}
_{h,k}^{\mathrm{I}}\ $and\ $\mathrm{ER}_{i,j}^{\mathrm{II}}<\mathrm{ER}
_{h,k}^{\mathrm{II}}$, it follows that
\begin{gather*}
1-\mathrm{ER}_{i,j}^{\mathrm{II}}\frac{1+e^{\left\vert v\left( i\right)
-v\left( j\right) \right\vert }}{1+e^{-\left\vert v\left( i\right)
-v\left( j\right) \right\vert }}<1-\mathrm{ER}_{h,k}^{\mathrm{II}}
\frac{1+e^{\left\vert v\left( h\right) -v\left( k\right) \right\vert }
}{1+e^{-\left\vert v\left( h\right) -v\left( k\right) \right\vert }}\\
\mathrm{ER}_{h,k}^{\mathrm{II}}\frac{1+e^{\left\vert v\left( h\right)
-v\left( k\right) \right\vert }}{1+e^{-\left\vert v\left( h\right)
-v\left( k\right) \right\vert }}<\mathrm{ER}_{i,j}^{\mathrm{II}}
\frac{1+e^{\left\vert v\left( i\right) -v\left( j\right) \right\vert }
}{1+e^{-\left\vert v\left( i\right) -v\left( j\right) \right\vert }}\\
\frac{\frac{1+e^{\left\vert v\left( h\right) -v\left( k\right) \right\vert
}}{1+e^{-\left\vert v\left( h\right) -v\left( k\right) \right\vert }}
}{\frac{1+e^{\left\vert v\left( i\right) -v\left( j\right) \right\vert }
}{1+e^{-\left\vert v\left( i\right) -v\left( j\right) \right\vert }}
}<\frac{\mathrm{ER}_{i,j}^{\mathrm{II}}}{\mathrm{ER}_{h,k}^{\mathrm{II}}}<1\\
\frac{1+e^{\left\vert v\left( h\right) -v\left( k\right) \right\vert }
}{1+e^{-\left\vert v\left( h\right) -v\left( k\right) \right\vert }}
<\frac{1+e^{\left\vert v\left( i\right) -v\left( j\right) \right\vert }
}{1+e^{-\left\vert v\left( i\right) -v\left( j\right) \right\vert }}\\
\left\vert v\left( h\right) -v\left( k\right) \right\vert <\left\vert
v\left( i\right) -v\left( j\right) \right\vert
\end{gather*}
in other words
\begin{equation}
\mathrm{ER}_{i,j}^{\mathrm{I}}<\mathrm{ER}_{h,k}^{\mathrm{I}}\quad
\text{and\quad}\mathrm{ER}_{i,j}^{\mathrm{II}}<\mathrm{ER}_{h,k}^{\mathrm{II}
}\Longrightarrow\left\vert v\left( i\right) -v\left( j\right) \right\vert
>\left\vert v\left( h\right) -v\left( k\right) \right\vert \label{eq:due}
\end{equation}
A similar argument shows that
\begin{equation}
\mathrm{ER}_{i,j}^{\mathrm{I}}\leq\mathrm{ER}_{h,k}^{\mathrm{I}}
\quad\text{and\quad}\mathrm{ER}_{i,j}^{\mathrm{II}}\leq\mathrm{ER}
_{h,k}^{\mathrm{II}}\Longrightarrow\left\vert v\left( i\right) -v\left(
j\right) \right\vert \geq\left\vert v\left( h\right) -v\left( k\right)
\right\vert \label{eq:due-bis}
\end{equation}
\textquotedblleft If.\textquotedblright\ If $\left( \rho,\tau\right) $ is
psychometric, then (\ref{eq:due}) and (\ref{eq:due-bis}) imply
\begin{equation}
\tau\left( i\mid j\right) <\tau\left( h\mid k\right) \Longrightarrow
\left\vert v\left( i\right) -v\left( j\right) \right\vert >\left\vert
v\left( h\right) -v\left( k\right) \right\vert \label{eq:tre-bis}
\end{equation}
and
\begin{equation}
\tau\left( i\mid j\right) \leq\tau\left( h\mid k\right) \Longrightarrow
\left\vert v\left( i\right) -v\left( j\right) \right\vert \geq\left\vert
v\left( h\right) -v\left( k\right) \right\vert \label{eq:quattro-bis}
\end{equation}
for all $\left( i,j\right) $ and $\left( h,k\right) $ in $\mathbb{D}$. As
a result
\[
\left\vert v\left( i\right) -v\left( j\right) \right\vert \geq\left\vert
v\left( h\right) -v\left( k\right) \right\vert \Longleftrightarrow
\tau\left( i\mid j\right) \leq\tau\left( h\mid k\right)
\]
Therefore, setting $M=\left\{ \left\vert v\left( i\right) -v\left(
j\right) \right\vert :\left( i,j\right) \in\mathbb{D}\right\} $, there is
a strictly decreasing function $\psi:M\rightarrow\left( 0,\infty\right) $
such that $\tau\left( i\mid j\right) =\psi\left( \left\vert v\left(
i\right) -v\left( j\right) \right\vert \right) $. We can first extend
$\psi$ from $M$ to $\left[ 0,\infty\right) $ as a strictly decreasing
function and then set $\varphi\left( x\right) =\psi\left( \left\vert
x\right\vert \right) $ for all $x\in\mathbb{R}$. With this, there exists a
strictly quasiconcave, unimodal, and even $\varphi:\mathbb{R}\rightarrow
\left( 0,\infty\right) $ such that
\[
\tau\left( i\mid j\right) =\varphi\left( v\left( i\right) -v\left(
j\right) \right)
\]
if $w\left( i\right) =w\left( j\right) $.
Finally, if $w\left( i\right) \neq w\left( j\right) $, by
(\ref{eq:stoch-utt}), $\rho\left( i\mid j\right) \in\left\{ 0,1\right\} $
and by definition of tandem $\tau\left( i\mid j\right) =0$.
\textquotedblleft Only if.\textquotedblright\ If a tandem has a binary value
representation $\left( v,w,s,\varphi\right) $, then $\rho$ is transitive
(see Theorem \ref{thm:value}). Now $\varphi$ is strictly quasiconcave,
unimodal, and even $\varphi:\mathbb{R}\rightarrow\left( 0,\infty\right) $,
with strong maximum at $0$ and strictly decreasing on $\left[ 0,\infty
\right) $. In particular,
\[
\tau\left( i\mid j\right) =\tau\left( j\mid i\right)
\]
for all alternatives $i$ and $j$. But then $\rho$ is unbiased, and so
$s\left( i,j\right) =1$ for all $\left( i,j\right) \in\mathbb{D}$, and so
\[
\mathrm{ER}_{i,j}^{\mathrm{I}}=\mathrm{ER}_{i,j}^{\mathrm{II}}=\frac
{1}{1+e^{\left\vert v\left( i\right) -v\left( j\right) \right\vert }}
\]
Moreover, for all $\left( i,j\right) $ and $\left( h,k\right) $ in
$\mathbb{D}$,
\begin{align*}
\left. \tau\left( i\mid j\right) <\tau\left( h\mid k\right) \right. &
\iff\varphi\left( \left\vert v\left( i\right) -v\left( j\right)
\right\vert \right) <\varphi\left( \left\vert v\left( h\right) -v\left(
k\right) \right\vert \right) \\
& \iff\left\vert v\left( i\right) -v\left( j\right) \right\vert
>\left\vert v\left( h\right) -v\left( k\right) \right\vert \\
& \iff\mathrm{ER}_{i,j}<\mathrm{ER}_{h,k}
\end{align*}
This proves that $\left( \rho,\tau\right) $ is psychometric.
$\blacksquare$
\subsection{Section \ref{sect:algo}}
\subsubsection{Subsection \ref{sect:bin}}
To ease notation, we set $\Lambda_{ij}=\nu\left( i\right) -\nu\left(
j\right) $ as well as
\[
\rho_{\mathrm{C}}\left( i\mid j\right) =\rho_{ij}\quad\text{;}\quad
\rho_{\mathrm{C}}\left( j\mid i\right) =\rho_{ji}\quad\text{;}\quad
\tau_{\mathrm{RT}}\left( i\mid j\right) =\tau_{ij}\quad\text{;}\quad
\tau_{\mathrm{RT}}\left( j\mid i\right) =\tau_{ji}\quad\text{;}\quad
s_{\mathrm{C}}\left( i,j\right) =s_{ij}
\]
By Theorems 8.1 and 8.2 of Pinsky and Karlin (2011),
\begin{equation}
\rho_{ij}=\frac{1-e^{\beta\Lambda_{ij}}}{e^{-\lambda\Lambda_{ij}}
-e^{\beta\Lambda_{ij}}}\quad\text{and\quad}\tau_{ij}=\frac{1}{\Lambda_{ij}
}\left[ \rho_{ij}\left( \lambda+\beta\right) -\beta\right]
\label{eq:zero-ddm}
\end{equation}
also note that
\[
\rho_{ij}=\frac{1-e^{-\beta\Lambda_{ij}}}{1-e^{-\left( \lambda+\beta\right)
\Lambda_{ij}}}
\]
We begin with a few preliminary lemmas.
\begin{lemma}
\label{lm:uno-ddm}For each $\left( i,j\right) \in A^{2}$, it holds
\begin{equation}
\lambda\Lambda_{ij}=\ln\frac{\rho_{ij}}{\rho_{ji}}\quad\text{;}\quad
\beta\Lambda_{ij}=\ln\frac{1-\rho_{ji}}{1-\rho_{ij}} \label{eq:uno-ddm}
\end{equation}
\end{lemma}
\noindent\textbf{Proof} Let $\left( i,j\right) \in A^{2}$. We have
\[
\frac{\rho_{ij}}{\rho_{ji}}=\frac{\frac{1-e^{\beta\Lambda_{ij}}}
{e^{-\lambda\Lambda_{ij}}-e^{\beta\Lambda_{ij}}}}{\frac{1-e^{-\beta
\Lambda_{ij}}}{e^{\lambda\Lambda_{ij}}-e^{-\beta\Lambda_{ij}}}}=e^{\lambda
\Lambda_{ij}}
\]
and so $\lambda\Lambda_{ij}=\ln\left( \rho_{ij}/\rho_{ji}\right) $. We also have
\[
\frac{1-\rho_{ji}}{1-\rho_{ij}}=\frac{1-\frac{1-e^{-\beta\Lambda_{ij}}
}{e^{\lambda\Lambda_{ij}}-e^{-\beta\Lambda_{ij}}}}{1-\frac{1-e^{\beta
\Lambda_{ij}}}{e^{-\lambda\Lambda_{ij}}-e^{\beta\Lambda_{ij}}}}=\frac
{\frac{e^{\lambda\Lambda_{ij}}-1}{e^{\lambda\Lambda_{ij}}-e^{-\beta
\Lambda_{ij}}}}{\frac{1-e^{-\lambda\Lambda_{ij}}}{e^{\beta\Lambda_{ij}
}-e^{-\lambda\Lambda_{ij}}}}=e^{\beta\Lambda_{ij}}
\]
and so $\beta\Lambda_{ij}=\ln\left[ \left( 1-\rho_{ji}\right) /\left(
1-\rho_{ij}\right) \right] $.
$\blacksquare$
\begin{lemma}
Let $\left( i,j\right) \in A^{2}$ with $\Lambda_{ij}\neq0$. It holds
\begin{equation}
\tau_{ij}=\frac{\lambda^{2}}{\ln\rho_{ij}-\ln\rho_{ji}}\left[ \rho_{ij}
+\frac{\ln\left( 1-\rho_{ji}\right) -\ln\left( 1-\rho_{ij}\right) }
{\ln\rho_{ij}-\ln\rho_{ji}}\left( \rho_{ij}-1\right) \right]
\label{eq:due-ddm}
\end{equation}
\end{lemma}
\noindent\textbf{Proof} By (\ref{eq:uno-ddm}),
\begin{equation}
\frac{\beta}{\lambda}=\frac{\beta\Lambda_{ij}}{\lambda\Lambda_{ij}}=\frac
{\ln\frac{1-\rho_{ji}}{1-\rho_{ij}}}{\ln\frac{\rho_{ij}}{\rho_{ji}}}=\frac
{\ln\left( 1-\rho_{ji}\right) -\ln\left( 1-\rho_{ij}\right) }{\ln\rho
_{ij}-\ln\rho_{ji}} \label{eq:tre-ddm}
\end{equation}
Hence,
\begin{align*}
\tau_{ij} & =\frac{1}{\Lambda_{ij}}\left[ \rho_{ij}\left( \lambda
+\beta\right) -\beta\right] =\frac{\lambda^{2}}{\lambda\Lambda_{ij}}\left[
\rho_{ij}\left( 1+\frac{\beta}{\lambda}\right) -\frac{\beta}{\lambda
}\right] =\frac{\lambda^{2}}{\lambda\Lambda_{ij}}\left[ \rho_{ij}
+\frac{\beta}{\lambda}\left( \rho_{ij}-1\right) \right] \\
& =\frac{\lambda^{2}}{\ln\rho_{ij}-\ln\rho_{ji}}\left[ \rho_{ij}+\frac
{\ln\left( 1-\rho_{ji}\right) -\ln\left( 1-\rho_{ij}\right) }{\ln\rho
_{ij}-\ln\rho_{ji}}\left( \rho_{ij}-1\right) \right]
\end{align*}
as desired.
$\blacksquare$
\begin{lemma}
\label{lm:tre-ddm}Let $\left( i,j\right) \in A^{2}$ with $\Lambda_{ij}\neq
0$. If $\tau_{ij}=\tau_{ji}$, then $\beta=\lambda$.
\end{lemma}
\noindent\textbf{Proof} To further ease notation, set $x=\rho_{ij}$ and
$y=\rho_{ji}$. Since $\Lambda_{ij}\neq0$, by (\ref{eq:uno-ddm}) we have $x\neq
y$. By (\ref{eq:due-ddm}), we have
\begin{align*}
\tau_{ij} & =\tau_{ji}\\
& \Longleftrightarrow\frac{\lambda^{2}}{\ln\frac{x}{y}}\left[ x+\frac
{\ln\frac{1-y}{1-x}}{\ln\frac{x}{y}}\left( x-1\right) \right]
=\frac{\lambda^{2}}{\ln\frac{y}{x}}\left[ y+\frac{\ln\frac{1-x}{1-y}}
{\ln\frac{y}{x}}\left( y-1\right) \right] \\
& \Longleftrightarrow\frac{\ln\frac{y}{x}}{\ln\frac{x}{y}}\left[ x+\frac
{\ln\frac{1-y}{1-x}}{\ln\frac{x}{y}}\left( x-1\right) \right] =y+\frac
{\ln\frac{1-x}{1-y}}{\ln\frac{y}{x}}\left( y-1\right) \\
& \Longleftrightarrow-x-\frac{\ln\frac{1-y}{1-x}}{\ln\frac{x}{y}}\left(
x-1\right) =y+\frac{\ln\frac{1-x}{1-y}}{\ln\frac{y}{x}}\left( y-1\right) \\
& \Longleftrightarrow\frac{\ln\frac{1-y}{1-x}}{\ln\frac{y}{x}}\left(
y-1\right) +\frac{\ln\frac{1-y}{1-x}}{\ln\frac{y}{x}}\left( x-1\right)
=y+x\Longleftrightarrow\frac{2}{x+y}+\frac{\ln\frac{y}{x}}{\ln\frac{1-y}{1-x}
}=1
\end{align*}
The locus of pairs $\left( x,y\right) \in\left( 0,1\right) \times\left(
0,1\right) $, with $x\neq y$, that solve this equation is
\[
\left\{ \left( x,y\right) \in\left( 0,1\right) _{\neq}^{2}:x=1-y\right\}
\]
Thus, $\rho_{ij}=1-\rho_{ji}$. By (\ref{eq:uno-ddm}),
\[
\frac{\beta}{\lambda}=\frac{\beta\Lambda_{ij}}{\lambda\Lambda_{ij}}=\frac
{\ln\frac{1-\rho_{ji}}{1-\rho_{ij}}}{\ln\frac{\rho_{ij}}{\rho_{ji}}}=\frac
{\ln\frac{\rho_{ij}}{\rho_{ji}}}{\ln\frac{\rho_{ij}}{\rho_{ji}}}=1
\]
We conclude that $\beta=\lambda$.
$\blacksquare$
\paragraph{Proof of Proposition \ref{prop:ddm_bcm}}
Positivity of $\rho=\rho_{\mathrm{C}}$ follows immediately from
(\ref{eq:zero-ddm}) and hence $\rho$ is a binary choice probability. So, to
establish whether $\left( \rho,\tau\right) $ is a tandem we need to check
only condition (\ref{eq:tao-rho2}). When $\beta=\lambda$, this condition
trivially holds because $\rho$ is unbiased and $\tau$ is symmetric. When
$\beta\neq\lambda$, since $\nu$ is injective, we have $\Lambda_{ij}\neq0$ for
all distinct $i$ and $j$ in $A$; hence, by the contrapositive of Lemma
\ref{lm:tre-ddm}, it must be the case that $\tau_{ij}\neq\tau_{ji}$, so
condition (\ref{eq:tao-rho2}) now vacuously holds. We
conclude that $\left( \rho,\tau\right) $ is a tandem. Finally, the
transitivity of $\rho$ follows by Theorem \ref{thm:value-bis} because Baldassi
et al. (2020) show that, given any nice exploration matrix $Q$, the transition
matrix $M$ is reversible.\footnote{Of course, it can also be verified by brute
force from (\ref{eq:zero-ddm}).}
We now turn to the binary value representation. By positivity, $w_{\mathrm{C}
}$ is constant. For all $i\ $and $j$ in $A$,
\[
s_{ij}=\rho_{ij}+\rho_{ji}=\dfrac{1-e^{-\beta\Lambda_{ij}}}{1-e^{-\left(
\lambda+\beta\right) \Lambda_{ij}}}+\dfrac{1-e^{\beta\Lambda_{ij}}
}{1-e^{\left( \lambda+\beta\right) \Lambda_{ij}}}=1+\dfrac{e^{\lambda
\Lambda_{ij}}-e^{\beta\Lambda_{ij}}}{1-e^{\left( \lambda+\beta\right)
\Lambda_{ij}}}
\]
and, by symmetry,
\[
s_{ij}=s_{ji}=1+\dfrac{e^{\lambda\left( -\Lambda_{ij}\right) }
-e^{\beta\left( -\Lambda_{ij}\right) }}{1-e^{\left( \lambda+\beta\right)
\left( -\Lambda_{ij}\right) }}
\]
and so
\[
s_{\mathrm{C}}\left( i,j\right) =s_{ij}=1+\dfrac{e^{\lambda\Lambda_{ij}
}-e^{\beta\Lambda_{ij}}}{1-e^{\left( \lambda+\beta\right) \Lambda_{ij}}
}=1+\dfrac{e^{\lambda\left\vert \Lambda_{ij}\right\vert }-e^{\beta\left\vert
\Lambda_{ij}\right\vert }}{1-e^{\left( \lambda+\beta\right) \left\vert
\Lambda_{ij}\right\vert }}
\]
Moreover,
\[
s_{ij}\frac{1}{1+e^{-\lambda\Lambda_{ij}}}=\left( 1+\dfrac{e^{\lambda
\Lambda_{ij}}-e^{\beta\Lambda_{ij}}}{1-e^{\left( \lambda+\beta\right)
\Lambda_{ij}}}\right) \frac{1}{1+e^{-\lambda\Lambda_{ij}}}=\dfrac
{1-e^{-\beta\Lambda_{ij}}}{1-e^{-\left( \lambda+\beta\right) \Lambda_{ij}}
}=\rho_{ij}
\]
This proves that $v_{\mathrm{C}}=\lambda\nu$, thus completing the proof of
(\ref{eq:bcp-uno}).
As to (\ref{eq:bcp-due}),
\begin{align*}
\tau_{ij} & =\frac{\lambda^{2}}{\lambda\Lambda_{ij}}\left[ \rho_{ij}\left(
1+\frac{\beta}{\lambda}\right) -\frac{\beta}{\lambda}\right] =\frac
{\lambda^{2}}{\lambda\Lambda_{ij}}\left[ \frac{1-e^{\beta\Lambda_{ij}}
}{e^{-\lambda\Lambda_{ij}}-e^{\beta\Lambda_{ij}}}\left( 1+\frac{\beta
}{\lambda}\right) -\frac{\beta}{\lambda}\right] \\
& =\frac{\lambda^{2}}{\lambda\Lambda_{ij}}\left[ \frac{1-e^{\frac{\beta
}{\lambda}\lambda\Lambda_{ij}}}{e^{-\lambda\Lambda_{ij}}-e^{\frac{\beta
}{\lambda}\lambda\Lambda_{ij}}}\left( 1+\frac{\beta}{\lambda}\right)
-\frac{\beta}{\lambda}\right]
\end{align*}
but $\lambda\Lambda_{ij}=v_{\mathrm{C}}\left( i\right) -v_{\mathrm{C}
}\left( j\right) $. We can then define $\varphi_{\mathrm{RT}}:\mathbb{R}
\rightarrow\mathbb{R}$ by
\[
\varphi_{\mathrm{RT}}\left( x\right) =\frac{\lambda^{2}}{x}\left[
\frac{1-e^{\frac{\beta}{\lambda}x}}{e^{-x}-e^{\frac{\beta}{\lambda}x}}\left(
1+\frac{\beta}{\lambda}\right) -\frac{\beta}{\lambda}\right]
\]
and obtain $\tau_{ij}=\varphi_{\mathrm{RT}}\left( v_{\mathrm{C}}\left(
i\right) -v_{\mathrm{C}}\left( j\right) \right) $.
$\blacksquare$
\paragraph{\textbf{Proof of Proposition \ref{prop:ddm-bcm}}}
By (\ref{eq:due-ddm}),
\[
\tau_{ij}=\frac{\lambda^{2}}{\ell_{ij}}\left[ \rho_{ij}+\frac{\bar{\ell}
_{ij}}{\ell_{ij}}\left( \rho_{ij}-1\right) \right] =\frac{\lambda^{2}}
{\ell_{ij}^{2}}\left[ \ell_{ij}\rho_{ij}+\bar{\ell}_{ij}\left( \rho
_{ij}-1\right) \right]
\]
thus
\[
\lambda=\left\vert \ell_{ij}\right\vert \sqrt{\frac{\tau_{ij}}{\ell_{ij}
\rho_{ij}+\bar{\ell}_{ij}\left( \rho_{ij}-1\right) }}
\]
as desired. By (\ref{eq:tre-ddm}), $\beta=\lambda\bar{\ell}_{ij}/\ell_{ij}$.
Finally, set $\nu\left( j^{\ast}\right) =0$ for some alternative $j^{\ast}$.
By (\ref{eq:uno-ddm}), for each $i$ we have
\[
\nu\left( i\right) =\nu\left( i\right) -\nu\left( j^{\ast}\right)
=\Lambda_{ij^{\ast}}=\frac{1}{\lambda}\ell_{ij^{\ast}}
\]
concluding the proof.
$\blacksquare$
\paragraph{\textbf{Proof of Proposition \ref{prop:ddm-error}}}
By Proposition \ref{prop:ddm_bcm}, the tandem $\left( \rho_{\mathrm{C}}
,\tau_{\mathrm{RT}}\right) $ is chronometric.
(i) implies (iii) If $\varphi_{\mathrm{RT}}$ is even, then
\[
\tau_{\mathrm{RT}}\left( i\mid j\right) =\varphi_{\mathrm{RT}}\left(
v_{\mathrm{C}}\left( i\right) -v_{\mathrm{C}}\left( j\right) \right)
=\varphi_{\mathrm{RT}}\left( v_{\mathrm{C}}\left( j\right) -v_{\mathrm{C}
}\left( i\right) \right) =\tau_{\mathrm{RT}}\left( j\mid i\right)
\]
for all $i\neq j$.
(iii) implies (ii). If $\tau_{\mathrm{RT}}\left( i\mid j\right)
=\tau_{\mathrm{RT}}\left( j\mid i\right) $ for some $i\neq j$, since $\nu$
is injective, then $\Lambda_{ij}\neq0$ and so, by Lemma \ref{lm:tre-ddm},
$\beta=\lambda$.
(ii) implies (iv). Indeed if $\beta=\lambda$, then
\[
s_{\mathrm{C}}\left( i,j\right) =1+\dfrac{e^{\lambda\left\vert \nu\left(
i\right) -\nu\left( j\right) \right\vert }-e^{\beta\left\vert \nu\left(
i\right) -\nu\left( j\right) \right\vert }}{1-e^{\left( \lambda
+\beta\right) \left\vert \nu\left( i\right) -\nu\left( j\right)
\right\vert }}=1
\]
for all $i\ $and $j$, and so $\rho_{\mathrm{C}}$ is unbiased.
(iv) implies (i). If $\rho_{\mathrm{C}}$ is unbiased, then $\rho
_{\mathrm{C}}\left( j\mid i\right) =1-\rho_{\mathrm{C}}\left( i\mid
j\right) $ for all $i\neq j$, and, by Lemma \ref{lm:uno-ddm},
\[
\lambda\Lambda_{ij}=\beta\Lambda_{ij}
\]
Since $\nu$ is injective, then $\Lambda_{ij}\neq0$ and $\beta=\lambda$. In
particular,
\[
\varphi_{\mathrm{RT}}\left( x\right) =\frac{\lambda^{2}}{x}\left(
2\frac{1-e^{x}}{e^{-x}-e^{x}}-1\right) =\frac{\lambda^{2}}{x}\tanh\left(
\frac{x}{2}\right)
\]
is even.
$\blacksquare$
\subsubsection{Stochastic matrices\label{sect:more-stoch-matrices}}
A sequence $a=\left\{ a_{n}\right\} $ of non-negative scalars is
\emph{summable} if $
{\displaystyle\sum\limits_{n=0}^{\infty}}
a_{n}<\infty$. Its \emph{generating function} given by
\begin{equation}
f_{a}\left( z\right) =
{\displaystyle\sum\limits_{n=0}^{\infty}}
a_{n}z^{n} \label{eq:Mirsky}
\end{equation}
is defined where the power series on the right hand side converges.
Summability of $\left\{ a_{n}\right\} $ guarantees that the radius of
convergence $R$ satisfies $R\geq1$ and that $f_{a}\left( z\right) $ is
defined and continuous on the closed unit disk $\left\{ z\in
\mathbb{C}:\left\vert z\right\vert \leq1\right\} $.
\begin{lemma}
\label{prop:hw}If $a=\left\{ a_{n}\right\} $ is a non-negative and summable
sequence, then the matrix power series $
{\displaystyle\sum\limits_{n=0}^{\infty}}
a_{n}B^{n}$ converges (entry by entry) for all stochastic matrices $B$.
\end{lemma}
\noindent\textbf{Proof} The $\left( i,j\right) $-th entry $b_{ij}^{(n)}$ of
the matrix $B^{n}$ belongs to $\left[ 0,1\right] $ because $B^{n}$ is a
stochastic matrix too. Then $\sum_{n=0}^{\infty}a_{n}b_{ij}^{(n)}$ is a
non-negative series such that $0\leq a_{n}b_{ij}^{(n)}\leq a_{n}$ and it
converges because $\sum_{n=0}^{\infty}a_{n}$ does.
$\blacksquare$
As a consequence the function
\[
f_{a}\left( B\right) =
{\displaystyle\sum\limits_{n=0}^{\infty}}
a_{n}B^{n}
\]
is well defined in the strong sense of Weyr (see e.g. Rinehart, 1955), for all
stochastic matrices $B$.
Denote by $Q$ and $M$ the exploration and transition matrices defined in the
main text, which, as observed, are stochastic.
\begin{lemma}
\label{lem:ma}Let $\rho_{\mathrm{C}}$ be positive. If $Q$ is irreducible
(quasi-positive), then $M$ is primitive (positive).
\end{lemma}
\noindent\textbf{Proof} To ease notation we write $\rho$ in place of
$\rho_{\mathrm{C}}$. Let $Q$ be irreducible. Recall that
\begin{equation}
M\left( i\mid j\right) =Q\left( i\mid j\right) \rho\left( i\mid j\right)
\qquad\forall i\neq j \label{eq:emme}
\end{equation}
and $M\left( j\mid j\right) =1-
{\displaystyle\sum\limits_{k\neq j}}
Q\left( k\mid j\right) \rho\left( k\mid j\right) $, for all $j\in A$.
Given any $j\in A$, since $Q$ is irreducible, then it cannot be the case that
$Q\left( k\mid j\right) =0$ for all $k\neq j$. Positivity of the BCM implies
that
\[
M\left( j\mid j\right) =1-
{\displaystyle\sum\limits_{k\neq j}}
Q\left( k\mid j\right) \rho\left( k\mid j\right) >1-
{\displaystyle\sum\limits_{k\neq j}}
Q\left( k\mid j\right) \geq1-
{\displaystyle\sum\limits_{k\in A}}
Q\left( k\mid j\right) =0
\]
and so $M\left( j\mid j\right) >0$ for all $j\in A$.
Moreover, if $i\neq j$, then there exist $n\geq1$ and $k_{0},...,k_{n}$ in
$A$, with $k_{0}=i$, $k_{n}=j$, and $k_{h}\neq k_{h-1}$ for all $h=1,...,n$,
such that
\[
Q\left( k_{1}\mid k_{0}\right) Q\left( k_{2}\mid k_{1}\right) \cdot
\cdot\cdot Q\left( k_{n}\mid k_{n-1}\right) >0
\]
and positivity of the BCM implies that
\[
M\left( k_{1}\mid k_{0}\right) M\left( k_{2}\mid k_{1}\right) \cdot
\cdot\cdot M\left( k_{n}\mid k_{n-1}\right) >0
\]
Together with positivity of $M$ on the diagonal, this yields primitivity of
$M$ itself.\footnote{Because $M$ is then irreducible with at least one
strictly positive diagonal entry.}
Finally, if $Q$ is quasi-positive, the argument above shows that $M$ is
positive on the diagonal, and (\ref{eq:emme}) shows that $M$ is positive also
off the diagonal.
$\blacksquare$
\subsubsection{\textbf{Proof of Proposition \ref{lem:comp}}}
By Lemma \ref{prop:hw} the two matrix power series
\[
\sum_{n=0}^{\infty}\mathbb{P}\left[ N=n\right] M^{n}\qquad\text{and\qquad
}\sum_{n=0}^{\infty}\mathbb{P}\left[ N>n\right] M^{n}
\]
converge (note that $\sum_{n=0}^{\infty}\mathbb{P}\left[ N>n\right]
=\mathbb{E}\left[ N\right] <\infty$). Recall that, if the algorithm stops at
iteration $n\in\mathbb{N}=\left\{ 0,1,...\right\} $, it chooses the
incumbent $j_{n-1}$. By independence, the joint probability of stopping at
iteration $n$ and choosing $j\in A$ is
\[
\mathbb{P}\left[ N=n,J_{n-1}=j\right] =\mathbb{P}\left[ N=n\right]
\mathbb{P}\left[ J_{n-1}=j\right]
\]
Now, for $n=0$ we have
\[
\mathbb{P}\left[ J_{-1}=j\right] =\mu_{j}=\left( M^{0}\mu\right) _{j}
\]
Assume that for $n=m$ we have
\[
\mathbb{P}\left[ J_{m-1}=j\right] =\left( M^{m}\mu\right) _{j}
\]
Then, for $n=m+1$ we have
\begin{align*}
\mathbb{P}\left[ J_{\left( m+1\right) -1}=j\right] & =\mathbb{P}\left[
J_{m}=j\right] =\sum_{i\in A}\mathbb{P}\left[ J_{m}=j,J_{m-1}=i\right]
=\sum_{i\in A}\mathbb{P}\left[ J_{m}=j\mid J_{m-1}=i\right] \mathbb{P}
\left[ J_{m-1}=i\right] \\
& =\sum_{i\in A}m_{ji}\left( M^{m}\mu\right) _{i}=\left( M\left( M^{m}
\mu\right) \right) _{j}=\left( M^{m+1}\mu\right) _{j}
\end{align*}
We have proved by induction that
\[
\mathbb{P}\left[ J_{n-1}=j\right] =\left( M^{n}\mu\right) _{j}
\qquad\forall n\in\mathbb{N}
\]
It follows that the probability of choosing $j$ is
\[
{\displaystyle\sum\limits_{n=0}^{\infty}}
\mathbb{P}\left[ N=n,J_{n-1}=j\right] =
{\displaystyle\sum\limits_{n=0}^{\infty}}
\mathbb{P}\left[ N=n\right] \mathbb{P}\left[ J_{n-1}=j\right] =
{\displaystyle\sum\limits_{n=0}^{\infty}}
\mathbb{P}\left[ N=n\right] \left( M^{n}\mu\right) _{j}
\]
Then
\begin{align*}
p_{N} & =
{\displaystyle\sum\limits_{n=0}^{\infty}}
\mathbb{P}\left[ N=n\right] \left( M^{n}\mu\right) =\lim_{k\rightarrow
\infty}
{\displaystyle\sum\limits_{n=0}^{k}}
\mathbb{P}\left[ N=n\right] \left( M^{n}\mu\right) =\lim_{k\rightarrow
\infty}\left( \left[
{\displaystyle\sum\limits_{n=0}^{k}}
\mathbb{P}\left[ N=n\right] M^{n}\right] \mu\right) \\
& =\left( \lim_{k\rightarrow\infty}\left[
{\displaystyle\sum\limits_{n=0}^{k}}
\mathbb{P}\left[ N=n\right] M^{n}\right] \right) \mu
\end{align*}
and so $p_{N}=f_{N}\left( M\right) \mu$ holds. The average duration of an
iteration starting with incumbent $j$ is
\[
\tau_{j}=
{\displaystyle\sum\limits_{i\in A}}
Q\left( i\mid j\right) \tau_{\mathrm{RT}}\left( i\mid j\right)
\]
where to ease notation we write $\tau$ in place of $\bar{\tau}$. Since
$\mathbb{P}\left[ J_{n-1}=j\right] =\left( M^{n}\mu\right) _{j}$, the
average duration of iteration $k$ (if it takes place, i.e., if $N>k$) is
\[
{\displaystyle\sum\limits_{j\in A}}
\tau_{j}\mathbb{P}\left[ J_{k-1}=j\right] =
{\displaystyle\sum\limits_{j\in A}}
\tau_{j}\left( M^{k}\mu\right) _{j}=\tau\cdot M^{k}\mu
\]
The average duration if $N=n$ is then
\[
{\displaystyle\sum\limits_{k=0}^{n-1}}
\tau\cdot M^{k}\mu=\tau\cdot\left(
{\displaystyle\sum\limits_{k=0}^{n-1}}
M^{k}\right) \mu
\]
with the convention $
{\displaystyle\sum\limits_{k=0}^{-1}}
M^{k}=0$ (the zero matrix). Since the probability of stopping at $n$ is
$\mathbb{P}\left[ N=n\right] $, it follows that
\begin{align}
\tau_{N} & =
{\displaystyle\sum\limits_{n=0}^{\infty}}
\mathbb{P}\left[ N=n\right] \tau\cdot\left(
{\displaystyle\sum\limits_{k=0}^{n-1}}
M^{k}\right) \mu=\tau\cdot\left(
{\displaystyle\sum\limits_{n=0}^{\infty}}
\mathbb{P}\left[ N=n\right] \left(
{\displaystyle\sum\limits_{k=0}^{n-1}}
M^{k}\right) \right) \mu\label{eq:tn-pk}\\
& =\tau\cdot\left(
{\displaystyle\sum\limits_{n=1}^{\infty}}
\mathbb{P}\left[ N=n\right] \left(
{\displaystyle\sum\limits_{k=0}^{n-1}}
M^{k}\right) \right) \mu\nonumber
\end{align}
because $
{\displaystyle\sum\limits_{k=0}^{-1}}
M^{k}=0$. Now
\begin{align*}
{\displaystyle\sum\limits_{n=1}^{\infty}}
\mathbb{P}\left[ N=n\right] \left(
{\displaystyle\sum\limits_{k=0}^{n-1}}
M^{k}\right) & =
{\displaystyle\sum\limits_{n=1}^{\infty}}
\mathbb{P}\left[ N=n\right] \left(
{\displaystyle\sum\limits_{k=1}^{n}}
M^{k-1}\right) \\
& =
{\displaystyle\sum\limits_{n=1}^{\infty}}
{\displaystyle\sum\limits_{k=1}^{\infty}}
1_{\left\{ k\leq n\right\} }\mathbb{P}\left[ N=n\right] M^{k-1}=
{\displaystyle\sum\limits_{k=1}^{\infty}}
{\displaystyle\sum\limits_{n=1}^{\infty}}
1_{\left\{ k\leq n\right\} }\mathbb{P}\left[ N=n\right] M^{k-1}\\
& =
{\displaystyle\sum\limits_{k=1}^{\infty}}
M^{k-1}
{\displaystyle\sum\limits_{n=1}^{\infty}}
1_{\left\{ k\leq n\right\} }\mathbb{P}\left[ N=n\right] =
{\displaystyle\sum\limits_{k=1}^{\infty}}
M^{k-1}
{\displaystyle\sum\limits_{n=k}^{\infty}}
\mathbb{P}\left[ N=n\right] \\
& =
{\displaystyle\sum\limits_{k=1}^{\infty}}
\mathbb{P}\left[ N\geq k\right] M^{k-1}=
{\displaystyle\sum\limits_{n=0}^{\infty}}
\mathbb{P}\left[ N\geq n+1\right] M^{n}=
{\displaystyle\sum\limits_{n=0}^{\infty}}
\mathbb{P}\left[ N>n\right] M^{n}
\end{align*}
This proves that $\tau_{N}=\tau\cdot g_{N}\left( M\right) \mu$
holds.
$\blacksquare$
\subsubsection{Proof of Proposition \ref{prop:neg-bin}}
If $N$ is negative binomial, then
\begin{align*}
f_{N}\left( z\right) & =\sum_{n=0}^{\infty}\binom{n+r-1}{r-1}\zeta
^{n}\left( 1-\zeta\right) ^{r}z^{n}=\left( 1-\zeta\right) ^{r}\sum
_{n=0}^{\infty}\binom{n+r-1}{r-1}\left( \zeta z\right) ^{n}=\frac{\left(
1-\zeta\right) ^{r}}{\left( 1-\zeta z\right) ^{r}}\\
g_{N}\left( z\right) & =\frac{1-f_{N}\left( z\right) }{1-z}
=\frac{1-\frac{\left( 1-\zeta\right) ^{r}}{\left( 1-\zeta z\right) ^{r}}
}{1-z}=\frac{\frac{\left( 1-\zeta z\right) ^{r}}{\left( 1-\zeta z\right)
^{r}}-\frac{\left( 1-\zeta\right) ^{r}}{\left( 1-\zeta z\right) ^{r}}
}{1-z}=\frac{\left( 1-\zeta z\right) ^{r}-\left( 1-\zeta\right) ^{r}
}{\left( 1-z\right) \left( 1-\zeta z\right) ^{r}}
\end{align*}
For $r=1$ it yields
\begin{align*}
f_{N}\left( z\right) & =\left( 1-\zeta\right) \left( 1-\zeta z\right)
^{-1}\\
g_{N}\left( z\right) & =\frac{1-\zeta z-1+\zeta}{\left( 1-z\right)
\left( 1-\zeta z\right) }=\frac{\zeta-\zeta z}{\left( 1-z\right) \left(
1-\zeta z\right) }=\frac{\zeta\left( 1-z\right) }{\left( 1-z\right)
\left( 1-\zeta z\right) }=\zeta\left( 1-\zeta z\right) ^{-1}
\end{align*}
In general, note that $z=1$ is a root of $\left( 1-\zeta z\right)
^{r}-\left( 1-\zeta\right) ^{r}$. Thus, the ratio
\[
\frac{\left( 1-\zeta z\right) ^{r}-\left( 1-\zeta\right) ^{r}}{1-z}
\]
appearing above is a polynomial of degree $r-1$ in $z$. Next we compute it. It
holds
\begin{align*}
\left( 1-\zeta z\right) ^{r}-\left( 1-\zeta\right) ^{r} & =\sum
_{k=0}^{r}\binom{r}{k}\left( -\zeta z\right) ^{k}-\sum_{k=0}^{r}\binom{r}
{k}\left( -\zeta\right) ^{k}\\
& =\sum_{k=0}^{r}\left( -1\right) ^{k}\binom{r}{k}\zeta^{k}z^{k}-\sum
_{k=0}^{r}\left( -1\right) ^{k}\binom{r}{k}\zeta^{k}\\
& =\sum_{k=0}^{r}\left( -1\right) ^{k}\binom{r}{k}\zeta^{k}\left(
z^{k}-1\right) =\sum_{k=0}^{r}\binom{r}{k}\left( -\zeta\right) ^{k}\left(
z-1\right) \sum_{j=0}^{k-1}z^{j}
\end{align*}
because
\[
z^{k}-1=\left( z-1\right) \left( 1+z+\cdot\cdot\cdot+z^{k-1}\right)
=\left( z-1\right) \sum_{j=0}^{k-1}z^{j}
\]
with the convention $\sum_{j=0}^{-1}z^{j}=0$. Then,
\begin{align*}
g_{N}\left( z\right) & =\frac{\left( 1-\zeta z\right) ^{r}-\left(
1-\zeta\right) ^{r}}{\left( 1-z\right) \left( 1-\zeta z\right) ^{r}
}=\left( \sum_{k=0}^{r}\binom{r}{k}\left( -\zeta\right) ^{k}\left(
z-1\right) \sum_{j=0}^{k-1}z^{j}\right) \frac{1}{\left( 1-z\right) \left(
1-\zeta z\right) ^{r}}\\
& =-\left( \sum_{k=0}^{r}\binom{r}{k}\left( -\zeta\right) ^{k}\sum
_{j=0}^{k-1}z^{j}\right) \left( 1-\zeta z\right) ^{-r}
\end{align*}
showing that (\ref{eq:gnb}) holds.
\subsubsection{Equations (\ref{eq:frev}) and (\ref{eq:grev})}
\paragraph{Preamble}
A reversible matrix $B$ is diagonalizable with real eigenvalues. Indeed, from
the detailed balance condition (\ref{eq:balance-pre}) it readily follows that
the matrix $B^{\ast}$ with off diagonal entries $b_{ij}^{\ast}=b_{ij}
\sqrt{p_{j}/p_{i}}$ is symmetric and has the same eigenvalues as $B$. A
stochastic reversible matrix $B$ has then a largest eigenvalue $\lambda_{1}$
equal to $1$ and all its other eigenvalues have absolute values $\leq
\lambda_{1}$, i.e., they belong to $\left[ -1,1\right] $. If, in addition,
$B$ is primitive, by Perron's Theorem their absolute values are actually
$<\lambda_{1}$, so they belong to $\left( -1,1\right) $.
\paragraph{Equations}
Let the transition matrix $M$ be diagonalizable (e.g., because it is
reversible) and let $\Lambda=\operatorname*{diag}\left( \lambda_{1}
,\lambda_{2},...,\lambda_{m}\right) $ be the diagonal matrix of its
eigenvalues, each repeated according to its multiplicity. For any summable
sequence $a=\left\{ a_{n}\right\} $ of non-negative scalars we then have
\begin{align*}
f_{a}\left( M\right) & =
{\displaystyle\sum\limits_{n=0}^{\infty}}
a_{n}M^{n}=\lim_{l\rightarrow\infty}
{\displaystyle\sum\limits_{n=0}^{l}}
a_{n}U\Lambda^{n}U^{-1}=\lim_{l\rightarrow\infty}\left[ U\left(
{\displaystyle\sum\limits_{n=0}^{l}}
a_{n}\Lambda^{n}\right) U^{-1}\right] \\
& =U\left[ \lim_{l\rightarrow\infty}\left(
{\displaystyle\sum\limits_{n=0}^{l}}
a_{n}\Lambda^{n}\right) \right] U^{-1}=U\left[ \operatorname*{diag}\left(
{\displaystyle\sum\limits_{n=0}^{\infty}}
a_{n}\lambda_{1}^{n},
{\displaystyle\sum\limits_{n=0}^{\infty}}
a_{n}\lambda_{2}^{n},...,
{\displaystyle\sum\limits_{n=0}^{\infty}}
a_{n}\lambda_{m}^{n}\right) \right] U^{-1}\\
& =U\left[ \operatorname*{diag}\left( f_{a}\left( \lambda_{1}\right)
,f_{a}\left( \lambda_{2}\right) ,...,f_{a}\left( \lambda_{\left\vert
A\right\vert }\right) \right) \right] U^{-1}
\end{align*}
This immediately yields (\ref{eq:frev}) and (\ref{eq:grev}).
\subsection{Section \ref{sect:rev}}
\subsubsection{Proof of Theorem \ref{prop:value-bis}, Corollary
\ref{prop:value-ter}, and Corollary \ref{prop:value-quater}}
\begin{proposition}
If $\rho_{\mathrm{C}}$ has a binary value representation and the
exploration matrix $Q$ is nice, then the probability distribution
\[
\pi\left( i\right) =\left\{
\begin{array}
[c]{ll}
\dfrac{u\left( i\right) }{\sum_{j\in\left. \arg\max\right. _{A}w}u\left(
j\right) }
& \qquad\text{if }i\in\left. \arg\max\right. _{A}w\\
0 & \qquad\text{else}
\end{array}
\right.
\]
is the only stationary distribution for $M$, and there exists $\varepsilon
\in\left( 0,1\right) $ such that, for all $n\in\mathbb{N}$ and all $\mu
\in\Delta\left( A\right) $,
\begin{equation}
\left\Vert M^{n}\mu-\pi\right\Vert _{1}\leq2\left( 1-\varepsilon\right) ^{n}
\label{eq:lim_ergo}
\end{equation}
Moreover, if $\left\{ N_{k}\right\} _{k=0}^{\infty}$ is a sequence of
stopping numbers that diverges, then, for all $\mu\in\Delta\left( A\right) $
\begin{equation}
\left(
{\displaystyle\sum\limits_{n=0}^{\infty}}
\mathbb{P}\left[ N_{k}=n\right] M^{n}\right) \mu\rightarrow\pi
\qquad\text{as }k\rightarrow\infty\label{eq:lim_pk}
\end{equation}
\end{proposition}
In particular, (\ref{eq:lim_ergo}) implies that
\[
\lim_{n\rightarrow\infty}\Pr\left[ J_{n}=i\right] =\pi\left( i\right)
\]
and (\ref{eq:lim_pk}) implies that
\begin{equation}
\lim_{N_{k}\rightarrow\infty}p_{N_{k}}=\pi\label{eq:asintotico}
\end{equation}
when the stopping numbers are simple. Hence,
\[
\lim_{N_{k}\rightarrow\infty}p_{N_{k}}\left( i,A\right) =\lim_{n\rightarrow
\infty}\Pr\left[ J_{n}=i\right] =\left\{
\begin{array}
[c]{ll}
\dfrac{u\left( i\right) }{\sum_{j\in\left. \arg\max\right. _{A}w}u\left(
j\right) }
& \qquad\text{if }i\in\left. \arg\max\right. _{A}w\\
0 & \qquad\text{else}
\end{array}
\right.
\]
This proves Theorem \ref{prop:value-bis}. Corollaries \ref{prop:value-ter} and
\ref{prop:value-quater} follow immediately.
\noindent\textbf{Proof} To ease notation we write $\rho$ in place of
$\rho_{\mathrm{C}}$. We first show that
\begin{equation}
M\left( k\mid j\right) \pi\left( j\right) =M\left( j\mid k\right)
\pi\left( k\right) \label{eq:db}
\end{equation}
for all $j$ and $k$ in $A$. Denote, for brevity, $W\left( A\right) =\left.
\arg\max\right. _{A}w$. If $j=k$, the equality is trivial. Let $j\neq k$ in
$A$.
\begin{itemize}
\item If $j,k\in W\left( A\right) $, then
\begin{align*}
M\left( k\mid j\right) \pi\left( j\right) & =Q\left( k\mid j\right)
\rho\left( k\mid j\right) \dfrac{u\left( j\right) }{\sum_{i\in W\left(
A\right) }u\left( i\right) }\\
& =Q\left( k\mid j\right) s\left( k,j\right) \dfrac{u\left( k\right)
}{u\left( k\right) +u\left( j\right) }\dfrac{u\left( j\right) }
{\sum_{i\in W\left( A\right) }u\left( i\right) }\\
& =Q\left( j\mid k\right) s\left( j,k\right) \dfrac{u\left( j\right)
}{u\left( j\right) +u\left( k\right) }\dfrac{u\left( k\right) }
{\sum_{i\in W\left( A\right) }u\left( i\right) }=Q\left( j\mid k\right)
\rho\left( j\mid k\right) \pi\left( k\right) \\
& =M\left( j\mid k\right) \pi\left( k\right)
\end{align*}
\item If $j,k\notin W\left( A\right) $, then $\pi\left( j\right)
=\pi\left( k\right) =0$ and
\[
M\left( k\mid j\right) \pi\left( j\right) =M\left( j\mid k\right)
\pi\left( k\right)
\]
\item If $j\in W\left( A\right) $ and $k\notin W\left( A\right) $, then
$w\left( j\right) >w\left( k\right) $ and so $\rho\left( k\mid j\right)
=0=\pi\left( k\right) $, thus
\[
M\left( k\mid j\right) \pi\left( j\right) =Q\left( k\mid j\right)
\rho\left( k\mid j\right) \dfrac{u\left( j\right) }{\sum_{i\in W\left(
A\right) }u\left( i\right) }=0=M\left( j\mid k\right) \pi\left(
k\right)
\]
\item If $j\notin W\left( A\right) $ and $k\in W\left( A\right) $, then
$w\left( k\right) >w\left( j\right) $ and so $\rho\left( j\mid k\right)
=0=\pi\left( j\right) $, thus
\[
M\left( k\mid j\right) \pi\left( j\right) =0=Q\left( j\mid k\right)
\rho\left( j\mid k\right) \pi\left( k\right) =M\left( j\mid k\right)
\pi\left( k\right)
\]
\end{itemize}
The \textquotedblleft detailed balance\textquotedblright\ condition
(\ref{eq:db}) implies that
\[
{\displaystyle\sum\limits_{j\in A}}
M\left( k\mid j\right) \pi\left( j\right) =
{\displaystyle\sum\limits_{j\in A}}
M\left( j\mid k\right) \pi\left( k\right) =\pi\left( k\right)
{\displaystyle\sum\limits_{j\in A}}
M\left( j\mid k\right) =\pi\left( k\right)
\]
for all $k\in A$, then $M\pi=\pi$. Thus $\pi$ is a stationary distribution for
$M$.
Take $j_{0}\in W\left( A\right) $. Then, $w\left( j_{0}\right) \geq
w\left( i\right) $ for all $i\neq j_{0}$, and so
\[
M\left( j_{0}\mid i\right) =Q\left( j_{0}\mid i\right) \rho\left(
j_{0}\mid i\right) =\left\{
\begin{array}
[c]{ll}
Q\left( j_{0}\mid i\right)
& \qquad\text{if }w\left( j_{0}\right)
>w\left( i\right) \\
Q\left( j_{0}\mid i\right) s\left( j_{0},i\right) \dfrac{u\left(
j_{0}\right) }{u\left( j_{0}\right) +u\left( i\right) } & \qquad\text{if
}w\left( j_{0}\right) =w\left( i\right)
\end{array}
\right.
\]
For $i=j_{0}$, we have that $\rho\left( k\mid j_{0}\right) =0$ if $k\notin
W\left( A\right) $ and $\rho\left( k\mid j_{0}\right) \in\left(
0,1\right) $ if $k\in W\left( A\right) $ (provided $k\neq j_{0}$); hence
\[
M\left( j_{0}\mid j_{0}\right) =1-
{\displaystyle\sum\limits_{k\neq j_{0}}}
Q\left( k\mid j_{0}\right) \rho\left( k\mid j_{0}\right) >1-
{\displaystyle\sum\limits_{k\neq j_{0}}}
Q\left( k\mid j_{0}\right) \geq1-
{\displaystyle\sum\limits_{k\in A}}
Q\left( k\mid j_{0}\right) =0
\]
By Doeblin's Theorem, $\pi$ is the only stationary distribution for $M$ and
there exists $\varepsilon\in\left( 0,1\right) $ such that
\[
\left\Vert M^{n}\mu-\pi\right\Vert _{1}\leq2\left( 1-\varepsilon\right) ^{n}
\]
for all $n\in\mathbb{N}$ and all $\mu\in\Delta\left( A\right) $.
Given any $\mu\in\Delta\left( A\right) $, set, for each $k\in\mathbb{N}$,
\[
P_{k}\left( n\right) =\mathbb{P}\left[ N_{k}=n\right] \qquad\forall
n\in\mathbb{N}
\]
and
\[
p_{k}=\left(
{\displaystyle\sum\limits_{n=0}^{\infty}}
P_{k}\left( n\right) M^{n}\right) \mu
\]
Then
\[
p_{k}=\lim_{m\rightarrow\infty}
{\displaystyle\sum\limits_{n=0}^{m}}
P_{k}\left( n\right) M^{n}\mu\text{\quad and\quad}\pi=\lim_{m\rightarrow
\infty}
{\displaystyle\sum\limits_{n=0}^{m}}
P_{k}\left( n\right) \pi
\]
and so
\[
p_{k}-\pi=\lim_{m\rightarrow\infty}
{\displaystyle\sum\limits_{n=0}^{m}}
P_{k}\left( n\right) \left( M^{n}\mu-\pi\right)
\]
Thus,
\begin{align*}
\left\Vert p_{k}-\pi\right\Vert _{1} & =\lim_{m\rightarrow\infty}\left\Vert
{\displaystyle\sum\limits_{n=0}^{m}}
P_{k}\left( n\right) \left( M^{n}\mu-\pi\right) \right\Vert _{1}\leq
\lim_{m\rightarrow\infty}
{\displaystyle\sum\limits_{n=0}^{m}}
P_{k}\left( n\right) \left\Vert M^{n}\mu-\pi\right\Vert _{1}\\
& \leq\lim_{m\rightarrow\infty}
{\displaystyle\sum\limits_{n=0}^{m}}
P_{k}\left( n\right) 2\left( 1-\varepsilon\right) ^{n}=
{\displaystyle\sum\limits_{n=0}^{\infty}}
P_{k}\left( n\right) 2\left( 1-\varepsilon\right) ^{n}
\end{align*}
The sequence $\left\{ a_{k}\right\} _{k\in\mathbb{N}}$ of functions
$a_{k}\colon\mathbb{N}\rightarrow\left[ 0,\infty\right) $ given by
\[
a_{k}\left( n\right) =P_{k}\left( n\right) 2\left( 1-\varepsilon\right)
^{n}
\]
is bounded above by the function $a\colon\mathbb{N}\rightarrow\left[
0,\infty\right) $ given by
\[
a\left( n\right) =2\left( 1-\varepsilon\right) ^{n}
\]
The latter is summable with respect to the counting measure $\gamma$ on
$\mathbb{N}$. In addition, $\lim_{k\rightarrow\infty}a_{k}\left( n\right)
=0$ for all $n\in\mathbb{N}$. By the Lebesgue Dominated Convergence Theorem,
\[
\lim_{k\rightarrow\infty}
{\displaystyle\sum\limits_{n=0}^{\infty}}
P_{k}\left( n\right) 2\left( 1-\varepsilon\right) ^{n}=\lim_{k\rightarrow
\infty}\int_{\mathbb{N}}a_{k}\left( n\right) \mathrm{d}\gamma\left(
n\right) =0
\]
Therefore, $\lim_{k\rightarrow\infty}\left\Vert p_{k}-\pi\right\Vert _{1}
=0$.
$\blacksquare$
\subsubsection{Proof of Theorem \ref{thm:value-bis}}
Given a menu $A$, with typical elements $i$, $j$ and $k$, we denote by
$P=\left[ P\left( i\mid j\right) \right] _{i,j\in A}$ an $\left\vert
A\right\vert \times\left\vert A\right\vert $ \emph{stochastic matrix}
such that $P\left( i\mid j\right) $ is interpreted as the probability with
which a system moves from state $j$ to state $i$. Clearly, $P\left( \cdot\mid
j\right) \in\Delta\left( A\right) $ for all $j\in A$.
\begin{definition}
A stochastic matrix $P$ is \emph{transitive} if
\begin{equation}
P\left( j\mid i\right) P\left( k\mid j\right) P\left( i\mid k\right)
=P\left( k\mid i\right) P\left( j\mid k\right) P\left( i\mid j\right)
\qquad\forall i,j,k\in A \label{eq:kolmo}
\end{equation}
\end{definition}
Transitivity is known as the \emph{Kolmogorov criterion} in the Markov chains
literature (see, e.g., Kelly, 1979, p. 24) and as the \emph{product rule} in
the stochastic choice literature (Luce and Suppes, 1965, p. 341).
Transitivity is automatically satisfied if at least two of the three states
$i$, $j$, and $k$ in $A$ coincide. In fact,
\begin{itemize}
\item if $i=j$, then
\begin{align*}
P\left( j\mid i\right) P\left( k\mid j\right) P\left( i\mid k\right) &
=P\left( i\mid i\right) P\left( k\mid i\right) P\left( i\mid k\right) \\
P\left( k\mid i\right) P\left( j\mid k\right) P\left( i\mid j\right) &
=P\left( k\mid i\right) P\left( i\mid k\right) P\left( i\mid i\right)
\end{align*}
\item if $i=k$, then
\begin{align*}
P\left( j\mid i\right) P\left( k\mid j\right) P\left( i\mid k\right) &
=P\left( j\mid i\right) P\left( i\mid j\right) P\left( i\mid i\right) \\
P\left( k\mid i\right) P\left( j\mid k\right) P\left( i\mid j\right) &
=P\left( i\mid i\right) P\left( j\mid i\right) P\left( i\mid j\right)
\end{align*}
\item if $j=k$, then
\begin{align*}
P\left( j\mid i\right) P\left( k\mid j\right) P\left( i\mid k\right) &
=P\left( j\mid i\right) P\left( j\mid j\right) P\left( i\mid j\right) \\
P\left( k\mid i\right) P\left( j\mid k\right) P\left( i\mid j\right) &
=P\left( j\mid i\right) P\left( j\mid j\right) P\left( i\mid j\right)
\end{align*}
\end{itemize}
\noindent Therefore, transitivity can be restated as
\[
P\left( j\mid i\right) P\left( k\mid j\right) P\left( i\mid k\right)
=P\left( k\mid i\right) P\left( j\mid k\right) P\left( i\mid j\right)
\]
for all distinct $i$, $j$, and $k$ in $A$.\footnote{This argument applies to
any function $P:A\times A\rightarrow\mathbb{R}$ and is independent of its
\textquotedblleft diagonal\textquotedblright\ values $P\left( i\mid i\right)
$.}
The next result, which relates reversibility and transitivity, builds upon
Kolmogorov (1936) and Luce and Suppes (1965).
\begin{proposition}
\label{prop:kolmo}Let $P$ be a positive stochastic matrix. The following
conditions are equivalent:
\begin{enumerate}
\item[(i)] $P$ is reversible under some $\pi\in\Delta\left( A\right) $;
\item[(ii)] $P$ is transitive.
\end{enumerate}
In this case, given any $i\in A$, it holds
\[
\pi\left( j\right) =\frac{r\left( j\mid i\right) }{
{\displaystyle\sum\limits_{k\in A}}
r\left( k\mid i\right) }\qquad\forall j\in A
\]
where $r\left( j\mid i\right) =P\left( j\mid i\right) /P\left( i\mid
j\right) $. In particular, $\pi$ is unique.
\end{proposition}
\noindent\textbf{Proof} Assume that there exists $\pi\in\Delta\left(
A\right) $ such that $P\left( i\mid j\right) \pi\left( j\right) =P\left(
j\mid i\right) \pi\left( i\right) $ for all distinct $i,j\in A$ (note that
this is weaker than reversibility in that $\pi$ is not assumed to be
positive), then
\begin{equation}
P\left( i\mid j\right) \pi\left( j\right) =P\left( j\mid i\right)
\pi\left( i\right) \qquad\forall i,j\in A \label{eq:heart}
\end{equation}
If $\pi\left( i^{\ast}\right) =0$ for some $i^{\ast}\in A$, then (being $P$
positive)
\begin{equation}
\pi\left( j\right) =\frac{P\left( j\mid i^{\ast}\right) }{P\left(
i^{\ast}\mid j\right) }\pi\left( i^{\ast}\right) =0\qquad\forall j\in A
\label{eq:star}
\end{equation}
But, this is impossible since $
{\displaystyle\sum\limits_{j\in A}}
\pi\left( j\right) =1$. Hence, $\pi$ is positive. Moreover, by
(\ref{eq:star}) we have
\[
\frac{\dfrac{P\left( j\mid i^{\ast}\right) }{P\left( i^{\ast}\mid j\right)
}}{
{\displaystyle\sum\limits_{k\in A}}
\dfrac{P\left( k\mid i^{\ast}\right) }{P\left( i^{\ast}\mid k\right) }
}=\frac{\dfrac{P\left( j\mid i^{\ast}\right) }{P\left( i^{\ast}\mid
j\right) }\pi\left( i^{\ast}\right) }{
{\displaystyle\sum\limits_{k\in A}}
\dfrac{P\left( k\mid i^{\ast}\right) }{P\left( i^{\ast}\mid k\right) }
\pi\left( i^{\ast}\right) }=\frac{\pi\left( j\right) }{
{\displaystyle\sum\limits_{k\in A}}
\pi\left( k\right) }=\pi\left( j\right) \qquad\forall j\in A
\]
irrespective of the choice of $i^{\ast}\in A$. Hence, $\pi$ is unique.
Finally, given any $i,j,k\in A$, by (\ref{eq:heart}) we have:
\begin{align*}
\frac{\pi\left( j\right) }{\pi\left( i\right) }\frac{\pi\left( k\right)
}{\pi\left( j\right) }\frac{\pi\left( i\right) }{\pi\left( k\right) }
& =1\implies\frac{P\left( j\mid i\right) }{P\left( i\mid j\right) }
\frac{P\left( k\mid j\right) }{P\left( j\mid k\right) }\frac{P\left(
i\mid k\right) }{P\left( k\mid i\right) }=1\\
& \implies\frac{P\left( j\mid i\right) P\left( k\mid j\right) P\left(
i\mid k\right) }{P\left( k\mid i\right) P\left( j\mid k\right) P\left(
i\mid j\right) }=1\\
& \implies P\left( j\mid i\right) P\left( k\mid j\right) P\left( i\mid
k\right) =P\left( k\mid i\right) P\left( j\mid k\right) P\left( i\mid
j\right)
\end{align*}
So, transitivity holds.
Conversely, if transitivity holds, choose arbitrarily $i^{\ast}\in A$ and set
\begin{equation}
\pi^{\ast}\left( j\right) :=\frac{\dfrac{P\left( j\mid i^{\ast}\right)
}{P\left( i^{\ast}\mid j\right) }}{
{\displaystyle\sum\limits_{k\in A}}
\dfrac{P\left( k\mid i^{\ast}\right) }{P\left( i^{\ast}\mid k\right) }
}=\dfrac{P\left( j\mid i^{\ast}\right) }{P\left( i^{\ast}\mid j\right)
}\zeta\qquad\forall j\in A \label{eq:tr1}
\end{equation}
where $1/\zeta=
{\displaystyle\sum\limits_{k\in A}}
P\left( k\mid i^{\ast}\right) /P\left( i^{\ast}\mid k\right) >0$. By
transitivity, for all $i,j\in A$,
\[
P\left( j\mid i\right) P\left( i^{\ast}\mid j\right) P\left( i\mid
i^{\ast}\right) =P\left( i^{\ast}\mid i\right) P\left( j\mid i^{\ast
}\right) P\left( i\mid j\right)
\]
and, since $P$ is positive,
\[
P\left( j\mid i\right) \frac{P\left( i\mid i^{\ast}\right) }{P\left(
i^{\ast}\mid i\right) }=P\left( i\mid j\right) \frac{P\left( j\mid
i^{\ast}\right) }{P\left( i^{\ast}\mid j\right) }
\]
Thus, for all $i,j\in A$,
\[
P\left( j\mid i\right) \frac{P\left( i\mid i^{\ast}\right) }{P\left(
i^{\ast}\mid i\right) }\zeta=P\left( i\mid j\right) \frac{P\left( j\mid
i^{\ast}\right) }{P\left( i^{\ast}\mid j\right) }\zeta
\]
In view of (\ref{eq:tr1}), reversibility with respect to $\pi^{\ast}$ holds
(note that $\pi^{\ast}$ is strictly positive).
$\blacksquare
$
\noindent\textbf{Proof of Theorem \ref{thm:value-bis}} To ease notation we
write $\rho$ in place of $\rho_{\mathrm{C}}$.
\textquotedblleft If.\textquotedblright\ By Lemma \ref{lem:ma}, since $Q$ is
quasi-positive, then $M$ is positive. By assumption $M$ is reversible. But
then, by Proposition \ref{prop:kolmo}, $M$ is transitive, thus
\[
M\left( j\mid i\right) M\left( k\mid j\right) M\left( i\mid k\right)
=M\left( k\mid i\right) M\left( j\mid k\right) M\left( i\mid j\right)
\]
for all distinct$\ i$, $j$ and $k$ in $A$. By definition of $M$,
\begin{align*}
& Q\left( j\mid i\right) \rho\left( j\mid i\right) Q\left( k\mid
j\right) \rho\left( k\mid j\right) Q\left( i\mid k\right) \rho\left(
i\mid k\right) \\
& =Q\left( k\mid i\right) \rho\left( k\mid i\right) Q\left( j\mid
k\right) \rho\left( j\mid k\right) Q\left( i\mid j\right) \rho\left(
i\mid j\right)
\end{align*}
for all distinct$\ i$, $j$ and $k$ in $A$. By symmetry and quasi-positivity of
$Q$, this implies
\[
\rho\left( j\mid i\right) \rho\left( k\mid j\right) \rho\left( i\mid
k\right) =\rho\left( k\mid i\right) \rho\left( j\mid k\right) \rho\left(
i\mid j\right)
\]
for all distinct$\ i$, $j$ and $k$ in $A$. Therefore, $\rho$ is transitive,
and by Theorem \ref{thm:value} it admits a binary value representation, so
that the Neural Metropolis Algorithm is value based.
\textquotedblleft Only if.\textquotedblright\ If the Neural Metropolis
Algorithm is value based, by definition, $\rho$ admits a binary value
representation, and by Theorem \ref{thm:value} it is transitive. Thus
\[
\rho\left( j\mid i\right) \rho\left( k\mid j\right) \rho\left( i\mid
k\right) =\rho\left( k\mid i\right) \rho\left( j\mid k\right) \rho\left(
i\mid j\right)
\]
for all distinct$\ i$, $j$ and $k$ in $A$. Since $Q$ is symmetric and
quasi-positive, then
\begin{align*}
& Q\left( j\mid i\right) \rho\left( j\mid i\right) Q\left( k\mid
j\right) \rho\left( k\mid j\right) Q\left( i\mid k\right) \rho\left(
i\mid k\right) \\
& =Q\left( k\mid i\right) \rho\left( k\mid i\right) Q\left( j\mid
k\right) \rho\left( j\mid k\right) Q\left( i\mid j\right) \rho\left(
i\mid j\right)
\end{align*}
for all distinct$\ i$, $j$ and $k$ in $A$. By definition of $M$,
\[
M\left( j\mid i\right) M\left( k\mid j\right) M\left( i\mid k\right)
=M\left( k\mid i\right) M\left( j\mid k\right) M\left( i\mid j\right)
\]
for all distinct$\ i$, $j$ and $k$ in $A$. By Lemma \ref{lem:ma}, since $Q$ is
quasi-positive, then $M$ is positive. But then, by Proposition
\ref{prop:kolmo}, $M$ is reversible.
$\blacksquare$
\subsection{Section \ref{sect:icemia}\label{app:endicectomia}}
Proposition \ref{thm:value-ter} is a consequence of the following result.
\begin{proposition}
\label{prop:amul}Given any positive $\rho_{\mathrm{C}}$ and any irreducible
exploration matrix $Q$, the transition matrix $M$ is primitive. Moreover,
denoting by $\pi$ the stationary distribution of $M$, it follows that
\[
\lim_{t\rightarrow\infty}p_{N_{t}}\left( j\right) =\frac{\pi\left(
j\right) \bar{\tau}_{j}}{
{\displaystyle\sum\limits_{k\in A}}
\pi\left( k\right) \bar{\tau}_{k}}\qquad\forall j\in A
\]
provided the distribution of $\mathrm{RT}_{i,j}$ has (strictly) positive
expectation, is continuous at $0$, and has no singular part for all $\left(
i,j\right) \in A^{2}$.
\end{proposition}
\noindent\textbf{Proof of Proposition \ref{prop:amul}} To ease notation we
write $\rho$ in place of $\rho_{\mathrm{C}}$. The stochastic process $\left(
I,J,T\right) $ produces sequences
\[
\left( \underset{\text{state }x_{0}}{\underbrace{j_{-1},i_{0}}}
,t_{0},\underset{\text{state }x_{1}}{\underbrace{j_{0},i_{1}}},t_{1}
,...,\underset{\text{state }x_{n}}{\underbrace{j_{n-1},i_{n}}},t_{n}
,\underset{\text{state }x_{n+1}}{\underbrace{j_{n},i_{n+1}}},...\right)
\]
It can then be seen as a semi-Markov chain with state space
\[
\mathcal{X}=\left\{ \left( j,i\right) \in A^{2}:Q\left( i\mid j\right)
>0\right\}
\]
where state $x=\left( j,i\right) \in\mathcal{X}$ represents the comparison
between incumbent $j$ and proposal $i$. Since the comparison between $j$ and
$i$ produces incumbent $k=i$ with probability $\rho\left( i\mid j\right) $,
$k=j$ with probability $1-\rho\left( i\mid j\right) $, and all other
incumbents with probability $0$, then the probability of switching from
comparison $\left( j,i\right) $ to comparison $\left( k,h\right) $ is
given by
\[
\mathbb{P}\left[ X_{n+1}=\left( k,h\right) \mid X_{n}=\left( j,i\right)
\right] =\left( \delta_{i}\left( k\right) \rho\left( i\mid j\right)
+\delta_{j}\left( k\right) \left( 1-\rho\left( i\mid j\right) \right)
\right) Q\left( h\mid k\right)
\]
In fact,
\begin{itemize}
\item if $i=j$, then the comparison between $j$ and $i$ produces new incumbent
$i$ for sure and
\begin{itemize}
\item[$\circ$] if $k=i$, then
\begin{align*}
\mathbb{P}\left[ X_{n+1}=\left( k,h\right) \mid X_{n}=\left( i,i\right)
\right] & =Q\left( h\mid i\right) \\
& =\left( \underset{=1}{\underbrace{\delta_{i}\left( k\right) }}
\rho\left( i\mid j\right) +~\underset{=1}{\underbrace{\delta_{j}\left(
k\right) }}\left( 1-\rho\left( i\mid j\right) \right) \right) Q\left(
h\mid k\right)
\end{align*}
\item[$\circ$] else $k\neq i$, and
\begin{align*}
\mathbb{P}\left[ X_{n+1}=\left( k,h\right) \mid X_{n}=\left( i,i\right)
\right] & =0\\
& =\left( \underset{=0}{\underbrace{\delta_{i}\left( k\right) }}
\rho\left( i\mid j\right) +~\underset{=0}{\underbrace{\delta_{j}\left(
k\right) }}\left( 1-\rho\left( i\mid j\right) \right) \right) Q\left(
h\mid k\right)
\end{align*}
\end{itemize}
\item if $i\neq j$, then the comparison between $j$ and $i$ produces new
incumbent $k=i$ with probability $\rho\left( i\mid j\right) $ and $k=j$ with
probability $1-\rho\left( i\mid j\right) $ and
\begin{itemize}
\item[$\circ$] if $k=i$, then
\begin{align*}
\mathbb{P}\left[ X_{n+1}=\left( k,h\right) \mid X_{n}=\left( j,i\right)
\right] & =\rho\left( i\mid j\right) Q\left( h\mid i\right) \\
& =\left( \underset{=1}{\underbrace{\delta_{i}\left( k\right) }}
\rho\left( i\mid j\right) +~\underset{=0}{\underbrace{\delta_{j}\left(
k\right) }}\left( 1-\rho\left( i\mid j\right) \right) \right) Q\left(
h\mid k\right)
\end{align*}
\item[$\circ$] if $k=j$, then, we have
\begin{align*}
\mathbb{P}\left[ X_{n+1}=\left( k,h\right) \mid X_{n}=\left( j,i\right)
\right] & =\left( 1-\rho\left( i\mid j\right) \right) Q\left( h\mid
j\right) \\
& =\left( \underset{=0}{\underbrace{\delta_{i}\left( k\right) }}
\rho\left( i\mid j\right) +~\underset{=1}{\underbrace{\delta_{j}\left(
k\right) }}\left( 1-\rho\left( i\mid j\right) \right) \right) Q\left(
h\mid k\right)
\end{align*}
\item[$\circ$] else $k\neq i$ and $k\neq j$, thus
\[
\mathbb{P}\left[ X_{n+1}=\left( k,h\right) \mid X_{n}=\left( j,i\right)
\right] =0=\left( \underset{=0}{\underbrace{\delta_{i}\left( k\right) }
}\rho\left( i\mid j\right) +~\underset{=0}{\underbrace{\delta_{j}\left(
k\right) }}\left( 1-\rho\left( i\mid j\right) \right) \right) Q\left(
h\mid k\right)
\]
\end{itemize}
\end{itemize}
Set
\[
\hat{M}\left( \left( k,h\right) \mid\left( j,i\right) \right) =\left(
\delta_{i}\left( k\right) \rho\left( i\mid j\right) +\delta_{j}\left(
k\right) \left( 1-\rho\left( i\mid j\right) \right) \right) Q\left(
h\mid k\right) \qquad\forall\left( k,h\right) ,\left( j,i\right)
\in\mathcal{X}
\]
Next we show that $\hat{M}$ is a \emph{bona fide} stochastic matrix. Given any
$\left( j,i\right) \in\mathcal{X}$,
\begin{align*}
{\displaystyle\sum_{\left( k,h\right) \in\mathcal{X}}}
\hat{M}\left( \left( k,h\right) \mid\left( j,i\right) \right) & =
{\displaystyle\sum_{\left( k,h\right) \in\mathcal{X}}}
\left( \delta_{i}\left( k\right) \rho\left( i\mid j\right) +\delta
_{j}\left( k\right) \left( 1-\rho\left( i\mid j\right) \right) \right)
Q\left( h\mid k\right) \\
& =
{\displaystyle\sum_{\left( k,h\right) \in A^{2}}}
\left( \delta_{i}\left( k\right) \rho\left( i\mid j\right) +\delta
_{j}\left( k\right) \left( 1-\rho\left( i\mid j\right) \right) \right)
Q\left( h\mid k\right) \\
& =
{\displaystyle\sum_{h\in A}}
\left(
{\displaystyle\sum_{k\in A}}
\left( \delta_{i}\left( k\right) \rho\left( i\mid j\right) +\delta
_{j}\left( k\right) \left( 1-\rho\left( i\mid j\right) \right) \right)
Q\left( h\mid k\right) \right)
\end{align*}
where equality in the second line follows from the fact that if $\left(
k,h\right) \in A^{2}\setminus\mathcal{X}$ then $Q\left( h\mid k\right) =0$.
We will use this fact repeatedly. Now,
\begin{itemize}
\item if $i=j$, then
\[
{\displaystyle\sum_{k\in A}}
\left( \delta_{i}\left( k\right) \rho\left( i\mid j\right) +\delta
_{j}\left( k\right) \left( 1-\rho\left( i\mid j\right) \right) \right)
Q\left( h\mid k\right) =Q\left( h\mid i\right)
\]
hence
\[
{\displaystyle\sum_{\left( k,h\right) \in\mathcal{X}}}
\hat{M}\left( \left( k,h\right) \mid\left( j,i\right) \right) =
{\displaystyle\sum_{h\in A}}
Q\left( h\mid i\right) =1
\]
\item else
\[
{\displaystyle\sum_{k\in A}}
\left( \delta_{i}\left( k\right) \rho\left( i\mid j\right) +\delta
_{j}\left( k\right) \left( 1-\rho\left( i\mid j\right) \right) \right)
Q\left( h\mid k\right) =\rho\left( i\mid j\right) Q\left( h\mid i\right)
+\left( 1-\rho\left( i\mid j\right) \right) Q\left( h\mid j\right)
\]
hence
\begin{align*}
{\displaystyle\sum_{\left( k,h\right) \in\mathcal{X}}}
\hat{M}\left( \left( k,h\right) \mid\left( j,i\right) \right) & =
{\displaystyle\sum_{h\in A}}
\left( \rho\left( i\mid j\right) Q\left( h\mid i\right) +\left(
1-\rho\left( i\mid j\right) \right) Q\left( h\mid j\right) \right) \\
& =\rho\left( i\mid j\right)
{\displaystyle\sum_{h\in A}}
Q\left( h\mid i\right) +\left( 1-\rho\left( i\mid j\right) \right)
{\displaystyle\sum_{h\in A}}
Q\left( h\mid j\right) =1
\end{align*}
\end{itemize}
Next we show that if $\pi\in\Delta\left( A\right) $ and $M\pi=\pi$, then
setting
\[
\hat{\pi}\left( j,i\right) =Q\left( i\mid j\right) \pi\left( j\right)
\qquad\forall\left( j,i\right) \in\mathcal{X}
\]
defines an element of $\Delta\left( \mathcal{X}\right) $ such that $\hat
{M}\hat{\pi}=\hat{\pi}$. Clearly
\[
{\displaystyle\sum_{\left( j,i\right) \in\mathcal{X}}}
\hat{\pi}\left( j,i\right) =
{\displaystyle\sum_{\left( j,i\right) \in\mathcal{X}}}
Q\left( i\mid j\right) \pi\left( j\right) =
{\displaystyle\sum_{\left( j,i\right) \in A^{2}}}
Q\left( i\mid j\right) \pi\left( j\right) =
{\displaystyle\sum_{j\in A}}
\left(
{\displaystyle\sum_{i\in A}}
Q\left( i\mid j\right) \pi\left( j\right) \right) =1
\]
Moreover, for all $\left( k,h\right) \in\mathcal{X}$,
\begin{align*}
\left( \hat{M}\hat{\pi}\right) _{\left( k,h\right) } & =
{\displaystyle\sum_{\left( j,i\right) \in\mathcal{X}}}
\hat{M}\left( \left( k,h\right) \mid\left( j,i\right) \right) \hat{\pi
}\left( j,i\right) =\\
& =
{\displaystyle\sum_{\left( j,i\right) \in\mathcal{X}}}
\left( \delta_{i}\left( k\right) \rho\left( i\mid j\right) +\delta
_{j}\left( k\right) \left( 1-\rho\left( i\mid j\right) \right) \right)
Q\left( h\mid k\right) Q\left( i\mid j\right) \pi\left( j\right) \\
& =
{\displaystyle\sum_{\left( j,i\right) \in A^{2}}}
\left( \delta_{i}\left( k\right) \rho\left( i\mid j\right) +\delta
_{j}\left( k\right) \left( 1-\rho\left( i\mid j\right) \right) \right)
Q\left( h\mid k\right) Q\left( i\mid j\right) \pi\left( j\right) \\
& =Q\left( h\mid k\right)
{\displaystyle\sum_{\left( j,i\right) \in A^{2}}}
\left( \delta_{i}\left( k\right) \rho\left( i\mid j\right) +\delta
_{j}\left( k\right) \left( 1-\rho\left( i\mid j\right) \right) \right)
Q\left( i\mid j\right) \pi\left( j\right)
\end{align*}
Next we show that, for all $k\in A$,
\[
{\displaystyle\sum_{\left( j,i\right) \in A^{2}}}
\left( \delta_{i}\left( k\right) \rho\left( i\mid j\right) +\delta
_{j}\left( k\right) \left( 1-\rho\left( i\mid j\right) \right) \right)
Q\left( i\mid j\right) \pi\left( j\right) =\pi\left( k\right)
\]
obtaining $\left( \hat{M}\hat{\pi}\right) _{\left( k,h\right) }=Q\left(
h\mid k\right) \pi\left( k\right) =\hat{\pi}_{\left( k,h\right) }$.
Indeed
\begin{align*}
&
{\displaystyle\sum_{\left( j,i\right) \in A^{2}}}
\left( \delta_{i}\left( k\right) \rho\left( i\mid j\right) +\delta
_{j}\left( k\right) \left( 1-\rho\left( i\mid j\right) \right) \right)
Q\left( i\mid j\right) \pi\left( j\right) \\
& =
{\displaystyle\sum_{\left( j,i\right) \in A^{2}}}
\delta_{i}\left( k\right) \rho\left( i\mid j\right) Q\left( i\mid
j\right) \pi\left( j\right) +
{\displaystyle\sum_{\left( j,i\right) \in A^{2}}}
\delta_{j}\left( k\right) Q\left( i\mid j\right) \pi\left( j\right) -
{\displaystyle\sum_{\left( j,i\right) \in A^{2}}}
\delta_{j}\left( k\right) \rho\left( i\mid j\right) Q\left( i\mid
j\right) \pi\left( j\right) \\
& =
{\displaystyle\sum_{j\in A}}
\rho\left( k\mid j\right) Q\left( k\mid j\right) \pi\left( j\right) +
{\displaystyle\sum_{i\in A}}
Q\left( i\mid k\right) \pi\left( k\right) -
{\displaystyle\sum_{i\in A}}
\rho\left( i\mid k\right) Q\left( i\mid k\right) \pi\left( k\right)
\end{align*}
Now, since the central summand $
{\displaystyle\sum_{i\in A}}
Q\left( i\mid k\right) \pi\left( k\right) $ equals $\pi\left( k\right) $, we
conclude by showing that $M\pi=\pi$ implies that
\[
{\displaystyle\sum_{j\in A}}
\rho\left( k\mid j\right) Q\left( k\mid j\right) \pi\left( j\right) =
{\displaystyle\sum_{i\in A}}
\rho\left( i\mid k\right) Q\left( i\mid k\right) \pi\left( k\right)
\]
Indeed,
\begin{align*}
{\displaystyle\sum_{j\in A}}
\rho\left( k\mid j\right) Q\left( k\mid j\right) \pi\left( j\right) &
=
{\displaystyle\sum_{j\neq k}}
\rho\left( k\mid j\right) Q\left( k\mid j\right) \pi\left( j\right)
+\rho\left( k\mid k\right) Q\left( k\mid k\right) \pi\left( k\right) \\
& =
{\displaystyle\sum_{j\neq k}}
M\left( k\mid j\right) \pi\left( j\right) +\rho\left( k\mid k\right)
Q\left( k\mid k\right) \pi\left( k\right) \\
& =-M\left( k\mid k\right) \pi\left( k\right) +
{\displaystyle\sum_{j\in A}}
M\left( k\mid j\right) \pi\left( j\right) +\rho\left( k\mid k\right)
Q\left( k\mid k\right) \pi\left( k\right) \\
& =-M\left( k\mid k\right) \pi\left( k\right) +\pi\left( k\right)
+\rho\left( k\mid k\right) Q\left( k\mid k\right) \pi\left( k\right) \\
& =\left( 1-M\left( k\mid k\right) \right) \pi\left( k\right)
+\rho\left( k\mid k\right) Q\left( k\mid k\right) \pi\left( k\right) \\
& =\left(
{\displaystyle\sum_{i\neq k}}
\rho\left( i\mid k\right) Q\left( i\mid k\right) \right) \pi\left(
k\right) +\rho\left( k\mid k\right) Q\left( k\mid k\right) \pi\left(
k\right) \\
& =
{\displaystyle\sum_{i\in A}}
\rho\left( i\mid k\right) Q\left( i\mid k\right) \pi\left( k\right)
\end{align*}
So far, we have not used the fact that $M$ is primitive; this fact is used
next to show that $\hat{M}$ is primitive too. By irreducibility of $M$, for
all $i,k\in A$, there exists a finite sequence
\begin{equation}
i=i_{0},i_{1},i_{2},...,i_{n}=k\label{eq:basechain}
\end{equation}
satisfying $i_{a+1}\neq i_{a}$ and $M\left( i_{a+1}\mid i_{a}\right) >0$,
for all $a=0,...,n-1$, by the definition of $M$, it follows that
\begin{equation}
Q\left( i_{a+1}\mid i_{a}\right) >0\label{eq:chain}
\end{equation}
for all $a=0,...,n-1$.
Consider any $\left( j,i\right) ,\left( k,h\right) \in\mathcal{X}$, and a
chain $i_{0},i_{1},i_{2},...,i_{n}$ satisfying (\ref{eq:basechain}) and
(\ref{eq:chain}). The derived chain
\[
\left( j,i\right) =\left( j,i_{0}\right) ,\left( i_{0},i_{1}\right)
,\left( i_{1},i_{2}\right) ...,\left( i_{n-1},i_{n}\right) ,\left(
i_{n},h\right) =\left( k,h\right)
\]
belongs to $\mathcal{X}$ because $\left( j,i_{0}\right) =\left( j,i\right)
\in\mathcal{X}$, $\left( i_{n},h\right) =\left( k,h\right) \in\mathcal{X}
$, and also $\left( i_{a},i_{a+1}\right) \in\mathcal{X}$ because of
(\ref{eq:chain}). Now
\[
\hat{M}\left( \left( i_{0},i_{1}\right) \mid\left( j,i_{0}\right)
\right) =\left( \delta_{i_{0}}\left( i_{0}\right) \rho\left( i_{0}\mid
j\right) +\delta_{j}\left( i_{0}\right) \left( 1-\rho\left( i_{0}\mid
j\right) \right) \right) Q\left( i_{1}\mid i_{0}\right)
\]
which is strictly positive because, $\rho\left( i_{0}\mid j\right)
=\rho\left( i\mid j\right) >0$ (since $\rho$ is positive) and $Q\left(
i_{1}\mid i_{0}\right) >0$. Moreover,
\[
\hat{M}\left( \left( i_{a+1},i_{a+2}\right) \mid\left( i_{a}
,i_{a+1}\right) \right) =\left( \delta_{i_{a+1}}\left( i_{a+1}\right)
\rho\left( i_{a+1}\mid i_{a}\right) +\delta_{i_{a}}\left( i_{a+1}\right)
\left( 1-\rho\left( i_{a+1}\mid i_{a}\right) \right) \right) Q\left(
i_{a+2}\mid i_{a+1}\right)
\]
which is strictly positive for $a=0,...,n-2$, because $\rho\left( i_{a+1}\mid
i_{a}\right) >0$ and $Q\left( i_{a+2}\mid i_{a+1}\right) >0$. Finally,
\[
\hat{M}\left( \left( i_{n},h\right) \mid\left( i_{n-1},i_{n}\right)
\right) =\left( \delta_{i_{n}}\left( i_{n}\right) \rho\left( i_{n}\mid
i_{n-1}\right) +\delta_{i_{n-1}}\left( i_{n}\right) \left( 1-\rho\left(
i_{n}\mid i_{n-1}\right) \right) \right) Q\left( h\mid i_{n}\right)
\]
which is strictly positive, because $\rho\left( i_{n}\mid i_{n-1}\right) >0$
and $Q\left( h\mid i_{n}\right) =Q\left( h\mid k\right) >0$.
This shows irreducibility of $\hat{M}$. Having proved irreducibility,
primitivity can be established by exhibiting a non-zero diagonal element in
the transition matrix $\hat{M}$. By definition, for all $\left( j,i\right)
\in\mathcal{X}$
\[
\hat{M}\left( \left( j,i\right) \mid\left( j,i\right) \right) =\left(
\delta_{i}\left( j\right) \rho\left( i\mid j\right) +\delta_{j}\left(
j\right) \left( 1-\rho\left( i\mid j\right) \right) \right) Q\left(
i\mid j\right)
\]
Positivity of $\rho$ guarantees that $\delta_{j}\left( j\right) \left(
1-\rho\left( i\mid j\right) \right) >0$, while $Q\left( i\mid j\right)
>0$ by definition of $\mathcal{X}$.
All the assumptions of Howard (1971) p. 713 are then satisfied by the
semi-Markov chain with embedded Markov chain $\hat{M}\left( \left(
k,h\right) \mid\left( j,i\right) \right) $ and holding times $\hat
{T}\left( \left( k,h\right) \mid\left( j,i\right) \right) =\mathrm{RT}
_{i,j}$.\footnote{Observe that holding times are independent of the
\textquotedblleft next state\textquotedblright\ $\left( k,h\right) $
therefore \textquotedblleft average waiting times\textquotedblright\ are just
average holding times (see Howard, 1971, p. 691).} Hence the limit as
$t\rightarrow\infty$ of the probability $\phi_{\left( j,i\right) }\left(
t\right) $ with which comparison $\left( j,i\right) $ is taking place at
time $t$ is given by
\begin{align*}
\phi_{\left( j,i\right) } & =\frac{\hat{\pi}\left( j,i\right)
\tau_{\mathrm{RT}}\left( i\mid j\right) }{
{\displaystyle\sum_{\left( k,h\right) \in\mathcal{X}}}
\hat{\pi}\left( k,h\right) \tau_{\mathrm{RT}}\left( h\mid k\right) }
=\frac{Q\left( i\mid j\right) \pi\left( j\right) \tau_{\mathrm{RT}}\left(
i\mid j\right) }{
{\displaystyle\sum_{\left( k,h\right) \in\mathcal{X}}}
Q\left( h\mid k\right) \pi\left( k\right) \tau_{\mathrm{RT}}\left( h\mid
k\right) }\\
& =\frac{Q\left( i\mid j\right) \pi\left( j\right) \tau_{\mathrm{RT}
}\left( i\mid j\right) }{
{\displaystyle\sum_{\left( k,h\right) \in A^{2}}}
Q\left( h\mid k\right) \pi\left( k\right) \tau_{\mathrm{RT}}\left( h\mid
k\right) }\qquad\forall\left( j,i\right) \in\mathcal{X}
\end{align*}
The same is true if $\left( j,i\right) \notin\mathcal{X}$, because in that
case $\phi_{\left( j,i\right) }\left( t\right) =0$ for all $t$ and
$Q\left( i\mid j\right) =0$. Thus
\begin{align*}
\lim_{t\rightarrow\infty}p_{N_{t}}\left( j\right) & =\lim_{t\rightarrow
\infty}
{\displaystyle\sum_{i\in A}}
\phi_{\left( j,i\right) }\left( t\right) =
{\displaystyle\sum_{i\in A}}
\lim_{t\rightarrow\infty}\phi_{\left( j,i\right) }\left( t\right) =
{\displaystyle\sum_{i\in A}}
\phi_{\left( j,i\right) }\\
& =\frac{
{\displaystyle\sum_{i\in A}}
Q\left( i\mid j\right) \pi\left( j\right) \tau_{\mathrm{RT}}\left( i\mid
j\right) }{
{\displaystyle\sum_{\left( k,h\right) \in A^{2}}}
Q\left( h\mid k\right) \pi\left( k\right) \tau_{\mathrm{RT}}\left( h\mid
k\right) }=\frac{\pi\left( j\right)
{\displaystyle\sum_{i\in A}}
Q\left( i\mid j\right) \tau_{\mathrm{RT}}\left( i\mid j\right) }{
{\displaystyle\sum_{k\in A}}
\pi\left( k\right)
{\displaystyle\sum_{h\in A}}
Q\left( h\mid k\right) \tau_{\mathrm{RT}}\left( h\mid k\right) }\\
& =\frac{\pi\left( j\right) \bar{\tau}_{j}}{
{\displaystyle\sum_{k\in A}}
\pi\left( k\right) \bar{\tau}_{k}}
\end{align*}
as desired.
$\blacksquare$
Consider, as in Cerreia-Vioglio et al. (2022, Section 2), a symmetric DDM
and an exploration matrix with off diagonal entries that are inversely
proportional to mean response times. This means that there exists $w>0$ such
that
\[
Q\left( i\mid j\right) =\frac{w}{\tau_{\mathrm{RT}}\left( i\mid j\right)
}\text{\qquad}\forall i\neq j
\]
so that
\[
Q\left( j\mid j\right) =1-
{\displaystyle\sum\limits_{i\neq j}}
\frac{w}{\tau_{\mathrm{RT}}\left( i\mid j\right) }\text{\qquad}\forall j
\]
Also assume that $\tau_{\mathrm{RT}}\left( j\mid j\right) <\delta$ (small)
for all $j\in A$.\footnote{The Drift Diffusion Model makes no predictions on
the response time for comparisons of an alternative with itself. But, it makes
sense to assume that the decision unit takes almost no time in realizing that
no actual comparison needs to be made.} With this, for all $j\in A$,
\begin{align*}
\bar{\tau}_{j} & =
{\displaystyle\sum\limits_{i\in A}}
Q\left( i\mid j\right) \tau_{\mathrm{RT}}\left( i\mid j\right) \\
& =
{\displaystyle\sum\limits_{i\neq j}}
\frac{w}{\tau_{\mathrm{RT}}\left( i\mid j\right) }\tau_{\mathrm{RT}}\left(
i\mid j\right) +\left( 1-
{\displaystyle\sum\limits_{i\neq j}}
\frac{w}{\tau_{\mathrm{RT}}\left( i\mid j\right) }\right) \tau
_{\mathrm{RT}}\left( j\mid j\right) =\left( \left\vert A\right\vert
-1\right) w+\delta_{j}
\end{align*}
with $0\leq\delta_{j}<\delta$. Since $\delta$ is small, then
\[
\bar{\tau}_{j}\approx\left( \left\vert A\right\vert -1\right) w
\]
irrespective of $j$, so that $\alpha\left( j\right) $ is approximately
constant in (\ref{eq:alpha}) and
\[
\lim_{t\rightarrow\infty}p_{N_{t}}\left( j\right) \approx\frac{e^{\lambda
v\left( j\right) }}{
{\displaystyle\sum\limits_{k\in A}}
e^{\lambda v\left( k\right) }}\qquad\forall j\in A
\]
that is, the limit probability is approximately of the multinomial logit type.
\begin{Hidden}
\noindent\textsc{Powers of a matrix}
\noindent$N=\left\{ 1,2,...,n\right\} $, $T$ an $n\times n$ square matrix,
$T^{0}=I$, $T^{m}=TT^{m-1}$ for all $m\geq1$. Denoting by $t_{ij}^{\left(
m\right) }$ the $\left( i,j\right) $-th element of $T^{m}$, for $m=2$ we
have
\[
t_{ij}^{\left( 2\right) }=\sum\limits_{k=1}^{n}t_{ik}t_{kj}=\sum
\limits_{k_{1}\in N^{2-1}}t_{ik_{1}}t_{k_{1}j}\qquad\forall i,j\in N
\]
\begin{proposition}
By induction if $m\geq3$,
\[
t_{ij}^{\left( m\right) }=\sum\limits_{\left( k_{1},k_{2},...,k_{m-1}
\right) \in N^{m-1}}t_{ik_{1}}t_{k_{1}k_{2}}\cdot\cdot\cdot t_{k_{m-1}
j}\qquad\forall i,j\in N
\]
\end{proposition}
\noindent\textbf{Proof} The result is true for $m=3$, in fact,
\begin{align*}
t_{ij}^{\left( 3\right) } & =\sum\limits_{h=1}^{n}t_{ih}t_{hj}^{\left(
2\right) }=\sum\limits_{h\in N}t_{ih}\sum\limits_{k\in N}t_{hk}t_{kj}
=\sum\limits_{h\in N}\sum\limits_{k\in N}t_{ih}t_{hk}t_{kj}\\
& =\sum\limits_{\left( h,k\right) \in N^{2}}t_{ih}t_{hk}t_{kj}
=\sum\limits_{\left( k_{1},k_{2}\right) \in N^{2}}t_{ik_{1}}t_{k_{1}k_{2}
}t_{k_{2}j}
\end{align*}
Let $m>3$ and assume true for $m-1$, then
\begin{align*}
t_{ij}^{\left( m\right) } & =\sum\limits_{h=1}^{n}t_{ih}t_{hj}^{\left(
m-1\right) }=\sum\limits_{h=1}^{n}t_{ih}\sum\limits_{\left( k_{1}
,k_{2},...,k_{m-2}\right) \in N^{m-2}}t_{hk_{1}}t_{k_{1}k_{2}}\cdot\cdot\cdot
t_{k_{m-2}j}\\
& =\sum\limits_{h\in N}t_{ih}\sum\limits_{\left( h_{2},...,h_{m-1}\right)
\in N^{m-2}}t_{hh_{2}}t_{h_{2}h_{3}}\cdot\cdot\cdot t_{h_{m-1}j}
=\sum\limits_{h\in N}\sum\limits_{\left( h_{2},...,h_{m-1}\right) \in
N^{m-2}}t_{ih_{1}}t_{h_{1}h_{2}}\cdot\cdot\cdot t_{h_{m-1}j}\\
& =\sum\limits_{\left( h_{1},h_{2},...,h_{m-1}\right) \in N^{m-1}}
t_{ih_{1}}t_{h_{1}h_{2}}\cdot\cdot\cdot t_{h_{m-1}j}
\end{align*}
as wanted.
$\blacksquare$
\end{Hidden}
\begin{Hidden}
\noindent\textsc{Quasipositivity and primitivity}
\noindent Let
\[
T=\left[
\begin{array}
[c]{cc}
0 & 1\\
1 & 0
\end{array}
\right]
\]
then
\[
T^{2}=\left[
\begin{array}
[c]{cc}
0 & 1\\
1 & 0
\end{array}
\right] \left[
\begin{array}
[c]{cc}
0 & 1\\
1 & 0
\end{array}
\right] =\left[
\begin{array}
[c]{cc}
1 & 0\\
0 & 1
\end{array}
\right] =I
\]
Therefore $T^{2m}=I$ and $T^{2m+1}=T$ for all $m\geq0$. $T$ is quasi-positive
but not primitive.
\begin{proposition}
If $n\geq3$, then any quasi-positive matrix $T$ is primitive, indeed $T^{2}$
is positive.
\end{proposition}
\noindent\textbf{Proof} For all $i,j\in N$,
\[
t_{ij}^{\left( 2\right) }=\sum\limits_{k\in N}t_{ik}t_{kj}
\]
where, by quasipositivity, all summands are non-negative and $t_{ik}t_{kj}>0$
for all $k\notin\left\{ i,j\right\} $.
$\blacksquare$
\end{Hidden}
\begin{Hidden}
\noindent\textsc{Reversible stochastic matrices are diagonalizable}
\begin{fact}
If $M$ is any square matrix and $D=\mathrm{diag}\left\{ d_{i}
:i=1,...,n\right\} $, then
\begin{align*}
\left( DM\right) _{ij} & =d_{i}m_{ij}\\
\left( MD\right) _{ij} & =m_{ij}d_{j}
\end{align*}
and in particular, if $d_{i}\neq0$ for all $i\in N$, $D^{-1}=\mathrm{diag}
\left\{ d_{i}^{-1}:i=1,...,n\right\} $.
\end{fact}
Now let $B$ be reversible, then
\[
b_{ij}p_{j}=b_{ji}p_{i}
\]
and since $p>0$, then
\[
b_{ij}=p_{i}b_{ji}p_{j}^{-1}
\]
Consider the diagonal matrix $D=\mathrm{diag}\left\{ p_{i}^{1/2}
:i=1,...,n\right\} $, so that $D^{-1}=\mathrm{diag}\left\{ p_{i}
^{-1/2}:i=1,...,n\right\} $. Then
\[
\left( D^{-1}B\right) _{i,j}=p_{i}^{-1/2}b_{ij}
\]
and
\[
\left( D^{-1}BD\right) _{i,j}=p_{i}^{-1/2}b_{ij}p_{j}^{1/2}
\]
by reversibility
\[
\left( D^{-1}BD\right) _{i,j}=p_{i}^{-1/2}p_{i}b_{ji}p_{j}^{-1}p_{j}
^{1/2}=p_{i}^{1/2}b_{ji}p_{j}^{-1/2}=p_{j}^{-1/2}b_{ji}p_{i}^{1/2}=\left(
D^{-1}BD\right) _{j,i}
\]
Therefore, a reversible matrix is similar to a symmetric matrix which is
similar to a diagonal matrix. But similarity is an equivalence relation.
\end{Hidden}
\begin{Hidden}
\noindent\textsc{Transitivities}
An unbiased, $0$-$1$ valued stochastic choice kernel defines a strict
preference
\[
j\succ_{\rho}i\iff\rho\left( j\mid i\right) =1
\]
note that, by unbiasedness,
\[
j\succ_{\rho}i\iff1-\rho\left( i\mid j\right) =1\iff\rho\left( i\mid
j\right) =0
\]
thus, $\succ_{\rho}$ is \emph{total}: for all $i,j\in A$ either $i\succ j$ or
$i=j$ or $j\succ i$. Moreover, $j\succ_{\rho}i$ if and only if $i\nsucc_{\rho
}j$.
Next we show that $\rho$ is transitive if and only if $\succ_{\rho}$ is.
If $\rho$ is transitive and $\succ_{\rho}$ is not, then there exists $i$,
$j$, and $k$ such that $i\succ_{\rho}k\succ_{\rho}j$, but $i\nsucc_{\rho}j$.
In this case, $i$, $k$, and $j$ must be distinct and $j\succ_{\rho}i$.
Therefore, $i\succ_{\rho}k\succ_{\rho}j\succ_{\rho}i$ implying $\rho\left(
i\mid k\right) =\rho\left( k\mid j\right) =\rho\left( j\mid i\right) =1$
and $\rho\left( k\mid i\right) =\rho\left( j\mid k\right) =\rho\left(
i\mid j\right) =0$ contradicting (\ref{eq:trans-ddm}); thus $\succ_{\rho}$
must be transitive.
Conversely, if $\rho$ is not transitive, then there are three distinct
alternatives $i$, $j$ and $k$ for which (\ref{eq:trans-ddm}) does not hold;
that is,
\[
\rho\left( j\mid i\right) \rho\left( k\mid j\right) \rho\left( i\mid
k\right) \neq\rho\left( k\mid i\right) \rho\left( j\mid k\right)
\rho\left( i\mid j\right)
\]
It is then impossible that both sides contain a zero factor. Since $\rho$ is
$0$-$1$ valued, then:
\begin{itemize}
\item either $\rho\left( j\mid i\right) \rho\left( k\mid j\right)
\rho\left( i\mid k\right) =1$, then $i\succ_{\rho}k\succ_{\rho}j\succ_{\rho
}i$, so that $i\succ_{\rho}k\succ_{\rho}j$ and $i\nsucc_{\rho}j$, thus
$\succ_{\rho}$ is not transitive;
\item or $\rho\left( k\mid i\right) \rho\left( j\mid k\right) \rho\left(
i\mid j\right) =1$, then $i\succ_{\rho}j\succ_{\rho}k\succ_{\rho}i$, so that
$i\succ_{\rho}j\succ_{\rho}k$ and $i\nsucc_{\rho}k$, thus $\succ_{\rho}$ is
not transitive.
\end{itemize}
\end{Hidden}
\end{document} |
\begin{document}
\author{
J\'ozsef Balogh \footnote{Department of Mathematics, University of Illinois at Urbana-Champaign, Urbana, Illinois 61801, USA, and Moscow Institute of Physics and Technology, Russian Federation. E-mail: \texttt{[email protected]}. Research is partially supported by NSF Grant DMS-1764123, NSF RTG grant DMS 1937241, Arnold O. Beckman Research
Award (UIUC Campus Research Board RB 18132), the Langan Scholar Fund (UIUC), and the Simons Fellowship.}
\and Felix Christian Clemen \footnote {Department of Mathematics, University of Illinois at Urbana-Champaign, Urbana, Illinois 61801, USA, E-mail: \texttt{[email protected]}.}
\and Bernard Lidick\'{y} \footnote {Iowa State University, Department of Mathematics, Iowa State University, Ames, IA., E-mail: \newline \texttt{ [email protected]}. Research of this author is partially supported by NSF grant DMS-1855653.}
}
\date{\today}
\title{Maximum Number of Almost Similar Triangles in the Plane}
\abstract{
A triangle $T'$ is $\varepsilon$-similar to another triangle $T$ if their angles pairwise differ by at most $\varepsilon$.
Given a triangle $T$, $\varepsilon>0$ and $n\in\mathbb{N}$, B\'ar\'any and F\"uredi asked to determine the maximum number of triangles $h(n,T,\varepsilon)$ being $\varepsilon$-similar to $T$ in a planar point set of size $n$. We show that for almost all triangles $T$ there exists $\varepsilon=\varepsilon(T)>0$ such that $h(n,T,\varepsilon)=n^3/24 (1+o(1))$. Exploring connections to hypergraph Tur\'an problems, we use flag algebras and stability techniques for the proof.}
\noindent
\textbf{Keywords:} similar triangles, extremal hypergraphs, flag algebras.
\noindent
2020 Mathematics Subject Classification: 52C45, 05D05, 05C65
\section{Introduction}
Let $T,T'$ be triangles with angles $\alpha\leq \beta \leq \gamma$ and $\alpha'\leq \beta'\leq \gamma'$ respectively. The triangle $T'$ is \emph{$\varepsilon$-similar} to $T$ if $|\alpha-\alpha'|< \varepsilon, |\beta-\beta'|<\varepsilon,$ and $|\gamma-\gamma'|<\varepsilon$. B\'ar\'any and F\"uredi~\cite{MR3953886}, motivated by Conway, Croft, Erd\H{o}s and Guy~\cite{MR527745}, studied the maximum number $h(n,T,\varepsilon)$ of triangles in a planar set of $n$ points that are $\varepsilon$-similar to a triangle $T$. For every $T$ and $\varepsilon=\varepsilon(T)>0$ sufficiently small, B\'ar\'any and F\"uredi~\cite{MR3953886} found the following lower bound construction: Place the $n$ points in three groups with as equal sizes as possible, and each group very close to the vertices of the triangle $T$. Now, iterate this by splitting each of the three groups into three further subgroups of points, see Figure~\ref{fig:iterated construction}
for an illustration of this construction. Define a sequence $h(n)$ by $h(0)=h(1)=h(2)=0$ and for $n\geq 3$
\begin{align*}
h(n):=\max\{abc+h(a)+h(b)+h(c): a+b+c=n,\ a,b,c\in\mathbb{N} \}.
\end{align*}
\begin{figure}
\caption{Construction sketch on 27 vertices.}
\label{fig:iterated construction}
\end{figure}
By the previously described construction, this sequence $h(n)$ is a lower bound on $h(n,T,\varepsilon)$.
For $T$ being an equilateral triangle equality holds.
\begin{theo}[B\'ar\'any, F\"uredi~\cite{MR3953886}]
Let $T$ be an equilateral triangle. There exists $\varepsilon_0\geq 1^{\circ}$ such that for all $\varepsilon \in (0,\varepsilon_0)$ and all $n$ we have $h(n,T,\varepsilon)=h(n)$.
In particular, when $n$ is a power of $3$, $h(n,T,\varepsilon)= \frac{1}{24}(n^3-n)$.
\end{theo}
B\'ar\'any and F\"uredi~\cite{MR3953886} also found various examples of triangles $T$ (e.g.~the isosceles right angled triangle) where $h(n,T,\varepsilon)$ is larger than $h(n)$.
The space of triangle shapes $S\subseteq \mathbb{R}^3$ can be represented
with triples $(\alpha, \beta, \gamma)\in \mathbb{R}^3$ of angles $\alpha,\beta,\gamma>0$ with $\alpha+\beta+\gamma=\pi$. When we make statements about almost every triangle, we mean it in a measure theoretic sense, i.e.~that there exists a set $S'\subseteq S$ with the $2$-dimensional Lebesgue measure being $0$ such that the statement holds for all triangles $T\in S\setminus S'$. In \cite{MR3953886} it was also proved that $h(n,T,\varepsilon)$ can only be slightly larger than $h(n)$ for almost every triangle $T$.
\begin{theo}[B\'ar\'any, F\"uredi~\cite{MR3953886}]
\label{pointinplaneasymptotic}
For almost every triangle $T$ there is an $\varepsilon>0$ such that
\begin{align*}
h(n,T,\varepsilon)\leq 0.25072 \binom{n}{3}(1+o(1)).
\end{align*}
\end{theo}
The previously described construction gives a lower bound of $0.25\binom{n}{3}(1+o(1))$. B\'ar\'any and F\"uredi~\cite{MR3953886} reduced the problem of determining $h(n,T,\varepsilon)$ to a hypergraph Tur\'an problem and used the method of flag algebras to get an upper bound on the corresponding Tur\'an problem. Flag algebras is a powerful tool invented by Razborov~\cite{flagsRaz}, which has been used to solve problems in various different areas, including graph theory~\cite{FAgraphs,FAgraphs2}, permutations~\cite{PAPerm1,PAPerm2} and discrete geometry~\cite{FAGeometry,FAGeom2}.
An obstacle B\'ar\'any and F\"uredi~\cite{MR3953886} encountered is that the conjectured extremal example is an iterative construction and flag algebras tend to struggle with those. We will overcome this issue by using flag algebras only to prove a weak stability result and then use cleaning techniques to identify the recursive structure. Similar ideas have been used in \cite{MR3425964} and \cite{MR3667664}. This allows us to prove the asymptotic result and for large enough $n$ an exact recursion.
\begin{theo}
\label{pointsinplanemainasymp}
For almost every triangle $T$ there is an $\varepsilon=\varepsilon(T)>0$ such that
\begin{align}
h(n,T,\varepsilon)= \frac{1}{4} \binom{n}{3}(1+o(1)).
\end{align}
\end{theo}
\begin{theo}\label{pointinplanemainrekursion}
There exists $n_0$ such that for all $n\geq n_0$ and for almost every triangle $T$ there is an $\varepsilon=\varepsilon(T)>0$ such that
\begin{align}
\label{pointsinplanerecformula}
h(n,T,\varepsilon)= a\cdot b \cdot c+h(a,T,\varepsilon)+h(b,T,\varepsilon)+h(c,T,\varepsilon),
\end{align}
where $n=a+b+c$ and $a,b,c$ are as equal as possible.
\end{theo}
We will observe that Theorem~\ref{pointinplanemainrekursion} implies the exact result when $n$ is a power of $3$.
\begin{corl}
\label{pointsinplanecorol}
Let $n$ be a power of $3$. Then, for almost every triangle $T$ there is an $\varepsilon=\varepsilon(T)>0$ such that
\begin{align*}
h(n,T,\varepsilon)= \frac{1}{24}(n^3-n).
\end{align*}
\end{corl}
The paper is organized as follows. In Section~\ref{pointsinplanepreperation} we introduce terminology and notation that we use, we establish a connection from maximizing the number of similar triangles to Tur\'an problems; and we apply flag algebras in our setting to derive a weak stability result. In Section~\ref{pointsinplanemainsec} we apply cleaning techniques to improve the stability result and derive our main results. Finally, in Section~\ref{pointsinplaneconcludingremarks} we discuss further questions.
\section{Preparation}
\label{pointsinplanepreperation}
\subsection{Terminology and Notation}
\begin{defn}
Let $G$ be a $3$-uniform hypergraph (shortly a $3$-graph), $\mathcal{H}$ be a family of $3$-graphs, $v\in V(G)$ and $A,B\subseteq V(G)$. Then,
\begin{itemize}[leftmargin=*]
\setlength\itemsep{0em}
\item $G$ is \emph{$\mathcal{H}$-free}, if it does not contain a copy of any $H\in \mathcal{H}$.
\item a $3$-graph $G$ on $n$ vertices is \emph{extremal} with respect to $\mathcal{H}$, if $G$ is $\mathcal{H}$-free and $e(G')\leq e(G)$ for every $\mathcal{H}$-free 3-graph $G'$ on $n$ vertices. If it is clear from context, we only say $G$ is extremal.
\item for $a,b\in V(G)$, denote $N(a,b)$ the \emph{neighborhood} of $a$ and $b$, i.e.~the set of vertices $c\in V(G)$ such that $abc\in E(G)$.
\item we write $L(v)$ for the \emph{linkgraph} of $v$, that is the graph $G'$ with $V(G')=V(G)\setminus\{v\}$ and $E(G')$ being the set of all pairs $\{a,b\}$ with $abv\in E(G)$.
\item we write $L_A(v)$ for the linkgraph of $v$ on $A$, that is the graph $G'$ with $V(G')=A\setminus\{v\}$ and $E(G')$ being the set of all pairs $\{a,b\}\subseteq A\setminus \{v\}$ with $abv\in E(G)$.
\item we write $L_{A,B}(v)$ for the (bipartite) linkgraph of $v$ on $A\cup B$, that is the graph $G'$ with $V(G')=A\cup B\setminus\{v\}$ and $E(G')$ being the set of all pairs $\{a,b\}$ with $a\in A, b\in B$ and $abv\in E(G)$.
\item we denote by $|L(v)|,|L_A(v)|$ and $|L_{A,B}(v)|$ the number of edges of the linkgraphs $L(v),L_A(v)$ and $L_{A,B}(v)$ respectively.
\end{itemize}
\end{defn}
Define a $3$-graph $S(n)$ on $n$ vertices recursively. For $n=1,2$, let $S(n)$ be the $3$-graph on $n$ vertices with no edges. For $n\geq 3$, choose $a\geq b \geq c$ as equal as possible such that $n=a+b+c$. Then, define $S(n)$ to be the $3$-graph constructed by taking vertex disjoint copies of $S(a), S(b)$ and $S(c)$ and adding all edges with all $3$ vertices coming from a different copy. B\'ar\'any and F\"uredi~\cite{MR3953886} observed that $|S(n)|\geq \frac{1}{24}n^3-O(n \log n)$.
Given a set $B\subseteq \mathbb{C}$ and $\delta>0$, we call the set $U_\delta(B):=\{z: |z-b|<\delta \text{ for some } b\in B\}$ the $\delta$-\emph{neighborhood} of $B$. If $B=\{b\}$ for some $b\in \mathbb{C}$, abusing notation, we write $U_\delta(b)$ for it.
\subsection{Forbidden subgraphs}
\label{pointsinplaneforbidden}
Given a finite point set $P\subseteq \mathbb{R}^2$ in the plane, a triangle $T\in S$ and an $\varepsilon>0$, we denote $G(P,T,\varepsilon)$ the $3$-graph with vertex set $V(G(P,T,\varepsilon))=P$ and triples $abc$ being an edge in $G(P,T,\varepsilon)$ iff $abc$ forms a triangle $\varepsilon$-similar to $T$.
\begin{comment}
Given a finite multiset $Q=\{q_1,\ldots,q_s\}\subseteq \mathbb{R}^2$ and a triangle shape $T\in S$, denote $G(Q,T)$ to be the following hypergraph. The vertex set is $\{1,\ldots,r\}$ and the edge set is the set of all triples $ijk$ such that either $q_iq_jq_k$ is similar to $T$ or $q_i = q_j = q_k$. We call the multiset $Q$ trivial if all points are the same. For a $3$-graph $H$, we define $S(H)\subseteq S$ to be the set of triangles shapes $T$ such that for all finite point sets $P\subseteq \mathbb{R}^2$, we have $H$ is not contained as a copy in $G(P,T)$.
\end{comment}
A $3$-graph $H$ is called \emph{forbidden} if $|V(H)|\leq 12$ and for almost every triangle shape $T\in S$ there exists an $\varepsilon=\varepsilon(T)>0$ such that for every point set $P\subseteq \mathbb{R}^2$, $G(P,T,\varepsilon)$ is $H$-free. Denote $\mathcal{F}$ the family of all forbidden $3$-graphs and $\mathcal{T}_\mathcal{F}\subseteq S$ the set of all triangles $T$ such that there exists $\varepsilon=\varepsilon(T)>0$ such that for every point set $P\subseteq \mathbb{R}^2$, $G(P,T,\varepsilon)$ is $\mathcal{F}$-free. Given $T\in \mathcal{T}_\mathcal{F}$, we denote some $\varepsilon(T)>0$ to be a positive real number such that for every point set $P\subseteq \mathbb{R}^2$, $G(P,T,\varepsilon(T))$ is $\mathcal{F}$-free.
In our definition of forbidden $3$-graphs we restrict the size to be at most $12$. The reason we choose the number $12$ is that the largest forbidden subgraph we need for our proof has size $12$ and we try to keep the family $\mathcal{F}$ to be small.
We will prove Theorem~\ref{pointsinplanemainasymp}, Theorem~\ref{pointinplanemainrekursion} and Corollary~\ref{pointsinplanecorol} for all triangles $T\in \mathcal{T}_\mathcal{F}$. Note that by the definition of $\mathcal{F}$, almost all triangles are in $\mathcal{T}_\mathcal{F}$. B\'ar\'any and F\"uredi~\cite{MR3953886} determined the following hypergraphs to be members of $\mathcal{F}$.
\begin{lemma}[B\'ar\'any and F\"uredi~\cite{MR3953886}, see Lemma 11.2]
\label{pointsinplaneforbiden1}
The following hypergraphs are members of $\mathcal{F}$.\\
\begin{minipage}[t]{.4\textwidth}
\begin{itemize}
\item $K_4^-=\{123,124,134\}$
\item $C_5^-=\{123,124,135,245\}$
\item $C_5^+=\{126,236,346,456,516\}$
\item $L_2=\{123,124,125,136,456\}$
\item $L_3=\{123,124,135,256,346\}$
\end{itemize}
\end{minipage}
\begin{minipage}[t]{.5\textwidth}
\begin{itemize}[leftmargin=*]
\item $L_4=\{123,124,156,256,345\}$
\item $L_5=\{123,124,135,146,356\}$
\item $L_6=\{123,124,145,346,356\}$
\item $P_7^-=\{123,145,167,246,257,347\}.$
\end{itemize}
\end{minipage}
\end{lemma}
\noindent
For the non-computer assisted part of our proof, we will need to extend this list. For the computer assisted part, we excluded additional graphs on $7$ and $8$ vertices.
\begin{lemma}
\label{pointsinplaneforbiden2}
The following hypergraphs are members of $\mathcal{F}$.
\begin{itemize}
\item $L_7=\{123,124,125,136,137,458,678\}$
\item $L_8=\{123,124,125,136,137,468,579,289\}$
\item $L_9=\{123,124,125,136,237,469,578,189\}$
\item $L_{10}=\{123, 124, 125, 126, 137, 138, 239, 58a, 47b, 69c, abc\}.$
\end{itemize}
\end{lemma}
Note that this is not the complete list.
To verify that those hypergraphs are forbidden, we will use the same method as B\'ar\'any and F\"uredi~\cite{MR3953886} used to show that the hypergraphs from Lemma~\ref{pointsinplaneforbiden1} are forbidden. For sake of completeness, we repeat their argument here.
\begin{proof}
We call a $3$-graph $H$ on $r$ vertices \emph{dense} if there exists a vertex ordering $v_1,v_2,\ldots, v_{r}$ such that for every $3\leq i \leq r-1 $ there exists exactly one edge $e_i\in E(H[\{v_1,\ldots,v_i\}])$ containing $v_i$, and there exist exactly two edges $e_{r}, e_{r}'$ containing $v_{r}$. Note that $L_7,L_8,L_9$ and $L_{10}$ are dense.
For convenience, we will work with a different representation of triangle shapes. A triangle shape $T\in S$ is characterized by a complex number $z \in \mathbb{C} \setminus \mathbb{R}$ such that the triangle with vertices $0,1,z$ is similar to $T$. Note that there are at most twelve complex numbers $w$ such that the triangle $\{0, 1, w\}$ is similar to $T$.
Let $H$ be a dense hypergraph on $r$ vertices with vertex ordering $v_1,\ldots, v_r$ and let $P=\{p_1,\ldots, p_r\}\subseteq \mathbb{R}^2$ be a point set such that $G(P,T,\varepsilon)$ contains $H$ (with $p_i$ corresponding to $v_i$), where $\varepsilon$ is small enough such that the following argument holds.
Let $\delta>0$ be sufficiently small.
Without loss of generality, we can assume that $p_1=(0,0)$ and $p_2=(1,0)$. Now, since $H$ is dense, $v_1v_2v_3\in E(H)$ and therefore $p_1p_2p_3$ forms a triangle $\varepsilon$-similar to $T$. Therefore, there exist at most $12$ points (which are functions in $z$) such that $p_3$ is in a $\delta$-neighborhood of one of them. Since $v_4$ is contained in some edge with vertices from $\{v_1,v_2,v_3,v_4\}$, there are at most $12 \cdot 12=144$ points (which are functions in $z$) such that $p_4$ is in a $\delta$-neighborhood of one of them. Continuing this argument, we find functions $f_{i,j}(z)$ in $z$ where $3\leq i \leq r-1$ and $j \leq 12^{r-3}$ such that
\begin{align*}(p_3, p_4,\ldots, p_{r})\in U_\delta(f_{3,j}(z)) \times U_\delta(f_{4,j}(z)) \times \ldots \times U_\delta(f_{r-1,j}(z))
\end{align*}
for some $j \leq 12^{r-3}$.
Since $H$ is dense, $v_{r}$ is contained in exactly two edges $e_{r}$ and $e_{r}'$. For each $j \leq 12^{r-3}$, because $v_r\in e_{r}$, there exist at most $12$ points $f_{r,j,\ell}(z)$ where $\ell \leq 12$ such that
\begin{align*}
p_r \in U_\delta \left(f_{r,j,\ell}(z)\right).
\end{align*}
Similarly, because $v_r\in e_{r}'$, there exist at most $12$ points $g_{r,j,\ell'}(z)$ where $\ell' \leq 12$ such that
\begin{align*}
p_r \in U_\delta \left(g_{r,j,\ell'}(z)\right).
\end{align*}
Thus,
\begin{align}
\label{pointsinplanebigset}
p_r \in \bigcup_{\ell,\ell'\leq 12} U_\delta \left(f_{r,j,\ell}(z)\right) \cap U_\delta \left(g_{r,j,\ell'}(z)\right).
\end{align}
Note that if there exists a $z$ such that for each $1\leq j\leq 12^{r-3}$ none of the equations
\begin{align}
\label{pointsinplaneequations}
f_{r,j,\ell}(z)=g_{r,j,\ell'}(z), \quad \quad 1\leq \ell,\ell' \leq 12
\end{align}
hold, then we can choose $\varepsilon>0$ such that
\begin{align}
\label{pointsinplanedelta}
\delta< \frac{1}{3} \max_{\ell,\ell'}|f_{r,j,\ell}(z)-g_{r,j,\ell'}(z)|,
\end{align}
and therefore the set in \eqref{pointsinplanebigset} is empty, contradicting that $G(P,T,\varepsilon)$ contains a copy of $H$.
Note that, because of \eqref{pointsinplanedelta}, $\varepsilon$ depends on $z$ and therefore on $T$.
If we could find one $z\in \mathbb{C}$ not satisfying any of the equations in \eqref{pointsinplaneequations}, then each of the equations is non-trivial (the solution space is not $\mathbb{C}$). Thus, for each equation the solution set has Lebesgue measure 0. Since there are only at most $12^{r-2}$ equations, the union of the solution sets still has measure $0$. Thus, we can conclude that for almost all triangles $T$ there exists $\varepsilon$ such that $G(P,T,\varepsilon)$ is $H$-free for every point set $P$. It remains to show that for $H\in \{L_7,L_8,L_9,L_{10}\}$ there exists $z\in \mathbb{C}$ not satisfying any of the equations in \eqref{pointsinplaneequations}. We will show this for a $z$ corresponding to the equilateral triangle ($z= \frac{1}{2}+i \cdot \frac{\sqrt{3}}{2}$). For $T$ being the equilateral triangle, there are at most $2^{r-2}$ equations to check. Because of the large amount of cases, we will use a computer to verify it.
Our computer program is a simple brute force recursive approach. It starts by embedding $p_1=(0,0)$ and $p_2=(1,0)$. For each subsequent $3 \leq i \leq r$ it tries both options for embedding $p_i$ dictated by $e_i$. Finally, it checks if the points forming $e'_{r}$ form an equilateral triangle.
If in none of the $2^{r-2}$ generated point configurations the points of $e'_{r}$ form an equilateral triangle, then $H$ is a member of $\mathcal{F}$.
An implementation of this algorithm in python is available at \url{http://lidicky.name/pub/triangle}.
This completes the proof of Lemma~\ref{pointsinplaneforbiden2}.
\end{proof}
Instead of Theorem~\ref{pointsinplanemainasymp} we will actually prove the following stronger result.
\begin{theo}
\label{pointinplaneturanmainrekursion}
We have
\begin{align*}\textup{ex}(n,\mathcal{F})= 0.25\binom{n}{3}(1+o(1)).
\end{align*}
\end{theo}
First, we observe that Theorem~\ref{pointinplaneturanmainrekursion} implies Theorem~\ref{pointsinplanemainasymp}. Let $P\subseteq \mathbb{R}^2$ be a point set of size $n$ and let $T\in \mathcal{T}_{\mathcal{F}}$. Then, $G(P,T,\varepsilon(T))$ is $\mathcal{F}$-free. Now, the number of triangles $\varepsilon(T)$-similar to $T$ equals the number of edges in $G(P,T,\varepsilon(T))$. Since $G(P,T,\varepsilon(T))$ is $\mathcal{F}$-free, we have
\begin{align*}
h(n,T,\varepsilon)\leq \textup{ex}(n,\mathcal{F}).
\end{align*}
Therefore, Theorem~\ref{pointinplaneturanmainrekursion} implies Theorem~\ref{pointsinplanemainasymp}.
\subsection{A structural result via Flag Algebras}
It is a standard application of flag algebras to determine an upper bound for $\textup{ex}(n,\mathcal{G})$ given a family $\mathcal{G}$ of 3-uniform hypergraphs. Running the method of flag algebras on $7$ vertices, B\'ar\'any and F\"uredi~\cite{MR3953886} obtained
\begin{align}
\label{pointsinplaneturanupper}
\textup{ex}(n,\mathcal{F})\leq \textup{ex}(n,\{K_4^-,C_5^-,C_5^+,L_2,L_3,L_4,L_5,L_6,P_7^-\})\leq 0.25072 \binom{n}{3}(1+o(1)).
\end{align}
It is conjectured in~\cite{RavryTuran} that $\textup{ex}(n,\{K_4^-,C_5\}) = 0.25 \binom{n}{3}(1+o(1))$.
We note that when running flag algebras on $8$ vertices and forbidding more $3$-graphs in $\mathcal{F}$, we can obtain the following improved bound.
\begin{align}
\textup{ex}(n,\mathcal{F})\leq 0.2502 \binom{n}{3}(1+o(1)).
\label{ourbound}
\end{align}
Note that Conjecture~\ref{conj:frv} is a significant strengthening of \eqref{pointsinplaneturanupper} and \eqref{ourbound}.
We use flag algebras to prove a stability result. For an excellent explanation of flag algebras in the setting of $3$-graphs see~\cite{RavryTuran}. Here, we will focus on the formulation of the problem rather than providing a formal explanation of the general method.
As a consequence, we obtain the following lemma, which gives the first rough structure of extremal constructions.
This approach was developed in~\cite{MR3425964} and~\cite{MR3667664}.
\begin{lemma}
\label{flaginductive}
Let $n\in \mathbb{N}$ be sufficiently large and let $G$ be an $\mathcal{F}$-free $3$-graph on $n$ vertices with $|E(G)|\geq \frac{1}{24} n^3(1+o(1))$ edges. Then there exists an edge $x_1x_2x_3\in E(G)$ such that for $n$ large enough
\begin{enumerate}
\item[\textup{(i)}] the neighborhoods $N(x_1,x_2),N(x_2,x_3)$, and $N(x_1,x_3)$ are pairwise disjoint.
\item[\textup{(ii)}] $\min\{|N(x_1,x_2)|, |N(x_2,x_3)|, |N(x_1,x_3)|\} \geq 0.26n.$
\item[\textup{(iii)}]
$n - |N(x_1,x_2)|-|N(x_2,x_3)|-|N(x_1,x_3)| \leq 0.012n.$
\end{enumerate}
\end{lemma}
\begin{proof}
Denote $T_{i,j,k}$ the family of $3$-graphs that are obtained from a complete $3$-partite $3$-graph with part sizes $i$, $j$ and $k$ by adding $\mathcal{F}$-free $3$-graphs in each of the three parts. Let $X$ be a subgraph of $G$ isomorphic to $T_{2,2,1}$ on vertices $x_1,x_1',x_2,x_2',x_3$ with edges
$
x_1x_2x_3, x_1x_2'x_3, x_1'x_2x_3, x_1'x_2'x_3
$. Further, define
\begin{align*}
A_1&:=N(x_2,x_3) \cap N(x_2',x_3), &
A_3&:=N(x_1,x_2) \cap N(x_1',x_2) \cap N(x_1,x_2') \cap N(x_1',x_2'), \\
A_2&:=N(x_1,x_3) \cap N(x_1',x_3),
&
J&:= V(G) \setminus \left( A_1 \cup A_2 \cup A_3 \right).
\end{align*}
Let $a_i := |A_i|/n$ for $1 \leq i \leq 3$. Note that $V(G)=A_1 \cup A_2 \cup A_3 \cup J$ is a partition, because the sets $N(x_1,x_2),N(x_1,x_3)$ and $N(x_2,x_3)$ are pairwise disjoint. Indeed, without loss of generality, assume $N(x_1,x_2) \cap N(x_1,x_3)\neq \emptyset$. Let $v\in N(x_1,x_2) \cap N(x_1,x_3)$. Then $v,x_1,x_2,x_3$ spans at least $3$ edges and therefore $G$ contains a copy of $K_4^-$, a contradiction. We choose $X$ such that
\tikzset{vtx/.style={inner sep=1.1pt, outer sep=0pt, circle, fill,draw}}
\tikzset{lab/.style={inner sep=1.5pt, outer sep=0pt, draw}}
\begin{align}
a_1a_2+a_1a_3+a_2a_3 - \frac{1}{4}\left(a_1^2 + a_2^2 + a_3^2 \right) \label{eq:ai}
\end{align}
is maximized.
Flag algebras can be used to give a lower bound on the expected value of \eqref{eq:ai} for $X$ chosen uniformly at random and therefore also a lower bound on \eqref{eq:ai} when $X$ is chosen to maximize \eqref{eq:ai}.
Let $Z$ be a fixed \emph{labeled} subgraph of $G$ belonging to $T_{i',j',k'}$.
Denote by $T_{i,j,k}(Z)$ the family of subgraphs of $G$ that contain $Z$, belong to $T_{i,j,k}$, where $i' \leq i$, $j' \leq j$, and $k' \leq k$, and the natural three parts of $Z$ are mapped to the same 3 parts in $T_{i,j,k}(Z)$. The normalized number of $T_{i,j,k}(Z)$ is
\[
t_{i,j,k}(Z) :=\frac{|T_{i,j,k}(Z)|}{ \binom{n-|V(Z)|}{i+j+k-|V(Z)|}}.
\]
The subgraphs of $G$ isomorphic to $T_{i,j,k}$ are denoted by $T_{i,j,k}(\emptyset)$.
The normalized number is
\[
t_{i,j,k} := \frac{|T_{i,j,k}(\emptyset)|}{\binom{n}{i+j+k}}.
\]
Notice that $a_1 = t_{3,2,1}(X) + o(1)$,
$2a_1a_2 = t_{3,3,1}(X) + o(1)$, and $a_1^2 = t_{4,3,1}(X)+o(1)$.
We start with \eqref{eq:ai} and obtain the following.
\begingroup
\allowdisplaybreaks
\begin{align*}
&~ \left(a_1a_2+a_1a_3+a_2a_3 - \frac{1}{4}\left(a_1^2 + a_2^2 + a_3^2 \right) \right) n^2\\
=&~ \left(2a_1a_2+2a_1a_3+2a_2a_3 - \frac{1}{2}\left(a_1^2 + a_2^2 + a_3^2 \right) \right) \binom{n-5}{2} + o(n^2)\\
=&~
\left(
t_{3,3,1}(X) + t_{3,2,2}(X) + t_{2,3,2}(X)
- \frac{1}{2} \left(
t_{4,2,1}(X) + t_{2,4,1}(X) + t_{2,2,3}(X)
\right)
\right) \binom{n-5}{2}\\ &+o(n^2) \\
\geq&~
\frac{1}{t_{2,2,1} \binom{n}{5}}
\Bigg(
\sum_{Y \in T_{2,2,1}(\emptyset)}
(
t_{3,3,1}(Y) + t_{3,2,2}(Y) + t_{2,3,2}(Y)
\\ &- \frac{1}{2} \left(
t_{4,2,1}(Y) + t_{2,4,1}(Y) + t_{2,2,3}(Y)
\right)
\Bigg) \binom{n-5}{2}
+o(n^2) \\
\geq&~
\frac{1}{t_{2,2,1} \binom{n}{5}}
\left(
9\, t_{3,3,1} + 12\, t_{3,2,2}
- \frac{1}{2} \left(
6\, t_{4,2,1} + 3\, t_{2,2,3}
\right)
\right)
\binom{n}{7}+o(n^2)\\
=&~
\frac{1}{7\,t_{2,2,1} }
\left(
3\, t_{3,3,1} + 3.5\, t_{3,2,2}
- \, t_{4,2,1}
\right)
\binom{n-5}{2}+o(n^2).
\end{align*}
\endgroup
\begin{claim}
\label{pointsinplaneflagclaim}
Using flag algebras, we get that
if $\, t_{1,1,1} \geq 0.25$ then
\[
\frac{1}{7\,t_{2,2,1} }
\left(
3\, t_{3,3,1} + 3.5\, t_{3,2,2}
- \, t_{4,2,1}
\right)
\geq \frac{1.2814228}{ 7 \cdot 0.37502377}> 0.48813.
\]
\end{claim}
The calculations for Claim~\ref{pointsinplaneflagclaim} are computer assisted; we use CSDP~\cite{csdp} to calculate numerical solutions of semidefinite programs. The data files and programs for the calculations are available at \url{http://lidicky.name/pub/triangle}. Claim~\ref{pointsinplaneflagclaim} gives a lower bound on \eqref{eq:ai} as follows
\begin{align}
a_1a_2+a_1a_3+a_2a_3 - \frac{1}{4}\left(a_1^2 + a_2^2 + a_3^2 \right) \geq \frac{1.2814228}{14 \cdot 0.37502377} > 0.24406. \label{eq:a}
\end{align}
Notice that if $a_1=a_2=a_3=\frac{1}{3}$, then \eqref{eq:ai}, which is the left hand side of \eqref{eq:a}, is $0.25$. The conclusions (ii) and (iii) of Lemma~\ref{flaginductive}
can be obtained from \eqref{eq:a}. Indeed, assume $a_1< 0.26$. Then,
\begin{align*}
&a_1a_2+a_1a_3+a_2a_3 - \frac{1}{4}\left(a_1^2 + a_2^2 + a_3^2 \right) \\
&\leq a_1 (
1-a_1) + \left(\frac{1-a_1}{2}\right)^2- \frac{1}{4}\left(a_1^2 + 2\left(\frac{1-a_1}{2}\right)^2 \right) \\
&= -\frac{9}{8}a_1^2+\frac{3}{4}a_1+\frac{1}{8}< -\frac{9}{8}\cdot 0.26^2+\frac{3}{4}\cdot 0.26+\frac{1}{8}=0.24395,
\end{align*}
contradicting \eqref{eq:a}. Thus, we have $a_1\geq 0.26$, concluding (ii). Next, assume $a_1+a_2+a_3 \leq 0.988$. Then,
\begin{align*}
&a_1a_2+a_1a_3+a_2a_3 - \frac{1}{4}\left(a_1^2 + a_2^2 + a_3^2 \right) \\
&\leq a_1 (
0.988-a_1) + \left(\frac{0.988-a_1}{2}\right)^2- \frac{1}{4}\left(a_1^2 + 2\left(\frac{0.988-a_1}{2}\right)^2 \right) \\
&= -\frac{9}{8}a_1^2+0.741a_1+0.122018 \leq \frac{61009}{250000} < 0.244037,
\end{align*}
where in the last step we used that the maximum is obtained at $a_1=247/750$. This contradicts \eqref{eq:a}. Thus, we have $a_1+a_2+a_3 \geq 0.988$, concluding (iii).
\end{proof}
In the proof of Lemma~\ref{flaginductive}, we chose a suitable copy of $T_{2,2,1}$ to find the initial 3-partition. One could do the same approach by starting with base $T_{1,1,1}$ instead. However, the resulting bounds would be weaker and not sufficient for the rest of the proof.
This is caused by obtaining a lower bound on \eqref{eq:ai} by taking a random base.
\section{Proof of Theorem~\ref{pointinplaneasymptotic}}
\label{pointsinplanemainsec}
In this section, we will strengthen our flag algebra result Lemma~\ref{flaginductive} by applying cleaning techniques.
\subsection{The top layer}
\begin{lemma}
\label{pointsinplanepartition}
Let $G$ be an $\mathcal{F}$-free $3$-graph on $n$ vertices with $|E(G)|\geq \frac{1}{24} n^3(1+o(1))$, satisfying $|L(w)|\geq \frac{1}{8}n^2(1+o(1))$ for every $w\in V(G)$. Then there exists an edge $x_1x_2x_3\in E(G)$ such that for
\begin{gather*}
A_1:=N(x_2,x_3), \ \ A_2:=N(x_1,x_3), \ \ A_3:=N(x_1,x_2), \ \ J:=V(G)\setminus (A_1\cup A_2 \cup A_3),\\ A_1':=A_1\setminus\{x_1\}, \ \ A_2':=A_2\setminus\{x_2\}, \ \ \text{and} \ \ A_3':=A_3\setminus\{x_3\}
\end{gather*}
we have for $n$ sufficiently large
\begin{itemize}
\item[(a)] $ 0.26n\leq |A_i|\leq 0.48n$ for $i\in[3]$.
\item[(b)] $|J|\leq 0.012n$.
\item[(c)] No triple $abc$ with $a,b\in A_i'$ and $c\in A_{j}'$ for some $i,j\in[3],i\neq j$ forms an edge.
\item[(d)] For $v\in V(G)\setminus\{x_1,x_2,x_3\},\ w_1,w_2\in A_i'$, $u_1,u_2\in A_j'$ with $i,j\in[3]$ and $i\neq j$ we have $vw_1w_2\not\in E(G)$ or $vu_1u_2\not\in E(G)$.
\item[(e)] For every $v\in V(G)\setminus\{x_1,x_2,x_3\}$, there exists $i\in[3]$ such that $|L_{A_j,A_k}(v)|\geq 0.001n^2$, where $j,k\in[3], j\neq k, j\neq i, k\neq i$.
\end{itemize}
\end{lemma}
\begin{proof}
Apply Lemma~\ref{flaginductive} and get an edge $x_1x_2x_3$ with the properties from Lemma~\ref{flaginductive}. The sets $A_1,A_2,A_3$ are pairwise disjoint.
\begin{claim}\label{pointsinplaneclaim32}
Properties (a)--(c) hold.
\end{claim}
\begin{proof}
Note that $(a)$ and $(b)$ hold by Lemma~\ref{flaginductive}. To prove $(c)$, assume that there exists $abc\in E(G)$ with $a,b\in A_i'$ and $c\in A_{j}'$ for some $i,j\in[3],i\neq j$. Let $k\in [3], k\neq i, k\neq j$. See Figure~\ref{fig:pointsinplaneclaim32} for an illustration. Now,
\begin{align*}
x_ix_jx_k,abc,x_jx_ka,x_jx_kb,cx_ix_k\in E(G).
\end{align*}
\tikzset{vtx/.style={inner sep=1.7pt, outer sep=0pt, circle, fill,draw}}
\begin{figure}
\caption{Situation in Claim~\ref{pointsinplaneclaim32}.}
\label{fig:pointsinplaneclaim32}
\caption{Situation in Claim~\ref{pointsinplaneclaimd}.}
\label{fig:pointsinplaneclaim33}
\end{figure}
Therefore $G$ contains a copy of $L_2$ on $\{x_1,x_2,x_3,a,b,c\}$, a contradiction.
\end{proof}
\begin{claim}
\label{pointsinplaneclaimd}
Property (d) holds.
\end{claim}
\begin{proof}
Towards contradiction, assume that there exists $v\in V(G)\setminus\{x_1,x_2,x_3\}, w_1,w_2\in A_i'$, $u_1,u_2\in A_j'$ for $i,j\in[3]$ with $i\neq j$ such that $vw_1w_2\in E(G)$ and $vu_1u_2\in E(G)$. Let $k\in [3], k\neq i, k\neq j$. See Figure~\ref{fig:pointsinplaneclaim33} for an illustration. Now, $\{x_1,x_2,x_3,v,u_1,u_2,w_1,w_2\}$ spans a copy of $L_7$ because
\begin{align*}x_ix_jx_k,vw_1w_2,vu_1u_2, x_jx_kw_1, x_jx_kw_2, x_ix_ku_1, x_ix_ku_2\in E(G).
\end{align*}
However, $L_7\in \mathcal{F}$ by Lemma~\ref{pointsinplaneforbiden2}, contradicting that $G$ is $\mathcal{F}$-free.
\end{proof}
\begin{claim}
Property (e) holds.
\end{claim}
\begin{proof}
Let $v\in V(G)\setminus\{x_1,x_2,x_3\}$. Towards contradiction, assume
\begin{align*}
|L_{A_{1},A_{2}}(v)|< 0.001n^2 \quad \text{and} \quad |L_{A_{1},A_{3}}(v)|< 0.001n^2 \quad \text{and} \quad |L_{A_{2},A_{3}}(v)|< 0.001n^2.
\end{align*}
By property $(d)$, there exists $i\in[3]$ such that $|L_{A_j'}(v)|=|L_{A_k'}(v)|=0$ for $j,k\in[3]\setminus\{i\}$ with $j\neq k$.
Note that $|L_{A_i}(v)|\leq |A_i|^2/4$, since $L_{A_i}(v)$ is triangle-free, because otherwise there would be a copy of $K_4^-$ in $G$. We have
\begin{align*}
|L(v)|&\leq |J|\cdot n + |L_{A_{1},A_{2}}(v)| +|L_{A_{2},A_{3}}(v)| +|L_{A_{1},A_{3}}(v)| \\
&+ |L_{A_1}(v)|+|L_{A_2}(v)|+|L_{A_3}(v)|\\
&\leq |J| \cdot n +0.003n^2+\frac{|A_i|^2}{4}+2n
\leq 0.012n^2+0.003n^2+ 0.06n^2+2n\\
&< 0.08n^2(1+o(1)),
\end{align*}
contradicting the assumption $|L(v)| \geq \frac{1}{8}n^2(1+o(1))$. Note that we used $|A_i|\leq 0.48n$ and $|J|\leq 0.012n$ from properties $(a)$ and $(b)$.
\end{proof}
This completes the proof of Lemma~\ref{pointsinplanepartition}.
\end{proof}
\begin{lemma}
\label{Fextrpartition}
Let $n\in \mathbb{N}$ be sufficiently large and let $G$ be an $\mathcal{F}$-free $3$-graph on $n$ vertices with $|E(G)|\geq \frac{1}{24} n^3(1+o(1))$, satisfying $|L(w)|\geq\frac{1}{8}n^2(1+o(1))$ for every $w\in V(G)$. Then there exists a vertex partition $V(G)=X_1 \cup X_2 \cup X_3$ with $|X_i|\geq 0.26n$ for $i\in[3]$ such that no triple $abc$ with $a,b\in X_i$ and $c\in X_{j}$ for some $i,j\in[3]$ with $i\neq j$ forms an edge.
\end{lemma}
\begin{proof}
Let $x_1x_2x_3\in E(G)$ be an edge with the properties from Lemma~\ref{pointsinplanepartition}. By property (e) we can partition $J=J_1 \cup J_2 \cup J_3$ such that for every $v\in J_i$
we have $|L_{A_j,A_k}(v)|\geq 0.001n^2$, where $j,k\in[3], j\neq k, j\neq i, k\neq i$.
Set $X_i:=A_i\cup J_i$. Note that by properties (c) and (e) for every $v\in X_i\setminus\{x_i\}$ we have $|L_{A_j,A_k}(v)|\geq 0.001n^2$, where $j,k\in[3], j\neq k, j\neq i, k\neq i$.
Further, by property (a) and definition of $X_i$ we have $|X_i|\geq 0.26n$ for $n$ large enough.
Towards contradiction, assume that there exists $a,b\in X_1$ and $c\in X_2$ with $abc\in E(G)$.
For each $a,b,c$ we find their neighbors in $A_1\cup A_2\cup A_3$ that put them to $J_1$ and $J_2$. These neighbors are in $A_1\cup A_2\cup A_3$ because they were adjacent to some of $x_1,x_2,x_3$. This will eventually form one of the forbidden subgraphs.
We will distinguish cases depending on how $a,b,c$ coincide with $x_1,x_2,x_3$.
\tikzset{vtx/.style={inner sep=1.7pt, outer sep=0pt, circle, fill,draw}}
\begin{figure}
\caption{Case 1.}
\label{fig:pointsinplanecase1}
\caption{Case 2.}
\label{fig:pointsinplanecase2}
\caption{Case 4.}
\label{fig:pointsinplanecase4}
\end{figure}
\noindent
\textbf{Case 1:} $a,b\neq x_1$ and $c\neq x_2$.
\noindent
Since
\begin{align*}
|L_{A_2,A_3}(a)|\geq 0.001n^2, \quad |L_{A_2,A_3}(b)|\geq 0.001n^2 \quad \text{and} \quad |L_{A_1,A_3}(c)|\geq 0.001n^2,
\end{align*}
there exist distinct vertices $a_3,b_3,c_3\in A_3, a_2,b_2\in A_2\setminus\{c\}$ and $c_1\in A_1\setminus\{a,b\}$ such that $aa_2a_3,bb_2b_3,cc_1c_3\in E(G)$. See Figure~\ref{fig:pointsinplanecase1} for an illustration. We have
\begin{align*}
x_1x_2x_3, abc, aa_2a_3,bb_2b_3,cc_1c_3, c_1x_2x_3,a_2x_1x_3,b_2x_1x_3, c_3x_1x_2,b_3x_1x_2,a_3x_1x_2\in E(G),
\end{align*}
and therefore the vertices $\{x_1,x_2,x_3,a,b,c,c_1,a_2,b_2,a_3,b_3,c_3\}$ span a copy of $L_{10}$, a contradiction.
\noindent
\textbf{Case 2:} $a=x_1$ and $c\neq x_2$.
\noindent
By property $(d)$, there exist distinct vertices $b_3,c_3\in A_3, b_2\in A_2\setminus\{c\}$ and $c_1\in A_1\setminus\{a,b\}$ such that $bb_2b_3,cc_1c_3\in E(G)$. See Figure~\ref{fig:pointsinplanecase2} for an illustration. We have
\begin{align*}
x_1x_2x_3,x_1bc,bb_2b_3,cc_1c_3, c_1x_2x_3,b_2x_1x_3, c_3x_1x_2,b_3x_1x_2\in E(G),
\end{align*}
and therefore the vertices $\{x_1,x_2,x_3,b,c,c_1,b_2,b_3,c_3\}$ span a copy of $L_{9}$, a contradiction.
\noindent
\textbf{Case 3:} $b=x_1$ and $c\neq x_2$.
\noindent
This case is similar to Case 2.
\noindent
\textbf{Case 4:} $a,b\neq x_1$ and $c=x_2$.
\noindent
By property $(d)$, there exist distinct vertices $a_3,b_3\in A_3, a_2,b_2\in A_2\setminus \{c\}$ such that $aa_2a_3,bb_2b_3\in E(G)$. See Figure~\ref{fig:pointsinplanecase4} for an illustration. We have
\begin{align*}
x_1x_2x_3, abx_2,aa_2a_3,bb_2b_3,a_2x_1x_3,b_2x_1x_3,b_3x_1x_2,a_3x_1x_2\in E(G),
\end{align*}
and therefore the vertices $\{x_1,x_2,x_3,a,b,a_2,b_2,a_3,b_3\}$ span a copy of $L_{8}$, a contradiction.
\noindent
\textbf{Case 5:} $a=x_1$ and $c=x_2$.
\noindent
This means that $b\in N(x_1,x_2)=A_3$, contradicting $b\in X_1$.
\noindent
\textbf{Case 6:} $b=x_1$ and $c=x_2$.
\noindent
This case is similar to Case 5.\\
We conclude that for $a,b\in X_1,c\in X_2$, we have $abc\not\in E(G)$. Similarly, for $a,b\in X_i,c\in X_j$ with $i\neq j$, we have $abc\not\in E(G)$.
\end{proof}
\subsection{The asymptotic result}
In this subsection we will prove Theorem~\ref{pointinplaneturanmainrekursion}. We first observe that an extremal $\mathcal{F}$-free $3$-graph satisfies a minimum degree condition.
\begin{lemma}
\label{pointsinplaneaddvertex}
Let $G$ be an $\mathcal{F}$-free $3$-graph and $v\in V(G)$. Denote $G_{u,v}$ the $3$-graph constructed from $G$ by adding a copy $w$ of $v$ and deleting $u$, i.e.
\begin{align*}
V(G_{u,v})=V(G)\cup \{w\} \setminus\{u\}, \quad E(G_{u,v})=E(G[V(G)\setminus \{u\}]) \cup \{wab \ | \ abv\in E(G) \}.
\end{align*}
Then, $G_{u,v}$ is also $\mathcal{F}$-free.
\end{lemma}
\begin{proof}
Towards contradiction assume that $G_{u,v}$ does contain a copy of some $F\in\mathcal{F}$. Since $G$ is $\mathcal{F}$-free, this copy $F'$ of $F$ contains the vertices $v$ and $w$. $F'-w$ is a subgraph of $G$ and thus $\mathcal{F}$-free, in particular $F' - w \notin \mathcal{F}$. Thus, there exists a set of triangle shapes $\mathcal{T}$ of positive measure such that for $T\in \mathcal{T}$ and $\varepsilon>0$ there exists a point set $P=P(T,\varepsilon)\subseteq \mathbb{R}^2$ with $F' - w$ being isomorphic to $G(P,T,\varepsilon)$. Construct a new point set $P'$ from $P(T,\varepsilon)$ by adding a new point $p_w$ close enough to the point corresponding to $v$. This guarantees that $v$ and $p_w$ have the same link graph in $G(P',T,\varepsilon)$ and that there is no edge in $G(P',T,\varepsilon)$ containing both $p_w$ and $v$. Now, $G(P',T,\varepsilon)$ contains a copy of $F$, contradicting that $F\in\mathcal{F}$.
\end{proof}
\begin{lemma}
\label{pointsplanemindegree}
Let $G$ be an extremal $\mathcal{F}$-free $3$-graph on $n$ vertices. Then for every $w\in V(G)$, we have $|L(w)|\geq \frac{1}{8} n^2(1+o(1))$.
\end{lemma}
\begin{proof}
Assume that there exists $u\in V(G)$ with $|L(u)|<\frac{1}{8} n^2-n^{3/2}$ for $n$ sufficiently large. Let $v\in V(G)$ be a vertex maximizing $|L(v)|$. The $3$-graph $G_{u,v}$ is $\mathcal{F}$-free by Lemma~\ref{pointsinplaneaddvertex} and has more edges than $G$:
\begin{align*}
&|E(G_{u,v})|-|E(G)|\geq -|L(u)|+|L(v)|-d(v,u)\geq -\frac{1}{8} n^2+n^{3/2}+\frac{3|E(G)|}{n}-n \\
\geq& -\frac{1}{8} n^2+n^{3/2}+\frac{3|E(S(n))|}{n}-n \geq -\frac{1}{8} n^2+n^{3/2}+\frac{1}{8}n^2-O(n\log n) >0,
\end{align*}
for $n$ sufficiently large. This contradicts the extremality of $G$. Thus for every $w\in V(G)$, we have $|L(w)|\geq \frac{1}{8} n^2-n^{3/2}= \frac{1}{8} n^2(1+o(1))$.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{pointinplaneturanmainrekursion}]
For the lower bound, we have
\begin{align*}
\textup{ex}(n,\mathcal{F})\geq e(S(n))=\frac{1}{24}n^3 (1+o(1)).
\end{align*}
For the upper bound, let $n_0$ be large enough such that the following reasoning holds. For $n\geq n_0$, $\textup{ex}(n,\mathcal{F})\leq 0.251 \binom{n}{3}$ by \eqref{pointsinplaneturanupper}. We will prove by induction on $n$ that $\textup{ex}(n,\mathcal{F})\leq \frac{1}{24}n^3+n \cdot n_0^2$. This trivially holds for $n\leq n_0$, because
\begin{align*}
\textup{ex}(n,\mathcal{F})\leq \binom{n}{3}\leq \frac{1}{24}n^3+n \cdot n_0^2.
\end{align*}
For $n_0\leq n\leq 4n_0$, we have
\begin{align*}
\textup{ex}(n,\mathcal{F})\leq 0.251\binom{n}{3}\leq \frac{1}{24}n^3+0.001 \frac{n^3}{6}\leq \frac{1}{24}n^3+n \cdot n_0^2.
\end{align*}
Now, let $G$ be an extremal $\mathcal{F}$-free $3$-graph on $n\geq 4n_0$ vertices. By Lemma~\ref{pointsplanemindegree} we have $|L(w)|\geq \frac{1}{8}n^2(1+o(1))$ for every $w\in V(G)$. Therefore, the assumptions for Lemma~\ref{Fextrpartition} hold. Take a vertex partition $V(G)=X_1\cup X_2 \cup X_3$ with the properties from Lemma~\ref{Fextrpartition}. Now, for all $i\in[3]$, $|X_i|\geq 0.26 n\geq n_0$ and since $G[X_i]$ is $\mathcal{F}$-free, we have
\begin{align*}
e(G[X_i])\leq \frac{1}{24}|X_i|^3+|X_i| \cdot n_0^2
\end{align*}
by the induction assumption. We conclude
\begin{align*}
e(G)&\leq |X_1||X_2||X_3|+ \sum_{i=1}^3 e(G[X_i])\leq |X_1||X_2||X_3|+ n\cdot n_0^2+ \frac{1}{24}\sum_{i=1}^3 |X_i|^3\\
&\leq \frac{1}{24}n^3+n \cdot n_0^2,
\end{align*}
where in the last step we used that the function $g(x_1,x_2,x_3):=x_1x_2x_3+1/24 (x_1^3+x_2^3+x_3^3)$ with domain $\{(x_1,x_2,x_3) \in [0.26,0.48]^3 : x_1+x_2+x_3=1\}$ achieves its maximum at $x_1=x_2=x_3=1/3$. This can be verified quickly using basic calculus or simply by using a computer, we omit the details.
\end{proof}
Analyzing the previous proof actually gives a stability result.
\begin{lemma}
\label{pointsinplanepartition2}
Let $G$ be an $\mathcal{F}$-free $3$-graph on $n$ vertices and $|E(G)|= 1/24 n^3(1+o(1))$, satisfying $|L(w)|\geq \frac{1}{8}n^2(1+o(1))$ for every $w\in V(G)$.
Then $G$ has a vertex partition $V(G)=X_1\cup X_2 \cup X_3$ such that
\begin{itemize}
\item $|X_i|=\frac{n}{3} (1+o(1))$ for every $i\in[3]$,
\item there is no edge $e=xyz$ with $x,y\in X_i$ and $z\notin X_i$ for $i\in[3]$.
\end{itemize}
\end{lemma}
\begin{proof}
Take a vertex partition $V(G)=X_1\cup X_2 \cup X_3$ from Lemma~\ref{Fextrpartition}. Since $G[X_i]$ is $\mathcal{F}$-free, we have by Theorem~\ref{pointinplaneturanmainrekursion} that $e(G[X_i])\leq \frac{1}{24}|X_i|^3 (1+o(1))$. Now, again
\begin{align*}
\frac{1}{24}n^3(1+o(1))&=e(G)\leq |X_1||X_2||X_3|+ \sum_{i=1}^3 e(G[X_i])\\
&\leq |X_1||X_2||X_3|+ \frac{1}{24}\sum_{i=1}^3 |X_i|^3(1+o(1)).
\end{align*}
Again, since the polynomial $g$ with domain $\{(x_1,x_2,x_3) \in [0.26,0.48]^3 : x_1+x_2+x_3=1\}$ achieves its unique maximum at $x_1=x_2=x_3=1/3$, we get $|X_i|= (1/3+o(1))n$.
\end{proof}
\subsection{The exact result}
\begin{lemma}
\label{pointsinplanepointswap}
Let $T\in \mathcal{T}_\mathcal{F}$ and $P\subseteq \mathbb{R}^2$ be a point set. Denote $G=G(P,T,\varepsilon(T))$. For every $u,v\in V(G)$ there exists a point set $P'$ such that $G_{u,v}=G(P',T,\varepsilon(T))$.
\end{lemma}
\begin{proof}
Let $u,v\in V(G)$. Construct $P'$ from $P$ by removing the point corresponding to $u$ and adding a point close enough to the point corresponding to $v$. This point set satisfies $G_{u,v}=G(P',T,\varepsilon(T))$.
\end{proof}
\begin{lemma}
\label{pointsplanemindegree2}
Let $T\in \mathcal{T}_\mathcal{F}$ be a triangle shape and let $P\subseteq \mathbb{R}^2$ be an $n$-element point set maximizing the number of triangles being $\varepsilon(T)$-similar to $T$. Denote $G=G(P,T,\varepsilon(T))$. Then for every $w\in V(G)$, we have $|L(w)|\geq \frac{1}{8} n^2(1+o(1))$.
\end{lemma}
\begin{proof}
We have that $G$ is $\mathcal{F}$-free. Assume that there exists $u\in V(G)$ with
\begin{align*}
|L(u)|<\frac{1}{8}n^2-n^{3/2}.
\end{align*}
Let $v\in V(G)$ be a vertex maximizing $|L(v)|$. By Lemma~\ref{pointsinplanepointswap} there exists a point set $P'$ such that $G_{u,v}=G(P',T,\varepsilon(T))$. We have $|E(G_{u,v})|>|E(G)|$ by the same calculation as in the proof of Lemma~\ref{pointsplanemindegree}. This contradicts the maximality of $P$.
\end{proof}
Now, we will strengthen the previous stability result.
\begin{lemma}
Let $T\in \mathcal{T}_\mathcal{F}$. There exists $n_0$ such that for every $n\geq n_0$ the following holds. Let $P$ be an $n$-element point set maximizing the number of triangles being $\varepsilon(T)$-similar to $T$. Then, the $3$-graph $G=G(P,T,\varepsilon(T))$ has a vertex partition $V(G)=X_1\cup X_2 \cup X_3$ such that
\begin{enumerate}[label=(\roman*)]
\item there is no edge $e=xyz$ with $x,y\in X_i$ and $z\notin X_i$ for $i\in[3]$,
\item $xyz\in E(G)$ for $x\in X_1, y\in X_2, z\in X_3$,
\item $|X_i|-|X_j|\leq 1$ for all $i,j\in[3]$.
\end{enumerate}
\label{pointsinplanepartition3}
\end{lemma}
\begin{proof}
By Lemma~\ref{pointsplanemindegree2}, for every $w\in V(G)$, $|L(w)|\geq \frac{1}{8} n^2(1+o(1))$. Further, we have
\begin{align*}
e(G)\geq e(S(n))=\frac{1}{4}\binom{n}{3}(1+o(1)).
\end{align*}
Therefore, the assumptions from Lemma~\ref{pointsinplanepartition2} hold. Let $V(G)=X_1\cup X_2 \cup X_3$ be a partition having the properties from Lemma~\ref{pointsinplanepartition2}.
Towards contradiction, assume that there exists $x\in X_1, y\in X_2, z\in X_3$ with $xyz\notin E(G)$. For $i\in [3]$, let $P_i$ be the point set corresponding to the set $X_i$. We have,
\begin{align*}
e(G[X_i])=e(G(P_i,T,\varepsilon(T))).
\end{align*}
Construct a new point set $P'$ by taking a large enough triangle of shape $T$ and placing each of the point sets $P_i$ close to one of the three vertices of $T$. Using condition (i), this new point set $P'$ satisfies
\begin{align*}
e(G(P',T,\varepsilon(T)))&=|X_1||X_2||X_3|+ \sum_{i=1}^3 e(G(P_i,T,\varepsilon(T))) \\
&= |X_1||X_2||X_3|+ \sum_{i=1}^3 e(G[X_i]) > e(G),
\end{align*}
contradicting the maximality of $P$. Therefore, for all $x\in X_1, y\in X_2, z\in X_3$ we have $xyz\in E(G)$. By Theorem~\ref{pointinplaneturanmainrekursion}, we have
\begin{align*}
\frac{e(G[X_1])}{\binom{|X_1|}{3}}=\frac{1}{4}+o(1) \quad \text{ and } \quad \frac{e(G[X_2])}{\binom{|X_2|}{3}}=\frac{1}{4}+o(1).
\end{align*}
Next, towards contradiction, assume that without loss of generality $|X_1|\geq |X_2|+2$. Let $v_1\in X_1$ be minimizing $|L_{X_1}(v_1)|$ and let $v_2\in X_2$ be maximizing $|L_{X_2}(v_2)|$.
By the choice of $v_1$ and $v_2$,
\begin{align*}
|L_{X_1}(v_1)|\leq\frac{3e(G[X_1])}{|X_1|} \quad \text{and} \quad |L_{X_2}(v_2)|\geq\frac{3e(G[X_2])}{|X_2|}.
\end{align*}
The hypergraph $G_{v_1,v_2}$ is still $\mathcal{F}$-free by Lemma~\ref{pointsinplaneaddvertex} and has more edges than $G$:
\begingroup
\allowdisplaybreaks
\begin{align*}
&|E(G_{v_1,v_2})|-|E(G)|=|X_1||X_3|+|L_{X_2}(v_2)|-|L_{X_1}(v_1)|-|X_2||X_3|-|X_3|\\
\geq& \frac{3e(G[X_2])}{|X_2|}-\frac{3e(G[X_1])}{|X_1|}+|X_3|(|X_1|-|X_2|-1)\\
=& \frac{3e(G[X_2])|X_1|-3e(G[X_1])|X_2|}{|X_1||X_2|}+|X_3|(|X_1|-|X_2|-1)\\
=& \left( \frac{1}{4}+o(1) \right)\frac{3\binom{|X_2|}{3}|X_1|-3\binom{|X_1|}{3}|X_2|}{|X_1||X_2|}+|X_3|(|X_1|-|X_2|-1)\\
\geq& \left( \frac{1}{8}+o(1) \right)\frac{|X_2|^3|X_1|-|X_1|^3|X_2|}{|X_1||X_2|}+|X_3|(|X_1|-|X_2|-1)\\
=& \left( \frac{1}{8}+o(1) \right)(|X_2|^2-|X_1|^2)+|X_3|(|X_1|-|X_2|-1)\\
=&(|X_1|-|X_2|)\left( |X_3|-(|X_1|+|X_2|)\left(\frac{1}{8}+o(1)\right) \right)-|X_3|\\
=&(|X_1|-|X_2|)\left( \frac{n}{4}+o(n) \right)-|X_3|
\geq n \left(\frac{1}{2}+o(1)\right)-\left(\frac{1}{3}+o(1)\right)n> 0.
\end{align*}
\endgroup
By Lemma~\ref{pointsinplanepointswap}, the $3$-graph $G_{v_1,v_2}$ can be realized by a point set, which contradicts the maximality of $P$. Therefore, $|X_i|-|X_j|\leq 1$ for all $i,j\in[3]$.
\end{proof}
\subsection{Proof of Theorem~\ref{pointinplanemainrekursion}}
Let $T\in\mathcal{T}_\mathcal{F}$ and $P$ be an $n$-element point set maximizing the number of triangles being $\varepsilon(T)$-similar to $T$. Denote $G=G(P,T,\varepsilon(T))$. By Lemma~\ref{pointsinplanepartition3}, the $3$-graph $G$ has a vertex partition $V(G)=X_1\cup X_2 \cup X_3$ such that $|X_i|-|X_j|\leq 1$ for all $i,j\in[3]$ and there is no edge $e=xyz$ with $x,y\in X_i$ and $z\notin X_i$ for $i\in[3]$. Since the sets $X_1,X_2,X_3$ correspond to point sets of the same sizes, we have $e(G[X_i])\leq h(|X_i|,T,\varepsilon(T))$ for $i\in[3]$. Let $a=|X_1|,b=|X_2|$ and $c=|X_3|$. Now,
\begin{align*}
h(n,T,\varepsilon(T))&=e(G)\leq a \cdot b \cdot c + e(G[X_1])+e(G[X_2])+e(G[X_3])\\
&\leq a \cdot b \cdot c + h(a,T,\varepsilon(T))+h(b,T,\varepsilon(T))+h(c,T,\varepsilon(T)).
\end{align*}
It remains to show
\begin{align*}
h(n,T,\varepsilon(T))\geq
a \cdot b \cdot c + h(a,T,\varepsilon(T))+h(b,T,\varepsilon(T))+h(c,T,\varepsilon(T)).
\end{align*}
There exist point sets $P_a,P_b,P_c\subseteq \mathbb{R}^2$ of sizes $a,b,c$ respectively, such that
\begin{align*}
e(G(P_a,T,\varepsilon(T)))= h(a,T,\varepsilon(T)), \quad \quad e(G(P_b,T,\varepsilon(T)))= h(b,T,\varepsilon(T)) \\ \text{and} \quad \quad e(G(P_c,T,\varepsilon(T)))= h(c,T,\varepsilon(T)).
\end{align*}
Note that we can assume that $\text{diam}(P_a)=1$, $\text{diam}(P_b)=1$ and $\text{diam}(P_c)=1$, where $\text{diam}(Q)$ of a point set $Q$ is the largest distance between two points in the point set. By arranging the three point sets $P_a,P_b,P_c$ in shape of a large enough triangle $T$, we get a point set $P$ such that
\begin{align*}
h(n,T,\varepsilon(T))&\geq e(G(P,T,\varepsilon(T)))=a\cdot b \cdot c+ h(a,T,\varepsilon(T)) + h(b,T,\varepsilon(T)) + h(c,T,\varepsilon(T)),
\end{align*}
completing the proof of Theorem~\ref{pointinplanemainrekursion}.
\subsection{Proof of Corollary~\ref{pointsinplanecorol}}
Let $T$ be a triangle shape such that there exists $\varepsilon(T)$ for which \eqref{pointsinplanerecformula} holds.
By Theorem~\ref{pointinplanemainrekursion}, \eqref{pointsinplanerecformula} holds for almost all triangles. Take a point set $P$ on $3^\ell\geq n_0$ points maximizing the number of triangles being $\varepsilon(T)$-similar to $T$. Denote $H=G(P,T,\varepsilon(T))$. Note that because of scaling invariance we can assume that $\text{diam}(P)$ is arbitrarily small. By applying \eqref{pointsinplanerecformula} iteratively, we have
\begin{align}
\label{pointsinplaneextramli}
h(3^{\ell+i},T,\varepsilon(T))= 3^i \cdot e(H)+3^{3\ell} \frac{1}{24}\left(3^{3i}-3^i\right)
\end{align}
for all $i\geq 0$. \\
Now, towards contradiction, assume that there exists a point set $P'\subseteq \mathbb{R}^2$ of $3^k$ points such that the number of triangles $\varepsilon(T)$-similar to $T$ is more than $e(S(3^k))$. Let $G=G(P',T,\varepsilon(T)).$ Then,
\begin{align*}
e(G)>e(S(3^k))=\frac{1}{24}\left( 3^{3k}-3^k \right).
\end{align*}
Construct a point set $\bar{P}\subseteq \mathbb{R}^2$ of $3^{\ell+k}$ points by taking all points $p_G+p_H$, $p_G\in P',p_H\in P$ where addition is coordinate-wise. Let $\bar{G}:= G(\bar{P},T,\varepsilon(T))$. Since we can assume that $\text{diam}(P')$ is arbitrarily small, $\bar{G}$ is the $3$-graph constructed from $G$ by replacing every vertex by a copy of $H$. Now,
\begin{align*}
e(\bar{G})= e(G) \cdot 3^{3\ell}+e(H) \cdot 3^k> 3^k \cdot e(H)+3^{3\ell} \frac{1}{24}\left(3^{3k}-3^k\right),
\end{align*}
contradicting \eqref{pointsinplaneextramli}. This completes the proof of Corollary~\ref{pointsinplanecorol}.
\section{Concluding remarks}
\label{pointsinplaneconcludingremarks}
When carefully reading the proof, one can observe that also the following Tur\'an type results hold. Recall that $\mathcal{F}$ is the set of forbidden $3$-graphs defined in Section~\ref{pointsinplaneforbidden}.
\begin{theo}
\label{pointsinplaneturanexact}
The following statements hold.
\begin{itemize}
\item[(a)] There exists $n_0$ such that for all $n\geq n_0$
\begin{align*}
\textup{ex}(n,\mathcal{F})= a\cdot b \cdot c+ \textup{ex}(a,\mathcal{F})+ \textup{ex}(b,\mathcal{F})+ \textup{ex}(c,\mathcal{F}),
\end{align*}
where $n=a+b+c$ and $a,b,c$ are as equal as possible.
\item[(b)] Let $n$ be a power of $3$. Then,
\begin{align*}
\textup{ex}(n,\mathcal{F})= \frac{1}{24}(n^3-n).
\end{align*}
\end{itemize}
\end{theo}
It would be interesting to prove the Tur\'an type results, Theorem~\ref{pointsinplanemainasymp} and Theorem~\ref{pointsinplaneturanexact}, for a smaller family of hypergraphs than $\mathcal{F}$. Potentially the following conjecture by Falgas-Ravry and Vaughan could be tackled in a similar way.
\begin{conj}[Falgas-Ravry and Vaughan~\cite{RavryTuran}]\label{conj:frv}
\begin{align*}
\textup{ex}(n,\{K_4^-,C_5\})=\frac{1}{4}\binom{n}{3}(1+o(1)).
\end{align*}
\end{conj}
Considering that for our proof it was particularly important that $K_4^-$ and $L_2 = \{123,124,125,136,456\}$ are forbidden, we conjecture that $S(n)$ has asymptotically the most edges among $\{K_4^-,L_2\}$-free $3$-graphs.
\begin{conj}
\begin{align*}
\textup{ex}(n,\{K_4^-,L_2\})=\frac{1}{4}\binom{n}{3}(1+o(1)).
\end{align*}
\end{conj}
Note that a standard application of flag algebras on 7 vertices shows
\begin{align*}
\textup{ex}(n,\{K_4^-,L_2\})\leq 0.25074\binom{n}{3}
\end{align*}
for $n$ sufficiently large.
Theorem~\ref{pointsinplanemainasymp} determines $h(n,T,\varepsilon)$ asymptotically for almost all triangles $T$ and $\varepsilon>0$ sufficiently small. It remains open to determine $h(n,T,\varepsilon)$ for some triangles $T\in S$. B\'ar\'any and F\"uredi~\cite{MR3953886} provided asymptotically better bounds stemming from recursive constructions for some of those triangles. Potentially a similar proof technique to ours could be used to determine $h(n,T,\varepsilon)$ for some of those triangle shapes.
Another interesting question is to change the space, and study point sets in $\mathbb{R}^3$ or even $\mathbb{R}^d$ instead of the plane. Given a triangle $T\in S$, $\varepsilon>0$, $d\geq2$ and $n\in \mathbb{N}$, denote
$g_d(n,T,\varepsilon)$ the maximum number of triangles in a set of $n$ points from $\mathbb{R}^d$ that are $\varepsilon$-similar to a triangle $T$. Being allowed to use one more dimension might help us to find constructions with more triangles being $\varepsilon$-similar to $T$.
For an acute triangle $T$ and $d=3$, we can group the $n$ points into four roughly equal sized groups and place each group very close to a vertex of a tetrahedron with each face being similar to $T$.
For a crafty reader, we are including a cutout that leads to a tetrahedron with all sides being the same triangle in Figure~\ref{fig:cutout} on the left.
Each group can again be split up in the same way. Iterating this procedure gives us
\begin{align*}
g_3(n,T,\varepsilon)\geq\frac{1}{15}n^3(1+o(1))
\end{align*}
for some $\varepsilon>0$.
Note that for almost all acute triangles $T$,
\[
g_2(n,T,\varepsilon) = h(n,T,\varepsilon)=\frac{1}{24}n^3(1+o(1)) < g_3(n,T,\varepsilon).
\]
\begin{figure}
\caption{A cutout of a tetrahedron using an acute triangle on the left. A cutout not giving a tetrahedron coming from an obtuse triangle on the right. Bend along the dashed lines.}
\label{fig:cutout}
\end{figure}
For $T$ being an equilateral triangle and $d\geq4$ we can find a better construction. There is a $d$-simplex with all faces forming equilateral triangles. Grouping the $n$ points into $d+1$ roughly equal sized groups and placing each group very close to the vertex of the $d$-simplex and then iterating this, gives us
\begin{align*}
g_d(n,T,\varepsilon)\geq \sum_{i\geq 1}\left(\frac{n}{(d+1)^i}\right)^3 \binom{d+1}{3}(d+1)^{i-1} \ (1+o(1))=\frac{1}{6}\frac{d-1}{d+2}n^3(1+o(1)).
\end{align*}
The following variation of the problem could also be interesting.
We say that two triangles are \emph{$\varepsilon$-isomorphic} if their side lengths are $a \leq b \leq c$ and
$a' \leq b' \leq c'$ and $|a-a'|,|b-b'|,|c-c'| < \varepsilon$.
Maximizing the number of $\varepsilon$-isomorphic triangles has the following upper bound.
Denote the side lengths of a triangle $T$ by $a$, $b$, and $c$. Now color edges of $K_n$ with colors $a$, $b$, and $c$ such that the number of rainbow triangles is maximized. Note that rainbow triangles would correspond to triangles isomorphic to $T$, if there exists an embedding of $K_n$ in some $\mathbb{R}^d$ such that the distances correspond to the colors.
The problem of maximizing the number of rainbow triangles in a $3$-edge-colored $K_n$ is a problem of Erd\H{o}s and S\'{o}s (see~\cite{MR0337636}) that was solved by flag algebras~\cite{MR3667664}. The asymptotic construction is an iterated blow-up of a properly $3$-edge-colored $K_4$.
Properly $3$-edge-colored $K_4$ can be embedded as a tetrahedron in $\mathbb{R}^3$.
This gives $\frac{1}{16}n^3(1+o(1))$ $\varepsilon$-isomorphic triangles in $\mathbb{R}^3$.
This heuristics suggests that increasing the dimension beyond $3$ may allow us to embed slightly more $\varepsilon$-isomorphic triangles by making it possible to embed more of the iterated blow-up of $K_4$ construction.
The number of rainbow triangles in the iterated blow-up of a properly $3$-edge-colored $K_4$ is $\frac{1}{15}n^3(1+o(1))$, which is an upper bound on the number of $\varepsilon$-isomorphic triangles for any $d$.
In our construction maximizing the number of $\varepsilon$-similar triangles for $d=3$, the majority of triangles are actually $\varepsilon$-isomorphic. Already for $d=3$, we can embed $\frac{1}{15}n^3(1+o(1))$ $\varepsilon$-similar triangles, which is the upper bound on the number of $\varepsilon$-isomorphic triangles for any $d$. This suggests that increasing the dimension beyond $d=3$ may result in only very small increases on the number $\varepsilon$-isomorphic triangles or a very different construction is needed.
The above heuristic does not apply to isosceles triangles. Maximizing the number of $\varepsilon$-isomorphic triangles would correspond to a $2$-edge-coloring of $K_n$ and maximizing the number of induced path on $3$ vertices in one of the two colors.
The extremal construction is a balanced complete bipartite graph in one color. Increasing the dimension helps with embedding a bigger $2$-edge-coloring of $K_n$ and in turn obtaining a larger number of $\varepsilon$-isomorphic triangles
with $\frac{1}{8}n^3(1+o(1))$ being the upper bound.
In general, the number of obtuse triangles does not seem to benefit as much from higher dimensions. Embedding three $\varepsilon$-similar obtuse triangles on $4$ points is not possible for any $d$
for almost all obtuse triangles.
This contrasts with acute triangles, where $4$ points can give four $\varepsilon$-isomorphic triangles for dimension at least $3$.
The reader may try it
for $\varepsilon$-isomorphic triangles with cutouts in Figure~\ref{fig:cutout}.
We have not explored the above problems for obtuse triangles further.
\end{document} |
\begin{document}
\begin{abstract}
The fundamental problem in the study of parallel-server systems is that of finding and analyzing ``good'' routing policies of arriving jobs to the servers.
It is well known that, if full information regarding the workload process is available to a central dispatcher, then the {\em join the shortest workload} (JSW) policy,
which assigns jobs to the server with the least workload, is the optimal assignment policy, in that it maximizes server utilization, and thus minimizes sojourn times.
The {\em join the shortest queue} (JSQ) policy is an efficient dispatching policy when information is available only on the number of jobs with each of the servers,
but not on their service requirements.
If information on the state of the system is not available, other dispatching policies need to be employed, such as the power-of-$d$ routing policy,
in which each arriving job joins the shortest among $d \ge 1$ queues
sampled uniformly at random. (Under this latter policy, the system is known as {\em the supermarket model}.)
In this paper we study the stability question of parallel server systems assuming that routing errors occur,
so that arrivals may be routed to the ``wrong'' (not to the smallest) queue with a positive probability.
We show that, even if a ``non-idling'' dispatching policy is employed, under which new arrivals are always routed to an idle server, if any is available,
the performance of the system can be much worse than under the policy that chooses one of the servers uniformly at random.
More specifically, we prove that the usual traffic intensity $\rho < 1$ does not guarantee that the system is stable.
\end{abstract}
\title{Stability of Parallel Server Systems}
\section{Introduction} \label{secIntro}
We consider a parallel-server system with $s \ge 2$ statistically-homogeneous servers, each providing service at rate $\mu$,
that is fed by a rate-$\lambda$ Poisson arrival process of statistically identical jobs (or customers). For each server there is a dedicated infinite buffer in which jobs queue, waiting for their turn to be served.
Upon arrival, a job is routed to one of the $s$ servers according to some pre-specified dispatching (routing) rule, with no jockeying between the queues allowed.
In this setting, one seeks a ``good'' routing policy of jobs to the servers, e.g., a policy ensuring that steady state waiting times are minimized, or that the total throughput rate is maximized.
If the workload at each queue can be computed, then it is natural to employ the Join the Shortest Workload (JSW) routing policy, under which an arriving job is routed to
the server with the least workload among all $s$ servers (together with some tie-breaking rule). However, if the workload is unknown, as is often the case in practice,
one may opt to employ the Join-the-Shortest Queue (JSQ) control, which routes an arriving job to the server with the smallest number of jobs.
Indeed, JSW was shown to minimize the workload process in \cite{DaleyOptimal}, whereas
JSQ has been shown to be throughput maximizing in terms of stochastic order, when the service-time distribution has a non-decreasing failure rate \cite{weber78},
and in particular, when the service times are exponentially distributed \cite{Winston77}.
However, even the queue at each server is not always known: In some settings, the number of customers in each queue is estimated, either by the arriving customers who are free to choose
which queue to join (as in a supermarket or security lanes in airports), or by a central dispatcher (as is often the case in passport-checking stations, for example).
Even in automated settings the queue lengths may not be known. For example, information regarding the queues to each of the servers in web-server farms requires constant communication between the servers and the job dispatchers,
slowing down the response time, and is thus not always available; e.g., see \cite{Lu11}.
For this reason, other routing policies have been considered in the literature, most notably the ``power-of-$d$''
policy, which gives rise to the so-called ``supermarket model'' \cite{Mitzenmacher96}. Under this policy, upon each arrival
$d$ servers are chosen uniformly at random, and that arrival is routed to the server with the smallest number of jobs among the $d$ sampled queues, with ties broken uniformly at random.
We denote this routing rule by PW($d$) and note that $d=1$ corresponds to uniform routing (i.e. any incoming job is sent to a queue that is chosen uniformly at random),
whereas $d=s$ corresponds to JSQ.
\subsection{Motivation and Goals} \label{secMotivation}
We are motivated by the fact that, and unlike the idealized settings considered in the literature, routing errors can occur in practice, so that jobs are not always routed in an efficient manner.
In this regard, our main goal is to demonstrate that routing errors can have substantial negative impacts on performance. To this end, we study a particular form of error,
under which arrivals are sent to the ``wrong'' queue (not the smallest) with a fixed probability, and show that the system might not be stable in this case, even if its total service rate is larger than
the rate at which work arrives, i.e., if the traffic intensity to the system is smaller than $1$.
Such errors are likely to occur when JSW is employed, because the actual workload at each server can only be estimated, unless the server is idle (in which case its workload is zero),
but can also occur under JSQ, especially when there is no central dispatcher, and customers choose which queue to join.
We focus on the latter JSQ policy, since under appropriate distributional assumptions (Poisson arrival process and exponentially distributed service times), the queue process evolves as a
continuous-time Markov chain (CTMC), whereas under JSW, the analysis of the queue process requires a continuous-space Markov representation. (Even under JSQ, exact analyses and steady-state computations of the queue
are intractable, and most of the literature is concerned with asymptotic approximations; see Section \ref{secLit} below.)
It will become intuitively clear, and supported by simulation examples in Section \ref{sec:simu}, that our results extend to the JSW case.
Even though our main motivation is to study the impact of routing errors, we treat the allocation of jobs to servers as a routing policy.
We do this for mathematical convenience, as it allows us to treat PW($d$), and therefore also JSQ, as a special case of the family of allocation policies we consider.
Specifically, we assume that the dispatcher (or the arriving customer) chooses correctly the shortest queue with probability $p_1$,
the second-shortest queue with probability $p_2$, and so forth.
We also consider a {\bf ``non-idling'' case}, in which routing errors are made only when all servers are busy, so that the dispatcher (or arriving customer) always chooses an idle server, if such a server is available,
and otherwise makes errors as was just described.
To show that such errors can lead to extreme departures from the desired behavior under JSQ,
we characterize the stability region under the allocation policy as a function of the system's parameters and the error probabilities,
and prove that the usual traffic condition $\rho := \lambda/(s\mu) < 1$ does not guarantee that the system is stable, even in the non-idling case.
\subsection{Background: PW($d$) and Related Routing Policies}
Note that it is not immediately clear that the condition $\rho < 1$ does not imply that the system is stable, especially under the non-idling allocation mechanism,
because the JSQ policy (and of course, JSW) leaves a lot of ``room'' for making routing errors, as can be seen by comparing a system operating under JSQ to the same system operating under uniform routing.
Clearly, uniform routing induces a lot of ``avoidable'' idleness in the system, because arrivals are often routed to busy servers even if there are idle servers present.
Nevertheless, by symmetry, the rate at which jobs arrive at each server is the same under this policy, implying that the traffic intensity at each server separately is smaller than $1$ whenever the traffic intensity $\rho$ to the
whole system is smaller than $1$.
When the arrival process to the system is Poisson, this follows directly from the splitting property of the Poisson process, which implies that each server operates as an $M/G/1$
queue independently of all other servers.
Indeed, if service times are exponentially distributed, in addition to having a Poisson arrival process, so that
the queue process evolves as a CTMC, the improvement that JSQ provides over uniform routing follows from existing results, which we now review.
Let $Q_\Sigma^{(d)}(t)$ denote the total number of jobs in the system at time $t \ge 0$ under PW($d$). Theorem 4 in \cite{Turner98} implies that\footnote{Theorem 4 in \cite{Turner98}
proves a monotone convex order domination, from which sample-path stochastic order follows immediately}, if $d_1 > d_2$, then $Q_\Sigma^{(d_1)} \le_{st} Q_\Sigma^{(d_2)}$,
where $\le_{st}$ denotes sample-path stochastic-order.
(That is, there exists a coupling of the two processes, such that $Q_\Sigma^{(d_1)}(t) \le Q_\Sigma^{(d_2)}(t)$ w.p.1 for all $t > 0$, provided that the inequality holds at time $t = 0$.)
In particular, for $s > 2$,
\begin{equation}
\label{eq:ineqPW}
Q_\Sigma^{(s)} \le_{st} Q_\Sigma^{(d)} \le_{st} Q_\Sigma^{(1)}, \quad 1 < d \le s.
\end{equation}
The stability of a parallel-server system under PW($d$) readily follows. To state this result formally, we say that a parallel-server system is ``Markovian'' if its multi-dimensional queue process
evolves as a CTMC. In particular, the arrival process is Poisson and the service times are independent and identically distributed
(i.i.d.) exponentially distributed random variables, that are independent of the arrival process and of the state of the system.
\begin{corollary} \label{coroStable}
For a Markovian parallel-server system with $s$ servers operating under PW($d$), $1 \le d \le s$, the condition $\rho := \lambda/(s\mu) < 1$ is necessary and sufficient in order for the queue process to be an ergodic CTMC.
\end{corollary}
\begin{proof}
It is easy to see that $Q_\Sigma^{(d)}$ is an irreducible CTMC.
If $\rho \ge 1$, then $Q_\Sigma^{(d)}$ is either null recurrent or transient, because it is bounded from below, in sample-path stochastic order,
by the number-in-system process in an $M/M/1$ queue with arrival rate $\lambda$ and service rate $s\mu$.
On the other hand, if $\rho < 1$, then $Q_\Sigma^{(1)}$ is ergodic, because it evolves as
$s$ independent $M/M/1$ queues, each with arrival rate $\lambda/s$ and service rate $\mu$. In particular the empty state (zeroth vector) is positive recurrent for the CTMC $Q_\Sigma^{(1)}$,
and, by virtue of \eqref{eq:ineqPW}, also for $Q_\Sigma^{(d)}$, $1 < d \le s$.
\end{proof}
A more quantitative analysis can be carried out asymptotically, by taking the number of servers $s$ to infinity, assuming that the arrival rate grows proportionally to $s$.
As was shown in \cite{Mitzenmacher96, Vved96}, the steady-state probability that an arrival is routed to a queue of length at least $k$ is $\rho^{d^k}$, i.e., it is doubly exponential in $k$ for $d \ge 2$,
as opposed to exponential when $d = 1$ (which is tantamount to uniform routing).
The dramatic differences between the {\em maximum} queue length in stationarity in the cases $d=1$ and $d \ge 2$
is demonstrated in \cite{Luczak06}, which shows that the maximum queue length is of order $\ln(s) / \ln(1/\lambda)$ when $d=1$, and of order $\ln\ln(s)/\ln(d)$ when $d \ge 2$ with probability converging to $1$ as
$s \longrightarrow \infty$.
Further, heavy-traffic analysis shows that the performance under PW($d$), for any fixed $d < s$, is substantially worse than under JSQ.
In particular, considering a sequence of systems indexed by the number of servers $s$, and letting $\lambda_s$ denote the arrival rate to system $s$,
\cite{Gamarnik_JSQ} and \cite{Gamarnik_supermarket} analyze a system operating under JSQ and PW($d$), respectively, in the heavy-traffic limiting regime, where $\lambda_s = s\mu - \Theta(\sqrt{s})$.
It is proved in \cite{Gamarnik_JSQ} that, under JSQ, only a negligible proportion (which converges to $0$) of the customers encounter a queue upon arrival, and those customers that have to wait encounter only
one customer in queue. Thus, asymptotically, no queue is larger than $2$. (This result holds only after some transient period, because the initial condition may have many larger queues.)
On the other hand, \cite{Gamarnik_supermarket} proves that, in the supermarket model with $d > 1$, the fraction of queues that are of order $\log_d \sqrt{s}$ approaches $1$ as $s \rightarrow\infty$.
To conclude, the dimensionality of the queue process, and the fact that it is not reversible, render exact analysis of parallel-server systems intractable, even
under Markovian assumptions. Other than stability results and stochastic domination, as in \eqref{eq:ineqPW},
little can be said about the systems' dynamics and steady state distributions. Nevertheless,
the aforementioned asymptotic results suggest that JSQ is substantially more efficient than
PW($d$) for $d < s$, which, in turn, is substantially more efficient than uniform routing, namely, than PW($1$).
\subsection{Notation}
\label{subsec:not}
We use ${\mathbb R}$ to denote the set of real numbers, with ${\mathbb R}_+ = [0, \infty)$, ${\mathbb Z}_+$ to denote the set of non-negative integers, and ${\mathbb Z}_+^* := {\mathbb Z}_+ - \{0\}$ the subset of (strictly) positive integers.
For any $q \in {\mathbb Z}_+$ and all sets $A$, we denote by
$A^q$ the set of vectors of dimension $q$ having elements in $A$, e.g., ${\mathbb R}^q$ is the set of $q$-dimensional real-valued vectors.
Vectors are in general denoted by bold letters.
For a vector $\mathbf x=(x_1,...,x_q)$ in ${\mathbb R}^q$, we denote by $\ord{\mathbf x}$ the ordered version of $\mathbf x$, i.e. $\ord{\mathbf x} = (x_{(1)}, x_{(2)},\dots, x_{(q)})$ is any permutation of the elements of $\mathbf x$ such that
$x_{(1)} \le x_{(2)} \le \cdots \le x_{(q)}$.
(If several such permutations exist, any one of them may serve as the ordered vector $\ord{\mathbf x}$.)
The set of ordered vectors in $A^q$ is denoted by $\ord{A^q}$; for example, $\ord{{\mathbb R}_+^q} := \{\mathbf x \in {\mathbb R}^q_+ : x_1 \le \cdots \le x_q\}$.
We let $\mathbf y \circ \mathbf x \in {\mathbb R}^q$ denote the Hadamard product of two vectors $\mathbf x=(x_1,...,x_q)$ and $\mathbf y=(y_1,...,y_q)$ in ${\mathbb R}^q$, i.e., $\mathbf y \circ\mathbf x=(y_1x_1,...,y_q x_q)$.
For $\mathbf x \in {\mathbb R}_+^q$, we define $\pos(\mathbf x)$ to be the number of positive coordinates of $\mathbf x$, which is $0$ if $\mathbf x$ is the zeroth vector $\mathbf 0 := (0,\dots, 0)$.
Let $\llbracket p,q \rrbracket = {\mathbb Z}_+ \cap [p,q]$. For any $i \in \llbracket 1,q \rrbracket$, let $\gre_i$ denote the vector having all coordinates null except the $i$th one, equal
to $1$, and let $\gre$ denote the unit vector whose components are all equal $1$; $\gre := (1,\dots, 1)$.
For any $\mathbf x \in {\mathbb R}_+^q$ we denote by $\parallel \mathbf x \parallel = \sum_{i=1}^q x_i$ and $\parallel \mathbf x \parallel_2 = \sum_{i=1}^q (x_i)^2$.
For any two real numbers $a$ and $b$, let $a \vee b$ and $a \wedge b$ denote the maximum and the minimum of $a$ and $b$, respectively. Let $a^+= a \vee 0$.
\subsection{Organization}
The rest of the paper is organized as follows: We provide a detailed literature review in Section \ref{secLit}.
The model, including the family of allocation policies, which we call $\mathbf{p}$-allocation policies, is formally introduced in Section \ref{sec:model}.
In Section \ref{sec:suff} we study a class of $\mathbf{p}$-allocation policies for which
the condition $\rho<1$ implies that the system is stable. The insufficiency of this traffic condition to imply stability in general is demonstrated in Section \ref{secInsufficiency}.
In Section \ref{sec:simu} we present simulation results suggesting that our main results extend to workload-based routing policies.
The proofs of two technical lemmas appear in Appendix \ref{sec:proofs}, and two additional supporting results appear in Appendix \ref{sec:aux}.
\section{Related Literature} \label{secLit}
\paragraph{{\em Non-monotonic parallel queues.}}
Under JSW, the dynamics of the system, as well as the sojourn time of jobs, coincide with those of a single-queue $s$-server system operating under the First In First Out (FIFO) service policy.
In particular, that $\rho < 1$ is a necessary and sufficient condition for the stability of the system under JSW follows from the basic stability theory of the $GI/GI/s$ queue,
first proved in the seminal paper \cite{KW55}.
The sufficiency of the condition $\rho < 1$ for stability of the $G/G/s$ queue was generalized in \cite{Brandt85} to the stationary ergodic framework, namely,
when both the inter-arrival and service-time sequences are time-stationary and ergodic,
but not necessarily independent; see also \S 2.2 of \cite{BacBre02}. This general result was proved using a backwards scheme
of the Loynes type \cite{Loynes62}, building on the fact that the (random) updating map of the stochastic recursive sequence representing the system is non-decreasing for the coordinate-wise vector ordering.
For the same reason, JSW is the unique routing rule within the class of semi-cyclic policies introduced in \cite{SW03}, which renders the total workload to be a non-decreasing function of $s$ at all times; see \cite{Moy17_b}.
Therefore, the stability region under allocation policies {\em other than JSW} cannot simply be characterized via a Loynes-type construction,
and we must therefore adopt a different approach.
\iffalse
In the absence of tractable tools to directly analyze these systems, it is significant that a good model for systems with allocation errors based on workloads,
is precisely given by the $\mathbf{p}$-allocation policies introduced in this paper. Specifically, targeting the server with the shortest workload, and choosing by mistake the server of second shortest workload with
probability $p$ can be approximated by a $\mathbf{p}$-allocation system for $\mathbf{p}=(0,1,0,...,0)$. \ohad{[[Latter discussion may be better to have in the intro to motivate the setting]]}
\pasc{[[I agree with that, but I don't know where to put and how to articulate it with the intro. It not that bad if we leave it here.]]}
As a matter of fact, as the simulations we present in Section \ref{sec:simu} suggest, the bounds we obtain for the stability regions of the corresponding $\mathbf{p}$-allocation policy seem to
match the bound of the stability region for a system in which the second-shortest {workload} is reached with probability $p$.
This statement is not rigorously made in the present paper, and is left as a conjecture.
\fi
\paragraph{{\em JSQ systems.}}
The JSQ policy was first introduced in \cite{Haight58} for a system with two servers, each having a different service rate.
The first proof that the condition $\rho < 1$ is necessary and sufficient for
a Markovian parallel-server system under JSQ to be stable (admit a steady state) appears in \cite[Theorem 1]{King61} for a system with $s=2$ servers, building on
a straightforward Lyapunov stability argument. The main goal of \cite{King61} is to characterize the stationary distribution of the (stable) system via generating functions.
Explicit computation of this distribution is provided in \cite{FMcK77}. A system with finite buffers is studied in \cite{DFT17}, which provides closed-form expressions for the loss probabilities.
A non-idling version of JSQ was proposed and analyzed in \cite{Lu11} which considers systems with more than one dispatcher, and analyzes how to balance information regarding idle servers among those dispatchers.
There are several papers that study JSQ in asymptotic regimes. In addition to \cite{Gamarnik_JSQ}, which was discussed above, we mention \cite{Gra00}, which
identifies a mean-field limit, and shows the chaoticity of the system as the number of servers increases.
An Ornstein--Uhlenbeck limit for the same model is obtained in \cite{Gra05}.
In general, Lyapunov-stability arguments, as in \cite{King61}, can be hard to generalize to higher-dimensions, because of the need to control the drifts of the process
at all states outside some compact subset of the state space.
Our proof of Theorem \ref{thMaximal} below, that $\rho < 1$ implies that the system is stable for a certain subset of control parameters, is a generalization
of \cite[Theorem 1]{King61}, both because it allows any number of servers $s$, and because it considers a larger family of routing policies, of which JSQ is a special case.
In the latter regard, it also generalizes Corollary \ref{coroStable}.
Our proof is achieved by employing a certain partial-order relation (see Definition \ref{defOrder} in Section \ref{subsec:order}) in conjunction with a Lyapunov-stability argument.
\paragraph{{\em Power-of-$d$ allocations.}}
The PW($d$) policy was first studied in \cite{Vved96} and \cite{Mitzenmacher96}, which also coined the term ``supermarket model'' to describe a system operating under this control.
The supermarket model has since received substantial attention due to its practical and theoretical significance.
Both \cite{Gamarnik_supermarket} and \cite{Brightwell12} study the supermarket model in heavy traffic, namely, as the traffic intensity approaches $1$.
The rate at which the equilibrium distribution of a typical queue converges to the limiting one in the total-variation distance is studied in \cite{Luczak07}, which also quantifies the chaotic behavior
of the system, asymptotically, namely, the rate at which the joint distribution of any fixed number of queues converges to the limiting product-form distribution.
Finally, we mention a recent game-theoretic supermarket model in \cite{SupermarketGame}, which is also analyzed asymptotically, as the number of servers and arrival rate increase to infinity.
It is significant that the asymptotic result regarding the doubly exponential decay rate of the queue size in equilibrium does not necessarily hold for general service-time distributions.
Indeed, \cite{Bramson10} shows that, for some power-law service-time distributions, the equilibrium queue sizes decay at an exponential, or even polynomial, rate,
depending on the power-law exponent and the number of sampled queues $d$.
\paragraph{{\em Robustness of Control.}}
The dynamics of a system under a given control are typically studied in idealized settings, which do not fully hold in practice. In particular, even small deviations from the theoretical
implementation of a control (due to, e.g., human or measurement errors, discretization of a continuous control process, delays in making or applying a decision, etc.),
can in turn lead to substantial perturbations from theoretically predicted performance.
Such discrepancies between theory and implementation constitute an important area of research in dynamical control theory (see, e.g., \cite[\S 14]{khalil02} and \cite{Liberzon03}),
but received little attention in the queueing literature.
In \cite{PWchatter} it is shown how the implementation of a control, that has theoretically desirable performance in a certain asymptotic regime, can lead
to chattering of the queue process and, in turn, to {\em congestion collapse}, namely, to a severe overload that is solely due to the implementation of the control.
We refer to \cite[Section 9]{PWchatter} for a detailed, albeit informal, discussion on how small perturbations from idealized control settings can have substantial impacts on the performance
of queueing systems.
\paragraph{{\em Instability of Subcritical Systems.}}
Congestion collapse is related to the more general research area regarding instability of subcritical networks, which was initiated with the presentation of
the (deterministic) Lu-Kumar network studied in \cite{LuKumar}, and its stochastic counterpart, the Rybko-Stolyar network \cite{RySto92}; see also \cite{BramsonBook} and
\cite{MoyalPerry} for applications and literature reviews.
A non-idling policy is considered in \cite{Moy17_a}, in which an arrival is routed to the queue having the $2$nd shortest workload.
A sufficient condition for stability, that is strictly stronger than $\rho < 1$, is provided,
and it is conjectured that the latter condition is also necessary.
In ending we remark that the possibility of experiencing congestion collapse in parallel-server systems can be considered a triviality for vacuous choices of the control.
For example, if the arrival rate $\lambda$ is larger than the service rate $\mu$ (but is smaller than $s\mu$), then the policy that routes all arrival to the same server is clearly unstable.
Here, however, we perform a refined analysis of the (in)stability region for the non-idling version of JSQ when routing errors occur with a nonnegligible probability.
\section{The Model} \label{sec:model}
We consider the following class of parallel systems: There are $s$ servers, each having its own infinite buffer for waiting jobs.
Jobs arrive to the system following a homogeneous Poisson process with intensity $\lambda$, and join one of the servers according to a routing policy from a class of policies that will be formally defined immediately.
If the server to which a job is routed is idle, that job enters service immediately;
otherwise, it joins the end of the server's dedicated queue, waiting for its turn to be served (there is no jockeying between queues).
All jobs are statistically equivalent, requiring
i.i.d.\ service times that are exponentially distributed with mean $1/\mu$, regardless of the server.
We let $\rho := \lambda/(s\mu)$ denote the traffic intensity to the system.
Even though this routing mechanism is an erroneous execution of JSQ, we treat it as a control,
which we call a ``$\mathbf{p}$-allocation policy'', where $\mathbf{p}$ is the {\em allocation probability vector} $\mathbf{p}=(p_1,p_2,...,p_s)$.
With this view, the PW($d$), and in particular, JSQ and uniform splitting,
become special cases of the $\mathbf{p}$-allocation policy; see \eqref{eq:pU}-\eqref{eq:pPWd} below.
The class of allocation policies we consider depend only on the queue sizes (number of customers in service plus the number of customers waiting in line) of the servers.
To determine the server allocations without ambiguity, we assume that the servers
are re-labeled as $1,2,...,s$ upon each event (arrival or departure), such that $i < j$ if the queue size for server $i$ is no larger than the queue for server $j$.
Servers having the same queue size have consecutive labels; the labeling within each such group of servers can be arbitrary, but for concreteness, we assume that it is made uniformly at random.
Therefore, with $Q_i(t)$ denoting the queue size of server $i$ at time $t \ge 0$,
the vector $Q(t):=\left(Q_1(t),...,Q_s(t)\right)$ is an element of $\ord{{\mathbb Z}_+^s}$.
We let $Q_{\Sigma}(t)=\sum_{i=1}^s Q_i(t)$ denote the total number of customers in the system at time $t$.
Let ${\mathbb P}^s$ denote the family of probability vectors on $[0,1]^s$, namely, a vector $\mathbf{p} := (p_1, \dots, p_s)$ is in ${\mathbb P}^s$ if $p_i \in [0,1]$, $1 \le i \le s$, and $\sum_{i=1}^s p_i = 1$.
\begin{definition}
We call a routing policy a {\bf $\mathbf{p}$-allocation policy}, and call $\mathbf{p}$ the {\bf allocation (probability) vector}, $\mathbf{p} \in {\mathbb P}^s$,
if, upon arrival, a customer is sent to server $i$ with probability $p_i$, independently of everything else.
A $\mathbf{p}$-allocation policy is said to be {\em non-idling} if an incoming job is routed to an idle server, whenever there is one upon that job's arrival,
and is otherwise routed to server $i$ with probability $p_i$, independently of everything else.
\end{definition}
In particular, for each $\mathbf{p}$-allocation policy there is a corresponding non-idling version which uses the same allocation vector to route jobs that arrive when all servers are busy,
and otherwise route the arrivals to one of the idle servers.
Observe that if two or more queues are equal upon an arrival, a $\mathbf{p}$-allocation policy
assigns the incoming customer to one of those queues with equal probability. Indeed, if a customer enters the system at time $t$ and the consecutive indices $j,j+1,...,k-1,k$ are such that $Q_{j-1}(t^-)<Q_j(t^-)=Q_{j+1}(t^-)=\cdots=Q_{k-1}(t^-)=Q_{k}(t^-)<Q_{k+1}(t^-)$,
then by uniformity of the choice of labeling, server $\ell$ is chosen with the probability
$${1 \over k-j+1}\sum_{i=j}^k p_i, \quad \text{for any $\ell \in \llbracket j,k \rrbracket$.}$$
A particular class of $\mathbf{p}$-allocation policies is the PW($d$) policy, and its special cases, uniform splitting and JSQ.
\begin{itemize}
\item For uniform splitting, the allocation vector is
\begin{equation}
\label{eq:pU}
\mathbf{p}^{(1)}:=\left(1/s,...,1/s\right).
\end{equation}
\item For JSQ, we have
\begin{equation}
\label{eq:pJSQ}
\mathbf{p}^{(s)}:=(1,0,...,0).
\end{equation}
\item More generally, under PW($d$) an arriving job is routed to server $i$ if it is one of the $d$ draws,
and the other $d-1$ servers drawn have indices in $\llbracket i+1,s \rrbracket$.
Then the allocation vector for this policy is (with ties broken uniformly at random)
\begin{equation}
\label{eq:pPWd}
\mathbf{p}^{(d)}:=\left(p^{(d)}_1,...,p^{(d)}_s\right)
=\begin{cases}
p^{(d)}_i ={s-i \choose d-1} / {s \choose d},&\quad i\in\{1,...,s-d+1\};\\
p^{(d)}_i =0,&\quad i\in \{s-d+2,\dots,s\}.
\end{cases}\end{equation}
\end{itemize}
Observe that \eqref{eq:pU} and \eqref{eq:pJSQ} are consistent with \eqref{eq:pPWd}, and are achieved by taking $d=1$ and $d=s$, respectively.
\subsection{The Stability Regions of the Allocation Policies}
It is immediate that for any probability vector $\mathbf{p} \in {\mathbb P}^s$, the process $Q$ is an $\ord{{\mathbb Z}_+^s}$-valued continuous-time Markov chain (CTMC).
The {\em stability region} of the parallel-server system corresponding to the $\mathbf{p}$-allocation policy, which we denote by $\mathcal S(\mathbf{p})$,
is then defined as the set of values of the traffic intensity $\rho = \lambda/(s\mu)$ under which $Q$
is stable in the sense that it is a positive recurrent CTMC. Then for any $\mathbf{p}$-allocation vector we define
\begin{align*}
\mathcal S(\mathbf{p}) &:= \left\{\rho \in [0,1)\,:\, \mbox{$Q$ is positive recurrent under the $\mathbf{p}$-allocation policy}\right\};\\
\mathcal S^\text{\textsc{ni}}(\mathbf{p}) &:= \left\{\rho \in [0,1)\,:\, \mbox{$Q$ is positive recurrent under the {\bf non-idling} $\mathbf{p}$-allocation policy}\right\}.
\end{align*}
It is intuitively clear that the stability region under a non-idling $\mathbf{p}$-allocation policy cannot be smaller than the stability region under the same allocation vector when the policy is not non-idling.
This is formally proved in the next proposition.
\begin{proposition}
\label{prop:busyvsidling}
$\maS(\mathbf{p}) \subseteq \maS^\text{\textsc{ni}}(\mathbf{p})$ for all $\mathbf{p} \in {\mathbb P}^s$.
\end{proposition}
\begin{proof}
Consider an allocation vector $\mathbf{p}$ together with an arrival rate $\lambda$ and service rate $\mu$, such that $\rho \in \maS(\mathbf{p})$,
and the corresponding queue process $Q$. Observe that the traffic intensity $\rho$ is then necessarily less than 1. Denote by $Q^\text{\textsc{ni}}$ the queue process in the system operating under the corresponding non-idling $\mathbf{p}$-allocation policy,
and by $Q^{(s)}$ the queue process of a system of same traffic load, operating under the JSQ policy (equivalently, under the PW($s$) policy).
It is easily seen that the process $Q^{\text{\textsc{ni}}}$ coincides in distribution with the process $Q$ on the subset $F := \{\mathbf x \in {\mathbb R}^s : x_i \ge 1, i \in \llbracket 1, s \rrbracket\}$ of the state space,
and with $Q^{(s)}$ on the complement subset $F^c := \{\mathbf x \in {\mathbb R}^s : \mathbf x \notin F\}$.
The result follows from the fact that the process $Q$ is ergodic by assumption, together with the fact that the process $Q^{(s)}$ is ergodic for any $\rho < 1$ due to \eqref{eq:ineqPW}.
(Recall that $Q_\Sigma^{(1)}$ in \eqref{eq:ineqPW} is the queue under uniform splitting, which operates like $s$ independent $M/M/1$ queues, each with traffic intensity $\lambda/\mu < 1$.)
\end{proof}
\begin{remark}
It is significant that $\maS(\mathbf{p}) \ne \maS^\text{\textsc{ni}}(\mathbf{p})$ in general; in particular, there exist $\mathbf{p}$-allocation policies for which $\maS^\text{\textsc{ni}}(\mathbf{p})$ is strictly larger than $\maS(\mathbf{p})$.
To see why the proof of Proposition \ref{prop:busyvsidling} cannot be adapted to show the containment in the other direction
(i.e., to show that $\maS^\text{\textsc{ni}}(\mathbf{p}) \subseteq \maS(\mathbf{p})$), consider a $\rho$ for which $Q^\text{\textsc{ni}}$ is stable under some $\mathbf{p}$-allocation policy.
Note that, if $Q$ is not known to be an ergodic CTMC at the outset, then there is no guarantee that the expected hitting time of the set $F$ by the process $Q$ is finite,
or even that this hitting time is finite w.p.1. Therefore, even though the expected hitting time of $F^c$ by $Q$ is finite, because $Q^\text{\textsc{ni}}$ is assumed
to be ergodic and $Q$ is locally distributed the same as $Q^\text{\textsc{ni}}$ while in $F$, it is possible that the process $Q$ is absorbed in $F^c$.
\end{remark}
As an immediate consequence of Proposition \ref{prop:busyvsidling} we see that, if stability is proved for given system's parameters and for a specific $\mathbf{p}$-allocation policy (a specific allocation vector $\mathbf{p}$),
then the system is also stable under the non-idling version of that policy.
On the other hand, a system is unstable if operated under a $\mathbf{p}$-allocation policy, if it is shown to be unstable under its non-idling version.
\section{Maximal $\mathbf{p}$-Allocation Policies} \label{sec:suff}
In this section we identify a sub-class of $\mathbf{p}$-allocation policies under which the stability region is the interval $[0,1)$.
We call such an allocation policy {\em maximal}, since its stability region is the largest possible.
\subsection{Preliminary}
\label{subsec:order}
We will state a sufficient condition on the $\mathbf{p}$-allocation probability that ensures that the system is stable if $\rho < 1$.
That condition is expressed in terms of the following partial order on ${\mathbb R}^s_+$.
\begin{definition} \label{defOrder}
Let $\mathbf a = (a_1,...,a_s)$ and $\mathbf b=(b_1,...,b_s)$ be elements of ${\mathbb R}_+^s$, $s\ge 1$.
We say that $\mathbf a$ is smaller than $\mathbf b$ in the ``generalized Schur-convex'' order, and write $\mathbf a \preceq_{\textsc{gsc}} \mathbf b$, if
\[\sum_{i=k}^s a_i \le \sum_{i=k}^s b_i\mbox{ for all }k \le s.\]
\end{definition}
The relation ``$\preceq_{\textsc{gsc}}$" defines a partial ordering on ${\mathbb R}_+^s$ that is a variant (for non-necessarily ordered vectors)
of the partial semi-ordering ``$\prec_{\textsc{cx}}$" introduced in Definition 3 of \cite{Moy17_b}, which itself generalizes the well-known Schur-convex partial semi-ordering ``$\prec_{\textsc{scx}}$"
(see e.g. \cite{MO79}) to vectors of different total sums. Specifically, we have $\mathbf a \preceq_{\textsc{gsc}} \mathbf b$ if and only if $\mathbf a \prec_{\textsc{cx}} \mathbf b$ for any $\mathbf a,\mathbf b \in \ord{{\mathbb R}_+^s}$,
and $\mathbf a \preceq_{\textsc{gsc}} \mathbf b$ if and only if $\mathbf a \prec_{\textsc{scx}} \mathbf b$ for any $\mathbf a,\mathbf b \in \ord{{\mathbb R}_+^s}$ such that $\parallel \mathbf a \parallel = \parallel \mathbf b \parallel$.
Observe that, for any random variables $X$ and $Y$ having respective probability mass functions $\mathbf{p}_X$ and $\mathbf{p}_Y$ in ${\mathbb P}^s$ and values in
$\llbracket 1,s \rrbracket $, it holds that
$X \le_{st} Y$ if and only if $\mathbf{p}_X \preceq_{\textsc{gsc}} \mathbf{p}_Y.$
The following monotonicity result is proved in the appendix.
\begin{lemma}
\label{lemma:order}
Let $\mathbf a$ and $\mathbf b$ be two vectors in ${\mathbb R}_+^s$ such that $\mathbf a \preceq_{\textsc{gsc}} \mathbf b$, and let $\mathbf x \in \ord{{\mathbb R}_+^s}$.
Then
$$\mathbf x \circ \mathbf a \preceq_{\textsc{gsc}} \mathbf x \circ \mathbf b.$$
\end{lemma}
\subsection{A Sufficient Condition for Stability}\label{subsec:maximal}
The main result of this section shows that if, in addition to $\rho < 1$, it holds that the $\mathbf{p}$-allocation probability vector is no
larger, in the $\preceq_{\textsc{gsc}}$ order, than the uniform probability on $\llbracket 1,s \rrbracket$, namely, if $\mathbf{p}\in{\mathbb P}} \def\Pb{\P} \def\Pbi{\Pi^s$ satisfies
\begin{equation} \label{Ncond}
\mathbf{p} \preceq_{\textsc{gsc}} \mathbf{p}^{(1)},
\end{equation}
for $\mathbf{p}^{(1)}$ in (\ref{eq:pU}), then the system is stable.
\begin{theorem}\label{thMaximal}
If $\mathbf{p}$ satisfies \eqref{Ncond}, then $\maS(\mathbf{p}) = [0,1)$, namely, the $\mathbf{p}$-allocation policy is maximal.
\end{theorem}
\begin{proof}
For $n \ge 0$, let $T_n$ denote the $n$th transition epoch of the CTMC $Q$, with $T_0 = 0$, and consider the embedded discrete-time Markov chain (DTMC) $\{Q_n : n \ge 0\}$ defined via
$Q_n := Q\left(T_n\right)$. We prove the result via a Lyapunov stability argument, employing the Lyapunov function $V: \ord{{\mathbb Z}_+^s} \longrightarrow {\mathbb R}_+$ defined by $V(\mathbf x) = \| \mathbf x \|_2$.
Let
\[\mathcal K = \left\{\mathbf x \in \ord{{\mathbb Z}_+^s}\,:\, \sum_{i=1}^s x_i \le {s(\lambda + s\mu) \over 2(s\mu-\lambda)} \right\}.\]
Then, for any $n \ge 1$ and
$\mathbf x=(x_1,...,x_s) \in \mathcal K^c \cap \ord{{\mathbb Z}_+^s}$ we have
\begin{multline} \label{eqLyp1}
\esp{V\left(Q_{n+1}\right) - V\left(Q_n\right) \mid Q_n = \mathbf x}\\
\begin{aligned}
&= \sum_{i=1}^s {\lambda \over \lambda + \pos(\mathbf x)\mu}p_i\left((x_i + 1)^2 - (x_i)^2\right)
+ \sum_{i=1}^s {\mu \over \lambda +\pos(\mathbf x)\mu}\left(((x_i - 1)^+)^2 - (x_i)^2\right)\\
&= {1 \over \lambda + \pos(\mathbf x)\mu}\left(2\left(\lambda\sum_{i=1}^s p_ix_i-\mu\sum_{i=1}^s x_i\right) + \lambda +\pos(\mathbf x)\mu \right).
\end{aligned}\end{multline}
Applying Lemma \ref{lemma:order} with $\mathbf a := \mathbf{p}$, $\mathbf b := \mathbf{p}^{(1)}$, for $\mathbf{p}^{(1)}$ in (\ref{eq:pU}), and the ordered vector $\mathbf x$, we obtain that $\mathbf x \circ\mathbf{p} \preceq_{\textsc{gsc}} \mathbf x\circ \mathbf{p}^{(1)}$,
and in particular that $\sum_{i=1}^s p_ix_i \le {1 \over s}\sum_{i=1}^s x_i.$ As $\pos(\mathbf x) \le s$, this entails that the last expression in \eqref{eqLyp1} is less than or equal to
\[{1 \over \lambda + \pos(\mathbf x)\mu}\left(2\left({\lambda \over s}-\mu\right)\sum_{i=1}^s x_i + \lambda +s\mu \right),\]
which is strictly negative for $\mathbf x \notin \mathcal K$.
In particular, for all $\mathbf x=(x_1,...,x_s) \in \mathcal K^c \cap \ord{{\mathbb Z}_+^s}$ and all $n$,
\[\esp{V\left(Q_{n+1}\right) - V\left(Q_n\right) \mid Q_n = \mathbf x}<0.\]
We deduce from the Lyapunov-Foster Theorem (see, e.g., \cite[\S 5.1]{Bre99}) that the DTMC $\{Q_n : n \ge 1\}$ is positive recurrent. In turn, this implies that the CTMC $Q$ is positive recurrent as well,
by Theorem 6.18 in \cite{Kulkarni17}, as the rate of the exponentially distributed holding time in each of the states is bounded from below by $\lambda$.
\end{proof}
As discussed in Section \ref{secLit}, the maximality of PW($d$) follows from \eqref{eq:ineqPW} which is proved via coupling arguments.
Theorem \ref{thMaximal} provides an independent proof of this result.
\begin{corollary}
\label{cor:JSQPowerofd}
JSQ, uniform splitting, and PW($d$), $d \ge 2$, are maximal allocation policies.
\end{corollary}
\begin{proof}
Recall (\ref{eq:pU}), (\ref{eq:pJSQ}) and (\ref{eq:pPWd}).
As $\mathbf{p}^{(s)} \preceq_{\textsc{gsc}} \mathbf{p}^{(1)}$ (and $\mathbf{p}^{(1)} \preceq_{\textsc{gsc}} \mathbf{p}^{(1)}$ by definition),
both the JSQ and uniform splitting policies satisfy the assumptions of Theorem \ref{thMaximal}.
To prove the statement for PW$(d)$ policies, $d \in \llbracket 2,s-1 \rrbracket$, fix such $d$ and observe that, for any $k \le s-d+1$,
the quantity $\sum_{i=k}^s p^{(d)}_i$ is the probability that the
$d$ uniformly drawn servers have indices in $\llbracket k,s \rrbracket$, which is equal to ${s-k+1 \choose d} /{s \choose d}.$
From this, we deduce that
\begin{equation}
\label{eq:compared2}
\mathbf{p}^{(d)} \preceq_{\textsc{gsc}} \mathbf{p}^{(2)}.
\end{equation}
Indeed, for any $k\ge s-d+2$ we have $\sum_{i=k}^s p^{(d)}_i =0$, whereas for any $k \le s-d+1$, we have that
\[{\sum_{i=k}^s p^{(d)}_i \over \sum_{i=k}^s p^{(2)}_i} = {{s-k+1 \choose d}{s \choose 2} \over {s \choose d}{s-k+1 \choose 2} } = {(s-d)...(s-d-k+2) \over (s-2)...(s-2-k+2)} \le 1,\]
whence (\ref{eq:compared2}).
Now, $\sum_{i=s}^s p^{(2)}_i = 0$, while for all $k\le s-1$,
\[\sum_{i=k}^s p^{(2)}_i = {1 \over {s \choose 2}} \sum_{i=k}^{s} (s-i) = {s-k \over s-1}{s-k+1 \over s} \le {s-k+1 \over s} = \sum_{i=k}^s{1\over s},\]
implying that $\mathbf{p}^{(2)} \preceq_{\textsc{gsc}} \mathbf{p}^{(1)}$. This, together with (\ref{eq:compared2}) and the transitivity of ``$\preceq_{\textsc{gsc}}$", shows that
$\mathbf{p}^{(d)} \preceq_{\textsc{gsc}} \mathbf{p}^{(1)}$. Thus, PW($d$) is maximal by Theorem \ref{thMaximal}.
\end{proof}
Theorem \ref{thJ2SQp} and Proposition \ref{prop:busyvsidling} also imply
\begin{corollary}
\label{cor:maximalnonidling}
$\maS^\text{\textsc{ni}}(\mathbf{p}) = [0,1)$ for any $\mathbf{p}$ satisfying (\ref{Ncond}).
In particular, the non-idling versions of uniform splitting and PW($d$) allocation policies are maximal.
\end{corollary}
\section{Insufficiency of the Condition $\rho < 1$}
\label{secInsufficiency}
Theorem \ref{thMaximal} requires, in addition to the usual traffic condition $\rho < 1$, that the allocation probability $\mathbf{p}$ is smaller, in the generalized Schur convex order,
than the uniform probability distribution on $\llbracket 1,s \rrbracket$.
We now demonstrate that the latter condition is not futile, and that the traffic condition by itself does not imply stability of a system.
To provide simple counter-examples, we consider $\mathbf{p}_{p,2}$-allocation probabilities, with $\mathbf{p}_{p,2} := \left(1-p,p,0,...0\right)$, for $0<p<1$. In other words, any arrival is
routed to the shortest queue with probability $q := 1-p$,
or to the second-shortest queue with probability $p$ (ties broken by a uniform draw from the relevant queues).
We interpret $p$ as the probability that the controller (or the arriving customer) is making an error in distinguishing between the shortest and the second shortest queue.
We denote this $\mathbf{p}_{p,2}$-allocation policy by J2SQ$(p)$, and its corresponding non-idling version by J2SQ$^\text{\textsc{ni}}(p)$.
Under the non-idling version of the latter policy, the controller identifies idle servers, but otherwise has a probability $p$ of making an error by sending an arrival to the second-shortest queue.
Thus, when all the servers are busy, errors are made according to a Bernoulli trial with a probability $p$ of ``success.''
Observe that, for $p^{(1)}$ in \eqref{eq:pU},
\begin{equation} \label{suff2}
\mathbf{p}_{p,2} \preceq_{\textsc{gsc}} \mathbf{p}^{(1)} \quad\mbox{if and only if}\quad p \le 1-1/s.
\end{equation}
For a given number of servers $s \ge 1$ and an error probability $p > 0$, let
\begin{equation}
\label{eq:defVcr}
V_{\text{cr}}(p) := {s-1 \over 2 s} \left(1+\sqrt{1+{4 \over p(s-1)}}\right).
\end{equation}
We refer to $V_{\text{cr}}(p)$ as the {\em critical value} (for stability; see Theorem \ref{thJ2SQp} below).
Simple algebra shows that
\begin{equation} \label{Vcr-cond}
V_{\text{cr}}(p) < 1 \quad\mbox{if and only if}\quad p>1-1/s.
\end{equation}
\begin{theorem}\label{thJ2SQp}
$\maS^\text{\textsc{ni}}(\mathbf{p}_{p,2}) \subset [0, V_{\text{cr}}(p)\wedge 1)$ for any $p\in [0,1]$.
\end{theorem}
We defer the proof of Theorem \ref{thJ2SQp} to \S \ref{secProof-J2SQp}.
In view of (\ref{suff2}) and \eqref{Vcr-cond}, Theorems \ref{thMaximal} and \ref{thJ2SQp} immediately imply the following.
\begin{corollary} \label{coroJ2SQp}
J2SQ$^\text{\textsc{ni}}(p)$ is maximal if and only if $p \le 1 - 1/s$.
\end{corollary}
In view of Proposition \ref{prop:busyvsidling}, Corollary \ref{coroJ2SQp} implies that the stability region under the $\mathbf{p}_{p,2}$-allocation policy is also characterized by the value of $p$.
\begin{corollary} \label{coroJ2SQid}
$\maS(\mathbf{p}_{p,2}) \subseteq [0, V_{\text{cr}}(p)\wedge 1)$ for all $p \in [0, 1]$. In particular J2SQ$(p)$ is maximal if and only if $p \le 1 - 1/s$.
\end{corollary}
\subsection{Join the $2$nd Shortest Queue Allocation Policy}
\label{subsec:J2SQ}
The proof of Theorem \ref{thJ2SQp} involves some technical details that obscure the main intuition for the instability whenever the error probability $p$
is greater than $1-1/s$.
Simplicity is achieved by considering the special case $p=1$, which is tantamount to having the allocation vector be $\mathbf{p}_{1,2} := (0,1,0,...,0)$.
In this case, the routing policy is simply {\em join the second shortest queue}, which we denote by J2SQ; we denote its non-idling version by J2SQ$^\text{\textsc{ni}}$.
It follows from \eqref{Vcr-cond} that $V_{\text{cr}}(1)$, defined in (\ref{eq:defVcr}) with $p=1$, satisfies $V_{\text{cr}}(1) < 1$.
\begin{proposition} \label{prop:J2SQ}
$\maS^\text{\textsc{ni}}(\mathbf{p}_{1,2}) \subset [0, V_{\text{cr}} (1))$.
In particular, J2SQ$^\text{\textsc{ni}}$ is non-maximal.
\end{proposition}
\begin{proof}
Let
\begin{equation} \label{setS}
\maA := \{x \in {\mathbb Z}_+^s : x_1 \in \{0,1\}, ~ x_i \ge 2, ~ i \in \llbracket 2, s\rrbracket \},
\end{equation}
and note that,
whenever exactly one of the servers has no jobs waiting in queue, the process $Q$ takes values in the set $\maA$, that is, if $Q_i(t) \in \{0,1\}$ for exactly one $i \in \llbracket 1,s\rrbracket$, then $\ord{Q(t)} \in \maA$.
Let $\mathbf s := (0,2,\dots,2) \in \maA$, and for $k = 1,2,\dots$, let $V_k$ denote the event that the $k$th visit of $\ord{Q} := \{\ord{Q(t)} : t \ge 0\}$ to $\maA$ starting at $\mathbf s$ occurs,
where that $k$th visit begins at time $t_k \ge 0$ if $\ord{Q(t_k-)} \ne \mathbf s$ and $\ord{Q(t_k)} = \mathbf s$, and ends when $\ord{Q}$ exits the set $\maA$, namely, at a random time $t_k+T_k$
such that $\ord{Q(t_k+T_k-)} \in \maA$ and $\ord{Q(t_k+T_k)} \notin \maA$.
We will henceforth refer to such a visit to $\maA$ (which begins at $\mathbf s$) simply as a ``visit'', and to $T_k$ as the length of the $k$th visit.
We prove the result by making the contradictory assumption that $Q$ is positive recurrent, and thus ergodic. Under this ergodicity assumption, $P(V_k) = 1$ for all $k \ge 1$, and
the lengths of the visits $\{T_k : k \ge 1\}$ are IID, by virtue of the strong Markov property, with $P(0 < T_1 < \infty) = 1$ and $E[T_1] < \infty$.
Now, during the $k$th visit, namely, during the intervals $I_k := [t_k, t_k+T_k)$, the ordered queue process $\ord{Q}$ operates as follows:
Any arrival is routed to server $1$, if this server is idle. Otherwise, the arrival is routed to server $2$.
Hence, over each interval $I_k$, we can view server $1$ as a single-server loss system (to which we refer as the ``{front server}''),
with the overflow from this front server constituting the arrival process to a system with $s-1$ homogeneous servers operating under the JSQ routing policy (to which we refer as the ``back servers'').
If the first arrival during the $k$th visit finds the system in state $\mathbf s$,
then that arrival is routed to server $1$ (which is idle).
Let $A_k$ denote this latter event:
with $a_k$ denoting the time of the first arrival after time $t_k$, $A_k := \{Q(a_k-) =\mathbf s\}$. By the strong Markov property, the events $A_1, A_2, \dots$ are
independent and have the same probability, and it clearly holds that $P(A_1) > 0$.
By Lemma \ref{lmStat} in Section \ref{appendix}, the first arrival to a single-server loss system puts this system in steady state. In particular, on $[a_1,t_1+T_1)$
the instantaneous probability that an arrival finds server $1$ busy, and is therefore ``overflowed'' to the back system, is $\lambda/(\lambda+\mu)$.
Thus, due to the PASTA (Poisson Arrivals See Time Average) property,
the ``arrival rate'' to the back servers during $[a_1,t_1+T_1)$ is $\alpha:=\lambda^2/(\lambda+\mu)$. It follows that the process $\ord{Q_{-1}} := \ord{(Q_2,...,Q_s)}$ coincides in distribution with the ordered
queue-length process of a JSQ system with $s-1$ servers and arrival rate $\alpha$.
Next, observe that $V_{\text{cr}}(1) < 1$ by \eqref{Vcr-cond}, and that $V_{\text{cr}}(1)$ is thus the only positive root of the polynomial
$x \mapsto s^2x^2 -(s-1)sx -(s-1).$
It then readily follows that, for any $\rho > 0$,
\begin{equation}
\label{eq:equivregions}
{(s\rho)^2 \over 1+s\rho} > (s-1) \quad\mbox{ if and only if }\quad \rho > V_{\text{cr}}(1).
\end{equation}
Therefore, if $\rho=\lambda/s\mu > V_{\text{cr}}(1)$, then $\alpha > (s-1)\mu$, and so the probability that
the process $\ord{Q_{-1}}$ will never reach a state in which the smallest of the $s-1$ queues is equal to $1$ is strictly positive, implying that $P(T_1 = \infty) > 0$.
If $\alpha = (s-1)\mu$ (so that $\rho = V_{\text{cr}}(1)$), then $\ord{Q_{-1}}$ is null recurrent, and the expected time until a state with the smallest queue being $1$ is reached is infinite.
In either case, the expected length of a visit is infinite, namely, $E[I_1] = E[T_1] = \infty$, in contradiction to the assumed ergodicity of $Q$.
\end{proof}
The proof of Proposition \ref{prop:J2SQ} makes the reason for the instability of the system we consider apparent:
Eventually, the system must split into a front loss single-server system whose overflow process constitutes the arrival process to a back $(s-1)$ parallel-server system operating under the JSQ policy.
If the overflow process is larger than the service capacity of the ``back servers'', then the system as a whole is unstable, because the expected time for it to exit this split structure is infinite.
In particular, once the system splits, the expected time until $Q$ reaches states that are not in the set $\maA$ defined in \eqref{setS} is infinite.
In fact, the regenerative structure of $Q$ implies that, if the traffic intensity is {\em strictly larger} than the critical value, i.e., if $\rho > V_{\text{cr}}(p,s)$,
then $P(T_k = \infty \text{ for some } k \ge 1) = 1$ and $\|Q(t)\| \longrightarrow \infty$ w.p.1 as $t\rightarrow\infty$.
\begin{remark}
\rm
We note that the (in)stability of the back system is solely determined by
the arrival rate to that system and mean service time $\mu$, and is independent of any other distributional assumptions. In particular, it does not rely on the service time distribution.
Furthermore, the blocking probability of a loss system is insensitive to the service-time distribution, so that the overflow rate from the front server {\em at stationarity} is $\alpha = \lambda^2/(\lambda+\mu)$
regardless of the assumption that service times are exponentially distributed. Thus, a generalization of Proposition \ref{prop:J2SQ} can be proved for a system with general service time distributions
having a finite mean $\mu$, but further arguments are needed for the step in which PASTA is applied.
\end{remark}
\subsection{Join the $m$-Shortest Queue Allocation Policy}
\label{subsec:JmSQ}
The arguments in the proof of Proposition \ref{prop:J2SQ} can be easily extended to the case in which there are several ``front servers'' instead of just one such server,
a scenario which arises when the $p$-allocation policy follows the ``join the $m$th shortest queue" assignment rule, corresponding to the allocation vector $\mathbf{p}_{1,m} = (0,...,0,\underbrace{1}_m,0,...,0)$.
Under this allocation policy, which we denote by J$m$SQ, an incoming customer is routed to the $m$th shortest queue ($2 \le m \le s$) with probability $1$.
The non-idling version of this policy is denoted by J$m$SQ$^\text{\textsc{ni}}$.
For $m \in \llbracket 2, s\rrbracket,$ define
\begin{align}
\label{gSet}
\mathscr G(m) & := \left\{\rho \in (0,1)\,:\,{s\rho\left(s\rho\right)^{m-1} /(m-1)! \over \sum_{i=0}^{m-1}\left(s\rho\right)^{i} / i!} < (s-m+1) \right\}; \\
V_{\text{cr}}(1,m) & := \sup \, \mathscr G(m). \label{eq:Vcr1m}
\end{align}
Note that the set $\mathscr G(m)$ is not empty, since it contains all the positive numbers that are smaller than $(s-m+1)/s$. In particular, $V_{\text{cr}}(1,m)$ is finite.
Further, the inequality in the definition of $\mathscr G(m)$ reduces to (\ref{eq:equivregions}) when $m=2$, so that $V_{\text{cr}}(1,2)\equiv V_{\text{cr}}(1)$, for $V_{\text{cr}}(1)$ in (\ref{eq:defVcr}).
\begin{lemma} \label{lemma:regionm}
$V_{\text{cr}} (1,m) < 1$ for all $m \in \llbracket 2, s\rrbracket$.
\end{lemma}
The proof of Lemma \ref{lemma:regionm} appears in the appendix.
Given Lemma \ref{lemma:regionm}, the following result generalizes Proposition \ref{prop:J2SQ}.
\begin{proposition}
\label{prop:JmSQ}
$\maS^\text{\textsc{ni}}(\mathbf{p}_{1,m}) \subset [0, V_{\text{cr}}(1,m))$; In particular, J$m$SQ$^\text{\textsc{ni}}$ is non-maximal.
\end{proposition}
\begin{proof}[Proof of Proposition \ref{prop:JmSQ}]
Fix $m \in \llbracket 2, s\rrbracket$ and let
$$\maA_m := \{x \in {\mathbb Z}_+^s : x_i \in \{0,1\}, ~ i \in \llbracket 1, m-1\rrbracket, ~ \mbox{ and } x_j \ge 2, ~ j \in \llbracket m, s\rrbracket \}.$$
As in the proof of Proposition \ref{prop:J2SQ}, the statistical homogeneity of the $s$ servers implies that any vector $\mathbf x \in {\mathbb Z}^s_+$ that has exactly $m-1$ coordinates with values in $\{0,1\}$
can be considered in $\maA_m$ since $\ord{\mathbf x} \in \maA_m$. Further, as long as the system is in $\maA_m$, it is essentially split into two systems: the first $m-1$ servers
operate like an $M/M/(m-1)$ loss system, and the remaining $s-m+1$ servers operate like a parallel system under the JSQ routing policy, whose arrival process
is the overflow from the first $m-1$ ``front servers.''
Let $\mathbf s = \left(\underbrace{0,\dots,0}_{m-1},\underbrace{2,\dots,2}_{s-m+1}\right).$
We say that a {\em visit} begins when the system transitions into state $\mathbf s$, and ends when it exits the set $\maA_m$, namely, when the splitting into a front and back servers ends.
Let $L_m := \{L_m(t) : t \ge 0\}$ denote the number-in-system process in the $M/M/(m-1)$ loss system, and let $L_m(\infty)$ denote a random variable having the stationary distribution of $L$, which we denote by $\pi_m$,
i.e., $\pi_m(j) := P(L_m(\infty) = j)$.
Note that, during a visit, the number of busy servers in the aforementioned $m-1$ front-servers is distributed like $L_m$. By Lemma \ref{lmStat-m} in \S \ref{appendix},
there exists a random time $\tau$, such that $L_m(t) = L_m(\infty)$ for all $t \ge \tau$, and therefore, the number of busy servers among those front servers is also distributed like $L_m(\infty)$
for all $t\ge \tau_k$ on the event $E_k := \{\tau_k < T_k\}$, where $T_k$ denotes the length of the $k$th visit, and $\{\tau_k : k \ge 1\}$ are IID with $\tau_1 \deq \tau$.
By the strong Markov property, all the visits are IID and $P(E_1) > 0$. Therefore, $\{E_k : k \ge 1\}$ must occur infinitely often, unless one of the visits is infinite,
i.e., finitely-many $E_k$'s will occur if and only if $T_k = \infty$, for some $k \ge 1$.
Now, if $E_k$ occurs for the $k$th visit, then the overflow process from the front servers, which is the arrival process into the back servers, has rate
$\lambda \pi_m(m-1)$ after time $\tau_k$, due to PASTA. If $\rho \ge V_{\text{cr}}(1,m)$, then $\lambda \pi_m(m-1) \ge \mu(s-m+1)$, i.e., the arrival rate to the ``back servers'' is
larger than the maximum total service rate of those $s-m+1$ servers after time $\tau_k$ as long as the $k$th visit is in process.
Therefore, $P(T_k = \infty) > 0$ on the event $E_k$.
We conclude that
$$P(T_k = \infty \text{ for some } k \ge 1) = 1,$$
so that $Q$ is either transient or null recurrent.
\end{proof}
\subsection{Proof of Theorem \ref{thJ2SQp}} \label{secProof-J2SQp}
The proofs of Propositions \ref{prop:J2SQ} and \ref{prop:JmSQ} build on the fact that each time a splitting of the system occurs, the front ``loss system'' has a positive probability of reaching stationarity in finite time,
after which PASTA is employed to characterize the overflow rate into the ``back servers.''
In the setting of Theorem \ref{thJ2SQp} with $p<1$ the splitting is as follows: There is one ``front server'' and $s-1$ ``back servers'', as in the proof of Proposition \ref{prop:J2SQ}.
However, the front server does not operate as a loss system. Instead, during each ``visit'' (splitting event), the front server operates as an $M/M/1$ queue with an infinite buffer, having a Poisson arrival process with rate $\lambda$.
Each arrival to this $M/M/1$ queue enters service if the server is idle, and otherwise joins its queue with probability $p$, and the back servers with probability $1-p$, independently of everything else.
In particular, the arrival process to the $s-1$ back servers consists of all the arrivals that did not join the front server.
For the particular $M/M/1$ queue we obtain during a splitting event, the time to reach stationarity is infinite, so that PASTA cannot be directly employed as in the proofs of Propositions \ref{prop:J2SQ}
and \ref{prop:JmSQ}.
\begin{proof}[Proof of Theorem \ref{thJ2SQp}.]
Consider $p \in (1-1/s,1]$, and fix $\lambda,\mu$ such that $\rho=\lambda/s\mu \in [V_{\text{cr}}(p,s), 1)$.
Let $Y^\text{\textsc{f}}(t)\in{\mathbb Z}_+$ be the number of customers in the front server at time $t$, and for $i \in \llbracket 1,s-1 \rrbracket$, let $Y^{\text{\textsc{b}}}_i(t)$ be the size of the
$i$th queue among the back servers, in the increasing order of queue lengths. It is easily seen that both processes $Y^{\text{\textsc{f}}}$ and $Y:=\left(Y^{\text{\textsc{f}}},Y^{\text{\textsc{b}}}_1,...,Y^{\text{\textsc{b}}}_{s-1}\right)$ (as functions of $t$) are CTMCs
on ${\mathbb Z}_+$ and ${\mathbb Z}_+^{s-1}$, respectively.
In particular, $Y^{\text{\textsc{f}}}$ is a Birth and Death (BD) process on ${\mathbb Z}_+$ with respective birth and death rates
$\lambda$ and $0$ at state $0$, and $\lambda(1-p)$ and $\mu$ at all other states. By the assumed values of $p$ and $\rho$, $Y^{\text{\textsc{f}}}$ is ergodic with stationary distribution
\begin{align*}
\pi^{\text{\textsc{f}}}(0) &={\mu-\lambda+\lambda p \over \mu +\lambda p};\\
\pi^{\text{\textsc{f}}}(i) &=\left({\lambda (1-p)\over \mu}\right)^{i-1} {\lambda \over \mu}\pi^{\text{\textsc{f}}}(0),\,i \ge 1.
\end{align*}
In particular the stationary probability that the front server is busy is
\begin{equation}
\label{eq:defpip}
\pi^{\text{\textsc{f}}}\left({\mathbb Z}_+^*\right)=1-\pi^{\text{\textsc{f}}}(0)= {\lambda\over \mu +\lambda p}={s\rho\over 1 + s\rho p}.
\end{equation}
Now, it is well-known that an ergodic BD process with birth and death rates that are uniformly bounded is exponentially ergodic; e.g., see \cite[\S 4]{Tweedie81}.
Then letting $\|\cdot\|_{TV}$ denote the total-variation norm (e.g., see \cite{asmussen}),
\begin{equation}\label{ergodic}
\|P(Y^{\text{\textsc{f}}}(t) \in \cdot) - \pi^{\text{\textsc{f}}}(\cdot)\|_{TV} < C_0 e^{-\beta t}, \quad t \ge 0,
\end{equation}
for some $C_0 \in [0, \infty)$ that depends on the initial condition only, and for some $\beta > 0$ that is independent of the initial condition.
For a given $y \in {\mathbb Z}_+$, Let $P^y_t$ denote the one-dimensional marginal distribution of the random variable $Y^{\text{\textsc{f}}}(t)$ when $Y^{\text{\textsc{f}}}(0) = y$.
It follows from \eqref{ergodic} that, for any $\epsilon > 0$, there exists a $T^y_\epsilon < \infty$ that depends on the initial condition $y$,
such that
\begin{equation} \label{ergodic2}
\|P^y_t - \pi^{\text{\textsc{f}}}\|_{TV} < \epsilon \quad\mbox{for all } t > T^y_\epsilon.
\end{equation}
Next, let $\{t_n : n \ge 1\}$ denote the event (arrival) times in the Poisson arrival process to the system, and for $A \subset {\mathbb Z}_+$, let
\begin{equation*}
P^{\text{\textsc{f}}}_n(A) := P(Y^{\text{\textsc{f}}}(t_n-) \in A) = P_{t_n-}(A) ~~\mbox{ and }~~ P^{\text{\textsc{f}}}_\infty(A) := \lim_{n\rightarrow\infty} P^{\text{\textsc{f}}}_n(A) .
\end{equation*}
From the PASTA property, we know that the above limit $P^{\text{\textsc{f}}}_\infty$ exists for all $A \subset {\mathbb Z}_+$, and that $P^{\text{\textsc{f}}}_\infty = \pi^{\text{\textsc{f}}}$.
Thus \eqref{ergodic2} implies that, for any $\epsilon > 0$ and for any fixed initial condition $y$,
there exists $T^y_\epsilon$, such that $\|P^{\text{\textsc{f}}}_n - \pi^{\text{\textsc{f}}}\|_{TV} < \epsilon$ for all $n$ such that $t_n \ge T^y_\epsilon$.
(The weak convergence to the stationary distribution is equivalent to convergence in total variation since the state space of $Y^{\text{\textsc{f}}}$ is countable.)
In particular, taking $A := {\mathbb Z}_+^*$--corresponding to the event that the front server is busy--and $Y^{\text{\textsc{f}}}(0) = 0$, we have that, for some $T_\epsilon := T^0_\epsilon > 0$
\begin{equation} \label{almostPasta}
\left|P^{\text{\textsc{f}}}_n\left({\mathbb Z}_+^*\right) - \pi^{\text{\textsc{f}}}\left({\mathbb Z}_+^*\right)\right| < \epsilon \quad\mbox{for all } n \text{ for which } t_n > T_\epsilon.
\end{equation}
Let $N_{\textsc{of}}(a,b]$ denote the overflow process from the front server (which is the arrival process to the back servers) over the time interval $(a,b]$, $0 \le a < b$.
Consider also a sequence of independent Bernoulli random variables $\{B_n : n \ge 1\}$, that are also independent of all other random variables defining the system,
each having ``success'' probability $p$, i.e., $P(B_n = 1) = p$ for all $n \ge 1$.
As in (\ref{eq:equivregions}), one can easily check that $\rho >V_{\text{cr}}(p,2)$ if and only if
$\lambda p\pi^{\text{\textsc{f}}}({\mathbb Z}_+^*) > (s-1)\mu$. Take $\epsilon > 0$ for which $\lambda p \left(\pi^{\text{\textsc{f}}}\left({\mathbb Z}_+^*\right) - \epsilon\right) > (s-1)\mu$.
Then \eqref{almostPasta} implies that, for $T_\epsilon$ in \eqref{almostPasta} and for all $t > 0$,
\begin{equation} \label{Pasta}
t^{-1}E\left[N_{\textsc{of}}(T_\epsilon,T_\epsilon + t]\right] = t^{-1} E \left[ \sum_{t_n \in (T_\epsilon, T_\epsilon+t]}\mathbf 1_{\left\{\{Y^{\text{\textsc{f}}}(t_n-) \in {\mathbb Z}_+^*\} \cap \{B_n = 1\}\right\}} \right]
> \lambda p \left(\pi^{\text{\textsc{f}}}\left({\mathbb Z}_+^*\right) - \epsilon\right).
\end{equation}
The rest of the proof is similar to the arguments in the proof of Proposition \ref{prop:J2SQ}:
Under the (contradictory) assumption that $Q$ is ergodic, a splitting into front and back servers must occur i.o.
Letting a visit begin when, during such a splitting, the front server first reaches the empty state, we have that
the visits are IID, and each lasts for at least $T_\epsilon$ time units with a strictly positive probability, for any $\epsilon$ satisfying the inequality in \eqref{Pasta}.
(Note that, since a visit begins at a fixed state, we can choose the same $T_\epsilon$ in \eqref{almostPasta} for all the visits.)
More specifically, with $I_k$ denoting the time interval during the $k$th visit beginning when the front server is empty and ending when the visit ends,
we have that $P(I_k > T_\epsilon) > 0$, so that $\{I_k > T_\epsilon\}$, $k \ge 1$, must occur i.o.
However, since the overflow process from the front server is guaranteed to be larger than the total service rate $\mu(s-1)$ of the back servers after time $T_\epsilon$,
there is a positive probability that a visit will never end, contradicting the ergodicity assumption. The theorem is proved.
\end{proof}
\section{Simulation Experiments for Workload-Based Allocation Policies} \label{sec:simu}
As discussed in Section \ref{secMotivation}, our results and analyses provide insights for systems operating under allocation policies that are based on the workload (as opposed to the queue length).
Indeed, it is intuitively clear from the proofs of our main results that a system under JSW also experiences random ``splitting'' into forward and backward subsystems,
and that the backward subsystem may be unstable (so that the whole system is unstable) even if $\rho < 1$.
In this section we present simulation experiments to support this intuition.
In fact, the simulations indicate that the bounds we obtained for the stability regions in Theorem \ref{thJ2SQp} and
Propositions \ref{prop:J2SQ} and \ref{prop:JmSQ}, are tight estimates of the stability regions for the corresponding workload-based allocation policies, which are formally defined as follows.
\begin{definition}
For $m \in \llbracket 1,s \rrbracket$ and $p \in [0,1]$, we say that the allocation policy is {\em Join the $m$th shortest workload with probability $p$}, denoted by
JmSW($p$), if each arrival is sent to the queue having the smallest workload with probability $1-p$, and is otherwise sent to the queue with the $m$th smallest workload with probability $p$.
In the non-idling version of JmSW($p$), denoted by JmSW$^{\text{\textsc{ni}}}$($p$), an arrival is sent to an idle server w.p.1, if such a server is available, and is otherwise routed to a server according to JmSW($p$).
\end{definition}
\paragraph{Cases Considered.}
We simulated a system with $4$ servers, each providing exponentially distributed service with mean $1$,
that is operating under J2SW$^\text{\textsc{ni}}$($p$) (join the second-smallest workload with probability $p$), where $p \in \{0.8, 0.9, 1\}$.
In addition, we simulated the system when it is operating under J3SW$^\text{\textsc{ni}}$($1$), namely, $m=3$ and $p=1$.
For each of these four systems we simulated the corresponding embedded DTMC over $10^7$ arrivals for two values of the traffic intensity $\rho$, one that is slightly above, and the other slightly below,
the critical values $V_{\text{cr}}(p)$ (for J2SW$^\text{\textsc{ni}}$($p$)) and $V_{\text{cr}}(1,3)$ (for the system under J3SW$^\text{\textsc{ni}}$($1$)).
The critical values are computed via \eqref{eq:defVcr} and \eqref{gSet}-\eqref{eq:Vcr1m}, respectively.
In particular, for each of the four examples we considered a traffic intensity that is larger than the critical value of $\rho$ by $2/10^3 = 0.002$,
and a traffic intensity that is smaller than the corresponding critical value by $0.002$.
We emphasize that the critical values are for the same system operating under J2SQ$^\text{\textsc{ni}}$($p$) and J3SW$^\text{\textsc{ni}}$($1$), and so we do not know whether they are also the critical values for the system
under the simulated scenarios.
In Figure \ref{Fig:plotJSW4} we show a sample path of the most loaded server (in terms of workload) for each of the six cases considered for the system
under J2SW$^\text{\textsc{ni}}$($p$), namely, two examples, each with a different $\rho$ for each of the three different values of $p$, as described above.
Two sample paths simulated for the system operating under J3SW$^\text{\textsc{ni}}$($1$), one for each value of $\rho$, are shown in Figure \ref{Fig:plotJSW4bis}.
\begin{figure}
\includegraphics[scale=0.4]{J2SW4New_08,09854_.jpeg}
\includegraphics[scale=0.4]{J2SW4New_08,09894_.jpeg}
\includegraphics[scale=0.4]{J2SW4New_09,09637_.jpeg}
\includegraphics[scale=0.4]{J2SW4New_09,09677_.jpeg}
\includegraphics[scale=0.4]{J2SW4New_1,09458_.jpeg}
\includegraphics[scale=0.4]{J2SW4New_1,09498_.jpeg}
\caption{\scriptsize Sample paths of the largest workload process generated for $10^7$ arrivals of a system with four servers operating under J2SW$^\text{\textsc{ni}}$($p$).
The two figures in each row depict one value of $p$, with the left figure having $\rho = V_{\text{cr}}(p) + 0.002$, and the right figure having $\rho = V_{\text{cr}}(p) - 0.002$.
{\bf Upper panel:} a system operating under J2SW$^\text{\textsc{ni}}$($0.8$), for which $V_{\text{cr}}(0.8) \approx 0.9874$.
{\bf Middle panel:} a system operating under J2SW$^\text{\textsc{ni}}$($0.9$), for which $V_{\text{cr}}(0.9) \approx 0.9657$.
{\bf Lower panel:} a system operating under J2SW$^\text{\textsc{ni}}$($1$), for which $V_{\text{cr}}(1) \approx 0.9778$.
}
\label{Fig:plotJSW4}
\end{figure}
\begin{figure}
\includegraphics[scale=0.4]{J3SW4New_1,0868_.jpeg}
\includegraphics[scale=0.4]{J3SW4New_1,0872_.jpeg}
\caption{Sample paths of the largest workload process generated for $10^7$ arrivals of a system with four servers operating under J3SW$^\text{\textsc{ni}}$($1$), for which $V_{\text{cr}}(1,3) = 0.87$.
The left figure depicts a sample path when $\rho = V_{\text{cr}}(1,3) - 0.002$, and the right figure depicts a sample path when $\rho = V_{\text{cr}}(1,3) + 0.002$.
}
\label{Fig:plotJSW4bis}
\end{figure}
We remark that, whenever $\rho$ is equal to its critical value, the queue process is null recurrent, and it is therefore hard to determine from simulation whether a system is stable or not when $\rho$
is ``too close'' to its critical value. (For any value of $\rho$ in a small-enough neighborhood of the critical value, the stochastic fluctuations are large, and one may observe a return to the empty state
over any finite time interval, even in the transient case.)
Nevertheless, for each of the four simulated routing policies, the system seems to be unambiguously unstable for the larger value of $\rho$, and to be stable for the smaller value of $\rho$.
This, together with the fact that the difference between the two traffic intensities is just $0.004$, strongly suggests that the critical value of $\rho$ for the system operating under the queue-based allocation policy
is very close, if not equal, to the critical value of $\rho$ for the system operating under the corresponding workload-based allocation policy.
\section{Summary}
In this paper we considered parallel server systems with $s \ge 1$ statistically homogeneous servers, to which jobs are routed upon arrival according to a family of random-assignment rules,
which we called $\mathbf{p}$-allocation policies. That family of routing policies includes the PW($d$) routing rule, and the special cases JSQ and uniform routing, as well as their ``non-idling'' versions,
under which an arrival is always routed to an idle server, if there is one.
Our motivation for this study was the fact that in practice, and unlike the ideal settings that are typically considered in the literature, routing errors are likely to occur, so that jobs are not necessarily routed
to the shortest queue when JSQ is implemented.
We first characterized a sufficient condition for stability (in Theorem \ref{thMaximal}) which, in addition to the usual traffic condition $\rho < 1$, requires the $\mathbf{p}$-allocation vector to be smaller,
in the generalized Schur convex order, defined in Definition \ref{defOrder}, than the uniform distribution on $\llbracket 1,s\rrbracket$. In particular, under the extra assumption on $\mathbf{p}$,
the $\mathbf{p}$-allocation policy (and its non-idling version) is guaranteed to be maximal.
We then demonstrated that the condition $\rho < 1$ by itself does not guarantee that the system is stable, even when a non-idling $\mathbf{p}$-allocation policy is employed.
Specifically, we considered the stability region of the policy J2SQ$^\text{\textsc{ni}}(p)$, under which arrivals are always routed to an idle server, if one is present,
and are otherwise routed to the shortest queue with probability
$1-p$, and to the second shortest queue with an ``error probability'' $p$. Theorem \ref{thJ2SQp} proves that the stability region may be strictly contained in $[0,1)$,
namely, $\rho$ must be smaller than a positive number $V_{\text{cr}}$, which is itself smaller than $1$ for a range of values of $p$.
Corollary \ref{coroJ2SQid} proves that $p$ must satisfy $p\le 1-1/s$ in order for J2SQ$^\text{\textsc{ni}}(p)$ to be maximal.
One way of interpreting our results is that the risk of instability caused by erroneous routing decisions is small when the number of servers is large.
On the other hand, routing errors cause any system to effectively be in heavier traffic than planned;
if the system is designed to operate in ``heavy traffic,'' namely, if $\rho \approx 1$, then we can conclude that even a small probability of making routing errors may lead to harmful
departures from the desired performance, and may even lead to instability.
Finally, simulation examples in \S \ref{sec:simu} demonstrate that our results are insightful also for systems operating under JSW,
for which routing errors are more likely to occur, even in automated environments, because the actual workload in each queue can typically only be estimated.
Indeed, we conjecture that the stability regions under JSQ and JSW are the same.
\begin{appendix}\label{appendix}
\section{Remaining Proofs}
\label{sec:proofs}
\subsection{Proof of Lemma \ref{lemma:order}}
\label{subsec:lemma}
As $\mathbf a \preceq_{\textsc{gsc}} \mathbf b$ and $\mathbf x$ is ordered, for any $k \le s$ we have that
\begin{align*}
\sum_{i=k}^s x_ia_i &= x_ka_k + \sum_{i=k+1}^s \sum_{j=k}^{i-1} (x_{j+1}-x_j) a_i + \sum_{i=k+1}^s x_k a_i\\
&= x_k\sum_{i=k}^s a_i + \sum_{i=k+1}^s \left(x_i - x_{i-1}\right)\sum_{j=i}^s a_j
\le x_k\sum_{i=k}^s b_i + \sum_{i=k+1}^s \left(x_i - x_{i-1}\right)\sum_{j=i}^s b_j= \sum_{i=k}^s x_ib_i.
\end{align*}
\subsection{Proof of Lemma \ref{lemma:regionm}}
For $m \in \llbracket 2,s \rrbracket$ let $\pi_{\rho, m}$ denote the loss probability of an $M/M/(m-1)/0$ queue (a loss system with $m-1$ servers), having traffic intensity
$s \rho = \lambda/\mu$; then
\[\pi_{\rho,m}:={\left(s\rho\right)^{m-1}/(m-1)! \over \sum_{i=0}^{m-1}\left(s\rho\right)^{i} /i!}.\]
Observe that $\rho \in \mathscr G(m)$, for $\mathscr G(m)$ in (\ref{eq:Vcr1m}), is equivalent to $s\rho\pi_{\rho,m}<(s-m+1)$. Also, we clearly have that
\begin{equation}
\label{eq:recregionm}
{1 \over \pi_{\rho,m+1}} = 1 + {m \over s\rho \pi_{\rho,m}},\,m = 2,...,s-1.
\end{equation}
First, $V_{\text{cr}}(1,2) = \sup \mathscr G(2) < 1$ from (\ref{Vcr-cond}).
We then proceed by induction. Suppose that $\sup \mathscr G(m)<1$ for some $m \in \llbracket 2,s \rrbracket$.
Let $\rho \in \mathscr G(m+1)$. If $\rho \ge {(s-m)(s+1) \over (s-m+1)s}$, then we have that
\[s\rho \pi_{\rho,m+1} < (s-m) \le s\rho{s-m+1 \over s+1}\]
which, after an immediate computation using (\ref{eq:recregionm}), is equivalent to $s\rho\pi_{\rho,m}<s-m+1$, i.e.
$\rho \in \mathscr G(m)$. By the induction assumption, this implies that
\[\sup \mathscr G(m+1) \le \left( \sup \mathscr G(m) \vee {(s-m)(s+1) \over (s-m+1)s}\right) <1,\]
which concludes the proof.
\section{Auxiliary results}
\label{sec:aux}
\label{subsec:coupling}
Let $L_1 := \{L(t) : t \ge 0\}$ denote the queue process in an $M/M/1/0$ queue (one-server loss system) having a Poisson arrival process with rate $\lambda$ and service rate $\mu$.
The proof of the following lemma is a simple application of a standard coupling argument which we bring here for completeness.
\begin{lemma} \label{lmStat}
Consider the process $L_1$, and let $\tau_1$ denote the time of the first event after time $0$ (arrival or departure). Then $L_1$ is stationary for all $t \ge \tau_1$; in particular,
$P(L_1(t) = 0) = 1 - P(L_1(t) = 1) = \mu/(\lambda+\mu)$, $t \ge \tau_1$.
\end{lemma}
\begin{proof}
Let $L_e := \{L_e(t) : t \ge 0\}$ denote a stationary version of the process $L_1$, namely, $P(L_e(0) = 0) = 1-P(L_e(0) = 1) = \mu/(\lambda+\mu)$.
Let $T$ denote the first time $L_1$ and $L_e$ are equal; $T := \inf\{t \ge 0 : L(t) = L_e(t)\}$, and define the process
\begin{equation} \label{Lx}
L_0(t) := \left\{\begin{array}{ll}
L_1(t) & t < T, \\
L_e(t) & t \ge T.
\end{array}\right.
\end{equation}
Since $T$ is a stopping time that is finite w.p.1, the strong Markov property implies that $L_0 \deq L_1$. The coupling inequality (e.g., \cite[VII 2a]{asmussen}) gives
\begin{equation*}
\|P(L_1(t) \in \cdot) - \pi(\cdot)\|_{TV} \le P(T > t).
\end{equation*}
Clearly, $L_0$ and $L_e$ are equal when the first event (arrival or departure) in either of the two processes occurs, and in particular, when the first event in $L_0$ occurs.
\end{proof}
Similarly to the proof of Lemma \ref{lmStat} we can prove the following result. Recall that $L_m := \{L_m(t) : t \ge 0\}$ denotes the number-in-system process in an $M/M/(m-1)/0$ queue--a loss system
with $m-1$ servers and no buffer. Let $\tau_m := \inf\{t \ge 0 : L_m(t) = m-1\}$, namely, $\tau_m$ is the first time instant in which all servers are busy.
Note that $\tau_m$ is a proper random variable, i.e., $P(\tau_m < \infty) = 1$.
\begin{lemma} \label{lmStat-m}
If $L_m(0) = 0$, then $L_m$ is stationary for all $t \ge \tau_m$; in particular, for all $t \ge \tau_m$,
\begin{equation*}
P(L_m(t) = k) = \pi^{m-1} := {\rho^k/k! \over \sum_{j=0}^{m-1} \rho^j/j!}, \quad k \in \llbracket 1, m-1 \rrbracket.
\end{equation*}
\end{lemma}
\begin{proof}
Let $L_\infty$ denote the stationary version of $L_m$, namely, $L_\infty(0) \deq \pi^{m-1}$, for $\pi^{m-1}$ in the statement of the lemma.
We couple $L_m$ and $L_\infty$ on the same probability space and allow them to evolve independently of each other until they couple, after which
the two processes follow the path of $L_\infty$ (similarly to the construction of $L_0$ in the proof of Lemma \ref{lmStat}). Since $L_m(0) = 0$, the two processes must have coupled by $\tau_m$,
and so the result follows from the strong Markov property.
\end{proof}
\end{appendix}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}
}
\providecommand{\href}[2]{#2}
\begin{thebibliography}{{{\"U}}82}
\bibitem{asmussen}
S. Asmussen. (2003).
{\em Applied probability and queues}.
Springer Verlag.
\bibitem{BacBre02}
{F. Baccelli and P. Br\'emaud}. (2002).
\emph{Elements of Queueing Theory} (2nd ed.).
Springer.
\bibitem{Bramson_Instable}
M. Bramson. (1994).
Instability of FIFO queueing networks.
{\em Ann. Appl. Probab.} {\bf 4}(2), 414--431.
\bibitem{BramsonBook}
M. Bramson. (2008).
{\em Stability of queueing networks}.
Springer.
\bibitem{Bramson10}
M. Bramson, Y. Lu, and B. Prabhakar. (2010).
Randomized load balancing with general service time distributions.
{\em ACM SIGMETRICS performance evaluation review} 38(1), 275--286.
\bibitem{Bramson11}
M. Bramson. (2011)
Stability of join the shortest queue networks.
{\em The Annals of Applied Probability}, 21(4), 1568--1625.
\bibitem{Brandt85}
{A. Brandt.} (1985).
On stationary waiting times and limiting behavior of queues with many servers I: the general G/G/m/$\infty$ case. {Elektron. Inform. u. Kybernet.} \textbf{21}, 47--64.
\bibitem{Bre99}
P. Br\'emaud. (1999).
{\em Markov Chains: Gibbs Fields, Monte Carlo Simulation, and Queues}. Texts Appl. Math. {\bf 31}. Springer, New York.
\bibitem{Brightwell12}
G. Brightwell and M. Luczak (2012).
The supermarket model with arrival rate tending to one.
arXiv preprint arXiv:1201.5523.
\bibitem{DaiFluid}
J.G. Dai. (1995).
On positive Harris recurrence of multiclass queueing networks: a unified approach via fluid limit models.
{\em The Annals of Applied Probability}, 5(1), 49--77.
\bibitem{DaleyOptimal}
D.J. Daley. (1987).
Certain optimality properties of the first-come first-served discipline for $G/G/s$ queues.
{\em Stochastic Processes and their Applications}, 25, 301-308.
\bibitem{DFT17}
P.S. Dester, C. Fricker and D. Tibi. (2017).
Stationary analysis of the shortest queue problem.
{\em Working paper}. Available at: arXiv:1704.06442v3.
\bibitem{Gamarnik_JSQ}
P. Eschenfeldt and D. Gamarnik. (2015).
Join the shortest queue with many servers. The heavy traffic asymptotics.
{\em Working paper}. Available at: {arXiv:1502.00999}.
\bibitem{Gamarnik_supermarket}
P. Eschenfeldt and D. Gamarnik. (2016).
Supermarket queueing system in the heavy traffic regime. Short queue dynamics.
{\em Working paper.} Available at: {arXiv:1610.03522}.
\bibitem{FMcK77}
L. Flatto and H.P. McKean. (1977).
Two queues in parallel. {\em Comm. Pure Appl. Math.}, {\bf 15}, 255-263.
\bibitem{FS78}
G.J. Foschini and J. Salz. (1978).
A basic routing problem and diffusion. {\em IEEE Trans. on Comm.} \textbf{26}, 320--327.
\bibitem{Foss81}
{S. Foss.} (1981).
Comparison of service disciplines in multichannel service systems. {\em Siberian Math. Zh.}, \textbf{22}(1), 190--197.
\bibitem{Foss98}
S. Foss and N. Chernova. (1998).
On the stability of a partially accessible multi-station queue with state-dependent routing.
{\em Queueing Systems}, {\bf 29}(1), 55--73.
\bibitem{Gra00}
C. Graham. (2000).
Chaoticity on path space for a queueing network with selection of the shortest queue among several.
{\em Journ. Appl. Prob.} {\bf 37}, 198-211.
\bibitem{Gra05}
C. Graham. (2005).
Functional central theorems for a large network in which customers join the shortest among several queues or a queueing network with selection of the shortest of several queues.
{\em Probab. Theory Relat. Fields.} {\bf 131}, 97-120.
\bibitem{Haight58}
F.A. Haight. (1958).
Two queues in parallel. {\em Biometrika} \textbf{45}, 401--410.
\bibitem{khalil02}
H.K. Khalil. (2002).
{\em Nonlinear Systems}. Prentice Hall, New Jersey.
\bibitem{KW55}
J. Kiefer and J. Wolfowitz. (1955). On the theory of queues with many servers. {\em Trans. Amer. Math. Soc.} {\bf 78}, 1--18.
\bibitem{King61}
J.F.C. Kingman. (1961). Two Similar Queues in Parallel.
{\em The Annals of Mathematical Statistics} \textbf{32}(4), 1314--1323.
\bibitem{Kulkarni17}
V.G. Kulkarni. (2017).
{\em Modeling and analysis of stochastic systems.} Chapman and Hall/CRC.
\bibitem{Liberzon03}
Liberzon, D. (2003).
{\em Switching in Systems and Control.} Birkh\"{a}user.
\bibitem{Loynes62}
{R.M. Loynes.} (1962).
The stability of queues with non-independent interarrivals and
service times. {\em Proceedings of the Cambridge Philosophical Society}, \textbf{58}, 497--520.
\bibitem{LuKumar}
S.H. Lu and P.R. Kumar. (1991).
Distributed scheduling based on due dates and buffer priorities.
{\em IEEE Trans. Automat. Control}. {\bf 36}(12), 1406--1416.
\bibitem{Lu11}
Y. Lu, Q. Xie, G. Kliot, A. Geller, J.R. Larus and A. Greenberg. (2011).
Join-Idle-Queue: A novel load balancing algorithm for dynamically scalable web services.
{\em Performance Evaluation} 68(11), 1056--1071.
\bibitem{Luczak06}
M.J. Luczak and C. McDiarmid. (2006).
On the maximum queue length in the supermarket model.
{\em The Annals of Probability}, 34(2), 493--527.
\bibitem{Luczak07}
M.J. Luczak and C. McDiarmid. (2007).
Asymptotic distributions and chaos for the supermarket model.
{\em Electronic Journal of Probability} 12, 75--99.
\bibitem{MO79}
A.W. Marshall and I. Olkin. (1979).
\emph{Inequalities: Theory of Majorization and Its Applications}, Academic Press, New York.
\bibitem{Mitzenmacher96}
M. Mitzenmacher. (1996).
{\em The Power of Two Choices in Randomized Load Balancing}, PhD thesis, Univ. of California, Berkeley.
\bibitem{Moy17_a}
{P. Moyal}. (2017).
On the Stability of non-monotonic systems of parallel queues.
{\em Discrete Events Dynamic Systems}, 27(1), 85--107.
\bibitem{Moy17_b}
{P. Moyal}. (2017).
A pathwise comparison of parallel queues.
{\em Discrete Events Dynamic Systems}, 27(3), 573--584.
\bibitem{MoyalPerry}
P. Moyal and O. Perry. (2017).
On the instability of matching queues.
{\em The Annals of Applied Probability}, 27(6), pp. 3385-3434.
\bibitem{PWchatter}
O. Perry and W. Whitt. (2016).
Chattering and Congestion Collapse in an Overload Switching Control.
{\em Stochastic Systems}, 6(1), pp. 132--210.
\bibitem{RobertBook}
P. Robert. (2003).
{\em Stochastic networks and queues}.
Springer-Verlag.
\bibitem{RySto92}
A.N. Rybko and A.L. Stolyar. (1992).
Ergodicity of stochastic processes describing the operations of open queueing networks.
{\em Problems Inform. Transmission} {\bf 28}, 3--26 (in Russian).
\bibitem{SW03}
A. Scheller-Wolf. (2003).
\newblock Necessary and sufficient conditions for delay moments in FIFO multiserver queues with an application comparing $s$ slow
servers with one fast one.
\newblock {\em Operations Research} {\bf 51}(5): 748--758.
\bibitem{Turner98}
S.R. Turner. (1998).
The effect of increasing routing choice on resource pooling.
{\em Probability in the Engineering and Informational Sciences}, 12(1), 109--124.
\bibitem{Tweedie81}
R.L. Tweedie. (1981).
Criteria for ergodicity, exponential ergodicity and strong ergodicity of Markov processes.
{\em Journal of Applied Probability}, 18(1), 122--130.
\bibitem{Vved96}
N.D. Vvedenskaya, R.L.V. Dobrushin, and F.I. Karpelevich. (1996).
Queueing system with selection of the shortest of two queues: An asymptotic approach.
{\em Problemy Peredachi Informatsii}, 32(1), 20--34.
\bibitem{weber78}
R.W. Weber. (1978).
On the optimal assignment of customers to parallel servers.
{\em Journal of Applied Probability} 15(2), 406--413.
\bibitem{Whitt85}
W. Whitt. (1985).
Deciding which queue to join: some counterexamples.
{\em Operations Research}, 34(1), 55--62.
\bibitem{Whitt02}
W. Whitt. (2002).
{\em Stochastic Process Limits}, Springer, New York.
\bibitem{Winston77}
W. Winston. (1977).
Optimality of the shortest line discipline.
{\em Journal of Applied Probability} 14(1), 181--189.
\bibitem{SupermarketGame}
J. Xu and B. Hajek. (2013).
The supermarket game, {\em Stochastic Systems}, 3(2), 405--441.
\end{thebibliography}
\end{document} |
\begin{document}
\maketitle
\begin{abstract}
We show how a parameterized family of maps of the spine of a manifold
can be used to construct a family of \homeos\ of the ambient manifold
which have the inverse limits of the spine maps as global attractors.
We describe applications to unimodal families of interval maps, to
rotation sets, and to the standard family of circle maps.
\end{abstract}
\section{Introduction}
The use of inverse limits to construct and analyze examples has been
an important tool in dynamical systems since Williams's
work~\cite{williams} on expanding attractors. Given a continuous
self-map $f\colon X\raw X$ of a metric space, its natural extension
$\hat{f}$ is a self-homeomorphism of the inverse limit space
$X_\infty=\ilim(X,f)$. The natural extension $\hat{f}\colon
X_\infty\raw X_\infty$ is, in a precise sense, the dynamically
minimal extension of~$f$ to a homeomorphism. The space~$X_\infty$ is
defined abstractly as a subspace of $X^\N$, but in many examples the
inverse limit can be embedded inside a manifold~$M$ which has the
original space~$X$ as a spine, and $\hat{f}\colon X_\infty \raw
X_\infty$ can be extended to a self-homeomorphism of~$M$ for which
$X_\infty$ is a global attractor.
In the simplest case, this provides a method for constructing
homeomorphisms of surfaces from endomorphisms of graphs having the
same homotopy type as the surface: the surface homeomorphisms have
attracting sets -- generally with complicated topology -- on which the
dynamics is derived from that of the graph endomorphism. This
construction is useful because it is much easier to construct and
analyze graph endomorphisms than surface homeomorphisms. More
generally, the technique can often be used to embed non-invertible
dynamics as an attractor of a higher-dimensional invertible system.
Barge and Martin~\cite{bargemartin} systematized this idea, describing
a construction to embed the inverse limit of any interval endomorphism
as a global attractor of a plane homeomorphism. As they commented,
their construction readily generalizes to graphs other than the
interval and to higher dimensions, and this generalization is
a special case of the results presented here: a continuous self-map
$f\colon X\raw X$ of a {\em boundary retract}~$X$ of a compact
manifold~$M$ gives rise to an appropriate homeomorphism $\Phi\colon
M\raw M$ provided that~$f$ satisfies a certain topological condition
(it {\em unwraps} in~$M$).
The main purpose of this paper is to develop a parameterized
version of the Barge-Martin construction. Continuously varying
families of maps are of central importance in dynamical systems
theory: apart from their obvious relevance in modelling, one of the
best ways to understand complicated dynamics is to study the way that
it is built up from simple dynamics in parameterized families. The
main result of this paper, Theorem~\ref{BBMparam}, states that the
Barge-Martin construction can be carried out for a parameterized
family~$f_t\colon X\raw X$ in such a way as to yield a continuously
varying family~$\Phi_t\colon M\raw M$ of homeomorphisms, provided
that each~$f_t$ unwraps in~$M$. Under a mild additional assumption
(that there is some~$m>0$ such that $f_t^{m+1}(X)=f_t^m(X)$ for
all~$t$), the attractors~$\Lambda_t$ of~$\Phi_t$ (on which the
dynamics is given by the natural extension of~$f_t$) vary Hausdorff
continuously with the parameter.
The main tool in the Barge-Martin construction is a theorem of Morton
Brown~\cite{brown}, that the inverse limit of a near-homeomorphism of
a compact metric space~$X$ is homeomorphic to~$X$. The parameterized
version of the construction requires this homeomorphism to vary
continuously with the near-homeomorphism, and this extension of
Brown's theorem is presented in Section~\ref{sec:brown}
(Theorem~\ref{brownplus} and Corollary~\ref{ourthing}). The main
theorem of the paper, the parameterized Brown-Barge-Martin (BBM)
construction, is contained in Section~\ref{sec:BBM}.
Section~\ref{apps} contains a brief summary of each of three areas of
application. In Section~\ref{sec:unimodal}, the construction is
applied to the tent family and the quadratic family of unimodal
interval endomorphisms to provide families of homeomorphisms of the
disk~$D^2$ with monotonically increasing dynamics. The inverse limits
of these unimodal maps, their embeddings as attractors of
homeomorphisms, and their relationship to H\'enon maps have been much
studied, and we relate our construction to other work in this area.
The original motivation for this paper was an attempt to understand
the rotation sets that arise in continuously varying families of
homeomorphisms of the torus. This problem, which had previously
resisted analysis, becomes tractable in certain cases when it is
reduced to a one-dimensional problem, allowing it to be attacked using
methods of kneading theory. Section~\ref{sec:rotation} provides a
general description of the relationship between the rotation sets of
the endomorphisms $f_t\colon X\raw X$ and those of the homeomorphisms
$\Phi_t\colon M\raw M$. In Section~\ref{sec:standard}, this is applied
in another example, Arnol'd's {\em standard family} $f_{b,w}$ of
circle endomorphisms, yielding a continuously varying family
$\Phi_{b,w}$ of annulus homeomorphisms with the same rotation sets
as~$f_{b,w}$.
\subsection*{Definitions and notation}
Let~$(X,d)$ and~$(Y,e)$ be compact metric spaces. We write $\cC(X,Y)$
and $\cH(X,Y)$ respectively for the spaces of continuous maps and
\homeos\ $X\raw Y$, endowed with the uniform metric.
A map $f\in\cC(X,Y)$ is a \de{near-homeomorphism} if it is the
uniform limit of homeomorphisms: that is, if it lies in the closure of
$\cH(X,Y)$ in $\cC(X,Y)$. Every near-homeomorphism is onto,
being the uniform limit of continuous surjections from a compact
space.
Let~$I$ be a compact metric space, which will be considered as a
parameter space. A continuous family $\{f_t\}_{t\in I}$ in~$\cC(X,Y)$
is a \de{near-isotopy} if the map $X\times I \to Y$ given by
$(x,t)\mapsto f_t(x)$ can be uniformly approximated by maps
of the form $(x,t)\mapsto h_t(x)$, where $\{h_t\}_{t\in I}$ is a
continuous family in $\cH(X,Y)$. The term near-isotopy and the
notation~$I$ are intended to suggest the ``standard'' case
$I=[0,1]$.
The question of whether or not a continuous family of
near-homeomorphisms is necessarily a near-isotopy appears to be subtle.
If~$I=[0,1]$ and~$X=Y$ is a compact manifold, then this follows
straightforwardly from the deep result of Edwards and
Kirby~\cite{edwardskirby} that~$\cH(X,X)$ is uniformly locally contractible and
thus uniformly locally path connected.
Let $\epsilon>0$. A map $g\in\cC(X,Y)$ is called an \de{$\epsilon$-map} if
$g(x_1)=g(x_2)$ implies $d(x_1,x_2)<\epsilon$. The
set~$\cC_\epsilon(X,Y)$ of continuous $\epsilon$-maps from~$X$ to~$Y$
is an open subset of~$\cC(X,Y)$ since~$X$ is compact.
The set of natural numbers~$\N$ is considered to include~$0$. We will
use a standard metric~$d_\infty$ on the product space~$X^\N$, defined
by
\[d_\infty\left(\ux, \uy\right)=
\max_{i\in\N}\,\,\frac{\min(d(x_i,y_i),1)}{i+1}, \] so that
$d_\infty(\ux,\uy)\le 1/(k+2)$ if $x_i=y_i$ for $0\le i\le k$. To avoid
excessive notation, we will generally denote the metric on any metric
space indiscriminately by~$d$.
\section{Inverse limits and families of inverse limits}
\label{sec:brown}
Recall that if $X$ is a metric space and $f\colon X\raw X$ is continuous,
the \de{inverse limit} is the metric space
\begin{equation*}
\ilim(X, f) = \{\ux\in X^\N: f(x_{i+1}) = x_i \
\text{for all}\ i\in\N \} \subset X^\N.
\end{equation*}
Where there is no ambiguity regarding the self-map~$f$, we will use
the shorter notation $X_\infty$ for $\ilim(X, f)$. The projection
$\ux\mapsto x_k$ from~$X_\infty$ to the $k^\text{th}$ coordinate is
denoted $\pi_k\colon X_\infty\raw X$.
The \de{natural extension} of $f\colon X\raw X$ is the
\homeo\ $\hat{f}\colon X_\infty\raw X_\infty$ defined by \mbox{$\hat{f}(\ux) =
(f(x_0), x_0, x_1, \dots)$}. The self-map~$f$ is semi-conjugate to its
natural extension provided that~$f$ is onto, since $f\circ \pi_0 =
\pi_0\circ\hat{f}$ and~$\pi_0$ is onto if and only if~$f$ is. The
relationship between the dynamics of~$f$, the dynamics of~$\hat{f}$, and
the topology of~$\ilim(X, f)$ has been much
studied: we refer the reader to the book~\cite{ingrambook} and
the references therein for more information.
One simple fact that we need here is
that $\pi_0$ restricts to a bijection from the set
of periodic points of~$\hat{f}$ to the set of periodic points of~$f$.
If $f\colon X\raw X$ is a \homeo, then it is clear that $\pi_0\colon X_\infty\to
X$ is a homeomorphism which conjugates~$f$ and~$\hat{f}$. Morton
Brown~\cite{brown} shows that if $X$ is compact and $f$ is a
near-homeomorphism, then $X_\infty$ is still homeomorphic to~$X$.
As noted in the introduction, the parameterized BBM-construction
requires a parameterized version of Brown's Theorem applicable to
near-isotopies. Let~$I$ be a compact parameter space
and~$\{f_t\}_{t\in I}$ be a near-isotopy of a compact metric
space~$X$, and for each~$t\in I$ write $X_\infty^t$ for the inverse limit
$\ilim(X, f_t)$. Brown's theorem provides a homeomorphism $h_t\colon
X_\infty^t \raw X$ for each~$t$. The parameterized version of Brown's
theorem guarantees that the family~$\{h_t\}$ can be chosen to vary
continuously with~$t$: the easiest way to formulate this is using the
language of fat maps.
Recall that if $\{f_t\}_{t\in I}$ is a continuous family of self-maps
of~$X$, the corresponding \de{fat map} $F\colon X\times I \raw X\times I$ is
defined by
\begin{equation}\label{fat}
F(x,t) = (f_t(x), t).
\end{equation}
A function $G\colon X\times I \raw X\times I$ is called
\de{slice-preserving} if it has the property that
\mbox{$G(X\times\{t\})\subset X\times\{t\}$} for all $t\in I$: we
write $\cC^s(X\times I,X\times I)$ and $\cH^s(X\times I,X\times I)$
for the subsets of slice-preserving elements of $\cC(X\times I,X\times
I)$ and $\cH(X\times I,X\times I)$. Any \mbox{$F\in\cC^s(X\times I,
X\times I)$} can be written in the form~\eqref{fat}, with $f_t(x)$
the first component of $F(x,t)$.
\begin{remark}
\label{near-isotopy}
It follows immediately from the definitions that the family $\{f_t\}$
in $\cC(X,X)$ is a near-isotopy if and only if its fat map $F\colon X\times
I \raw X\times I$ lies in the closure of \mbox{$\cH^s(X\times I,
X\times I)$} in $\cC^s(X\times I, X\times I)$.
\end{remark}
The inverse limit $(X\times I)_\infty$ of the fat map $F\colon X\times I\to
X\times I$ provides a natural topology for the family of inverse
limits $X_\infty^t=\ilim(X, f_t)$. Specifically, for each \mbox{$t\in I$}
there is a natural embedding $\iota_t \colon X_\infty^t \to (X\times
I)_\infty$ given by $\iota_t(x_0, x_1, \ldots) = ((x_0,t), (x_1,t),
\ldots)$; moreover, \mbox{$(X\times I)_\infty$} is the disjoint union of the
subsets $\iota_t(X_\infty^t)$, and
\begin{equation}
\label{CD}
\begin{CD}
X_\infty^t @>\hat{f}_t>> X_\infty^t \\
@VV\iota_t V @VV\iota_t V\\
(X\times I)_\infty @>\hat{F}>> (X\times I)_\infty
\end{CD}
\end{equation}
commutes for each $t\in I$.
The parameterized version of Brown's theorem is:
\begin{theorem}\label{brownplus}
Let $\{f_t\}_{t\in I}$ be a near-isotopy of the compact metric space
$X$, and \mbox{$F\colon X\times I\raw X\times I$} be the corresponding fat
map. Then for all~$\epsilon>0$ there exists a \homeo\ $\beta\colon
(X\times I)_\infty\raw X\times I$ such that
\begin{enumerate}[(a)]
\item $\beta\circ \iota_t(X_\infty^t) = X\times
\{t\}$ for all $t\in I$, and
\item $d(\beta, \pi_0) < \epsilon$.
\end{enumerate}
\end{theorem}
\begin{proof}
The proof given here is an adaptation of
Ancel's short and elegant proof~\cite{ancel} of Brown's Theorem.
To simplify notation, write $Z = X\times I$ and $Z_\infty = (X\times
I)_\infty$. Observe that for each~$k\in\N$, the projection
$\pi_k\colon Z_\infty \raw Z$ is a $1/(k+2)$-map, since if
$\pi_k(\ux)=\pi_k(\uy)$ then $x_i=y_i$ for $0\le i\le k$.
Let~$\cF=\overline{\cE}$ be the closure in~$\cC(Z_\infty, Z)$ of
\[\cE = \{H\circ \pi_k\,:\,H\in \cH^s(Z,Z) \text{ and }k\in\N
\}.\] Observe that every~$\alpha\in\cF$ is onto (each~$\pi_k$ is onto,
since $F$ is a near-homeomorphism, so that~$\alpha$ is the uniform
limit of onto maps defined on a compact space), and moreover satisfies
$\alpha\circ\iota_t(X_\infty^t) = X\times\{t\}$ for all~$t$ (for
$H\circ\pi_k\circ\iota_t(\ux)=H(x_k,t) \in X\times\{t\}$, and $x_k$
takes every value in~$X$ since~$f_t$ is onto). We will show that
injections (and therefore homeomorphisms which satisfy~(a) of the
theorem statement) are dense in~$\cF$: this will complete the proof,
with (b) following because~$\pi_0\in\cF$.
Given $\delta > 0$,
let $\cF(\delta) = \cF\cap \cC_\delta(Z_\infty, Z)$, an open
subset of~$\cF$. We shall show that it is also dense in~$\cF$. For
this, it suffices to show that for every $H\circ\pi_k\in\cE$ and every
$\eta>0$, there is some $G\in\cF(\delta)$ with $d(G,
H\circ\pi_k)<\eta$. To find such a~$G$, pick $j\ge k$ with
$1/(j+2)<\delta$, and observe that
\[H\circ \pi_k = H \circ F^{j-k} \circ \pi_j.\]
By Remark~\ref{near-isotopy}, $F^{j-k}$ can be approximated
arbitrarily closely by elements of $\cH^s(Z,Z)$, so in particular
there is some $H'\in \cH^s(Z,Z)$ with \mbox{$d(H\circ H'\circ \pi_j, H\circ
\pi_k)<\eta$}. Then $G=H\circ H'\circ\pi_j$ lies in~$\cF(\delta)$
since $\pi_j$ is a $1/(j+2)$-map and $H\circ H'$ is a homeomorphism.
By the Baire category theorem on the complete space~$\cF$, the set
$\bigcap_{n\ge 1}\,\cF(1/n)$ is dense in~$\cF$. However, elements of
this set are injective since they are $1/n$-maps for all $n\ge 1$,
completing the proof that injections are dense in~$\cF$ as required.
\end{proof}
The following corollary will be used in the parameterized Barge-Martin
construction.
\begin{corollary}\label{ourthing}
Let~$\{f_t\}_{t\in I}$ be a near-isotopy of the compact metric space~$X$, and
let the natural extension of $f_t$ to the inverse limit $X_\infty^t$
be denoted $\hat f_t$. Then for all~$\epsilon>0$ there exist
homeomorphisms $h_t\colon X_\infty^t \raw X$ for each~$t$ such that
\begin{enumerate}[(a)]
\item $h_t\circ \hat{f}_t\circ h_t\I$ is a continuous family of
homeomorphisms of~$X$, and
\item $d(h_t,\pi_{0,t})<\epsilon$ for all~$t$, where~$\pi_{0,t}\colon X_\infty^t\raw X$
is projection to the $0^\text{th}$ coordinate.
\end{enumerate}
Moreover, if~$X$ is a compact manifold and $\bd X$ is totally
invariant under~$f_t$ for all~$t$, then
\[\bd X_\infty^t = \{\ux\in X_\infty^t\,:\,x_0\in\bd X\}.\]
\end{corollary}
\begin{proof}
Given~$\epsilon>0$, let $\beta \colon (X\times I)_\infty \to X\times I$ be
a \homeo\ satisfying~(a) and~(b) of Theorem~\ref{brownplus} (and
constructed as in the proof of the theorem). Define
$h_t\colon X_\infty^t\raw X$ by $h_t = p_1\circ\beta\circ\iota_t$, where
$p_1\colon X\times I \raw X$ is projection onto the first coordinate: thus
$\beta\circ\iota_t(\ux) = (h_t(\ux),t)$. Then each~$h_t$ is a
homeomorphism: it is injective and continuous because $\iota_t$,
$\beta$, and $p_1|_{X\times\{t\}}$ are; and it is surjective because
\mbox{$\beta\circ\iota_t(X_\infty^t)=X\times\{t\}$}. Now
\begin{eqnarray*}
h_t\circ \hat{f}_t\circ h_t\I(x) &=& p_1 \circ (\beta\circ\iota_t)
\circ \hat{f}_t \circ (\beta\circ\iota_t)\I(x,t) \\
&=& p_1 \circ \beta \circ \hat{F}\circ \beta\I(x,t)
\end{eqnarray*}
by~\eqref{CD}, and so depends continuously on~$x$ and~$t$. Hence
$h_t\circ \hat{f}_t\circ h_t\I$ is a continuous family of
homeomorphisms of~$X$ as required.
For~(b), let~$\ux\in X_\infty^t$ and observe that
\[d(h_t(\ux),\pi_{0,t}(\ux)) = d(h_t(\ux), x_0) = d((h_t(\ux),t), (x_0,t)) =
d(\beta\circ\iota_t(\ux), \pi_0\circ\iota_t(\ux))<\epsilon\] since
$d(\beta,\pi_0)<\epsilon$.
For the final statement, if~$X$ is a compact manifold and $\bd X$ is
totally invariant under~$f_t$, then every element $\ux$ of
$X_\infty^t$ either has $x_i\in \bd X$ for all~$i$, or $x_i\not\in\bd
X$ for all~$i$. Now
\[\bd X_\infty^t = h_t\I(\bd X) = \{\ux\in
X_\infty^t\,:\,\beta\circ\iota_t(\ux)\in \bd X\times I\}.\]
Using the notation from the proof of Theorem~\ref{brownplus}, every
$\alpha=H\circ\pi_k\in\cE$ satisfies that \mbox{$\alpha\circ\iota_t(\ux) =
H(x_k,t)$} lies in $\bd X\times I$ if and only if $x_k\in\bd X$, which
establishes the result.
\end{proof}
\section{The parameterized Barge-Martin construction}
\label{sec:BBM}
The parameterized BBM construction starts with a family of maps
defined on a \BR of a compact manifold, with the additional property
that the family unwraps. We begin by defining these and related terms.
Let~$M$ be a compact manifold with non-empty boundary $\bd M$. A
subset~$E$ of~$M$ is said to be a \de{\BR}of~$M$ if there is a
continuous map $\Psi\colon \bd M\times[0,1]\raw M$ with the following
properties:
\begin{enumerate}[(1)]
\item $\Psi$ restricted to $\bd M \times [0, 1)$ is a
\homeo\ onto $M - E$,
\item $\Psi(\eta, 0) = \eta$, for all $\eta\in\bd M$, and
\item $\Psi(\bd M \times \{1\}) = E$.
\end{enumerate}
An alternative characterization is that $\Psi$ decomposes~$M$ into a
continuously varying family of arcs $\{\gamma_\eta\}_{\eta\in\bd M}$
defined by $\gamma_\eta(s)= \Psi(\eta,s)$, whose images are mutually
disjoint except perhaps at their final points. The arc $\gamma_\eta$
has initial point~$\eta$, final point in~$E$, and interior disjoint
from~$E$. Thus, in particular, each point $x\in M-E$ can be written
uniquely as $x=\Psi(\eta,s)$ for some $\eta\in\bd M$ and $s\in[0,1)$.
The example of interest in the applications described in
Section~\ref{apps} is when~$E$ is a graph embedded as the spine of a
surface~$M$ with boundary (Figure~\ref{spine}).
\begin{figure}
\caption{The figure 8, with a spike, as a \BR of the pair of
pants. Each valence~$k$ vertex is the endpoint of~$k$ of the arcs
$\gamma_\eta$.}
\label{spine}
\end{figure}
Associated to~$\Psi$ is the strong deformation retract
$S\colon M\times[0,1]\raw M$ of $M$ onto~$E$ defined by $S(\Psi(\eta,s),t) =
\Psi(\eta, s+t(1-s))$. The corresponding retraction $R\colon M\raw E$ is
defined by
\[R(\Psi(\eta,s)) = \Psi(\eta, 1).\]
If~$E$ is a \BR of $M$ with associated retraction $R\colon M\raw E$, then a
continuous map $f\colon E\raw E$ is said to \de{unwrap in $M$} if there is a
homeomorphism $\barf\colon M\raw M$ such that
\begin{enumerate}[(1)]
\item $R\circ\barf_{\vert E} = f$, and
\item There is some~$k>0$ such that $\barf^k$ is the identity on~$\bd
M$.
\end{enumerate}
Such a homeomorphism~$\barf$ is called an \de{unwrapping} of~$f$. A
continuous family $\{f_t\}_{t\in I}$ in $\cC(E,E)$ is said to
\de{unwrap in~$M$} if it has a continuous family $\{\barf_t\}_{t\in I}$ of
unwrappings.
Finally, we say that~$f\in\cC(X,X)$ \de{stabilizes at
iterate~$m$} if $f^{m+1}(X)=f^m(X)$ (there is no requirement for~$m$
to be the least such integer). If $f$ stabilizes at iterate~$m$ then
\[\ilim(X, f) = \ilim(f^m(X), f_{\vert f^m(X)}),\]
and the projection $\pi_k\colon \ilim(X, f)\to X$ has image $f^m(X)$ for
all~$k$.
The parameterized BBM construction can now be stated: the
unparameterized version is obtained on taking the parameter space~$I$
to be a point. Roughly speaking, the theorem states that a continuous
family of continuous self-maps~$f_t$ of~$E$ which unwraps in~$M$ and
stabilizes at a common iterate gives rise to a continuous family of
self-homeomorphisms~$\Phi_t$ of~$M$ having global
attractors~$\Lambda_t$ on which the dynamics of~$\Phi_t$ is conjugate
to that of~$\hat{f}_t$. Moreover, the dynamics of $\Phi_t$
on~$\Lambda_t$ is semi-conjugate to that of $f_t$ on~$\bigcap_{k\ge 0}
f_t^k(E)$ by a semi-conjugacy which can be extended to a continuous
map $M\raw M$ arbitrarily close to the identity; and the
attractors~$\Lambda_t\subset M$ vary continuously with~$t$.
\begin{theorem}
\label{BBMparam}
Let~$M$ be a compact manifold with boundary $\bd M$, $E\subset M$ be a
\BR of~$M$, and $\{f_t\}_{t\in I}$ be a continuous family in
$\cC(E,E)$ which unwraps in~$M$. Suppose moreover that there is
some~$m>0$ such that every~$f_t$ stabilizes at iterate~$m$. Then for
each $\epsilon>0$ there is a continuous family $\{\Phi_t\}_{t\in I}$
in $\cH(M,M)$ such that
\begin{enumerate}[(a)]
\item For each~$t\in I$ there is a compact $\Phi_t$-invariant
subset~$\Lambda_t$ of~$M$ with the following properties:
\begin{enumerate}[(i)]
\item $\Phi_t|_{\Lambda_t}\colon \Lambda_t\raw\Lambda_t$ is topologically
conjugate to $\hat{f}_t\colon \ilim(E, f_t)\raw\ilim(E, f_t)$.
\item If $x\in M-\bd M$, the $\omega$-limit set $\omega(x,\Phi_t)$ is
contained in~$\Lambda_t$.
\item There is some~$k>0$ such that each $\Phi_t^k$ is the identity
on~$\bd M$.
\end{enumerate}
\item There is a continuous family $\{g_t\}_{t\in I}$ in $\cC(M,M)$
with $d(g_t, \id)<\epsilon$, $g_t(\Lambda_t)=f_t^m(E)$, and $f_t\circ
g_t|_{\Lambda_t} = g_t\circ \Phi_t|_{\Lambda_t}$.
\item For each~$t$, the semiconjugacy $g_t$ restricts to a bijection
from the set of periodic points of $\Phi_t$ in~$\Lambda_t$ to the
set of periodic points of~$f_t$.
\item The attractors~$\Lambda_t$ vary Hausdorff continuously with
$t\in I$.
\end{enumerate}
\end{theorem}
\begin{proof}
Let $\Psi\colon \bd M \times[0,1]\raw M$ be the map expressing~$E$ as a \BR
of~$M$. Let $\phi\colon [0,1]\raw[0,1]$ be defined by $\phi(s)=2s$ for
$s\in[0,1/2]$ and $\phi(s)=1$ for $s\in[1/2,1]$, and define
$\Upsilon\colon M\raw M$ by $\Upsilon(\Psi(\eta,s)) = \Psi(\eta, \phi(s))$,
which is well defined since $\phi(1)=1$. Because $\phi$ is the uniform
limit of homeomorphisms $[0,1]\raw[0,1]$, the map $\Upsilon$ is a
near-homeomorphism.
Write $N(E)=\Psi(\bd M\times[1/2,1])=\Upsilon\I(E)$: thus $N(E)$ is a
compact neighborhood of~$E$ which is homeomorphic to~$M$, by the
homeomorphism $S\colon M\raw N(E)$ defined by $S(\Psi(\eta,s))=\Psi(\eta,
(s+1)/2)$.
Let $\{\barg_t\}$ be the unwrapping of the family~$\{f_t\}$, and
define the family $\{\barf_t\}$ in $\cH(N(E), N(E))$ by $\barf_t = S
\circ \barg_t \circ S\I$. Extend $\{\barf_t\}$ to a family in
$\cH(M,M)$ along the arc decomposition given by~$\Psi$: that is, if
$\lambda_t\colon \bd M\raw\bd M$ is the homeomorphism defined by
$\barf_t(\Psi(\eta,1/2)) = \Psi(\lambda_t(\eta), 1/2)$, then
$\barf_t(\Psi(\eta, s))=\Psi(\lambda_t(\eta), s)$ for
\mbox{$s\in[0,1/2]$}.
Now let $H_t = \Upsilon \circ \barf_t\colon M \raw M$ for each $t\in
I$. Then $\{H_t\}$ is a near-isotopy, since $\Upsilon$ is a
near-homeomorphism and each $\barf_t$ is a homeomorphism. Moreover,
for each~$t\in I$ we have:
\begin{enumerate}[(I)]
\item $H_t$ is equal to~$f_t$ on~$E$ (since $\Upsilon$ and $R$
restrict to the same retraction $\Psi(\eta,s)\mapsto\Psi(\eta,1)$
of~$N(E)$ onto~$E$, and $R\circ\barf_t|_{E}=f_t$);
\item for all $x\in M-\bd M$, there is some~$n\ge 0$ with $H_t^n(x)\in
E$ (since $H_t(\Psi(\eta,s))=\Psi(\eta',2s)$ for some~$\eta'$ if
$s\le 1/2$, and $H_t(\Psi(\eta,s))\in E$ if $s\ge 1/2$); and
\item there is some~$k>0$ such that $H_t^k=\id$ on $\bd M$ (since
$H_t=\barf_t=\barg_t$ on $\bd M$).
\end{enumerate}
Let $h_t\colon M_\infty^t = \ilim(M, H_t) \raw M$ be the homeomorphisms
given by Corollary~\ref{ourthing} (and constructed as in the proof of
the corollary), so that $\{\Phi_t\} = \{h_t\circ \hat{H}_t \circ
h_t\I\}$ is a continuous family in~$\cH(M,M)$, and $d(h_t,
\pi_{0,t})<\epsilon$ for all~$t$. We now show that~(a), (b), (c), and
(d) in the theorem statement hold for this family~$\{\Phi_t\}$.
\noindent\textit{Property (a):}
By~(I) above, there is a homeomorphic copy~$\Omega_t$ of
$\ilim(E, f_t)$ embedded in $M_\infty^t$, namely
\[\Omega_t = \{\ux\in M_\infty^t\,:\, x_k\in E \text{ for all }k\in\N\},\]
on which the restriction of~$\hat{H}_t$ is topologically conjugate to
$\hat{f}_t$. Hence $\Lambda_t = h_t(\Omega_t)$ is a compact
$\Phi_t$-invariant subset of~$M$ with $\Phi_t|_{\Lambda_t}$
topologically conjugate to $\hat{f}_t\colon \ilim(E, f_t) \raw \ilim(E,
f_t)$.
By~(II), $\omega(\ux, \hat{H}_t)\subset \Omega_t$ for all $\ux\in
M_\infty^t$ with $x_0\not\in\bd M$: that is, by the final statement of
Corollary~\ref{ourthing}, for all $\ux\not\in\bd M_\infty^t$. Hence
$\omega(x, \Phi_t) \subset \Lambda_t$ for all~$x\not\in\bd
M$. Similarly, (III) gives that $\hat{H}_t^k(\ux) = \ux$ for all $\ux\in
M_\infty^t$ with $x_0\in\bd M$ (i.e. for all~$\ux\in\bd M_\infty^t$),
from which it follows that $\Phi_t^k$ is the identity on $\bd M$.
\noindent\textit{Property (b):}
Let $g_t = \pi_{0,t}\circ h_t\I \colon M\to M$: that is, using the
notation of Theorem~\ref{brownplus} and Corollary~\ref{ourthing},
$g_t(x)=y_0$, where $\beta\I(x,t)=((y_0,t), (y_1,t), \ldots)$. It
follows from the continuity of~$\beta\I$ that $\{g_t\}_{t\in I}$ is
a continuous family in $\cC(M,M)$. Moreover, $d(g_t, \id)<\epsilon$
for all~$t$; and $g_t(\Lambda_t) = g_t\circ h_t(\Omega_t) =
\pi_{0,t}(\Omega_t) = f_t^m(E)$. Now
\[g_t\circ\Phi_t = \pi_{0,t}\circ\hat{H}_t\circ h_t\I = H_t\circ
\pi_{0,t}\circ h_t\I = H_t\circ g_t \colon M \raw M,\]
so that $g_t\circ\Phi_t|_{\Lambda_t} = f_t\circ g_t|_{\Lambda_t}$
by~(I).
\noindent\textit{Property (c):}
This is an immediate consequence of the fact that $\pi_{0,t}$
restricts to a bijection from the set of periodic points of~$\hat
H_t$ to the set of periodic points of $H_t$, and $\Per(\Phi_t) =
h_t(\Per(\hat H_t))$.
\noindent\textit{Property (d):}
To show the Hausdorff continuity of $t\mapsto \Lambda_t$, we
first show that the function $t\mapsto\Omega_t \subset M_\infty^t
\subset M^\N$ is continuous as a function $I\raw C(M^\N)$ into the
set of compact subsets of $M^\N$ with the Hausdorff metric. For
this, it suffices to show that for all~$\delta>0$ there is
some~$\eta>0$ such that if $d(s,t)<\eta$ then every point of
$\Omega_t$ is within~$\delta$ of a point of $\Omega_s$.
Recall that all of the~$f_t$ stabilize at iterate~$m$ so that
$f_t\colon f_t^m(E)\raw f_t^m(E)$ is surjective, and
\[\Omega_t = \{\ux\in M_\infty^t\,:\, x_k\in f_t^m(E) \text{ for all }
k\in\N\}.\]
Pick $J>1/\delta$ and $\eta>0$ such that
\begin{equation}
\label{uniform}
d(s,t) < \eta \implies d(f_s^j, f_t^j)<\delta \qquad \text{for } 1
\le j < J+m
\end{equation}
using the (uniform) continuity of $t\mapsto f_t^j$ for each~$j$.
Let~$s,t\in I$ with $d(s,t)<\eta$. Let $\ux\in\Omega_t$, so that
$\ux=(x_0,x_1,x_2,\ldots)$ with $x_k \in f_t^m(E)$ and
$f_t(x_{k+1})=x_k$ for all~$k$. Let~$y\in E$ be such that $x_J =
f_t^m(y)$. Define an element \mbox{$\uy=(y_0,y_1,y_2,\ldots)$} of
$(f_s^m(E))^\N$ by setting $y_j = f_s^m(f_s^{J-j}(y))$ for $0\le j\le
J$, and then choosing $y_j\in f_s^m(E)$ for $j>J$ inductively to
satisfy $f_s(y_{j})=y_{j-1}$.
Then $\uy\in\Omega_s$, and $d(\ux, \uy)<\delta$ as required, since
\[d(x_j,y_j) = d(f_t^{m+J-j}(y), f_s^{m+J-j}(y)) < \delta\qquad \text{ for
} 0\le j\le J \]
by~\eqref{uniform}, and $1/(j+2) < \delta$ for $j>J$.
To complete the proof that $t\mapsto\Lambda_t$ is continuous, recall
(using the notation of the proof of Corollary~\ref{ourthing}) that
$\Lambda_t = h_t(\Omega_t) =
p_1\circ\beta\circ\iota_t(\Omega_t)\subset M$. It is required to show
that for all~$\delta>0$ there is some~$\eta>0$ such that, if
$d(s,t)<\eta$, then every point of~$\Lambda_t$ is within~$\delta$ of a
point of~$\Lambda_s$. Now the map
\[K\colon \{(\ux,t)\in M^\N\times I \,:\, \ux\in\Omega_t\} \to (M\times
I)_\infty\]
defined by $K(\ux,t)=\iota_t(\ux)$ is continuous, so we can pick
$\xi>0$ such that if $d((\ux,t),(\uy,s))<\xi$ then
$d(p_1\circ\beta\circ K(\ux,t), p_1\circ\beta\circ K(\uy, s)) <
\delta$. By the first part of the proof, there is some~$\eta>0$ such
that if $d(s,t)<\eta$ and $\ux\in\Omega_t$, then there is some
$\uy\in\Omega_s$ with $d((\ux,t), (\uy, s)) < \xi$.
Then, given $s,t\in I$ with $d(s,t)<\eta$ and $x\in\Lambda_t$, write
$x=p_1\circ\beta\circ K(\ux,t)$ for some $\ux\in\Omega_t$. Let
$\uy\in\Omega_s$ with $d((\ux,t),(\uy,s))<\xi$: then
$y=p_1\circ\beta\circ K(\uy,s)$ lies in~$\Lambda_s$ with
$d(x,y)<\delta$ as required.
\end{proof}
\begin{remarks}
\label{BBM-rks}
\begin{enumerate}[(a)]
\item The proof of part~(a) of the theorem does not require the family to
stabilize uniformly.
\item If condition~(2) in the definition of an unwrapping is removed
(so that an unwrapping $\barf\in\cH(M,M)$ is only required to
satisfy $R\circ \barf_{\vert E}=f$), then the theorem still holds
except for part~(a)(iii).
\item By part (a)(ii), if each boundary component of~$M$ is collapsed
to a point then each~$\Phi_t$ becomes a homeomorphism of the
resulting space (which in the surface case is a surface without
boundary), for which every point except for a finite number of
repelling periodic points has $\omega$-limit set contained
in~$\Lambda_t$.
\end{enumerate}
\end{remarks}
\section{Applications}
\label{apps}
\subsection{Unimodal dynamics}
\label{sec:unimodal}
The inverse limits of members of unimodal families of interval
endomorphisms such as the tent family
\begin{equation*}
T_s(x) = \min\{sx, s(1 - x)\}, \qquad 1\le s\le 2
\end{equation*}
and the quadratic family
\[
f_a(x) = a-x^2, \qquad -1/2 \le a\le 2
\]
have been intensively studied: a major recent advance is the proof by
Barge, Bruin, and {\v S}timac of Ingram's conjecture, that if \mbox{$1
\leq t < s \leq 2$} then the inverse limits $\ilim(I, T_t)$ and
$\ilim(I, T_s)$ are not homeomorphic (see \cite{bargebruinstimac} and
the additional references therein).
Barge and Martin~\cite{bargemartin} showed that any map of the
interval unwraps in the disk, and their construction gives a
continuous family of unwrappings of any unimodal family. Thus
Theorem~\ref{BBMparam} provides a continuously varying family
\mbox{$\Phi_s:D^2\raw D^2$} of \homeos\ of the disk, with $\Phi_s$
having $\ilim(I, T_s)$ as a global attracting set. Moreover, since the
family $T_s$ uniformly stabilizes at iterate one, these attractors
vary continuously in the Hausdorff topology, although no two are
homeomorphic. The family $\Phi_s$ has dynamics which increases
monotonically with~$s$: for example, the number of periodic orbits of
each period increases with~$s$, as does the topological entropy
$\htop(\Phi_s)=\htop(T_s)=\log(s)$.
Extending the range of parameters to $s\in [0,2]$ illustrates the need
for the stabilization hypothesis in Theorem~\ref{BBMparam}. For
$s\in[0,1)$ we have $T_s^k([0,1])\searrow \{0\}$ as $k\raw\infty$,
so these $T_s$ never stabilize and $\ilim(I, T_s)$ is a
point. On the other hand, $T_1$ is the identity on
$T_1([0,1])=[0,1/2]$, so $T_1$ stabilizes at iterate~1 and $\ilim(I,
T_1)$ is an arc. Hence there is a discontinuous change
in the attractor at~$s=1$.
Theorem~\ref{BBMparam} may likewise be applied to the quadratic family,
yielding a family of plane homeomorphisms with monotonically
increasing dynamics.
All of the constructions in this paper are strictly in the
$C^0$-category. Embeddings of the inverse limits of members of the
tent family as attractors of \homeos\ with varying degrees of
increased regularity have been carried out
in~\cite{mis,szczechla,barge,bruin}. The fascinating question of
which inverse limits and families of inverse limits can be embedded as
attractors for $C^r$-diffeomorphisms is little understood. Barge and
Holte~\cite{bargeholte} show that for certain parameter ranges the
real H\'enon attractor is conjugate to an inverse limit from the quadratic
family. For example, for hyperbolic values of~$a$ and small
enough~$b$, the H\'enon map $H_{a,b}$ restricted to its attractor is
conjugate to the inverse limit $\ilim(I,f_a)$. As has been pointed out
by several authors (for example~\cite{HolWhi,HolWil}), this cannot be
extended to a uniform band of small values of $b$ in parameter space,
since bifurcation curves for periodic orbits in the H\'enon family
cross arbitrarily close to~$b=0$. In fact, the ``antimonotonicity"
results of~\cite{KKY} show that even if it were possible to embed the
inverse limit of the unimodal family as a family of diffeomorphisms of
the plane, it is not possible to do so with high regularity. Much of
the work done to understand infinitely renormalizable H\'enon
maps~\cite{dCLM,LyuMar2} is based on comparisons between them and the
inverse limits of infinitely renormalizable unimodal maps.
The relationship of the inverse limit of the complex quadratic family
to the complex H\'enon family was used to great advantage
in~\cite{HOV1,HOV2}. More generally, the dynamics of families of
homeomorphisms of $\R^2$ and $\C^2$ are often related to the inverse
limits of 1-dimensional endomorphisms of graphs and ($\R$-)trees and
to branched surfaces (see~\cite{eqgpa}).
\subsection{Rotation sets}
\label{sec:rotation}
If $\Phi\colon M\raw M$ is a map of a compact, smooth manifold which acts as
the identity on $H_1(M;\R)$, one may define the rotation vector under
$\Phi$ of a point $x\in M$. The usual way to do this is to lift $\Phi$
to $\tPhi$ on the universal free Abelian cover $\tM_A$, and then use
the lifts of a basis of closed one-forms to measure the average
displacements of points $\tx\in\tM_A$ under $\tPhi$ (see, for example,
\S3.3
of~\cite{bdabelian}). Because $\Phi$ acts as the identity on
$H_1(M;\R)$, $\tPhi$ commutes with the deck transformations of
$\tM_A$, and so the rotation vector is independent of the choice of
lift $\tx\in\tM_A$ of a point $x\in M$. However, the rotation vector
does depend in a simple way on the choice of lift $\tPhi$. After
fixing a lift $\tPhi$ one defines the rotation set $\rho(\tPhi)$ (or
$\rho(\Phi)$ if a lift is understood) as the collection of rotation
vectors of points $\tx\in\tM_A$.
Now consider a self-map $f\colon E\raw E$ of a \BR $E$ of~$M$. Assume that
$f$ acts as the identity on $H_1(E;\R)$ and unwraps to a homeomorphism
\mbox{$\barf\colon M\raw M$} with $\barf_{\vert \bd M} = \id$. Let
$\Phi\colon M\raw M$ be the BBM-extension \homeo\ obtained from
Theorem~\ref{BBMparam} by taking the parameter space~$I$ to be a point
and $\epsilon < \diam(M)/10$. Since $E$ is a strong deformation
retract of $M$, we also have that $\Phi_* = \id$ on $H_1(M;\R)$. Thus
both the rotation sets $\rho(\Phi)$ and $\rho(f)$ can be defined. They
are essentially the same, provided that care is taken to choose
corresponding lifts as will now be described.
By construction $\Phi_{\vert \bd M} = \id$, and we choose the lift
$\tPhi$ to $\tM_A$ which has \mbox{$\tPhi_{\vert \bd \tM_A} = \id$}.
Now we also lift the map $g\colon M\raw M$ with $\td(g,\id) < \epsilon$
given by Theorem~\ref{BBMparam}(b) to a $\tg\colon \tM_A\raw\tM_A$ with
$\td(\tg,\id) < \epsilon$. If $\tE\subset\tM_A$ and
$\tLambda\subset\tM_A$ are the lifts of $E$ and $\Lambda$ inside the
universal free Abelian cover, we choose the lift $\tf\colon \tE\raw\tE$ of
$f$ so that $\tf\circ\tg(\tz) = \tg \circ \tPhi(\tz)$ for all
$\tz\in\tLambda$. Now the rotation set of a map is the same as its
rotation set restricted to the recurrent set. Since by the BBM
construction the recurrent set of $\Phi$ is contained in $\Lambda\cup
\bd M$, we have $\rho(\tPhi) = \rho(\tPhi_{\vert \tLambda}) \cup
\{0\}$. Finally, since $\td(\tg,\id) < \epsilon$ we get that for any
$\tz\in\tLambda$, $d(\tPhi^k(\tz), \tf^k(\tg(\tz))) < \epsilon$ for
all $k\in\Z$. Thus since $\tg(\tLambda) = \tE$, we have
$\rho(\tPhi_{\vert \tLambda}) = \rho(\tf)$, and so
\begin{equation}\label{rot}
\rho(\tPhi) = \rho(\tf) \cup \{0\}.
\end{equation}
By Theorem~\ref{BBMparam}, \eqref{rot} also holds for parameterized
families and the attractors in the BBM-extension family vary
continuously provided that the family stabilizes uniformly. An
application is given in the authors' forthcoming paper {\em New
rotation sets in a family of torus homeomorphisms}, in which we
consider a family of maps~$k_t$ of the figure eight $E$ embedded as
the spine of the two torus minus a disk. The resulting BBM-extension
family is extended to a family of torus \homeos~$K_t$ as in
Remark~\ref{BBM-rks}~(c). The intricate sequence of bifurcations of the
rotation sets of the family~$k_t$ can be described in detail using
kneading theory techniques, and by~\eqref{rot} the rotation sets of
the family~$K_t$ of torus homeomorphisms undergo the same
bifurcations.
\subsection{The standard family of circle maps}
\label{sec:standard}
The standard family of degree-one circle maps was introduced by
Arnol'd~\cite{arnold}. This two-parameter family
\mbox{$f_{b,\omega}\colon S^1\raw S^1$}, for $b\geq 0$ and $0 \leq \omega
\leq 1$, is defined via its lifts $\tf_{b,\omega}\colon \R\raw\R$, which are
given by
\begin{equation*}
\tf_{b,\omega}(x) = x + \omega + \frac{b}{2\pi}\sin(2 \pi x).
\end{equation*}
The dynamics and bifurcations of this family have been much studied
(see for example~\cite{bdcircle,brucks,rempe}). The main objects of
interest in its parameter space are the so-called Arnol'd tongues
$T_r$ given by
\begin{equation*}
T_r = \{(b,\omega) \,:\, r \in\rho(f_{b,\omega})\}.
\end{equation*}
The core circle of an annulus $A$ is a \BR of~$A$, and the family
$f_{b,\omega}$ unwraps in~$A$. Further, since each $f_{b,\omega}$ is
onto, the family uniformly stabilizes at iterate zero. Thus, after
restricting to the compact parameter space $b\in[0,b^*]$ for some
large~$b^*$, Theorem~\ref{BBMparam} yields a continuous family of
annulus \homeos\ $\Phi_{b,\omega}$ each having an attracting set
$\Lambda_{b,\omega}$ homeomorphic to $\ilim(S^1, f_{b,\omega})$ and,
by \eqref{rot}, $\rho(\Phi_{b,\omega}) = \rho(f_{b,\omega}) \cup
\{0\}$.
By composing each $\Phi_{b,\omega}$ with an appropriate lateral push
on and near the boundary we can obtain a new family of
\homeos\ $\Phi_{b,\omega}^0$ such that the rotation numbers of each
$\Phi_{b,\omega}^0$ restricted to $\bd A$ are contained in
$\rho(f_{b,\omega})$. We then have that
\begin{equation*}
\rho(\Phi_{b,\omega}^0) = \rho(f_{b,\omega})
\end{equation*}
for all $(b,\omega)\in[0,b^*]\times[0,1]$.
When $b\leq 1$, each $f_{b,\omega}$ is a \homeo\ and so the attractor
of $\Phi_{b,\omega}^0$ is an invariant circle on which the dynamics is
topologically conjugate to $f_{b,\omega}$. When $b> 1$, however, the
Arnol'd tongues $T_r$ for rational $r$ begin to overlap and for
irrational $r$, $T_r$ opens from a Lipschitz curve into a
tongue. These changes are accompanied by an elaborate sequence of
bifurcations which are all shared by the family $\Phi_{b,\omega}^0$.
\affiliationone{
Philip Boyland\\
Department of Mathematics \\
University of Florida \\
372 Little Hall\\
Gainesville, FL 32611-8105, USA
\email{[email protected]}
}
\affiliationtwo{
Andr\'e de Carvalho\\
Departamento de Matem\'atica Aplicada\\
IME-USP\\
Rua Do Mat\~ao 1010\\
Cidade Universit\'aria\\
05508-090 S\~ao Paulo SP, Brazil
\email{[email protected]}
}
\affiliationthree{
Toby Hall\\
Department of Mathematical Sciences\\
University of Liverpool\\
Liverpool L69 7ZL, UK
\email{[email protected]}
}
\end{document} |
\begin{document}
\title{Newform Eisenstein congruences of local origin}
\begin{center}\textit{In memory of Lynne Walling.}\end{center}
\begin{abstract}
We give a general conjecture concerning the existence of Eisenstein congruences between weight $k\geq 3$ newforms of square-free level $NM$ and weight $k$ new Eisenstein series of square-free level $N$. Our conjecture allows the forms to have arbitrary character $\chi$ of conductor $N$. The special cases $M=1$ and $M=p$ prime are fully proved, with partial results given in general. We also consider the relation with the Bloch-Kato conjecture, and finish with computational examples demonstrating cases of our conjecture that have resisted proof.
\end{abstract}
\section{Introduction}
The theory of Eisenstein congruences has a rich and beautiful history, beginning with Ramanujan's remarkable observation that the Fourier coefficients $\tau(n)$ of the discriminant function: \[\Delta(z) = q\prod_n(1-q^n)^{24} = \sum_{n\geq 1}\tau(n)q^n \in S_{12}(\text{SL}_2(\mathbb{Z}))\] satisfy $\tau(n)\equiv \sigma_{11}(n) \bmod 691$ for all $n\geq 1$ (here $\sigma_{11}(n) = \sum_{d\mid n}d^{11}$). Intuitively, this family of congruences is explained via a congruence between two modular forms of weight $12$; the cusp form $\Delta$ and the Eisenstein series $E_{12}$. The significance of the modulus $691$ is that it divides the numerator of $-\frac{B_{12}}{24}$, the constant term of $E_{12}$ (so that $E_{12}$ is a cusp form mod $691$).
Since Ramanujan, many other congruences have been found between cusp forms and Eisenstein series, modulo other interesting primes. For example, one can vary the weight of the forms and find congruences whose moduli divide the numerators of other Bernoulli numbers. The existence of such congruences has been a key tool in the proofs of various important results in Algebraic Number Theory, e.g. the Herbrand-Ribet Theorem, relating the $p$-divisibility of Bernoulli numbers with the Galois module structure of $\text{Cl}(\mathbb{Q}(\zeta_p))[p]$. (see \cite{ribet_1976}).
Similarly, varying the levels and characters of our forms produces even more congruences. This time the moduli are observed to divide numerators of generalised Bernoulli numbers and special values of local Euler factors of Dirichlet $L$-functions. In the latter case such congruences are often referred to as being of ``local origin". The papers \cite{dummigan_2007}, \cite{dummigan_fretwell_2014}, \cite{billerey_menares_2016}, \cite{billerey_menares_2017} and \cite{spencer_2018} contain thorough discussions of such congruences. The existence of these congruences is linked to special cases of the Bloch-Kato Conjecture, a far reaching generalisation of the Herbrand-Ribet Theorem, the Analytic Class Number Formula and the Birch Swinnerton-Dyer Conjecture. This conjecture implies links between $p$-divisibility of special values of certain motivic $L$-functions and $p$-torsion in certain Bloch-Kato Selmer groups.
More generally, the theory of Eisenstein congruences has been extended to various families of automorphic form, although the landscape is still highly conjectural \cite{harder}. Roughly speaking, if $G/\mathbb{Q}$ is a reductive group then one expects to observe congruences between Hecke eigenvalues coming from cuspidal automorphic representations for $G(\mathbb{A}_{\mathbb{Q}})$ and automorphic representations that are parabolically induced from Levi subgroups of $G$. The moduli of such congruences are also predicted to arise from special values of certain motivic $L$-functions (related to the particular Levi subgroup considered), in direct comparison with the Bloch-Kato conjecture. For detailed discussion of results and conjectures in this direction, see \cite{bergstrom}. In the special case of $\text{GL}_2/\mathbb{Q}$ we recover exactly the Eisenstein congruences mentioned above, the relevant $L$-functions being Dirichlet $L$-functions (possibly incomplete, hence the appearance of local Euler factors). It is expected that proving such congruences will provide key insights into high rank cases of the Bloch-Kato Conjecture.
In this paper we will return to the ``classical" case of $\text{GL}_2$ Eisenstein congruences, but focus instead on the existence of newforms that satisfy such congruences. Progress has been made on this question in the case of trivial character (\cite{billerey_menares_2016}, \cite{dummigan_fretwell_2014}), and so we consider the more general case of forms with arbitrary character of square-free conductor. To do this, we discuss the following general conjecture and provide a full proof in the special cases of $M=1$ and $M=p$ prime.
\begin{conj}
Let $N,M\geq 1$ be square-free, $k > 2$ and $l>k+1$ be a prime satisfying $l \nmid NM$. Let $\psi,\phi$ be Dirichlet characters of conductors $u,v\geq 1$, satisfying $N = uv$, and set $\chi = \psi\phi$ (with a choice of lift $\tilde{\chi}$ to modulus $NM$). There exists a newform $f \in S_k^{\text{new}}(\Gamma_0(NM), \tilde{\chi})$ and a prime $\lambda\mid l$ of $\mathcal{O}_f[\phi,\psi]$ such that
\begin{equation*}
a_q(f) \equiv \psi(q)+\phi(q)q^{k-1} \ \text{mod } \lambda
\end{equation*}
for all primes $q\nmid NM$ if and only if both of the following conditions hold for some $\lambda'|l$ in $\mathbb{Z}[\psi,\phi]$ (satisfying $\lambda|\lambda'$):
\begin{enumerate}
\item $\text{ord}_{\lambda'}(L(1-k, \psi^{-1}\phi) \prod_{p \in \mathcal{P}_M} (\psi(p) - \phi(p)p^k)) > 0$.
\item $\text{ord}_{\lambda'}((\psi(p)-\phi(p)p^k)(\psi(p)-\phi(p)p^{k-2}))>0$ for each prime $p\in \mathcal{P}_M$.
\end{enumerate}
\end{conj}
\noindent Here, $\mathcal{P}_M$ is the set of prime divisors of $M$, $a_n(f)$ is the $n$th Fourier coefficient of $f$ and $\mathcal{O}_f[\psi,\phi]$ is the ring of integers of the smallest extension of $K_f = \mathbb{Q}(\{a_n(f)\})$ containing the values of $\psi$ and $\phi$ (similarly for $\mathbb{Z}[\psi,\phi]$).
After, we consider the natural relationship between these newform congruences and the Bloch-Kato conjecture, and give computational evidence for our conjecture in cases where it is not known.
\textbf{Acknowledgements} We thank Neil Dummigan for useful discussions concerning links between Conjecture $1.1$ and the Bloch-Kato conjecture, and for helpful comments and suggestions for improvement.
This work forms part of the thesis of the second named author, and we are grateful to the Heilbronn Institute for support via their PhD Studentship program.
Finally, we wish to dedicate this work to the memory of Lynne Walling, a close friend, mentor and collaborator of the first author, and who would have been a PhD supervisor of the second. Lynne was a highly valued member of the Mathematics community, whose encouragement and support towards early career mathematicians and those from under represented groups was second to none.
\section{Background and notation}
\subsection{The Setup}
We recap the background knowledge of modular forms that we will need, and refer the reader to \cite{diamond_shurman_2016} for further definitions and discussions. For an integer $N \geq 1$, define the standard congruence subgroups of $SL_2(\field{Z})$:
\begin{equation*}
\Gamma_1(N)=\left\{ \begin{pmatrix} a & b \\ c & d \end{pmatrix} \in SL_2(\field{Z}) : \begin{pmatrix} a & b \\ c & d \end{pmatrix} \equiv \begin{pmatrix} 1 & * \\ 0 & 1 \end{pmatrix} \ (\text{mod} \ N) \right\}
\end{equation*}
\begin{equation*}
\Gamma_0(N)=\left\{ \begin{pmatrix} a & b \\ c & d \end{pmatrix} \in SL_2(\field{Z}) : \begin{pmatrix} a & b \\ c & d \end{pmatrix} \equiv \begin{pmatrix} * & * \\ 0 & * \end{pmatrix} \ (\text{mod} \ N) \right\}.
\end{equation*}
Let $M_k(\Gamma_0(N), \chi)$ be the space of modular forms of weight $k \geq 2$, level $N$ and Dirichlet character $\chi : (\field{Z}/N\field{Z})^\times \rightarrow \field{C}^\times$. This is the space of holomorphic functions $f:\mathcal{H}\rightarrow\mathbb{C}$ on the upper half plane $\mathcal{H}$ that satisfy:
\begin{equation*}
f[\gamma]_k:=(cz + d)^{-k} f\left(\frac{az+b}{cz+d} \right) = \chi(d)f(z)
\end{equation*}
for all $\gamma=\begin{pmatrix} a & b \\ c & d \end{pmatrix} \in \Gamma_0(N) $, and are such that $f[\alpha]_k$ is holomorphic at $i\infty$ for all $\alpha \in SL_2(\field{Z})$ (i.e.\ the Fourier expansion of $f[\alpha]_k$ is of the form $f[\alpha]_k(z) = \sum_{n=0}^\infty a_n(f) q^n$, with $q=e^{2\pi i z}$). Note that the weight must satisfy $\chi(-1)=(-1)^k$, since $\begin{pmatrix} -1 & 0 \\ 0 & -1 \end{pmatrix}\in\Gamma_0(N)$.
The subspace $S_k(\Gamma_0(N), \chi)$ of cusp forms consists of the forms such that $f[\alpha]_k$ has Fourier coefficient $a_0(f)=0$ for each $\alpha\in SL_2(\field{Z})$. The orthogonal complement of $S_k(\Gamma_0(N),\chi)$ with respect to the Petersson inner product is the Eisenstein subspace $\mathcal{E}_k(\Gamma_0(N), \chi)$. If $k>2$ (the case of interest to us) then a natural basis of this space consists of the normalised Eisenstein series $E_k^{\psi, \phi}(tz)$ for all ordered pairs of Dirichlet characters $\psi,\phi$ of conductors $u,v$ satisfying $\psi\phi = \chi$ and $tuv\mid N$. The Fourier expansion of $ E_k^{\psi,\phi}$ is:
\begin{equation*}
E_k^{\psi,\phi}(z)=\delta(\psi)\frac{L(1-k, \psi^{-1}\phi)}{2} + \sum_{n=1}^\infty \sigma_{k-1}^{\psi, \phi}(n) q^n,
\end{equation*}
where $\delta(\psi)=\delta_{\psi,\mathds{1}_1}$ (the trivial character modulo $1$) and
\begin{equation*}
\sigma_{k-1}^{\psi,\phi}(n):=\sum_{d\mid n, d>0} \psi(n/d) \phi(d) d^{k-1}
\end{equation*}
is a generalised power divisor sum. The Eisenstein series with $uv = N$ are referred to as being new at level $N$.
For any given level $N$, we can also decompose $S_k(\Gamma_1(N))$ into new and old subspaces. For any $d\mid N$ we have the map:
\begin{align*}
i_d: S_k(\Gamma_1(N/d))^2 &\rightarrow S_k(\Gamma_1(N)) \\
(f,g) &\mapsto f + g[\alpha_d]_k
\end{align*}
\noindent where $\alpha_d = \left(\begin{smallmatrix} d & 0 \\ 0 & 1\end{smallmatrix}\right)$. Then the old subspace is:
\begin{equation*}
S_k^{\text{old}}(\Gamma_1(N)) = \sum_{p \in \mathcal{P}_N} i_p(S_k(\Gamma_1(N/p))^2),
\end{equation*} where $\mathcal{P}_N=\{p \text{ prime}: p \mid N \}$. The new subspace $S_k^{\text{new}}(\Gamma_1(N))$ is then the orthogonal complement of $S_k^{\text{old}}(\Gamma_1(N))$ with respect to the Petersson inner product, so that
\begin{equation*}
S_k(\Gamma_1(N)) = S_k^{\text{old}}(\Gamma_1(N))\oplus S_k^{\text{new}}(\Gamma_1(N)).
\end{equation*} This induces a decomposition of the space $S_k(\Gamma_0(N),\chi)$ into new and old spaces (lifting from spaces $S_k(\Gamma_0(N/p),\chi')$ with $p\in \mathcal{P}_N$ such that $p\text{ cond}(\chi)\mid N$, and taking $\chi'$ to be the reduction of $\chi$ mod $N/p$).
The space $M_k(\Gamma_0(N),\chi)$ comes equipped with the action of a Hecke algebra, a commutative algebra generated by operators $T_p$ indexed by primes $p$. The action of $T_p$ on the level of Fourier coefficients is as follows:
\begin{equation*}
a_n(T_p(f)) = a_{np}(f) + \chi(p)p^{k-1}a_{n/p}(f),
\end{equation*}
where we take $a_{n/p}(f)=0$ if $n/p \notin \field{Z}$.
The space $S_k(\Gamma_0(N),\chi)$ has a basis of eigenforms for the operators $T_p$ for all $p\nmid N$. We can always normalise an eigenform $f$ so that $a_1(f) = 1$, and in this case $T_p(f) = a_p(f)f$ for each prime $p\nmid N$. For the subspace $S_k^{\text{new}}(\Gamma_0(N),\chi)$ we can find a basis of newforms, eigenforms for the full Hecke algebra.
The action of the Hecke algebra on the Eisenstein subspace $\mathcal{E}_k(\Gamma_0(N),\chi)$ is also well understood (see \cite[Proposition 5.2.3]{diamond_shurman_2016}). In particular, the normalised Eisenstein series $E_k^{\psi,\phi}$ are eigenforms for all Hecke operators $T_p$ with $p\nmid N$, with eigenvalues given by:
\begin{equation*}
T_p(E_k^{\psi,\phi}) = \sigma_{k-1}^{\psi,\phi}(p)E_k^{\psi,\phi} = (\psi(p)+\phi(p)p^{k-1})E_k^{\psi,\phi}.
\end{equation*} If $E_k^{\psi,\phi}$ is new at level $N$ then it is an eigenform for the full Hecke algebra and so the above holds for all $p$.
The field of definition $K_f = \mathbb{Q}(\{a_n(f)\})$ of an eigenform $f\in S_k(\Gamma_0(N),\chi)$ is known to be a number field, and so has a well defined ring of integers $\mathcal{O}_f$. We will often denote by $K_f[\psi,\phi]$ the finite extension generated by the values of $\psi,\phi$ (i.e. roots of unity), and denote by $\mathcal{O}_f[\psi,\phi]$ the corresponding ring of integers.
By a theorem of Deligne \cite{deligne_1969}, for each prime $\lambda$ of $\mathcal{O}_f$ there exists a continuous $\lambda$-adic Galois representation \[\rho_{f,\lambda}: G_{\mathbb{Q}} \rightarrow \text{GL}_2(K_{f,\lambda})\] which is unramified for $q \nmid NMl$ and satisfies
\begin{align*}
\text{Tr}(\rho_{f,\lambda}(\text{Frob}_q)) &=a_q(f) \\ \text{det}(\rho_{f,\lambda}(\text{Frob}_q)) &=\chi(q)q^{k-1}.
\end{align*} for such primes (here $\text{Frob}_q$ is an arithmetic Frobenius element at $q$).
By standard arguments, it is possible to conjugate $\rho_{f,\lambda}$ so that it takes values in $\text{GL}_2(\mathcal{O}_{f,\lambda})$ and reduce modulo $\lambda$ to get a continuous representation \[\overline{\rho}_{f,\lambda}:G_{\mathbb{Q}} \rightarrow \text{GL}_2(\mathbb{F}_{\lambda}).\] In general, the reduction depends on the choice of invariant $\mathcal{O}_{f,\lambda}$-lattice, but the irreducible composition factors are independent of this choice.
\section{A General Conjecture}
\label{section:conj}
From now on, $N, M\geq 1$ are fixed coprime squarefree integers and $\chi$ is a fixed character of conductor $N$. Suppose further that $\psi, \phi$ are characters of conductors $u, v\geq 1$ respectively, satisfying $uv=N$ and $\psi \phi = \chi$. Then $E_k^{\psi,\phi}\in \mathcal{E}_k(\Gamma_0(N),\chi)$ is new at level $N$. Fix a lift $\tilde{\chi}$ of $\chi$ to modulus $NM$.
The following is a restatement of Conjecture $1.1$. It is a general conjecture concerning Eisenstein congruences between newforms in $S_k^{\text{new}}(\Gamma_0(NM),\tilde{\chi})$ and $E_k^{\psi,\phi}$, providing a wide generalisation of Ramanujan's congruence (as defined earlier, $\mathcal{P}_d$ is the set of prime divisors of $d\geq 1$).
\begin{conj}
\label{conj:general}
Let $k > 2$ and $l>k+1$ be a prime satisfying $l \nmid NM$. There exists a newform $f \in S_k^{\text{new}}(\Gamma_0(NM), \tilde{\chi})$ and a prime $\lambda\mid l$ of $\mathcal{O}_f[\phi,\psi]$ such that
\begin{equation*}
a_q(f) \equiv \psi(q)+\phi(q)q^{k-1} \ \text{mod } \lambda
\end{equation*}
for all primes $q\nmid NM$ if and only if both of the following conditions hold for some $\lambda'|l$ in $\mathbb{Z}[\psi,\phi]$ (satisfying $\lambda|\lambda'$):
\begin{enumerate}
\item $\text{ord}_{\lambda'}(L(1-k, \psi^{-1}\phi) \prod_{p \in \mathcal{P}_M} (\psi(p) - \phi(p)p^k)) > 0$.
\item $\text{ord}_{\lambda'}((\psi(p)-\phi(p)p^k)(\psi(p)-\phi(p)p^{k-2}))>0$ for each $p \in \mathcal{P}_M$.
\end{enumerate}
\end{conj}
Condition (1) of the conjecture is enough to guarantee the existence of an eigenform in $S_k(\Gamma_0(NM),\tilde{\chi})$ satisfying the congruence (see Theorem \ref{thm:eigenform} below). This condition can be thought of as the analogue of $\text{ord}_{691}\left(-\frac{B_{12}}{24}\right)>0$ in Ramanujan's congruence, but now allowing prime divisors of Euler factors as well as Dirichlet $L$-values (an artifact of our Eisenstein series being lifted from level $N$ to level $NM$). Such congruences coming from divisibility of the Euler factor (as opposed to the complete $L$-value) are often said to be of ``local origin".
Condition (2) can be thought of as measuring how ``new'' the modular form $f$ in the conjecture is, i.e. if the prime $l$ only satisfied condition (2) for all $p \in \mathcal{P}_d$, with $d \mid M$, then we would only expect to find a $d$-newform $f$ satisfying the congruence condition.
We will prove the reverse implication of this Conjecture, and partial results concerning the direct implication. The major hurdle is that the Conditions (1) and (2) allow us to prove the existence of a $p$-newform $f$ satisfying the congruence condition for each $p \in \mathcal{P}_M$, but we are currently unable to show that these $p$-newforms can in fact be taken to be the same genuine newform.
Later, we will discuss the relationship of this conjecture with previous results in this area, for example \cite{dummigan_2007}, \cite{billerey_menares_2016}, \cite{dummigan_fretwell_2014}, as well as connections with the Bloch-Kato Conjecture. We will also see computational examples using MAGMA \cite{magma}.
\subsection{Initial results}
To get us started, we must construct various lifts of $E_k^{\psi,\phi}$ to level $MN$ having specified constant terms. The following construction and results generalise those found in \cite[Section 1.2.2]{billerey_menares_2016}.
For each $m\geq 1$ we define the operator $\alpha_m$, acting on complex valued functions on the upper half plane, by $(\alpha_m f)(z)=f(mz)$. We then consider the collection of Eisenstein series given by
\begin{equation}
\label{eq:E}
E_{\underline{\delta}}(z)=\left[\prod_{p\in \mathcal{P}_M}(T_p - \delta_p)\right]\alpha_M E_k^{\psi,\phi}\in \mathcal{E}_k(\Gamma_0(NM),\tilde{\chi}),
\end{equation}
where $\underline{\delta} = \{\delta_p\}_{p\mid M}$ and $\delta_p$ is determined by fixing an ordering:
\begin{equation}
\label{eq:delta/eps}
\{\delta_p, \varepsilon_p\} = \{\psi(p), \phi(p)p^{k-1}\}.
\end{equation}
\noindent When $M=1$ there is no choice and we define $E_{\underline{\delta}} = E_k^{\psi,\phi}$.
\begin{lemma}
\label{lemma:E}
Each Eisenstein series $E_{\underline{\delta}}$ is a normalised eigenform in $\mathcal{E}_k(\Gamma_0(NM), \tilde{\chi})$. For each prime $p$ we have:
\begin{equation*}
T_p E_{\underline{\delta}}=
\begin{cases}
\varepsilon_pE_{\underline{\delta}} & \text{if } p \in \mathcal{P}_M \\
(\psi(p)+\phi(p)p^{k-1})E_{\underline{\delta}} & \text{otherwise} \\
\end{cases}
\end{equation*}
We can also write
\begin{equation}
\label{eq:altE}
E_{\underline{\delta}}=\sum_{m \mid M}(-1)^{|\mathcal{P}_m|} \delta_m \alpha_m E_k^{\psi,\phi}.
\end{equation}
where $\delta_m = \prod_{p \in \mathcal{P}_m} \delta_p$ for each $m\mid M$.
\end{lemma}
\begin{proof}
If $p \notin \mathcal{P}_M$ then
\begin{align*}
a_n(T_p(\alpha_M E_k^{\psi, \phi})) &= a_{np}(\alpha_M E_k^{\psi, \phi}) + \tilde{\chi}(p)p^{k-1}a_{n/p}(\alpha_M E_k^{\psi,\phi}) \\
&= a_{np/M}(E_k^{\psi,\phi}) + \chi(p)p^{k-1}a_{n/pM}(E_k^{\psi,\phi}) \\
&= a_{n/M}(T_pE_k^{\psi,\phi}) \\
&= a_n(\alpha_M (T_p E_k^{\psi,\phi})).
\end{align*}
\noindent Hence, we see that $T_p \alpha_M E_k^{\psi,\phi} = \alpha_M T_p E_k^{\psi,\phi}$. It follows that
\begin{align*}
T_pE_{\underline{\delta}}&=T_p \left[\prod_{q\in \mathcal{P}_M} (T_q - \delta_q)\right]\alpha_ME_k^{\psi,\phi} \\
&=\left[\prod_{q\in \mathcal{P}_M} (T_q - \delta_q)\right]\alpha_M T_pE_k^{\psi,\phi} \\
&=(\psi(p)+\phi(p)p^{k-1})E_{\underline{\delta}}
\end{align*}
\noindent If $p \in \mathcal{P}_M$ we find that: \begin{align*}T_pE_{\underline{\delta}}&=T_p \left[\prod_{q\in \mathcal{P}_M} (T_q - \delta_q)\right]\alpha_ME_k^{\psi,\phi} \\
&=\left[\prod_{q\in \mathcal{P}_{M/p}} (T_q - \delta_q)\right](T_p^2-\delta_pT_p)\alpha_M E_k^{\psi,\phi},\end{align*} and we must figure out the action of the final operator on $E_k^{\psi,\phi}$. We do this by first proving the claim that for each $m\mid M$:
\begin{equation*}
T_p\alpha_m E_k^{\psi, \phi} =
\begin{cases}
\alpha_{m/p}E_k^{\psi,\phi} & \text{if } p \in
\mathcal{P}_m \\
(\psi(p)+\phi(p)p^{k-1})\alpha_{m}E_k^{\psi,\phi}-\chi(p)p^{k-1}\alpha_{mp}E_k^{\psi,\phi} & \text{if } p \notin
\mathcal{P}_m
\end{cases}
\end{equation*}
To prove the claim, note that
\begin{align*}
a_n(T_p\alpha_m E_k^{\psi,\phi})&=a_{np}(\alpha_m E_k^{\psi,\phi}) + \tilde{\chi}(p)p^{k-1}a_{n/p}(\alpha_m E_k^{\psi,\phi}) \\
&= a_{np}(\alpha_m E_k^{\psi,\phi}) \\
&= \sigma_{k-1}^{\psi,\phi}\left(\frac{np}{m}\right).
\end{align*}
(here, $\tilde{\chi}(p)$ vanishes since $\tilde{\chi}$ has modulus $NM$ and $p \in \mathcal{P}_M$).
\noindent When $p \in \mathcal{P}_m$ we have $a_n(T_p\alpha_m E_k^{\psi,\phi})=\sigma_{k-1}^{\psi,\phi}\left(\frac{np}{m}\right)=a_n(\alpha_{m/p}E_k^{\psi,\phi})$, and when $p\notin \mathcal{P}_m$ we use the fact that:
\begin{equation}
\label{eq:powerdiv}
\sigma_{k-1}^{\psi,\phi}(np)+\chi(p)p^{k-1}\sigma_{k-1}^{\psi,\phi}(n/p) = (\psi(p)+\phi(p)p^{k-1})\sigma_{k-1}^{\psi,\phi}(n)
\end{equation}
to get
\begin{equation*}
\sigma_{k-1}^{\psi,\phi}\left(\frac{np}{m}\right) = (\psi(p)+\phi(p)p^{k-1})\sigma_{k-1}^{\psi,\phi}\left(\frac{n}{m}\right)-\chi(p)p^{k-1}\sigma_{k-1}^{\psi,\phi}\left(\frac{n}{mp}\right),
\end{equation*}
so that
\begin{equation*}
a_n(T_p\alpha_m E_k^{\psi,\phi})=(\psi(p)+\phi(p)p^{k-1})a_n(\alpha_mE_k^{\psi,\phi})-\chi(p)p^{k-1}a_n(\alpha_{mp}E_k^{\psi,\phi}).
\end{equation*}
\noindent The claim follows and so
\begin{align*}
(T_p^2-\delta_pT_p)\alpha_ME_k^{\psi,\phi} &= T_p\alpha_{M/p}E_k^{\psi,\phi} - \delta_p\alpha_{M/p}E_k^{\psi,\phi} \\
&=(\psi(p)+\phi(p)p^{k-1})\alpha_{M/p}E_k^{\psi,\phi}-\chi(p)p^{k-1}\alpha_ME_k^{\psi,\phi} - \delta_p\alpha_{M/p}E_k^{\psi,\phi} \\
&=\varepsilon_p\alpha_{M/p}E_k^{\psi,\phi} - \chi(p)p^{k-1}\alpha_ME_k^{\psi,\phi} \\
&=\varepsilon_p(T_p-\delta_p)\alpha_M E_k^{\psi,\phi}.
\end{align*}
Therefore, when $p\in \mathcal{P}_M$ we have that $T_pE_{\underline{\delta}}=\varepsilon_pE_{\underline{\delta}}$, as required.
Equation \eqref{eq:altE} holds by the Inclusion-Exclusion Principle and the claim. The fact that $E_{\underline{\delta}}$ is normalised now follows, since only the term with $m=1$ contributes to the $q$ coefficient of $E_{\underline{\delta}}$, and $E_k^{\psi,\phi}$ is normalised.
\end{proof}
We now state a Proposition which allows us to determine the value of $E_{\underline{\delta}}$ at all cusps. The proof can be found in \cite[Proposition 3.1.2]{spencer_2018} or \cite[Proposition 4]{billerey_menares_2017}.
\begin{propn}[Spencer]
\label{prop:spencer}
If $m \geq 1$ is coprime to $N$ and $\gamma = \left(\begin{smallmatrix} a & \beta \\ b & \delta \end{smallmatrix}\right) \in SL_2(\field{Z})$ then the constant term of $(\alpha_mE_k^{\psi,\phi})[\gamma]_k$ is given by
\begin{equation*}
a_0((\alpha_m E_k^{\psi,\phi})[\gamma]_k) = \begin{cases}
-\frac{g(\psi\phi^{-1})}{g(\phi^{-1})}\frac{\phi^{-1}(m^\prime a) \psi\left(\frac{-b^\prime}{v}\right)}{u^k m^{\prime k}}\frac{L(1-k, \psi^{-1}\phi)}{2} & \text{ if }v\mid b'\\ 0 & \text{otherwise}
\end{cases}
\end{equation*}
where $b^\prime = \frac{b}{\gcd(b,m)}$, $m^\prime=\frac{m}{\gcd(b,m)}$ and $g(\phi)=\sum_{n=0}^{v-1}\phi(n)e^{\frac{2\pi i n}{v}}$ is the Gauss sum of $\phi$.
\end{propn}
For convenience we will write:
\begin{equation*}
C_{\gamma}=-\frac{g(\psi\phi^{-1})}{g(\phi^{-1})}\frac{\phi^{-1}(a) \psi\left(\frac{-b}{v}\right)}{u^k }\frac{L(1-k, \psi^{-1}\phi)}{2}
\end{equation*}
Using this and the notation in Proposition \ref{prop:spencer}, we can write a formula for the constant term of $E_{\underline{\delta}}[\gamma]_k$.
\begin{corollary}
\label{cor:cusp}
If $\gamma = \left(\begin{smallmatrix} a & \beta \\ b & \delta \end{smallmatrix}\right) \in SL_2(\field{Z})$ and $M' = \frac{M}{\text{gcd}(M,b)}$ then the constant term of $E_{\underline{\delta}}[\gamma]_k$ is
\begin{equation*}
a_0(E_{\underline{\delta}}[\gamma]_k) = \begin{cases}C_{\gamma} \prod_{p\in \mathcal{P}_{M'}}(1 - \delta_p \phi^{-1}(p)p^{-k})\prod_{p \in \mathcal{P}_{M/M'}} (1 - \delta_p \psi^{-1}(p)) & \text{ if } v\mid b'\\ 0& \text{ otherwise}\end{cases}.
\end{equation*}
\end{corollary}
\begin{proof}
By Lemma \ref{lemma:E}, we can write $E_{\underline{\delta}}$ as
\begin{equation*}
E_{\underline{\delta}}=\sum_{m \mid M}(-1)^{|\mathcal{P}_m|} \delta_m \alpha_m E_k^{\psi,\phi}.
\end{equation*}
It is clear from Proposition \ref{prop:spencer} that the constant term of $E_{\underline{\delta}}$ is zero if $v\nmid b'$ and so it suffices to consider the case $v \mid b^\prime$. First we use Proposition \ref{prop:spencer} to evaluate the constant term of $(-1)^{|\mathcal{P}_m|}\delta_m \alpha_m E_k^{\psi, \phi}[\gamma]_k$ for a fixed $m \mid M$ as follows:
\begin{align*}
&(-1)^{|\mathcal{P}_m| +1}\frac{g(\psi \phi^{-1})}{g(\phi^{-1})} \frac{\phi^{-1}\left(m' a \right)\psi \left(\frac{-b'}{v}\right)}{u^k m'^k} \frac{L(1-k, \psi^{-1}\phi)}{2} \delta_m \\
&= C_{\gamma} (-1)^{|\mathcal{P}_m|} \frac{\phi^{-1}(m')\psi^{-1}\left(\frac{m}{m'}\right)}{m'^k} \delta_m\\ &= C_{\gamma} (-1)^{|\mathcal{P}_m|} \left(\prod_{p\in \mathcal{P}_{m'}} \delta_p\phi^{-1}(p)p^{-k} \right) \left( \prod_{p \in \mathcal{P}_{m/m'}} \delta_p\psi^{-1}(p) \right).\label{eq:constant}
\end{align*}
It follows that the constant term of $E_{\underline{\delta}}[\gamma]_k$ is:
\begin{align*}
a_0(E_{\underline{\delta}}[\gamma]_k) &=C_{\gamma} \sum_{m \mid M} \left[ (-1)^{|\mathcal{P}_m|} \left(\prod_{p\in \mathcal{P}_{m'}} \delta_p\phi^{-1}(p)p^{-k}\right) \left( \prod_{p \in \mathcal{P}_{m/m'}} \delta_p\psi^{-1}(p) \right)\right] \\
&= C_{\gamma} \prod_{p \in \mathcal{P}_{M'}} (1-\delta_p\phi^{-1}(p)p^{-k}) \prod_{p \in \mathcal{P}_{M/M'}} (1-\delta_p\psi^{-1}(p))
\end{align*}
by the Inclusion-Exclusion Principle.
\end{proof}
\subsection{The reverse implication}
We have now constructed various Eisenstein series $E_{\underline{\delta}}$ whose constant coefficients contain terms which look somewhat like those in Condition (1) of Conjecture \ref{conj:general}. We are now in a position to use this to prove the reverse implication of the conjecture.
\begin{theorem}
\label{thm:reverse}
The reverse implication of Conjecture \ref{conj:general} is true.
\end{theorem}
\begin{proof}
Suppose that we have a newform $f \in S_k^{\text{new}}(\Gamma_0(NM), \tilde{\chi})$ satisfying the congruence \[a_q(f) \equiv \psi(q) + \phi(q)q^{k-1} \ \text{mod } \lambda\] for all primes $q \nmid NM$ and for some prime $\lambda\mid l$ of $\mathcal{O}_f[\psi,\phi]$ (with the assumptions $k>2$, $l>k+1$ and $l\nmid NM$).
Attached to $f$ is the $\lambda$-adic Galois representation given by \[\rho_{f,\lambda}: G_{\mathbb{Q}}\rightarrow \text{GL}_2(K_{f,\lambda_0}) \hookrightarrow \text{GL}_2(K_f[\psi,\phi]_{\lambda}),\] where the first arrow is the usual $\lambda_0$-adic Galois representation attached to $f$ (with $\lambda_0$ the unique prime of $K_f$ lying below $\lambda$). This may be conjugated to take values in $\text{GL}_2(\mathcal{O}_f[\psi,\phi]_\lambda)$. The congruence then implies that $\rho_{f,\lambda}$ is residually reducible mod $\lambda$ (by Cebotarev density and Brauer-Nesbitt, noting that $l > k > 2$):
\begin{equation*}
\overline{\rho}_{f,\lambda} \sim \begin{pmatrix} \overline{\psi} & *\\ 0 & \overline{\phi} \chi_l^{k - 1} \end{pmatrix}
\end{equation*}
so that the semisimplification is given by
\begin{equation}
\label{eq:rep}
\overline{\rho}_{f,\lambda}^{ss} \sim \overline{\psi} \oplus \overline{\phi} \chi_l^{k-1}.
\end{equation}
(i.e.\ $\overline{\rho}_{f,\lambda}$ has composition factors $\{\overline{\psi}, \overline{\phi}\chi_l^{k-1}\}$). Here $\chi_{l}:G_{\mathbb{Q}}\rightarrow \mathbb{F}_{l}^{\times}$ is the mod $l$ cyclotomic character.
For each $p\in \mathcal{P}_M$ it is well known that the composition factors of $\overline{\rho}_{f,\lambda}$ locally at $p$ are given by:
\begin{equation*}
\overline{\rho}^{\text{ss}}_{f,\lambda}\mid_{W_{\mathbb{Q}_p}} \sim \left(\mu\chi_l^{k/2}\oplus\mu\chi_l^{k/2-1}\right)\mid_{W_{\mathbb{Q}_p}}
\end{equation*}
(e.g.\ \cite[Proposition 2.8]{loeffler_weinstein_2011}). Here $W_{\mathbb{Q}_p}$ is the local Weil group at $p$ and $\mu : W_{\mathbb{Q}_p}\rightarrow\mathbb{F}_\lambda^{\times}$ is the unramified character such that $\mu(\text{Frob}_p) \equiv a_p(f)/p^{k/2-1} \ \text{mod } \lambda$.
\noindent This leads to the following equivalence for each $p\in \mathcal{P}_M$:
\begin{equation*}
\left(\mu\chi_l^{k/2}\oplus\mu\chi_l^{k/2-1}\right)\mid_{W_{\mathbb{Q}_p}} \sim \left(\overline{\psi}\oplus \,\overline{\phi} \chi_l^{k-1}\right)\mid_{W_{\mathbb{Q}_p}}.
\end{equation*}
There are only two possibilities (note also that $l\neq p$ since $p\in \mathcal{P}_M$):
\begin{enumerate}
\item[(A)] $\overline{\psi}\mid_{W_{\mathbb{Q}_p}} = \mu \chi_l^{k/2}\mid_{W_{\mathbb{Q}_p}}$\qquad\text{and}\qquad $\overline{\phi} \chi_l^{k-1}\mid_{W_{\mathbb{Q}_p}} = \mu \chi_l^{k/2-1}\mid_{W_{\mathbb{Q}_p}}$.\\
\noindent Evaluating at $\text{Frob}_p$ gives
\begin{align*}\psi(p) &\equiv \mu(p)p^{k/2} \ \text{mod } \lambda\\ \phi(p)p^{k-1} &\equiv \mu(p)p^{k/2 -1} \ \text{mod } \lambda,\end{align*} so that
\begin{equation*}
\psi(p) - \phi(p)p^k \equiv 0 \ \text{mod } \lambda.
\end{equation*}
\item[(B)] $\overline{\psi}\mid_{W_{\mathbb{Q}_p}} = \mu \chi_l^{k/2-1}\mid_{W_{\mathbb{Q}_p}}$\qquad\text{and}\qquad $\overline{\phi} \chi_l^{k-1}\mid_{W_{\mathbb{Q}_p}} = \mu \chi_l^{k/2}\mid_{W_{\mathbb{Q}_p}}$. \\
\noindent Evaluating at $\text{Frob}_p$ gives \begin{align*}\psi(p) &\equiv \mu(p)p^{k/2-1} \ \text{mod } \lambda\\ \phi(p)p^{k-1} &\equiv \mu(p)p^{k/2} \ \text{mod } \lambda,\end{align*} so that we have both of the following
\begin{align*}
\psi(p) - \phi(p)p^{k-2} &\equiv 0 \ \text{mod } \lambda\\ \psi(p) &\equiv a_p(f) \bmod \lambda.
\end{align*}
\end{enumerate}
\noindent To summarise, one of the following must hold for each $p\in \mathcal{P}_M$:
\begin{enumerate}
\item[(A)] $ \psi(p) - \phi(p)p^k \equiv 0 \ \text{mod } \lambda.$
\item[(B)] $\psi(p) - \phi(p)p^{k-2} \equiv 0 \ \text{mod } \lambda$ and $a_p(f) \equiv \psi(p) \ \text{mod } \lambda$.
\end{enumerate}
Taking norms down to $\mathbb{Z}[\psi,\phi]$ gives Condition (2) (i.e. divisibility by $\lambda'$). It remains to prove that Condition (1) holds: \[\text{ord}_{\lambda'}\left(L(1-k, \psi^{-1}\phi) \prod_{p \in \mathcal{P}_M} (\psi(p) - \phi(p)p^k)\right) > 0.\] First note that this is immediate if there exists a prime $p \in \mathcal{P}_M$ satisfying case (A), since $l>k+1$ and $l\nmid N$. We assume from now on that case (B) is satisfied for each $p \in \mathcal{P}_M$.
Consider the Eisenstein series $E_{\underline{\delta}}$ corresponding to the choice $\delta_p=\phi(p)p^{k-1}$ for each $p \in \mathcal{P}_M$. We claim that for all primes $p\neq l$, the following congruence holds
\begin{equation*}
a_p(E_{\underline{\delta}}) \equiv a_p(f) \ \text{mod } \lambda.
\end{equation*}
This is true for each prime $p\in \mathcal{P}_M$, since by Lemma \ref{lemma:E} we have \[a_p(E_{\underline{\delta}}) = \varepsilon_p = \psi(p) \equiv a_p(f) \bmod \lambda .\] For each prime $p\in \mathcal{P}_N$ the form $f$ is $p$-new, and another comparison of local composition factors gives (e.g.\ \cite[Proposition 2.8]{loeffler_weinstein_2011}, see also \cite{billerey_menares_2017}):
\begin{equation*}
\left(\mu_1\chi_l^{(k-1)/2} \oplus \mu_2\chi_l^{(k-1)/2}\right)\mid_{W_{\mathbb{Q}_p}} \sim \left(\overline{\psi} \oplus \overline{\phi} \chi_l^{k-1}\right)\mid_{W_{\mathbb{Q}_p}},
\end{equation*}
with $\mu_1,\mu_2: W_{\mathbb{Q}_p}\rightarrow\mathbb{F}_\lambda^{\times}$ characters of conductors $1$ and $p$ satisfying \[a_p(f) \equiv p^{(k-1)/2}(\mu_1(p)+\mu_2(p)) \ \text{mod } \lambda.\] It follows that, for such primes \[a_p(f) \equiv \psi(p) + \phi(p)p^{k-1} \equiv a_p(E_{\underline{\delta}}) \ \text{mod } \lambda.\] For all other primes the claim follows from Lemma \ref{lemma:E} and the assumption that $f$ satisfies the congruence.
By the claim, and the fact that $f$ and $E_{\underline{\delta}}$ are both normalised Hecke eigenforms, we get the following congruence for all $n$ coprime to $l$
\begin{equation*}
a_n(E_{\underline{\delta}}) \equiv a_n(f) \ \text{mod } \lambda
\end{equation*}
Applying the theta operator $\Theta = q\frac{d}{dq}$ of \cite{serre_2003}, we obtain $\Theta(E_{\underline{\delta}}) \equiv \Theta(f) \ \text{mod } \lambda$. However, the theta operator is injective for $l > k+1$ \cite[Corollary 3]{katz_1977}, and so we have that $E_{\underline{\delta}} \equiv f \ \text{mod } \lambda$. Since $f$ is a cusp form, $E_{\underline{\delta}}$ must vanish at all cusps mod $\lambda$. Choosing any $\gamma \in SL_2(\field{Z})$ with lower left entry $b$ such that $\mathcal{P}_M\subseteq \mathcal{P}_b$, we then find that
\begin{equation*}
\text{ord}_\lambda(a_0(E_{\underline{\delta}}[\gamma]_k)) = \text{ord}_\lambda\left(C_{\gamma}\left(\prod_{p \in \mathcal{P}_M} (1-\psi^{-1}(p)\phi(p)p^{k-1})\right)\right)>0
\end{equation*} by Corollary \ref{cor:cusp}.
Using the facts that $\frac{g(\psi\phi^{-1})}{g(\phi^{-1})}$, $\phi^{-1}(a), \psi^{-1}(M)$ and $\psi\left(\frac{-b}{v}\right)$ are units in $\field{Z}[\psi, \phi]$, and that $l \nmid 2u$ by assumption, we find that:
\begin{equation*}
\text{ord}_{\lambda}\left(L(1-k, \psi^{-1}\phi)\prod_{p \in \mathcal{P}_M} (\psi(p)-\phi(p)p^{k-1})\right)>0.
\end{equation*}
\noindent By assumption, we also have that $\psi(p) - \phi(p)p^{k-2} \equiv 0 \bmod \lambda$ for each $p\in\mathcal{P}_M$, and so this implies that: \begin{equation*}
\text{ord}_{\lambda}\left(L(1-k, \psi^{-1}\phi)\prod_{p \in \mathcal{P}_M} (\psi(p)-\phi(p)p^k)\right)>0.
\end{equation*}
Condition (1) follows again by taking the norm down to $\mathbb{Z}[\psi,\phi]$.
\end{proof}
\subsection{The direct implication}
We now give partial results towards the direct implication of Conjecture \ref{conj:general}. First, we prove that Condition (1) guarantees the existence of an eigenform in $S_k(\Gamma_0(NM),\tilde{\chi})$ satisfying the congruence. The following is the necessary extension of \cite[Theorem 2.10]{dummigan_spencer} and \cite[Theorem 3.0.1]{spencer_2018}.
\begin{theorem}
\label{thm:eigenform}
Let $k > 2$ and $\lambda' \nmid 6NM$ be a prime of $\mathbb{Z}[\psi,\phi]$ such that \[\text{ord}_{\lambda'}\left(L(1-k, \psi^{-1} \phi) \prod_{p \in \mathcal{P}_M}(\psi(p) - \phi(p)p^k)\right)>0.\] There exists a normalised Hecke eigenform $f \in S_k(\Gamma_1(NM), \tilde{\chi})$ and a prime $\lambda\mid \lambda'$ of $\mathcal{O}_f[\psi,\phi]$ such that for all primes $q \nmid NM$,
\begin{equation*}
a_q(f) \equiv \psi(q)+\phi(q)q^{k-1} \ \text{mod } \lambda.
\end{equation*}
\end{theorem}
\begin{proof}
Consider the Eisenstein series $E_{\underline{\delta}}$ corresponding to the choice $\delta_p=\psi(p)$ for all $p \in \mathcal{P}_M$. By Corollary \ref{cor:cusp}, for each $\gamma\in\text{SL}_2(\mathbb{Z})$ the constant term of $E_{\underline{\delta}}[\gamma]_k$ is either $0$ or is
\begin{equation*}
a_0(E_{\underline{\delta}}[\gamma]_k) = C_{\gamma} \prod_{p \in \mathcal{P}_M} (1 - \psi(p) \phi^{-1}(p)p^{-k}) = (-1)^{|\mathcal{P}_M|}\frac{C_{\gamma}}{M^k}\prod_{p\in\mathcal{P}_M}(\psi(p)-\phi(p)p^k).
\end{equation*}
In either case $\text{ord}_{\lambda'}(a_0(E_{\underline{\delta}}[\gamma]_k)) > 0$, and so $E_{\underline{\delta}}$ is a cusp form mod $\lambda'$. A standard argument using the Deligne-Serre Lifting Lemma and Carayol's Lemma (e.g.\ \cite[Theorem 3.0.1]{spencer_2018}) then gives a lift to a characteristic zero eigenform $f\in S_k(\Gamma_0(NM),\tilde{\chi})$ satisfying $a_q(f) \equiv a_q(E_{\underline{\delta}})\bmod \lambda$ for some prime $\lambda|\lambda'$ of $\mathcal{O}_f[\psi,\phi]$. This eigenform satisfies the required congruence by construction.
\end{proof}
Taking $M=1$ in Theorem \ref{thm:eigenform} gives a result of Dummigan \cite[Proposition 2.1]{dummigan_2007}. As remarked in the paper, the eigenform satisfying the congruence must be new (since $\chi$ has conductor $N$). This completes the proof of Conjecture \ref{conj:general} in the case $M=1$. We will now see that the case $M=p$ prime can also be fully proved.
\begin{theorem}
\label{thm:prime}
If $M=p$ is prime then Conjecture \ref{conj:general} is true.
\end{theorem}
\begin{proof}
Theorem \ref{thm:reverse} provides the reverse implication and so it suffices to prove the direct implication. By Theorem \ref{thm:eigenform}, Condition (1) provides a level $Np$ eigenform $f_0\in S_k(\Gamma_1(Np), \tilde{\chi})$ and a prime $\lambda_0\mid \lambda'$ of $\mathcal{O}_{f_0}[\psi,\phi]$ satisfying \[a_q(f_0) \equiv \psi(q)+\phi(q)q^{k-1} \ \text{mod } \lambda_0,\] for all $q\nmid Np$. We may assume that $f_0$ is an oldform, otherwise we are done. Since $\tilde{\chi}$ has conductor $N$, $f_0$ must be a lift of an eigenform $f_1\in S_k^{\text{new}}(\Gamma_0(N),\chi)$. By the Cebotarev density theorem, we have that $\overline{\rho}_{f_1,\lambda_0}\sim\overline{\rho}_{f_0,\lambda_0}$. As earlier, the congruence implies that $\overline{\rho}_{f_0,\lambda_0}^{\text{ss}}\sim\overline{\psi} \oplus \overline{\phi} \chi_l^{k-1}$. Since $l \neq p$ we see that:
\begin{equation*}
a_p(f_1) \equiv \psi(p) + \phi(p)p^{k-1} \ \text{mod } \lambda_0.
\end{equation*}
\noindent By Condition (2), we also have that one of the following holds: \begin{align*}\psi(p) &\equiv \phi(p)p^k \ \text{mod } \lambda_0\\ \psi(p) &\equiv \phi(p)p^{k-2} \ \text{mod } \lambda_0,\end{align*} so that
\begin{equation*}
a_p(f_1) \equiv \psi(p)+\phi(p)p^{k-1} \equiv
\begin{cases}
\psi(p)(1 + p^{-1}) \ \text{mod } \lambda_0 & \text{if } \psi(p) \equiv \phi(p)p^k \ \text{mod } \lambda_0 \\
\psi(p)(1 + p) \ \text{mod } \lambda_0 & \text{if } \psi(p) \equiv \phi(p)p^{k-2} \ \text{mod } \lambda_0
\end{cases}
\end{equation*}
We now claim that the following congruence condition holds:
\begin{equation}
\label{eq:diamond}
a_p(f_1)^2 \equiv \chi(p)p^{k-2}(1+p)^2 \ \text{mod } \lambda_0.
\end{equation}
Indeed, if $\psi(p) \equiv \phi(p)p^k \ \text{mod } \lambda_0$ then
\begin{align*}
\chi(p)p^{k-2}(1+p)^2 \equiv \psi(p)\phi(p)p^{k-2}(1+p)^2
\equiv \psi^2(p)p^{-2}(1+p)^2
&\equiv \psi^2(p)(1+p^{-1})^2 \\ &\equiv a_p(f_1)^2 \bmod \lambda_0
\end{align*}
Alternatively, if $\psi(p) \equiv \phi(p)p^{k-2} \ \text{mod } \lambda_0$ then
\begin{align*}
\chi(p)p^{k-2}(1+p)^2 \equiv \psi(p)\phi(p)p^{k-2}(1+p)^2
&\equiv \psi^2(p)(1+p)^2 \\ &\equiv a_p(f_1)^2 \bmod \lambda_0.
\end{align*}
A well known theorem of Diamond (see \cite{diamond_1991}) now implies the existence of a normalised $p$-newform $f \in S_k^{p\text{-new}}(\Gamma_1(Np), \tilde{\chi})$ and a prime $\lambda \mid \lambda_0$ of $\mathcal{O}_{f_0,f}[\psi,\phi]$ satisfying
\begin{equation*}
a_q(f) \equiv a_q(f_1) \ \text{mod } \lambda
\end{equation*}
for all primes $q\nmid Npl$. In fact, we must have that $f\in S_k^{\text{new}}(\Gamma_0(Np),\tilde{\chi})$, since $\tilde{\chi}$ has conductor $N$ and $p\nmid N$. This newform satisfies the required congruence by construction.
\end{proof}
The above argument highlights the bottleneck in trying to prove the direct implication in general. Conditions (1) and (2) still imply the level raising condition for each $p\mid M$, but this only allows us to find ``local" newforms satisfying the congruence, i.e.\ a $p$-newform for each $p|M$. There seems to be no clear way to prove the existence of a ``global" newform satisfying the congruence.
\subsection{Comparison with known results}
In the special case of $N=1$ (so that $\chi = \mathds{1}_1$) Conjecture \ref{conj:general} agrees with Conjecture $4.1$ of Dummigan and Fretwell \cite{dummigan_fretwell_2014} and Conjecture $3.2$ of Billerey and Menares \cite{billerey_menares_2016}.
In the special case of arbitrary square-free $N$ and $M=p$ prime, Theorem \ref{thm:eigenform} becomes Theorem $3.0.1$ of Spencer's thesis \cite{spencer_2018}. Newform congruences were not explored in this thesis, and so Theorem \ref{thm:prime} complements this Theorem well.
\subsection{Low weight}
For weight $k=2$, the analogue of Theorem \ref{thm:eigenform} is expected to be true in the case $N> 1$. However, when $N=1$ the condition can fail to provide an eigenform congruence. For example, when $N=1$ and $M = p\geq 5$ is prime then a famous result of Mazur \cite{mazur} says that eigenform congruences only arise when $\text{ord}_{l}\left(\frac{p-1}{12}\right)>0$, as opposed to $\text{ord}_{l}(p^2-1)>0$. Work of Ribet and Yoo considers results for more general levels \cite{ribetyoo}.
Conjecture \ref{conj:general} is also invalid in general for weight $k=2$. Even when an eigenform congruence exists there may not exist a newform satisfying the congruence, despite the fact that it is possible for Condition (2) to be automatically satisfied. It would be interesting to see what the analogue of Conjecture \ref{conj:general} is in this case.
It would be very interesting to see if there are analogues of our results for weight $1$ modular forms. The existence of such eigenform congruences has been studied for $M=p$ prime in \cite{spencer_2018}, but very little seems to be known beyond this.
\section{Relation with the Bloch-Kato Conjecture}
\label{section:BK}
In this section, we relate Conjecture \ref{conj:general} to the Bloch-Kato Conjecture. Throughout we assume Conditions (1) and (2) and fix an eigenform $f\in S_k(\Gamma_0(NM), \tilde{\chi})$ satisfying the congruence mod $\lambda$ (guaranteed to exist by Theorem \ref{thm:eigenform}).
As earlier, the congruence implies that the composition factors of $\overline{\rho}_{f,\lambda}$ are given by: \[\overline{\rho}_{f,\lambda}^{\text{ss}} \sim \overline{\psi}\oplus\overline{\phi}\chi_l^{k-1},\] realised on the one dimensional $\mathbb{F}_\lambda[G_{\mathbb{Q}}]$-modules $\mathbb{F}_\lambda(\psi)$ and $\mathbb{F}_\lambda(1-k)(\phi)$, a Tate twist of $\mathbb{F}_{\lambda}(\phi)$, i.e.\ $\text{Frob}_p$ acts by multiplication by $\phi(p)p^{1-k} \bmod \lambda$ if $\lambda\nmid p$.
By a result of Ribet \cite{ribet_1976}, we can choose the invariant $\mathcal{O}_{f,\lambda}$-lattice defining $\rho_{f,\lambda}$ in such a way that $\overline{\rho}_{f,\lambda}$ is realised on an $\mathbb{F}_{\lambda}$-vector space $V$ such that
\begin{equation*}
0 \longrightarrow \mathbb{F}_\lambda(1-k)(\phi) \overset{\iota}\longrightarrow V \overset{\pi}\longrightarrow \mathbb{F}_\lambda(\psi) \longrightarrow 0
\end{equation*}
is a non-split extension of $\mathbb{F}_\lambda[G_{\mathbb{Q}}]$-modules. Let $h: \mathbb{F}_\lambda(\psi) \longrightarrow V$ be an $\mathbb{F}_\lambda$-linear map and fix $x \in \mathbb{F}_\lambda(\psi)$. Then for each $g \in G_{\mathbb{Q}}$ we find that $g(h(g^{-1}(x)))-h(x) \in \text{ker}(\pi) =\text{im}(\iota)$, giving a well defined map \begin{align*}C: G_{\mathbb{Q}} &\longrightarrow \text{Hom}(\mathbb{F}_{\lambda}(\psi),\mathbb{F}_{\lambda}(1-k)(\phi)) \\
g &\longmapsto C(g): x \longmapsto\iota^{-1}(g(h(g^{-1}(x))) - h(x)).
\end{align*}
\noindent It is easily verified that $C$ is a cocycle, whose class \begin{align*}c = [C] &\in H^1(\mathbb{Q},\text{Hom}(\mathbb{F}_{\lambda}(\psi),\mathbb{F}_{\lambda}(1-k)(\phi)))\\ &\cong H^1(\mathbb{Q}, \mathbb{F}_{\lambda}(1-k)(\psi^{-1}\phi))\end{align*}
is independent of the choice of $x$ and $h$, and is non-trivial since the extension is non-split.
Consider the $\mathbb{F}_\lambda[G_{\mathbb{Q}}]$-module $A_{f,\lambda}^{\psi,\phi}= (K_f[\psi,\phi]_{\lambda} / \mathcal{O}_f[\psi,\phi]_{\lambda})(1-k)(\psi^{-1}\phi)$ and let $A_f^{\psi,\phi}[\lambda]$ be the kernel of multiplication by $\lambda$ (abusing notation slightly, we let $\lambda$ be a uniformiser). It follows that \[A_f^{\psi,\phi}[\lambda] = \left(\frac{1}{\lambda}\mathcal{O}_f[\psi,\phi]_{\lambda}/\mathcal{O}_f[\psi,\phi]_{\lambda}\right)(1-k)(\psi^{-1}\phi)\cong \mathbb{F}_{\lambda}(1-k)(\psi^{-1}\phi)\] and so we may view $c$ as a class in $H^1(\mathbb{Q},A_f^{\psi,\phi}[\lambda])$. The following short exact sequence of $\mathbb{F}_{\lambda}[G_{\mathbb{Q}}]$-modules:
\begin{equation*}
0 \longrightarrow A_f^{\psi,\phi}[\lambda] \overset{i}\longrightarrow A_{f,\lambda}^{\psi,\phi} \overset{\lambda}\longrightarrow A_{f,\lambda}^{\psi,\phi} \longrightarrow 0
\end{equation*}
induces a long exact sequence in Galois cohomology, a piece of which is the following:
\begin{equation*}
H^0(\mathbb{Q}, A_{f,\lambda}^{\psi,\phi}) \overset{\delta}\longrightarrow H^1(\mathbb{Q}, A_f^{\psi,\phi}[\lambda]) \overset{i_*}\longrightarrow H^1(\mathbb{Q}, A_{f,\lambda}^{\psi,\phi}).
\end{equation*}
\noindent Note that since $l> k+1$ we have that $(l-1)\nmid (k-1)$, and so $H^0(\mathbb{Q}, A_{f,\lambda}^{\psi,\phi})$ is trivial. It follows that $i_*$ is injective, so that we can lift $c$ to a non-trivial class $c' = i_*(c) \in H^1(\mathbb{Q}, A_{f,\lambda}^{\psi,\phi})$.
The aim is to show that $c'$ is a non-trivial element of a Bloch-Kato Selmer group, which we now define (as in \cite[\S 3]{bloch_kato_2007}). First let $B_{f,\lambda}^{\psi,\phi} = K_f[\psi,\phi]_{\lambda}(1-k)(\psi^{-1}\phi)$. For a prime $q \neq l$ we define the local Bloch-Kato Selmer group attached to $B_{f,\lambda}^{\psi,\phi}$:
\[H^1_f(\mathbb{Q}_q, B_{f,\lambda}^{\psi,\phi}) = \text{ker}(H^1(D_q, B_{f,\lambda}^{\psi,\phi})
\longrightarrow H^1(I_q, B_{f,\lambda}^{\psi,\phi})),\]
\noindent where $I_q\subset D_q\subset G_{\mathbb{Q}_q}$ are inertia and decomposition subgroups at $q$. The cohomology is taken with respect to continuous cocycles and coboundaries. Note also that the $f$ on the LHS is standard notation and is not related to the modular form $f$. When $q=l$ the local Bloch-Kato Selmer group of $B_{f,\lambda}^{\psi,\phi}$ is defined to be
\[H^1_f(\mathbb{Q}_l, B_{f,\lambda}^{\psi,\phi}) = \text{ker}(H^1(D_l, B_{f,\lambda}^{\psi,\phi}) \longrightarrow H^1(I_l, B_{f,\lambda}^{\psi,\phi} \otimes_{\mathbb{Q}_l} B_{\text{crys}} )).\]
\noindent See \cite[\S 1]{bloch_kato_2007} for the definition of Fontaine's ring $B_{\text{crys}}$. The global Bloch-Kato Selmer group of $B_{f,\lambda}^{\psi,\phi}$ is then $H^1_f(\mathbb{Q}, B_{f,\lambda}^{\psi,\phi})$, the subgroup of $H^1(\mathbb{Q}, B_{f,\lambda}^{\psi,\phi})$ consisting of classes which have local restriction lying in $H^1_f(\mathbb{Q}_q, B_{f,\lambda}^{\psi,\phi})$ for all primes $q$.
Letting $\pi: B_{f,\lambda}^{\psi,\phi} \longrightarrow A_{f,\lambda}^{\psi,\phi}$ be the quotient map, the local Bloch-Kato Selmer group of $A_{f,\lambda}^{\psi,\phi}$ is defined to be the pushforward $H^1_f(\mathbb{Q}_q, A_{f,\lambda}^{\psi,\phi}) = \pi_*H^1_f(\mathbb{Q}_q,B_{f,\lambda}^{\psi,\phi})$. The global Bloch-Kato Selmer group of $A_{f,\lambda}^{\psi,\phi}$ is then $H^1_f(\mathbb{Q}, A_{f,\lambda}^{\psi,\phi})$, the subgroup of $H^1(\mathbb{Q}, A_{f,\lambda}^{\psi,\phi})$ consisting of classes whose local restrictions lie in $H^1_f(\mathbb{Q}_q, A_{f,\lambda}^{\psi,\phi})$ for all primes $q$. Note that, since $l \nmid 2$ we may omit $q=\infty$.
More generally, given a finite set of primes $\mathcal{P}$ with $l \notin \mathcal{P}$, we define $H^1_\mathcal{P}(\mathbb{Q}, A_{f,\lambda}^{\psi,\phi})$ to be the subgroup of $H^1(\mathbb{Q}, A_{f,\lambda}^{\psi,\phi})$ consisting of classes whose local restrictions lie in $H^1_f(\mathbb{Q}_q, A_{f,\lambda}^{\psi,\phi})$ for all primes $q \notin \mathcal{P}$.
\begin{propn}\label{prop:BK}
The congruence satisfied by $f$ gives the existence of a non-trivial element $c' \in H^1_{\mathcal{P}_{NM}}(\mathbb{Q}, A_{f,\lambda}^{\psi,\phi})$.
\end{propn}
\begin{proof}
We have that $\rho_{f,\lambda}$ is unramified at each $q\nmid NMl$. It follows that the restriction of $c'$ to $H^1(I_q, A_{f,\lambda}^{\psi,\phi})$ is 0 for such $q$. Then by \cite[Lemma 7.4]{brown_2007}, $c' \in H^1_f(\mathbb{Q}_q, A_{f,\lambda}^{\psi,\phi})$. Under the assumption that $l > k+1$, the representation $\rho_{f,\lambda}$ is crystalline at $l$ and we deduce that $c' \in H^1_f(\mathbb{Q}_l, A_{f,\lambda}^{\psi,\phi})$, as a consequence of \cite[Proposition 2.2]{diamond_flach_guo_2004}. Since the necessary local conditions are satisfied, we have that $c' \in H^1_{\mathcal{P}_{NM}}(\mathbb{Q}, A_{f,\lambda}^{\psi,\phi})$.
\end{proof}
Let $C_{k,l}^{\psi,\phi} = (\mathbb{Q}_l/\mathbb{Z}_l)(1-k)(\psi^{-1}\phi)$. Note that since $\lambda|l$, the module $K_f[\psi,\phi]_{\lambda}(1-k)(\psi^{-1}\phi)$ decomposes as a direct sum of copies of $C_{k,l}^{\psi,\phi}$. Proposition \ref{prop:BK} then implies the existence of a non-trivial element $c'\in H^1_{\mathcal{P}_{NM}}(\mathbb{Q},C_{k,l}^{\psi,\phi})$ by projection. We now discuss how the existence of such an element agrees with the Bloch-Kato conjecture.
Consider the partial Dirichlet $L$-value $L_{\mathcal{P}_{NM}}(k, \psi\phi^{-1})$, i.e. with Euler factors at primes $q \in \mathcal{P}_{NM}$ omitted. Letting $\lambda'$ be as in Condition (1) of Conjecture \ref{conj:general}, below is a reformulation of a special case of the $\lambda'$-part of the Bloch-Kato conjecture (as in \cite{diamond_flach_guo_2004}, proved in this case by Huber and Kings in \cite{huber_kings_2003}).
\begin{conj}
\label{conj:BK}
\[\text{ord}_{\lambda'}\left( \frac{L_{\mathcal{P}_{NM}}(k, \psi\phi^{-1})}{g(\psi\phi^{-1})(2\pi i)^k}\right)
= \text{ord}_{\lambda'} \left( \frac{\text{Tam}^0_\lambda(C_{k,l}^{\psi,\phi}) \#H^1_{\mathcal{P}_{NM}}(\mathbb{Q},C_{k,l}^{\psi,\phi})}{\#H^0(\mathbb{Q},C_{k,l}^{\psi,\phi})} \right).\]
\end{conj}
\noindent We omit the definition of the Tamagawa factor $\text{Tam}^0_\lambda(C_{k,l}^{\psi,\phi})$, but note that it is trivial in this case since $l > k+1$ and $\lambda' \mid l$, by \cite[Theorem 4.1.1(iii)]{bloch_kato_2007}. We also know that $H^0(\mathbb{Q},C_{k,l}^{\psi,\phi})$ is trivial, and so \[\text{ord}_{\lambda'}\left( \frac{L_{\mathcal{P}_{NM}}(k, \psi\phi^{-1})}{g(\psi\phi^{-1})(2\pi i)^k}\right)=\text{ord}_{\lambda'}(\#H^1_{\mathcal{P}_{NM}}(\mathbb{Q},C_{k,l}^{\psi,\phi})).\] Hence if we can show that $\lambda'$ divides the partial $L$-value then we know there is a non-trivial element in the Bloch-Kato Selmer group.
\begin{propn}
\label{propn:divideL}
Condition (1) of Conjecture \ref{conj:general} implies the condition \[\text{ord}_{\lambda'}\left( \frac{L_{\mathcal{P}_{NM}}(k, \psi\phi^{-1})}{g(\psi\phi^{-1})(2\pi i)^k}\right)>0.\]
\end{propn}
\begin{proof}
By the functional equation for $L(s,\psi\phi^{-1})$ we have:
\begin{align*}
\frac{L_{\mathcal{P}_{NM}}(k, \psi\phi^{-1})}{g(\psi\phi^{-1})(2\pi i)^k} &= \frac{(-1)^{|\mathcal{P}_{NM}|}}{\phi(NM)(NM)^k}\frac{L(k, \psi \phi^{-1})}{ g(\psi\phi^{-1})(2\pi i)^k}\prod_{p \in \mathcal{P}_{NM}} (\psi(p)-\phi(p)p^k)\\ &=\frac{(-1)^{|\mathcal{P}_{NM}|+k}}{2(k-1)!\phi(NM)(N^2M)^k}L(1-k,\psi^{-1}\phi)\prod_{p \in \mathcal{P}_{NM}} (\psi(p)-\phi(p)p^k)
\end{align*}
The claim follows, since $l\nmid NM$ and $l>k+1$.
\end{proof}
The above shows that Condition (1) of Conjecture \ref{conj:general} provides a non-trivial element $c'\in H^1_{\mathcal{P}_{NM}}(\mathbb{Q},C_{k,l}^{\psi,\phi})$, and that this is in line with the Bloch-Kato Conjecture. Since Condition (2) is (conjecturally) telling us that a newform $f$ of level $NM$ can be found to satisfy the congruence, we might naively expect that the corresponding element $c'\in H^1_{
\mathcal{P}_{NM}}(\mathbb{Q},C_{k,l}^{\psi,\phi})$ is ``new", i.e.\ that $c'\notin H^1_{\mathcal{P}_{Nd}}(\mathbb{Q},C_{k,l}^{\psi,\phi})$ for each $d| M$ with $d\neq M$. However, this may not be the case, since considering the Bloch-Kato quotient for such a divisor $d$ gives: \begin{align*}
\text{ord}_{\lambda^\prime}\left( \frac{ \#H^1_{\mathcal{P}_{NM}}(\mathbb{Q},C_{k,l}^{\psi,\phi})}{\#H^1_{\mathcal{P}_{Nd}}(\mathbb{Q},C_{k,l}^{\psi,\phi})}\right) &= \text{ord}_{\lambda^\prime}\left( \frac{L_{\mathcal{P}_{NM}}(k, \psi\phi^{-1})}{L_{\mathcal{P}_{Nd}}(k, \psi\phi^{-1})} \right)\\ &=\text{ord}_{\lambda^\prime}\left( \frac{\prod_{p \in \mathcal{P}_{M/d}}(\psi(p)-\phi(p)p^k)}{\phi(M/d)(M/d)^k}\right),
\end{align*}
revealing that new elements can only be accounted for by local divisibility conditions of the form $\text{ord}_{\lambda'}(\psi(p)-\phi(p)p^k)>0$ (as in Case (A) in the proof of Theorem \ref{thm:reverse}). It follows that the primes $p\in\mathcal{P}_M$ satisfying $\text{ord}_{\lambda'}(\psi(p)-\phi(p)p^{k-2})>0$ and $\text{ord}_{\lambda'}(\psi(p)-\phi(p)p^k)=0$ in Condition (2) cannot contribute towards new elements.
\begin{propn}\label{prop:BK1}
Let $d|M$ and $d\neq M$. Then there exists a prime $p\in \mathcal{P}_{M/d}$ such that $\text{ord}_{\lambda'}(\psi(p)-\phi(p)p^k)>0$ if and only if \[\text{ord}_{\lambda'}\left(\frac{\#H^1_{\mathcal{P}_{NM}}(\mathbb{Q},C_{k,l}^{\psi,\phi})}{\#H^1_{\mathcal{P}_{Nd}}(\mathbb{Q},C_{k,l}^{\psi,\phi})}\right) > 0.\] In particular, $\text{ord}_{\lambda'}(\psi(p)-\phi(p)p^k)>0$ if and only if there exists an element $c'_p\in H^1_{\mathcal{P}_{NM}}(\mathbb{Q},C_{k,l}^{\psi,\phi})$ that is ``$p$-new", i.e.\ $c'_p\notin H^1_{\mathcal{P}_{NM/p}}(\mathbb{Q},C_{k,l}^{\psi,\phi})$.
\end{propn}
\begin{proof}
This follows from the above discussion.
\end{proof}
\noindent The above should be roughly compared with the situation in Theorem \ref{thm:prime} and the preceding discussion, where Conditions (1) and (2) alone were only able to guarantee the existence of $p$-newforms satisfying the congruence for each $p\in\mathcal{P}_M$, as opposed to a genuine newform. Based on this, we conjecture the following.
\begin{conj}\label{conj:pnew}
Let $\mathcal{S} = \{p\in \mathcal{P}_M\,|\,\text{ord}_{\lambda'}(\psi(p)-\phi(p)p^k)>0\}$. Then the class $c'\in H^1_{\mathcal{P}_{NM}}(\mathbb{Q},C_{k,l}^{\psi,\phi})$ provided by Proposition \ref{prop:BK} is ``$p$-new" for each $p\in \mathcal{S}$.
\end{conj}
Now suppose that $\text{ord}_{\lambda'}(\psi(p)-\phi(p)p^k) = 0$ for every $p\in\mathcal{P}_{M/d}$. In order to satisfy Condition (2) we would then have the following conditions for each $p\in\mathcal{P}_{M/d}$: \begin{align*}&\text{ord}_{\lambda'}(\psi(p)-\phi(p)p^{k-2})>0,\\ &\text{ord}_{\lambda'}(L(1-k,\psi^{-1}\phi))>0.\end{align*} Here, the situation is not fully clear (at least not to the authors). One conclusion that can be made in this case is the following.
\begin{propn}\label{prop:BK2}
Let $d|M$ and $d\neq M$. If $\text{ord}_{\lambda'}(\psi(p)-\phi(p)p^{k-2})>0$ for each $p\in\mathcal{P}_{M/d}$ then \[\text{ord}_{\lambda'}\left(\frac{\#H^1_{\mathcal{P}_{NM}}(\mathbb{Q},C_{k-2,l}^{\psi,\phi})}{\#H^1_{\mathcal{P}_{Nd}}(\mathbb{Q},C_{k-2,l}^{\psi,\phi})}\right) \geq \#\mathcal{P}_{M/d}.\]
In particular, if $\text{ord}_{\lambda'}(\psi(p)-\phi(p)p^{k-2})>0$ then there exists an element $c_p{''}\in H^1_{\mathcal{P}_{NM}}(\mathbb{Q},C_{k-2,l}^{\psi,\phi})$ that is ``$p$-new", i.e.\ $c_p{''}\notin H^1_{\mathcal{P}_{NM/p}}(\mathbb{Q},C_{k-2,l}^{\psi,\phi})$.
\end{propn}
\begin{proof}
This follows by considering the Bloch-Kato quotient of weight $k-2$: \begin{align*}\text{ord}_{\lambda^\prime}\left( \frac{ \#H^1_{\mathcal{P}_{NM}}(\mathbb{Q},C_{k-2,l}^{\psi,\phi})}{\#H^1_{\mathcal{P}_{Nd}}(\mathbb{Q},C_{k-2,l}^{\psi,\phi})}\right) &= \text{ord}_{\lambda^\prime}\left( \frac{L_{\mathcal{P}_{NM}}(k-2, \psi\phi^{-1})}{L_{\mathcal{P}_{Nd}}(k-2, \psi\phi^{-1})} \right)\\ &=\text{ord}_{\lambda^\prime}\left( \frac{\prod_{p \in \mathcal{P}_{M/d}}(\psi(p)-\phi(p)p^{k-2})}{\phi(M/d)(M/d)^{k-2}}\right).\end{align*}
\end{proof}
Alternatively, the divisibility condition in the above Proposition implies Conditions (1) and (2) for weight $k-2$. By Theorem \ref{thm:eigenform} there exists a congruence between the eigenvalues of an eigenform $g\in S_{k-2}(\Gamma_0(NM/d),\tilde{\chi})$ and the eigenvalues of $E_{k-2}^{\psi,\phi}$ (at ``good" primes). By a similar argument to earlier, this supplies a non-trivial element $c^{''}\in H^1_{\mathcal{P}_{NM/d}}(\mathbb{Q},C_{k-2,l}^{\psi,\phi})$. Conjecture \ref{conj:general} would let us take $g$ to be a newform, and so we are led to conjecture the following.
\begin{conj}\label{conj:pnew2}
Let $\mathcal{S} = \{p\in \mathcal{P}_M\,|\,\text{ord}_{\lambda'}(\psi(p)-\phi(p)p^{k-2})>0\}$. Then the class $c^{''}\in H^1_{\mathcal{P}_{NM/d}}(\mathbb{Q},C_{k-2,l}^{\psi,\phi})$ constructed above is $p$-new for each $p\in \mathcal{S}$.
\end{conj}
\noindent In summary, we have considered the two different ways in which Conditions (1) and (2) can hold. By Bloch-Kato, each implies the existence of a collection of ``$p$-new'' elements in a certain Bloch-Kato Selmer group (i.e.\ Propositions \ref{prop:BK1} and \ref{prop:BK2}). Further, we expect that both collections of $p$-new elements should be explained by the existence of two ``new'' elements, each arising from a newform congruence implied by Conjecture \ref{conj:general} (i.e.\ Conjectures \ref{conj:pnew} and \ref{conj:pnew2}). In the case of $M=p$ prime, we remark that Theorem \ref{thm:prime} implies the truth of these conjectures, but for more general square-free $M$, little seems to be known.
\section{Examples}
\label{section:eg}
Below, we give brief computational examples of Conjecture \ref{conj:general}, using data provided by the LMFDB database \cite{LMFDB}. First we consider examples demonstrating Theorem \ref{thm:prime}.
\begin{eg}
Take $N=5$, $M=2$ and $k=8$. Also let $\psi = \mathds{1}$ and $\phi = \left(\frac{\cdot}{5}\right)$ (so that $\chi = \phi$). The only prime $\lambda'$ of $\mathbb{Z}[\psi,\phi] = \mathbb{Z}$ satisfying the conditions of Theorem \ref{thm:prime} is $\lambda' = 257$.
\noindent Indeed, the newform $f \in S_8^{\text{new}}(\Gamma_0(10), \tilde{\chi})$ with LMFDB label [10.8.b.a] satisfies the congruence \[a_q(f) \equiv 1+\left(\frac{q}{5}\right)q^{7} \bmod \lambda\] for all $q\neq 2,5$ and for some fixed prime $\lambda\mid \lambda'$ of $\mathcal{O}_f[\psi, \phi] = \mathcal{O}_f$ (the ring of integers of the quartic field generated by a root of $x^4-15x^2+64$).
\end{eg}
\begin{eg}
Take $N=7$, $M=2$ and $k=7$. Also let $\psi = \mathds{1}$ and $\phi$ be the primitive mod $7$ character satisfying $\phi(3) = \zeta_6$ (so that $\chi = \phi$). The only prime $\lambda'$ of $\mathbb{Z}[\psi,\phi] = \mathbb{Z}[\zeta_6]$ satisfying the conditions of Theorem \ref{thm:eigenform} is $\lambda' = \langle 337, \zeta_6+128\rangle$ (lying above $l = 337$).
\noindent Indeed, the newform $f \in S_7^{\text{new}}(\Gamma_0(14), \tilde{\chi})$ with LMFDB label [14.7.d.a] satisfies the congruence \[a_q(f) \equiv 1 + \phi(q)q^6 \bmod \lambda\] for all $q\neq 2,7$ and for some fixed prime $\lambda\mid \lambda'$ of $\mathcal{O}_f[\psi,\phi] = \mathcal{O}_f[\zeta_6]$ (here $\mathcal{O}_f$ is the ring of integers of a degree $8$ number field).
\end{eg}
We finish with an example demonstrating Conjecture \ref{conj:general} in a case where $M$ is composite.
\begin{eg}
Take $N=7, M=6$ and $k=6$. Let $\psi = \mathds{1}$ and $\phi$ be the primitive mod $7$ character satisfying $\phi(3) = \zeta_3^2$ (so that $\chi = \phi$). The only prime $\lambda'$ of $\mathbb{Z}[\psi,\phi] = \mathbb{Z}[\zeta_6]$ satisfying the conditions of Theorem \ref{thm:eigenform} is $\lambda' = \langle 73, \zeta_6+64\rangle$ (lying above $l = 73$).
\noindent Indeed, the newform $f\in S_6^{\text{new}}(\Gamma_0(42),\tilde{\chi})$ with LMFDB label [42.6.e.c] satisfies the congruence: \[a_q(f) \equiv 1 + \phi(q)q^5\bmod \lambda\] for all primes $q\neq 2,7$ and for a fixed prime $\lambda\mid \lambda'$ of $\mathcal{O}_{f}[\psi,\phi] = \mathcal{O}_{f}[\zeta_6]$ (here $\mathcal{O}_f$ is the ring of integers of a degree $4$ number field).
\end{eg}
\end{document} |
\begin{document}
\begin{abstract}
We are interested in the normal class of an algebraic hypersurface $\mathcal Z$ in
the complexified euclidean projective space $\mathbb P^n$, that is the
number of normal lines to $\mathcal Z$ passing through a generic point of $\mathbb P^n$.
Thanks to the notion of normal polars, we state a formula for the normal class valid for a general hypersurface $\mathcal Z\subset\mathbb P^n$. We give a generic result and illustrate our formula on examples
in $\mathbb P^n$. We define the orthogonal incidence variety and
compute the Schubert class of the variety of projective normal lines to a surface of $\mathbb P^3$ in the Chow ring of $\mathbb G(1,3)$.
We complete our work with a generalization of Salmon's formula for the normal class of a Pl\"ucker
curve to any plane curve with any kind of singularity.
\end{abstract}
\title{Normal class and normal lines of algebraic hypersurfaces}
\section*{Introduction}
The notion of normal lines to a hypersurface of a euclidean space
is extended here to the complexified euclidean projective space $\mathbb P^n$ ($n\ge 2$).
In this setting, the hyperplane at infinity $\mathcal H^\infty$ is fixed, together with
the umbilical at infinity $\mathcal U_\infty\subset\mathcal H^\infty$, the smooth quadric in $\mathcal H^\infty$ corresponding to the intersection
of $\mathcal H^\infty$ with any hypersphere (see Section \ref{DEFI0} for details).
The aim of the present work is the study of the {\bf normal class} $c_\nu(\mathcal Z)$ of a hypersurface $\mathcal Z$ of
$\mathbb P^n$,
that is the number of $m\in\mathcal Z$ such that the projective normal line $\mathcal N_{m}(\mathcal Z)$ to
$\mathcal Z$ at $m$ passes through a generic $m_1\in\mathbb P^n$
(see Section \ref{SEC00} for details).
Our estimates provide upper bounds for the number of normal lines, of a real algebraic surface
in an $n$-dimensional affine euclidean space $E_n$, passing through a generic point in $E_n$.
Let us consider the \textbf{variety $\mathfrak{N}_{\mathcal{Z}}$ of projective normal lines of }$ \mathcal Z$ by
\[
\mathfrak{N}_{\mathcal{Z}}:=\overline{\{\mathcal{N}_{m}(\mathcal{Z});m\in \mathcal{Z}\}}\subset
\mathbb{G}(1,n)\subset \mathbb P^{\frac{n(n+1)}2-1}
\]
and its Schubert class $\mathfrak{n}_{\mathcal{Z}}:=[\mathfrak{N}_{\mathcal{Z}
}]\in A^{n-1}(\mathbb{G}(1,n))$ (when $\dim \mathfrak{N}_{\mathcal{Z}}=n-1$).
The fact that $PGL(n,\mathbb C)$ {\bf does not preserve normal lines} complicates our study compared to the study of tangent hyperplanes.
Namely, we prove the following result, valid for a wide family of hypersurfaces
of $\mathbb P^n$.
Let $\mathcal Z=V(F)$ be an irreducible hypersurface of $\mathbb P^n$. We write $\mathcal Z_\infty:=\mathcal Z\cap
\mathcal H^\infty$.
Note that the singular points of $\mathcal Z_\infty$ correspond
to the points of tangency of $\mathcal Z$ with $\mathcal H^\infty$.
\begin{thm}\label{thmhypersurface}
Let $\mathcal Z\in\mathbb P^n$ be a smooth irreducible hypersurface of degree $d_{\mathcal Z}\ge 2$ such that $\mathcal H^\infty$ is not tangent to $\mathcal Z$ and that
at any $m\in\mathcal Z_\infty\cap\mathcal U_\infty$, the tangent planes to
$\mathcal Z_\infty$ and to $\mathcal U_\infty$ at $m$ are distinct.
Then the normal class $c_\nu(\mathcal Z)$ of $\mathcal Z$ is
$$c_\nu(\mathcal Z)=d_{\mathcal Z}\sum_{k=0}^{n-1}(d_{\mathcal Z}-1)^k.$$
In particular,
\begin{itemize}
\item if $d_\mathcal Z=2$, $c_\nu(\mathcal Z)=n$;
\item if $n=2$, $c_\nu(\mathcal Z)=d_{\mathcal Z}$;
\item if $n=3$, $c_\nu(\mathcal Z)=d_{\mathcal Z}^3-d_{\mathcal Z}^2+d_{\mathcal Z}$;
\item if $n=4$, $c_\nu(\mathcal Z)=d_{\mathcal Z}^4-2d_{\mathcal Z}^3+2d_{\mathcal Z}^2$;
\item if $n=5$, $c_\nu(\mathcal Z)=d_{\mathcal Z}^5-3d_{\mathcal Z}^4+4d_{\mathcal Z}^3-2d_{\mathcal Z}^2+d_{\mathcal Z}$.
\end{itemize}
The normal class of an hyperplane $\mathcal H\subset\mathbb P^n$ (other than $\mathcal H^\infty$) is $c_\nu(\mathcal H)=1$.
\end{thm}
Actually we establish a general formula which is valid for a wider family of hypersurfaces of $\mathbb P^n$.
The notion of normal polars $\mathcal P_{A,\mathcal Z}$
plays an important role in our study. It is a notion analogous to the notion
of polars \cite{Dolga}.
Given an irreducible hypersurface
$\mathcal Z\subset\mathbb P^n$ of degree $d_{\mathcal Z}$, we extend the definition of the line
$\mathcal N_m(\mathcal Z)$ to any $m\in\mathbb P^n$.
We then define a regular map $\alpha_{\mathcal Z}:\mathbb P^n\setminus\mathcal B^{(0)}_{\mathcal Z}\rightarrow
\mathbb P^{\frac{n(n+1)}2-1}$ corresponding to $m\mapsto\mathcal N_m(\mathcal Z)$ (where $\mathcal B^{(0)}_\mathcal Z$
is the set of base points of $\alpha_{\mathcal Z}$).
We will see that $\mathcal B_{\mathcal Z}:=\mathcal B^{(0)}_{\mathcal Z}\cap\mathcal Z$ corresponds to the union of the set of singular points of $\mathcal Z$, of the set of points of tangency of $\mathcal Z$ with $\mathcal H^\infty$ and of the set of points of tangency of $\mathcal Z_\infty$ with $\mathcal U_\infty$.
For any $A\in\mathbb P^n$, we will introduce the notion of {\bf normal polar}
$\mathcal P_{A,\mathcal Z}$ of $\mathcal Z$ with respect to $A$
as the set of $m\in\mathbb P^n$
such that either $m\in\mathcal B^{(0)}_{\mathcal Z}$ or $A\in \mathcal N_m(\mathcal Z)$.
We will see that, if $\dim\mathcal B^{(0)}_{\mathcal Z}\le 1$, then, for a generic $A\in\mathbb P^n$,
$$\dim\mathcal P_{A,\mathcal Z}=1\quad\mbox{and}\quad
\deg \left(\mathcal P_{A,\mathcal Z}\right)= \sum_{k=0}^{n-1}(d_{\mathcal Z}-1)^k.$$
\begin{thm}\label{formulegeneralehypersurface}
Let $\mathcal Z$ be an irreducible hypersurface of $\mathbb P^n$
with isolated singularities, admitting a finite number of
points of tangency with $\mathcal H^\infty$ and such that
$\mathcal Z_\infty$
has a finite number of points of tangency with $\mathcal U_\infty$.
Then the normal class $c_\nu(\mathcal Z)$
of $\mathcal Z$ is given by
\[
c_\nu(\mathcal Z)=d_{\mathcal Z}.\sum_{k=0}^{n-1}(d_{\mathcal Z}-1)^k
-\sum_{P\in \mathcal B_{\mathcal Z}} i_P(\mathcal Z,{\mathcal P}_{A,\mathcal Z})\, ,
\]
for a generic $A\in \mathbb P^n$, where $i_P(\mathcal Z,{\mathcal P}_{A,\mathcal Z})$
is the intersection multiplicity of $\mathcal Z$ with ${\mathcal P}_{A,\mathcal Z}$.
\end{thm}
In dimension 3, we obtain the following result.
\begin{thm}[n=3, normal class and Chow ring]\label{formulegeneralesurface}
Let $\mathcal S$ be an irreducible surface of $\mathbb P^3$
with isolated singularities, admitting a finite number of
points of tangency with $\mathcal H^\infty$ and such that
$\mathcal S_\infty$
has a finite number of (non singular) points of tangency with $\mathcal U_\infty$.
Then
$$\mathfrak{n}_{\mathcal{S}}=c_\nu(\mathcal S).\sigma_2+d_{\mathcal S}(d_{\mathcal S}-1).\sigma _{1,1}\in A^{2}(\mathbb{G}(1,3)),$$
where the normal class $c_\nu(\mathcal S)$
of $\mathcal S$ is equal to
$d_{\mathcal S}.\deg(\mathcal P_{A,\mathcal S}) $ (for a generic $A\in \mathbb P^3$)
minus the sum of the intersection multiplicities of $\mathcal S$
with its generic normal polars ${\mathcal P}_{A,\mathcal S}$ at points of $\mathcal B_{\mathcal S}$.
\end{thm}
\begin{coro}[n=3]
For a generic irreducible surface $\mathcal S\subset \mathbb P^3$ of degree $d\ge 2$, we have
$c_\nu(\mathcal S)=d^3-d^2+d$ and
\[
{\mathfrak n}_{\mathcal{S}}=(d^3-d^2+d).\sigma_2+d(d-1).\sigma _{1,1}\in A^{2}(\mathbb{G}(1,3)).
\]
\end{coro}
In the next statement, we consider smooth surfaces $\mathcal S$ of $\mathbb P^3$
($\mathbb P^3$ being endowed with projective coordinates $[x:y:z:t]$)
such that $\mathcal S_\infty$
has no worse singularities than ordinary multiple points and
ordinary cusps.
\begin{thm}[n=3]\label{thmsurfaces}
Let $\mathcal S\subset\mathbb P^3$ be a smooth irreducible
surface of degree $d_{\mathcal S}\ge 2$ such that:
\begin{itemize}
\item[(i)] in $\mathcal H^\infty$, the curve $\mathcal S_\infty$ has a finite number
of points of tangency with $\mathcal U_\infty$,
\item[(ii)] any singular point of $\mathcal S_\infty$ is either an
ordinary multiple point or an ordinary cusp,
\item[(iii)] at any (non singular) point of tangency of $\mathcal S_\infty$
with $\mathcal U_\infty$, the contact is ordinary,
\item[(iv)] at any singular point of $\mathcal S_\infty$
contained in $\mathcal U_\infty$, the tangent line to $\mathcal U_\infty$ is not contained
in the tangent cone to $\mathcal S_\infty$.
\end{itemize}
Then
\[
\mathfrak{n}_{\mathcal{S}}=c_\nu(\mathcal S).\sigma_2+d_{\mathcal S}(d_{\mathcal S}-1).\sigma _{1,1}\in A^{2}(\mathbb{G}(1,3))
\]
and
the normal class of $\mathcal S$ is
$$c_\nu(\mathcal S)=d_{\mathcal S}^3-d_{\mathcal S}^2+d_{\mathcal S}-\sum_{k\ge 2}((k-1)^2m_\infty^{*(k)}+
k(k-1)\tilde m_\infty^{(k)}) -2\kappa_\infty^* - 3\tilde \kappa_\infty-c_\infty,$$
where
\begin{itemize}
\item $m_\infty^{*(k)}$ (resp. $\tilde m_\infty^{(k)}$) is the number of ordinary multiple points of
order $k$ of $\mathcal S_\infty$ outside (resp. contained in) $\mathcal U_\infty$,
\item $\kappa_\infty^*$ (resp. $\tilde \kappa_\infty$)
is the number of ordinary cusps of $\mathcal S_\infty$ outside (resp. contained in) $\mathcal U_\infty$,
\item $c_\infty$ is the number of ordinary (non singular) points of tangency of $\mathcal S_\infty$ with $\mathcal U_\infty$.
\end{itemize}
\end{thm}
\begin{exa}[n=3]
The surface $\mathcal S=V(xzt-tx^2-zt^2-xz^2+y^3)\subset \mathbb P^3$
is smooth, its only point of tangency with $\mathcal H_\infty=V(t)$ is
$P[1:0:0:0]$ which is an ordinary cusp of $\mathcal S_\infty=V(t,-xz^2+y^3)$. Moreover $\mathcal S_\infty$ has no point of tangency with
$\mathcal U_\infty$. Hence the normal class of $\mathcal S$ is
$27-9+3-2=19$.
\end{exa}
Theorem \ref{thmhypersurface} (resp. \ref{thmsurfaces}) is a consequence of Theorem \ref{formulegeneralehypersurface} (resp. \ref{formulegeneralesurface}).
In a more general setting, when $n=3$, we can replace $\alpha_{\mathcal S}$ by $\tilde\alpha_{\mathcal S}=\frac{\alpha_{\mathcal S}}H$
(for some homogeneous polynomial $H$ of degree $d_H$) so that the
set $\tilde{\mathcal B}^{(0)}_{\mathcal S}$ of base points of $\tilde\alpha_{\mathcal S}$
has dimension at most 1. In this case, we consider a notion of
normal polars associated to $\tilde\alpha_{\mathcal S}$ which have generically
dimension 1 and degree $\tilde d_{\mathcal S}^2
-\tilde d_{\mathcal S}+1$
(with $\tilde d_{\mathcal S}=d_{\mathcal S}-d_H$).
\begin{thm}[n=3]\label{factorisable}
Let $\mathcal S$ be an irreducible surface of $\mathbb P^3$.
If the set $\tilde{\mathcal B}^{(0)}_{\mathcal S}\cap\mathcal S$ is finite,
then
$\mathfrak{n}_{\mathcal{S}}=c_\nu(\mathcal S).\sigma_2+d_{\mathcal S}(\tilde d_{\mathcal S}-1)
.\sigma _{1,1}\in A^{2}(\mathbb{G}(1,3))$ and
the normal class $c_\nu(\mathcal S)$
of $\mathcal S$ is equal to
$d_{\mathcal S}(\tilde d_{\mathcal S}^2-\tilde d_{\mathcal S}+1) $
minus the intersection multiplicity of $\mathcal S$
with its generic normal polars $\tilde{\mathcal P}_{A,\mathcal S}$ at points $m\in\tilde{\mathcal B}^{(0)}_{\mathcal S}\cap \mathcal S$.
\end{thm}
When the surface is a ``cylinder'' or a surface of revolution, its normal class is equal to
the normal class of its plane base curve. The normal class of any plane curve
is given by the simple formula of Theorem \ref{thmcurves} below, which we give for completeness.
Let us recall that, when $\mathcal C=V(F)$ is an irreducible curve of $\mathbb P^2$,
the evolute of $\mathcal C$ is the curve tangent to the family of normal lines to $\mathcal C$
and that the evolute of a line or a circle is reduced to a single point.
Hence, except for lines and circles, the normal class of $\mathcal C$ is simply the class (with multiplicity)
of its evolute.
The following result generalizes the result by Salmon \cite[p. 137]{Salmon-Cayley} proved in the case
of Pl\"ucker curves (plane curves with no worse multiple tangents than ordinary double tangents, no singularities other than
ordinary nodes and cusps) to any plane curve (with any type of singularities).
We write $\ell_\infty$ for the line at infinity of $\mathbb P^2$.
We define the two cyclic points $I[1:i:0]$ and $J[1:-i:0]$ in $\mathbb P^2$ (when $n=2$, $\mathcal U_\infty=\{I,J\}$).
\begin{thm}[n=2]\label{thmcurves}
Let $\mathcal C=V(F)$ be an irreducible curve of $\mathbb P^2$ of degree $d\ge 2$ with class $d^\vee$. Then
its normal class is $$c_\nu(\mathcal C)=d+d^\vee-\Omega(\mathcal C,\ell_\infty)-\mu_{I}(\mathcal C)-\mu_{J}(\mathcal C),$$
where $\Omega$ denotes the sum of the contact numbers between two curves
and where $\mu_P(\mathcal C)$ is the multiplicity of $P$ on $\mathcal C$.
\end{thm}
In \cite{Fantechi}, Fantechi proved that the evolute map is birational from $\mathcal C$ to
its evolute curve unless\footnote{We write $[x:y:z]$ for the coordinates of
$m\in\mathbb P^2$ and $F_x,F_y,F_z$ for the partial derivatives of $F$.}
$F_x^2+F_y^2$ is a square modulo $F$, and that in this latter
case the evolute map is $2:1$ (if $\mathcal C$ is neither a line nor a circle).
Therefore, the normal class $c_\nu(\mathcal C)$ of a plane curve $\mathcal C$ corresponds to the class of its evolute
unless $F_x^2+F_y^2$ is a square modulo $F$ and in this last case, the normal class $c_\nu(\mathcal C)$ of $\mathcal C$
corresponds to the class of its evolute times 2 (if $\mathcal C$ is neither a line nor a circle).
The notion of focal loci generalizes the notion of evolute to higher dimension \cite{Trifogli,CataneseTrifogli}.
The normal lines of an hypersurface $\mathcal Z$ are tangent to the focal loci hypersurface of $\mathcal Z$ but of course the normal class of $\mathcal Z$
does not correspond anymore (in general) to the class of its focal loci (the normal lines to $\mathcal Z$ are contained
in but are not equal to the tangent hyperplanes of its focal loci).
In Section \ref{SEC00}, we introduce normal lines, normal class, normal polars in $\mathbb P^n$ (see also Appendix \ref{NORMAL} for the link between projective orthogonality and affine orthogonality).
In Section \ref{SECpolar}, we study normal polars and prove Theorems \ref{formulegeneralehypersurface} and \ref{thmhypersurface}.
In Section \ref{incidenceschubert}, we introduce the orthogonal incidence variety $\mathcal I^\perp$ in $\mathbb G(1,n)$, give some recalls on the Schubert classes in the Chow ring of $\mathbb G(1,3)$
and prove Theorems \ref{formulegeneralesurface}
and \ref{factorisable}.
In Section \ref{sec:proofthm1}, we prove Theorem \ref{thmsurfaces}.
In Section \ref{secquadric}, we apply our results on examples in $\mathbb P^3$: we compute the normal class
of every quadric and of a cubic
surface with singularity $E_6$.
In Section \ref{proofcurve}, we prove Theorem \ref{thmcurves}.
Appendix \ref{cylindreetrevolution} is devoted to the normal class of ``cylinders'' and of surfaces of revolution in $\mathbb P^n$.
\section{Normal lines, normal class and normal polars}\label{SEC00}
\subsection{Definitions and notations}\label{DEFI0}
Let $\mathbf V$ be a $\mathbb C$-vector space of dimension $n+1$.
Given $\mathcal Z=V(F)\ne\mathcal H^\infty$ an irreducible hypersurface of
$\mathbb P^n=\mathbb P(\mathbf V)$ (with
$F\in Sym(\mathbf{V}^\vee)\cong\mathbb C[x_1,...,x_{n+1}]$), we consider
the rational map $n_{\mathcal Z}:\mathbb P^n\dashrightarrow\mathcal H^\infty$
given by $n_{\mathcal Z}=[F_{x_1}:\cdots :F_{x_n}:0]$.
Note that, for nonsingular $m\in \mathcal Z$ such that
the tangent hyperplane $\mathcal T_m\mathcal Z$ to $\mathcal Z$ at $m$
is not $\mathcal H^\infty$, $n_{\mathcal Z}(m)$ is the pole of the $(n-2)$-variety at infinity $\mathcal T_m\mathcal Z\cap\mathcal H^\infty\subset\mathcal H^\infty$
with respect to the {\bf umbilical}
$\mathcal U_\infty:=V(x_1^2+...+x_n^2)\cap\mathcal H^\infty\subset\mathcal H^\infty$. $\mathcal U_\infty$ corresponds to the set of {\bf circular points at infinity}.
\begin{defi}
{\bf The projective normal line} $\mathcal N_m\mathcal Z$ to $\mathcal Z$
at $m\in\mathcal Z$ is the line $(m\, n_{\mathcal Z}(m))$ when $n_{\mathcal Z}(m)$
is well defined in $\mathbb P^n$ and not equal to $m$.
\end{defi}
\begin{rqe}
This is a generalization of affine normal lines in the euclidean space $E_n$. Indeed,
if $F$ has real coefficients and if $m\in\mathcal Z\setminus\mathcal H_\infty$
has real coordinates $[x^{(0)}_1:\cdots:x^{(0)}_n:1]$,
then $\mathcal N_m\mathcal Z$ corresponds to the affine normal line of the affine
hypersurface $V(F(x_1,...,x_n,1))\subset E_n$
at the point of coordinates $(x^{(0)}_1,\cdots,x^{(0)}_n)$ (see Section \ref{NORMAL}).
\end{rqe}
The aim of this work is the study of the notion of normal class.
\begin{defi}
Let $\mathcal Z$ be an irreducible hypersurface of $\mathbb P^n$.
{\bf The normal class} of $\mathcal Z$ is the number $c_\nu(\mathcal Z)$ of $m\in\mathcal Z$
such that $\mathcal N_m(\mathcal Z)$ contains $m_1$ for a generic
$m_1\in\mathbb P^n$.
\end{defi}
Let $\Delta:=\{(m_1,m_2)\in\mathbb P^n\times\mathbb{P}^{n}\ :\ m_1=m_2\}$
be the diagonal of $\mathbb{P}^{n}\times\mathbb{P}^{n}$.
Recall that the {\bf Pl\"ucker embedding}
$\left( \mathbb{P}^{n}\times\mathbb{P}^{n}\right) \backslash\Delta
\overset{Pl}{\hookrightarrow} \mathbb{P}
(\bigwedge^{2}\mathbf V)\cong \mathbb{P}^{\frac{n(n+1)}2-1}$ is defined by
$$Pl(u,v)=\bigwedge^2({u}, {v})=\left[p_{i,j}=u_iv_j-u_jv_i\right]
_{1\le i<j\le n+1}\in\mathbb P^{\frac{n(n+1)}2-1},$$
with $p_{i,j}=-p_{j,i}$ the $(i,j)$-th Pl\"ucker coordinate, identifying $\mathbb P^{\frac{n(n+1)}2-1}$ with the projective space of $n\times n$ antisymmetric matrices.
Its image is the Grassmannian $\mathbb G(1,n)$ (see \cite{Eisenbud-Harris}) given by
$$\mathbb G(1,n):=Pl((\mathbb P^n)^2\setminus \Delta)=\bigcap_{(i,j_1,j_2,j_3)\in\mathcal I}V(B_{i,j_1,j_2,j_3}) \subset \mathbb P^{\frac{n(n+1)}2-1},$$
where $B_{i,j_1,j_2,j_3}:=p_{i,j_1}p_{j_2,j_3}-p_{i,j_2}p_{j_1,j_3}+p_{i,j_3}p_{j_1,j_2}$ and where $\mathcal I$ is the set of $(i,j_1,j_2,j_3)\in\{1,...,n+1\}$ such that
$j_1<j_2<j_3$ and $j_1,j_2,j_3\ne i$. We recall also that $\dim \mathbb G(1,n)=2n-2$.
\begin{rqe}
Let $h_{\mathcal Z}:\mathbb{P}^{n}\setminus V(F_{x_1},\cdots,F_{x_n})\rightarrow \mathbb{P}^{n}\times\mathbb{P}^{n}$
be the morphism defined by $h_{\mathcal Z}(m)=\left( m,{n}_{\mathcal Z}(m)\right) $.
The variety $\mathfrak N_{\mathcal Z}\subset\mathbb G(1,n)$ of projective normal lines to $\mathcal Z$ is the (Zariski closure of the) image of $\mathcal Z$
by the regular map $\alpha_{\mathcal Z}:=Pl\circ h_{\mathcal Z}:\mathbb P^n\setminus\mathcal B^{(0)}_{\mathcal Z}\rightarrow \mathbb{P}^{\frac{n(n+1)}2-1}$, with $\mathcal B^{(0)}_{\mathcal Z}:=V(F_{x_1},...,F_{x_n})\cup h_{\mathcal Z}^{-1}(\Delta)$, i.e.
$$\mathcal B^{(0)}_{\mathcal Z}:=\left\{ m\in\mathbb{P}^n;\ \bigwedge^2(\mathbf {m}, \mathbf n_{\mathcal Z}(\mathbf m))=\mathbf 0\ \mbox{in}\ \bigwedge^2\mathbf V\right\}.$$
\end{rqe}
Note that the number of normal lines to $\mathcal Z$ passing through
$A\in\mathbb P^n$
corresponds to the number of $m\in\mathcal Z\setminus\mathcal B_{\mathcal Z}$ satisfying the following
sets of equations~:
\begin{equation}\label{EQUA000}
\bigwedge^3[\mathbf {m}\ \mathbf n_{\mathcal Z}(\mathbf m)\ \mathbf A] =\mathbf 0\quad\mbox{in}\ \bigwedge^3 \mathbf V.
\end{equation}
\begin{defi}
For any $A\in\mathbb P^n$,
the set of points $m\in \mathbb P^n$ satisfying \eqref{EQUA000} is called
the {\bf normal polar} $\mathcal P_{A,\mathcal Z}$ of $\mathcal Z$ with respect to $A$.
\end{defi}
\subsection{Projective similitudes}
Recall that, for every field $\Bbbk$,
$$GO(n,\Bbbk)=\left\{ A\in GL(n,\Bbbk);\exists \lambda \in \Bbbk^{\ast },A\cdot^{t}A=\lambda \cdot I_{n}\right\} $$
is the \textbf{orthogonal similitude group} (for the standard products) and that
$GOAff(n,\Bbbk)=\Bbbk^{n}\rtimes GO(n,\Bbbk)$
is the \textbf{orthogonal similitude affine group}.
We have a natural monomorphism of groups
$\kappa :Aff(n,\mathbb{R})=\mathbb{R}^{n}\rtimes GL(n,\mathbb{R}
)\longrightarrow GL(n+1,\mathbb{R})$
given by
\begin{equation}\label{similitude}
\kappa (b,A)=\left(
\begin{array}{ccccccc}
a_{11} & ... & & & ... & a_{1n} & b_{1} \\
a_{21} & ... & & & ... & a_{2n} & b_{2} \\
& & & & & & \\
a_{n1} & .. & & & ... & a_{nn} & b_{n} \\
0 & ... & & & 0 & 0 & 1
\end{array}
\right)
\end{equation}
and,
by restriction,
$\kappa |_{GOAff(n,\mathbb{R})}:GOAff(n,\mathbb{R})=\mathbb{R}^{n}\rtimes
GO(n,\mathbb{R})\longrightarrow GL(n+1,\mathbb{R})$. Analogously we have a natural monomorphism of groups
$\kappa ^{\prime }:=(\kappa\otimes 1) |_{GOAff(n,\mathbb{C})}:GOAff(n,\mathbb{C})=
\mathbb{C}^{n}\rtimes GO(n,\mathbb{C})\longrightarrow GL(n+1,\mathbb{C})$.
Composing with the canonical projection $\pi :GL(n+1,\mathbb{C}
)\longrightarrow \mathbb{P}(GL(n+1,\mathbb{C}))$ we obtain the \textbf{
projective complex similitude Group}:
\begin{equation*}
\widehat{Sim_{\mathbb{C}}(n)}:=(\pi \circ \kappa ^{\prime })(G OAff(n,\mathbb{
C})).
\end{equation*}
which acts naturally on $\mathbb{P}^{n}$.
\begin{defi}
An element of $\mathbb P(Gl(\mathbf V))$ corresponding
to an element of $\widehat{Sim_{\mathbb{C}}(n)}$ with respect to the basis
$(\mathbf e_1,\cdots,\mathbf e_n)$
is called a \textbf{projective similitude of }$\mathbb P^{n}.$
\end{defi}
The set of projective similitudes of $\mathbb P^n$ is isomorphic to $\widehat{Sim_{\mathbb{C}}(n)}$.
\begin{lem}\label{lemmesimilitude}
Projective similitudes preserve the orthogonality structure of $\mathbb P^n$; in particular, they preserve the normal lines
and the normal class of surfaces of $\mathbb P^n$.
\end{lem}
This lemma has a straightforward proof that is omitted.
\section{Proof of Theorem \ref{formulegeneralehypersurface}}\label{SECpolar}
\subsection{Geometric study of $\mathcal B_{\mathcal Z}:=\mathcal B^{(0)}_{\mathcal Z}\cap{\mathcal Z}$}\label{sec:base}
We write $\mathcal Z_\infty:=\mathcal Z\cap\mathcal H^\infty$. Recall that $\mathcal U_\infty:=\mathcal H^\infty\cap V\left(x_1^2+...+x_n^2\right)$.
\begin{prop}
A point of $\mathcal Z$ is in $\mathcal B_\mathcal Z$ if and only if it is a singular point
of $\mathcal Z$ or a tangential point of $\mathcal Z$ at infinity or a tangential point of $\mathcal Z_\infty$ to the umbilical, i.e.
$\mathcal B_{\mathcal Z}=\sing(\mathcal Z)\cup \mathcal K_\infty(\mathcal Z)\cup
\Gamma_\infty(\mathcal Z)$, where
\begin{itemize}
\item $\sing(\mathcal Z)$ is the set of singular points of
$\mathcal Z$,
\item $\mathcal K_\infty(\mathcal Z)$ is the set of points of $\mathcal Z$ at which the tangent hyperplane is $\mathcal H^\infty$,
\item $\Gamma_\infty(\mathcal Z)$ is
the set of points of $\mathcal Z_\infty\cap \mathcal U_\infty$
at which the tangent spaces to $\mathcal Z_\infty$ and to $\mathcal U_\infty$ are the same.
\end{itemize}
\end{prop}
\begin{proof}
Let $m\in\mathcal Z$. We have
\begin{eqnarray*}
m\in\mathcal B_{\mathcal Z} &\Leftrightarrow& \bigwedge^2\left(\mathbf m , \mathbf n_{\mathcal Z}(\mathbf m)\right)=0\\
&\Leftrightarrow& \mathbf n_{\mathcal Z}(\mathbf m)=\mathbf 0\ \mbox{or}\ m= n_{\mathcal Z}( m)\\
&\Leftrightarrow& m\in V(F_{x_1},\cdots,F_{x_n})\ \mbox{or}\ m= n_{\mathcal Z}( m).
\end{eqnarray*}
Now $m\in V(F_{x_1},\cdots,F_{x_n})$ means either that $m$ is a singular point of $\mathcal Z$ or that
$\mathcal T_m\mathcal Z=\mathcal H^\infty$.
Let $m=[x_1:\cdots:x_{n+1}]\in\mathcal Z$ be such that $m=n_{\mathcal Z}( m)$. So $[x_1:\cdots:x_{n+1}]=[F_{x_1}:\cdots: F_{x_n}:0]$. In particular
$x_{n+1}=0$.
Due to the Euler identity, we have
$0=\sum_{i=1}^{n+1}x_iF_{x_i}=\sum_{i=1}^{n+1} x_i^2$. Hence $m\in\mathcal U_\infty$.
Note that the $(n-2)$-dimensional tangent space $\mathcal T_m\mathcal U_\infty$ to $\mathcal U_\infty$ at $m$ has equations
$X_{n+1}=0$ and $\langle m,\cdot\rangle=0$
and that the $(n-2)$-dimensional tangent space $\mathcal T_m\mathcal Z_\infty$ to $\mathcal Z_\infty$ at $m$ has equations
$X_{n+1}=0$ and $\langle n_{\mathcal Z}(m),\cdot\rangle=0$.
We conclude that $\mathcal T_m\mathcal U_\infty=\mathcal T_m\mathcal Z_\infty$.
Conversely, if $m=[x_1:\cdots:x_n:0]$ is a nonsingular point of $\mathcal Z_\infty\cap\mathcal U_\infty$ such that
$\mathcal T_m\mathcal U_\infty=\mathcal T_m\mathcal Z_\infty$, then the linear spaces $Span(\mathbf{m},\vec e_{n+1})$
and $Span(\nabla F,\vec e_{n+1})$ are equal which implies that $[x_1:\cdots:x_n:0]=[F_{x_1}:\cdots:F_{x_n}:0]$.
\end{proof}
Recall that the dual
variety of $\mathcal Z_\infty\subset\mathcal H^\infty$ is the variety
$\mathcal Z_\infty^\vee\subset (\mathcal H^\infty)^\vee\cong (\mathbb P^{n-1})^\vee$ of tangent hyperplanes
to $\mathcal Z_\infty$.
It corresponds to the (Zariski closure of the) image of $\mathcal Z_\infty$ by the rational map $n_{\mathcal Z}$. We write $\mathcal Z_\infty^\wedge\subset\mathbb P^n$ for this image. With this notation,
$\mathcal B_{\mathcal Z}=\sing(\mathcal Z)\cup (\mathcal Z_\infty\cap\mathcal Z_\infty^\wedge)$.
\begin{rqe}\label{Basegenerique}
For a generic hypersurface of $\mathbb P^n$, $\mathcal B_{\mathcal Z}=\emptyset$
and so $\dim \mathcal B_{\mathcal Z}^{(0)}\le 0$.
\end{rqe}
But we will also consider cases for which $\#\mathcal B_{\mathcal Z}<\infty$, and so
$\dim\mathcal B^{(0)}_{\mathcal Z}\le 1$.
\begin{exa}[n=3]\label{exemple2}
For the saddle surface $\mathcal S_1=V(xy-zt)$, the set
$\mathcal B_{\mathcal S_1}$ contains a single point $[0:0:1:0]$
which is a point of tangency at infinity of $\mathcal S_1$.
For the ellipsoid $\mathcal E_1:=V(x^2+2y^2+4z^2-t^2)$, the set $\mathcal B_{\mathcal E_1}$ is empty.
For the ellipsoid $\mathcal E_2:=V(x^2+4y^2+4z^2-t^2)$, the set
$\mathcal B_{\mathcal E_2}$ has two elements: $[0:1:\pm i:0]$ which
are points of tangency of $\mathcal E_2$ with $\mathcal U_\infty$.
\end{exa}
\begin{exa}
For the quadric $\mathcal Z:=V(x_1^2+x_2^2+(x_3+x_5)x_3+(2x_3+x_4)x_4)\subset \mathbb P^4$, $\sing(\mathcal Z)=\emptyset$, $\mathcal K_\infty(\mathcal Z)=\{[0:0:1:-1:0]\}$ and $\Gamma_\infty(\mathcal Z)=\{I_1,I_2\}$,
with $I_1[1:i:0:0:0]$ and $I_2[1:-i:0:0:0]$.
\end{exa}
\subsection{Normal polars of $\mathcal Z\subset\mathbb P^n$}
\label{polars}
Let $\mathcal Z=V(F)\subset \mathbb P^n$ (with $F\in Sym(\mathbf V^\vee)$) be an
irreducible hypersurface.
For every $A\in\mathbb P^n$, {\bf the normal polar} $\mathcal P_{A,\mathcal Z}$ of $\mathcal Z$ with respect to $A$ is the set of $m\in\mathbb P^n$ satisfying
the $\left(\begin{array}{c}n+1\\ 3\end{array}\right)$ equations of \eqref{EQUA000}.
For every $m,A\in \mathbb P^n$, we have
$$m\in \mathcal P_{A,\mathcal Z}\ \Leftrightarrow\ m\in\mathcal B^{(0)}_{\mathcal Z}\ \mbox{or}\ A\in
\mathcal N_m\mathcal Z,$$
extending the definition of $\mathcal N_m\mathcal Z$ from $m\in\mathcal Z$
to $m\in\mathbb P^n$.
\begin{lem}[The projective similitudes preserve the normal polars]
\label{preservpolar}
Let $\mathcal Z=V(F)\subset\mathbb P^n$ be a hypersurface and let
$\varphi$ be a projective similitude. Then
$\varphi(\mathcal P_{A,\mathcal Z})=\mathcal P_{\varphi(A),\varphi(\mathcal Z)}$.
\end{lem}
\begin{proof}
Due to Lemma \ref{lemmesimilitude}, $\varphi(\mathcal N_m\mathcal Z)=\mathcal N_{\varphi(m)}
(\varphi(\mathcal Z))$ which gives the result.
\end{proof}
Note that
$$\mathcal P_{A,\mathcal Z}=\mathcal B^{(0)}_{\mathcal Z}\cup\left(\bigcap_{i<j<k}\alpha_{\mathcal Z}^{-1}\mathcal H_{A,i,j,k}\right),$$
where $\mathcal{H}_{A,i,j,k}$ is the hyperplane of $\mathbb{P}^{\frac{n(n+1)}2-1}$ given by
$\mathcal{H}_{A,i,j,k}:= V(D_{i,j,k})\subset \mathbb{P}^{\frac{n(n+1)}2-1}$, with
$D_{i,j,k}:=a_ip_{j,k}-a_jp_ {i,k}+a_kp_{i,j}$.
On $\mathbb G(1,n)$, $p=Pl(u,v)\in\bigcap_{i<j<k}\mathcal H_{A,i,j,k}$
means that $\bigwedge ^3(\mathbf A,\mathbf u,\mathbf v)=0$.
\begin{lem}\label{lemdimG(1,n)}
For every $A\in \mathbb P^n$, the set $\bigcap_{i<j<k}\mathcal H_{A,i,j,k}$ is a
$(n-1)$-dimensional linear space of $\mathbb P^{\frac{n(n+1)}2-1}$ contained
in $\mathbb G(1,n)$.
\end{lem}
\begin{proof}
Let $A[a_1:\cdots:a_{n+1}]\in\mathbb P^n$.
Assume for example that $a_{j_0}\ne 0$ for some index $j_0$ (the proof being analogous for the other
indices, by symmetry).
Let $p\in\bigcap_{i<j<k}\mathcal H_{A,i,j,k}$. Let us prove that $p\in\mathbb G(1,n)$.
Let $i,j_1,j_2,j_3\in\{1,...,n+1\}$ be distinct indices.
Due to $D_{j_1,j_2,j_3}=D_{i,j_1,j_2}=0$, we have
\begin{multline*}
a_{j_1}a_{j_2}p_{i,j_1}p_{j_2,j_3}=a_{j_1}a_{j_2}p_{j_1,j_3}p_{i,j_2}
+a_{j_1}p_{j_1,j_2}(-a_{j_3}p_{i,j_2})\\+a_{j_2}p_{j_1,j_2}(-a_ip_{j_1,j_3})+
p_{j_1,j_2}(a_ia_{j_3}p_{j_1,j_2})\\
=a_{j_1}a_{j_2}p_{j_1,j_3}p_{i,j_2}-a_{j_1}a_{j_2}p_{i,j_3}p_{j_1,j_2}
-a_jp_{j_1,j_2}D_{i,j_2,j_3}+a_ip_{j_1,j_2}D_{j_1,j_2,j_3}.
\end{multline*}
Hence $a_{j_1}a_{j_2}B_{i,j_1,j_2,j_3}=0$, for every $i,j_1,j_2,j_3\in\{1,...,n+1\}$.
So $B_{i,j_1,j_2,j_3}\ne 0$ implies that $a_{j_1}=a_{j_2}=a_{j_3}=0$ (up to a permutation
of $(i,j_1,j_2,j_3)$) and so
$0=D_{j_0,j_1,j_2}=D_{j_0,j_2,j_3}=D_{j_0,j_1,j_3}$ imply $p_{j_1,j_2}=p_{j_2,j_3}=p_{j_1,j_3}=0$ which contradicts $B_{i,j_1,j_2,j_3}\ne 0$.
Since $\bigwedge ^4(\mathbf A,\mathbf A,\mathbf u,\mathbf v)=0$, we get
that $a_{j_0}D_{i,j,k}-a_iD_{j_0,j,k}+a_jD_{j_0,i,k}-a_kD_{j_0,i,j}=0$ for every $1\le i<j<k\le n+1$ such that $i,j,k\ne j_0$.
Hence
$\bigcap_{i<j<k}\mathcal H_{A,i,j,k}=\bigcap_{i<j, i,j\ne j_0}\mathcal H_{A,j_0,i,j}$.
Since $a_{j_0}\ne 0$, the $\frac {n(n-1)}2$ corresponding linear equations are linearly independent and so
$\bigcap_{i<j, i,j\ne j_0}\mathcal H_{A,j_0,i,j}\subset \mathbb P^{\frac{n(n+1)}2-1}$
has dimension
$\frac{n(n+1)}2-1-\frac{(n-1)n}2=n-1$.
\end{proof}
\begin{prop}\label{degrepolaire}
Let $\mathcal Z=V(F)\subset\mathbb P^n$ be an irreducible hypersurface
such that $d_{\mathcal Z}:=\deg\mathcal Z\ge 2$ and $\dim\mathcal B^{(0)}_{\mathcal Z}\le 1$.
Then, for a generic $A\in\mathbf V$, we have $\dim \mathcal P_{A,\mathcal Z}=1$ and
$$\deg \mathcal P_{A,\mathcal Z}=\sum_{k=0}^{n-1}(d_{\mathcal Z}-1)^k.$$
\end{prop}
\begin{proof}
Due to the proof of Lemma \ref{lemdimG(1,n)}, for every $A\in\mathbb P^n$
such that $a_{n+1}\ne 0$, we have
$\bigcap_{i<j<k}\mathcal H_{A,i,j,k}=\bigcap_{i<j<n+1}\mathcal H_{A,n+1,i,j}\subset\mathbb P^{\frac{n(n+1)}2-1}$
and so
$$\mathcal P_{A,\mathcal Z}= \bigcap_{1\le i<j\le n}V(E_{A,i,j})\subset\mathbb P^n,$$
with
$$\forall i,j\in\{1,...,n\},\quad E_{A,i,j}:=L_{A,i}F_{x_j}-L_{A,j}F_{x_i}\quad\mbox{and}\quad L_{A,i}:=a_{n+1}x_i-a_ix_{n+1},$$
i.e. $E_{A,i,j}=a_{n+1}(x_iF_{x_j}-x_jF_{x_i})+a_jx_{n+1}F_{x_i}-a_ix_{n+1}F_{x_j}$.
Note that
\begin{equation}\label{simplification}
L_{A,k}E_{A,i,j}-L_{A,j}E_{A,i,k}=L_{A,i}E_{A,k,j}\quad\mbox{and}\quad
F_{x_k}E_{A,i,j}-F_{x_j}E_{A,i,k}=F_{x_i}E_{A,k,j}.
\end{equation}
Hence
\begin{equation}\label{n-1eq}
\forall i\in\{1,...,n\},\quad
\mathcal P_{A,\mathcal Z}\setminus V(L_{A,i},F_{x_i})=\bigcap_{j\in\{1,...,n\}\setminus\{i\}}
V(E_{A,i,j})\setminus V(L_{A,i},F_{x_i}),
\end{equation}
and so $\dim\mathcal P_{A,\mathcal Z}\ge 1$.
Recall that $\mathcal B_{\mathcal Z}^{(0)}=\bigcap_{i=1}^nV(x_{n+1}F_{x_i})\cap
\bigcap_{i,j=1}^nV(x_iF_{x_j}-x_jF_{x_i})$ and that we have assumed that
$\dim \mathcal B_{\mathcal Z}^{(0)}\le 1$. In particular
$\mathcal P_{A,\mathcal Z}\cap\mathcal H^\infty=\mathcal B_{\mathcal Z}^{(0)}\cap
\mathcal H^\infty$ and
$\mathcal B_{\mathcal Z}^{(0)}\setminus\mathcal H^\infty
=V(F_{x_1},...,F_{x_n})\setminus\mathcal H^\infty$.
This combined with \eqref{n-1eq} and with the expression of $E_{A,i,j}$ leads to $\dim\mathcal P_{A,\mathcal Z}= 1$.
Now let us compute the degree of $\mathcal P_{A,\mathcal Z}$.
The idea is to prove an induction formula.
Assume that $A\not\in\mathcal H^\infty$ is such that $\dim\mathcal P_{A,\mathcal Z}
=1$. Let $\mathcal H=V(\sum_{i=1}^{n+1}
\alpha_ix_i)\subset\mathbb P^n$ be an hyperplane
such that $\#(\mathcal H\cap \mathcal P_{A,\mathcal Z})
=\deg \mathcal P_{A,\mathcal Z}$ and $\sum_{i=1}^n\alpha_i^2\ne 0$.
We compose by a projective similitude $\phi:\mathbb P^n\rightarrow\mathbb P^n$
so that $\phi(A)$ has projective coordinates $[0:\cdots:0:1]$ and that $\hat{\mathcal H}:=\phi(\mathcal H)=V(x_1-b x_{n+1})\subset\mathbb P^n$. Set
$\hat{\mathcal Z}:=\phi(\mathcal Z)=V(\hat F)\subset\mathbb P^n$, with $\hat F:= F\circ\phi^{-1}$.
Hence $\phi(\mathcal P_{A,\mathcal Z})=\mathcal P_{\phi(A),\hat{\mathcal Z}}$
is the set of points $m[x_1:\cdots:x_{n+1}]\in\mathbb P^n$
such that $\bigwedge^2 \left(\left(\begin{array}{c}x_1\\ \vdots\\x_n\\0\end{array}\right),\left(
\begin{array}{c}\hat F_{x_1}\\ \vdots\\\hat F_{x_n}\\0\end{array}\right)\right)=0$ in $\bigwedge^2\mathbf V$.
We then define $G(x_2,...,x_{n+1}):=\hat F(bx_{n+1},x_2,...,x_{n+1})
\in\mathbb C[x_2,...,x_{n+1}]$ and $H(x_3,...,x_{n+1}):=G(0,x_3,...,x_{n+1})\in\mathbb C[x_3,...,x_{n+1}]$.
We set $\mathcal Z_1:=V(G)\subset\mathbb P^{n-1}$,
$\mathcal Z_2:=V(H)\subset\mathbb P^{n-2}$ and $B_k[0:...:0:1]\in\mathbb P^{k}$.
We then write $\mathcal P_{n-k,B_{n-k},{\mathcal Z}_k}$ for the normal polar
in $\mathbb P^{n-k}$ of $\mathcal Z_k\subset\mathbb P^{n-k}$ with respect to
$B_{n-k}$, with the conventions $\mathcal P_{0,B_0,{\mathcal Z}_k}=\emptyset$
(if $k=n$) and $\mathcal P_{1,B_1,{\mathcal Z}_k}=\mathbb P^1$ (if $k=n-1$).
We will prove that
\[
\deg \mathcal P_{A,{\mathcal Z}}=d_{\mathcal Z}\times \deg \mathcal P_{n-1,B_{n-1},{\mathcal Z}_1} - (d_{\mathcal Z}-1)\times \deg \mathcal P_{n-2,B_{n-2},{\mathcal Z}_2}\, .
\]
Let $\Pi_1: \mathbb P^n\rightarrow \mathbb P^{n-1}$
and $\Pi_2:\mathbb P^n\rightarrow \mathbb P^{n-2}$ be
the projections given by $\Pi_1[x_1:...:x_{n+1}]=[x_2:...:x_{n+1}]$
and $\Pi_2[x_1:...:x_{n+1}]=[x_3:...:x_{n+1}]$.
Due to \eqref{simplification},
\begin{equation}\label{decomp}
\hat{\mathcal H}\cap V(x_1\hat F_{x_2}-x_2\hat F_{x_1})\cap\Pi_1^{-1}(\mathcal P_{n-1,B_{n-1},{\mathcal Z}_1})=(\hat{\mathcal H}\cap
\mathcal P_{\phi(A),\hat{\mathcal Z}})\cup [\hat{\mathcal H}\cap V(x_2,\hat F_{x_2})\cap\Pi_2^{-1}(\mathcal P_{n-2,B_{n-2},{\mathcal Z}_2})].
\end{equation}
For a generic $\mathcal H$ and for a good choice of $\phi$,
the union in the right hand side of \eqref{decomp} is disjoint and
\begin{eqnarray*}
\deg \mathcal P_{A,\mathcal Z}&=&\#(\mathcal H\cap \mathcal P_{A,\mathcal Z})\\
&=&\#(\hat{\mathcal H}\cap \mathcal P_{\phi(A),\hat{\mathcal Z}})\\
&=& d_{\mathcal Z}.\deg \mathcal P_{n-1,B_{n-1},{\mathcal Z}_1} -(d_{\mathcal Z}-1).\deg \mathcal P_{n-2,B_{n-2},{\mathcal Z}_2}.
\end{eqnarray*}
Hence $\deg \mathcal P_{A,\mathcal Z}=d_{\mathcal Z}$ if $n=2$ and
$\deg \mathcal P_{A,\mathcal Z}=d_{\mathcal Z}^2-d_{\mathcal Z}+1$ if $n=3$.
The formula in the general case follows by induction.
\end{proof}
Analogously we have the following.
\begin{prop}[n=3]\label{rqebasepoints}
If $\mathcal S$ is an irreducible algebraic surface of $\mathbb P^3$
(with projective coordinates $[x:y:z:t]$)
and if $\dim \mathcal B^{(0)}_{\mathcal S}=2$, then the two dimensional part of $\mathcal B^{(0)}_{\mathcal S}$
is $V(H)\subset\mathbb P^3$ for some homogeneous polynomial
$H\in\mathbb C[x,y,z,t]$ of degree $d_H$.
We write $\boldsymbol{\alpha}_{\mathcal S}=H\cdot\boldsymbol{\tilde\alpha}_{\mathcal S}$.
We then consider the regular map $\tilde\alpha_{\mathcal S}:\mathbb P^3\setminus\tilde{\mathcal B}^{(0)}_{\mathcal S}
\rightarrow\mathbb P^3$ (with $\dim \tilde{\mathcal B}^{(0)}_{\mathcal S}\le 1$) associated to $\boldsymbol{\tilde\alpha}_{\mathcal S}$, and adapt our study with respect to $\tilde\alpha_{\mathcal S}$ instead
of $\alpha_{\mathcal S}$, defining the corresponding polar $\tilde {\mathcal P}_{A,\mathcal S}$.
Then, we have $\deg \tilde {\mathcal P}_{A,\mathcal S}=( d_{\mathcal S}
-d_H)^2-d_{\mathcal S}+d_H+1$.
\end{prop}
\begin{exa}\label{exemple1}
Note that the only irreducible quadrics $\mathcal S=V(F)\subset\mathbb P^3$
such that $\dim \mathcal B^{(0)}_{\mathcal S}\ge 2$ are the spheres and cones,
i.e. with $F$ of the following form
$$F(x,y,z,t)=(x-x_0t)^2+(y-y_0t)^2+(z-z_0t)^2+a_0t^2,$$
where $x_0,y_0,z_0,a_0$ are complex numbers (it is a sphere if $a_0\ne 0$ and it is a cone otherwise).
Hence, due to Proposition \ref{degrepolaire}, the degree of a generic normal polar of any
irreducible quadric of $\mathbb P^3$ which is neither a sphere nor a cone is 3.
Moreover, for a sphere or for a cone, applying Proposition \ref{rqebasepoints} with $H=t$,
$\tilde{\mathcal P}_{A,\mathcal S}$ is a line for a generic $A\in\mathbb P^3$.
\end{exa}
\subsection{Proof of Theorems \ref{thmhypersurface} and \ref{formulegeneralehypersurface}}
\begin{proof}[Proof of Theorem \ref{formulegeneralehypersurface}]
Let $\mathcal Z$ be an irreducible hypersurface of $\mathbb P^n$ of degree
$d_{\mathcal Z}\ge 2$ such that $\#\mathcal B_{\mathcal Z}<\infty$.
It remains to prove that
\begin{equation}\label{formuleclassenormalesurface}
c_\nu(\mathcal Z)= d_{\mathcal Z}.\deg \mathcal P_{A,\mathcal Z}
-\sum_{P\in\mathcal B_{\mathcal Z}}i_P\left(\mathcal Z,\mathcal P_{A,\mathcal Z}\right),
\end{equation}
for a generic $A\in\mathbb P^n$.
Note that, for a generic $A\in\mathbb P^n$,
since $\overline{\alpha_{\mathcal Z}(\mathcal Z)}$ is irreducible of
dimension at most $n-1$, we have
$\#\bigcap_{i<j<k}\mathcal H_{A,i,j,k}\cap \overline{\alpha_{\mathcal Z}(\mathcal Z)}<\infty$ and so
$\# \mathcal P_{A,\mathcal Z}\cap \mathcal Z<\infty$ (since $\#\mathcal B_{\mathcal Z}<\infty$).
Since $\dim \mathcal P_{A,\mathcal Z}=1$ and $\# \mathcal Z\cap \mathcal P_{A,\mathcal Z}<\infty$
for a generic $A\in\mathbb P^n$, due to Proposition
\ref{degrepolaire} and to the Bezout formula, we have:
\begin{eqnarray*}
d_{\mathcal Z}.\deg \left(\mathcal P_{A,\mathcal Z}\right)&=&
\deg\left(\mathcal Z\cap \mathcal P_{A,\mathcal Z}\right)\\
&=&\sum_{P\in\mathcal B_{\mathcal Z}}i_P\left(\mathcal Z, \mathcal P_{A,\mathcal Z}\right)+
\sum_{P\in\mathcal Z\setminus\mathcal B_{\mathcal Z}}i_P\left(\mathcal Z, \mathcal P_{A,\mathcal Z}\right).
\end{eqnarray*}
Now let us prove that, for a generic $A\in\mathbb P^n$,
\begin{equation}\label{multpolaire}
\sum_{P\in\mathcal Z\setminus\mathcal B_{\mathcal Z}}i_P\left(\mathcal Z, \mathcal P_{A,\mathcal Z}\right)=
\#((\mathcal Z\cap \mathcal P_{A,\mathcal Z})\setminus\mathcal B_{\mathcal Z}).
\end{equation}
Since $\alpha_{\mathcal Z}$ defines a rational map, $\overline{\alpha_{\mathcal Z}(\mathcal Z)}$ is irreducible and its dimension is at most $n-1$.
Assume first that $\dim \overline{\alpha_{\mathcal Z}(\mathcal Z)}<n-1$.
For a generic $A\in\mathbb P^n$,
the plane $\bigcap_{i<j<k}\mathcal H_{A,i,j,k}$ does not meet
$\overline{\alpha_{\mathcal Z}(\mathcal Z)}$ and so the left and right hand sides of \eqref{multpolaire} are both zero. So Formula \eqref{formuleclassenormalesurface}
holds true with $c_\nu(\mathcal Z)=0$.
Assume now that $\dim \overline{\alpha_{\mathcal Z}(\mathcal Z)}=n-1$. Then, for a generic $A\in\mathbb P^n$,
the plane $\bigcap_{i<j<k}\mathcal H_{A,i,j,k}$ meets $\alpha_{\mathcal Z}(\mathcal Z)$ transversally (with
intersection number 1 at every intersection point) and does not meet $\overline{\alpha_{\mathcal Z}(\mathcal Z)}\setminus\alpha_{\mathcal Z}(\mathcal Z)$.
This implies that, for a generic $A\in\mathbb P^n$, we have
$i_P\left(\mathcal Z, \mathcal P_{A,\mathcal Z}\right)=1$ for every $P\in(\mathcal Z\cap\mathcal P_{A,\mathcal Z})
\setminus\mathcal B_{\mathcal Z}$ and so \eqref{multpolaire} follows.
Hence, for a generic $A\in\mathbb P^n$, we have
\begin{eqnarray*}
d_{\mathcal Z}.\deg \left(\mathcal P_{A,\mathcal Z}\right)
&=&\sum_{P\in\mathcal B_{\mathcal Z}}i_P\left(\mathcal Z, \mathcal P_{A,\mathcal Z}\right)+
\#\{P\in \mathcal Z\setminus \mathcal B_{\mathcal Z} :\ A\in\mathcal N_P\mathcal Z\}\\
&=&\sum_{P\in\mathcal B_{\mathcal Z}}i_P\left(\mathcal Z,\mathcal P_{A,\mathcal Z}\right)+c_\nu(\mathcal Z),
\end{eqnarray*}
which gives \eqref{formuleclassenormalesurface}.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thmhypersurface}]
Let $\mathcal H=V(\sum_{i=1}^{n+1}a_ix_i)$ be a hyperplane such that
$\mathcal H\ne\mathcal H^\infty$. For every $m\in\mathcal H$,
$n_{\mathcal H}(m)[a_1:\cdots:a_n:0]\in\mathbb P^n$.
Hence every $A\in\mathbb P^n$ belongs to a single normal line to $\mathcal H$
(the line containing $A$ and $[a_1:\cdots:a_n:0]$).
The case $d_{\mathcal Z}\ge 2$ follows from Theorem \ref{formulegeneralehypersurface} and Remark \ref{Basegenerique}.
\end{proof}
\section{Orthogonal incidence variety and Schubert classes}\label{incidenceschubert}
\subsection{Orthogonal incidence variety}
Let us write as usual $\mathbb G(1,n)$ (resp. $\mathbb G(n-1,n)$) for the grassmannian of the lines
(resp. of the hyperplanes) of $\mathbb P^n$.
Let us write $pr_{1}:\mathbb{G}(1,n)\times \mathbb{G}(n-1,n)\rightarrow
\mathbb{G}(1,n)$ and $pr_{2}:\mathbb{G}(1,n)\times \mathbb{G}
(n-1,n)\rightarrow \mathbb{G}(n-1,n)$ for the canonical projections.
We define the {\bf orthogonal incidence variety} $\mathcal{I}^{\perp }$
by
$$\mathcal{I}^{\perp }:=\{(\mathcal{L}_1,\mathcal{H}_1)\in \mathbb G(1,n)\times
\mathbb{G}(n-1,n)\, :\, \mathcal{L}_1{\perp }\mathcal {H}_1\}.$$
Let us write $p_{1}:\mathcal{I}^{\mathbb{\perp }}\rightarrow \mathbb{G}
(1,n)$ and $p_{2}:\mathcal{I}^{\mathbb{\perp }}\rightarrow \mathbb{G}(n-1,n)$
for the restrictions of $pr_{1}$ and $pr_{2}$.
We want to describe in the Chow ring of $\mathbb{G}(1,n)$ and $\mathbb{G}
(n-1,n)\equiv \mathbb{P}^{n\vee }$
the rational equivalence class of $p_{2}p_{1}^{-1}(\mathcal{L)}$ and $
p_{1}p_{2}^{-1}(\mathcal{H)}$.
\begin{lem}
$p_{2}\circ p_{1}^{-1}:\mathbb{G}(1,n)\setminus\{\mathcal L\subset\mathcal H^\infty\}
\rightarrow \mathbb{G}(n-1,n)$
is a projective line bundle and
$p_{1}\circ p_{2}^{-1}:\mathbb{G}(n-1,n)\setminus\{\mathcal H^\infty\}\rightarrow\mathbb{G}(1,n)$
is a projective plane bundle.
\end{lem}
\begin{proof}
Let $\mathcal H=V(a_1x_1+\cdots a_{n+1}x_{n+1})$ be a projective hyperplane of $\mathbb P^n$, which is not $\mathcal H^\infty$.
Then
$$ p_{1}(p_{2}^{-1}(\mathcal{H}))=\{\mathcal L\in \mathbb G(1,n),\ (a_1,\cdots,a_n,0)\in \mathbf{L}\}.$$
Moreover $ p_{1}(p_{2}^{-1}(\mathcal{H}^\infty))=\mathbb G(1,n)$.\\
Let $\mathcal L\not\subset\mathcal H^\infty$ be a line of $\mathbb P^n$ and let $A_0[a_1:\cdots:a_n:0]$ be the only point in $\mathcal L\cap\mathcal H^\infty$; we have
$$ p_2(p_1^{-1}(\mathcal L))=\{\mathcal H\in\mathbb G(n-1,n)\, :\ \exists [a:b]\in\mathbb P^1,\
\mathcal H=V(aa_1x_1+\cdots+aa_nx_n+bx_{n+1}) \}.$$
Finally, if $\mathcal L=\mathbb P(\mathbf L)\subset\mathcal H^\infty$ is a projective line, then we have
$$ p_2(p_1^{-1}(\mathcal L))=\{\mathcal H\in\mathbb G(n-1,n)\, :\ \exists a,b\in\mathbb C,\ \exists (a_1,\cdots,a_n,0)\in \mathbf{L},\
\mathcal H=V(aa_1x_1+\cdots+aa_nx_n+bx_{n+1}) \}.$$
\end{proof}
It follows directly from the proof of this lemma that if $\mathcal H\in\mathbb G(n-1,n)\setminus\{\mathcal H^\infty\}$, the class of $p_{1}(p_{2}^{-1}(\mathcal{\mathcal H)})$ in the Chow ring $A^*(\mathbb{G}(1,n))$ is simply the Schubert class $\sigma_{n-1}$.
\subsection{Schubert classes for $\mathbb G(1,3)$}
Given a flag $\mathbf{F}=\{\mathbf{V_1}\subset \mathbf {V_2}\subset \mathbf {V_3}\subset \mathbf {V_4}=\mathbf{V}\}$ of $\mathbf V$
with $\dim_{\mathbb{C}}(\mathbf V_{i})=i$ for every integer $i$, we consider
its associated projective flag $\mathcal F$ of $\mathbb{P}^{3}$ (image by the canonical projection $\pi :\mathbf{V\backslash \{0\}}\rightarrow \mathbb{P}^{3}$)
\[
\mathcal{F}=\{p\in \mathcal{D}\subset \mathcal{P}\subset
\mathbb{P}^{3}\}.
\]
Let $\mathcal{Z}^{k}$ denote the set of cycles of
codimension $k$ in $\mathbb{G}(1,3)$. We recall that the \textbf{Schubert cycles} of
$\mathbb{G}(1,3)$ associated to $\mathcal F$ (or to {\bf F}) are given by
\begin{equation}\label{EQSchubert}
\left\{
\begin{array}{c}
\Sigma _{0,0}:=\mathbb{G}(1,3)\in \mathcal{Z}^{0}(\mathbb{G}(1,3)) \\
\Sigma _{1,0}:=\left\{ \mathcal{L}\in \mathbb{G}(1,3);\mathcal{D\cap L\neq
\varnothing }\right\} \in \mathcal{Z}^{1}(\mathbb{G}(1,3)) \\
\Sigma _{2,0}:=\left\{ \mathcal{L}\in \mathbb{G}(1,3);p\mathcal{\in L}
\right\} \in \mathcal{Z}^{2}(\mathbb{G}(1,3)) \\
\Sigma _{1,1}:=\left\{ \mathcal{L}\in \mathbb{G}(1,3);\mathcal{L\subset P}
\right\} \in \mathcal{Z}^{2}(\mathbb{G}(1,3)) \\
\Sigma _{2,1}:=\Sigma _{2,0}\cap \Sigma _{1,1}\in \mathcal{Z}^{3}(\mathbb{G}
(1,3)) \\
\Sigma _{2,2}:=\left\{ \mathcal{L}\in \mathbb{G}(1,3);\mathcal{L=D}\right\}
\in \mathcal{Z}^{4}(\mathbb{G}(1,3))
\end{array}
\right. .
\end{equation}
We write as usual $A^{\ast }(\mathbb{G}(1,3))$
for the Chow ring of $\mathbb{G}(1,3)$ and
$\sigma _{i,j}:=\left[ \Sigma _{i,j}\right] \in A^{i+j}(\mathbb{G}(1,3))$ for \textbf{Schubert classes}.
For convenience we will use the notation $\Sigma _k:=\Sigma _{k,0}$ and $\sigma _k:=\sigma _{k,0}$.
We recall that $A^{\ast }(\mathbb{G}(1,3))$ is freely generated as graded $\mathbb{Z}$
-module by $\left\{ \sigma _{i,j};0\leq j\leq i\leq 2\right\} $ with the following
multiplicative relations
\[
(E)\left\{
\begin{array}{c}
\sigma _{1,1}=\sigma _{1}^{2}-\sigma _{2} \\
\sigma _{1,1}\cdot \sigma _{1}=\sigma _{1}\cdot \sigma _{2}=\sigma _{2,1} \\
\sigma _{2,1}\cdot \sigma _{1}=\sigma _{1,1}^{2}=\sigma _{2}^{2}=\sigma
_{2,2} \\
\sigma _{1,1}\cdot \sigma _{2}^{{}}=0
\end{array}
\right. .
\]
Hence, the Chow ring of the grassmannian is
\[
A^{\ast }(\mathbb{G}(1,3))=\frac{\mathbb{Z[}\sigma _{1},\sigma _{2}]}{
(2\sigma _{1}\cdot \sigma _{2}-\sigma _{1}^{3},\sigma _{1}^{2}\cdot \sigma
_{2}-\sigma _{2}^{2})}.
\]
\subsection{Proofs of Theorems \ref{formulegeneralesurface}
and \ref{factorisable}}
Recall that we have defined
$
\mathfrak{N}_{\mathcal{S}}:=\overline{\{\mathcal{N}_{m}(\mathcal{S});m\in \mathcal{S}\}}\subset
\mathbb{G}(1,3)$
and $\mathfrak{n}_{\mathcal{S}}:=[\mathfrak{N}_{\mathcal{S}
}]\in A^{2}(\mathbb{G}(1,3))$.
\begin{prop}\label{PROP1}
Let $\mathcal S\subset \mathbb P^3$ be an irreducible surface of degree $d\ge 2$ of $\mathbb P^3$.
\begin{itemize}
\item If $\#\mathcal B_{\mathcal S}<\infty$, we have
\[
\mathfrak{n}_{\mathcal{S}}=c_\nu(\mathcal S).\sigma_2+d_{\mathcal S}(d_{\mathcal S}-1).\sigma _{1,1}\in A^{2}(\mathbb{G}(1,3)).
\]
\item If $\dim\mathcal B^{(0)}_{\mathcal S}=2$ with two dimensional part $V(H)$
and $\#\tilde{\mathcal B}_{\mathcal S}<\infty$
(with the notations of Proposition \ref{rqebasepoints}), then we have
\[
\mathfrak{n}_{\mathcal{S}}=c_\nu(\mathcal S).\sigma_2+d_{\mathcal S}(d_{\mathcal S}-d_H-1).\sigma _{1,1}\in A^{2}(\mathbb{G}(1,3)).
\]
\end{itemize}
\end{prop}
\begin{proof}
Since $\mathfrak{n}_{\mathcal{S}}\in A^{2}(\mathbb{G}(1,3)),$ we have $\mathfrak{n}_{\mathcal{S}
}=a.\sigma _{2}+b.\sigma _{1,1}$ for some integers $a$ and $b$.
Moreover, by Kleiman's transversality theorem (see for example \cite[Thm 5.20]{Eisenbud-Harris}), since $\Sigma _{1,1}:=\left\{ \mathcal{L}\in \mathbb{G}(1,3);\mathcal{L\subset P
}\right\} \in \mathcal{Z}^{2}(\mathbb{G}(1,3))$, we have $\mathfrak{n}_{\mathcal{S}
}\cdot \sigma _{1,1}=\left( a\sigma _{2}+b\sigma _{1,1}\right) \cdot \sigma_{1,1}$ and so, using
\eqref{EQSchubert}, we obtain
\begin{equation}
\mathfrak{n}_{\mathcal{S}}\cdot \sigma _{1,1}=b.\sigma _{1,1}^{2}=b.\sigma_{2,2}=b.
\end{equation}
Analogously, since
$\Sigma _{2}:=\left\{ \mathcal{L}\in \mathbb{G}(1,3);p\in \mathcal{L}
\right\} \in \mathcal{Z}^{2}(\mathbb{G}(1,3))$, due to \eqref{EQSchubert}, we have
\begin{equation}
\mathfrak{n}_{\mathcal{S}}\cdot \sigma
_{2}=\left( a\sigma _{2}^{{}}+b\sigma _{1,1}\right) \cdot \sigma _{2}=a\sigma _{2}^{2}=a\sigma_{2,2}=a .\end{equation}
Now it remains to compute $\mathfrak{n}_{\mathcal{S}}\cdot \sigma
_{2}$ and $\mathfrak{n}_{\mathcal{S}}\cdot \sigma
_{1,1}$, i.e. to compute the cardinality of the intersection of
$\mathfrak{N
}_{\mathcal{S}}$ with $\Sigma _{1,1}$ and with $\Sigma _{2}$.\\
Let us start with the computation of $a=\mathfrak{n}_{\mathcal {S}}\cdot \sigma_{2}$.
If $\#\mathcal B_{\mathcal S}<\infty$, then, for a generic $P\in\mathbb P^3$, we have
$$\mathfrak{N}_{\mathcal{S}}\cap \Sigma _{2}=\left\{ \mathcal{L}\in \mathfrak{N}
_{\mathcal{S}};P\in \mathcal{L}\right\} =\left\{\mathcal{N}_{m}\mathcal{S}
;m\in\mathcal S\setminus\mathcal B_{\mathcal S},\ P\in \mathcal{N}_{m}\mathcal{S}\right\}$$
and if $\dim \mathcal B^{(0)}_{\mathcal S}=2$ and $\#\tilde{\mathcal B}^{(0)}_{\mathcal Z}\cap{\mathcal S}<\infty$, then, for a generic $P\in\mathbb P^3$, we have
$$\mathfrak{N}_{\mathcal{S}}\cap \Sigma _{2}=\left\{ \mathcal{L}\in \mathfrak{N}
_{\mathcal{S}};P\in \mathcal{L}\right\} =\left\{\mathcal{N}_{m}\mathcal{S}
;m\in\mathcal S\setminus\tilde{\mathcal B}^{(0)}_{\mathcal S},\ P\in \mathcal{N}_{m}\mathcal{S}\right\}.$$
So, in any case, $a=c_{\nu }(\mathcal S)$ by definition
of the normal class of $\mathcal S$.\\
Now, for $b$, since $\#\mathcal B_{\mathcal S}<\infty$, we note that, for a generic projective plane $\mathcal H\subset\mathbb P^3$, we have
$$\mathfrak{N}_{\mathcal{S}}\cap \Sigma _{1,1}=\left\{ \mathcal{L}\in \mathfrak{
N}_{\mathcal{S}};\mathcal{L\subset H}\right\} =\left\{ \mathcal{N}_{m}
\mathcal{S};\ m\in\mathcal S\setminus \mathcal B_{\mathcal S},\ \mathcal{N}_{m}\mathcal{S}\subset \mathcal{H}\right\} .$$
We have
$\mathcal{H}=V(a_1 X+a_2 Y+a_3 Z+a_4 T)\subset \mathbb{P}^{3} $
for some complex numbers $a_1$, $a_2$, $a_3$ and $a_4$.
Let $m[x:y:z:t]\in \mathbb P^3$. For a generic $\mathcal H$, we have
\begin{eqnarray*}
m\in\mathcal S\setminus\mathcal B_{\mathcal S},\ \mathcal{N}_{m}\mathcal{S}\subset \mathcal{H}
&\Leftrightarrow &
m\in\mathcal S\setminus\mathcal B_{\mathcal S},\ \ m\in\mathcal H,\ \
n_{\mathcal S}(m)\in\mathcal H\\
&\Leftrightarrow&\left\{
\begin{array}{c}
F(x,y,z,t)=0 \\
a_1 F_{x}+a_2 F_{y}+a_3 F_{z}=0\\
a_1 x+a_2 y+a_3 z+a_4 t=0
\end{array}
\right. .
\end{eqnarray*}
Hence $b=d_{\mathcal S}(d_{\mathcal S}-1)$.
Assume now that $\dim\mathcal B^{(0)}_{\mathcal S}=2$ with two dimensional part $V(H)$
and $\#\tilde{\mathcal B}^{(0)}_{\mathcal S}\cap{\mathcal S}<\infty$.
For a generic projective plane
$\mathcal H=V(A^\vee)\subset \mathbb P^3$, we have
\begin{eqnarray*}
\mathfrak{N}_{\mathcal{S}}\cap \Sigma _{1,1}&=&\left\{ \mathcal{N}_{m}
\mathcal{S};\ m\in\mathcal S\setminus \tilde{\mathcal B}^{(0)}_{\mathcal S},\
\mathcal{N}_{m}\mathcal{S}\subset \mathcal{H}\right\}\\
&=&\{\mathcal N_m\mathcal S;\ m\in\mathcal S\setminus\tilde{\mathcal B}^{(0)}_{\mathcal S},\ \ m\in\mathcal H,\ \
n_{\mathcal S}(m)\in\mathcal H\}.
\end{eqnarray*}
Now there are two cases:
\begin{itemize}
\item If $H$ divides $F_x$, $F_y$ and $F_z$, then
$n_{\mathcal S}=[\frac{F_x}H:\frac{F_y}H:\frac{F_z}H]$ and
$b=d_{\mathcal S}(d_{\mathcal S}-d_H-1)$.
\item Otherwise $H=tH_1$, with $H_1$ dividing $F_x$, $F_y$ and $F_z$ and
$V(X)\subset V(x{F_y}-y{F_x},x{F_z}-z{F_x},y{F_z}-z{F_y})$. Hence
$n_{\mathcal S}=[\frac{F_x}{H_1}:\frac{F_y}{H_1}:\frac{F_z}{H_1}]$. We have
\[
m\in\mathcal S\setminus(\mathcal H^\infty\cup\tilde{\mathcal B}^{(0)}_{\mathcal S}),\ \mathcal{N}_{m}\mathcal{S}\subset \mathcal{H}
\quad\Leftrightarrow\quad \left\{
\begin{array}{c}
F(x,y,z,t)=0,\ \ t\ne 0 \\
a_1 \frac{F_{x}}{H_1}+a_2\frac{ F_{y}}{H_1}+a_3\frac {F_{z}}{H_1}=0\\
a_1 x+a_2 y+a_3 z+a_4 t=0
\end{array}
\right.
\]
and
\[
m\in\mathcal S_\infty\setminus\tilde{\mathcal B}^{(0)}_{\mathcal S},\ \mathcal{N}_{m}\mathcal{S}\subset \mathcal{H}
\quad\Leftrightarrow\quad \left\{
\begin{array}{c}
F(x,y,z,t)=0\\
t=0\\
a_1 x+a_2 y+a_3 z=0
\end{array}
\right. ,
\]
so
$$b=d_{\mathcal S}(d_{\mathcal S}-d_H)+d_{\mathcal S}-
\sum_{P\in \mathcal S\cap\mathcal H^\infty\cap\mathcal H}i_P\left(\mathcal S,V(a_1 \frac{F_{x}}{H_1}+a_2\frac{ F_{y}}{H_1}+a_3\frac {F_{z}}{H_1}),\mathcal H\right)$$
(due to the Bezout Theorem). Now let $P\in \mathcal S\cap\mathcal H^\infty\cap\mathcal H$,
we have $x\ne 0$ or $y\ne 0$ or $z\ne 0$. Assume for example that $x\ne 0$, we have
\begin{eqnarray*}
&\ &i_P\left(\mathcal S,V(a_1 \frac{F_{x}}{H_1}+a_2\frac{ F_{y}}{H_1}+a_3\frac {F_{z}}{H_1}),\mathcal H\right)=\\
&=&i_P\left(\mathcal S,V(t(-a_4\frac{F_x}{H_1}+a_2\frac{xF_y-yF_x}H+a_3\frac{xF_z-zF_x}H)),\mathcal H\right)\\
&=&1 +i_P\left(\mathcal S,V(-a_4\frac{F_x}{H_1}+a_2\frac{xF_y-yF_x}H+a_3\frac{xF_z-zF_x}H),\mathcal H\right)\\
&=&2
\end{eqnarray*}
for a generic $\mathcal H$ and so
$b=d_{\mathcal S}(d_{\mathcal S}-d_H+1)$.
\end{itemize}
\end{proof}
Theorem \ref{formulegeneralesurface} follows from Theorem \ref{formulegeneralehypersurface}
and Proposition \ref{PROP1}.
\begin{proof}[Proof of Theorem \ref{factorisable}]
If $\dim\mathcal B^{(0)}_{\mathcal S}=2$, we saw in Proposition \ref{rqebasepoints}
that we can adapt our study to compute the degree of the reduced
normal polar $\tilde {\mathcal P}_{A,\mathcal S}$
associated to the rational map $\tilde\alpha_{\mathcal S}:\mathbb P^3\setminus\tilde{\mathcal B}^{(0)}_{\mathcal S}
\rightarrow\mathbb P^3$ such that $\boldsymbol{\alpha}_{\mathcal S}=H\cdot\boldsymbol{\tilde\alpha}_{\mathcal S}$.
Using Proposition
\ref{rqebasepoints} and following the proof of Theorem
\ref{formulegeneralehypersurface},
we obtain Theorem \ref{factorisable}.
\end{proof}
\section{Proof of Theorem \ref{thmsurfaces}}\label{sec:proofthm1}
We apply Theorem \ref{formulegeneralesurface}.
Note that, since $\mathcal S$ is smooth, it has only a finite number of points of tangency with $\mathcal H_\infty$ (due to Zak's theorem on tangencies \cite[Corollary 1.8]{Zak}).
Since the surface is smooth, $\mathcal B_{\mathcal S}$ consists of points of tangency of
$\mathcal S$ with $\mathcal H_\infty$ and of points of tangency of
$\mathcal S_\infty$ with $\mathcal U_\infty$. It remains to compute
the intersection multiplicity of $\mathcal S$ with a generic normal polar at these points.
Let us recall that if $A\not\in\mathcal H^\infty$, then
$$i_P(\mathcal S,\mathcal P_{A,\mathcal S})=\dim_\mathbb C \left(\left(
\mathbb C[x,y,z,t]/I\right)_P \right)$$
where $I$ is the ideal $(F,E_{A,1,2},E_{A,1,3},E_{A,2,3})$ of $\mathbb C[x,y,z,t]$,
with the notation $E_{A,i,j}$ introduced in the proof of Proposition \ref{degrepolaire}.
To compute these quantities, it may be useful to make an appropriate change of coordinates with the use of a projective similitude of $\mathbb P^3$.
Note that:
\begin{itemize}
\item[*] The umbilical $\mathcal U_\infty$ is stable under the action
of the group of projective similitudes of $\mathbb P^3$.
\item[*] For any $P\in\mathcal H_\infty\setminus\mathcal U_\infty$,
there exists a projective similitude $\zeta$ of $\mathbb P^3$
mapping $[1:0:0:0]$ to $P$.
\footnote{Let $P[x_0:y_0:z_0:0]$ with $x_0^2+y_0^2+z_0^2=1$.
Assume for example that $z_0^2\ne 1$ (up to a permutation of the coordinates) and take $\zeta$
given by $\kappa'(b,A)$ (for any $b\in\mathbb C^n$) with
$A=(u\, v\, w)$ where $u=(x_0,y_0,z_0)$ and $v=(x_0^2+y_0^2)^{-\frac 12}(y_0,-x_0,0)$ and $w=(x_0^2+y_0^2)^{-\frac 12}(x_0z_0,y_0z_0,-x_0^2-y_0^2)$.}
\item[*] For any $P\in\mathcal U_\infty$,
there exists a projective similitude $\zeta$ of $\mathbb P^3$
mapping $[1:i:0:0]$ to $P$.
\footnote{Let $P[x_0:y_0:z_0:0]\in\mathcal U_\infty$.
Assume for example that $y_0\ne 0$ and $x_0^2+y_0^2\ne 0$
(up to a composition by a permutation matrix). A suitable $\zeta$ is given by $\kappa'(b,A)$ (for any $b\in\mathbb C^n$) with
$A=\left(\begin{array}{ccc}\frac{x_0(y_0^2-1)}{2y_0^2}&-\frac{ix_0(1+y_0^2)}{2y_0^2}&\frac{\sqrt{x_0^2+y_0^2}}{y_0}\\
\frac{1+y_0^2}{2y_0^2}&\frac{i(1-y_0^2)}{y_0}&0\\
\frac{i(y_0^4+y_0^2x_0^2-y_0^2-x_0^2)}{y_0^2\sqrt{x_0^2+y_0^2}}&\frac{(1+y_0^2)\sqrt{x_0^2+y_0^2}}
{2y_0^2}&\frac{ix_0}{y_0}\end{array}\right)$.}
\end{itemize}
We recall that a multiple point of order $k$ of a plane curve
is ordinary if its tangent cone contains $k$ pairwise distinct lines
and that an ordinary cusp of a plane curve is a double point with a single
tangent line in the tangent cone, this tangent line not being contained in the cubic cone of the curve at this point.
\begin{itemize}
\item \underline{Let $P$ be a (non singular) point of tangency of $\mathcal S$ with
$\mathcal H_\infty$.}
We prove the following:
\begin{itemize}
\item[(a)] $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=k^2$ for a generic $A\in\mathbb P^3 $ if
$P$ is an ordinary multiple point of order $k+1$ of $\mathcal S_\infty\setminus \mathcal U_\infty$.
\item[(b)] $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=k(k+1)$ for a generic $A\in\mathbb P^3$ if
$P$ is an ordinary multiple point of order $k+1$ of $\mathcal S_\infty$, which belongs to $\mathcal U_\infty$ and at which the tangent line to $\mathcal U_\infty$ is not contained in the tangent cone
of $\mathcal S_\infty$.
\item[(c)] $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=3$ for a generic $A\in\mathbb P^3$ if $P$ is an ordinary cusp of $\mathcal S_\infty$, which belongs to $\mathcal U_\infty$ and at which the tangent line to $\mathcal U_\infty$ is not contained in the tangent cone
of $\mathcal S_\infty$.
\item[(d)] $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=2$ for a generic $A\in\mathbb P^3$ if $P$ is an ordinary cusp of
$\mathcal S_\infty\setminus \mathcal U_\infty$.
\end{itemize}
Due to Lemma \ref{preservpolar},
we assume that $P[1:\theta:0:0]$ with $\theta=0$ (if $P\in\mathcal H_\infty\setminus\mathcal U_\infty$) or $\theta=i$ (if $P\in\mathcal U_\infty$). Since $\mathcal T_P\mathcal S=\mathcal H_\infty$, we suppose that $F_x(P)=F_y(P)=F_z(P)=0$
and $F_t(P)=1$ (without any loss of generality).
Recall that the Hessian determinant $H_F$ of
$F$ satisfies\footnote{see for example \cite{fredsoaz3}.}
$$H_F=\frac{(d_{\mathcal S}-1)^2}{x^2}\left|\begin{array}{cccc}
0&F_{y}&F_{z}&F_{t}\\
F_{y}&F_{yy}&F_{yz}&F_{yt}\\
F_{z}&F_{yz}&F_{zz}&F_{zt}\\
F_{t}&F_{yt}&F_{zt}&F_{tt}
\end{array}
\right|.$$
Hence $H_F(P)\ne 0\ \Leftrightarrow\ [F_{yy}F_{zz}-F_{yz}^2](P)\ne 0$.
For a generic $A\in\mathbb P^3$, we have
$$\left(\frac{\mathbb C[x,y,z,t]}{I}\right)_P \cong
\left(\frac{\mathbb C[x,y,z,t]}{(F,A_2,A_3)}\right)_P.$$
Recall that $A_2=atF_z-ctF_x+d(zF_x-xF_z)$
and
$A_3=-atF_y+btF_x+d(xF_y-yF_x)$
(with $A[a:b:c:d]$).
Using the Euler identity $xF_x+yF_y+zF_z+tF_t=d_{\mathcal S}F$,
we obtain that
$$\left(\frac{\mathbb C[x,y,z,t]}{I}\right)_P
\cong
\left(\frac{\mathbb C[x,y,z,t]}{(F,A'_2,A'_3)}\right)_P
\cong \left(\frac{\mathbb C[y,z,t]}{(F_*,A'_{2*},A'_{3*})}
\right)_{(0,0,0)}$$
with
$$A'_2:=atxF_z+ct(yF_y+zF_z+tF_t)-d(z(yF_y+zF_z+tF_t)+x^2F_z),
$$
$$A'_3:=-atxF_y-bt(yF_y+zF_z+tF_t)+d(x^2F_y+y(yF_y+zF_z+tF_t))$$
and with $G_{*}(y,z,t):=G(1,\theta+y,z,t)$ for any homogeneous $G$.
In a neighbourhood of $(0,0,0)$, $V(F_*)$ is given by $t=\varphi(y,z)$ with $\varphi(y,z)\in\mathbb C[[y,z]]$ and
\begin{equation}\label{deriveephi}
\varphi_y(y,z)=-\frac{F_y(1,\theta+y,z,\varphi(y,z))}{F_t(1,\theta+y,z,\varphi(y,z))}\quad\mbox{and}\quad\varphi_z(y,z)=-\frac{F_z(1,\theta+y,z,\varphi(y,z))}{F_t(1,\theta+y,z,\varphi(y,z))}.
\end{equation}
So
$$\left(\frac{\mathbb C[x,y,z,t]}{I}\right)_P
\cong \frac{\mathbb C[[y,z]]}{(A'_{2**},A'_{3**})},$$
with
$G_{**}(y,z):=G(1,\theta+y,z,\varphi(y,z))$. Now due to
\eqref{deriveephi}, we have
$$H:=A'_{2**}=(F_t)_{**} [-a\varphi\varphi_z+c\varphi(\varphi-(\theta+y)\varphi_y-z\varphi_z)+d(z((\theta+y)\varphi_y+z\varphi_z-\varphi)+\varphi_z)]$$
and
$$K:=A'_{3**}=(F_t)_{**} [a\varphi\varphi_y-b\varphi(\varphi-(\theta+y)\varphi_y-z\varphi_z)-d(\varphi_y+(\theta+y)((\theta+y)\varphi_y+z\varphi_z-\varphi))].$$
Hence
$$i_P(\mathcal S,\mathcal P_{A,\mathcal S})=i_{(0,0)}(\Gamma_H,\Gamma_K),$$
where $\Gamma_H$ and $\Gamma_K$ are the analytic plane curves
of respective equations $H$ and $K$ of $\mathbb C[[y,z]]$.
Note that $(H_y(0,0),H_z(0,0))=d(\varphi_{yz}(0,0),\varphi_{zz}(0,0))$.
Analogously, we obtain $(K_y(0,0),K_z(0,0))=-d(1+\theta^2)(\varphi_{yy}(0,0),\varphi_{yz}(0,0))$.
\begin{itemize}
\item[(a)] If $P\not\in\mathcal U_\infty$ and if
$P$ is an ordinary multiple point of order $k+1$ of $\mathcal S_\infty$, with our change of coordinates
we have $P[1:0:0:0]$ (i.e. $\theta=0$) and $V((\varphi_{k+1})_y)$
and $V((\varphi_{k+1})_z)$ have no common lines.\footnote{Recall that the tangent cone $V(\varphi_{k+1})$ of $\Gamma_{\varphi}$ at $(0,0)$
(corresponding to the tangent cone of $V(\mathcal S_\infty)$ at $P$)
has pairwise distinct tangent lines if and only if
$V((\varphi_{k+1})_y)$
and $V((\varphi_{k+1})_z)$ have no common lines.
}
Then the first homogeneous parts of $H$ and $K$ have order $k$
and are $H_k=d(\varphi_{k+1})_z$ and $K_k=-d(\varphi_{k+1})_y$
respectively.
Since $\Gamma_{H_k}$
and $\Gamma_{K_k}$ have no common lines,
we conclude that $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=k^2$.
\item[(b)]
Assume now that $P\in\mathcal U_\infty$
is an ordinary multiple point of order $k+1$ of $\mathcal S_\infty$, which belongs to $\mathcal U_\infty$ and at which the tangent line to $\mathcal U_\infty$ is not contained in the tangent cone
of $\mathcal S_\infty$.
With our changes of coordinates,
this means that $P[1:i:0:0]$ (i.e. $\theta=i$), that
$y$ does not divide $\varphi_{k+1}$ (since $V(y)$ is the tangent line
to $\mathcal U_\infty$ at $P$) and that $V((\varphi_{k+1})_y)$
and $V((\varphi_{k+1})_z)$ have no common lines.
Note that the first homogeneous parts of $H$ and $K$ have respective orders $k$ and $k+1$ and are respectively
$H_k=d(\varphi_{k+1})_z$ and
\begin{eqnarray*}
K_{k+1}&=&-di[2y(\varphi_{k+1})_y+z(\varphi_{k+1})_z-\varphi_{k+1}]\\
&=&-di\left[\left(2-\frac 1{k+1}\right) y(\varphi_{k+1})_y+\left(1-\frac 1{k+1}\right)z(\varphi_{k+1})_z\right],
\end{eqnarray*}
due to the Euler identity applied to $\varphi_{k+1}$.
Hence $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=k(k+1)$ if
$V((\varphi_{k+1})_z)$ and $V(y(\varphi_{k+1})_y)$ have no common lines, which is true since $V((\varphi_{k+1})_z)$ and $V((\varphi_{k+1})_y)$ have no common lines and since $y$ does not divide $(\varphi_{k+1})_z$.
\item[(c)] Assume that $P\in\mathcal U_\infty$ is an ordinary cusp (of order 2)
of $\mathcal S_\infty$, which belongs to $\mathcal U_\infty$ and at which the tangent line to $\mathcal U_\infty$ is not contained in the tangent cone
of $\mathcal S_\infty$. With our changes of coordinates,
this means that $P[1:i:0:0]$ (i.e. $\theta=i$), that
$H_F(P)=0$ and $F_{zz}(P)\ne 0$ (since $V(y)$ is the tangent line
to $\mathcal U_\infty$ at $P$).
Note that $H=-d(F_{yz}(P)y+F_{zz}(P)z)+...$. In a neighbourhood of $(0,0)$, $H(y,z)=0\ \Leftrightarrow\ z=h(y)$
with
$h(y)=-\frac{F_{yz}(P)}{F_{zz}(P)}y-\frac{ F_{zzz}(P)F^2_{yz}(P)
-2F_{yzz}(P)F_{yz}(P)F_{zz}(P)+F_{yyz}(P)F^2_{zz}(P)}{F_{zz}^3(P)
}y^2+...$
and we obtain that $\val_y K(y,h(y))=3$ if
$P\not\in V(F_{yyy}F_{zz}^3-3F_{yyz}F_{zz}^2F_{yz}
+3F_{yzz}F_{zz}F_{yz}^2-F_{zzz}F_{yz}^3)$ which means that
the line $V(F_{yz}(P)y+F_{zz}(P)z)$ (corresponding to the
tangent cone of $V(F(1,y,z,0))$) is not contained
in the cubic cone of $V(F(1,y,z,0))$. Hence
$i_P(\mathcal S,\mathcal P_{A,\mathcal S})=3$ if the cusp $P$
of $\mathcal S_\infty$ is ordinary.
\item[(d)] Assume now that $P$ is an ordinary cusp (of order 2) of
$\mathcal S_\infty\setminus \mathcal U_\infty$.
With our change of coordinates, this means that $P[1:0:0:0]$
(i.e. $\theta=0$), that $H_F(P)=0$ and $P\not\in V(F_{yy},F_{yz},F_{zz})$. This implies that
$F_{yy}(P)\ne 0$ or $F_{zz}(P)\ne 0$.
If $F_{yy}(P)\ne 0$, the tangent line to $\mathcal S_\infty$
at $P$ is given by $V(t,F_{yy}(P)y+F_{yz}(P)z)$.
If $F_{zz}(P)\ne 0$, the tangent line to $\mathcal S_\infty$
at $P$ is given by $V(t,F_{zz}(P)z+F_{yz}(P)y)$.
The fact that the cusp is ordinary implies also that the tangent line
is not contained in the cubic cone of $V(F(1,y,z,0))$, i.e. this tangent line is not contained in $V(F_{yyy}(P)y^3+3F_{yyz}(P)y^2z
+3F_{yzz}(P)yz^2+F_{zzz}(P)z^3)$.
Hence, we have either
$$F_{yy}[F_{yyy}F_{yz}^3-3F_{yyz}
F_{yz}^2F_{yy}+3F_{yzz}F_{yz}F_{yy}^2-F_{zzz}F_{yy}^3](P)\ne 0$$
or
$$F_{zz}[F_{zzz}F_{yz}^3-3F_{yzz}
F_{yz}^2F_{zz}+3F_{yyz}F_{yz}F_{zz}^2-F_{yyy}F_{zz}^3](P)\ne 0.$$
Note that, if $F_{yy}(P)$ and $F_{zz}(P)$ are both non null,
these two conditions are equivalent.
Assume for example that the first condition holds.
In a neighbourhood of $(0,0)$, $H(y,z)=0\,\Leftrightarrow\, y=h(z)$
and $K(y,z)=0\, \Leftrightarrow\, y=k(z)$, with
$$h'(z)=-\frac{H_z(h(z),z)}{H_y(h(z),z)}\quad\mbox{and}\quad
k'(z)=-\frac{K_z(h(z),z)}{K_y(h(z),z)}.$$
Hence we have
$$\left(\frac{\mathbb C[x,y,z,t]}{I}\right)_P
\cong \frac{\mathbb C[[y,z]]}{(y-h(z),y-k(z))}
\cong \frac{\mathbb C[[z]]}{((h-k)(z))} .$$
We have $h'(0)=k'(0)=-\varphi_{yz}(0,0)/\varphi_{yy}(0,0)$ and
$(h''-k'')(0)=\left[\varphi_{yy}^3\varphi_{zy}
\left(\varphi_{yyy}\varphi_{yz}^3-3\varphi_{yyz}\varphi_{yz}^2
\varphi_{yy}+3\varphi_{yzz}\varphi_{yy}^2
\varphi_{yz}-\varphi_{zzz}\varphi_{yy}^3\right)\right](0,0)$
\end{itemize}
Hence $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=\dim_{\mathbb C}\frac{\mathbb C[[z]]}{((h-k)(z))}=2$.
\item \underline{Let $P$ be a simple (non singular) point of tangency of $\mathcal S_\infty$
with $\mathcal U_\infty$.}
Let us prove that
$i_P(\mathcal S,\mathcal P_{A,\mathcal S})=1$.
Due to Lemma \ref{preservpolar},
we can assume that $P=[1:i:0:0]$ (i.e. $\theta=i$) and that $F_t(P)= 1$.
As previously, we note that
$$\left(\frac{\mathbb C[x,y,z,t]}{I}\right)_P
\cong \frac{\mathbb C[[y,z]]}{(A_{1**},A'_{3**})},$$
with
$$A_{1**}(y,z)=(F_t)_{**}[\varphi(b\varphi_z-c\varphi_y)-d((y+i)\varphi_z-z\varphi_y)]$$
and
$$A_{3**}=(F_t)_{**} [a\varphi\varphi_y-b\varphi(\varphi-(\theta+y)\varphi_y-z\varphi_z)-d(\varphi_y+(i+y)((i+y)\varphi_y+z\varphi_z-\varphi))].$$
The fact that $P$ is a simple contact point of $\mathcal S_\infty$
with $\mathcal U_\infty$ implies that $[F_x(P):F_y(P):F_z(P)]=[1:i:0]$ and that $\varphi_{zz}(0,0)\ne 1$.
Indeed $V(t,F(1,i+y,z,t))$
is given by $t=0,\, y=g(z)$ with $g(0)=0$ and $g'(z)=-\varphi_z(g(z),z)/\varphi_y(g(z),z)$ (in particular
$g'(0)=0$), so
$$\frac{\mathbb C[[y,z]]}{(y-g(z),1+(i+y)^2+z^2)}\cong
\frac{\mathbb C[[z]]}{(1+(i+g(z))^2+z^2)}
$$
and finally
$$
i_P(\mathcal S_\infty,\mathcal U_\infty)=
\val_z(1+(i+g(z))^2+z^2)=1+\val_z((i+g(z))g'(z)+z)
$$
which is equal to 2 if and only if $\varphi_{zz}(0,0)\ne 1$.
In a neighbourhood of $(0,0)$, $A_{1**}$ can be rewritten
$\varphi-\kappa$ with $\kappa=d((y+i)\varphi_z-z\varphi_y)/(b\varphi_z-c\varphi_y)$.
Since $ \varphi_{zz}(0)\ne 1$, $\kappa_z(0,0)\ne 0$
and, in a neighbourhood of $0$, $\varphi-\kappa=0$ corresponds to $y=h(z)$
with $h'(0)\ne 0$ (recall that $\varphi_y(0)=i\ne 0$ and that $A$ is generic) which gives
$$\left(\frac{\mathbb C[x,y,z,t]}{I}\right)_P
\cong
\frac{\mathbb C[[y,z]]}{(y-h(z),A'_{3**})}$$
and finally $i_P(\mathcal S ,\mathcal P_{A,\mathcal S})=\val_z
A'_{3**}(h(z),z)=1$.
\end{itemize}
\section{Examples in $\mathbb P^3$}
\subsection{Normal class of quadrics}\label{secquadric}
The aim of the present section is the study of the normal class
of every irreducible quadric. Let $\mathcal S=V(F)\subset\mathbb P^3$ be an irreducible quadric.
We recall that, up to a composition by
$\varphi\in \widehat{Sim_{\mathbb C}(3)}$, one can suppose that $F$
has the one of the following forms:
\begin{eqnarray*}
&(a)& F(x,y,z,t)=x^2+\alpha y^2+\beta z^2 +t^2\\
&(b)& F(x,y,z,t)=x^2+\alpha y^2+\beta z^2\\
&(c)& F(x,y,z,t)=x^2+\alpha y^2-2tz\\
&(d)& F(x,y,z,t)=x^2+\alpha y^2+t^2,
\end{eqnarray*}
with $\alpha,\beta$ two non zero complex numbers.
Spheres, ellipsoids and hyperboloids are particular cases of (a),
paraboloids (including the saddle surface) are particular cases of (c), (b) correspond to cones and (d) to cylinders.
We will see, in Appendix \ref{cylindreetrevolution}, that in the case (d)
(cylinders) and in the cases (a) and (b) with $\alpha=\beta=1$, the normal class of the quadric is naturally related to the normal class of a conic.
\begin{prop}\label{quadric}
The normal class of a sphere is 2.
The normal class of a quadric $V(F)$ with $F$ given by (a) is 6 if $1,\alpha,\beta$ are pairwise distinct.
The normal class of a quadric $V(F)$ with $F$ given by (a) is 4 if $\alpha=1\ne\beta$.
The normal class of a quadric $V(F)$ with $F$ given by (b) is 4 if $1,\alpha,\beta$ are pairwise distinct.
The normal class of a quadric $V(F)$ with $F$ given by (b) is 2 if $\alpha=1\ne\beta$.
The normal class of a quadric $V(F)$ with $F$ given by (b) is 0 if $\alpha=\beta=1$.
The normal class of a quadric $V(F)$ with $F$ given by (c) is 5 if $\alpha\ne 1$ and 3 if $\alpha=1$.
The normal class of a quadric $V(F)$ with $F$ given by (d) is 4 if
$\alpha\ne 1$ and 2 if $\alpha=1$.
\end{prop}
\begin{coro}
The normal class of the saddle surface $\mathcal S_1=V(xy-zt)$ is 5.
The normal class of the ellipsoid $\mathcal E_1=V(x^2+2y^2+4z^2-t^2)$ with three different axis lengths is 6.
The normal class of the ellipsoid $\mathcal E_2=V(x^2+4y^2+4z^2-t^2)$ with two different axis lengths is 4.
\end{coro}
\begin{proof}[Proof of Proposition \ref{quadric}]
Let $\mathcal S=V(F)$ be a quadric with $F$ of the form (a), (b), (c) or (d).
\begin{itemize}
\item The easiest case is (a) with $1,\alpha,\beta$ pairwise distinct since $\mathcal B_{\mathcal S}$ is empty. In this case, since the generic
degree of the normal polar curves is 3 and since $\mathcal S$ has degree 2, we simply have $c_{\nu}(\mathcal S)=2\cdot 3=6$ (due to Theorem \ref{formulegeneralesurface}).
\item
The case of a sphere $\mathcal S$ is analogous. In this case, $\tilde {\mathcal B}^{(0)}_{\mathcal S}\cap\mathcal S=\emptyset$ and $\deg\tilde{\mathcal P}_{A,\mathcal S}=1$ for a generic $A\in\mathbb P^3$ (see Example \ref{exemple1}).
Hence, we have $c_{\nu}(\mathcal S)=2\cdot 1=2$ (due to Theorem
\ref{factorisable}).
\item In case (a) with $\alpha=1\ne\beta$, the set $\mathcal B_{\mathcal S}$ contains two points $[1:\pm i:0:0]$.
We find the parametrization $\psi(y)=[1:\pm i+y:0:0]$ of $\mathcal P_{A,\mathcal S}$ at the neighbourhood of $P[1:\pm i:0:0]$,
which gives $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=\val_y(1+(\pm i+y)^2)=1$ and so $c_{\nu}(\mathcal S)=2\cdot 3-1-1=4$.
\item In case (b) with $\alpha$, $\beta$ and $1$ pairwise distinct,
the set $\mathcal B_{\mathcal S}$ contains a single point $P[0:0:0:1]$
and a parametrization of ${\mathcal P}_{A,\mathcal S}$
in a neighbourhood of $P$ is
\begin{equation}\label{parametrisation1}
\psi(x)=\left[x:-\frac{bx}{d(\alpha-1)x-a}:\frac{cx}{a+d(1-\beta)x}:1\right].
\end{equation}
Hence $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=\val_x(F(\psi(x)))=2$
and so $c_\nu(\mathcal S)=2\cdot 3-2=4$.
\item In case (b) with $\alpha=1\ne\beta$, we have
$\mathcal B_{\mathcal S}=\{P,P'_+,P'_-\}$ with $P[0:0:0:1]$
and $P'_{\pm}[1:\pm i:0:0]$. A
parametrization of ${\mathcal P}_{A,\mathcal S}$
in a neighbourhood of $P$ is given by \eqref{parametrisation1} with
$\alpha=1$ and so $i_P(\mathcal S,\mathcal P_{A,\mathcal S})=2$.
A parametrization of ${\mathcal P}_{A,\mathcal S}$ at a neighbourhood of $P'_{\pm}$ is $\psi(y)=[1:\pm i+y:0:0]$ and so
$i_{P'_{\pm}}(\mathcal S,\mathcal P_{A,\mathcal S})=1$. Hence
$c_\nu(\mathcal S)=2\cdot 3-2-1-1=2$.
\item In case (b) with $\alpha=\beta=1$, for a generic $A\in\mathbb P^3$, we have $\deg\tilde{\mathcal P}_{A,\mathcal S}=1$ (see Example \ref{exemple1}) but here $\tilde{\mathcal B}^{(0)}_{\mathcal S}\cap\mathcal S=\{[0:0:0:1]\}$. We find
the parametrization $\psi(x)=[x:(bx/a):(cx/a):1]$ of $\tilde{\mathcal P}_{A,\mathcal S}$ at the neighbourhood of $P[0:0:0:1]$. Hence $i_P(\mathcal S,\tilde{\mathcal P}_{A,\mathcal S})=2$ and so $c_{\nu}(\mathcal S)=2\cdot 1-2=0$.
\item In case (c) with $\alpha\ne 1$, the only point of $\mathcal B_{\mathcal S}$
is $P_1[0:0:1:0]$ and a parametrization of ${\mathcal P}_{A,\mathcal S}$
at the neighbourhood of this point is
\begin{equation}\label{parametrisation2}
\psi(t)=\left[\frac{at^2}{d+(d-c)t}:\frac{bt^2}{t(d-c\alpha)+\alpha d}:1:t\right],
\end{equation}
which gives $i_{P_1}(\mathcal S,\mathcal P_{A,\mathcal S})=1$. Hence
$c_{\nu}(\mathcal S)=2\cdot 3-1=5$.
\item In case (c) with $\alpha=1$, $\mathcal B_{\mathcal S}$ is made of
three points: $P_1[0:0:1:0]$, $P_{2,\pm}[1:\pm i:0:0]$.
As in the previous case, a parametrization of ${\mathcal P}_{A,\mathcal S}$
at the neighbourhood of $P_1$ is given by \eqref{parametrisation2}
with $\alpha=1$
and so $i_{P_1}(\mathcal S,\mathcal P_{A,\mathcal S})=1$.
Now, a parametrization of ${\mathcal P}_{A,\mathcal S}$
at the neighbourhood of $P_{2,\pm}$ is $\psi(y)=[1:\pm i+y:0:0]$
and so $i_{P_{2,\pm}}(\mathcal S,\mathcal P_{A,\mathcal S})=\val_y
(1+(y\pm i)^2)=1$. Hence $c_{\nu}(\mathcal S)=2\cdot 3-1-1-1=3$.
\item For the case (d), due to Proposition \ref{propcylinder},
$c_\nu(\mathcal S)=c_\nu(\mathcal C)$ with $\mathcal C=V(x^2+\alpha y^2+z^2)\subset\mathbb P^2$ which is a circle if $\alpha=1$ and an ellipse
otherwise. Hence, due to Theorem \ref{thmcurves}, $c_\nu(\mathcal C)=2+2-0-1-1=2$ if $\alpha=1$ and $c_\nu(\mathcal C)=2+2=4$ otherwise.
\end{itemize}
\end{proof}
\subsection{Normal class of a cubic surface with singularity $E_6$}\label{cubic}
Consider $\mathcal S=V(F)\subset\mathbb{P}^{3}$ with $F(x,y,z,t):=x^{2}z+z^{2}t+y^{3}$.
$\mathcal{S}$ is a singular cubic surface with an $E_{6}$-singularity at
$p[0:0:0:1]$. Let $A[a:b:c:d]\in\mathbb P^3$ be generic.
The ideal of the normal polar $\mathcal{P}_{\mathcal{S},A}$ is given by
$I(\mathcal{P}_{\mathcal{S},A})=\langle H_1,H_2,H_3\rangle\subset\mathbb{C}[x,y,z,t]$ with
$H_1:=(y(x^{2}+2zt)-3y^{2}z)d-b(x^{2}+2zt)t+3y^{2}ct$,
$H_2:=(x(x^{2}+2zt)-2xz^{2})d-a(x^{2}+2zt)t+2xztc$ and $H_3:=(-2xzy+3xy^{2}
)d-3ay^{2}t+2xztb$. $\mathcal B_{\mathcal S}$ is made of
two points: $p$ and $q[0:0:1:0]$. Actually $q$ is the
point of tangency of $\mathcal{S}$ with $\mathcal{H}_{\infty}$. This point is an ordinary cusp of $\mathcal{S}_{\infty}$.
\begin{enumerate}
\item Study at $p$.
Near $p$, in the chart $t=1$, the equation $H_3=0$ of the normal polar ideal
gives $z=g(x,y):=\frac{3y^{2}(-xd+a)}{2x(-yd+b)}$. Now $V(H_1(x,y,g(x,y),1))$
corresponds to a quintic with a cusp at the
origin (and with tangent cone $V(y^{2})$). Its single branch has Puiseux expansion
$y^2=-\frac b{3a}x^3+o(x^3)$, with probranches $y=\varphi_{\varepsilon}(x)$
with $\varphi_\varepsilon(x)=i\varepsilon\sqrt{\frac b{3a}}x^{\frac 32}+o(x^{\frac 32})$
for $\varepsilon\in\{\pm 1\}$. Hence, $g(x,\varphi_\varepsilon(x))=-\frac {x^2}2+o(x^2)$.
Hence parametrizations of the probranches of $\mathcal P_{A,\mathcal S}$ at a neighbourhood
of $p$ are
$$\Gamma_{\varepsilon}(x)=[x:\varphi_\varepsilon(x):g(x,\varphi_\varepsilon(x)):1]$$
and $F(\Gamma_{\varepsilon}(x))=-\frac {x^4}4+o(x^{4})$.
Therefore $i_{p}(\mathcal{P}_{\mathcal{S},A},\mathcal S)=8$.
\item Study at $q.$
Assume that $b=1$. Near $q[0:0:1:0]$, in the chart $z=1$, $H_3=0$ gives
$t=h(x,y):=\frac{d(-2+3y)xy}{3ay^{2}-2x}$ and
$V(H_2(x,y,1,h(x,y)))$ is a quartic with a (tacnode) double point at $(0,0)$ with
vertical tangent and which has Puiseux expansion
$$x=\theta_{\varepsilon}(y)=\omega_{\varepsilon}a\, y^{2}+o(y^{2}),$$
with $\omega_{\varepsilon}=\frac{3-d}{2}+\frac{\varepsilon}{2}\sqrt{d(d-6)}$
for $\varepsilon\in\{\pm 1\}$ and
$h(\theta_\varepsilon(y),y)=-\frac{2d\omega_\varepsilon}
{3-2\omega_\varepsilon}y+o(y)$.
Hence parametrizations of the probranches of $\mathcal{P}_{\mathcal{S},A}$ in a
neighbourhood of $q$ are given by
$$\Gamma_{\varepsilon}(y):=[\theta_{\varepsilon}(y):y:1:h(\theta_\varepsilon(y),y)]$$
for $\varepsilon\in\{\pm 1\}$ and
$F(\Gamma_{\varepsilon}(y))=-\frac{2\omega_\varepsilon d}{3-2\omega_\varepsilon}y+o(y)$.
Hence $i_{q}(\mathcal{P}_{\mathcal{S},A},\mathcal S)=2$.
We can also apply directly Item (c) of Section \ref{sec:proofthm1}
to prove that $i_{q}(\mathcal{P}_{\mathcal{S},A},\mathcal S)=2$.
\end{enumerate}
Therefore, due to Theorem \ref{formulegeneralesurface}, the normal class of
$\mathcal S=V(x^{2}z+z^{2}t+y^{3})\subset \mathbb P^3(\mathbb C)$
is
$$c_{\nu}(\mathcal{S})=3\cdot(3^{2}-3+1)-8-2=11.$$
\section{Normal class of plane curves~: Proof of Theorem \ref{thmcurves}}\label{proofcurve}
Let $\mathbf V$ be a three dimensional complex vector space and set $\mathbb P^2:=\mathbb P(\mathbf V)$ with projective coordinates
$x,y,z$. We denote by $\ell_\infty=V(z)$ the line at infinity.
Let $\mathcal C=V(F)\subset \mathbb P^2$
be an irreducible curve of degree $d\ge 2$.
For any nonsingular $m[x:y:z]\in\mathcal C$ (with coordinates $\mathbf m=(x,y,z)\in\mathbb C^3$), we write $\mathcal T_m\mathcal C$
for the tangent line to $\mathcal C$ at $m$.
If $\mathcal T_m\mathcal C\ne\ell_\infty$, then $n_{\mathcal C}(m)=[F_x:F_y:0]$
is well defined in $\mathbb P^2$ and
{\bf the projective normal line} $\mathcal N_m\mathcal C$ to $\mathcal C$ at $m$ is the line
$(m\, n_{\mathcal C}(m))$ if $n_{\mathcal C}(m)\ne m$.
An equation of this normal line is then given by $\langle \mathbf {N}_\mathcal C({m}),\cdot\rangle$ where $N_\mathcal C:\mathbb P^2\dashrightarrow\mathbb P^2$ is the rational map defined by
\begin{equation}\label{NC}
\mathbf{N}_\mathcal C(\mathbf{m}):=\mathbf{m}\wedge\left(\begin{array}{c}F_x\\ F_y\\ 0\end{array}\right)
=\left(\begin{array}{c}-zF_y(m)\\ zF_x(m)\\ xF_y(m)-yF_x(m)\end{array}\right).
\end{equation}
\begin{lem}
The base points of $(N_\mathcal C)_{|\mathcal C}$ are the singular points of $\mathcal C$,
the points of tangency with the line at infinity and the points of $\{I,J\}\cap\mathcal C$.
\end{lem}
\begin{proof}
A point $m\in\mathcal C$ is a base point of $N_\mathcal C$ if and only if $F_x=F_y=0$ or $z=xF_y-yF_x=0$.
Hence, singular points of $\mathcal C$ are base points of $N_{\mathcal C}$.
Let $m=[x:y:z]$ be a nonsingular point of $\mathcal C$.
First $F_x=F_y=0$ is equivalent to $\mathcal T_m\mathcal C=\ell_\infty$.
Assume now that $z=xF_y-yF_x=0$ and $(F_x,F_y)\ne(0,0)$. Then $m=[x:y:0]=[F_x:F_y:0]$ and,
due to the Euler formula, we have $0=-zF_z=xF_x+yF_y$ and so $x^2+y^2=0$, which implies $m=I$ or $m=J$.
Finally note that if $m\in\{I,J\}\cap\mathcal C$, then $m=[-y:x:0]$ and,
due to the Euler formula, $0=-zF_z=xF_x+yF_y=xF_y-yF_x$.
\end{proof}
Since the degree of each non zero coordinate of
$\mathbf N_\mathcal C$ is $d$, we have
\begin{equation}\label{lemmefondamental}
c_\nu(\mathcal C)
=d^2-\sum_{P\in \base\left({(N_\mathcal C)}_{\vert\mathcal C}\right)} i_P(\mathcal C,V(\langle L,\mathbf N_{\mathcal C}(\cdot)\rangle)),
\end{equation}
for a generic $L\in\mathbb P^2$, where we write
$\base\left({(N_\mathcal C)}_{\vert\mathcal C}\right)$
for the set of base points of
${(N_\mathcal C)}_{\vert\mathcal C}$.
The set $V(\langle L,\mathbf N_{\mathcal C}(\cdot)\rangle)\subset \mathbb P^2$ is called
{\bf the normal polar} of $\mathcal C$ with respect to $L$. It satisfies
$$m \in V(\langle L, \mathbf N_{\mathcal C}(\cdot)\rangle)\quad \Leftrightarrow\quad \mathbf N_{\mathcal C}(\mathbf m)=0\ \mbox{or}\ L\in
\mathcal N_m(\mathcal C).$$
Now, to compute the generic intersection numbers, we use the notion of probranches \cite{Halphen,Wall,Wall2}.
See section 4 of
\cite{fredsoaz1} for details.
Let $P\in\mathcal C$ be an indeterminacy point of $N_{\mathcal C}$ and let us write $\mu_P$ for the multiplicity of $\mathcal C$ at $P$.
Recall that $\mu_P=1$ means that $P$ is a nonsingular point of $\mathcal C$.
Let $M\in GL(\mathbf V)$ be such that $M(\mathbf O)=\mathbf P$ with $\mathbf O=(0,0,1)$ (we set also $O=[0:0:1]$)
and such that $V(x)$ is not contained in the tangent cone of $V(F\circ M)$ at $O$.
Recall that the equation of this tangent cone is the homogeneous part of lowest degree in $(x,y)$ of $F(x,y,1)\in\mathbb C[x,y]$ and
that this lowest degree is $\mu_P$.
Using the combination of the Weierstrass preparation theorem and of the Puiseux expansions,
$$F\circ M(x,y,1)=U(x,y)\prod_{j=1}^{\mu_P}
(y-g_j(x)),$$
for some $U(x,y)$ in the ring of convergent series in $x,y$ with $U(0,0)\ne 0$ and where
$g_j(x)=\sum_{m\ge 1}a_{j,m}x^{\frac m{q_j}}$ for some integer $q_j\ne 0$.
The $y=g_j(x)$ correspond to the equations of the probranches of $\mathcal C$ at $P$.
Since $V(x)$ is not contained in the tangent cone of $V(F\circ M)$ at $O$, the valuation in $x$
of $g_j$ is larger than or equal to 1 and so the probranch $y=g_j(x)$ is tangent to $V(y-xg_j'(0))$.
We write $\mathcal T_P^{(j)}:=M(V(y-xg_j'(0)))$ the associated (possibly singular) tangent line to $\mathcal C$ at $P$
($\mathcal T_P^{(j)}$ is the tangent to the branch of $\mathcal C$ at $P$ corresponding to this probranch)
and we denote by $i_P^{(j)}$ the tangential intersection number of this probranch:
$$i_P^{(j)}=\val_x (g_j(x)-xg_j'(0))=\val_x (g_j(x)-xg_j'(x)).$$
We recall that for any homogeneous polynomial $H\in\mathbb C[x,y,z]$, we have
\begin{eqnarray*}
i_P(\mathcal C,V(H))&=& i_{O}(V(F\circ M),V(H\circ M))\\
&=&\sum_{j=1}^{\mu_P} \val_x(H(M(G_j(x)))),
\end{eqnarray*}
where $G_j(x):=(x,g_j(x),1)$.
With these notations and results, we have
$$\Omega(\mathcal C,\ell_\infty)=\sum_{P\in\mathcal C\cap\ell_\infty} (i_P(\mathcal C,\ell_\infty)-\mu_P(\mathcal C))
= \sum_{P\in\mathcal C\cap\ell_\infty} \sum_{j:\mathcal T_P^{(j)}=\ell_\infty} (i_P^{(j)}-1).$$
For a generic $L\in\mathbf V^\vee$, we also have
\begin{eqnarray*}
i_P(\mathcal C,V(L\circ N_{\mathcal C}))
&=&\sum_{j=1}^{\mu_P} \val_x(L(N_{\mathcal C}(M(G_j(x)))))\\
&=&\sum_{j=1}^{\mu_P} \min_k \val_x([N_{\mathcal C}\circ M]_k(G_j(x))),
\end{eqnarray*}
where $[\cdot ]_k$ denotes the $k$-th coordinate.
Moreover, due to \eqref{NC}, as seen in Proposition 16 of \cite{fredsoaz2}, we have
$$\mathbf{N}_{\mathcal C}\circ M(\mathbf m)= Com(M)\cdot (\mathbf{m}\wedge\left[\Delta_{\mathbf{A}}
G(\mathbf m)
\cdot \mathbf{A}+\Delta_{\mathbf{B}} G(\mathbf m)\cdot \mathbf{B}\right]),$$
where $G:=F\circ M$, $\mathbf{A}:=M^{-1}(1,0,0)$, $\mathbf{B}:=M^{-1}(0,1,0)$ and $\Delta_{(x_1,y_1,z_1)}H=x_1H_x+y_1H_y+z_1H_z$.
As seen in Lemma 33 of \cite{fredsoaz1}, we have
$$\Delta_{(x_1,y_1,z_1)} G(x,g_j(x),1)=R_j(x)W_{(x_1,y_1,z_1),j}(x),$$
where
$R_j(x)=U(x,g_j(x))\prod_{j'\ne j}(g_{j'}(x)-g_j(x))$ and $W_{(x_1,y_1,z_1),j}(x):=y_1-x_1g'_j(x)+z_1(xg'_j(x)-g_j(x))$.
Therefore, for a generic $L\in\mathbf{V}^\vee$, we have
$$
i_P(\mathcal C,V(L\circ N_{\mathcal C}))=
V_P+\sum_{j=1}^{\mu_P} \min_k \val_x([G_j(x)\wedge (W_{\mathbf{A},j}(x)\cdot \mathbf A
+W_{\mathbf{B},j}(x)\cdot \mathbf B)]_k)
$$
where $V_P:=\sum_{j=1}^{\mu_P}\sum_{j'\ne j}\val(g_{j'}-g_j)$.
Now, we write $h_P^{(j)}:=\min_k \val_x([G_j(x)\wedge (W_{\mathbf{A},j}(x)\cdot \mathbf A
+W_{\mathbf{B},j}(x)\cdot \mathbf B)]_k)$ and $h_P:=\sum_{j=1}^{\mu_P}h_P^{(j)}$.
Note that $V_P=0$ if $P$ is a nonsingular point of $\mathcal C$.
We recall that, due to Corollary 31 of \cite{fredsoaz1}, we have
$$\sum_{P\in \mathcal C\cap \base(N_{\mathcal C})} V_P= d(d-1)-d^\vee$$
and so, due to \eqref{lemmefondamental}, we obtain
\begin{equation}\label{lemme fondamental2}
c_\nu(\mathcal C)
=d+d^\vee -\sum_{P\in \mathcal C\cap\base(N_\mathcal C)} h_P.
\end{equation}
Now we have to compute the contribution $h_P^{(j)}$ of each probranch of each
$P\in\mathcal C\cap \base(N_{\mathcal C})$. We have seen, in Proposition 29 of \cite{fredsoaz1},
that we can adapt our choice of $M$ to each probranch (or, to be more precise, to each branch corresponding to the probranch).
This fact will be useful in the sequel. In particular, for each probranch, we take $M$ such that $g_j'(0)=0$ so
$G_j(x)\wedge (W_{\mathbf{A},j}(x)\cdot \mathbf A +W_{\mathbf{B},j}(x)\cdot \mathbf B)$ can be rewritten:
\begin{equation}\label{probranche1}
\left(\begin{array}{c}x\\ g_j(x)\\1\end{array}\right)\wedge\left(\begin{array}{c}x_Ay_A-(x_A^2+x_B^2)g'_j(x)+x_By_B+(z_Ax_A+z_Bx_B)(xg_j'(x)-g_j(x))\\
y_A^2+y_B^2-(x_Ay_A+x_By_B)g'_j(x)+(z_Ay_A+z_By_B)(xg_j'(x)-g_j(x))\\
y_Az_A+y_Bz_B-(x_Az_A+x_Bz_B)g'_j(x)+(z_A^2+z_B^2)(xg_j'(x)-g_j(x))\end{array}\right) .
\end{equation}
\begin{itemize}
\item Assume first that $P$ is a point of $\mathcal C$ outside $\ell_\infty$. Then for $M$
as above and such that $z_A=z_B=0$, we have
$$ G_j(0)\wedge (W_{\mathbf{A},j}(0)\cdot \mathbf A
+W_{\mathbf{B},j}(0)\cdot \mathbf B)=\left(\begin{array}{c}
-y_A^2-y_B^2\\x_Ay_A+x_By_B\\0\end{array}\right)$$
which is non null since $(y_A,y_B)\ne(0,0)$ and since $\mathbf A$ and $\mathbf B$ are linearly independent.
So $h_P^{(j)}=0$.
\item Assume now that $P\in\mathcal C\cap \ell_\infty\setminus\{I,J\}$ and $\mathcal T_P^{(j)}\ne\ell_\infty$. Then $y_A+iy_B\ne 0$
and $y_A-iy_B\ne 0$ (since $I,J\not\in \mathcal T_P^{(j)}$) and so $y_A^2+y_B^2\ne 0$ which together with
\eqref{probranche1} implies that $h_P^{(j)}=0$ as in the previous case.
\item Assume that $P\in\mathcal C\cap \ell_\infty\setminus\{I,J\}$ and $\mathcal T_P^{(j)}=\ell_\infty$.
Assume that $M(1,0,0)=(1,i,0)$. Hence $\mathbf A+i\mathbf B=(1,0,0)$. Then $y_A=y_B=0$, $x_A+ix_B=1$, $z_A+iz_B=0$. So $z_A^2+z_B^2=0$
and $z_Ax_A+z_Bx_B=z_A\ne 0$ (since $z_B=iz_A$ and $x_B=i(x_A-1)$).
Note that $P\ne J$ implies also that $x_A-ix_B\ne 0$. So that $x_A^2+x_B^2\ne 0$.
Hence, due to \eqref{probranche1}, $G_j(x)\wedge (W_{\mathbf{A},j}(x)\cdot \mathbf A +W_{\mathbf{B},j}(x)\cdot \mathbf B)$
is equal to
$$
\left(\begin{array}{c}x\\ g_j(x)\\1\end{array}\right)\wedge\left(\begin{array}{c}(x_A^2+x_B^2)g'_j(x)+z_A(xg_j'(x)-g_j(x))\\
0\\
z_Ag_j'(x)\end{array}\right) .
$$
Therefore we have $h_P^{(j)}=\val_x((x_A^2+x_B^2)g'_j(x))=i_P^{(j)}-1$.
\item Assume that $P=I$ and that $\mathcal T_P^{(j)}=\ell_\infty$. Take $M$ such that $M(\mathbf O)=(1,i,0)$, $\mathbf B=(1,0,0)$ and so $\mathbf A=(-i,0,1)$. Due to \eqref{probranche1},
$G_j(x)\wedge (W_{\mathbf{A},j}(x)\cdot \mathbf A +W_{\mathbf{B},j}(x)\cdot \mathbf B)$
is equal to
\begin{equation}
\left(\begin{array}{c}x\\ g_j(x)\\1\end{array}\right)\wedge\left(\begin{array}{c}-i(xg_j'(x)-g_j(x))\\
0\\
ig_j'(x)+(xg_j'(x)-g_j(x))\end{array}\right) .
\end{equation}
Note that each coordinate has valuation at least equal to $i_P^{(j)}=\val g_j$ and that the term of
degree $i_P^{(j)}$ of the second coordinate is the term of degree $i_P^{(j)}$ of
$$-i(xg'_j(x)-g_j(x))+xig'_j(x)=ig_j(x)\ne 0$$
which is non null. Therefore $h_P^{(j)}=i_P^{(j)}$.
\item Assume finally that $P=I$ and that $\mathcal T_P^{(j)}\ne\ell_\infty$. Take $M$ such that $M(\mathbf O)=(1,i,0)$,
$\mathbf B=(0,1,0)$ and so $\mathbf A=(0,-i,1)$.
Due to \eqref{probranche1},
$G_j(x)\wedge (W_{\mathbf{A},j}(x)\cdot \mathbf A +W_{\mathbf{B},j}(x)\cdot \mathbf B)$
is equal to
\begin{equation}
\left(\begin{array}{c}x\\ g_j(x)\\1\end{array}\right)\wedge\left(\begin{array}{c}0\\-i(xg_j'(x)-g_j(x))\\
-i+(xg_j'(x)-g_j(x))\end{array}\right) .
\end{equation}
Note that each coordinate has valuation at least equal to $1$ and that the term of
degree $1$ of the second coordinate is $ix\ne 0$. Hence $h_P^{(j)}=1$.
\end{itemize}
Note that the case $P=J$ can be treated in the same way as the case $P=I$.
Theorem \ref{thmcurves} follows from \eqref{lemme fondamental2} and from the previous
computation of $h_P$.
\begin{appendix}
\section{Dimension decrease}\label{cylindreetrevolution}
Here, we consider two particular cases of hypersurfaces whose normal class
equals that of a hypersurface of lower dimension: cylinders
(i.e. cones with vertex at a point at infinity) and hypersurfaces of
revolution (fibered in circles).
Let $n\ge 3$.
Let $\tilde F\in\mathbb C[u_1,...,u_n]$ be homogeneous.
We call {\bf cylinder of base $\tilde{\mathcal Z}=V(\tilde F)\subset\mathbb P^{n-1}$
and of axis $V(x_2,...,x_n)\subset\mathbb P^n$} the hypersurface $V(F)\subset\mathbb P^n$, with
$F(x_1,\dots,x_{n+1}):=\tilde F(x_2,\dots,x_{n+1})$.
\begin{prop}\label{propcylinder}
Let $n\ge 3$ and $d\ge 2$.
Let $\mathcal Z=V(F)\subset\mathbb P^n$ be the cylinder
of axis $V(x_2,...,x_n)\subset\mathbb P^n$
and of base $\tilde{\mathcal Z}=V(\tilde F)\subset\mathbb P^{n-1}$.
Then $c_{\nu}(\mathcal Z)=c_{\nu}(\tilde{\mathcal Z})$.
\end{prop}
\begin{proof}
Note that $\mathcal Z\cap V(x_2,...,x_{n+1})\subset\sing(\mathcal Z)\subset\mathcal B_{\mathcal Z}$.
Let $m[x_1^{(1)}:\cdots:x_{n+1}^{(1)}]\in\mathcal Z\setminus V(x_2,...,x_{n+1})$
and $P[x_1^{(0)}:\cdots:x_{n+1}^{(0)}]\in\mathbb P^n\setminus V(x_2,...,x_{n+1})$.
Set $\tilde m[x_2^{(1)}:\cdots:x_{n+1}^{(1)}]\in\tilde{\mathcal Z}$
and $\tilde P[x_2^{(0)}:\cdots:x_{n+1}^{(0)}]\in\mathbb P^{n-1}$.
Note that
$n_{\mathcal Z}(m)[0:\tilde F_{u_1}(\tilde{\mathbf m}):\cdots:\tilde F_{u_{n-1}}(\tilde{\mathbf m}):0]\in\mathbb P^{n}$.
\begin{itemize}
\item Let $\mathcal H=V(\alpha x_1+\beta x_{n+1})\subset \mathbb P^n$ be a hyperplane orthogonal to $V(x_2,...,x_n)$ such that $\mathcal H\ne\mathcal H^\infty$ (i.e. $\alpha\ne 0$). Assume $m\in\mathcal H$.
Then $m\in\mathcal B_{\mathcal Z}\, \Leftrightarrow\, \tilde m\in\mathcal B_{\tilde{\mathcal Z}}$.
If $m\in\mathcal H\cap\mathcal Z\setminus\mathcal B_{\mathcal Z}$, then
$\mathcal N_m(\mathcal Z)\subset \mathcal H$.
\item Assume $P\in \mathbb P^n\setminus V(x_1,x_{n+1})$. Then
$\mathcal H:=V(x_1^{(0)}x_{n+1}-x_{n+1}^{(0)}x_{1})$ is the unique
hyperplane orthogonal to $V(x_2,...,x_n)$ containing $P$ and
$$P\in \mathcal N_m(\mathcal Z),\ m\in\mathcal Z\setminus\mathcal B_{\mathcal Z}\quad\Leftrightarrow\quad m\in\mathcal H,\ \tilde m\in\tilde{\mathcal Z}\setminus
\mathcal B_{\tilde{\mathcal Z}},\ \tilde P\in\mathcal N_{\tilde m}(\tilde{\mathcal Z}).$$
\end{itemize}
Hence $c_\nu(\mathcal Z)=c_{\nu}(\tilde{\mathcal Z})$.
\end{proof}
Let $\tilde F\in\mathbb C[u_1,...,u_n]$ be a homogeneous polynomial
of the form $\tilde F(u_1,...,u_n)=G(u_1^2,...,u_n)$ for some $G\in\mathbb C[u_1,...,u_n]$.
Let $\tilde{\mathcal Z}:=V(\tilde F)\subset\mathbb P^{n-1}$.
We call {\bf algebraic hypersurface of revolution of $\tilde{\mathcal Z}$ around
the subspace $V(x_1,x_2)$} the hypersurface $\mathcal Z=V(F)\subset\mathbb P^n$ with
$F(x_1,....,x_{n+1}):=G(x_1^2+x_2^2,x_3,...,x_{n+1})$.
Note that if $m[x_1^{(1)}:\cdots:x_{n+1}^{(1)}]\in\mathcal Z\setminus\mathcal H^\infty$ with $x_{n+1}^{(1)}=1$,
then the "circle" $V(x_1^2+x_2^2-(x_1^{(1)})^2-(x_2^{(1)})^2)\cap\bigcap_{i=3}^{n}V(x_i-x_i^{(1)}x_{n+1})$ of center $[0:0:x_3^{(1)}:\cdots:x_{n+1}^{(1)}]$
that passes through $m$ is contained in $\mathcal Z$.
\begin{prop}
Let $n\ge 3$ and $d\ge 2$.
Let $\mathcal Z=V(F)\subset\mathbb P^n$ be the algebraic hypersurface of revolution of $\tilde{\mathcal Z}=V(\tilde F)\subset \mathbb P^{n-1}$ (with $\tilde F\in\mathbb C[u_1,...,u_n]$ as above) around the subspace $V(x_1,x_2)$, then
$c_\nu(\mathcal Z)=c_{\nu}(\tilde{\mathcal Z})$.
\end{prop}
\begin{proof}
Let $m[x_1^{(1)}:\cdots:x_{n+1}^{(1)}]\in\mathcal Z$
and $P[x_1^{(0)}:\cdots:x_{n+1}^{(0)}]\in\mathbb P^n$.
Then
$$n_{\mathcal Z}(m)[2x_1^{(1)}G_{u_1}({\mathbf m}_1):2x_2^{(1)}G_{u_1}({\mathbf m}_1):
G_{u_2}({\mathbf m}_1):\cdots:G_{u_{n-1}}({\mathbf m}_1):0]\in\mathbb P^n,$$
with ${\mathbf m}_1((x_1^{(1)})^2+(x_2^{(1)})^2,x_3^{(1)},...,x_{n+1}^{(1)})\in\mathbb C^n$.
Hence if $m\in\mathcal Z\cap V(x_1^2+x_2^2)\setminus\mathcal B_{\mathcal Z}$,
then $\mathcal N_m\mathcal Z\subset V(x_1^2+x_2^2)$.
Assume from now on that $m\in\mathcal Z\setminus V(x_1^2+x_2^2)$
and that $P\in\mathbb P^n\setminus (V(x_1^2+x_2^2)\cup V(x_1))$.
Let $\tilde m[y_1^{(1)}:x_3^{(1)}:\cdots:x_{n+1}^{(1)}]\in\mathbb P^{n-1}$
and $\tilde P[y_1^{(0)}:x_3^{(0)}:\cdots:x_{n+1}^{(0)}]\in\mathbb P^{n-1}$
with $(y_1^{(i)})^2=(x_1^{(i)})^2+(x_2^{(i)})^2$. Note that $\tilde m\in\tilde{\mathcal Z}$.
Then
$$n_{\mathcal Z}(m)[x_1^{(1)}\tilde F_{u_1}(\tilde{\mathbf m})/y_1^{(1)}:x_2^{(1)}\tilde F_{u_1}(\tilde{\mathbf m})/y_1^{(1)}:
\tilde F_{u_2}(\tilde{\mathbf m}):\cdots:\tilde F_{u_{n-1}}(\tilde{\mathbf m}):0]\in\mathbb P^n.$$
\begin{itemize}
\item Note that $m\in\mathcal B_{\mathcal Z}\ \Leftrightarrow\ \tilde m\in\mathcal B_{\tilde{\mathcal Z}}$ (since $x_1^{(1)}$ and $x_2^{(1)}$ are not both null).
\item Let $\mathcal H=V(\alpha x_1+\beta x_{2})\subset \mathbb P^n$ be a hyperplane that contains $V(x_1,x_2)$ but not contained in $V(x_1^2+x_2^2)$
(i.e. $\alpha^2+\beta^2\ne 0$).
If $m\in\mathcal H\cap\mathcal Z\setminus\mathcal B_{\mathcal Z}$, then
$\mathcal N_m\mathcal Z\subset \mathcal H$.
\item Let $\mathcal H:=V(x_1^{(0)}x_{2}-x_{2}^{(0)}x_{1})$ be the unique
hyperplane that contains $V(x_1,x_2)$ and $P$. Then
$$P\in \mathcal N_m(\mathcal Z),\ m\in\mathcal Z\setminus\mathcal B_{\mathcal Z}\quad\Leftrightarrow\quad m\in\mathcal H,\ \tilde m\in\tilde{\mathcal Z}\setminus
\mathcal B_{\tilde{\mathcal Z}},\ \tilde P\in\mathcal N_{\tilde m}(\tilde{\mathcal Z}),$$
by choosing $y_1^{(1)}:=y_1^{(0)}x_1^{(1)}/x_1^{(0)}$.
\end{itemize}
Hence $c_\nu(\mathcal Z)=c_{\nu}(\tilde{\mathcal Z})$.
\end{proof}
\section{Projective orthogonality in $\mathbb P^n$}\label{NORMAL}
\subsection{From affine orthogonality to projective orthogonality}
Let $E_n$ be a Euclidean affine $n$-space whose direction is the $n$-dimensional vector space $\mathbf E_n$
(endowed with some fixed basis). Let $\mathbf V:=(\mathbf E_n\oplus \mathbb R)\otimes \mathbb C$
(endowed with the induced basis $\mathbf{e}_1,...,\mathbf{e}_{n+1}$). We consider the complex projective space
$\mathbb P^n:=\mathbb P(\mathbf V)$ with projective coordinates $x_1,...,x_{n+1}$.
Let us write $\pi:\mathbf V\setminus\{0\}\rightarrow\mathbb P^n$ for the canonical projection.
We denote by $\mathcal H^\infty:=V(x_{n+1})\subset \mathbb P^n$ the hyperplane at infinity.
We consider the affine space ${A}^{n}:=\mathbb{P}^{n}\setminus \mathcal{H}^{\infty }$ endowed
with the vector space $\overrightarrow{\mathbf E}:=Span(\mathbf e_1,\cdots,\mathbf e_n)
\subset \mathbf V$ (with the affine structure
$m+\overrightarrow{\mathbf v}=\pi(\mathbf m+\overrightarrow{\mathbf v})$ if $\overrightarrow{\mathbf v}\in \overrightarrow{\mathbf E}$
and $m=\pi(\mathbf m)\in A^n$ with
$\mathbf m(x_1,\cdots,x_n,1)$).
Let us consider $\mathcal{W}_1= \mathbb{P}(\mathbf W_1)
\subset\mathbb P^n$
and $\mathcal{W}_2 =\mathbb{P}(\mathbf W_2)\subset\mathbb P^n$ where
$\mathbf W_1$ and $\mathbf W_2$ are two vector subspaces of $\mathbf V$
not contained in $\overrightarrow{\mathbf E}$
such that $\dim \mathbf W_1+\dim\mathbf W_2=n+2$.
Since $\mathcal W_i$ is not contained in $\mathcal H^\infty$,
$ W_i:=\mathcal{W}_i\setminus\mathcal H^\infty$ is
an affine subspace of $A^n$ with vector space
$\overrightarrow{\mathbf{W}_i}:=\mathbf W_i \cap\overrightarrow{\mathbf E}$,
that is to say that
there exists $m_i$ such that $W_i=m_i+\overrightarrow{\mathbf{W}_i}$ in $ A^n$.
Consider the usual bilinear symmetric form $\langle u,v\rangle
=\sum_{i=1}^{n+1}u_{i}v_{i}$ on $\mathbf V$; the associated orthogonality on $\mathbf V$ is
written $\boldsymbol{\perp} $.
\begin{defi}
Let us consider $\mathcal{W}_1= \mathbb{P}(\mathbf W_1)
\subset\mathbb P^n$
and $\mathcal{W}_2 =\mathbb{P}(\mathbf W_2)\subset\mathbb P^n$ where
$\mathbf W_1$ and $\mathbf W_2$ are two vector subspaces of $\mathbf V$
not contained in $\overrightarrow{\mathbf E}$ and
such that $\dim \mathbf W_1+\dim\mathbf W_2=n+2$.
With the above notations, we say that $\mathcal W_1$ and $\mathcal W_2$ are orthogonal in $\mathbb{P}^{n}$ if $\overrightarrow {\mathbf W}_1\perp \overrightarrow{\mathbf{W}}_2$.
We then write ${\mathcal W_1\boldsymbol{\perp }\mathcal W_2}$.
\end{defi}
Note that if $\mathcal H\subset \mathbb P^n$ and $\mathcal L
\subset\mathbb P^n$ are respectively a hyperplane and a line in
$\mathbb P^n$ not contained in $\mathcal H^\infty$, then
$\mathcal H\perp\mathcal L$ if and only if the point at infinity of $\mathcal L$
is the pole in $\mathcal H^\infty$ of the hyperplane $\mathcal H\cap\mathcal H^\infty\subset\mathcal H^\infty$ with respect to the {\bf umbilical}
$\mathcal U_\infty:=V(x_1^2+...+x_n^2)\cap\mathcal H^\infty\subset\mathcal H^\infty$.
This leads us to the following generalization of normal lines to a hyperplane.
\begin{defi}
We say that a projective hyperplane $\mathcal H=V(a_1x_1+\cdots+a_{n+1}x_{n+1})\subset\mathbb P^n$ and a projective line $\mathcal L=\mathbb P(\mathbf L)\subset\mathbb P^n$ are orthogonal in $\mathbb P^n$ if
$(a_1,\cdots,a_n,0)\in\mathbf L$. We then write $\mathcal L\perp\mathcal H$.
\end{defi}
It is worth noting that, with this definition, a line orthogonal to
a hyperplane $\mathcal H$ may be contained in $\mathcal H$.
\end{appendix}
\end{document} |
\begin{document}
\pagenumbering{arabic}
\title{
Global dynamics under a weak potential on a sphere \thanks{Work
partially supported by ``Progetto 5 per mille per la ricerca''
(Bando 2011). ``Collisioni fra vortici puntiformi e fra filamenti
di vorticit\`a: singolarit\`a, trasporto e caos.''}}
%\author{Roberto Castelli, Francesco Paparella and Alessandro Portaluri}% duplicate author list, superseded by the detailed \author command below
\author{Roberto Castelli \thanks{BCAM - Basque Center for Applied
Mathematics, Bizkaia Technology Park, 48160 Derio, Bizkaia, Spain
({\tt [email protected]}).} \and \and Francesco Paparella
\thanks {Dipartimento di Matematica ``Ennio De Giorgi'', Ex-collegio
Fiorini, University of Salento, 73100 Lecce, Italy ({\tt
[email protected]}). } \and Alessandro
Portaluri\thanks{Dipartimento di Matematica ``Ennio De Giorgi'',
Ex-collegio Fiorini, University of Salento, 73100 Lecce, Italy
({\tt [email protected]}).} }
\maketitle
\begin{abstract}
We give a detailed analytical description of the global
dynamics of a point mass moving on a sphere under the action of a
logarithmic potential. After performing a McGehee-type blow-up in
order to cope with the singularity of the potential, we investigate
the rest-points of the flow, the invariant (stable and unstable)
manifolds and we give a complete dynamical description of the
motion.
\end{abstract}
\noindent {\em MSC Subject Class\/}: Primary 70F10; Secondary 37C80.
\vspace{0.5truecm}
\noindent {\em Keywords\/}: Singular dynamics, McGehee
coordinates, regularization of collisions, heteroclinics.
\section*{Introduction} \label{sec:intro}
Topologically, two dimensional Riemann surfaces with constant
(Gaussian) curvature $K$ are classified into three categories:
Euclidean spheres, $\mathbb S^2$ ($K>0$); Euclidean planes, $E^2$
($K=0$); and hyperbolic planes $H^2$ ($K<0$). Among them, $\mathbb
S^2$ and $E^2$ are more familiar and come out very often in
practice. For example, the mechanics of thin fluid layers on $\mathbb
S^2$ provides a global model of a planetary atmosphere, and on $E^2$
its local approximation.
In this paper we analyze the motion of a point particle moving on a
sphere under the action of a logarithmic potential. There are two main
reasons for the choice of this particular potential. First, it arises
in different physical scenarios, such as in models of astrodynamics,
\cite{tremaine}, \cite{stoica}; in the dynamics of a charged particle
in a cylindrically symmetric electric field \cite{hooverman} and in
the mathematical theory of vortex filaments of an ideal fluid
\cite{Newton}, \cite{ponce}. The second reason relies on the fact
that the logarithmic potential $V(x)=-\log(|x|)$ could be considered
as a limit case for $\alpha\rightarrow 0$ of the homogeneous
potentials $V_{\alpha}=|x|^{-\alpha}$ and, while the latter have been
extensively studied by different authors, the former has not been so
deeply investigated. In particular one could be interested to know if
(and how) some features regarding for instance the regularization of
collisions, the minimality properties of the solutions, the stability
character, may be extended from the homogeneous to the logarithmic
potential case. Results in this direction have been achieved for
instance in \cite{cas2}, \cite{gencoll}, \cite{tremaine}.
In addition, we consider a sphere, rather than the classical two or
three-dimensional Euclidean space, as the configuration space. Our
goal is to understand which aspects of the dynamics are affected if
the geometry of the underlying space changes, or equally well, what
survives of the planar dynamics if one considers a curved manifold.
From a dynamical point of view the most interesting feature, and the
hardest obstacle for a full understanding of the motion, is played by
the presence of the singularity in the potential function. Indeed, as
it often happens in celestial mechanics, the singularities are the
source of a complicated dynamics and sometimes they are even
responsible for a sort of chaotic motion. From the mathematical point
of view the singularities represent a severe technical hurdle to
overcome and different techniques have been proposed to regularize the
vector field, mainly for the homogeneous potential case
\cite{levicivita}, \cite{McGehee74}, \cite{easton}, \cite{gronchi},
\cite{cas2} and \cite{cate}.
This paper is inspired by a recent work \cite{stoica} that studies
the planar motion of a point mass subject to a logarithmic potential
in an astrodynamic context. To overcome the singularity of the vector
field we adapt to our problem the celebrated {\em McGehee
transformation\/}, a regularizing change of variables currently
popular in the field of Celestial Mechanics and first introduced in
1974 by McGehee\cite{McGehee74} to solve the collisions in the
collinear three-body problem.
The McGehee transformations consist of a polar type change of
coordinates in the configuration space, together with a suitable
rescaling of the momentum. In this way the total collision is blown-up
into an invariant manifold called {\em total collision manifold\/} over
which the flow extends smoothly. Furthermore, each hypersurface of
constant energy has this manifold as a boundary. By rescaling time in
a suitable way, it is possible to study qualitative properties of
the solutions close to total collision, obtaining a precise
characterization of the singular solutions.
The McGehee transformations are usually applied to the case of
homogeneous potentials but, as shown in \cite{stoica} and as it will be
manifest throughout this paper, with slight modifications they give
interesting results even in the presence of a logarithmic potential.
In fact, although the lack of homogeneity of the logarithmic
nonlinearity breaks down some nice and useful properties of the
transformation, it is still possible to regularize the vector field
and therefore it is still possible to carry out a detailed analytical
description of the rest points, of the invariant manifolds, and of the
heteroclinics on the collision manifold.
The paper is organized as follows: first we introduce some basic
notions about the Hamiltonian formulation of the co-geodesic flow on
a general Riemannian manifold, then in Section \ref{sec:stereo} we
restrict to the case of the sphere and we formulate the equivalent
co-geodesic flow through the stereographic projection. In Section
\ref{sec:description} we introduce the singular logarithmic potential
and we write the equation of motion. Section \ref{sec:stabunstab}
deals with the in-deep study of the dynamical system: we regularize
the singularity of the potential with the modified McGehee technique,
and we provide an analysis of the flow on the collision and the zero
velocity manifolds. Section \ref{sec:global} concerns the global
dynamics and we rephrase the results in terms of the original motion
on the sphere with untransformed coordinates.
\tableofcontents
\section{Preliminaries}
Let $M$ be a Riemannian manifold, namely a smooth $n$-dimensional
manifold $M$ endowed with a metric given by a positive definite
(non-degenerate) symmetric two-form $g$. We denote by $D$ the
associated Levi-Civita connection and by $\Ddt$ the covariant
derivative of a vector field along a smooth curve $\gamma$. Let $I$ be
an interval on the real line and let $V$ be a smooth function defined
on $I\times M$.
\begin{defn}
A {\em perturbed geodesic\/} abbreviated as {\em p-geodesic\/} is a
smooth curve $\gamma\colon I \rightarrow M$ which satisfies the
differential equation
\begin{equation}\label{eq:pgeodesic}
\Ddt \gamma '(t) +\nabla V(t,\gamma(t))=0
\end{equation}
where $\nabla V$ denotes the
gradient of $V (t,-) $ with respect to the metric $g.$
\end{defn}
\begin{rem}
From a dynamical viewpoint, the data $(g,V)$ define a mechanical
system on the manifold $M$, with kinetic energy $\frac{1}{2}g(v ,v)$
and time dependent potential energy $V.$ Solutions of the
differential equation \eqref{eq:pgeodesic} correspond to
trajectories of particles moving on the Riemannian manifold in the
presence of the potential $V$. If the potential vanishes we get
trajectories of free particles and hence geodesics on $M$. This
motivates the suggestive name, ``perturbed geodesics" in the case
$\nabla V\neq 0$. Moreover, if the potential $V$ is time
independent, modulo reparametrization, perturbed geodesics become
geodesics of the Jacobi metric associated to $(g,V)$: indeed the
total energy
\[
e = \frac{1}{2}g (\gamma (t)) ( \gamma '(t) , \gamma '(t) ) + V (\gamma(t) )
\]
is constant along any trajectory
$\gamma$; thus, whenever $V$ is bounded from above, the solutions of
\eqref{eq:pgeodesic} with energy $e> \sup_{m \in M} V(m)$ are
nothing but reparametrized geodesics for the metric $[e-V]g$ on $M$
with total energy one \cite{AM}.
\end{rem}
Denoting by $(q^1, \dots, q^n)$ a local system of coordinates on $M$,
equation \eqref{eq:pgeodesic} reduces to
\[
\ddot{q}^i + \Gamma_{jk}^i \dot{q}^j \dot{q}^k = - g^{ij} \dfrac{\partial V}{\partial q^j},
\]
where, as usual, $g^{ij}=(g)^{-1}_{ij}$, and $\Gamma_{jk}^i$ are the
Christoffel symbols.
\subsection*{Geodesic flow as Hamiltonian flow}
The geodesic flow turns out to be a Hamiltonian flow of a special
Hamiltonian vector field defined on the cotangent bundle of the
manifold. The Hamiltonian depends on the metric on the manifold and it
is a quadratic form consisting entirely of the kinetic term. The
geodesic equation corresponds to a second-order nonlinear ordinary
differential system. Therefore by suitably defining the momenta it can
be re-written as first-order Hamiltonian system.
More explicitly, let us consider a local trivialization chart of the
cotangent bundle $T^*M$
\[
T^*M \Big\vert_U \cong U \times \mathbb R^n
\]
where $U$ is an open subset of the manifold $M$, and the tangent space
is of rank $n$. Let us denote by $(q_1, q_2,\dots , q_n, p_1, p_2,\dots,
p_n)$ the local coordinates
on $T^{*}M$ and introduce the Hamiltonian
\begin{equation}\label{eq:hamiltgeo}
H: T^*M \to\mathbb R: H(\simbolovettore{q},\simbolovettore{p})= \dfrac12 g^{ij}(\simbolovettore{q})p_i \, p_j\ .
\end{equation}
The Hamilton-Jacobi equations of the geodesic equation with respect to
the metric $\simbolovettore{g}$ can be written as
\[
\left\{
\begin{array}{ll}
\dot q^i = \dfrac{\partial H}{\partial p_i}= g^{ij}(\simbolovettore{q}) p_j\\
\dot p_i = -\dfrac{\partial H}{\partial q_i}= -\dfrac12\dfrac{\partial g^{jk}}{\partial q^i}\,p_j\,p_k\ .
\end{array}\right.
\]
The second order geodesic equations are easily obtained by
substitution of one into the other. The flow determined by these
equations is called the {\em co-geodesic flow\/}, while the flow
induced by the first equation on the tangent bundle is called {\em
geodesic flow}. Thus, the geodesic lines are the projections of
integral curves of the geodesic flow onto the manifold $M$.
Being the Hamiltonian $H$ time-independent, it is readily seen that
the Hamiltonian is constant along the geodesics. Thus, the
co-geodesic flow splits the cotangent bundle into level sets of
constant energy
\[
M_E=\{(\simbolovettore{q}, \simbolovettore{p}) \in T^*M: \quad H(\simbolovettore{q},\simbolovettore{p})=E \},
\]
for each energy $E\geq 0$, so that
\[
T^*M=\bigcup_{E\geq0} M_E.
\]
Now let $\simbolovettore{g},\simbolovettore{h}$ be two Riemannian metrics on $M$ in the same
conformal class; namely there exists a positive and smooth function
$\lambda=\lambda(\simbolovettore{q})$ of the coordinates such that
\[
\simbolovettore{g}^{ij}= \lambda \,\simbolovettore{h}^{ij}
\]
or equivalently $\simbolovettore{g}=\lambda^{-1}\simbolovettore{h}$. From the definition
\eqref{eq:hamiltgeo} it follows that a scaled co-geodesic Hamiltonian
function corresponds to a conformal change of the metric. In fact
if $H_\simbolovettore{g}$ and $H_\simbolovettore{h}$ denote the Hamiltonian co-geodesic functions
and if $\simbolovettore{g}, \simbolovettore{h}$ are in the same conformal class then it immediately
follows by \eqref{eq:hamiltgeo} that
\[
H_\simbolovettore{g}(\simbolovettore{q},\simbolovettore{p})= \lambda\, H_\simbolovettore{h}(\simbolovettore{q}, \simbolovettore{p}).
\]
As a consequence, Hamilton's equations with respect to these two
Hamiltonian functions are related as follows
\begin{equation}\label{eq:conformal}
\left\{
\begin{array}{ll}
\dot \simbolovettore{q} = \hphantom{-}\partial_\simbolovettore{p} H_\simbolovettore{g}= \lambda\partial_\simbolovettore{p} H_\simbolovettore{h} + H_\simbolovettore{h} \partial_\simbolovettore{p}\lambda= \lambda\partial_\simbolovettore{p} H_\simbolovettore{h}\\
\\
\dot \simbolovettore{p} = -\partial_\simbolovettore{q} H_\simbolovettore{g}= -\lambda\partial_\simbolovettore{q} H_\simbolovettore{h} - H_\simbolovettore{h} \partial_\simbolovettore{q} \lambda
\end{array}\right.
\end{equation}
where the last equality in the first equation comes from the fact that
the function $\lambda$ only depends on $\simbolovettore{q}$.
In the following we consider a perturbed-geodesic flow on a
sphere, thus it is worth to write down explicitly the free geodesic
flow when the manifold $M$ is a surface of revolution in $\mathbb R^{3}$.
Denote with $(x,y)$ the Cartesian coordinates of $\mathbb R^2$ and consider
the function $\varphi:U \subset \mathbb R^2 \to \mathbb R^3$ given by $
\varphi(x,y)=(f(y)\cos x, f(y) \sin x, g(y))$
\[
U=\{(x,y)\in \mathbb R^2: 0\leq x< 2\pi, y_0 < y < y_1\},
\]
where $f$ and $g$ are differentiable functions, with $f'(y)^2+ g'(y)^2
\neq 0$ and $f(y)\neq 0$.
Thus $\varphi(x,y)$ is an immersion and the
image $\varphi(U)$ is the surface generated by the rotation of the
curve $(0, f(y), g(y))$ around the $z$ axis.\footnote{
Here we are considering the Euclidean space equipped with Cartesian
coordinates whose axis are labeled as $x,y,z$ according to the
ordering induced by the canonical orthonormal basis of $\mathbb R^3$.} The induced Riemannian metric
$g=(g_{ij})$ in the $(x,y)$
coordinates is given by
\[
g_{11}= f^2\qquad g_{12}=0 \qquad g_{22}=(f')^2 + (g')^2.
\]
From \eqref{eq:hamiltgeo}, the Hamiltonian function associated to the
geodesic flow is given by
\[
H(x,y, p_x, p_y)=\dfrac12\left(\dfrac{1}{f^2}p_x^2+ \dfrac{1}{f'^2+g'^2} p_y^2\right)
\]
and the co-geodesic flow reads as
\begin{equation}\label{eq:cogeodesicrot}
\left\{
\begin{array}{ll}
\dot x= \dfrac{1}{f^2} p_x\\
\dot y= \dfrac{1}{f'^2+g'^2} p_y\\
\dot p_x=0\\
\dot p_y= \left[\dfrac{f f'}{f^4}p_x^2+ \dfrac{f'f''+ g'g''}{(f'^2+g'^2)^2}p_y^2\right]
\end{array}\right.
\end{equation}
or, equivalently, as
\[
\left\{
\begin{array}{ll}
\ddot x + \dfrac{2f\,f'}{f^2}\dot x\, \dot y=0\\
\ddot y - \dfrac{f\,f'}{f'^2+ g'^2} (\dot x)^2+ \dfrac{f'f''+ g'g''}{f'^2+g'^2}(\dot y)^2=0.
\end{array}\right.
\]
\section{The stereographic projection of the sphere}\label{sec:stereo}
\begin{figure}
\caption{Mutual positions of the sphere ($R=1$) and the 0 plane.}
\label{fig:proj}
\end{figure}
It turns out that transformations of the McGehee type may be devised
without too many difficulties for equations which are written in
Cartesian coordinates on a plane. Therefore, rather than attempting to
work directly onto the sphere, we felt it would be easier (and
clearer) first to project the dynamics on a stereographic plane, and
then to remove the singularities of the resulting equations.
We work on a two-dimensional spherical surface $\simbolovettore{S}$ of radius
$R$ and center at the point $C=(0,0, R)$, namely
\[
\simbolovettore{S}:=\{(x,y,z) \in \mathbb R^3: \ \ x^2+ y^2 + (z-R)^2=R^2\}.
\]
where $(x,y,z)$ are the Cartesian coordinates in $\mathbb R^3$ (figure
\ref{fig:proj}). We shall call {\em north pole\/} and {\em south
pole\/} the point $N:=(0,\dots, 0,2R) \in \simbolovettore{S}$ and its
antipodal $S:=(0, \dots, 0, 0) \in \simbolovettore{S}$, respectively. Note that the sphere is
tangent at the origin to the plane $\{\simbolovettore{z}=0\}$, that we identify with
$\mathbb R^2$. Next we introduce the stereographic projection
\[\begin{array}{rl}
\pi_\simbolovettore{S}: \simbolovettore{S}\setminus\{N\} &\longrightarrow \mathbb R^2\\
P &\longmapsto \tilde P,
\end{array}
\]
defined by requiring that the three points $N, P, \tilde P$ are
collinear. By a straightforward calculation it follows that the map
$\pi_\simbolovettore{S}$ is given explicitly by
\begin{equation}\label{eq:proiezione}
\pi_\simbolovettore{S}(x,y,z)=\dfrac{2\,R}{2\,R- z}\,( x, y).
\end{equation}
We use slightly non-standard angular coordinates $\phi$ and
$\theta$ for the spherical surface:
\begin{itemize}
\item $\phi\in[0,2\pi)$ is the usual polar angle of the projection of
$P$ onto the plane $z=0$;
\item $\theta\in[0,\pi]$ is the angle between the segment
$\overline{PC}$ and the negative direction of the $z$-axis.
\end{itemize}
A generic point $P=(x,y,z)$ on the sphere in these coordinates has a
local parameterization given by
\[
\left[
\begin{array}{ll}
x\\
y\\
z
\end{array}\right]=R\, \left[
\begin{array}{ll}
\sin\theta \cos\phi\\
\sin \theta \sin \phi\\
1-\cos \theta\end{array}\right]=\simbolovettore{P}(\phi,\theta).
\]
Of course the map is a diffeomorphism of class $\mathscr C^\infty$. In these
coordinates the stereographic projection $\pi_\simbolovettore{S}$ is defined as:
\[
R \left[
\begin{array}{ll}
\sin\theta \cos\phi\\
\sin \theta \sin \phi\\
1-\cos \theta\end{array}\right]\longmapsto
\dfrac{2\, R}{1+\cos\theta}
\left[
\begin{array}{ll}
\sin\theta \cos\phi\\
\sin \theta \sin \phi
\end{array}\right]\ .
\]
We recall that if $M \subset \mathbb R^3$ is a portion of a regular surface
represented in Cartesian local coordinates by the vector equation
\[
\simbolovettore{P}(u,v):= \boldsymbol{0}+ x(u,v)\,\simbolovettore{i}+ y(u,v)\,\simbolovettore{j}+ z(u,v)\, \simbolovettore{k}
\]
then $d\simbolovettore{P}= \simbolovettore{P}_u\, du + \simbolovettore{P}_v\, dv$ for $\simbolovettore{P}_u=(x_u, y_u, z_u)$ and
$\simbolovettore{P}_v=(x_v, y_v, z_v)$ and hence the metric is given by $ds^2=
\|d\simbolovettore{P}\|^2$. With the above
parametrization of the sphere it follows that
\[
\simbolovettore{P}_\theta=R\, (\cos\theta\cos\phi, \cos \theta \sin \phi, \sin\theta), \qquad \simbolovettore{P}_\phi=R (-\sin\theta \sin\phi, \sin\theta \cos\phi,0).
\]
Denoting by $\simbolovettore{g}$ and $\simbolovettore{g}_\simbolovettore{S}$ respectively the Riemannian metric on
the sphere $\simbolovettore{S}$ and the metric on the plane induced by the
stereographic projection, we have
\[
\simbolovettore{g}:= R^{2}\left[\begin{array}{c c}
\sin^2\theta\, &0\\
0 &1
\end{array}\right], \qquad \simbolovettore{g}_\simbolovettore{S}:=\dfrac{4}{(1+\cos\theta)^2}\, \simbolovettore{g}
\]
As a consequence of the above calculation the following result holds.
\begin{lem}
The manifolds $(\simbolovettore{S}, \simbolovettore{g})$ and $(\overline{\mathbb R^2}, \simbolovettore{g}_\simbolovettore{S})$ are in
the same conformal class, where $\overline{\mathbb R^2}$ denotes the
Alexandroff compactification of $\mathbb R^2$.
\end{lem}
\subsection*{Co-geodesic flows}
Let $\simbolovettore{G}$ and $\simbolovettore{G}_\simbolovettore{S}$ be the matrices corresponding to the inverse
of $\simbolovettore{g}$ and $\simbolovettore{g}_\simbolovettore{S}$ respectively; thus we have
\[
\simbolovettore{G}= R^{-2}\,
\begin{pmatrix}
\sin^{-2}\theta& 0\\
0 & 1
\end{pmatrix}, \quad \textrm{and}\quad \simbolovettore{G}_\simbolovettore{S}= \dfrac{(1+\cos\theta)^2}{4}\, \simbolovettore{G}.
\]
We denote by $\hat \simbolovettore{S}=\simbolovettore{S}\setminus\{N,S\}$ the sphere minus the
north and the south pole, and by $T^*\hat\simbolovettore{S}$ its cotangent bundle,
on which the following Hamiltonian function is well defined:
\[\begin{array}{rl}
H_{geod}: T^*\hat \simbolovettore{S} &\to \mathbb R\\
(\simbolovettore{q}; \simbolovettore{p})&\mapsto \dfrac12 \langle \simbolovettore{G} \,\simbolovettore{p}, \simbolovettore{p} \rangle.
\end{array}
\]
Here $\simbolovettore{q}:=(\phi, \theta)$ are the positions and $\simbolovettore{p}:=(p_\phi,
p_\theta)$ are the momenta. With this
choice the Hamiltonian function is given by
\[
H_{geod}(\phi, \theta;p_\phi, p_\theta)= \dfrac{1}{2\,R^2} \left(\dfrac{1}{\sin^2\theta}\,
p_\phi^2+ p_\theta^2\right)
\]
and, as a particular case of \eqref{eq:cogeodesicrot}, the co-geodesic flow
on the sphere may be written as
\begin{equation}\label{eq:HJonsphere}
\left\{
\begin{array}{ll}
\dot \phi = \dfrac{1}{R^2\, \sin^2\theta} \, p_\phi\\
\\
\dot \theta= \dfrac{p_\theta}{R^2}\\
\\
\dot p_\phi=0\\
\\
\dot p_\theta=\dfrac{\cos\theta}{R^2\, \sin^3\theta} \,p_\phi^2\\
\\
\end{array}\right. .
\end{equation}
The co-geodesic flow on the plane $\{\simbolovettore{z}=0\}$ is equivalent to the
above one through the stereographic projection. Since the metrics
$\simbolovettore{g}$ and $\simbolovettore{g}_{\simbolovettore{S}}$ are in the same conformal class, the new system
is easily derived using \eqref{eq:conformal} with
$\lambda=\frac{(1+\cos\theta)^{2}}{4}$.
However, on the plane we prefer to use the Cartesian
coordinates $(x,y)$ rather than the angular coordinates $(\phi,\theta)$.
The latter are related to the former by the transformation
\[
\phi=\arctan\left( \dfrac{y}{x}\right), \qquad
\theta= 2\arctan\left(\dfrac{\sqrt{x^2+y^2}}{2R}\right).
\]
Hence, denoting by $\mathbb R^2_s$ the plane endowed with the metric
$\simbolovettore{g}_\simbolovettore{S}$, the Hamiltonian function for the co-geodesic flow
on $(\mathbb R^2, \simbolovettore{g}_\simbolovettore{S})$
\[
K_{geod}: T^* \mathbb R^2_s\to \mathbb R: (\simbolovettore{q}; \simbolovettore{p})\mapsto \dfrac12 \langle \simbolovettore{G}_\simbolovettore{S} \,\simbolovettore{p}, \simbolovettore{p} \rangle
\]
is explicitly given by
\[
K_{geod}(x, y;p_x, p_y)= \,\simbolovettore{l} (x,y)\, [\simbolovettore{a}(x,y)\,
p_x^2+ p_y^2].
\]
where $\simbolovettore{q}:=(x, y)$ are the positions and $\simbolovettore{p}:=(p_x, p_y)$ are the
momenta. In order to derive this expression we set
\[
\simbolovettore{a}(x,y):= \left[\dfrac{4R^2+x^2+y^2}{4R\sqrt{x^2+y^2}} \right]^2, \qquad \simbolovettore{l}(x,y):=\dfrac{8R^2}{(4R^2+ x^2+ y^2)^2}.
\]
and we use the identities
\[
\sin\theta= \dfrac{4R\sqrt{x^2+y^2}}{4R^2+x^2+y^2},\qquad
\sin\phi= \dfrac{y}{\sqrt{x^2+y^2}}.
\]
and
\[
\dfrac{(1+\cos\theta)^2}{8R^2}= \dfrac{8R^2}{(4R^2+ x^2+ y^2)^2}.
\]
Note that $\simbolovettore{a}(x,y)$ corresponds to the term $(\sin\theta)^{-2}$,
while $\simbolovettore{l}(x,y)$ is just $\frac{1}{2R^{2}}\lambda$.
To the Hamiltonian $K_{geod}$ is associated the Hamiltonian flow
\begin{equation}\label{eq:HJonplanenotresc}
\left\{
\begin{array}{ll}
\dot x =2\, (\simbolovettore{a}\,\simbolovettore{l})(x,y)\, p_x\\
\\
\dot y= 2\simbolovettore{l}(x,y)\, p_y\\
\\
\dot p_x=-[\partial_x(\simbolovettore{a} \, \simbolovettore{l}) p_x^2+ (\partial_x \simbolovettore{l})\, p_y^2 ]\\
\\
\dot p_y=- [\partial_y(\simbolovettore{a} \, \simbolovettore{l}) p_x^2+ (\partial_y \simbolovettore{l})\, p_y^2 ]
\end{array}\right.
\end{equation}
where
\[
(\simbolovettore{a}\,\simbolovettore{l})(x,y):= \dfrac{1}{2\,(x^2+y^2)}, \quad \partial_x \simbolovettore{l}(x,y)=-\dfrac{32 \,R^2x}{(4R^2+x^2+y^2)^3}, \quad
\partial_y \simbolovettore{l}(x,y)=-\dfrac{32 \,R^2y}{(4R^2+x^2+y^2)^3}.
\]
\section{Position of the problem}\label{sec:description}
We are now in the position to introduce a conservative force field on
the sphere that perturbs, not necessarily by small amounts, the
geodetic dynamics of a free particle governed by the equations discussed
in the previous section.
We place the singularity of the potential at the point $Q=(0,R,R)$. As
a naming convention, we shall often refer to it as the {\it vortex}
point. On $\simbolovettore{S}\setminus\{Q\}$ we define the {\em logarithmic
potential $U$ \/} as
\[
U(P):= -\dfrac{\Gamma}{4\pi}\log (\|\overline{PQ}\|)
\]
where $\|\overline{PQ}\|$ is the three-dimensional Euclidean distance
between $P$ and $Q$, i.e. $\|\overline{PQ}\|$ is the length of the
chord between two points on the sphere. We denote by $\simbolovettore{f}$ the force
field generated by the potential $U$, that is $\simbolovettore{f}(P)=\nabla U(P)$.
At any point $P\neq Q$ it associates a force pointing towards $Q$ if
$\Gamma>0$ or in the opposite direction otherwise, and proportional to
the inverse of the distance $\|\overline{PQ}\|$. Note that, unlike the
planar case, the force field is not tangent to the manifold: at any
point $P$ one could decompose the force vector into two components,
one directed as the normal to the sphere the other tangent to the
sphere. We take the first as balanced by the smooth constraint given
by requiring that the motion happens on the spherical surface,
therefore only the second contributes to the motion.
The Hamiltonian augmented with the potential function
$$H_{mech}: T^* (\hat \simbolovettore{S}\setminus\{Q\}) \to \mathbb R$$
in the $(\phi, \theta)$-coordinates is
\[
H_{mech}(\phi,\theta, p_\phi, p_\theta):=H_{geod}(\phi, \theta, p_\phi, p_\theta) + \dfrac{\Gamma}{8\pi} \log(2 R^2(1-\sin \theta \sin \phi)).
\]
where $2 R^2(1-\sin \theta \sin \phi) = \|\overline{PQ}\|^2$.
On $(\mathbb R^{2},\simbolovettore{g}_{\simbolovettore{S}})$ the distance $\|\overline{PQ}\|^2$ becomes
the function
\[
\qquad \simbolovettore{b}(x,y):=\left[\dfrac{2R^2[x^2+(y-2R)^2]}{4R^2+x^2+y^2}\right],
\]
therefore we introduce the Hamiltonian
$$
K_{mech}: T^*(\mathbb R^2_s\setminus\{0,V\})\to \mathbb R
$$
\[
K_{mech}(x, y, p_x, p_y):=K_{geod}(x,y, p_x, p_y) + \dfrac{\Gamma}{8\pi} \log\simbolovettore{b}(x,y)\ .
\]
We observe that $\log\simbolovettore{b} \in \mathscr C^\infty(\mathbb R^2\backslash\{(0,2R)\})$:
indeed the point $V=(0,2R)$
corresponds to the stereographic projection of the vortex $Q\in \simbolovettore{S}$,
while the origin is a singularity of the metric.
Hamilton's equations associated to $K_{mech}$ can be written as follows:
\begin{equation}\label{eq:hamiltonequ}
\left\{
\begin{array}{ll}
\dot x =2\, (\simbolovettore{a}\,\simbolovettore{l})(x,y)\, p_x\\
\\
\dot y= 2\simbolovettore{l}(x,y)\, p_y\\
\\
\dot p_x=-[\partial_x(\simbolovettore{a} \, \simbolovettore{l}) p_x^2+ (\partial_x \simbolovettore{l})\, p_y^2 + \Gamma/(8\pi) \simbolovettore{b}(x,y)^{-1}\,\partial_x \simbolovettore{b}(x,y)]\\
\\
\dot p_y=- [\partial_y(\simbolovettore{a} \, \simbolovettore{l}) p_x^2+ (\partial_y \simbolovettore{l})\, p_y^2 + \Gamma/(8\pi) \simbolovettore{b}(x,y)^{-1}\,\partial_y \simbolovettore{b}(x,y)]
\end{array}\right. \ .
\end{equation}
These equations govern the motion of a particle constrained on a
spherical surface and subject to a force field generated by a
logarithmic potential, as seen on a stereographic plane.
\begin{rem}
Before we delve into the analysis of system \eqref{eq:hamiltonequ}
let us remark that when defining the potential function one could
consider different notions of the distance between two points on a
sphere. A reasonable choice could be the geodesic distance, that is
the length of the shortest arc of a great circle passing through two
points. More precisely, for any couple $\simbolovettore{x},\simbolovettore{y}\in \mathbb S^n(r)
\subset \mathbb R^{n+1}$, the geodesic
distance $d_\mathbb S(\simbolovettore{x}, \simbolovettore{y})$ is given by
\begin{equation}\label{eq:distanzageodetica}
d_\mathbb S(\simbolovettore{x}, \simbolovettore{y}):= r\, \arccos \dfrac{\langle \simbolovettore{x}, \simbolovettore{y}\rangle}{r^2},
\end{equation}
where $\langle\cdot,\cdot\rangle$ is the Euclidean scalar product in $\mathbb R^{n+1}$.
In the following only the chord distance will be considered, but we
guess that the local flow, that is the dynamics close and up to the
singularity, should not be different when the geodesic distance is
taken into account. On the other side, we expect the global flow
to be slightly different. However a complete study of the dynamics
with the geodesic distance, the differences and similarity with the
dynamics on the plane and with the one here described, could be
material for future investigations.
\end{rem}
\section{Energy hypersurfaces, regularization and flow}\label{sec:stabunstab}
We begin the analysis of system \eqref{eq:hamiltonequ}
with the description of the topology of the constant-energy
hypersurfaces associated to $K_{mech}$. For any $h\in\mathbb R$ the
hypersurface of constant energy $h$ is given by
\begin{equation}
\begin{array}{rl}
\widetilde \Sigma_h&:= \{(x,y, p_x, p_y) \in T^*X: K_{mech}(x,y, p_x, p_y)=h\}\\
&=\left\{(x,y, p_x, p_y) \in T^*X: \simbolovettore{a}(x,y) \, p_x^2+ p_y^2 = \dfrac{1}{\simbolovettore{l}(x,y)}
\Big(h - \Gamma/(8\pi) \, \log(\simbolovettore{b}(x,y))\Big)\right\}.
\end{array}
\end{equation}
where $X:=\mathbb R^2\backslash\{0, V\}$ denotes the configuration space and
$T^*X$ the phase space (the cotangent bundle over $X$).
Since the function $\simbolovettore{a}(x,y)$ is strictly positive, for any value of
$h$ the motion is allowed only in those regions of the configuration
space where the right hand side of the equation in the definition of
$\widetilde \Sigma_h$ is positive (Fig.\ref{fig:energylevels}). In the Lemma
\ref{thm:zonedinamiche} below, the analysis is performed for
$\Gamma>0$: changing the sign of $\Gamma$ simply switches the allowed region
with the forbidden region.
\begin{figure}
\caption{Zero-level curves of the function $\tilde E_{h}
\label{fig:energylevels}
\end{figure}
\begin{lem}\label{thm:zonedinamiche}
For any fixed $h$ let $\widetilde E_{h}: \mathbb R^{2} \rightarrow \mathbb R$ be defined by:
\begin{equation}\label{eq:ehatinalpha}
\widetilde E_{h}(x,y):= \Big(h - \Gamma/(8\pi) \, \log(\simbolovettore{b}(x,y))\Big),
\end{equation}
and let $h_{1}=\tfrac{\Gamma}{8\pi}\log(2R^{2})$, $h_{2}=\tfrac{\Gamma}{4\pi}\log(2R)$.
Then
\begin{enumerate}
\item for every $h >h_2$ the function $\widetilde E_{h} (x,y)$ is positive for any $(x,y)\in\mathbb R^{2}$;
\item for any $h \in (h_1, h_2)$ there exists a disk $D^{1}_{h}$ in
the $\{y<0\}$ half-plane and containing the point $(0,-2R)$ such
that $\widetilde E_{h} (x,y)$ is positive for each $(x,y) \in
\mathbb R^{2}\backslash D^{1}_h$ and negative otherwise;
\item for any $h < h_1$ there exists a disk $D_h^{2}$ in the $\{y>0\}$
half-plane, containing the point $(0,2R)$, such that $ \widetilde
E_{h}(x,y)$ is positive
for all $(x,y) \in D_h^{2}$ and negative otherwise.
\end{enumerate}
\end{lem}
\proof The logarithm is a monotone function, therefore the topology of
the level sets of $\widetilde E_{h}(x,y)$ only depends on the level
sets of $\simbolovettore{b}(x,y)$. For any $\delta$ the $\delta$-level set of
$\simbolovettore{b}(x,y)$ is given by the point $(x,y)$ lying on the circle
$C_{\delta}$ with equation
$$
x^{2}+y^{2}-\frac{8R^{3}}{2R^{2}-\delta}y+4R^{2}=0\ .
$$
The center of $C_{\delta}$ is placed in the point
$O_{\delta}=(0,\tfrac{4R^{3}}{2R^{2}-\delta})$ and the radius is
$r_{\delta}=\tfrac{2R}{|2R^{2}-\delta|}\sqrt{4\delta
R^{2}-\delta^{2}}$. It follows that the function $\simbolovettore{b}(x,y)$ only
admits values $\delta$ in the range $\delta\in[0,4R^{2}]$. For
$\delta=4R^{2}$ the $\delta$-level set restricts to the point $\tilde
P=(0,-2R)$ while,
as $\delta$ decreases towards $2R^{2}$, the level set
consists of a circle completely contained in the $\{y<0\}$
half-plane. Moreover it can be checked that
$|O_{\delta}-(0,-2R)|<r_{\delta}$, meaning that the point $\tilde P$
is always surrounded by these circles. The value $\delta=2R^{2}$ is a
singularity for the topology of the level sets: indeed the center of
$C_{\delta}$ as well as the radius $r_{\delta}$ diverge. Note that
$\simbolovettore{b}(x,0)=2R^{2}$ for any $x$ and
$\lim_{x^{2}+y^{2}\to\infty}{\simbolovettore{b}(x,y)}=2R^{2}$. Then as $\delta$
decreases below $2R^{2}$ towards zero, the circles $C_{\delta}$ live
in the positive-$y$ halfplane and shrinks around the point $\tilde
Q=(0,2R)$.
$\Box$
In order to develop a McGehee type transformation, we define the functions
\begin{equation}\label{eq:lefi}
\left\{
\begin{array}{ll}
\varphi_1(r):= r\, e^{-1/r^2}\\
\varphi_2(r):= 1/r
\end{array}\right.
\end{equation}
and, following the notation of \cite{stoica}, we introduce the change
of variables
\begin{equation}
\left\{\begin{array}{ll}
x=\varphi_1(r)\, s_1\\
y= \varphi_1(r)\, s_2 + 2R
\end{array}\right.,\qquad \left\{\begin{array}{ll}
p_x=\varphi_2(r)\, z_x\\
p_y= \varphi_2(r)\, z_y
\end{array}\right.
\end{equation}
where $\simbolovettore{s}=(s_1, s_2)=(\cos\alpha,\sin\alpha) \in \mathbb S^1$ is a
point on the unit circle. It readily follows that
\[
\simbolovettore{a}(r,\simbolovettore{s})=\left[\dfrac{8R^2 + 4R\varphi_1(r) s_2+ \varphi_1^2}{4R\sqrt{\varphi_1^2+ 4R^2+ 4R \varphi_1(r) s_2}}\right]^2,
\qquad \simbolovettore{b}(r,\simbolovettore{s})= \dfrac{2R^2\,\varphi_1(r)^2}{8R^2 + \varphi_1(r)^2+ 4R \varphi_1(r)\, s_2},
\]
\[
\simbolovettore{l}(r,\simbolovettore{s})= \dfrac{8\,R^2}{(8\,R^2+ \varphi_1^2+ 4R\varphi_1\,s_2)^2},
\qquad \textrm{and} \qquad(\simbolovettore{a}\,\simbolovettore{l})(r,\simbolovettore{s})= \dfrac{1}{2(4\,R^2 + \varphi_1^2 + 4R\,\varphi_1\,s_2)};
\]
hence in these new coordinates the energy surfaces $\Sigma_h$ can be written as
\[
\Sigma_h=\left\{(r,\simbolovettore{s},z_x,z_y) \in \mathbb R^+ \times \mathbb S^1 \times \mathbb R^2:\ \simbolovettore{a}(r,\simbolovettore{s}) \, z_x^2+ z_y^2 = \dfrac{ r^2}{\simbolovettore{l}(r,\simbolovettore{s})}
\Big(h - \Gamma/(8\pi) \, \log(\simbolovettore{b}(r,\simbolovettore{s}))\Big)\right\}.
\]
We also observe that
\begin{itemize}
\item $\lim_{r\to 0^+} \simbolovettore{a}(r,\simbolovettore{s})=1\qquad \textrm{uniformly with respect to}\ \simbolovettore{s};$
\item $\lim_{r\to 0^+} \simbolovettore{b}(r,\simbolovettore{s})=0\qquad \textrm{uniformly with respect to}\ \simbolovettore{s}; $
\item $\lim_{r\to 0^+} \simbolovettore{l}(r,\simbolovettore{s})=1/(8R^2)\qquad \textrm{uniformly with respect to}\ \simbolovettore{s}. $
\end{itemize}
By taking into account the definition of the functions $\varphi_j$,
the right hand side of the equation defining the level set
$\Sigma_{h}$ reduces to
\[
\hat E(h,r, \simbolovettore{s}):= \dfrac{r^2}{\simbolovettore{l}(r,\simbolovettore{s})}\left[h- \Gamma/(8 \pi)\log(2 R^2 r^2 e^{-2/r^2}) + \Gamma/(8\pi)\log(\simbolovettore{c}(r,\simbolovettore{s}))\right]
\]
where $\simbolovettore{c}(r,\simbolovettore{s}):= 8R^2 + \varphi_1(r)^2+ 4R \varphi_1(r)\, s_2=8R^{2}+r^{2}e^{-2/r^{2}}+4Rre^{-1/r^{2}}s_{2}$.
We observe that
\begin{equation}
\label{eq:energy_coll_man} \lim_{r \to 0^+} \hat E(h,r, \simbolovettore{s})= \dfrac{2\,\Gamma\, R^2}{\pi},
\end{equation}
thus, as already implicit in Lemma \ref{thm:zonedinamiche}, in the
attractive case ($\Gamma >0$) the vortex point lies in the allowed region
of every energy level $h$, while the opposite holds in the repelling case
($\Gamma<0$). However, a first important consequence of the change of
variable above introduced is that in the variables $(r,s)$ the kinetic
energy remains bounded when a collision occurs.
From now on, we shall only consider the attractive case. Thus we
assume
$$
\Gamma>0.
$$
The intersection between one (and hence every) energy hypersurface
$\Sigma_h$ with $r=0$ is called {\em total collision manifold.\/} In
virtue of the limit \eqref{eq:energy_coll_man}, we may
conclude that
\begin{itemize}
\item the total collision manifold does not depend on the fixed energy
level $h$; otherwise stated it is a boundary of every energy level;
\item it is diffeomorphic to the two dimensional torus $\mathbb T:=\mathbb S^1 \times \mathbb S^1$.
\end{itemize}
\begin{figure}
\caption{Graph of the function $\hat E$ and of $\hat E=0$ in the three cases: (a) $h>h_{2}
\label{fig:3casi}
\end{figure}
From the dynamical viewpoint an important role is played by the zero
set of the function $\hat E$:
\[
\simbolovettore{Z}_h:=\{(r,\alpha) \in X: \ \hat E(h,r,\alpha)=0\}.
\]
In the following we refer to this set as the {\em zero velocity
manifold\/} in $\Sigma_h$. Rephrasing the results of Lemma
\ref{thm:zonedinamiche} in terms of the new coordinates $(r,\alpha)$
it readily follows that $\simbolovettore{Z}_h$ is empty in the first case $(h>h_{2})$
and non-empty otherwise. (Figure \ref{fig:3casi}). In the second
case, $(h_{1}\leq h \leq h_{2})$, the zero velocity manifold is
represented by a simple closed curve homeomorphic to a circle (or to a
point in the limit $h\to h_2$). The motion is forbidden in the region
bounded by the curve. In the third case, the zero set can be seen as
the graph of a single-valued function $\alpha \mapsto r(\alpha)$ and the function
$\hat E$ is positive for $0<r<r(\alpha)$, which is the region on the left of the
curve shown in figure (\ref{fig:zerovelnegative}b)
\begin{figure}
\caption{Zero Velocity manifold (a) in the second case $h \in (h_1, h_2)$, (b) in the third case: $h<h_1$.}
\label{fig:zerovelnegative}
\end{figure}
\subsection*{Regularization and McGehee coordinates}
We now use the new variables $r,$ $\alpha$ and $\simbolovettore{z}$ in the equations
of motion \eqref{eq:hamiltonequ}. In order to preserve the continuity
of the flow with respect to the initial data, we need to ensure that
the transformed system has an everywhere differentiable vector
field. To this purpose we rescale the time variable in terms of the
distance from the singularity with the effect to exponentially
decrease the velocities near the singularity. As a result the
collision solutions (which are singular in the old coordinates) move
along smooth orbits that asymptotically converge to the collision
manifold.
Let us define $d\tau= \varphi_2(r)\varphi_1^{-1}(r)\, dt$ and use the notation
\[
\langle \simbolovettore{z}, \simbolovettore{s}(\alpha)\rangle_a:= \simbolovettore{a}(r,\alpha)\, z_x\, \cos\alpha + z_y\, \sin\alpha.
\]
With the help of the identities
\[
\dfrac{\varphi_1(r)}{\varphi_1'(r)}= \dfrac{r^3}{r^2+2}, \qquad \dfrac{\varphi_1(r)}{\varphi_2^2(r)}= r^3\, e^{-1/r^2}, \qquad
\dfrac{\varphi_1(r)\varphi_2'(r)}{\varphi_2(r)\varphi_1'(r)}=-\dfrac{r^2}{r^2+2}
\]
the Hamiltonian equations in \eqref{eq:hamiltonequ} become
\begin{equation}\label{eq:mcgehee1}
\left\{ \begin{array}{ll}
\dfrac{dr}{d\tau} &= \dfrac{2r^3}{(2+r^2)}\,\simbolovettore{l}(r,\alpha)\,\langle \simbolovettore{z}, \simbolovettore{s}(\alpha)\rangle_a\\
\\
\dfrac{d\alpha}{d\tau}&= \simbolovettore{l}(r,\alpha)\,(z_y \cos\alpha- \simbolovettore{a}(r, \alpha) z_x \sin \alpha)\\
\\
\dfrac{dz_x}{d\tau}&=- \, r e^{-1/r^2} [(\simbolovettore{a}\simbolovettore{l})_x(r, \alpha) z_x^2 + \simbolovettore{l}_x(r,\alpha) \, z_y^2] - \dfrac{\Gamma}{8\pi}\, r^3\, e^{-1/r^2}
\dfrac{\simbolovettore{b}_x(r,\alpha)}{\simbolovettore{b}(r,\alpha)}+\\
&+2\,\dfrac{r^2}{r^2+2} \simbolovettore{l}(r,\alpha)\langle \simbolovettore{z}, \simbolovettore{s}(\alpha)\rangle_a\, z_x\\
\\
\dfrac{dz_y}{d\tau}&=- \, r e^{-1/r^2} [(\simbolovettore{a}\simbolovettore{l})_y(r, \alpha) z_x^2 + \simbolovettore{l}_y(r,\alpha) \, z_y^2] - \dfrac{\Gamma}{8\pi}\, r^3\, e^{-1/r^2}
\dfrac{\simbolovettore{b}_y(r,\alpha)}{\simbolovettore{b}(r,\alpha)}+\\
&+2\,\dfrac{r^2}{r^2+2} \simbolovettore{l}(r,\alpha)\langle \simbolovettore{z}, \simbolovettore{s}(\alpha)\rangle_a\, z_y
\end{array}\right.
\end{equation}
where the subscripts in $(\simbolovettore{a}\simbolovettore{l})_x(r,\simbolovettore{s}), (\simbolovettore{a}\simbolovettore{l})_y(r,\simbolovettore{s})$
(resp. $\simbolovettore{b}_x(r,\simbolovettore{s}), \simbolovettore{b}_y(r,\simbolovettore{s})$) denote the partial derivative
with respect to the old cartesian variables. The derivate function is
then evaluated in the new coordinates at the point $(r,\alpha)$.
The equations above are no longer singular at $r=0$: in fact, by computing
${\displaystyle \frac{\simbolovettore{b}_{x}}{\simbolovettore{b}}(r,\alpha)}$ one finds
${\displaystyle \frac{\simbolovettore{b}_{x}}{\simbolovettore{b}}(r,\alpha)\sim\varphi_{1}^{-1}}$
as $r\to 0$. Thus the time change produces the effect to regularize
the singularity. In addition the $\{r=0\}$ manifold results to be
invariant.
From a naive point of view, the study of the flow on the collision
manifold could appear meaningless, since the manifold is the image of
just a singular point where the orbits cease to exists. In reality,
the properties of the flow on such manifold yield informations on
the behavior of the orbits {\em close to} the singularity.
In order to simplify system \eqref{eq:mcgehee1}, we introduce a
further change of coordinates and time rescaling. Using the energy
relation
\begin{equation}\label{eq:energyrel}
\simbolovettore{a}(r,\alpha)\, z_x^2 + z_y^2 = \hat E(h,r,\alpha),
\end{equation}
let us define $\psi$ and $\sigma$ such that
\begin{equation}
\left\{
\begin{array}{ll}
z_x= \sqrt{\hat E(h ,r , \alpha)/\simbolovettore{a}(r, \alpha)}\, \cos \psi, \\
z_y= \sqrt{\hat E(h, r, \alpha)} \, \sin \psi \\
\noalign{\vskip 3pt}d\tau=\sqrt{\hat E(h, r,\alpha)}\, d\sigma
\end{array}\right.
\end{equation}
Let us denote with $A_1(r,\alpha,z)$, $A_2(r,\alpha, z)$ the right
hand side of, respectively, the third and fourth equation in
\eqref{eq:mcgehee1}, so that $\dfrac{dz_x}{d\tau}=A_{1},
\dfrac{dz_y}{d\tau}=A_{2}$.
Therefore, on any fixed-energy shell, the system given in \eqref{eq:mcgehee1}
reads
\begin{equation}\label{eq:mcgehee2}
\left\{ \begin{array}{ll}
\dfrac{dr}{d\sigma} = \dfrac{2r^3\, \hat E(h, r, \alpha)}{(2+r^2)}\,\simbolovettore{l}(r,\alpha)(\sqrt{\simbolovettore{a}(r,\alpha) }\, \cos\psi\, \cos \alpha\,+ \, \sin\psi\,\sin\alpha)\\
\\
\dfrac{d\alpha}{d\sigma}= \hat E(h,r,\alpha)\,\simbolovettore{l}(r,\alpha)(\sin\psi\, \cos\alpha- \sqrt{\simbolovettore{a}(r, \alpha)}\, \cos\psi \sin \alpha)\\
\\\
\dfrac{d\psi}{d\sigma}= B(r, \alpha, \psi)
\end{array}\right.
\end{equation}
where $B$ is given by:
\[
B(r,\alpha,\psi):= -\sqrt{\simbolovettore{a}} \, \sin\psi\, A_1+ \sqrt{\simbolovettore{a}}\sin \psi\, \cos\psi \dfrac{d}{d\tau}\,
\left(\sqrt{\dfrac{\hat E}{\simbolovettore{a}}}\right) + A_2\, \cos\psi - \dfrac{d}{d\tau}(\sqrt{\hat E}) \,\sin \psi \cos\psi.
\]
\subsection*{Flow and invariant manifolds}
Recalling that $\simbolovettore{l}$ is everywhere positive, the restpoints of
\eqref{eq:mcgehee2} correspond to solutions of the
following systems:
\begin{equation}\label{systems}
\left\{
\begin{array}{ll}
r=0\\
f_2(r,\alpha, \psi)=0\\
B(r,\alpha,\psi)=0
\end{array}\right. \qquad \textrm{or} \qquad
\left\{
\begin{array}{ll}
f_1(r,\alpha, \psi)=0\\
f_2(r,\alpha, \psi)=0\\
B(r,\alpha,\psi)=0
\end{array}\right. \qquad \textrm{or}\qquad
\left\{
\begin{array}{ll}
\hat E(h,r,\alpha)=0\\
B(r,\alpha,\psi)=0
\end{array}\right.
\end{equation}
where
\begin{equation*}
f_1(r,\alpha, \psi):= \sqrt{\simbolovettore{a}} \, \cos\psi\, \cos \alpha\,+ \, \sin\psi\,\sin\alpha, \qquad
f_2(r,\alpha, \psi) := \sin\psi\, \cos\alpha- \sqrt{\simbolovettore{a}}\, \cos\psi \sin \alpha\ .
\end{equation*}
We immediately discard the second system as it allows no solutions.
We note that $\simbolovettore{a} \to 1$ for $r\to 0$, thus the first system reduces to
\begin{equation*}
\left\{
\begin{array}{ll}
r=0\\
\sin(\psi-\alpha)=0\\
B(r,\alpha,\psi)=0
\end{array}\right.
\end{equation*}
whose solutions correspond to fixed points on the collision
manifold. The existence of solutions of the last system depends on the
energy level $h$: if $h\geq h_{2}$ the zero set of $\hat E$ is empty
and no solutions exist. For $h\leq h_2$, some solution
may exist.
Summarizing, any restpoint either lies on the collision manifold or on
the zero velocity manifold. Let us first consider the collision
manifold: the asymptotic analysis of the function $B(r,\alpha, \psi)$
on the collision manifold (see Appendix A) gives
\[
\lim_{r\to 0^+} B(r, \alpha, \psi)=0.
\]
It follows that
\begin{lem}\label{thm:restpoints}
The equilibria of the vector field given in \eqref{eq:mcgehee2} lying
on the total collision manifold consists of two curves. In local
coordinates $(r, \alpha, \psi)$ these curves are given by
\begin{enumerate}
\item[(i)] $\mathscr P_1\equiv(0,\alpha,\alpha)$;
\item[(ii)] $\mathscr P_2 \equiv (0,\alpha, \pi+\alpha)$.
\end{enumerate}
\end{lem}
\begin{prop}\label{pro:piep2selledegeneri}
For each $\alpha$, the equilibrium points
\[
(0,\alpha,\alpha) \in \mathscr P_1
\]
and
\[
(0, \alpha, \pi+\alpha) \in \mathscr P_2
\]
are degenerate saddles.
\begin{enumerate}
\item $
\dim W^u(\mathscr P_1)=1, \qquad \dim W^s(\mathscr P_1)=1, \qquad \dim W^0(\mathscr P_1)=1.$
\item $
\dim W^u(\mathscr P_2)=1, \qquad \dim W^s(\mathscr P_2)=1, \qquad \dim W^0(\mathscr P_2)=1.$
\end{enumerate}
\end{prop}
\proof The flow on the collision manifold is given by
\begin{equation}\label{eq:mcgehee22trisdiel=2collmanfld}
\left\{
\begin{array}{ll}
\dfrac{d\alpha}{d\sigma}= \dfrac{\Gamma}{4\pi}\, \sin (\psi-\alpha)\\
\\
\dfrac{d\psi}{d \sigma} = 0.\\
\end{array}\right.
\end{equation}
whose orbits are parallel to $\alpha$-axis and flow from
$\mathscr P_{2}$ to $\mathscr P_{1}$. The stability of the restpoints
is determined by the eigenvalues of the Jacobian matrix of
\eqref{eq:mcgehee2}. It follows (see appendix \ref{subsec:asy}) that
for any point $P_{1}\in\mathscr P_{1}$ and $P_{2}\in\mathscr P_{2}$
the eigenvalues are
\begin{equation*}
P_{1}\in\mathscr P_{1}\Rightarrow\left\{
\begin{array}{ll}
\lambda_r=0\\
\lambda_\alpha=-\dfrac{\Gamma}{4\pi}\\
\lambda_\psi=0.
\end{array}\right.,\qquad
P_{2}\in\mathscr P_{2}\Rightarrow \left\{
\begin{array}{ll}
\lambda_r=0\\
\lambda_\alpha=\dfrac{\Gamma}{4\pi}\\
\lambda_\psi=0.
\end{array}\right.
\end{equation*}
\begin{figure}
\caption{The collision manifold, the curves of restpoints $\mathscr P_{1}
\label{fig:streamlines}
\end{figure}
and are coherent with the dynamics restricted on the collision
manifold as given by \eqref{eq:mcgehee22trisdiel=2collmanfld}, where
$\mathscr P_{1}$ is an attractor and $\mathscr P_{2}$ is a repeller
(figure \ref{fig:streamlines}). However, the presence of null
eigenvalues implies that the linear approximation of the flow, taken
alone, does not provide enough information to determine the
qualitative dynamics close to the equilibrium points. As the flow in the
$\psi$ direction is null (and in fact $\psi$ can be regarded as a
parameter for an equilibrium point), in order to determine the
asymptotic behavior close to $P_1$ and $P_2$, it is enough to investigate
the dynamics restricted to the $(r,\alpha)$-plane. The proof that the
two equilibrium curves are indeed degenerate saddles follows by direct
integration of the system once the equations have been expanded around
the equilibrium point in Taylor series. We omit the details and we
refer to the equivalent proof of Lemma 7.4 in \cite{stoica}.
$\Box$
\begin{table}\centering
\begin{tabular}{|l|c|c|c|}
\hline &
\begin{sideways}$\dim W^{s}\ $\end{sideways} &
\begin{sideways}$\dim W^{u}\ $\end{sideways} &
\begin{sideways}$\dim W^0\ $\end{sideways}
\\ \hline
&&&\\
\multirow{1}{*}{At $\mathscr P_1$}& $1$ &
$1$ & $1 $ \\
\hline &&&\\
\multirow{1}{*}{At $\mathscr P_2$} & $1$&
$1$ & $1$ \\
\hline
\end{tabular}
\caption{Dimensions of the invariant manifolds along the equilibrium curves $\mathscr P_1$ and $\mathscr P_2$}
\label{tb:tabella1}
\end{table}
\begin{defn}
We shall say that the flow on the collision manifold is {\em totally
degenerate\/} if the unstable manifold of an equilibrium point
$\simbolovettore{P}_1 \in \mathscr P_1$ , coincides with the stable manifold of
some equilibrium point $\simbolovettore{P}_2 \in \mathscr P_2$.
\end{defn}
\begin{lem}\label{thm:flussodegcollmnfld}
The flow on the total collision manifold is totally degenerate. More
precisely
\begin{enumerate}
\item[(i)] $W^u(\simbolovettore{P}_1) \equiv W^s(\simbolovettore{P}_2)$;
\item[(ii)] $W^u(\simbolovettore{P}_2) \equiv W^s(\simbolovettore{P}_1)$;
\end{enumerate}
where $\simbolovettore{P}_1 \in \mathscr P_1$ and $\simbolovettore{P}_2 \in \mathscr P_2$ are chosen
in such a way the last coordinate of the two points agrees.
\end{lem}
\proof The proof of this result follows by a straightforward
integration of the equations of motion on the total collision manifold
$r=0$.
$\Box$\\
A direct consequence of the previous result is the following:
\begin{cor}({\bf Existence of heteroclinic
connections\/})\label{thm:collhetoncollmanfold} There exists an
heteroclinic connection between each equilibrium point $\simbolovettore{P}_1 \in
\mathscr P_1$ and the point $\simbolovettore{P}_2 \in \mathscr P_2$ where $\simbolovettore{P}_1,
\simbolovettore{P}_2$ were chosen in such a way that they have the same projection
on the first and third coordinate.
\end{cor}
\proof The proof of this result follows immediately by the previous
result. By the fact that $r=0$ and $\psi$ is constant, it follows that
the non equilibrium solutions are in the $(\alpha, \psi)$-plane lines
parallel to the $\alpha$-axis. Moreover each point of equilibrium on
$\mathscr P_1$ is attracting while each equilibrium point on $\mathscr
P_2$ is repelling.
$\Box$ \\
Moving out of the collision manifold, the two lines $\mathscr P_{1}$
and $\mathscr P_{2}$ exhibit the opposite stability character: indeed
$\dfrac{dr}{d\sigma}>0$ for $\psi=\alpha$ and $\dfrac{dr}{d\sigma}<0$
when $\psi=\alpha+\pi$, meaning that the system goes into the
collision along $\mathscr P_{2}$ and escape from the collision along
$\mathscr P_{1}$.
Next we examine the restpoints and the flow on the zero velocity
manifold. This is more easily accomplished by looking at the system
given in \eqref{eq:mcgehee1}. Restpoints, in fact, are not changed by
a time scaling. Since the zero velocity manifold coincides with the
zero set of the function $\hat E$, setting $\hat E=0$ in the energy
relation \eqref{eq:energyrel} implies $z_x=z_y=0$; it follows that on
the zero velocity manifold the dynamical system \eqref{eq:mcgehee1}
reduces to:
\begin{equation}\label{eq:mcgeheezerovelmnfld}
\left\{ \begin{array}{ll}
\dfrac{dr}{d\tau} = 0\\
\\
\dfrac{d\alpha}{d\tau}= 0\\
\\
\dfrac{dz_x}{d\tau}=- \dfrac{\Gamma}{8\pi}\, r^3\, e^{-1/r^2}
\dfrac{\simbolovettore{b}_x(r,\alpha)}{\simbolovettore{b}(r,\alpha)}\\
\\
\dfrac{d z_y}{d\tau}= - \dfrac{\Gamma}{8\pi}\, r^3 e^{-1/r^2}
\dfrac{\simbolovettore{b}_y(r,\alpha)}{\simbolovettore{b}(r,\alpha)}.
\end{array}\right.
\end{equation}
The restpoints on the zero velocity manifold (if any) correspond
to the solutions of the equations:
\[
\dfrac{ \simbolovettore{b}_x(r,\alpha)}{\simbolovettore{b}(r,\alpha)}=\dfrac{\simbolovettore{b}_y(r,\alpha)}{\simbolovettore{b}(r,\alpha)}=0.
\]
An elementary calculation shows the following result.
\begin{lem}\label{thm:restvelmnfld}
For $h=h_2$ (defined above as $h_2:= \dfrac{\Gamma}{4\pi} \log
(2R)$) there exists only one restpoint on the zero velocity
manifold at $P:=(r_*, 3\pi/2)$ for $\varphi_1(r_*)= 4R$. For
$h\neq h_2$ there are no restpoints.
\end{lem}
\section{Global flow and dynamics on the sphere}\label{sec:global}
It is now possible to bring back on the sphere the results found on
the stereographic plane in the previous sections.
In terms of the coordinates $(\phi,\theta)$ on the sphere the Hamiltonian reads as:
\[
H(\theta, \phi, p_\theta, p_\phi)= \dfrac{1}{2 R^2} \left(\dfrac{1}{\sin^2\theta} \, p_\phi^2+ p_\theta^2\right) + \dfrac{\Gamma}{8\pi} \log(2R^2(1-\sin\theta\sin\phi)).
\]
Note that
the vortex is located at $(\phi,\theta)=(\pi/2, \pi/2)$,
therefore for $(\phi,\theta)\to (\pi/2, \pi/2)$ the dynamical behavior
becomes unknown since the vectorfield ceases to exist.
Let us call {\em vortex half-sphere\/} the half-sphere centered around
the vortex point, and {\em antivortex half-sphere\/} the complementary
half-sphere; let us call {\em vortex-parallel\/} any circle on the
sphere equidistant from the vortex and {\em vortex-meridian\/} any
great circle passing through the vortex. Finally let us call {\em
antipodal point} the point on the sphere opposite to the vortex
point. On the sphere the results of Lemma \ref{thm:zonedinamiche} can
be rephrased as follows.
\begin{thm}\label{thm:1}
If $h<h_{2}$ the motion is allowed in the region of the sphere
containing the vortex point and bounded by a vortex-parallel that
lies on the vortex half-sphere for $h<h_{1}$ and in the
antivortex half-sphere otherwise. If $h\geq h_{2}$ then the motion
is allowed everywhere on the
sphere.
\end{thm}
Moreover, Lemma \ref{thm:restvelmnfld} is rephrased as:
\begin{thm}\label{thm:2}
For $h=h_{2}$ the zero velocity manifold consists only of one
point which is the antipodal point.
\end{thm}
In order to understand the global dynamics it is useful to show the
existence of a second conserved quantity, analogous to the angular
momentum for planar dynamics. To this aim it is convenient to move
the vortex point at the north pole $N=(0,0,2R)$ (or, equivalently, to
redefine the parameterization of the sphere). Obviously this does not
change the dynamics. Note that the curves $\{\theta=const\}$ and
$\{\phi=const\}$ now respectively correspond to the vortex parallels
and to the vortex meridians.
In this setting, the dynamical system reads as
\begin{equation}\label{eq:HJonspherenord}
\left\{
\begin{array}{ll}
\dot \phi = \dfrac{1}{R^2\, \sin^2\theta} \, p_\phi\\
\\
\dot \theta= \dfrac{p_\theta}{R^2}\\
\\
\dot p_\phi=0\\
\\
\dot p_\theta=\dfrac{\cos\theta}{R^2\, \sin^3\theta} \,p_\phi^2+\dfrac{\Gamma}{8\pi}\dfrac{\sin\theta}{2R^{2}(1+\cos\theta)}\\
\\
\end{array}\right.
\end{equation}
corresponding to the Hamiltonian
$$
H(\theta, \phi, p_\theta, p_\phi)= \dfrac{1}{2 R^2} \left(\dfrac{1}{\sin^2\theta} \, p_\phi^2+ p_\theta^2\right) + \dfrac{\Gamma}{8\pi} \log(2R^2(1+\cos\theta)).
$$
Going to the Lagrangian formulation, we can write the
Euler-Lagrange equations
$$
\frac{d}{dt}(R^{2}\dot\phi\sin^{2}\theta)=0
$$
$$
R^{2}\ddot\theta-R^{2}\sin\theta\cos\theta\dot\phi^{2}+\frac{\Gamma}{8\pi}\frac{\sin\theta}{2R^{2}(1+\cos\theta)}=0.
$$
The first shows the existence of a conserved quantity, namely the
spherical angular-momentum $l=R^{2}\sin^{2}\theta\dot\phi$. It follows
that
\begin{lem}
A necessary condition for a solution to either collide with the
vortex or to reach the antipodal point is $l=0$.
\end{lem}
\proof Writing the energy relation in terms of
$(\phi,\theta,\dot\phi,\dot\theta)$ and substituting
$\dot\phi=\frac{l}{R^{2}\sin^{2}\theta}$, it follows that a solution
exists only for those $\theta$ satisfying
$$
2R^{2}h\sin^{2}\theta-\frac{\Gamma}{4\pi}R^{2}\sin^{2}\theta\log(2R^{2}(1+\cos\theta))-l^{2}\geq 0
$$
Recalling that the vortex is placed at $\theta=\pi$, and that the
antipodal point is at $\theta=0$, it follows that if either a
collision occurs, or the point goes to the antipodal point, then
$l^{2}\leq 0$.
$\Box$\\
Looking at the system \eqref{eq:HJonspherenord}, one can easily prove
the existence of particular solutions as depicted in
Fig.~\ref{fig:orbitesfera}.
\begin{figure}
\caption{Particular orbits lying on the vortex-parallels and vortex-meridians}
\label{fig:orbitesfera}
\end{figure}
\begin{lem}
\begin{itemize}
\item[(i)] Any vortex-parallel on the vortex half-sphere is the
support of a periodic orbit.
\item[(ii)] The vortex-meridians are flow-invariant.
\end{itemize}
\end{lem}
\proof
For any $\simbolovettore\theta\in(\tfrac{\pi}{2},\pi)$ the curve
\[
\gamma_{\simbolovettore{\theta}}(t):=(\phi,\theta,p_{\phi},p_{\theta})(t)=\left(\phi_{0}+t\frac{\simbolovettore{p_{\phi}}}{R^{2}(\sin\simbolovettore\theta)^{2}},
\simbolovettore\theta,\simbolovettore{p_{\phi}},0\right)
\]
with
$\simbolovettore{p_{\phi}}^{2}=-\frac{\Gamma}{8\pi}\frac{(\sin\simbolovettore{\theta})^{4}}
{2\cos\simbolovettore{\theta}(1+\cos\simbolovettore{\theta})}$ is a
solution of the system. Note that the previous relation can not be
satisfied if $\simbolovettore{\theta}\in(0,\pi/2]$, which implies that only
the vortex-parallels lying in the vortex half-sphere are supports of
periodic orbits. Moreover the period of
$\gamma_{\simbolovettore{\theta}}(t)$ tends to zero as
$\simbolovettore{\theta}$ goes to $\pi/2$ or $\pi$. This proves statement
$(i)$. Statement $(ii)$ immediately follows by noting that
any initial data
$(\phi,\theta,p_{\phi},p_{\theta})(0)=(\phi^{0},\theta^{0},0,p_{\theta}^{0})$
leads to an orbit traveling on the $\{\phi=\phi^{0}\}$ vortex
meridian.
$\Box$\\
A consequence of the previous lemmas is that any orbit with energy
$h>h_{2}$ that passes through the antipodal point will end into the
collision.
The existence of heteroclinic connection anywhere on the total
collision manifold of the regularized flow (Corollary
\ref{thm:collhetoncollmanfold}) provides the way to extend beyond the
collision the orbits of the singular flow. In fact, if
$\gamma(t)=(\phi,\theta)(t):[0,T_{s})\rightarrow \simbolovettore{S}$ is a collision
solution ending in the singularity at time $T_{s}$, we define the
collision-transmission solution as the path
$\bar\gamma:[0,2T_{s}]\rightarrow \simbolovettore{S}$ as
$$
\bar\gamma(t):=\left\{\begin{array}{ll}
\gamma(t),&t\in[0,T_{s})\\
(\phi_{V},\theta_{V}) &t=T_{s}\\
(2\phi_{V}-\phi(2T_{s}-t),2\theta_{V}-\theta(2T_{s}-t)) &t\in(T_{s},2T_{s}]\\
\end{array}\right.
$$
where $(\phi_{V},\theta_{V})$ are the coordinates of the vortex
point. The extended flow obtained by replacing the singular
trajectories with the collision-transmission solution turns out to be
continuous with respect to the initial data. The same result for a
single logarithmic center on the plane has already been proved, among
others, in \cite{cate} with a completely different technique.
Finally, from the above discussion it follows that the
collision-transmission solution behaves in three different ways,
depending on the energy level $h$: if $h<h_{2}$, after the ejection
from the singularity, the particle reaches the zero velocity manifold,
then it reverses the motion and falls back into the vortex point; if
$h=h_{2}$ after the ejection the orbit asymptotically reaches the
antipodal restpoint (this is a heteroclinic orbit between a point of
$\mathscr{P}_2$ and the single restpoint on the zero velocity
manifold), and if $h>h_{2}$ after the ejection, the orbit travels
along a vortex meridian, passes through the antipodal restpoint,
continues the motion on the opposite meridian and falls down again
into the singularity.
\appendix
\section{Useful asymptotics } \label{sec:asympt}
In this appendix we list some asymptotic limits of the functions
appearing in the equation of motion in McGehee coordinates. They are
useful to compute the spectrum of the eigenvalues associated to the
fixed points.
\subsection*{On the total collision manifold $r=0$}\label{subsec:asy}
All the limits below are computed with respect to $r$ and are
uniform with respect to the other variables. For the function $\hat
E$, we have:
\[
\lim_{r \to 0^+} \hat E(h, r, \alpha)= \dfrac{4\Gamma\, R^4}{\pi},\quad
\lim_{r \to 0^+} \partial_r \hat E(h, r, \alpha)=0,\quad \lim_{r \to 0^+} \partial_\alpha \hat E(h, r, \alpha)=0
\]
For the functions $\simbolovettore{a}, \simbolovettore{b}$, we have
\[
\lim_{r \to 0^+} \simbolovettore{a}( r, \alpha)= 1,\quad
\lim_{r \to 0^+} \partial_r\simbolovettore{a}( r, \alpha)=0,\quad
\lim_{r \to 0^+} \partial_\alpha \simbolovettore{a}( r, \alpha)=0
\]
\[
\lim_{r \to 0^+} \simbolovettore{b}(r,\alpha)=0,\quad
\lim_{r \to 0^+}\partial_r \simbolovettore{b}(r,\alpha)=0
\]
For the functions $\simbolovettore{a}_x, \simbolovettore{b}_x, \simbolovettore{a}_y, \simbolovettore{b}_y$, we have
\[
\lim_{r \to 0^+} \simbolovettore{a}_x( r, \alpha)= 0\quad
\lim_{r \to 0^+} \simbolovettore{a}_y( r, \alpha)= 0\quad
\lim_{r \to 0^+} \partial_r\simbolovettore{a}_x( r, \alpha)=0
\]
\[
\lim_{r \to 0^+} \simbolovettore{b}_x(r,\alpha)=0\quad
\lim_{r \to 0^+} \simbolovettore{b}_y( r, \alpha)= 0\quad
\lim_{r \to 0^+}\partial_r \simbolovettore{b}_x(r,\alpha)=0
\]
\[
\lim_{r\to 0^+} \dfrac{r^3\, e^{-1/r^2} \simbolovettore{b}_x(r,\alpha)}{\simbolovettore{b}(r,\alpha)}=0
\]
\[
\lim_{r \to 0^+} \partial_\alpha\simbolovettore{a}_x( r, \alpha)=0\qquad
\lim_{r \to 0^+} \partial_\alpha\simbolovettore{a}_y( r, \alpha)=0
\]
\[
\lim_{r \to 0^+} \partial_\alpha\simbolovettore{b}_x( r, \alpha)=0\qquad
\lim_{r \to 0^+} \partial_\alpha\simbolovettore{b}_y( r, \alpha)=0
\]
\[
\lim_{r\to 0^+} \dfrac{r^3\, e^{-1/r^2} \partial_\alpha \simbolovettore{b}_x(r,\alpha)}{\simbolovettore{b}(r,\alpha)}=0,\qquad
\lim_{r\to 0^+} \dfrac{r^3\, e^{-1/r^2} \partial_\alpha \simbolovettore{b}_y(r,\alpha)}{\simbolovettore{b}(r,\alpha)}=0
\]
\[
\lim_{r\to 0^+} \dfrac{r^3\, e^{-1/r^2} \simbolovettore{b}_x(r,\alpha)\partial_\alpha \simbolovettore{b}_x(r,\alpha)}{\simbolovettore{b}^2(r,\alpha)}=0,\qquad
\lim_{r\to 0^+} \dfrac{r^3\, e^{-1/r^2} \simbolovettore{b}_y(r,\alpha)\partial_\alpha \simbolovettore{b}_y(r,\alpha)}{\simbolovettore{b}^2(r,\alpha)}=0
\]
For the functions $z_x, z_y$, we have:
\[
\lim_{r \to 0^+} z_x(r,\alpha, \psi)=\sqrt{\dfrac{\Gamma}{\pi}}\,2 R^2 \cos\psi,
\quad \lim_{r\to 0^+} z_y(r,\alpha, \psi)= \sqrt{\dfrac{\Gamma}{\pi}}\,2 R^2 \sin\psi,
\]
\[
\lim_{r\to 0^+} \partial_r z_x(r,\alpha, \psi)=0,\quad \lim_{r \to 0^+} \partial_\alpha z_x(r,\alpha, \psi)=0,
\]
\[
\lim_{r\to 0^+} \partial_\psi z_x(r,\alpha, \psi)=-\sqrt{\dfrac{\Gamma}{\pi}}\,2 R^2 \sin\psi,\quad \lim_{r \to 0^+} \partial_\psi z_y(r,\alpha, \psi)=
\sqrt{\dfrac{\Gamma}{\pi}}\, 2R^2 \cos\psi,
\]
\[
\lim_{r\to 0^+} \partial_r z_y(r, \alpha, \psi)=0, \quad \lim_{r \to 0^+} \partial_\alpha z_y(r,\alpha, \psi)=0.
\]
As a consequence of the above asymptotic behavior it follows that
\[
\lim_{r \to 0^+} A_1(r, \alpha, \psi)=0, \qquad \lim_{r \to 0^+} A_2(r, \alpha, \psi)=0, \qquad \lim_{r \to 0^+} B(r, \alpha, \psi)=0
\]
\[
\lim_{r \to 0^+} \partial_\alpha A_1(r, \alpha, \psi)=0, \qquad \lim_{r \to 0^+} \partial_\alpha A_2(r, \alpha, \psi)=0.
\]
Denoting by $J:=(J_{ij})_{i,j}$ the variational matrix on the total
collision manifold it follows that
\[
J_{11}=0, \quad J_{12}=0, \qquad J_{13}=0, \qquad J_{32}=0, \qquad J_{33}=0.
\]
\[
J_{21}=0, \qquad J_{22}=-\dfrac{\Gamma}{2\pi}\, \cos(\psi-\alpha),\qquad J_{23}=-J_{22},
\]
\[
\lim_{r\to 0^+} \partial^2_{\alpha \tau} \simbolovettore{a}(r,\alpha)=0, \qquad \lim_{r\to 0^+} \partial^2_{\alpha \tau} \hat E(h,r,\alpha)=0.
\]
The limit involved in the computation of the term $J_{31}$, in general
may not exist. However, at the restpoints $\psi=\alpha$ or
$\psi=\alpha+\pi$ this limit actually exists and this implies that
$J_{31}=0$.
\end{document} |
\begin{document}
\title[Pseudofree ${\mathbb Z}/3$-actions on $K3$ surfaces]
{Pseudofree ${\mathbb Z}/3$-actions on $K3$ surfaces}
\author{Ximin Liu}
\address{Department of Applied Mathematics, Dalian University of Technology, Dalian 116024, China (\emailfont{[email protected]})}
\author{Nobuhiro Nakamura}
\address{Research Institute for Mathematical Sciences, Kyoto university, Kyoto, 606-8502, Japan (\emailfont{[email protected]})}
\begin{abstract}
In this paper, we give a weak classification of locally linear pseudofree actions of the cyclic group of order $3$ on a $K3$ surface, and prove the existence of such an action which can not be realized as a smooth action on the standard smooth $K3$ surface.
\end{abstract}
\subjclass[2000]{Primary: 57S17. Secondary: 57S25, 57M60, 57R57}
\keywords{group actions, locally linear, pseudofree, $K3$ surface, Seiberg-Witten invariants.}
\maketitle
\section{Introduction}\label{sec:intro}
Let $G$ be the cyclic group of order $3$ ($G={\mathbb Z}/3$), and
suppose that $G$ acts locally linearly and pseudofreely on a $K3$ surface $X$.
(An action on a space is called {\it pseudofree} if it is free on the complement of a discrete subset.)
The purpose of this paper is to give a weak classification of such $G$-actions and to prove that there exists such an action on $X$ which can not be realized by a smooth action for the standard smooth structure on $X$.
\begin{Theorem}\label{thm:main0}
There exists a locally linear pseudofree $G$-action on a $K3$ surface $X$ which can not be realized by a smooth action for the standard smooth structure on $X$.
\end{Theorem}
After submitting this paper to the journal, the authors found that the $G$-action in \thmref{thm:main0} is unsmoothable for infinitely many smooth structures on $X$.
This is proved in \remref{rem:inf}.
To state the result more precisely, we prepare notation.
Let $b_i$ be the $i$-th Betti number of $X$, and $b_+$ (resp. $b_-$) be the rank of a maximal positive (resp. negative) definite subspace $H^+(X;{\mathbb R})$ (resp. $H^-(X;{\mathbb R})$) of $H^2(X;{\mathbb R})$.
For any $G$-space $V$, let $V^G$ be the fixed point set of the $G$-action.
Let $b_\bullet^G = \dim H^\bullet (X;{\mathbb R})^G$, where $\bullet = 2, +, -$.
The Euler number of $X$ is denoted by $\chi (X)$ and the signature of $X$ by $\operatorname{Sign}(X)$.
When we fix a generator $g$ of $G$, the representation at a fixed point can be described by a pair of nonzero integers $(a,b)$ modulo $3$ which is well-defined up to order and changing the sign of both together.
Hence, there are two types of fixed points.
\begin{itemize}
\item The type ($+$): $(1,2) = (2,1)$.
\item The type ($-$): $(1,1) = (2,2)$.
\end{itemize}
Let $m_+$ be the number of fixed points of the type ($+$), and $m_-$ be the number of fixed points of the type ($-$).
\thmref{thm:main0} immediately follows from the next theorem.
\begin{Theorem}\label{thm:main}
Let $G$ be the cyclic group of order $3$.
For locally linear pseudofree $G$-actions on a $K3$ surface $X$, we have the following{\textup :}
\begin{enumerate}
\item Every locally linear pseudofree $G$-action on $X$ belongs to one of four types in \tabref{tab:actions}.
Furthermore, each of four types can be actually realized by a locally linear pseudofree $G$-action on $X$.
\begin{table}[h]
\caption{The classification of actions}
\label{tab:actions}
\begin{center}
\begin{tabular}{l|c|c|c|c|c|c|c}
Type & $\#X^G$ & $m_+$ & $m_-$ & $b_2^G$ & $b_+^G$ & $b_-^G$ & $\operatorname{Sign}(X/G)$ \\
\hline
$A_0$ & $6$ & $6$ & $0$ & $10$ & $3$ & $7$ & $-4$ \\
$A_1$ & $9$ & $3$ & $6$ & $12$ & $3$ & $9$ & $-6$ \\
$A_2$ & $12$ & $0$ & $12$ & $14$ & $3$ & $11$ & $-8$ \\
\hline
$B$ & $3$ & $0$ & $3$ & $8$ & $1$ & $7$ & $-6$ \\
\end{tabular}
\end{center}
\end{table}
\item The type $A_1$ can not be realized by a smooth action on the standard smooth $K3$ surface.
\end{enumerate}
\end{Theorem}
\begin{Remark}\label{rem:1}
The assertion (1) in \thmref{thm:main} is an application of the remarkable result by A.~L.~Edmonds and J.~H.~Ewing \cite{EE} with Freedman's classification of simply-connected topological $4$-manifolds \cite{Freedman}.
\end{Remark}
\begin{Remark}
To prove the assertion (2), we use the mod $p$ vanishing theorem of Seiberg-Witten invariants by F.~Fang \cite{Fang}, with the fact that the Seiberg-Witten invariants for the canonical $\operatorname{Spin}^c$-structure of the standard smooth $K3$ surface is $\pm 1$.
\end{Remark}
\begin{Remark}
The type $A_0$, $A_1$ and $A_2$ are actions which act trivially on $H^+(X;{\mathbb R})$.
\end{Remark}
\begin{Remark}\label{rem:std}
The type $A_0$ is realized by a smooth action on the Fermat quartic surface.
(See \propref{prop:Fermat}. )
\end{Remark}
\begin{Remark}
We do not know whether $A_2$ and $B$ can be realized by a smooth action for some smooth structure on a $K3$ surface, or not.
\end{Remark}
\begin{Remark}
K.~Kiyono proved the existence of unsmoothable locally linear pseudofree actions on the connected sums of $S^2\times S^2$ \cite{Kiyono}.
Although he also uses the Seiberg-Witten gauge theory, his method is different from ours.
It is interesting that he invokes the ``$G$-invariant $10/8$-theorem'' instead of Seiberg-Witten invariants.
(A related paper is \cite{KL}.)
\end{Remark}
\section{The proof of the assertion (1)}\label{sec:proof1}
As mentioned in \remref{rem:1}, the proof of the assertion (1) of \thmref{thm:main} will rely on the realization theorem by A.~L.~Edmonds and J.~H.~Ewing \cite{EE}. First, we summarize their result in the very special case when $G={\mathbb Z}/3$.
\begin{Theorem}[\cite{EE}]\label{thm:EE}
Let $G$ be the cyclic group of order $3$.
Suppose that one is given a fixed point data
$$
\mathcal D = \{(a_0,b_0), (a_1,b_1), \ldots, (a_n,b_n), (a_{n+1},b_{n+1})\},
$$
where $a_i, b_i \in {\mathbb Z}/3\setminus\{0\}$, and a $G$-invariant symmetric unimodular form
$$
\Phi\colon V\times V\to {\mathbb Z},
$$
where $V$ is a finitely generated ${\mathbb Z}$-free ${\mathbb Z}[G]$-module.
Then the data $\mathcal D$ and the form $(V,\Phi)$ are realizable by a locally linear, pseudofree, $G$-action on a closed, simply-connected, topological $4$-manifold if and only if they satisfy the following two conditions{\textup :}
\begin{enumerate}
\item The condition REP{\textup :} As a ${\mathbb Z}[G]$-module, $V$ splits into $F\oplus T$, where $F$ is free and $T$ is a trivial ${\mathbb Z}[G]$-module with $\operatorname{rank}_{\mathbb Z} T = n$.
\item The condition GSF{\textup :} The $G$-Signature Formula is satisfied{\textup :}
$$
\operatorname{Sign}(g, (V,\Phi)) = \sum_{i=0}^{n+1}\frac{(\zeta^{a_i} + 1)(\zeta^{b_i} + 1)}{(\zeta^{a_i} - 1)(\zeta^{b_i} - 1)},
$$
where $\zeta = \exp(2\pi\sqrt{-1}/3)$.
\end{enumerate}
\end{Theorem}
\begin{Remark}
In \cite{EE}, A.~L.~Edmonds and J.~H.~Ewing prove the realization theorem for all cyclic groups of prime order $p$, and for general $p$, the third condition {\it TOR} which is related to the Reidemeister torsion should be satisfied.
However, when $p=3$, the condition {\it TOR} is redundant.
This follows from the fact that the class number of ${\mathbb Z}[\zeta]$ is $1$, and Corollary 3.2 of \cite{EE}.
\end{Remark}
Now, let us begin the proof of the assertion (1).
Suppose that a locally linear pseudofree $G$-action on $X$ is given.
First of all, the ordinary Lefschetz formula should hold: $L(g,X) = 2 + \operatorname{tr} ( g|_{H^2(X)}) =\#X^G$.
Noting that $\#X^G = m_+ + m_- $ and $ 2 + \operatorname{tr} ( g|_{H^2(X)}) \leq 24$, we obtain
$$
m_+ + m_- \leq 24.
$$
This is compatible with the condition {\it REP}.
Note that
\begin{equation*}
\chi (X/G) = \frac13 \{ 24 + 2 (m_+ + m_- )\}.
\end{equation*}
By \thmref{thm:EE}, the $G$-Signature Formula should hold:
\begin{align*}
\operatorname{Sign} (g,X) &= \operatorname{Sign} (g^2,X) = \frac13 (m_+ - m_-),\\
\operatorname{Sign} (X/G) &= \frac13 \left\{ -16 + \frac23 (m_+ - m_-)\right\}.\\
\end{align*}
Since $\operatorname{Sign} (X/G)$ is an integer,
$
m_+ - m_- \equiv 6 \mod 9.
$
This with the inequality $-24\leq m_+ - m_-\leq 24$ implies that
\begin{equation}\label{eq:diff}
m_+ - m_- = -21,-12,-3,6,15,24.
\end{equation}
We can calculate $b_+^G$ and $b_-^G$ from $\chi(X/G)$ and $\operatorname{Sign} (X/G)$.
Since $b_+^G$ is $1$ or $3$, we obtain the following:
\begin{itemize}
\item When $b_+^G=1$, $2m_+ + m_-=3$.
\item When $b_+^G=3$, $2m_+ + m_-=12$.
\end{itemize}
By these equations, \eqref{eq:diff} and non-negativity of $m_+$ and $m_-$, we obtain \tabref{tab:actions}.
Next we will prove the existence of actions. First, we construct a smooth $G$-action of type $A_0$ on the Fermat quartic surface.
\begin{Proposition}\label{prop:Fermat}
There exists a smooth $G$-action of the type $A_0$ on the Fermat quartic surface $X$ which is defined by the equation $\sum_{i=0}^3 z_i^4 = 0$ in ${\mathbb C}P^3$.
\end{Proposition}
\proof
By the symmetry of the defining equation, the symmetric group of degree $4$ acts on $X$ as permutations of variables.
Therefore $G$ acts smoothly on $X$ via this action.
We can easily check that the $G$-action is pseudofree, and belongs to the type $A_0$.
\endproof
To prove the existence of actions of other types, we invoke \thmref{thm:EE}.
We need to construct $G$-actions on the intersection form.
Let $(V_{K3}, \Phi_{K3})$ be the intersection form of the $K3$ surface, which is even and indefinite.
Since an even indefinite form is completely characterized by its rank and signature, $(V_{K3}, \Phi_{K3})$ is isomorphic to $3H\oplus \Gamma_{16}$, where $H$ is the hyperbolic form, and $\Gamma_{16}$ is a negative definite even form of rank $16$.
We will construct $G$-actions on $3H$ and $\Gamma_{16}$ separately.
\begin{Lemma}\label{lem:E16}
For each integer $k$ which satisfies $0\leq k\leq5$, there is a $G$-action on $\Gamma_{16}$ such that
$$
\Gamma_{16}\cong (16-3k){\mathbb Z}\oplus k{\mathbb Z}[G]\text{ as a ${\mathbb Z}[G]$-module}.
$$
\end{Lemma}
\proof
When $k=0$, it suffices to take the trivial $G$-action.
Hence we suppose $k\geq 1$.
Recall that the lattice $\Gamma_{16}$ is the set of $(x_1,\ldots,x_{16})\in(\frac12 {\mathbb Z})^{16}$ which satisfy
\begin{enumerate}
\item $x_i\equiv x_j\mod {\mathbb Z}$ for any $i,j$,
\item $\sum_{i=1}^{16}x_i\equiv 0\mod 2{\mathbb Z}$.
\end{enumerate}
The unimodular bilinear form on $\Gamma_{16}$ is defined by $-\sum_{i=1}^{16}x_i^2$.
Note that the symmetric group of degree $16$ acts on $\Gamma_{16}$ as permutations of components.
For a fixed generator $g$ of $G$, define the $G$-action on $\Gamma_{16}$ by
$$g = (1,2,3)(4,5,6)\cdots(3k-2,3k-1,3k),$$
where $(l,m,n)$ is the cyclic permutation of $(x_l, x_m ,x_n)$.
As a basis for $\Gamma_{16}$, we take
\begin{equation*}
f_i = \left\{
\begin{aligned}
e_i+e_{16},& \quad\qquad (i=1,\ldots ,9), \\
e_i-e_{16},& \quad\qquad (i=10,\ldots,15),\\
\frac12 (e_1+e_2&+\cdots+e_{16}),\, (i=16),
\end{aligned}\right.
\end{equation*}
where $e_1,\ldots,e_{16}$ is the usual orthonormal basis for ${\mathbb R}^{16}$.
Then the basis $(f_1,f_2,\ldots,f_{16})$ gives required direct splitting.
\endproof
\begin{Lemma}\label{lem:3H}
There is a $G$-action on $3H$ such that $3H \cong{\mathbb Z}[G]\oplus{\mathbb Z}[G]$ as a ${\mathbb Z} [G]$-module, and $G$-fixed parts of a maximal positive definite subspace and a negative one of $3H\otimes{\mathbb R}$ both have rank $1$.
\end{Lemma}
\proof
Such a $G$-action is given as permutations of three $H$'s.
\endproof
With \lemref{lem:E16} and \lemref{lem:3H} understood, for each of $A_1$, $A_2$ and $B$, the corresponding $G$-action on $(V_{K3}, \Phi_{K3})$ can be constructed. That is,
\begin{itemize}
\item for $A_1$, $3H \cong 6{\mathbb Z}$ and $\Gamma_{16}\cong {\mathbb Z}\oplus 5{\mathbb Z}[G]$,
\item for $A_2$, $3H \cong 6{\mathbb Z}$ and $\Gamma_{16}\cong 4{\mathbb Z}\oplus 4{\mathbb Z}[G]$,
\item for $B$, $3H \cong {\mathbb Z} [G]\oplus{\mathbb Z} [G]$ and $\Gamma_{16}\cong {\mathbb Z}\oplus 5{\mathbb Z}[G]$.
\end{itemize}
Now the conditions {\it REP} and {\it GSF} are satisfied. Therefore we have a locally linear pseudofree $G$-action on a closed simply-connected $4$-manifold $X$ whose intersection form is just $(V_{K3}, \Phi_{K3})$ by \thmref{thm:EE}.
Since $X$ is simply-connected and its intersection form is even, we see that $X$ is homeomorphic to the $K3$ surface by Freedman's theorem \cite{Freedman}.
Thus the assertion (1) is proved.
\begin{Remark}
By using Theorem 1.3 in \cite{BW}, we can prove that the topological conjugacy class of actions of the type $B$ is unique, that is, any action of the type $B$ is conjugate to the action which we have constructed.
\end{Remark}
\begin{Remark}
We can also construct a locally linear pseudofree action of the type $A_0$ by \thmref{thm:EE}.
For this purpose, we need to construct a $G$-action on $3H$ such that $3H \cong 3{\mathbb Z} \oplus{\mathbb Z} [G]$ as a ${\mathbb Z}[G]$-module, and the rank of a $G$-fixed maximal positive definite subspace of $3H\otimes{\mathbb R}$ is $3$ and the rank of a negative one is $1$.
Such a $G$-action on $3H$ is constructed from the cohomology ring of a $4$-torus with a $G$-action as follows:
Let $\zeta =\exp(2\pi\sqrt{-1}/3)$, and consider the lattice ${\mathbb Z}\oplus\zeta{\mathbb Z}\subset{\mathbb C}$.
For each $i = 0,1,2$, let us consider a $2$-torus $T_{\zeta^i} = {\mathbb C}/({\mathbb Z}\oplus\zeta{\mathbb Z})$ with a $G$-action, where the $G$-action is defined by the multiplication by $\zeta^i$.
Next, consider the $4$-torus $T_{12}=T_{\zeta}\times T_{\zeta^2}$ with the diagonal $G$-action.
Then we can prove that the induced $G$-action on $H^2(T_{12};{\mathbb Z})$ has required properties.
Using this with a $G$-action on $\Gamma_{16}$ such that $\Gamma_{16}\cong {\mathbb Z}\oplus 5{\mathbb Z}[G]$, we obtain a $G$-action of the type $A_0$ by \thmref{thm:EE}.
\end{Remark}
\section{The proof of the assertion (2)}\label{sec:proof2}
In this section, we consider $X$ as the smooth $K3$ surface with the standard smooth structure.
Suppose now that a smooth action of the type $A_1$ exists.
To obtain a contradiction, we use a Seiberg-Witten invariant of $X$.
Recall that, for a smooth $4$-manifold with $b_1=0$ and $b_+\geq 2$, Seiberg-Witten invariants constitute a map from the set of equivalence classes of $\operatorname{Spin}^c$-structures on $X$ to ${\mathbb Z}$.
That is, for a $\operatorname{Spin}^c$-structure $c$, the corresponding Seiberg-Witten invariant $\operatorname{SW}_X(c)$ is given as an integer.
We use the canonical $\operatorname{Spin}^c$-structure $c_0$ which is characterized as one whose determinant line bundle $L$ is trivial in the case of $K3$ surface $X$.
Note that $c_0$ is also characterized as the $\operatorname{Spin}^c$-structure which is determined by the $\operatorname{Spin}$-structure.
Since $X$ is simply-connected and $L$ is trivial, we can see that every $G={\mathbb Z}/3$-action on $X$ lifts to a $G$-action on the $\operatorname{Spin}^c$-structure $c_0$.
Then, the $G$-index of the Dirac operator $D_X$ can be written as $\mathop{\text{\rm ind}}\nolimits_G D_X = \sum_{j=0}^2 k_j{\mathbb C}_j \in R(G) \cong {\mathbb Z}[t]/(t^3-1)$, where ${\mathbb C}_j$ is the complex $1$-dimensional weight $j$ representation of $G$ and $R(G)$ is the representation ring of $G$.
F.~Fang \cite{Fang} proves the mod $p$ vanishing theorem under a ${\mathbb Z}/p$-action where $p$ is a prime.
\begin{Theorem}[\cite{Fang}]\label{thm:Fang}
Let $Y$ be a smooth closed oriented $4$-dimensional ${\mathbb Z}/p$-manifold with $b_1=0$ and $b_+\geq 2$, where $p$ is a prime. Suppose that $c$ is a $\operatorname{Spin}^c$-structure on which the ${\mathbb Z}/p$-action lifts, and that ${\mathbb Z}/p$ acts trivially on $H^+(Y;{\mathbb R})$.
If $2k_j \leq b_+ -1$ for $j=0,\ldots,p-1$, then
\begin{equation*}
\operatorname{SW}_Y(c) \equiv 0 \mod p.
\end{equation*}
\end{Theorem}
\begin{Remark}
The second author generalized \thmref{thm:Fang} to the case when $b_1>0$ \cite{Nakamura}.
\end{Remark}
On the other hand, it is well-known that $\operatorname{SW}_X(c_0) = \pm 1$ for the standard $K3$ surface $X$. (See e.g. \cite{FM} or \cite{T}.)
Therefore, in the case when $G$ acts on $(X,c_0)$, we have $k_j >1$ for some $j$ by \thmref{thm:Fang}.
Coefficients $k_j$ are calculated by the $G$-spin theorem.
(For the $G$-spin theorem, we refer \cite{AB,AH,LM,Sh}.)
For the fixed generator $g\in G$, the Lefschetz number $\mathop{\text{\rm ind}}\nolimits_g D_{X}$ is calculated by the formula as
\begin{equation*}
\mathop{\text{\rm ind}}\nolimits_g D_{X} =\sum_{j=0}^{2} \zeta^j k_j = \sum_{P\in X^G} \nu(P),
\end{equation*}
where $\zeta=\exp (2\pi\sqrt{-1}/3)$ and $\nu(P)$ is a complex number associated to each fixed point $P$ given as follows.
Suppose that a fixed point $P$ has the representation type $(a, b)$ with respect to $g$.
Then the number $\nu (P)$ associated to $P$ is given by,
\begin{equation}\label{eq:nup}
\nu (P) = \frac1{{(\zeta^{a})}^{1/2} - {(\zeta^{a})}^{-1/2}}\frac1{{(\zeta^{b})}^{1/2} - {(\zeta^{b})}^{-1/2}}.
\end{equation}
The signs of ${(\zeta^{a})}^{1/2}$ and ${(\zeta^{b})}^{1/2}$ are determined such that
$$
\left\{{(\zeta^{a})}^{1/2}\right\}^3 =\left\{{(\zeta^{b})}^{1/2}\right\}^3 = 1.
$$
(This is because, in our case, the $g$-action on the $\operatorname{Spin}$-structure generates a $G$-action on the $\operatorname{Spin}$-structure. See \cite[p.20]{AH} or \cite[p.175]{Sh}.)
With the above understood, we obtain
\begin{align*}
\mathop{\text{\rm ind}}\nolimits_g D_X & =k_0 + \zeta k_1 + \zeta^2 k_2 = \frac13 (m_+ - m_-),\\
\mathop{\text{\rm ind}}\nolimits_{g^2} D_X & = k_0 + \zeta^2 k_1 + \zeta k_2 = \frac13 (m_+ - m_-),\\
\mathop{\text{\rm ind}}\nolimits_1 D_X & = k_0 + k_1 + k_2 = 2.\\
\end{align*}
Solving these equations, we have
\begin{align*}
k_0 &= \frac19 \left\{ 6 + 2 (m_+ - m_-)\right\},\\
k_1 = k_2 &= \frac19 \left\{ 6 - (m_+ - m_-)\right\}.
\end{align*}
In the case of an action of type $A_1$, $m_+=3$ and $m_- =6$.
Hence, we have $k_0=0$ and $k_1=k_2=1$.
Therefore there is no $j$ so that $k_j >1$. This is a contradiction. Thus the assertion (2) is proved.
\begin{Remark}\label{rem:inf}
It is clear that a proposition similar to (2) of \thmref{thm:main} is true for the smooth structure such that the Seiberg-Witten invariant for the $\operatorname{Spin}^c$-structure with trivial determinant line bundle is not congruent to $0$ modulo $3$.
Let us examine elliptic surfaces which are homeomorphic to $K3$.
Consider relatively minimal regular elliptic surfaces with at most two multiple fibers whose Euler number is $24$.
Let $p$ and $q$ be the multiplicities of multiple fibers, and let us write such elliptic surface as $E(2)_{p,q}$.
(We assume that $p$ and $q$ may be $1$.)
The following are known about $E(2)_{p,q}$.
\begin{enumerate}
\item $E(2)_{1,1}$ (no multiple fiber) is diffeomorphic to the standard $K3$.
\item $E(2)_{p,q}$ is homeomorphic to the $K3$ surface if and only if $\gcd(p,q)=1$. (See e.g.\cite{Ue}.)
\item $E(2)_{p,q}$ is not diffeomorphic to $E(2)_{p^\prime,q^\prime}$ if $pq\neq p^\prime q^\prime$\cite{FM0}.
\item Let $c_0$ be the $\operatorname{Spin}^c$-structure with trivial determinant line bundle.
If $p$ and $q$ are odd, then $\operatorname{SW}_{E(2)_{p,q}}(c_0)=\pm 1$ \cite{FM2,FS}.
\end{enumerate}
Thus we see that the type $A_1$ can not be realized by a smooth action on $E(2)_{p,q}$ such that $\gcd(p,q)=1$ and $p$ and $q$ are odd.
Note that there are infinitely many $(p,q)$ which give different smooth structures.
\end{Remark}
\end{document} |
\begin{document}
\title{Exact dynamics of finite Glauber-Fock photonic lattices}
\author{B. M. Rodr\'{\i}guez-Lara}
\affiliation{Centre for Quantum Technologies, National University of Singapore, 2 Science Drive 3, Singapore 117542. }
\begin{abstract}
The dynamics of a Glauber-Fock lattice of size $N$ is given through exact diagonalization of the corresponding Hamiltonian; the spectrum $\{ \lambda_{k} \}$ is given by the roots of the $N$-th Hermite polynomial, $H_{N}(\lambda_k/\sqrt{2})=0$, and the eigenstates are given in terms of Hermite polynomials evaluated at these roots.
The exact dynamics is used to study coherent phenomena in discrete lattices.
Due to the symmetry and spacing of the eigenvalues $\{ \lambda_{k} \}$, oscillatory behavior with highly localized spectra, that is, near complete revivals of the photon number and partial recovery of the initial state at given waveguides, is predicted.
\end{abstract}
\pacs{42.50.Dv, 42.50.Ex, 05.60.Gg, 42.82.Et}
\maketitle
\section{Introduction} \label{sec:S1}
Waveguide lattices, that is, arrays of single-mode waveguides coupled by evanescent fields, have been the focus of considerable interest due to their ability to simulate a variety of quantum effects under negligible decoherence.
Examples of such quantum effects are Bloch oscillations in lattices with linearly varying on-site refraction index \cite{Peschel1998p1701,Pertsch1999p4752,Morandotti1999p4756,Rai2009p053849}, Zeno effect due to a defect in the coupling between the first two waveguides in a lattice with otherwise homogeneously coupled components \cite{Longhi2006p110402}, random walks in homogeneous lattices \cite{Rai2008p042304,Perets2008p170506,Bromberg2009p253904}, Anderson localization in homogeneous lattices where controlled disorder has been added \cite{Thompson2010p053805,Martin2011p13636} and the effect of isolated defects on quantum correlations \cite{Longhi2011p033821}.
In particular, lattices where the coupling is homogeneous are well understood and their analytical closed form time evolution is well known \cite{Jones1965p261,Makris2006p036616,Rai2008p042304,SotoEguibar2011p158}.
Recently, a photonic waveguide lattice where the coupling between adjacent waveguides varies as the square root of their position in the lattice has been proposed. The propagation of a classical field in a semi-infinite array of this type, the so-called Glauber-Fock photonic lattice, has been solved in close analytical form by creatively mapping the $j$-th waveguide to the $j$-th Fock state, this has been shown to produce a classical analogue to quantum coherent and displaced Fock states at the lattice output \cite{PerezLeija2010p2010}.
The quantum correlations of non-classical light input have also been analyzed by numerical diagonalization and classical experimental results for single waveguide input have been presented for a lattice composed of sixty waveguides \cite{Keil2011p103601}.
Here, in Section \ref{sec:S2}, it is shown that the finite Glauber-Fock Hamiltonian describing an array of identical waveguides, where the nearest-neighbor coupling varies as the square root of the position of the waveguide, is exactly diagonalizable.
The exact result is given in terms of Hermite polynomials evaluated at the roots of the $N$-th Hermite polynomial, where $N$ is the number of waveguides in the system.
In Section \ref{sec:S3}, it is shown that, for a Fock state coupled to the zeroth waveguide, there exists an almost complete revival of the probability to find the photons back in the starting waveguide; the opposite occurs when the state couples to the end waveguide, where oscillations are polychromatic and revivals are weak.
Single-waveguide revivals do not occur in semi-infinite Glauber-Fock lattices, nor in uniform lattices unless multi-input phenomena or tuning of the lattice is used; say, Talbot effect \cite{Iwanow2005p053902} or Bloch oscillations \cite{Peschel1998p1701,Pertsch1999p4752}, in that order.
Single input revivals occur in more complex waveguide lattices, for example, harmonic oscillator \cite{Gordon2004p2752}, Jaynes-Cummings \cite{Longhi2011} and Glauber-Fock oscillator \cite{PerezLeija2011p1109871} lattices.
The dynamics of initial states involving multiple waveguides is presented in Section \ref{sec:S4}.
Time evolution of two-waveguide input, in particular product and NOON states, is explicitly discussed and revivals for fidelities, in the single-photon superposition case, and two-photon correlations, for the two-photon case, are shown.
Finally, in Section \ref{sec:S5}, conclusions are presented.
\begin{figure}\label{fig:Fig1}
\end{figure}
\section{Exact dynamics} \label{sec:S2}
The Hamiltonian describing a one-dimensional chain of $N$ cavities where nearest neighbors are coupled according to their position in the chain is given by, in units of $\hbar$,
\begin{eqnarray}
\hat{H} = \omega \sum_{j=0}^{N-1} \hat{a}^{\dagger}_{j} \hat{a}_{j} + g \sum_{j=0}^{N-2} \sqrt{j+1} \left( \hat{a}^{\dagger}_{j} \hat{a}_{j+1} + \hat{a}_{j} \hat{a}^{\dagger}_{j+1}\right). \label{eq:GFH}
\end{eqnarray}
The operator $\hat{a}^{\dagger}_{k}$ ($\hat{a}_{k}$) creates (annihilates) a photon in the $k$-th cavity, the constants $\omega$ and $g$ are the field frequency and the base coupling between cavities; these terms are related to the refraction index of the waveguides and the inter-waveguide distance in the photonic lattice.
Figure \ref{fig:Fig1} shows a sampler of systems modeled by this Hamiltonian.
In the frame defined by the free field, $U_{0}(t) = e^{- \imath \omega t \sum_{j=0}^{N-1} \hat{a}^{\dagger}_{j} \hat{a}_{j} }$, the dynamics is given by the Hamiltonian, in units of $\hbar g$,
\begin{eqnarray} \label{eq:HI}
\hat{H}_{I} = \sum_{j=0}^{N-2} \sqrt{j+1} \left( \hat{a}^{\dagger}_{j} \hat{a}_{j+1} + \hat{a}_{j} \hat{a}^{\dagger}_{j+1}\right)
\end{eqnarray}
whose Heisenberg equations of motion, $\dot{a}_{j} = \imath [\hat{H}_{I}, \hat{a}_{j}] $, are those of a Glauber-Fock photonic lattice \cite{PerezLeija2010p2010},
\begin{eqnarray}
\imath \dot{a}_{0} &=& \hat{a}_{1}, \\
\imath \dot{a}_{j} &=& \sqrt{j+1} \hat{a}_{j+1} + \sqrt{j} \hat{a}_{j-1}, \\
\imath \dot{a}_{N-1} &=& \sqrt{N-1} \hat{a}_{N-2}.
\end{eqnarray}
In matrix form, the Heisenberg set is given by the expression $ \partial_{t} \vec{a} = -\imath \hat{M} \vec{a}$ where the matrix elements of $\hat{M}$ are $m_{j,k} = \delta_{j+1,k} \sqrt{k} + \delta_{j,k+1} \sqrt{j}$, with $j,k = 0,1,\ldots,N-1$.
As the matrix $\hat{M}$ is real, symmetric and tridiagonal, there exists a solution for this differential set given by,
\begin{eqnarray}
\vec{a}(t) &=& e^{-\imath \hat{M} t} \vec{a}(0), \\
&=& \hat{V}^{\dagger} e^{-\imath \Hat{\Lambda} t} \hat{V} \vec{a}(0), \label{eq:HEMa}
\end{eqnarray}
where $\hat{\Lambda}$ is a diagonal matrix with elements $\lambda_{j}\equiv\lambda_{j,j}$ being the eigenvalues of $\hat{M}$, and the $j$-th eigenvector with $k$-th element $v_{jk}$ defines the $j$-th row of the matrix $\hat{V}$. As $v^{\ast}_{j,k} = v_{k,j}$, it is possible to write
\begin{eqnarray} \label{eq:afnt}
\hat{a}_{j}(t) &=& \sum_{k=0}^{N-1} U_{j,k}(t) \hat{a}_{k}(0), \\
\quad U_{j,k}(t) &=& \sum_{l=0}^{N-1} e^{- \imath \lambda_{l} t} v_{lj} v_{lk}.
\end{eqnarray}
The collective modes $\vec{b}= V \vec{a}$ allow rewriting the equation of motion for the annihilation operator, Eq.\eqref{eq:HEMa}, as
\begin{eqnarray}
\vec{b}(t) &=& e^{-\imath \Hat{\Lambda} t} \vec{b}(0), \end{eqnarray}
which leads to the diagonal Hamiltonian, in units of $\hbar g$,
\begin{eqnarray}
\hat{H}_{I} = \sum_{j=0}^{N-1} \lambda_{j} \hat{b}^{\dagger}_{j} \hat{b}_{j}.
\end{eqnarray}
Thus, the solution to the Heisenberg equations of motion leads to the diagonalization of the Hamiltonian \cite{MarSarao2011p402}.
The method of minors \cite{Horn1990} delivers the eigenvalues $\lambda_{j}$ as the roots of the characteristic polynomial of the matrix $\hat{M}$ given by
\begin{eqnarray}
H_{N}\left( \frac{\lambda_{j}}{\sqrt{2}} \right) = 0, \quad j=0,\ldots,N-1,
\end{eqnarray}
where $H_{n}(x)$ is the $n$-th Hermite polynomial \cite{Lebedev1965} whose zeros are well known \cite{Abramowitz1970}.
The eigenvectors are given by solving the eigenvalue equation $(M - I \lambda_{j}) \vec{v}_{j} = 0$ leading to the recurrence relations,
\begin{eqnarray}
-\lambda_{j} v_{j,0} + \sqrt{1} v_{j,1} &=& 0, \\
\sqrt{k} v_{j,k-1} - \lambda_{j} v_{j,k} + \sqrt{k+1} v_{j,k+1} &=& 0,
\end{eqnarray}
for $j = 0, \ldots, N-1$ and $ k=1,2,\ldots,N-2$. These recurrence relations are fulfilled by the matrix elements
\begin{eqnarray}
v_{j,k} &=& \frac{u_{j,k} }{\sqrt{ \sum_{l=0}^{N-1} u_{j,l}^2} }, \label{eq:vjk} \\
u_{j,k} &=& \frac{1}{\sqrt{2^{k} k!}} H_{k}\left( \frac{\lambda_{j}}{\sqrt{2}} \right),
\end{eqnarray}
for $j,k = 0, \ldots, N-1$. The Hamiltonian has been diagonalized by giving an analytical closed form.
\section{Single-waveguide input} \label{sec:S3}
Let us consider as initial state a Fock state with $m\ge1$ photons in the $p$-th waveguide,
\begin{equation}\label{eq:InFockS}
\vert \psi_{p}(0) \rangle = \frac{1}{\sqrt{m!}} \hat{a}^{\dagger m}_{p}(0) \vert 0 \rangle, \quad p=0,\ldots,N-1.
\end{equation}
The time evolution of the photon number at the $q$-th waveguide for such single input initial state is
\begin{equation} \label{eq:nktht}
\langle n_{q} \rangle_{p} \equiv \langle \psi_{p}(0) \vert \hat{a}^{\dagger}_{q}(t) \hat{a}_{q}(t) \vert \psi_{p}(0) \rangle = m \vert U_{p,q} (t) \vert^2.
\end{equation}
For a single-photon coupled to a single-waveguide, it is possible to compare published results for a semi-infinite lattice \cite{PerezLeija2010p2010,Keil2011p103601} with the results presented here for a finite large lattice where propagation time is short enough to guarantee that the probability of finding the photon near the $(N-1)$-th waveguide is negligible. This leads to the following identities,
\begin{eqnarray}
\lim_{N \rightarrow \infty} \vert U_{k-s,n} \vert^2 &=& \vert e^{-t^2/2} (\imath t)^{s} \sqrt{\frac{(k-s)!}{k!}} L^{(s)}_{k-s}(t^2) \vert^2, \label{eq:Conjecture1} \\
\lim_{N \rightarrow \infty} \vert U_{k+s,n} \vert^2 &=& \vert e^{-t^2/2} (\imath t)^{s} \sqrt{\frac{k!}{(k+s)!}} L^{(s)}_{k}(t^2) \vert^2, \label{eq:Conjecture2}
\end{eqnarray}
where the expression $L^{(\alpha)}_{j}(x)$ is a generalized Laguerre polynomial \cite{Lebedev1965}. In the case of the initial photon impinging on the zeroth waveguide, the value becomes
\begin{eqnarray}
\lim_{N \rightarrow \infty} \vert U_{k-s,0} \vert^2 = \left\vert e^{-t^2/2} \frac{t^{s}}{\sqrt{k!}} \right\vert^2. \label{eq:Conjecture3}
\end{eqnarray}
Figure \ref{fig:Fig2} shows the time evolution of the mean photon number for a lattice composed of two hundred waveguides.
The cases of a single-photon starting in the zeroth, Fig.~\ref{fig:Fig2}(a-c), and fifth, Fig.~\ref{fig:Fig2}(d-f), waveguide are presented; the figure is equivalent to those found in \cite{PerezLeija2010p2010, Keil2011p103601}.
\begin{figure}
\caption{(Color online) Exact time evolution of the mean photon number at the $j$-th waveguide for an initial state consisting of a single-photon in the (a-c) zeroth and (d-f) fifth waveguide for a lattice size of two hundred waveguides. Snapshots at times (b,e) $t= 3$ and (c,f) $t=6$ comparing the exact finite result (blue dots) with the semi-infinite result (solid red line).
Time is given in units of $g^{-1}$.}
\label{fig:Fig2}
\end{figure}
Here, the focus will be the study of finite size behavior. Equation \eqref{eq:nktht} allows the emergence of well defined oscillations and almost complete revivals at the zeroth waveguide as $\langle n_{0} \rangle_{0} = m \vert U_{0,0} \vert^2$ with
\begin{eqnarray}
U_{0,0} = \sum_{l=0}^{N-1} \mathrm{w}_{0,l} e^{-\imath \lambda_{l} t}, \quad \mathrm{w}_{0,l}= \frac{1}{\sum_{k=0}^{N-1} u_{l,k}^2}.
\end{eqnarray}
The weights $\mathrm{w}_{0,l}$ are highly centralized; that is, revivals at the zeroth waveguide are due to a few-component chromatic oscillation of the mean photon number; see Fig.~\ref{fig:Fig3}(b).
Meanwhile, when the Fock state couples to the $(N-1)$-th waveguide,
\begin{eqnarray}
U_{N-1,N-1} = v_{0,N-1}^2 \sum_{l=0}^{N-1} e^{-\imath \lambda_{l} t}, \label{eq:Polychrome}
\end{eqnarray}
all eigenvalues contribute equally to the time evolution of the mean photon number $ \langle n_{N-1} \rangle_{N-1} = m \vert U_{N-1,N-1} \vert^2$ for any given size of the lattice and major revivals have a lower probability to appear than in the opposite extreme case, $\langle n_{0} \rangle_{0}$; see Fig.~\ref{fig:Fig3}(c).
Figure \ref{fig:Fig3}(a) shows the time evolution of the mean photon number at the $j$-th waveguide for the $N$ possible initial conditions where a single-photon starts at the same waveguide, $\langle n_{j} \rangle_{j} = m \vert U_{j,j} \vert^2$.
Figure \ref{fig:Fig3}(b) and (c) shows the spectral weights corresponding to the first and last three waveguides, in that order.
It is possible to see that major revivals of the mean photon number at the initial waveguide only occur when the input field impinges near the zeroth waveguide.
\begin{figure}
\caption{(Color online) Summary of the $N$ different cases where the initial state is given by a single-photon in the $j$-th waveguide. (a) Only the time evolution of the mean photon number at the $j$-th waveguide, $\langle n_{j} \rangle_{j}$, is shown.}
\label{fig:Fig3}
\end{figure}
Figure \ref{fig:Fig4} shows the time evolution of the mean photon number, Eq.\eqref{eq:nktht}, in a finite Glauber-Fock lattice composed of eleven, Fig.~\ref{fig:Fig4}(a), and twenty, Fig.~\ref{fig:Fig4}(b), waveguides for the initial state given in Eq.\eqref{eq:InFockS} at the zeroth waveguide, $p=0$.
In this figure, it is possible to see an oscillator-like behavior for single-waveguide input with almost complete revivals of the mean photon number at the zeroth waveguide; well defined minor revivals occur at the last waveguide of the lattice too.
\begin{figure}
\caption{(Color online) Exact time evolution of the mean photon number at the $j$-th waveguide for an initial state consisting of an $m$-photon Fock state in the zeroth waveguide, $n_{0,j} \equiv \langle n_{j} \rangle_{0}$.}
\label{fig:Fig4}
\end{figure}
\section{Multi-waveguide input} \label{sec:S4}
The focus of this section will be the study of the oscillator-like behavior, already found for single-waveguide input, in the case of multiple-waveguide input.
The dynamics for single-photon and a pair of relevant multi-photon, that is, product and NOON, states will be discussed.
\subsection{Single-photon superposition states}
The general state describing the superposition of one photon coupled to the lattice is
\begin{eqnarray}
\vert \psi(0)_{s} \rangle = \sum_{j=0}^{N-1} c_{j} \hat{a}_{j}^{\dagger} \vert 0 \rangle, \quad \sum_{j=0}^{N-1} \vert {c}_{j} \vert^{2} = 1.
\end{eqnarray}
Such an initial state covers, for example, Bell states, when a single-photon is evenly coupled to two waveguides only, and W-states, when the photon is evenly coupled to all waveguides.
The mean photon number at the $q$-th waveguide evolution for such class of states is given by the expression,
\begin{eqnarray}
\langle n_{q} \rangle_{ps} = \vert \sum_{j=0}^{N-1} c_{j} U_{j,q}(t) \vert^{2}.
\end{eqnarray}
The simplest of such states is the output of a beam splitter with transmission (reflection) $\alpha$ ($\beta= \sqrt{1 - \alpha^2}$) coupled to two waveguides,
\begin{eqnarray}
\vert \psi(0)_{s} \rangle &=& ( \alpha \hat{a}_{j}^{\dagger} + \beta \hat{a}_{k}^{\dagger} ) \vert 0 \rangle, \label{eq:BellS} \\
\langle n_{q} \rangle_{s} &=& \vert \alpha U_{j,q}(t) + \beta U_{k,q}(t) \vert^{2}. \label{eq:Belln}
\end{eqnarray}
Figure \ref{fig:Fig5}(a) shows the evolution of a 50/50 beam splitter, that is, a Bell, initial state coupled to the $(j,k)=(0,1)$ waveguides and Fig.~\ref{fig:Fig5}(b) for $(j,k)=(0,4)$. The oscillator-like behavior of the mean photon number evolution is well defined for the first set of initial conditions and noisy for the second. The revivals of the mean photon number at the starting waveguide are well defined and almost complete for both sets as shown in the following.
\begin{figure}
\caption{(Color online) Time evolution of the mean photon number, Eq.\eqref{eq:Belln}.}
\label{fig:Fig5}
\end{figure}
In order to quantify the likeliness between the initial state and its time evolution, it is possible to define a fidelity,
\begin{eqnarray}
\mathcal{F}(t) &=& \left\vert \langle \psi(0)_{s} \vert \psi(t)_{s} \rangle \right\vert^2 \nonumber \\
&=& \left\vert \vert \alpha \vert^2 U_{j,j} + \vert \beta \vert^2 U_{k,k} + 2 \mathrm{Re}(\alpha^{\ast} \beta) U_{j,k} \right\vert^2, \label{eq:BSFid}
\end{eqnarray}
that is, the fidelity reaches a value of one if the evolved state and the initial state are identical.
Building upon the spectral decomposition ideas shown in Fig.~\ref{fig:Fig3}, it is possible to realize that, in order to get strong revivals, the initial state should be coupled to the zeroth waveguide and the coupled waveguides should be as close as possible.
A lattice of larger size will present stronger revivals of the fidelity because the dominant eigenvalues of the spectra of $U_{j,j}$ and $U_{k,k}$ will be closer to each other allowing them to interfere constructively; of course, period of the revivals will increase with the size of the lattice and, for real world systems, losses will accumulate.
Figure \ref{fig:Fig6}(a,c) shows the time evolution of the fidelity, for the cases mentioned above, Fig.~\ref{fig:Fig5}, and Fig.~\ref{fig:Fig6}(b,d) for identical initial conditions but a lattice of size two hundred waveguides, $N=200$.
Figure \ref{fig:Fig6}(e,f) show the absolute value of the normal mode spectra of the lattice, $\{ \lambda_{j} \}$, ordered such that $\lambda_{0} < \lambda_{1} < \ldots < \lambda_{N-1}$.
\begin{figure}
\caption{(Color online) Time evolution of the fidelity, Eq.\eqref{eq:BSFid}.}
\label{fig:Fig6}
\end{figure}
\subsection{Product states}
Published results for the semi-infinite Glauber-Fock lattice include the evolution of the two-photon separable state \cite{Keil2011p103601} belonging to the class,
\begin{eqnarray}
\vert \psi_{ps}(0) \rangle = \prod_{j=1}^{k} \hat{a}^{\dagger}_{x_{j}} \vert 0 \rangle,
\end{eqnarray}
where $\mathbf{x} = (x_{1},\ldots,x_{k})$ with $x_{i} \in [0,N-1]$ and $x_{i} \neq x_{j}$ for any $i \neq j$.
It is simple to calculate the evolution of the photon number at the $q$-th waveguide,
\begin{eqnarray}
\langle n_{q} \rangle_{ps} = \sum_{j=1}^{k} \vert U_{x_{j},q} \vert^2.
\end{eqnarray}
That is, the probability of finding almost complete revivals in the oscillation of the mean photon number at the starting waveguides depends on the functions $U_{x_{j},q}$ having almost identical centralized spectral distributions; that is, the criteria commented above regarding distance to the zeroth waveguide, separation between field mode components, and size of the lattice hold.
For two-photon product states, the mean photon number evolution and the two-photon correlation function are well known \cite{Bromberg2009p253904,Keil2011p103601},
\begin{eqnarray}
\vert \psi(0)_{ps} \rangle &=& \hat{a}_{j}^{\dagger} \hat{a}_{k}^{\dagger} \vert 0 \rangle, \label{eq:ProdS} \\
\langle n_{q} \rangle_{ps} &=& \vert U_{j,q} \vert^2 + \vert U_{k,q} \vert^2 , \\
\Gamma_{pq}^{ps} &=& \vert U_{p,j} U_{q,k} + U_{p,k} U_{q,j} \vert^2 \label{eq:2PhCorrProd} .
\end{eqnarray}
The two-photon correlation can be used to verify the partial recovery of the initial state.
Figure \ref{fig:Fig7} shows the time evolution of the two photon correlation for a two-photon product state in a lattice of size twenty waveguides. It is possible to see the partial recovery at time approximately equal to $t = \pi/\lambda_{\textrm{min}}$ as expected from all previous arguments; $\lambda_{\textrm{min}} = \vert \lambda_{N/2} \vert$ for both even and odd (with $N/2$ rounded to the next integer) lattice parameter $N$ and, again, $\lambda_{0} < \lambda_{1} < \ldots < \lambda_{N-1}$.
\begin{figure}
\caption{(Color online) Time evolution of the two photon correlation, Eq.\eqref{eq:2PhCorrProd}.}
\label{fig:Fig7}
\end{figure}
\subsection{NOON states}
Higher order NOON states are a highly entangled class of states,
\begin{eqnarray}
\vert \psi(0) \rangle &=& \frac{1}{\sqrt{2\, m!}} \left( \hat{a}_{j}^{\dagger m} + e^{ i m \phi} \hat{a}_{k}^{\dagger m} \right) \vert 0 \rangle, \quad m=2,3,\ldots \label{eq:NOONS}\\
\langle n_{q} \rangle &=& \frac{m}{2} \left( \vert U_{j,q} \vert^2 + \vert U_{k,q} \vert^2 \right).
\end{eqnarray}
Notice that the restriction $m\ge2$ has been implemented to separate the single-photon superposition treated before which delivers an interferometer-like mean photon number evolution.
It is well known that for the two-photon case, $m=2$, the time evolution of the mean photon number is identical with that of the two-photon separable state considered above but the two-photon correlation is different \cite{Bromberg2009p253904}
\begin{eqnarray}
\Gamma_{pq} = \vert U_{p,j} U_{q,j}\vert^2 + \vert U_{p,k} U_{q,k} \vert^2 + 2 \mathrm{Re} \left( e^{i m \phi} U_{p,j}^{\ast} U_{q,j}^{\ast} U_{p,k} U_{q,k} \right). \label{eq:2PhCorrNOON}
\end{eqnarray}
Figure \ref{fig:Fig8} shows the time evolution of the two photon correlation for a two-photon NOON state in a lattice composed by twenty waveguides. Again, it is possible to see the partial recovery at time approximately equal to $t = \pi/\lambda_{\textrm{min}}$ as defined above.
\begin{figure}
\caption{(Color online) Time evolution of the two photon correlation, Eq.\eqref{eq:2PhCorrNOON}.}
\label{fig:Fig8}
\end{figure}
\section{Conclusions} \label{sec:S5}
The exact dynamics of a Hamiltonian describing a finite array of coupled identical photonic waveguides where the coupling varies as the square root of the position of the waveguide in the lattice was presented.
It was shown that the closed form analytical time evolution predicts a strong oscillator-like behavior of the lattice for single and double waveguide input.
That is, the initial state is partially reconstructed periodically, leading to revivals of the mean photon number evolution at the waveguides where the photons started.
The strength of the reconstructions, thus of the revivals, is a combination of input distance from the zeroth waveguide, inter-waveguide input distance, and lattice size.
\begin{acknowledgments}
The author is grateful to Changsuk Noh and Rafael Rabelo for helpful discussion and acknowledges constructive criticism by Dimitris G. Angelakis and Amit Rai.
\end{acknowledgments}
\begin{thebibliography}{24}
\expandafter\ifx\csname natexlab\endcsname\relax\def\natexlab#1{#1}\fi
\expandafter\ifx\csname bibnamefont\endcsname\relax
\def\bibnamefont#1{#1}\fi
\expandafter\ifx\csname bibfnamefont\endcsname\relax
\def\bibfnamefont#1{#1}\fi
\expandafter\ifx\csname citenamefont\endcsname\relax
\def\citenamefont#1{#1}\fi
\expandafter\ifx\csname url\endcsname\relax
\def\url#1{\texttt{#1}}\fi
\expandafter\ifx\csname urlprefix\endcsname\relax\def\urlprefix{URL }\fi
\providecommand{\bibinfo}[2]{#2}
\providecommand{\eprint}[2][]{\url{#2}}
\bibitem[{\citenamefont{Peschel et~al.}(1998)\citenamefont{Peschel, Pertsch,
and Lederer}}]{Peschel1998p1701}
\bibinfo{author}{\bibfnamefont{U.}~\bibnamefont{Peschel}},
\bibinfo{author}{\bibfnamefont{T.}~\bibnamefont{Pertsch}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Lederer}},
\bibinfo{journal}{Opt. Lett.} \textbf{\bibinfo{volume}{23}},
\bibinfo{pages}{1701 } (\bibinfo{year}{1998}).
\bibitem[{\citenamefont{Pertsch et~al.}(1999)\citenamefont{Pertsch, Dannberg,
Elflein, Br\"auer, and Lederer}}]{Pertsch1999p4752}
\bibinfo{author}{\bibfnamefont{T.}~\bibnamefont{Pertsch}},
\bibinfo{author}{\bibfnamefont{P.}~\bibnamefont{Dannberg}},
\bibinfo{author}{\bibfnamefont{W.}~\bibnamefont{Elflein}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Br\"auer}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Lederer}},
\bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{83}},
\bibinfo{pages}{4752 } (\bibinfo{year}{1999}).
\bibitem[{\citenamefont{Morandotti et~al.}(1999)\citenamefont{Morandotti,
Peschel, Aitchison, Eisenberg, and Silberberg}}]{Morandotti1999p4756}
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Morandotti}},
\bibinfo{author}{\bibfnamefont{U.}~\bibnamefont{Peschel}},
\bibinfo{author}{\bibfnamefont{J.~S.} \bibnamefont{Aitchison}},
\bibinfo{author}{\bibfnamefont{H.~S.} \bibnamefont{Eisenberg}},
\bibnamefont{and}
\bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Silberberg}},
\bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{83}},
\bibinfo{pages}{4756 } (\bibinfo{year}{1999}).
\bibitem[{\citenamefont{Rai and Agarwal}(2009)}]{Rai2009p053849}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Rai}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{G.~S.} \bibnamefont{Agarwal}},
\bibinfo{journal}{Phys. Rev. A} \textbf{\bibinfo{volume}{79}},
\bibinfo{pages}{053849} (\bibinfo{year}{2009}).
\bibitem[{\citenamefont{Longhi}(2006)}]{Longhi2006p110402}
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Longhi}},
\bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{97}},
\bibinfo{pages}{110402} (\bibinfo{year}{2006}).
\bibitem[{\citenamefont{Rai et~al.}(2008)\citenamefont{Rai, Agarwal, and
Perk}}]{Rai2008p042304}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Rai}},
\bibinfo{author}{\bibfnamefont{G.~S.} \bibnamefont{Agarwal}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{J.~H.~H.}
\bibnamefont{Perk}}, \bibinfo{journal}{Phys. Rev. A}
\textbf{\bibinfo{volume}{78}}, \bibinfo{pages}{042304}
(\bibinfo{year}{2008}).
\bibitem[{\citenamefont{Perets et~al.}(2008)\citenamefont{Perets, Lahini,
Pozzi, Sorel, Morandotti, and Silberberg}}]{Perets2008p170506}
\bibinfo{author}{\bibfnamefont{H.~B.} \bibnamefont{Perets}},
\bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Lahini}},
\bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Pozzi}},
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Sorel}},
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Morandotti}},
\bibnamefont{and}
\bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Silberberg}},
\bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{100}},
\bibinfo{pages}{170506} (\bibinfo{year}{2008}).
\bibitem[{\citenamefont{Bromberg et~al.}(2009)\citenamefont{Bromberg, Lahini,
Morandotti, and Silberberg}}]{Bromberg2009p253904}
\bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Bromberg}},
\bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Lahini}},
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Morandotti}},
\bibnamefont{and}
\bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Silberberg}},
\bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{102}},
\bibinfo{pages}{253904} (\bibinfo{year}{2009}).
\bibitem[{\citenamefont{Thompson et~al.}(2010)\citenamefont{Thompson, Vemuri,
and Agarwal}}]{Thompson2010p053805}
\bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Thompson}},
\bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Vemuri}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{G.~S.} \bibnamefont{Agarwal}},
\bibinfo{journal}{Phys. Rev. A} \textbf{\bibinfo{volume}{82}},
\bibinfo{pages}{053805} (\bibinfo{year}{2010}).
\bibitem[{\citenamefont{Martin et~al.}(2011)\citenamefont{Martin, Giuseppe,
Perez-Leija, Keil, Dreisow, Heinrich, Nolte, Szameit, Abouraddy,
Christodoulides et~al.}}]{Martin2011p13636}
\bibinfo{author}{\bibfnamefont{L.}~\bibnamefont{Martin}},
\bibinfo{author}{\bibfnamefont{G.~D.} \bibnamefont{Giuseppe}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Perez-Leija}},
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Keil}},
\bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Dreisow}},
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Heinrich}},
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Nolte}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Szameit}},
\bibinfo{author}{\bibfnamefont{A.~F.} \bibnamefont{Abouraddy}},
\bibinfo{author}{\bibfnamefont{D.~N.} \bibnamefont{Christodoulides}},
\bibnamefont{et~al.}, \bibinfo{journal}{Opt. Express}
\textbf{\bibinfo{volume}{19}}, \bibinfo{pages}{13636 }
(\bibinfo{year}{2011}).
\bibitem[{\citenamefont{Longhi}(2011{\natexlab{a}})}]{Longhi2011p033821}
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Longhi}},
\bibinfo{journal}{Phys. Rev. A} \textbf{\bibinfo{volume}{83}},
\bibinfo{pages}{033821} (\bibinfo{year}{2011}{\natexlab{a}}).
\bibitem[{\citenamefont{Jones}(1965)}]{Jones1965p261}
\bibinfo{author}{\bibfnamefont{A.~L.} \bibnamefont{Jones}},
\bibinfo{journal}{J. Opt. Soc. Am.} \textbf{\bibinfo{volume}{55}},
\bibinfo{pages}{261 } (\bibinfo{year}{1965}).
\bibitem[{\citenamefont{Makris and Christodoulides}(2006)}]{Makris2006p036616}
\bibinfo{author}{\bibfnamefont{K.~G.} \bibnamefont{Makris}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{D.~N.} \bibnamefont{Christodoulides}},
\bibinfo{journal}{Phys. Rev. E} \textbf{\bibinfo{volume}{73}},
\bibinfo{pages}{036616} (\bibinfo{year}{2006}).
\bibitem[{\citenamefont{Soto-Eguibar et~al.}(2011)\citenamefont{Soto-Eguibar,
Aguilar-Loreto, Perez-Leija, Moya-Cessa, and
Christodoulidesc}}]{SotoEguibar2011p158}
\bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Soto-Eguibar}},
\bibinfo{author}{\bibfnamefont{O.}~\bibnamefont{Aguilar-Loreto}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Perez-Leija}},
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Moya-Cessa}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{D.~N.}
\bibnamefont{Christodoulidesc}}, \bibinfo{journal}{Rev. Mex. Fis.}
\textbf{\bibinfo{volume}{57}}, \bibinfo{pages}{158 } (\bibinfo{year}{2011}).
\bibitem[{\citenamefont{Perez-Leija et~al.}(2010)\citenamefont{Perez-Leija,
Moya-Cessa, Szameit, and Christodoulides}}]{PerezLeija2010p2010}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Perez-Leija}},
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Moya-Cessa}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Szameit}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{D.~N.} \bibnamefont{Christodoulides}},
\bibinfo{journal}{Opt. Lett.} \textbf{\bibinfo{volume}{35}},
\bibinfo{pages}{2010} (\bibinfo{year}{2010}).
\bibitem[{\citenamefont{Keil et~al.}(2011)\citenamefont{Keil, Perez-Leija,
Dreisow, Heinrich, Moya-Cessa, Nolte, Christodoulides, and
Szameit}}]{Keil2011p103601}
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Keil}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Perez-Leija}},
\bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Dreisow}},
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Heinrich}},
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Moya-Cessa}},
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Nolte}},
\bibinfo{author}{\bibfnamefont{D.~N.} \bibnamefont{Christodoulides}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Szameit}},
\bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{107}},
\bibinfo{pages}{103601} (\bibinfo{year}{2011}).
\bibitem[{\citenamefont{Iwanow et~al.}(2005)\citenamefont{Iwanow, May-Arrioja,
Christodoulides, Stegeman, Min, and Sohler}}]{Iwanow2005p053902}
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Iwanow}},
\bibinfo{author}{\bibfnamefont{D.~A.} \bibnamefont{May-Arrioja}},
\bibinfo{author}{\bibfnamefont{D.~N.} \bibnamefont{Christodoulides}},
\bibinfo{author}{\bibfnamefont{G.~I.} \bibnamefont{Stegeman}},
\bibinfo{author}{\bibfnamefont{Y.}~\bibnamefont{Min}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{W.}~\bibnamefont{Sohler}},
\bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{95}},
\bibinfo{pages}{053902} (\bibinfo{year}{2005}).
\bibitem[{\citenamefont{Gordon}(2004)}]{Gordon2004p2752}
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Gordon}},
\bibinfo{journal}{Opt. Lett.} \textbf{\bibinfo{volume}{29}},
\bibinfo{pages}{2752 } (\bibinfo{year}{2004}).
\bibitem[{\citenamefont{Longhi}(2011{\natexlab{b}})}]{Longhi2011}
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Longhi}},
\bibinfo{journal}{Opt. Lett.} \textbf{\bibinfo{volume}{36}},
\bibinfo{pages}{3407 } (\bibinfo{year}{2011}{\natexlab{b}}).
\bibitem[{\citenamefont{Perez-Leija et~al.}(2011)\citenamefont{Perez-Leija,
Keil, Szameit, Abouraddy, Moya-Cessa, and
Christodoulides}}]{PerezLeija2011p1109871}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Perez-Leija}},
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Keil}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Szameit}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Abouraddy}},
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Moya-Cessa}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{D.~N.}
\bibnamefont{Christodoulides}}, \bibinfo{journal}{arXiv:}
\bibinfo{pages}{1109871 [quant--ph]} (\bibinfo{year}{2011}).
\bibitem[{\citenamefont{Mar-Sarao et~al.}(2011)\citenamefont{Mar-Sarao,
Soto-Eguibar, and Moya-Cessa}}]{MarSarao2011p402}
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Mar-Sarao}},
\bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Soto-Eguibar}},
\bibnamefont{and}
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Moya-Cessa}},
\bibinfo{journal}{Ann. Phys. (Berlin)} \textbf{\bibinfo{volume}{523}},
\bibinfo{pages}{402 } (\bibinfo{year}{2011}).
\bibitem[{\citenamefont{Horn and Johnson}(1990)}]{Horn1990}
\bibinfo{author}{\bibfnamefont{R.~A.} \bibnamefont{Horn}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{C.~R.} \bibnamefont{Johnson}},
\emph{\bibinfo{title}{Matrix Analysis}} (\bibinfo{publisher}{Cambridge
University Press}, \bibinfo{year}{1990}).
\bibitem[{\citenamefont{Lebedev}(1965)}]{Lebedev1965}
\bibinfo{author}{\bibfnamefont{N.~N.} \bibnamefont{Lebedev}},
\emph{\bibinfo{title}{Special functions and their applications}}
(\bibinfo{publisher}{Prentice-Hall}, \bibinfo{year}{1965}).
\bibitem[{\citenamefont{Abramowitz and Stegun}(1970)}]{Abramowitz1970}
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Abramowitz}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{I.~A.} \bibnamefont{Stegun}},
\emph{\bibinfo{title}{Handbook of Mathematical Functions}}
(\bibinfo{year}{1970}).
\end{thebibliography}
\end{document} |
\begin{document}
\begin{abstract} We study $L^p-L^r$ restriction estimates for algebraic varieties in $d$-dimensional
vector spaces over finite fields. Unlike the Euclidean case, if the dimension $d$ is even, then it is
conjectured that the $L^{(2d+2)/(d+3)}-L^2$ Stein-Tomas restriction result can be improved to the
$L^{(2d+4)/(d+4)}-L^2$ estimate for both spheres and paraboloids in finite fields.
In this paper we show that the conjectured $L^p-L^2$ restriction estimate holds in the specific case when
test functions under consideration are restricted to $d$-coordinate functions or homogeneous functions of
degree zero. To deduce our result, we use the connection between the restriction phenomena for our
varieties in $d$ dimensions and those for homogeneous varieties in $(d+1)$ dimensions.
\end{abstract}
\maketitle
\section{Introduction}
Let $V$ be a subset of ${\mathbb R}^d, d\geq 2,$ and $d\sigma$ a positive measure
supported on $V$. The classical restriction problem asks us to determine $1\leq p,r \leq \infty$ such that
the following restriction estimate holds:
\begin{equation}\label{defr}
\|\widehat{f}\|_{L^r(V, d\sigma)}\le C_{p,r,d} ~\|f\|_{L^p(\mathbb R^d)}
\end{equation}
for every Schwartz function $f:\mathbb R^d \to \mathbb C.$
By duality, the restriction estimate (\ref{defr}) is the same as the following extension estimate:
$$ \|(gd\sigma)^\vee\|_{L^{p^\prime}(\mathbb R^d)} \le C_{p,r,d} ~\|g\|_{L^{r^\prime}(V, d\sigma)},$$
where $p^{\prime}=p/(p-1)$ and $r^{\prime}=r/(r-1).$
This problem was addressed and studied by E.M. Stein (\cite{St78}).
Much attention has been given to this problem, in part because it is closely related to other important problems such as the Falconer distance problem, the Kakeya problem, and the Bochner-Riesz problem
(for example, see \cite{Er05, BCT06, Ca92, Ta99}). The complete answer to the restriction problem is known only for certain lower dimensional hypersurfaces.
For instance, Zygmund (\cite{Zy74}) established the restriction conjecture for the circle and the parabola in the plane.
Barcelo (\cite{Ba85}) and Wolff (\cite{Wo01}) also solved it for the cone of $\mathbb R^3$ and $\mathbb R^4$, respectively.
However, the restriction conjecture remains open in other higher dimensions.
The best known result for the cone of $\mathbb R^d, d\geq 5$, is due to Wolff (\cite{Wo01}) who utilized the bilinear restriction method.
Terence Tao (\cite{Ta03}) also used the method to derive the best known restriction results on the sphere and paraboloid of $\mathbb R^d, d\geq 3.$
However, it has been believed that classically used analytical approaches are not enough to settle the restriction problem.
We refer reader to Tao's survey paper \cite{Ta04} and references therein for currently known skills to deduce restriction results in the Euclidean case. \\
In recent years, problems in the Euclidean space have been studied in the finite field setting.
Motivation on the study of Euclidean problems in finite fields is to understand the original problems in simple finite field structure.
In 1999, Thomas Wolff (\cite{Wo99}) formulated the Kakeya problem in finite fields and
new results on the problem were addressed in the subsequent papers (see \cite{Ro01, MT04, Ta05}).
Surprisingly, Dvir (\cite{Dv09}) proved the finite field Kakeya conjecture by a beautifully simple, new argument based on the polynomial method.
His work has inspired researchers to further efforts for seeking solutions to other analysis problems in finite fields.
In \cite{MT04}, Mockenhaupt and Tao first investigated the Fourier restriction problem for various algebraic varieties in the finite field setting
and they addressed interesting results on this problem. Further efforts to understand the finite field restriction problem have been made by other researchers (see, for example, \cite{IK09, IK10, Ko13, KS12, KS1, LL13, LL10}). In particular, the finite field restriction problem for cones, paraboloids, and spheres have been mainly studied, but known results are far from the conjectured results in higher dimensions. \\
When we study analogue of Euclidean problems in finite fields, we often find an unprecedented phenomenon
which never occurs in the Euclidean case.
It is well known that if $V\subset \mathbb R^d$ is the sphere or a compact subset of the paraboloid,
then $ p_0=(2d+2)/(d+3)$ gives the sharp $p $ exponent for $L^p-L^2 $ restriction estimates for $V.$
The number $p_0$ is called the Stein-Tomas exponent for the $L^p-L^2$ restriction inequality.
On the contrary to the Euclidean case, it is possible to improve the Stein-Tomas exponent $p_0$ if $V$ is the paraboloid in {\bf even} dimensional vector spaces over finite fields. For example, Mockenhaupt and Tao (\cite{MT04}) proved
the $L^{4/3}-L^2$ restriction estimate for the parabola lying in two dimensional vector spaces over finite fields.
For even dimensions $d\geq 4$, A. Lewko and M. Lewko (\cite{LL10}) obtained the $L^{2d^2/(d^2+2d-2)}-L^2$ restriction result for the paraboloid in the finite field setting. These results are clearly better than the Stein-Tomas inequality. Here, we point out that if the dimension $d\geq 3$ is {\bf odd} and $-1$ is a square number, then it is impossible to improve the Stein-Tomas restriction estimate for spheres or paraboloids in finite field case. For this reason, we shall just focus on studying the $L^p-L^2$ restriction estimates for spheres or paraboloids in {\bf even} dimensions. \\
When $-1$ is a square number in the underlying finite field, it is conjectured that the $L^{(2d+4)/(d+4)}-L^2$ restriction estimate is the best possible result on the $L^p-L^2$ estimate for the sphere or the paraboloid in {\bf even} dimensional vector spaces over finite fields (see Conjecture \ref{conjecture1}).
The conjecture is open except for $d=2,$ and the aforementioned result due to A. Lewko and M. Lewko is far from the conjectured one. Furthermore, there is no known result for spheres in even dimensions $d\geq 4$ which improves on the Stein-Tomas exponent.
The main purpose of this paper is to find a class of test functions for which the conjectured $L^{(2d+4)/(d+4)}-L^2 $ restriction estimate holds for the sphere or the paraboloid in even dimensional vector spaces over finite fields. The main idea to derive our results is to use a connection between restriction estimates for homogeneous varieties in $(d+1)$ dimensions and those for the sphere or the paraboloid in $d$-dimensional vector spaces over finite fields.
\section{ Weak version of restriction problems}
To precisely state our main results, we shall introduce the weak version of restriction problems in the finite field setting. Roughly speaking, we investigate the $L^p-L^2$ restriction estimates for algebraic varieties in the specific case when the test functions are restricted to specific classes of functions rather than all functions on vector spaces over finite fields. We begin by reviewing the restriction problem for algebraic varieties in finite fields.
\subsection{Review of the restriction problem}
Let $\mathbb F_q^d, d\geq 2,$ be the $d$-dimensional vector spaces over finite fields $\mathbb F_q$ with $q$ elements. We assume that the characteristic of $\mathbb F_q$ is greater than two.
The space $\mathbb F_q^d$ is equipped with a counting measure $dm$, by setting, for any function $g: (\mathbb F_q^d, dm) \to \mathbb C,$
$$\int_{\mathbb F_q^d} g(m)~dm = \sum_{m\in \mathbb F_q^d} g(m).$$
Here and throughout the paper, we write the notation $(\mathbb F_q^d, dm)$ for the space $\mathbb F_q^d$ with the counting measure $dm.$
On the contrary to the space $(\mathbb F_q^d, dm)$, we endow its dual space with a normalized counting measure $dx.$
The dual space of $(\mathbb F_q^d, dm)$ is denoted by the notation $(\mathbb F_q^d, dx).$
Recall that if $g: (\mathbb F_q^d, dm) \to \mathbb C,$ then its Fourier transform $\widehat{g}$ is a function on the dual space $(\mathbb F_q^d, dx).$ Thus, for $x\in (\mathbb F_q^d, dx)$,
$$\widehat{g}(x)=\int_{\mathbb F_q^d} \chi(-m\cdot x) g(m)~ dm=\sum_{m\in \mathbb F_q^d} \chi(-m\cdot x) g(m),$$
where $\chi$ denotes a nontrivial additive character of $\mathbb F_q.$
Also recall that if $f: (\mathbb F_q^d, dx) \to \mathbb C$, then its inverse Fourier transform $f^\vee$ can be defined by
$$ f^\vee(m)=\int_{\mathbb F_q^d} \chi( m\cdot x) f(x)~ dx= \frac{1}{q^d} \sum_{x\in \mathbb F_q^d} \chi(m\cdot x) f(x)$$
where $m\in (\mathbb F_q^d, dm).$ Using the orthogonality relation of $\chi$, one can easily show that $(\widehat{g})^\vee(m)=g(m)$ for $g:(\mathbb F_q^d, dm)\to \mathbb C.$ This provides us with the Fourier inversion theorem:
\begin{equation}\label{FI} g(m)=\int_{\mathbb F_q^d} \chi(m\cdot x) \widehat{g}(x)~dx =\frac{1}{q^d} \sum_{x\in \mathbb F_q^d} \chi(m\cdot x) \widehat{g}(x).\end{equation}
Let $V$ be an algebraic variety in the dual space $(\mathbb F_q^d, dx).$ The variety $V$ is equipped with the normalized surface measure $d\sigma$, which is defined by the relation
$$ \int f(x) ~d\sigma(x) = \frac{1}{|V|}\sum_{x\in V} f(x),$$
where $f:(\mathbb F_q^d, dx) \to \mathbb C.$
Observe that we can write $d\sigma(x)= \frac{q^d}{|V|} V(x)~dx.$ Here, and throughout this paper, we write $A(x)$ for the characteristic function on a set $A\subset \mathbb F_q^d$ and $|A|$ denotes the cardinality of the set $A.$\\
The restriction problem for the variety $V$ is to determine $1\leq p, r\leq \infty$ such that
the following restriction estimate holds:
\begin{equation}\label{restriction} \|\widehat{g}\|_{L^r(V, d\sigma)} \leq C \|g\|_{L^p(\mathbb F_q^d, dm)} \quad\mbox{for all functions}~~g:\mathbb F_q^d \to \mathbb C,\end{equation}
where the constant $C>0$ is independent of functions $g$ and the size of the underlying finite field $\mathbb F_q.$ The notation $R(p\to r)\lesssim 1$ is used to indicate that the restriction inequality (\ref{restriction}) holds. In this case, we say that the $L^p-L^r$ restriction estimate holds.
By duality, inequality (\ref{restriction}) is the same as the following extension estimate:
\begin{equation}\label{extension} \|(gd\sigma)^\vee\|_{L^{p^\prime}(\mathbb F_q^d, dm)} \leq C \|g\|_{L^{r^\prime}(V, d\sigma)}.\end{equation}
When this extension inequality holds, we say that the $L^{r^\prime}-L^{p^\prime}$ extension estimate holds and we write $R^*(r^\prime\to p^\prime)\lesssim 1$ for it. Thus, $R(p\to r)\lesssim 1 $ if and only if $R^*(r^\prime \to p^\prime)\lesssim 1.$
\begin{remark} $A\lesssim B$ for $A,B>0$ means that there exists $C>0$ independent of $q=|\mathbb F_q|$ such that $A\le CB.$ We also write $B\gtrsim A$ for $A\lesssim B.$ In addition, $A\sim B$ means that $A\lesssim B$ and $A\gtrsim B.$ We can define $R(p\to r)$ to be the best constant such that the restriction estimate (\ref{restriction}) holds. $R(p\to r)$ may depend on $q$. The restriction problem is to determine $p,r$ such that $R(p\to r)\lesssim 1.$
\end{remark}
When $V\subset \mathbb F_q^d$ is the sphere or the paraboloid, the necessary conditions for $R(p\to r)\lesssim 1$ are well known.
In particular, necessary conditions for $R(p\to 2)\lesssim 1$ mainly depend on the biggest size of the affine subspaces lying in the variety $V.$
For example, if $-1\in \mathbb F_q$ is a square number and $V\subset \mathbb F_q^d$ is the sphere or the paraboloid, then one can construct an affine subspace $H \subset \mathbb F_q^d$ such that
$|H|=q^{(d-1)/2}$ for $d\geq 3$ odd and $|H|=q^{(d-2)/2}$ for $d\geq 2$ even (see \cite{IK09} and \cite{IR07}). Taking $g(x)=H(x)$ in (\ref{extension}), we can directly deduce that the necessary conditions for $R(p\to 2)\lesssim 1$ are given by
\begin{equation}\label{oddN} 1\leq p\leq \frac{2d+2}{d+3}\quad \mbox{for odd}~~ d\ge 3\end{equation}
and
\begin{equation}\label{evenN} 1\leq p\leq \frac{2d+4}{d+4}\quad \mbox{for even}~~ d\ge 2.\end{equation}
It was proved in \cite{MT04} and \cite{IK08} that the Stein-Tomas inequality holds for the sphere and the paraboloid, respectively.
Therefore, if $d\ge 3$ is {\bf odd}, then (\ref{oddN}) is also the sufficient condition for $R(p\to 2)\lesssim 1.$
However, when the dimension $d$ is {\bf even}, it is not known that (\ref{evenN}) is the sufficient condition for $R(p\to 2)\lesssim 1$ except for dimension two.
For this reason, by the nesting property of norms, one may want to establish the following conjecture.
\begin{conjecture}\label{conjecture1} Let $V\subset \mathbb F_q^d$ be the sphere or the paraboloid. Assume that $-1\in \mathbb F_q$ is a square number and $d\geq 4$ is even. Then
$$R\left(\frac{2d+4}{d+4}\to 2\right)\lesssim 1.$$
\end{conjecture}
\subsection{$d$-coordinate lay functions and homogeneous functions of degree zero}
We introduce specific test functions on which the restriction operator for the sphere or the paraboloid acts.
The following two definitions are closely related to a weak version of the restriction problem for the paraboloid.
\begin{definition}
A function $g:(\mathbb F_q^d, dm)\to \mathbb C$ is called a $d$-coordinate lay function if it satisfies that for each $(m^\prime, m_d)\in \mathbb F_q^{d-1}\times \mathbb F_q,$
$$ g(m^\prime, m_d)=g(m^\prime, s m_d) \quad\mbox{for all} ~~s\in \mathbb F_q\setminus\{0\}.$$\end{definition}
\begin{definition} We write $R_{d-lay}(p\to r)\lesssim 1$ if the restriction estimate (\ref{restriction}) holds for all $d$-coordinate lay functions $g:(\mathbb F_q^d, dm) \to \mathbb C.$
\end{definition}
The weak version of the restriction operator for the sphere shall be defined by taking homogeneous functions of degree zero as test functions.
As usual, a function $g:(\mathbb F_q^d, dm)\to \mathbb C$ is named a homogeneous function of degree zero if $ g(s m)=g(m)$ for $m \in \mathbb F_q^d, ~s \in \mathbb F_q\setminus\{0\}.$
\begin{definition} We write $R_{hom}(p\to r)\lesssim 1$ if the restriction estimate (\ref{restriction}) holds for all homogeneous functions of degree zero, $g:(\mathbb F_q^d, dm) \to \mathbb C.$
\end{definition}
\subsection{Statement of main results} Our first result below is related to the parabolic restriction estimate for $d$-coordinate lay test functions.
\begin{theorem}\label{main3} Let $d\sigma$ be the normalized surface measure on the paraboloid
$P:=\{x\in \mathbb F_{q}^d: x_1^2+\cdots+x_{d-1}^2=x_d\}.$ If $d\geq 2$ is even, then we have
$$ R_{d-lay}\left(\frac{2d+4}{d+4}\to 2\right)\lesssim 1.$$
\end{theorem}
When the test functions are homogeneous functions of degree zero, we obtain the strong result on the weak version of spherical restriction problems.
\begin{theorem}\label{main4} Let $d\sigma$ be the normalized surface measure on the sphere with nonzero radius
$S_j:=\{x\in \mathbb F_{q}^d: x_1^2+\cdots+x_{d}^2=j\neq 0\}.$ Then if $d\geq 2$ is even, we have
$$ R_{hom}\left(\frac{2d+4}{d+4}\to 2\right)\lesssim 1.$$
\end{theorem}
Conjecture \ref{conjecture1} claims that if $d\geq 4$ is even, then $(2d+4)/(d+4)$ is the optimal $p$ value for the $L^p-L^2$ restriction estimate for spheres and paraboloids in finite fields.
According to Theorems \ref{main3} and \ref{main4}, it seems that the conjecture is true.
In dimension two, this conjecture was actually proved by Mockenhaupt and Tao (\cite{MT04}) for the parabola and Iosevich and Koh (\cite{IK08}) for the circle. Indeed, they obtained the $L^2-L^4$ extension estimate which exactly implies that $L^{4/3}-L^2$ restriction estimate holds.
However, it is still open in higher even dimensions $d\geq 4$ and the currently best known result for the paraboloid is $R(2d^2/(d^2+2d-2)\to 2)\lesssim 1$ due to A. Lewko and M. Lewko (\cite{LL10}).
In fact, they proved the extension estimate, $R^*(2\to 2d^2/(d^2-2d+2))\lesssim 1$ for even $d\geq 4.$
Notice that this result is much better than the Stein-Tomas inequality, that is, $R( (2d+2)/(d+3) \to 2)\lesssim 1.$ For the sphere in even dimensions $d\geq 4,$ the Stein-Tomas inequality was only obtained by Iosevich and Koh (\cite{IK08}) and it has not been improved.
\subsection{Outline of the remaining parts of the paper}
The remaining parts of this paper are devoted to the proofs of Theorems \ref{main3} and \ref{main4}. In Section \ref{conerestriction}, we deduce the $L^p-L^2$ restriction estimate for homogeneous varieties in $(d+1)$-dimensional vector spaces over finite fields $\mathbb F_q$.
Since homogeneous varieties are a collection of lines, it sounds plausible to expect that their Fourier decay is not so good. However, this is not always true. Indeed, we observe that if $(d+1)$ is odd, then the Fourier decay of homogeneous varieties in $(d+1)$ dimensions is enough to derive a good $L^p-L^2$ restriction result from the Stein-Tomas argument. In Section \ref{connection}, we complete the proofs of Theorems \ref{main3} and \ref{main4} by deducing the connection between a weak version of restriction estimates for spheres or paraboloids in $d$ dimensions and the restriction estimates for homogeneous varieties in $(d+1)$ dimensions.
\section{ Restriction phenomenon for homogeneous varieties}\label{conerestriction}
Let $d\geq 2$ be an integer. In this section, we derive the $L^p-L^2$ estimate for homogeneous varieties lying in $(\mathbb F_q^{d+1}, d\overline{x})$ where $d\overline{x}$ denotes the normalized counting measure on $\mathbb F_q^{d+1}.$ Define a variety $C\subset (\mathbb F_q^{d+1}, d\overline{x})$ as
$$ C=\{(x, x_{d+1})\in \mathbb F_q^d\times \mathbb F_q: x_1^2+\cdots+x_{d-1}^2=x_d x_{d+1} \}.$$
For each $j\in \mathbb F_q^*,$ define a variety $H_j\subset (\mathbb F_q^{d+1}, d\overline{x})$ by
$$H_j=\{(x, x_{d+1})\in \mathbb F_q^d\times \mathbb F_q: x_1^2+\cdots+x_d^2=j x_{d+1}^2 \}.$$
Throughout this paper, we denote by $d\sigma_c$ and $d\sigma_j$ the normalized surface measures on $C$ and $H_j$, respectively. In addition, $(\mathbb F_q^{d+1}, d\overline{m})$ denotes the dual space of $ (\mathbb F_q^{d+1}, d\overline{x})$ where $d\overline{m}$ is the counting measure on $\mathbb F_q^{d+1}.$ Recall that if $\overline{m} \in (\mathbb F_q^{d+1}, d\overline{m})$, then
$$ (d\sigma_c)^\vee(\overline{m})=\int_C \chi(\overline{m}\cdot \overline{x})~ d\sigma_c(\overline{x})= \frac{1}{|C|} \sum_{\overline{x}\in C} \chi(\overline{m}\cdot \overline{x})$$
and
$$ (d\sigma_j)^\vee(\overline{m})=\int_{H_j} \chi(\overline{m}\cdot \overline{x}) ~d\sigma_j(\overline{x})= \frac{1}{|H_j|} \sum_{\overline{x}\in H_j} \chi(\overline{m}\cdot \overline{x}).$$
With the above notation, we have the following result.
\begin{lemma}\label{keyR} Let $d\geq 2$ be even. Then
$$|C|=|H_j|=q^d \quad \mbox{for}~~j\in \mathbb F_q^*.$$
Moreover, if $\overline{m}\in \mathbb F_q^{d+1}\setminus \{ (0,\cdots,0)\},$ then
$$\left |(d\sigma_c)^\vee(\overline{m})\right| \leq q^{-d/2}$$
and
$$ \left| (d\sigma_j)^\vee(\overline{m})\right|\leq q^{-d/2} \quad \mbox{for all}~~ j\in \mathbb F_q^*.$$
\end{lemma}
\begin{proof}
Before we proceed with the proof, we recall preliminary knowledge for exponential sums.
Let $\eta$ be a quadratic character of $\mathbb F_q.$
For each $a\in \mathbb F_q$, the absolute value of the Gauss sum $G_a$ is given by
\begin{equation}\label{gauss} |G_a| :=\left| \sum_{s\in \mathbb F_q^*} \eta(s) \chi(as)\right|=\left| \sum_{s\in \mathbb F_q^*} \eta(s) \chi({a}/{s}) \right|= \left\{\begin{array}{ll} q^{\frac{1}{2}} \quad &\mbox{if} \quad a\ne 0\\
0 \quad & \mbox{if} \quad a=0. \end{array}\right.\end{equation}
It is not hard to see that
\begin{equation}\label{square} \sum_{s \in {\mathbb F}_q} \chi (a s^2) = G_1 \eta(a) \quad \mbox{for any} \quad a \ne 0.\end{equation}
It follows from the orthogonality relations of $\chi$ and $\eta$ that
\begin{equation}\label{orthogonality} \sum_{s\in \mathbb F_q} \chi(as)=\left\{\begin{array}{ll} 0 &\quad\mbox{if}~~a\in \mathbb F_q^*\\
q &\quad\mbox{if}~~a=0, \end{array} \right.\end{equation}
and
$$\sum_{s\in \mathbb F_q^*} \eta(as)=\left\{\begin{array}{ll} 0 &\quad\mbox{if}~~a\in \mathbb F_q^*\\
q-1 &\quad\mbox{if}~~a=0. \end{array} \right.$$
For (\ref{gauss}), (\ref{square}), and (\ref{orthogonality}), see Chapter 5 in \cite{LN97}.
Completing the square and using a change of variables, (\ref{square}) can be generalized by the formula:
\begin{equation}\label{complete} \sum_{s\in \mathbb F_q} \chi(as^2+bs) = G_1 \eta(a) \chi({b^2}/{(-4a)})
\quad\mbox{for}~~a\in \mathbb F_q^*, b\in \mathbb F_q.\end{equation}
Now we are ready to prove the lemma. First, we estimate $(d\sigma_c)^\vee.$
For $\overline{m}=(m_1,\cdots, m_{d+1})\in \mathbb F_q^{d+1},$ it follows from the orthogonality relation of $\chi$ that
\begin{align*} (d\sigma_c)^\vee(\overline{m})=&\frac{1}{|C|} \sum_{\overline{x}\in C } \chi(\overline{m}\cdot\overline{x})\\
=& \frac{1}{q|C|} \sum_{t\in \mathbb F_q}\sum_{\overline{x}\in \mathbb F_q^{d+1}} \chi(\overline{m}\cdot\overline{x})\chi(t(x_1^2+\cdots+x_{d-1}^2-x_dx_{d+1}))\\
=& \frac{q^d}{|C|} \delta_0(\overline{m}) + \frac{1}{q|C|} \sum_{t\ne 0}\sum_{\overline{x}\in \mathbb F_q^{d+1}} \chi(\overline{m}\cdot\overline{x})\chi(t(x_1^2+\cdots+x_{d-1}^2-x_dx_{d+1}))\\
:=& \mbox{I} +\mbox{II}
\end{align*}
where $\delta_0(\overline{m})=1$ for $\overline{m}=(0,\dots,0)$, and $0$ otherwise.
Applying (\ref{complete}), we see that
$$\mbox{II}=\frac{G_1^{d-1}}{q|C|} \sum_{t\neq 0} \eta(t)^{d-1} \chi(\|m^\prime\|^2/(-4t)) \sum_{x_{d+1}\in \mathbb F_q} \chi(m_{d+1}x_{d+1})
\sum_{x_d\in \mathbb F_q} \chi((m_d-tx_{d+1})x_d),$$
where we define $\|m^\prime\|^2=m_1^2+\cdots+ m_{d-1}^2.$
Since $d$ is even and $\eta$ is the quadratic character of $\mathbb F_q$, we see $\eta(t)^{d-1} =\eta(t).$ In addition, notice from the orthogonality relation of $\chi$ that
$$\sum_{x_{d+1}\in \mathbb F_q} \chi(m_{d+1}x_{d+1})
\sum_{x_d\in \mathbb F_q} \chi((m_d-tx_{d+1})x_d) =q \chi(m_dm_{d+1}/t).$$
Then we obtain that for $\overline{m}\in \mathbb F_q^{d+1},$
\begin{equation}\label{form1} (d\sigma_c)^\vee(\overline{m})
=\frac{q^d}{|C|} \delta_0(\overline{m}) + \frac{G_1^{d-1}}{|C|} \sum_{t\neq 0} \eta(t) \chi((\|m^\prime\|^2-4m_dm_{d+1})/(-4t)).
\end{equation}
From the definition of $(d\sigma_c)^\vee$ and the orthogonality relation of $\eta$, we see that
$$ 1= (d\sigma_c)^\vee(0,\cdots,0)= \frac{q^d}{|C|}.$$ This completes the proof of
$|C|=q^d$ and it follows immediately from (\ref{form1}) and (\ref{gauss}) that
$|(d\sigma_c)^\vee(\overline{m})|\leq q^{-d/2}$ for $\overline{m}\ne (0,\dots, 0).$ \\
Next, we can directly deduce by the previous argument that if $j\in \mathbb F_q^*$ and $\overline{m}\in \mathbb F_q^{d+1}$, then
$$ (d\sigma_j)^\vee(\overline{m})
= \frac{q^d}{|H_j|} \delta_0(\overline{m}) + \frac{G_1^{d+1}}{q|H_j|} \eta(-j) \sum_{t\in \mathbb F_q^*} \eta(t) \chi( (m_{d+1}^2-j\|m\|^2)/(4jt)),$$
where $\|m\|^2=m_1^2+\cdots+ m_d^2.$ This implies that $|H_j|=q^d$ for $j\in \mathbb F_q^*$ and
$|(d\sigma_j)^\vee(\overline{m})|\leq q^{-d/2}$ for $\overline{m}\ne (0,\dots, 0).$ We leave the detail to readers.
\end{proof}
\begin{remark} If $d$ is odd, then the Fourier decays become much worse than those in conclusions of Lemma \ref{keyR}. To see this, notice that if $d$ is odd, then $\eta^{d-1}=1.$ Thus, the term $\eta$ disappears in the formula (\ref{form1}). Consequently, if $\overline{m}=(m^\prime, m_d, m_{d+1}) \neq (0,\dots,0)$ and $\|m^\prime\|^2-4m_dm_{d+1}=0,$ then $|(d\sigma_c)^\vee(\overline{m})|\sim q^{(-d+1)/2}.$
\end{remark}
Applying the well known Stein-Tomas argument in finite fields, Lemma \ref{keyR} enables us to deduce the $L^p-L^2$ restriction theorem for the homogeneous varieties $C$ and $H_j$ for $j\in \mathbb F_q^*.$
\begin{lemma}\label{Koh} Let $d\geq 2$ be an even integer. Then we have
\begin{equation}\label{res1} \|\widehat{G}\|_{L^2(C, d\sigma_c)} \lesssim \|G\|_{L^{\frac{2d+4}{d+4}}(\mathbb F_q^{d+1}, d\overline{m})}\quad\mbox{for all functions}\quad G:\mathbb F_q^{d+1} \to \mathbb C.\end{equation}
We also have that if $j\in \mathbb F_q^*$, then
\begin{equation}\label{res2} \|\widehat{G}\|_{L^2(H_j, d\sigma_{j})} \lesssim \|G\|_{L^{\frac{2d+4}{d+4}}(\mathbb F_q^{d+1}, d\overline{m})}\quad\mbox{for all functions}~~ G:\mathbb F_q^{d+1} \to \mathbb C.\end{equation}
\end{lemma}
\begin{proof}
Since the proof of (\ref{res1}) is exactly the same as that of (\ref{res2}),
we shall only introduce the proof of (\ref{res1}).
By duality and H\"{o}lder's inequality, we see
\begin{align*} \|\widehat{G}\|^2_{L^2(C,d\sigma_c)} &=\sum_{\overline{m}\in \mathbb F_q^{d+1}} G(\overline{m})~ \overline{ (G\ast (d\sigma_c)^\vee)(\overline{m})}\\
&\leq\|G\|_{L^{\frac{2d+4}{d+4}}(\mathbb F_q^{d+1}, d\overline{m})} \|G\ast (d\sigma_c)^\vee \|_{L^{\frac{2d+4}{d}}(\mathbb F_q^{d+1}, d\overline{m})}.\end{align*}
It is enough to prove that for every function $G$ on $(\mathbb F_q^{d+1},d\overline{m}),$
$$\|G\ast (d\sigma_c)^\vee \|_{L^{\frac{2d+4}{d}}({\mathbb F_q^{d+1}}, d\overline{m})}\lesssim \|G\|_{L^{\frac{2d+4}{d+4}}({\mathbb F_q^{d+1}}, d\overline{m})}.$$
Define $ K(\overline{m})=(d\sigma_c)^\vee(\overline{m}) -\delta_0(\overline{m}).$ Since $(d\sigma_c)^\vee(0,\dots,0)=1$, we see that $ K(\overline{m})=0$ for $\overline{m}=(0,\dots,0)$, and $K(\overline{m})=(d\sigma_c)^\vee(\overline{m})$ for $\overline{m}\in {\mathbb F_q^{d+1}}\setminus \{(0,\dots,0)\}.$ Since $G\ast (d\sigma_c)^\vee=G\ast \delta_0 + G\ast K,$ it will be enough to prove the following two inequalities:
\begin{equation}\label{E1} \|G\ast \delta_0 \|_{L^{\frac{2d+4}{d}}({\mathbb F_q^{d+1}}, d\overline{m})}\lesssim \|G\|_{L^{\frac{2d+4}{d+4}}({\mathbb F_q^{d+1}}, d\overline{m})}
\end{equation}
and
\begin{equation}\label{E2}\|G\ast K \|_{L^{\frac{2d+4}{d}}({\mathbb F_q^{d+1}}, d\overline{m})}\lesssim \|G\|_{L^{\frac{2d+4}{d+4}}({\mathbb F_q^{d+1}}, d\overline{m})}.
\end{equation}
Since $G\ast \delta_0(\overline{m})=G(\overline{m})$ for $\overline{m}\in (\mathbb F_q^{d+1}, d\overline{m}),$ (\ref{E1}) follows by observing that
$$\begin{array}{ll} \|G\ast \delta_0 \|_{L^{\frac{2d+4}{d}}({\mathbb F_q^{d+1}}, d\overline{m})}
&=\|G \|_{L^{\frac{2d+4}{d}}({\mathbb F_q^{d+1}}, d\overline{m})}\\
&\leq \|G\|_{L^{\frac{2d+4}{d+4}}({\mathbb F_q^{d+1}}, d\overline{m})},\end{array}$$
where the last line follows from the facts that $d\overline{m}$ is the counting measure and
$(2d+4)/(d+4) <(2d+4)/d.$ In order to prove (\ref{E2}), we assume for a moment that
\begin{equation}\label{two} \|G\ast K\|_{L^2({\mathbb F_q^{d+1}}, d\overline{m})}\lesssim q \|G\|_{L^2(\mathbb F_q^{d+1},d\overline{m})}
\end{equation}
and
\begin{equation}\label{infty}\|G\ast K\|_{L^\infty({\mathbb F_q^{d+1}}, d\overline{m})}\lesssim q^{-\frac{d}{2}} \|G\|_{L^1(\mathbb F_q^{d+1},d\overline{m})}.
\end{equation}
Then (\ref{E2}) follows immediately by interpolating (\ref{two}) and (\ref{infty}). Thus, our final task is to show that both (\ref{two}) and (\ref{infty}) hold. As a direct consequence from the Plancherel theorem, (\ref{two}) can be proved. Indeed, we have
$$ \begin{array}{ll} \|G\ast K\|_{L^2(\mathbb F_q^{d+1}, d\overline{m})}&=\|\widehat{G}\widehat{K}\|_{L^2({\mathbb F_q^{d+1}}, d\overline{x})}\\
&\leq \|\widehat{K}\|_{L^\infty({\mathbb F_q^{d+1}}, d\overline{x})} \|\widehat{G}\|_{L^2({\mathbb F_q^{d+1}}, d\overline{x})}\\
&< q\|G\|_{L^2({\mathbb F_q^{d+1}}, d\overline{m})},\end{array}$$
where the last line is obtained by observing that for each $\overline{x}\in (\mathbb F_q^{d+1},d\overline{x})$\\
$|\widehat{K}(\overline{x})|= |d\sigma_c(\overline{x})-\widehat{\delta_0}(\overline{x})|=|q^{d+1}|C|^{-1} C(\overline{x})-1| < q.$
Now, we prove (\ref{infty}). It follows from Young's inequality that
$$\|G\ast K\|_{L^\infty({\mathbb F_q^{d+1}}, d\overline{m})}\leq \| K\|_{L^\infty({\mathbb F_q^{d+1}}, d\overline{m})} \|G\|_{L^1({\mathbb F_q^{d+1}}, d\overline{m})}.$$
From the definition of $K$ and the Fourier decay estimate in Lemma \ref{keyR}, we conclude that
(\ref{infty}) holds. Thus, the proof is complete.
\end{proof}
\section{Proofs of Theorem \ref{main3} and \ref{main4}} \label{connection}
As a key ingredient of proving our main results, we use the relation between the restriction theorem for $C$ and $H_j$ in $\mathbb F_q^{d+1}$ and the weak restriction theorem for paraboloids and spheres in $\mathbb F_q^d.$ Theorem \ref{main3} shall be deduced from (\ref{res1}) in Lemma \ref{Koh}. Similarly, we shall prove Theorem \ref{main4} by applying (\ref{res2}) in Lemma \ref{Koh}.
\subsection{Proof of Theorem \ref{main3} }
We must prove that if $d\geq 2$ is even, then
$$\|\widehat{g}\|_{L^2(P, d\sigma)} \lesssim \|g\|_{L^{(2d+4)/(d+4) }(\mathbb F_q^d, dm)} \quad \mbox{for all $d$-coordinate lay functions}\quad g:\mathbb F_q^{d} \to \mathbb C.$$
Given a $d$-coordinate lay function $g: (\mathbb F_q^d, dm) \to \mathbb C,$ we define
$G_g: (\mathbb F_q^{d+1}, d\overline{m}) \to \mathbb C$ by the relation
\begin{equation}\label{DG} \widehat{G_g}(x^\prime, x_d, s)=\left\{\begin{array}{ll} \widehat{g}(x^\prime, x_d s)&\quad\mbox{if}~~s\ne 0\\
0&\quad\mbox{if}~~s= 0, \end{array} \right.\end{equation}
where $(x^\prime, x_d, s) \in (\mathbb F_q^{d+1}, d\overline{x})$ with $x^\prime\in \mathbb F_q^{d-1}, x_d, s\in \mathbb F_q.$
We need the explicit form of $G_g.$
\begin{proposition}\label{Pro}
For $(m, l)\in \mathbb F_q^d \times \mathbb F_q,$
$$ G_g(m, l) =\frac{g(m)}{q} \sum_{s\in \mathbb F_q^*} \chi(ls).$$
\end{proposition}
\begin{proof} By the Fourier inversion theorem (\ref{FI}) for $d+1$ dimensions, and the definition of $\widehat{G_g}$ in (\ref{DG}), we see that
if $(m^\prime, m_d, l) \in \mathbb F_q^{d-1}\times \mathbb F_q \times \mathbb F_q =\mathbb F_q^{d+1},$ then
\begin{align*} G_g(m^\prime, m_d, l)=&\frac{1}{q^{d+1}} \sum_{x^\prime\in \mathbb F_q^{d-1}, x_d, s\in \mathbb F_q} \chi(m^\prime \cdot x^\prime+ m_dx_d+ls) ~\widehat{G_g}(x^\prime, x_d, s)\\
=& \frac{1}{q^{d+1}} \sum_{s\neq 0} \sum_{(x^\prime, x_d)\in \mathbb F_q^d} \chi(m^\prime \cdot x^\prime+ m_dx_d+ls) ~\widehat{g}(x^\prime, x_d s).
\end{align*}
By a change of variables, $x_d\to x_d/s,$ and the Fourier inversion formula (\ref{FI}),
\begin{align*}G_g(m^\prime, m_d, l)=&\frac{1}{q^{d+1}} \sum_{s\ne 0} \chi(ls) \sum_{x\in \mathbb F_q^d} \chi(x\cdot (m^\prime, m_d/s))~ \widehat{g}(x)\\
=&\frac{1}{q} \sum_{s\ne 0} \chi(ls)~ g(m^\prime, m_d/s).\end{align*}
Since $g$ is a $d$-coordinate lay function, $ g(m^\prime, m_d/s)=g(m)$ for all $s\in \mathbb F_q^*.$
Hence, the proof of Proposition \ref{Pro} is complete.\end{proof}
We continue to prove Theorem \ref{main3}. It is enough to show that
$$ \|\widehat{g}\|^2_{L^2(P, d\sigma)} \lesssim \|g\|^2_{L^{(2d+4)/(d+4)} (\mathbb F_q^d, dm )}.$$
Since $|C|=q^{d}=q|P|,$ it follows that
\begin{align*}\|\widehat{g}\|^2_{L^2(P, d\sigma)} =&\frac{1}{|P|}\sum_{x\in P} |\widehat{g}(x)|^2 \sim \frac{1}{|C|} \sum_{s\in \mathbb F_q^*} \sum_{x\in P} |\widehat{g}(x)|^2\\
=& \frac{1}{|C|} \sum_{s\in\mathbb F_q^*} \sum_{\substack{(x^\prime, x_d)\in \mathbb F_q^d\\
: x_1^2+\cdots+x_{d-1}^2=x_d s}} |\widehat{g}(x^\prime, x_d s)|^2\\
=&\|\widehat{G_g}\|^2_{L^2(C, d\sigma_c)}, \end{align*}
where the last line follows from (\ref{DG}).
By (\ref{res1}) in Lemma \ref{Koh}, to prove Theorem \ref{main3}, it therefore suffices to show that
$$ \|G_g\|^2_{L^{(2d+4)/(d+4)}(\mathbb F_q^{d+1}, d\overline{m})}\lesssim \|g\|^2_{L^{(2d+4)/(d+4)}(\mathbb F_q^{d}, dm)}.$$ Letting $\alpha=(2d+4)/(d+4) >1,$ it will be enough to prove that
$$ \|G_g\|^{\alpha}_{L^{\alpha}(\mathbb F_q^{d+1}, d\overline{m})}\lesssim \|g\|^{\alpha}_{L^{\alpha}(\mathbb F_q^{d}, dm)}.$$
From the explicit form of $G_g$ in Proposition \ref{Pro}, it follows that
\begin{align*} \|G_g\|^{\alpha}_{L^{\alpha}(\mathbb F_q^{d+1}, d\overline{m})} =&\sum_{(m,l)\in \mathbb F_q^d \times \mathbb F_q} |G_g(m,l)|^{\alpha}\\
=& \sum_{l\in \mathbb F_q} \sum_{m\in \mathbb F_q^d} |g(m)|^{\alpha} \left|q^{-1} \sum_{s\in \mathbb F_q^*} \chi(ls)\right|^{\alpha}\\
=& \sum_{m\in \mathbb F_q^d} |g(m)|^{\alpha} (q^{-1} (q-1) )^{\alpha} + \sum_{l\ne 0}
\sum_{m\in \mathbb F_q^d} q^{-\alpha} |g(m)|^{\alpha}\\
\leq& \sum_{m\in \mathbb F_q^d} |g(m)|^{\alpha}+ q^{-\alpha} (q-1)\sum_{m\in \mathbb F_q^d} |g(m)|^{\alpha} \leq 2 \|g\|^{\alpha}_{L^{\alpha}(\mathbb F_q^d, dm)}.
\end{align*}
Thus, the proof of Theorem \ref{main3} is complete.
\subsection{Proof of Theorem \ref{main4}}
We aim to prove that for every $j\in \mathbb F_q^*,$
$$\|\widehat{g}\|_{L^2(S_j, d\sigma)} \lesssim \|g\|_{L^{(2d+4)/(d+4)}(\mathbb F_q^d, dm)}$$
for all homogeneous functions of degree zero $g:\mathbb F_q^{d} \to \mathbb C.$
Let $g:(\mathbb F_q^d, dm) \to \mathbb C$ be a homogeneous function of degree zero. By the definition of a homogeneous function of degree zero, we see that for every $t\in \mathbb F_q^*$ and $x\in (\mathbb F_q^d, dx),$
$$ \widehat{g}(x)=\sum_{m\in \mathbb F_q^d} \chi(-m\cdot x) g(m/t).$$
From this observation and a change of variables, $m \to tm,$ it follows that
\begin{align*}\|\widehat{g}\|^2_{L^2(S_j, d\sigma)}=&\frac{1}{|S_j|(q-1)} \sum_{t\in \mathbb F_q^*}\sum_{x\in S_j} \left| \sum_{m\in \mathbb F_q^d} \chi(-m\cdot x) g(m/t) \right |^2\\
=&\frac{1}{|S_j|(q-1)} \sum_{t\in \mathbb F_q^*}\sum_{x\in S_j} \left| \sum_{m\in \mathbb F_q^d} \chi(-tm\cdot x) g(m) \right |^2\\
=&\frac{1}{|S_j|(q-1)} \sum_{t\in \mathbb F_q^*}\sum_{\substack{x\in \mathbb F_q^d\\
: x_1^2+\cdots+x_d^2=j}} \left| \sum_{m\in \mathbb F_q^d} \chi(-m\cdot tx) g(m) \right |^2.\end{align*}
Applying a change of variables, $x\to x/t$, we have
\begin{align*}\|\widehat{g}\|^2_{L^2(S_j, d\sigma)}=&\frac{1}{|S_j|(q-1)} \sum_{\substack{t\in \mathbb F_q^*, x\in \mathbb F_q^d\\: x_1^2+\cdots+x_d^2=jt^2}}\left| \sum_{m\in \mathbb F_q^d} \chi(-m\cdot x) g(m) \right |^2\\
\leq& \frac{1}{|S_j|(q-1)} \sum_{(x, t)\in H_j} \left| \sum_{m\in \mathbb F_q^d} \chi(-m\cdot x) g(m) \right |^2.
\end{align*}
Now, consider a function $G_g:(\mathbb F_q^{d+1}, d\overline{m})\to \mathbb C$ defined by
$$ G_g(m, m_{d+1})= \left\{ \begin{array}{ll}0 \quad &\mbox{if}~~m_{d+1}\neq 0,\\
g(m) \quad&\mbox{if}~~m_{d+1} =0.\end{array}\right.$$
Then the last expression above can be written as
$$ \frac{1}{|S_j|(q-1)} \sum_{(x, t)\in H_j} \left| \sum_{(m, m_{d+1})\in \mathbb F_q^d\times \mathbb F_q} \chi(-m\cdot x) \chi(-m_{d+1}\cdot t) G_g(m, m_{d+1})\right|^2.$$
Since $|S_j|(q-1) \sim q^d= |H_j|,$ we see that
$$\|\widehat{g}\|^2_{L^2(S_j, d\sigma_j)} \lesssim \frac{1}{|H_j|} \sum_{(x,t)\in H_j} |\widehat{G_g}(x,t)|^2 =\|\widehat{G_g}\|^2_{L^2(H_j, d\sigma_j )}.$$
Applying (\ref{res2}) in Lemma \ref{Koh}, we conclude from the definition of $G_g$ that
$$\|\widehat{g}\|^2_{L^2(S_j, d\sigma)} \lesssim \|G_g\|^2_{L^{(2d+4)/(d+4)}(\mathbb F_q^{d+1}, d\overline{m})} =\|g\|^2_{L^{(2d+4)/(d+4)}(\mathbb F_q^d, dm)},$$
which completes the proof.
\end{document} |
\begin{document}
\title{A violation of the uncertainty principle implies a violation of the
second law of thermodynamics}
\author{Esther H\"anggi}
\email[]{[email protected]}
\affiliation{Centre for Quantum Technologies, National University of Singapore, 3 Science Drive 2, 117543 Singapore}
\author{Stephanie Wehner}
\email[]{[email protected]}
\affiliation{Centre for Quantum Technologies, National University of Singapore, 3 Science Drive 2, 117543 Singapore}
\date{\today}
\begin{abstract}
Uncertainty relations state that there exist
certain incompatible measurements whose outcomes cannot be
simultaneously predicted. While the exact incompatibility of quantum
measurements dictated by such uncertainty relations
can be inferred from the mathematical formalism of quantum
theory, the question remains whether there is any more fundamental
reason for the uncertainty relations to have this exact form. What, if
any, would be the operational consequences if we were able to go beyond
any of these uncertainty relations? We give a strong argument that justifies
uncertainty relations in quantum theory by showing that violating them
implies that it is also possible to violate the second law of thermodynamics.
More precisely, we show that violating the uncertainty relations in quantum mechanics leads to
a thermodynamic cycle with positive net work gain, which is very unlikely to exist in nature.
\end{abstract}
\maketitle
Many features commonly associated with quantum physics, such as the
uncertainty principle~\cite{heisenberg27} or non-locality~\cite{bell} appear
highly counter-intuitive at first sight.
The fact that quantum mechanics is more non-local than any classical
theory~\cite{bell}, but yet more limited~\cite{tsirel:original,tsirel:separated}
than what the no-signalling principle alone demands~\cite{PR,PR1,PR2}
has been the subject of much investigation~\cite{function,glance,s3:nonlocal,causality,OppenheimWehner}.
Several reasons and principles were put forward that explain
the origin of such quantum mechanical limits~\cite{s3:nonlocal,causality,OppenheimWehner}.
In~\cite{OppenheimWehner} it was shown that the amount of non-locality
in quantum mechanics is indeed directly related
to another fundamental quantum mechanical limit, namely the uncertainty
principle~\cite{heisenberg27}.
This forged a relation between two fundamental quantum mechanical concepts.
We may, however, still
ask why the uncertainty principle itself is not
stronger or weaker than predicted by quantum physics --- and what would
happen if it were.
Here we relate this question to the second law
of thermodynamics. We show that any violation of uncertainty relations
in quantum mechanics also leads to a violation of the
second law.
\section{Background}
To state our result, we need to explain three different concepts. First,
we need some properties of generalized physical
theories (see e.g.~\cite{barrett,leifer,hardy,dariano,howard:survey}).
Second, we recall the concept of uncertainty relations, and finally the second law of thermodynamics.
{\bf Physical theories} Whereas it is not hard to prove our result for quantum
theory, we extend our result to some more general physical theories.
These are described by a probabilistic framework that makes the minimal
assumptions that there are \emph{states}
and \emph{measurements} which can be made on a physical system (see,
e.g.,~\cite{teleportation,entropy}).
Even for general theories, we denote
a state as $\rho \in \Omega$, where $\Omega$ is a convex state space.
In quantum mechanics, $\rho$ is simply a density matrix.
The assumption that the state space is convex is thereby generally
made~\cite{howard:survey} and says that
if we can prepare states $\rho_1$ and $\rho_2$, then the
probabilistic mixture $\rho = \rho_1/2 + \rho_2/2$ prepared
by tossing a coin and preparing $\rho_1$ or $\rho_2$ with probability $1/2$ each is also an element of $\Omega$.
A state is called \emph{pure} if it cannot be written as a convex combination of other states.
Measurements consist of linear functionals $e_j: \Omega \rightarrow [0,1]$ called \emph{effects}.
We call an effect $e_j$ \emph{pure} if it cannot be written as a
positive linear combination of any other allowed effects.
Intuitively, each effect corresponds to a possible measurement outcome, where $p(e_j|\rho) = e_j(\rho)$
is the probability of obtaining ``outcome'' $e_j$ given the state $\rho$.
More precisely, a measurement is thus given by $\textbf{e} = \{e_j \mid \sum_j p(e_j|\rho)=1\}$.
For quantum mechanics, we will simply label effects by measurement operators.
For example, a projective measurement in the eigenbasis $\{0_Z,1_Z\}$ of the Pauli $Z$
operator is denoted by $p(0_Z|\rho) = \operatorname{tr}(\proj{0_Z}\rho)$.
The assumption that effects are linear, i.e., $p(e_j|\rho)$ is
linear in $\rho$, is essentially made for all probabilistic
theories~\cite{howard:survey}
and says that when we prepared a probabilistic mixture of states the
distribution of measurement
outcomes scales accordingly.
{\bf Uncertainty relations} A modern way of quantifying
uncertainty~\cite{deutsch83,maassen88} is by means of
\emph{entropic uncertainty relations} (see~\cite{WehnerWinter} for a survey),
or the closely related \emph{fine-grained uncertainty
relations}~\cite{OppenheimWehner}.
Here we will use the latter. As for our cycle we will
only need two measurements with two outcomes,
and each measurement is chosen with probability $1/2$.
We state their definition only for this simple case.
Let $\textbf{f} = \{f_0,f_1\}$ and $\textbf{g} = \{g_0,g_1\}$ denote the
two measurements with effects $f_{y_1}$ and $g_{y_2}$ respectively.
A fine-grained uncertainty relation for these measurements is a set of inequalities
\begin{align}
\label{eq:fineGrained}
\left\lbrace \forall \rho:\ \frac{1}{2}\left(p(f_{y_1}|\rho) + p(g_{y_2}|\rho)\right) \leq \zeta_{\vec{y}} \middle|
\vec{y} \in \{0,1\}^2
\right\rbrace\,.
\end{align}
To see why this quantifies uncertainty, note that if $\zeta_{\vec{y}} < 1$ for
some $\vec{y} = (y_1,y_2)$, then
we have that if the outcome is certain for one of the
measurements (e.g., $p(f_{y_1}|\rho) = 1$) it is uncertain ($p(g_{y_2}|\rho) < 1$) for the other.
As an example from quantum mechanics, consider measurements in the $X = \{0_X,1_X\}$
and $Z = \{0_Z,1_Z\}$ eigenbases.~\footnote{We use the common convention of labelling
the $X$ and $Z$ eigenbases states as $\{\ket{+},\ket{-}\}$ and $\{\ket{0},\ket{1}\}$ respectively.}
We then have for all pure quantum states $\rho$
\begin{align}\label{eq:quantumFG}
\frac{1}{2}\left(p(0_X|\rho) + p(0_Z|\rho)\right) \leq \frac{1}{2}+ \frac{1}{2\sqrt{2}}\,.
\end{align}
The same relation holds for all other pairs of outcomes $(0_X,1_Z)$,$(1_X,0_Z)$ and $(1_X,1_Z)$.
Depending on $\vec{y}$, the eigenstates of either
$(X+Z)/\sqrt{2}$ or $(X-Z)/\sqrt{2}$ saturate these inequalities.
A state that saturates a particular inequality is also called
a \emph{maximally certain state}~\cite{OppenheimWehner}.
For any theory such as quantum mechanics in which there is a direct correspondence between
\emph{states} and \emph{measurements} uncertainty relations can also be stated in terms
of \emph{states} instead of measurements.
More precisely, uncertainty relations
can be written in terms of states if pure effects and pure
states are dual to each other in the sense that
for any pure effect $f$ there exists a corresponding pure
state $\rho_f$, and conversely for every pure state $\sigma$ an effect
$e_\sigma$ such that $p(f|\sigma) = p(e_\sigma|\rho_f)$. Here,
we restrict ourselves to theories that exhibit such a duality.
This is often (but not always) assumed~\cite{howard:survey,entropy}.
As a quantum mechanical example, consider
the effect $f = 0_X$ and the state $\sigma = \proj{0}$. We then
have $p(f|\sigma) = \operatorname{tr}(\proj{+}\sigma) = \operatorname{tr}(\proj{+}\proj{0}) = p(e_\sigma|\rho_f)$
with $\rho_f = \proj{+}$ and $e_{\sigma} = 0_Z$.
For measurements $\textbf{f} = \{f_0,f_1\}$ and $\textbf{g} = \{g_0,g_1\}$ consisting of
pure effects, let $\{\rho_{f_0},\rho_{f_1}\}$ and $\{\rho_{g_0},\rho_{g_1}\}$
denote the corresponding dual states. The equations of~\eqref{eq:fineGrained}
then take the dual form
\begin{align}\label{eq:stateUR}
\forall \mbox{ pure effects } e:\ \frac{1}{2}\left(p(e|\rho_{f_{y_1}}) + p(e|\rho_{g_{y_2}})\right) \leq \zeta_{\vec{y}}\,.
\end{align}
For our quantum example of measuring in the $X$ and $Z$ eigenbasis,
we have $\rho_{0_X} = \proj{+}$, $\rho_{1_X} = \proj{-}$,
$\rho_{0_Z} = \proj{0}$ and $\rho_{1_Z} = \proj{1}$. We then have that for all
pure quantum effects $e$
\begin{align}
\frac{1}{2}\left(p(e|\rho_{0_X}) + p(e|\rho_{1_Z})\right) \leq \frac{1}{2}+ \frac{1}{2\sqrt{2}}\,.
\end{align}
The same relation holds for all other pairs $(0_X,1_Z)$,$(1_X,0_Z)$ and $(1_X,1_Z)$.
Again, measurement effects from the eigenstates of either
$(X+Z)/\sqrt{2}$ or $(X-Z)/\sqrt{2}$ saturate these inequalities.
In analogy, with maximally certain states we refer to effects that saturate
the inequalities~\eqref{eq:stateUR} as
\emph{maximally certain effects}.
From now on, we will always consider uncertainty relations in terms of~\emph{states}.
{\bf 2nd law} The second law of thermodynamics is usually stated in terms of
entropies. One way to state it is to say that the entropy of an
isolated system cannot decrease. These entropies can be defined for general
physical theories even for systems which are not described by the quantum
formalism~\cite{entropy,barnum:entropy,japanese:entropy} (see appendix).
However, for our
case it will be sufficient to consider one operational consequence of the
second law of thermodynamics~\cite{peres,demon}:
there cannot exist a cyclic physical process with a net work gain over
the cycle.
\section{Result}
Our main result is that if it was possible to violate the fine-grained
uncertainty relations as predicted by quantum physics, then we could
create a cycle with net work gain. This holds for \emph{any} two projective measurements with
two outcomes on a qubit.
By the results of~\cite{OppenheimWehner} which showed that the amount of non-locality is
solely determined by the uncertainty relations of quantum mechanics and our ability to
steer, our result extends to a link between the amount of non-locality and the second law of thermodynamics.
In the following we focus on the quantum case, i.e., in
the situation where all the properties except the uncertainty relations
hold as for quantum theory. In the appendix, we extend our result to more
general physical theories that satisfy
certain assumptions. In essence, different forms of entropies coincide in quantum mechanics, but
can differ in more general theories~\cite{entropy}. This has consequences for
whether a net work gain in our cycle is due to a
violation of uncertainty alone, or can also be understood as the closely related question of whether
certain entropies can differ.
Let us now first state our result for quantum mechanics more precisely.
We consider the following process as depicted in Figure~\ref{fig:imp}. We start with a box which contains two
types of particles described by states $\rho_0$ and $\rho_1$ in two separated volumes. The state $\rho_0$
is the equal mixture of the eigenstates $\rho_{f_0}$ and $\rho_{g_0}$ of two measurements (observables)
$\textbf{f} = \{f_0,f_1\}$ and $\textbf{g} = \{g_0,g_1\}$. The state
$\rho_1$ is the equal mixture of $\rho_{f_1}$ and $\rho_{g_1}$.
We choose the measurements such that the equal mixture $\rho = (\rho_0 + \rho_1)/2$
is the completely mixed state in dimension $2$. We
then replace the wall separating $\rho_0$ from $\rho_1$ by two semi-transparent membranes, i.e., membranes which
measure any arriving particle in a certain basis $\textbf{e} = \{e_0,e_1\}$ and only let it pass
for a certain outcome. In the first part of the cycle we separate the two membranes until they are in
equilibrium, which happens when the state everywhere in the box can
be described as $\rho$. Then, in the second part of
the cycle, we separate $\rho$ again into its different
components.
\begin{figure*}
\caption{The impossible process.}
\label{fig:imp}
\end{figure*}
We find that the total work which can be extracted by
performing this cycle is given by
\begin{align}
\nonumber \Delta W &=
NkT \ln2 \left(
\sum_{i=0}^1 p_i S(\rho_i)
\right. \\ \nonumber & \quad \left.
-\frac{1}{2} H\left(\zeta_{(f_0,g_0)}\right)
-\frac{1}{2} H\left(\zeta_{(f_1,g_1)}\right)
\right)\,.
\end{align}
Here, $S(\rho) = - \operatorname{tr}(\rho \log \rho)$ is the \emph{von Neumann entropy} of the state.
The entropy $H$ appearing in the above expression is simply the Shannon
entropy of the distribution over measurement outcomes when measuring in the
basis $\textbf{f}$ and $\textbf{g}$, respectively.~\footnote{The Shannon
entropy of a probability distribution $\{p_1,\ldots,p_d\}$ is given
by $H(\{p_1,\ldots,p_d\}) = - \sum_j p_j \log p_j$. All logarithms in this paper are to base $2$.}
{\bf Example} To illustrate our result, consider the concrete quantum example, where
the states are given by
\begin{align} \label{eq:stateDef}
\rho_0 &=
\frac{1}{2}\left( \rho_{0_X}+ \rho_{0_Z} \right)=\frac{\mathds{1} + \frac{X+Z}{2}}{2}\ \text{and}\\
\nonumber \rho_1&=\frac{1}{2}\left( \rho_{1_{X}}+ \rho_{1_{Z}} \right)=
\frac{\mathds{1} - \frac{X+Z}{2}}{2} \,.
\end{align}
The work which can be extracted from the
cycle then becomes
\begin{align}
\nonumber \Delta W &=
NkT \ln2 \left(
H\left( \frac{1}{2}+\frac{1}{2\sqrt{2}}\right)
\right. \\ \nonumber & \quad \left.
-\frac{1}{2} H\left(\zeta_{(0_X,0_Z)}\right)
-\frac{1}{2} H\left(\zeta_{(1_X,1_Z)}\right)
\right)\,.
\end{align}
The fine-grained uncertainty relations predict in the quantum case that
$\zeta_{(0_X,0_Z)}$ and $\zeta_{(1_X,1_Z)}$ are at most
$\frac{1}{2}+\frac{1}{2\sqrt{2}}$. We see that a theory which can violate
this uncertainty relation, i.e., reach a larger value of $\zeta$, would
lead to $\Delta W>0$ --- a violation of the second law of thermodynamics.
\section{Methods}
We now explain in more detail how we obtain the work which can be
extracted from the cycle in quantum mechanics. In the appendix, we consider
the case of general physical theories.
\subsection*{First part of the cycle}
For the first part of the cycle we start with two separate
parts of the box in each of which there are $N/2$ particles in the
states $\rho_0$ and $\rho_1$ respectively. These states
are described by
\begin{align}
\nonumber \rho_0 &=
\frac{1}{2}\left( \rho_{f_0}+ \rho_{g_0} \right)\ \text{and}\\
\nonumber \rho_1&=\frac{1}{2}\left( \rho_{f_1}+ \rho_{g_1} \right) \,,
\end{align}
where $\textbf{f} = \{f_0,f_1\}$ and
$\textbf{g} = \{g_0,g_1\}$ are chosen such that
the state $\rho=\rho_0/2+\rho_1/2$ corresponds to the completely mixed state in dimension $2$.
We then make a projective measurement $\textbf{e}=\{e_0,e_1\}$ with two possible outcomes denoted by $0,1$.
More precisely, we insert two semi-transparent membranes instead of the wall
separating the two volumes. One of the membranes is transparent to $e_0$ but completely opaque to
$e_1$ while the other lets the particle pass if the outcome is $e_1$, but not if it was
$e_0$. Letting these membranes move apart until they are in equilibrium, we can
extract work from the system. The equilibrium is reached when, on both sides of the membrane which is
opaque for $e_1$, there is the same density of particles in this state, and similarly for the
membrane which is opaque for $e_0$.
The work which can be extracted from the first part of the
cycle (i.e., by going from {\ding{192}} to \ding{194} in Figure~\ref{fig:imp}) is
given by the following (see appendix).
\begin{align}
\nonumber W &=
NkT \ln 2\left( 1
- \frac{1}{2} H\left(\frac{1}{2}p(e_0|\rho_{f_0})
+ \frac{1}{2}p(e_0|\rho_{g_0})
\right)
\right. \\ \nonumber & \quad \left.
- \frac{1}{2} H\left(\frac{1}{2}p(e_1|\rho_{f_1})+ \frac{1}{2}p(e_1|\rho_{g_1}) \right)
\right)\\
\nonumber & \leq
NkT \ln 2\left( 1
- \frac{1}{2} H\left(\zeta_{(f_0,g_0)}\right)
- \frac{1}{2} H\left(\zeta_{(f_1,g_1)}\right)
\right)\,,
\end{align}
where we denoted by $\zeta$ the fine-grained uncertainty
relations. The inequality can be saturated by choosing
$e_0$ and $e_1$ to be maximally certain effects.~\footnote{It is easy to see
that in quantum mechanics the maximally certain effects $e_0$ and $e_1$ do
indeed form a complete measurement in dimension $2$.}
Note that our argument is not specific to the
outcome combination $(0_f,0_g)$ and $(1_f,1_g)$ used in the fine-grained uncertainty relation
and choosing the remaining two inequalities corresponding to outcomes $(0_f,1_g)$ and $(1_f,0_g)$
leads to an analogous argument.
\textbf{Example} For our quantum example given by the states~\eqref{eq:stateDef}
we obtain
\begin{align}
\nonumber W & \leq
NkT \ln 2\left( 1
- \frac{1}{2} H\left(\zeta_{(0_X,0_Z)}\right)
- \frac{1}{2} H\left(\zeta_{(1_X,1_Z)}\right)
\right)\,.
\end{align}
Equality is attained by taking $\{e_0,e_1\}$ to be the maximally certain effects
given by the two eigenstates of $(X+Z)/\sqrt{2}$.
\subsection*{Second part of the cycle}
In the second part we form a cycle (i.e., we go from {\ding{194}} to \ding{192} in Figure~\ref{fig:imp}).
We start with the
completely mixed state $\rho$. Denote the different pure components of
$\rho$ by $\{q_j,\sigma_j\}_j$, i.e., $\rho=\sum_j q_j\sigma_j$. We
can now `decompose' $\rho$ into its components
by inserting a semi-transparent membrane which
is opaque for a specific component $\sigma_j$, but completely
transparent for all other components. Effectively, this membrane
measures using the effects $h_{\sigma_j}$ that
are dual to the states $\sigma_j$. This membrane is used
to confine all states $\sigma_j$ in a volume $q_jV$. This is done for
all components and we end up with a box where each component of $\rho$ is sorted
in a volume proportional to its weight in the convex combination.
This process needs work proportional to $S(\rho)$.
In a second step, we create the (pure) components $\tau$ of
$\rho_0=\sum_j r_j^0 \tau_j^0$ and $\rho_1=\sum_j r_j^1 \tau_j^1$
from the pure components of $\rho$ and then `reassemble' the states
$\rho_0$ and $\rho_1$.
In order to do so, we subdivide the volumes containing $\sigma_j$
into smaller volumes,
such that the number of particles contained in these smaller
volumes are proportional to $p_0r_j^0$ and $p_1r_j^1$. The pure
state contained in each small volume is then transformed into the
pure state $\tau_j^0$ or $\tau_j^1$. Since these last states are
also pure, no work is needed for this transformation. Finally,
we `mix' the different components of $\rho_0$ together, which
allows us to extract work $p_0S(\rho_0)$. Similarly we obtain
work $p_1S(\rho_1)$ from $\rho_1$.
In total, the transformation
$\rho \rightarrow \{p_i,\rho_i\}$, needs work
\begin{align}
\nonumber W&=NkT \ln 2 (S(\rho)-\sum_i p_i S(\rho_i))\,.
\end{align}
{\bf Example} Returning to
the example above and using that the two eigenvalues of $\rho$
are $1/2$, we obtain
\begin{align}
\nonumber S(\rho)&= -2\cdot \frac{1}{2} \log_2 \frac{1}{2}=1\,.
\end{align}
Both $\rho_0$ and $\rho_1$ have the two eigenvalues
$\lbrace \frac{1}{2}+\frac{1}{2\sqrt{2}}, \frac{1}{2}-\frac{1}{2\sqrt{2}}\rbrace$.
Therefore,
\begin{align}
\nonumber S(\rho_i)&= H\left( \frac{1}{2}+\frac{1}{2\sqrt{2}} \right)\approx H(0.85)\,.
\end{align}
The total work which has to be invested for this process is therefore given by
\begin{align}
\nonumber W&=NkT \ln2\left( 1- H\left( \frac{1}{2}+\frac{1}{2\sqrt{2}} \right)\right) \,.
\end{align}
\subsection*{Closing the cycle}
If we now perform the first and second process described above
one after another (i.e., we perform a cycle, as
depicted in Figure~\ref{fig:imp}), the total work which can be
extracted is given by
\begin{align}
\nonumber \Delta W &= NkT \ln2 \left(
- \left( S(\rho)- \sum_i p_i S(\rho_i) \right)
\right. \\ \nonumber & \quad \left.
+
\left( 1
- \frac{1}{2} H\left(\zeta_{(f_0,g_0)}\right)
- \frac{1}{2} H\left(\zeta_{(f_1,g_1)}\right)\right)
\right)\,.
\end{align}
In general, we can see that when the uncertainty relation is
violated, this quantity can become positive and a positive
$\Delta W$ corresponds to a violation of the second law of thermodynamics.
{\bf Example} In our example, the above quantity
corresponds to
\begin{align}
\nonumber \Delta W &=
NkT \ln2 \left(
H\left( \frac{1}{2}+\frac{1}{2\sqrt{2}}\right)
\right. \\ \nonumber & \quad \left.
-\frac{1}{2} H\left(\zeta_{(0_X,0_Z)}\right)
-\frac{1}{2} H\left(\zeta_{(1_X,1_Z)}\right)
\right)\,.
\end{align}
The fine-grained uncertainty relations for quantum mechanics state that
$\zeta_{(0_X,0_Z)},\zeta_{(1_X,1_Z)}\leq \frac{1}{2}+\frac{1}{2\sqrt{2}}$. When
this value is reached with equality, then $\Delta W=0$ in the above calculation.
On the other hand if these values were larger, i.e., the uncertainty relation could be
violated, then the binary entropy of them would be smaller and $\Delta W$ becomes
positive.
\section{Discussion}
We give a strong argument why quantum mechanical uncertainty relations
should not be violated. Indeed, as we show,
a violation of the uncertainty relations would lead to an `impossible
machine' which could extract net work from a cycle.
Our result extends to more general theories than quantum theory --- however,
it raises the question of which general form of entropy~\cite{entropy}
is most significant. In quantum mechanics, the different entropies of~\cite{entropy}
coincide, meaning that if a physical theory is just like quantum mechanics,
but with a different amount of uncertainty, net work can be extracted.
Our cycle is similar to the ones given in~\cite{peres,cost,demon}, which study related questions:
We can understand uncertainty relations as given in~\eqref{eq:fineGrained} as imposing a limit
on how well one of several bits of information can be extracted
from a qubit using the given measurements~\cite{OppenheimWehner}. This means
that the amount of uncertainty for all pairs of measurements that we could
make directly imposes a limit on how much classical information we can
store in each qubit. Indeed, in any system that is finite dimensional
(possibly due to an energetic constraint), it is thus clear that the
mere fact that we can only store a finite
amount of information in a finite dimensional system (Holevo's
bound~\cite{holevo}) demands that non-commuting measurements obey
uncertainty relations. This shows that our example is closely related to
the ones given in~\cite{plenio,pleniovitelli,cost,demon} where it
has been shown that if it were possible to encode
more than one bit of information in a qubit and therefore to violate the
Holevo bound~\cite{holevo}, then it would also be possible to violate the second law of thermodynamics.
In~\cite{peres} similar consequences were shown to follow if one were able to perfectly
distinguish non-orthogonal quantum states. The possibility of distinguishing non-orthogonal states
is again directly related to the question of how much information we can store in a quantum state.
In future work, it might be interesting to investigate whether an
implication also holds in the other direction. Does any violation of
the second law lead to a violation of the uncertainty relations?
We have investigated the relation between uncertainty and the
second law of thermodynamics. A concept related to uncertainty is the
one of complementarity. It is an open question, whether a
violation of complementarity could also be used to build such an
impossible machine.
\textbf{Acknowledgments:} We thank Christian Gogolin,
Markus M\"uller, and Jonathan Oppenheim for helpful
discussions and comments on an earlier draft.
EH and SW acknowledge support from the National Research
Foundation (Singapore), and the
Ministry of Education (Singapore).
\begin{thebibliography}{37}
\expandafter\ifx\csname natexlab\endcsname\relax\def\natexlab#1{#1}\fi
\expandafter\ifx\csname bibnamefont\endcsname\relax
\def\bibnamefont#1{#1}\fi
\expandafter\ifx\csname bibfnamefont\endcsname\relax
\def\bibfnamefont#1{#1}\fi
\expandafter\ifx\csname citenamefont\endcsname\relax
\def\citenamefont#1{#1}\fi
\expandafter\ifx\csname url\endcsname\relax
\def\url#1{\texttt{#1}}\fi
\expandafter\ifx\csname urlprefix\endcsname\relax\def\urlprefix{URL }\fi
\providecommand{\bibinfo}[2]{#2}
\providecommand{\varepsilonrint}[2][]{\url{#2}}
\bibitem[{\citenamefont{Heisenberg}(1927)}]{heisenberg27}
\bibinfo{author}{\bibfnamefont{W.}~\bibnamefont{Heisenberg}},
\bibinfo{journal}{Zeitschrift f{\"u}r Physik} \textbf{\bibinfo{volume}{43}},
\bibinfo{pages}{172} (\bibinfo{year}{1927}).
\bibitem[{\citenamefont{Bell}(1964)}]{bell}
\bibinfo{author}{\bibfnamefont{J.~S.} \bibnamefont{Bell}},
\bibinfo{journal}{Physics} \textbf{\bibinfo{volume}{1}}, \bibinfo{pages}{195}
(\bibinfo{year}{1964}).
\bibitem[{\citenamefont{Tsirelson}(1980)}]{tsirel:original}
\bibinfo{author}{\bibfnamefont{B.}~\bibnamefont{Tsirelson}},
\bibinfo{journal}{Letters in Mathematical Physics}
\textbf{\bibinfo{volume}{4}}, \bibinfo{pages}{93} (\bibinfo{year}{1980}).
\bibitem[{\citenamefont{Tsirelson}(1987)}]{tsirel:separated}
\bibinfo{author}{\bibfnamefont{B.}~\bibnamefont{Tsirelson}},
\bibinfo{journal}{Journal of Soviet Mathematics}
\textbf{\bibinfo{volume}{36}}, \bibinfo{pages}{557} (\bibinfo{year}{1987}).
\bibitem[{\citenamefont{Popescu and Rohrlich}(1994)}]{PR}
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Popescu}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Rohrlich}},
\bibinfo{journal}{Foundations of Physics} \textbf{\bibinfo{volume}{24}},
\bibinfo{pages}{379} (\bibinfo{year}{1994}).
\bibitem[{\citenamefont{Popescu and Rohrlich}(1996)}]{PR1}
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Popescu}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Rohrlich}}, in
\emph{\bibinfo{booktitle}{The dilemma of Einstein, Podolsky and Rosen, 60
years later: International symposium in honour of Nathan Rosen}}, edited by
\bibinfo{editor}{\bibfnamefont{A.}~\bibnamefont{Mann}} \bibnamefont{and}
\bibinfo{editor}{\bibfnamefont{M.}~\bibnamefont{Revzen}}
(\bibinfo{publisher}{Israel Physical Society, Haifa, Israel},
\bibinfo{year}{1996}).
\bibitem[{\citenamefont{Popescu and Rohrlich}(1997)}]{PR2}
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Popescu}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Rohrlich}}, in
\emph{\bibinfo{booktitle}{Proceedings of the Symposium of Causality and
Locality in Modern Physics and Astronomy: Open Questions and Possible
Solutions}}, edited by
\bibinfo{editor}{\bibfnamefont{G.}~\bibnamefont{Hunter}},
\bibinfo{editor}{\bibfnamefont{S.}~\bibnamefont{Jeffers}}, \bibnamefont{and}
\bibinfo{editor}{\bibfnamefont{J.-P.} \bibnamefont{Vigier}}
(\bibinfo{publisher}{Kluwer Academic Publishers, Dordrecht/Boston/London},
\bibinfo{year}{1997}), p. \bibinfo{pages}{383}.
\bibitem[{\citenamefont{Brassard et~al.}(2006)\citenamefont{Brassard, Buhrman,
Linden, M\'ethot, Tapp, and Unger}}]{function}
\bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Brassard}},
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Buhrman}},
\bibinfo{author}{\bibfnamefont{N.}~\bibnamefont{Linden}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{M\'ethot}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Tapp}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Unger}},
\bibinfo{journal}{Physical Review Letters} \textbf{\bibinfo{volume}{96}},
\bibinfo{pages}{250401} (\bibinfo{year}{2006}).
\bibitem[{\citenamefont{Navascu\'{e}s}(2010)}]{glance}
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Navascu\'{e}s},
\bibfnamefont{M.and~Wunderlich}}, \bibinfo{journal}{Proceedings of the Royal
Society A: Mathematical, Physical and Engineering Science}
\textbf{\bibinfo{volume}{466}}, \bibinfo{pages}{881} (\bibinfo{year}{2010}).
\bibitem[{\citenamefont{Barnum et~al.}(2010{\natexlab{a}})\citenamefont{Barnum,
Beigi, Boixo, Elliot, and Wehner}}]{s3:nonlocal}
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Barnum}},
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Beigi}},
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Boixo}},
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Elliot}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Wehner}},
\bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{104}},
\bibinfo{pages}{140401} (\bibinfo{year}{2010}{\natexlab{a}}).
\bibitem[{\citenamefont{Pawlowski et~al.}(2009)\citenamefont{Pawlowski,
Paterek, Kaszlikowski, Scarani, Winter, and Zukowski}}]{causality}
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Pawlowski}},
\bibinfo{author}{\bibfnamefont{T.}~\bibnamefont{Paterek}},
\bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Kaszlikowski}},
\bibinfo{author}{\bibfnamefont{V.}~\bibnamefont{Scarani}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Winter}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Zukowski}},
\bibinfo{journal}{Nature} \textbf{\bibinfo{volume}{461}},
\bibinfo{pages}{1101} (\bibinfo{year}{2009}).
\bibitem[{\citenamefont{Oppenheim and Wehner}(2010)}]{OppenheimWehner}
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Oppenheim}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Wehner}},
\bibinfo{journal}{Science} \textbf{\bibinfo{volume}{330}},
\bibinfo{pages}{1072} (\bibinfo{year}{2010}).
\bibitem[{\citenamefont{Barrett}(2007)}]{barrett}
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Barrett}},
\bibinfo{journal}{Phys. Rev. A} \textbf{\bibinfo{volume}{75}},
\bibinfo{pages}{032304} (\bibinfo{year}{2007}).
\bibitem[{\citenamefont{Barnum et~al.}(2007)\citenamefont{Barnum, Barrett,
Leifer, and Wilce}}]{leifer}
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Barnum}},
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Barrett}},
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Leifer}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Wilce}},
\bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{99}},
\bibinfo{pages}{240501} (\bibinfo{year}{2007}).
\bibitem[{\citenamefont{Hardy}(2001)}]{hardy}
\bibinfo{author}{\bibfnamefont{L.}~\bibnamefont{Hardy}} (\bibinfo{year}{2001}),
\bibinfo{note}{quant-ph/0101012}.
\bibitem[{\citenamefont{D'Ariano}(2008)}]{dariano}
\bibinfo{author}{\bibfnamefont{G.~M.} \bibnamefont{D'Ariano}}
(\bibinfo{year}{2008}), \bibinfo{note}{arXiv:0807.4383}.
\bibitem[{\citenamefont{Barnum and Wilce}(2008)}]{howard:survey}
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Barnum}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Wilce}}
(\bibinfo{year}{2008}), \bibinfo{note}{arXiv:0908.2352}.
\bibitem[{\citenamefont{Barnum et~al.}(2008)\citenamefont{Barnum, Barrett,
Leifer, and Wilce}}]{teleportation}
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Barnum}},
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Barrett}},
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Leifer}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Wilce}}, in
\emph{\bibinfo{booktitle}{Proceedings of the Clifford Lectures}}
(\bibinfo{year}{2008}), \eprint{0805.3553}.
\bibitem[{\citenamefont{Short and Wehner}(2010)}]{entropy}
\bibinfo{author}{\bibfnamefont{A.~J.} \bibnamefont{Short}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Wehner}},
\bibinfo{journal}{New Journal of Physics} \textbf{\bibinfo{volume}{12}},
\bibinfo{pages}{033023} (\bibinfo{year}{2010}).
\bibitem[{\citenamefont{Deutsch}(1983)}]{deutsch83}
\bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Deutsch}},
\bibinfo{journal}{Physical Review Letters} \textbf{\bibinfo{volume}{50}},
\bibinfo{pages}{631} (\bibinfo{year}{1983}).
\bibitem[{\citenamefont{Maassen and Uffink}(1988)}]{maassen88}
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Maassen}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Uffink}},
\bibinfo{journal}{Physical Review Letters} \textbf{\bibinfo{volume}{60}},
\bibinfo{pages}{1103} (\bibinfo{year}{1988}).
\bibitem[{\citenamefont{Wehner and Winter}(2010)}]{WehnerWinter}
\bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Wehner}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Winter}},
\bibinfo{journal}{New Journal of Physics} \textbf{\bibinfo{volume}{12}},
\bibinfo{pages}{025009} (\bibinfo{year}{2010}).
\bibitem[{\citenamefont{Barnum et~al.}(2010{\natexlab{b}})\citenamefont{Barnum,
Barrett, Clark, Leifer, Spekkens, Stepanik, Wilce, and
Wilke}}]{barnum:entropy}
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Barnum}},
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Barrett}},
\bibinfo{author}{\bibfnamefont{L.}~\bibnamefont{Clark}},
\bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Leifer}},
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Spekkens}},
\bibinfo{author}{\bibfnamefont{N.}~\bibnamefont{Stepanik}},
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Wilce}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Wilke}},
\bibinfo{journal}{New Journal of Physics} \textbf{\bibinfo{volume}{12}},
\bibinfo{pages}{033024} (\bibinfo{year}{2010}{\natexlab{b}}).
\bibitem[{\citenamefont{Kimura et~al.}(2010)\citenamefont{Kimura, Nuida, and
Imai}}]{japanese:entropy}
\bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Kimura}},
\bibinfo{author}{\bibfnamefont{K.}~\bibnamefont{Nuida}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Imai}},
\bibinfo{journal}{Rep. Math. Phys} \textbf{\bibinfo{volume}{66}},
\bibinfo{pages}{175} (\bibinfo{year}{2010}).
\bibitem[{\citenamefont{Peres}(1993)}]{peres}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Peres}},
\emph{\bibinfo{title}{Quantum theory: concepts and methods}}, Fundamental
theories of physics (\bibinfo{publisher}{Kluwer Academic},
\bibinfo{year}{1993}).
\bibitem[{\citenamefont{Maruyama et~al.}(2009)\citenamefont{Maruyama, Nori, and
Vedral}}]{demon}
\bibinfo{author}{\bibfnamefont{K.}~\bibnamefont{Maruyama}},
\bibinfo{author}{\bibfnamefont{F.}~\bibnamefont{Nori}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{V.}~\bibnamefont{Vedral}},
\bibinfo{journal}{Reviews of Modern Physics} \textbf{\bibinfo{volume}{81}},
\bibinfo{pages}{1} (\bibinfo{year}{2009}).
\bibitem[{\citenamefont{Maruyama et~al.}(2005)\citenamefont{Maruyama, Brukner,
and Vedral}}]{cost}
\bibinfo{author}{\bibfnamefont{K.}~\bibnamefont{Maruyama}},
\bibinfo{author}{\bibfnamefont{{\v{C}}.}~\bibnamefont{Brukner}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{V.}~\bibnamefont{Vedral}},
\bibinfo{journal}{Journal of Physics A: Mathematical and General}
\textbf{\bibinfo{volume}{38}}, \bibinfo{pages}{7175} (\bibinfo{year}{2005}).
\bibitem[{\citenamefont{Holevo}(1973)}]{holevo}
\bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Holevo}},
\bibinfo{journal}{Problems of Information Transmission}
\textbf{\bibinfo{volume}{9}}, \bibinfo{pages}{3} (\bibinfo{year}{1973}).
\bibitem[{\citenamefont{Plenio}(1999)}]{plenio}
\bibinfo{author}{\bibfnamefont{M.~B.} \bibnamefont{Plenio}},
\bibinfo{journal}{Physics Letters~A} \textbf{\bibinfo{volume}{263}},
\bibinfo{pages}{281 } (\bibinfo{year}{1999}).
\bibitem[{\citenamefont{Plenio and Vitelli}(2001)}]{pleniovitelli}
\bibinfo{author}{\bibfnamefont{M.~B.} \bibnamefont{Plenio}} \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{V.}~\bibnamefont{Vitelli}},
\bibinfo{journal}{Contemporary Physics} \textbf{\bibinfo{volume}{42}},
\bibinfo{pages}{25} (\bibinfo{year}{2001}).
\bibitem[{\citenamefont{M{\"u}ller and Ududec}(2012)}]{markusprl}
\bibinfo{author}{\bibfnamefont{M.~P.} \bibnamefont{M{\"u}ller}}
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Ududec}},
\bibinfo{journal}{Physical Review Letters} \textbf{\bibinfo{volume}{108}},
\bibinfo{pages}{130401} (\bibinfo{year}{2012}).
\bibitem[{\citenamefont{Pfister}(2012)}]{corsin:mthesis}
\bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Pfister}}, Master's thesis,
\bibinfo{school}{ETH Zurich and CQT Singapore} (\bibinfo{year}{2012}),
\bibinfo{note}{arXiv:1203.5622}.
\bibitem[{\citenamefont{{Von Neumann}}(1955)}]{vonNeumann:entropyDef}
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{{Von Neumann}}},
\emph{\bibinfo{title}{Mathematische Grundlagen der Quantenmechanik}}
(\bibinfo{publisher}{Springer, Berlin}, \bibinfo{year}{1955}).
\bibitem[{\citenamefont{Barrett}(2011)}]{barrett:entropy}
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Barrett}}
(\bibinfo{year}{2011}), \bibinfo{note}{personal communication}.
\bibitem[{\citenamefont{M{\"u}ller and Oppenheim}(2012)}]{jonathan:entropy}
\bibinfo{author}{\bibfnamefont{M.~P.} \bibnamefont{M{\"u}ller}}
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Oppenheim}}
(\bibinfo{year}{2012}), \bibinfo{note}{personal communication}.
\bibitem[{\citenamefont{M{\"u}ller et~al.}(2011)\citenamefont{M{\"u}ller,
Dahlsten, and Vedral}}]{markus:decoupling}
\bibinfo{author}{\bibfnamefont{M.~P.} \bibnamefont{M{\"u}ller}},
\bibinfo{author}{\bibfnamefont{O.}~\bibnamefont{Dahlsten}}, \bibnamefont{and}
\bibinfo{author}{\bibfnamefont{V.}~\bibnamefont{Vedral}}
(\bibinfo{year}{2011}), \bibinfo{note}{arXiv:1107.6029}.
\bibitem[{\citenamefont{M{\"u}ller et~al.}(2012)\citenamefont{M{\"u}ller,
Oppenheim, and Dahlsten}}]{markus:newdecoupling}
\bibinfo{author}{\bibfnamefont{M.~P.} \bibnamefont{M{\"u}ller}},
\bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Oppenheim}},
\bibnamefont{and} \bibinfo{author}{\bibfnamefont{O.}~\bibnamefont{Dahlsten}}
(\bibinfo{year}{2012}), \bibinfo{note}{in preparation}.
\end{thebibliography}
\appendix
\section*{General theories}
In this appendix, we extend our result to more general physical theories. To this end,
we need to introduce several additional assumptions and entropies.
As quantum mechanics satisfies all
assumptions made here, the derivation below can also be taken as a
detailed explanation of the results claimed in the
front matter.
In the main text, we have shown that a violation of uncertainty relations
leads to a violation of the second law of thermodynamics for the
quantum case. More precisely, we have assumed that all processes can be
described by the quantum formalism, with the exception of the
uncertainty relations. We now want to show that our result still
holds when the physical processes have to be described by a general
convex theory. We need several assumptions on these theories, which
we clearly state below.
\subsection*{General assumptions}
As already outlined we will make three very common assumptions on
a generalized physical theory. The first two are thereby essentially
made everywhere~\cite{howard:survey}, the third is made very
often (but not always, see e.g.~\cite{entropy}). We label assumptions as $A\cdot$.
Whereas such assumptions may seem rather elaborate, there are physical reasons for assuming them.
For example, a property known as bit symmetry~\cite{markusprl} implies A3, A6 and A7.
\begin{description}
\item[A1] The state space $\Omega$ is convex.
\item[A2\label{it:lin}] Effects are \emph{linear} functionals.
\item[A3\label{it:dual}] Pure states are dual to pure effects as outlined in the
background section. Uncertainty relations can thus be stated
equivalently in terms of states or measurements\footnote{In~\cite{markusprl} it
was shown that such a duality holds at least for any theory which has
a property called `bit symmetry', which means that it allows for reversible
computation.}.
\end{description}
Next, we will assume that pure effects are \emph{projective} in that there exists a way to implement
them in a physical measurement such that if we repeatedly apply
an effect $e_i$ by repeating the measurement we again obtain
the same outcome. That is, $p(e_ie_i|\rho)=p(e_i|\rho)$ for all $\rho$,
where with some abuse of notation we take $p(e_ie_i|\rho)$ to be the probability
of observing $e_i$ when making the measurement twice in a row, and don't consider
the outcome of the first. A measurement is projective if it consists only of projective effects.
Note that this is not the same as demanding that post-measurement
states are the same for all $\rho$, which has significant
consequences~\cite{corsin:mthesis}.
\begin{description}
\item[A4] Pure effects are projective.
\end{description}
We will also assume that the unit effect $u$, i.e.
the effect satisfying $u(\rho) = 1$ for all $\rho$, has a dual state that
is analogous to the maximally mixed state. If we accept duality between
states and measurements, then this assumption is very natural.
\begin{description}
\item[A5\label{it:halfe}] If $f_0 + f_1 = u$ for two effects $f_0$ and $f_1$,
then the dual states $\rho_{f_0} + \rho_{f_1} = \rho_{u}$
and $e(\rho_{u}/2) = 1/2$ for all pure effects $e$.
\end{description}
Note that this assumption again implies that we are dealing with the analogue of a qubit, i.e. a two-level
system. It is possible to extend our statements for quantum mechanics to traceless two-outcome observables
but as this requires additional assumptions in generality, we omit it.
Our next assumption, however, is rather strong and significant, and extends
beyond the duality of states and measurements. It is of course satisfied by quantum mechanics.
\begin{description}
\item[A6\label{it:cycle2}] Let $\rho = \sum_{j=1}^d q_j \sigma_j$ be a decomposition of $\rho$ into perfectly distinguishable pure states $\sigma_j$:
Let $h_{\sigma_j}$ denote the pure effect dual to $\sigma_j$. Then $\sum_{j=1}^d h_{\sigma_j} = u$
and $h_k(\sigma_j) = \delta_{jk}$ for all $j$ and $k$.
\end{description}
Finally, we will also need that pure states can be transformed into
different pure states and that this does not require any work. In quantum
mechanics, this is justified since the transformation just corresponds
to a unitary.
\begin{description}
\item[A7\label{it:pure}] Let $\rho$ and $\sigma$ be pure states. Then
the transformation $\rho\rightarrow \sigma$ is reversible (and thus does not require
any work, neither can any work be gained from it).
\end{description}
\subsection*{Entropies}
Several definitions of entropy are possible in generalized
theories~\cite{entropy} that happen to coincide in quantum
mechanics. The first is the so-called \emph{decomposition entropy} given as
\begin{align}
S(\rho) = \min_{\substack{\{p_j,\rho_j\}_j\\\rho = \sum_{j=1}^d p_j \sigma_j}} H(\{p_1,\ldots,p_d\})\,,
\end{align}
where the minimization is taken over decompositions into pure
states $\sigma_j$ and $H$ is the Shannon entropy. Here, we will take the minimum
over decompositions into perfectly distinguishable pure states. Note that the resulting quantity
is equally well defined, but avoids an unnecessary strengthening of assumption A6.
To define the other notion of entropy, we will need the following definition of
\emph{maximally fine-grained measurements}~\cite{entropy} as
measurements such that each of its effects cannot be re-expressed
as a non-trivial linear combination, i.e.,
\begin{align}
\nonumber \textbf{e}&=\{e_i\}_i :\text{ maximally fine-grained }\Leftrightarrow \\
\nonumber
& \text{ for all }e_i:\ e_i=\alpha e_\alpha^\prime+\beta e_\beta^\prime,\ \alpha,\beta>0 \ \Rightarrow\ e_\alpha^\prime=e_\beta^\prime\,.
\end{align}
We also call any effect satisfying this equation fine-grained.
Note that pure effects are automatically maximally fine-grained.
The \emph{measurement entropy} is then given by
\begin{align}
H(\rho) = \min_{\{e_j\}_{j=1}^\ell} H(\{e_1(\rho),\ldots,e_{\ell}(\rho)\})\,,
\end{align}
where the minimization is taken over maximally fine-grained measurements.
Finally, it would be possible to define entropies by a thermodynamical process itself~\cite{vonNeumann:entropyDef}, even
in general physical theories~\cite{barrett:entropy}.
In such a setting, also a difference between the decomposition and measurement entropy can
lead to a violation of the second law~\cite{jonathan:entropy}. As such, it is still under investigation what is the most relevant
entropy~\cite{entropy} in general theories, also when it comes to operational tasks from quantum information such as decoupling~\cite{markus:decoupling,markus:newdecoupling}.
\subsection*{First part of the cycle}
Below we state the explicit calculation of the work which can be
extracted from the first part of the cycle. The
measurements $\textbf{f} = \{f_0,f_1\}$ and $\textbf{g} = \{g_0,g_1\}$ are
now not necessarily quantum mechanical, but do obey the assumptions above.
We use a generalized notion of the completely mixed state and a projective measurement.
In order to determine the position of the semi-transparent membranes
in equilibrium, we assume that they perform a
projective measurement, in the sense that a particle which is once
measured to be $e_0$ ($e_1$) will give outcome $e_0$ ($e_1$) with certainty when
the measurement is repeated, and never outcome $e_1$ ($e_0$).
Additionally, we will use the following definitions in our calculation.
\begin{description}
\item[D1\label{it:def}] $\rho_0$ is the mixture of $\rho_{f_0}$ and $\rho_{g_0}$, and $\rho_1$ of $\rho_{f_1}$ and $\rho_{g_1}$, i.e.,
\begin{align}
\nonumber \rho_0 &=
\frac{1}{2}\left( \rho_{f_0}+ \rho_{g_0} \right)\\
\nonumber \rho_1&=\frac{1}{2}\left( \rho_{f_1}+ \rho_{g_1} \right) \,.
\end{align}
\item[D2\label{it:halfp}] We choose an equal mixture of $\rho_0$ and $\rho_1$, i.e., $p_i=1/2$ for all $i$.
\end{description}
Note
that by assumption A5 the state $\rho=\rho_0/2+\rho_1/2$ has analogous properties to the completely mixed state in dimension $2$, i.e., $p(e_j)=\sum_i p_i p(e_j|\rho_i)=1/2$ for all~$j$.
\begin{description}
\item[D3\label{it:binary}] We make a measurement with binary outcomes, i.e., $p(e_j|\rho_i)=1-p(e_{\bar{j}}|\rho_i)$.
\end{description}
The numbers on top of the equation refer to the assumptions and/or definitions
stated above which are used in this step of the calculation.
\begin{align}
\nonumber W &= NkT\left(
\sum_{i,j}
p_i p(e_j|\rho_i)\ln(p_i p(e_j|\rho_i))
\right. \\ \nonumber & \quad \left.
-
\sum_j p(e_j)\ln p(e_j) - \sum_i p_i \ln p_i \right) \\
\nonumber
&\mathop{=}^{\ref{it:halfp},\ref{it:halfe}} NkT \ln 2\left(
\sum_{i,j}
p_i p(e_j|\rho_i)\log (p_i p(e_j|\rho_i))
\right. \\ \nonumber & \quad \left.
-
\log \frac{1}{2} - \log \frac{1}{2}\right) \\
\nonumber
&\mathop{=}^{\ref{it:halfp}}
\nonumber
NkT \ln 2\left( 2
\vphantom{\frac{1}{2}}\right. \\ \nonumber & \quad \left.
+
\frac{1}{2} \sum_{i,j}
p(e_j|\rho_i)\left( \log \frac{1}{2}+\log p(e_j|\rho_i)\right) \right)
\\
&=
\nonumber
NkT \ln 2\left( 1+
\frac{1}{2} \sum_{i,j}
p(e_j|\rho_i)\left( \log p(e_j|\rho_i)\right) \right)
\\
&\mathop{=}^{\ref{it:binary}}
\nonumber
NkT \ln 2\left( 1+
\frac{1}{2} \sum_{i}\left(
p(e_j|\rho_i) \log p(e_j|\rho_i)
\right. \right. \\ \nonumber & \quad \left. \vphantom{\frac{1}{2}} \left.
+
(1-p(e_j|\rho_i)) \log (1- p(e_j|\rho_i))
\right)
\right)
\\
&=
\nonumber
NkT \ln 2\left( 1
- \frac{1}{2} \sum_{i}H(p(e_j|\rho_i))
\right)
\\
&=
\nonumber
NkT \ln 2\left( 1
- \frac{1}{2} H\left(p\left(e_j\middle|\frac{1}{2}\rho_{f_0}+\frac{1}{2}\rho_{g_0} \right)\right)
\right. \\ \nonumber & \quad \left.
-
\frac{1}{2} H\left(p\left(e_j \middle| \frac{1}{2}\rho_{f_1}+\frac{1}{2}\rho_{g_1}\right)
\right)
\right)\\
&\mathop{=}^{\ref{it:lin}}
\nonumber
NkT \ln 2\left( 1
- \frac{1}{2} H\left(\frac{1}{2} p\left(e_j\middle |\rho_{f_0}\right)+\frac{1}{2}p\left(e_j\middle|\rho_{g_0}\right)\right)
\right. \\ \nonumber & \quad \left.
-
\frac{1}{2} H\left(\frac{1}{2}p\left(e_j\middle|\rho_{f_1}\right)+\frac{1}{2}p\left(e_j\middle|\rho_{g_1}\right)\right)
\right)\\
\nonumber
&\mathop{=}^{\ref{it:dual}} NkT \ln 2\left( 1
- \frac{1}{2} H\left(\frac{1}{2}p(f_0|\rho_{e_j})
+ \frac{1}{2}p(g_0|\rho_{e_j})
\right)
\right. \\ \nonumber & \quad \left.
- \frac{1}{2} H\left(\frac{1}{2}p(f_1|\rho_{e_j})+ \frac{1}{2}p(g_1|\rho_{e_j}) \right)
\right)\\
\label{eq:cycle1} & \leq
NkT \ln 2\left( 1
- \frac{1}{2} H\left(\zeta_{(f_0,g_0)}\right)
- \frac{1}{2} H\left(\zeta_{(f_1,g_1)}\right)
\right)\,.
\end{align}
Equality is achieved when the measurement can be formed from the maximally certain effects.
\subsection*{Second part of the cycle}
We calculate the work needed for the second part of the
cycle in two parts. First let us calculate the work needed to `decompose' $\rho$ into
its different pure components. We use the effects $h_{\sigma_j}$, which
are dual to the pure states $\sigma_j$ which form the components of
$\rho$, i.e., $\rho=\sum_j q_j \sigma_j$. Note that we can do this for any decomposition, in particular the one minimizing $S(\rho)$.
\begin{align}
\nonumber W&=-NkT \ln 2 \left(\sum_j p(h_{\sigma_j}|\rho)\log p(h_{\sigma_j}|\rho) \right)\\
&=
\nonumber
-NkT \ln 2 \left(\sum_j p\left(h_{\sigma_j}\middle|\sum_{j^\prime}q_{j^\prime}\sigma_{j^\prime}\right)
\right. \\ & \quad \left.
\nonumber
\log p\left(h_{\sigma_j}\middle|\sum_{j^\prime}q_{j^\prime}\sigma_{j^\prime}\right) \right)\\
&\mathop{=}^{\ref{it:lin}}
\nonumber
-NkT \ln 2 \left(\sum_j \left(\sum_{j^\prime} q_{j^\prime} p\left(h_{\sigma_j}\middle|\sigma_{j^\prime}\right)\right)
\right. \\ & \quad \left.
\nonumber
\log \left(\sum_{j^\prime} q_{j^\prime} p\left(h_{\sigma_j}\middle|\sigma_{j^\prime}\right)\right) \right)\\
&\mathop{=}^{\ref{it:cycle2}}
\nonumber
-NkT \ln 2 \left(\sum_{j^\prime} q_{j^\prime} \log q_{j^\prime} \right)\\
\label{eq:decompentropy}
&= NkT \ln 2 S(\rho)\,.
\end{align}
We then transform the pure states $\sigma_j$ into the pure states $\tau^0_j$ or $\tau^1_j$.
By~\ref{it:pure}, this does not require any work.
By performing a processes analogous to~\eqref{eq:decompentropy} but in the reverse direction,
we can then extract work $NkT \ln 2 \sum_i p_i S(\rho_i)$ by
`reassembling' the states $\rho_0$ and $\rho_1$. Overall, the work
needed for the second part of the cycle is given by
\begin{align}
W&=NkT \ln 2 (S(\rho)-\sum_i p_i S(\rho_i))\,.
\label{eq:cycle2}
\end{align}
\subsection*{Closing the cycle}
From the above calculation, i.e., by subtracting~\eqref{eq:cycle2}
from~\eqref{eq:cycle1}, we see that for the total
cycle, the amount of work which can be extracted is
given by
\begin{align}
\nonumber \Delta W &= NkT \ln2 \left(
- \left( S(\rho)- \sum_i p_i S(\rho_i) \right)
\right. \\ \nonumber & \quad \left.
+
\left( 1
- \frac{1}{2} H\left(\zeta_{(f_0,g_0)}\right)
- \frac{1}{2} H\left(\zeta_{(f_1,g_1)}\right)\right)
\right)\,,
\end{align}
where, however, $S$ is now the general decomposition
entropy. A net work gain of this cycle, and therefore
a violation of the second law of thermodynamics, can
therefore be reached if the uncertainty relations
can be violated without at the same time changing
the decomposition entropy.
\end{document} |
\begin{document}
\begin{abstract}
We prove the Abelian/non-Abelian Correspondence with bundles for target spaces that are partial flag bundles, combining and generalising results by Ciocan-Fontanine--Kim--Sabbah, Brown, and Oh. From this we deduce how genus-zero Gromov--Witten invariants change when a smooth projective variety $X$ is blown up in a complete intersection defined by convex line bundles. In the case where the blow-up is Fano, our result gives closed-form expressions for certain genus-zero invariants of the blow-up in terms of invariants of $X$. We also give a reformulation of the Abelian/non-Abelian Correspondence in terms of Givental's formalism, which may be of independent interest.
\end{abstract}
\maketitle
\section{Introduction}
Gromov--Witten invariants, roughly speaking, count the number of curves in a projective variety $X$ that are constrained to pass through various cycles. They play an essential role in mirror symmetry, and have been the focus of intense activity in symplectic and algebraic geometry over the last 25 years. Despite this, there are few effective tools for computing the Gromov--Witten invariants of blow-ups. In this paper we improve the situation somewhat: we determine how genus-zero Gromov--Witten invariants change when a smooth projective variety $X$ is blown up in a complete intersection of convex line bundles. In the case where the blow-up $\tilde{X}$ is Fano, a special case of our result gives closed-form expressions for genus-zero one-point descendant invariants of $\tilde{X}$ in terms of invariants of $X$, and hence determines the small $J$-function of $\tilde{X}$.
Suppose that $Z \subset X$ is the zero locus of a regular section of a direct sum of convex (or nef) line bundles $$E = L_0 \oplus \cdots \oplus L_r \to X$$ and that $\tilde{X}$ is the blow-up of $X$ in $Z$. To determine the genus-zero Gromov--Witten invariants of $\tilde{X}$, we proceed in two steps. First, we exhibit $\tilde{X}$ as the zero locus of a section of a convex vector bundle on the bundle of Grassmannians $\Gr(r,E^\vee) \to X$: this is Theorem~\ref{step one} below. We then establish a version of the Abelian/non-Abelian Correspondence~\cite{CFKS2008} that determines the genus-zero Gromov--Witten invariants of such zero loci. This is the Abelian/non-Abelian Correspondence with bundles, for target spaces that are partial flag bundles -- see Theorem~\ref{step two}. It builds on and generalises results by Ciocan-Fontanine--Kim--Sabbah~\cite[\S6]{CFKS2008}, Brown~\cite{Brown2014}, and Oh~\cite{Oh2016}.
\begin{theorem}[see Proposition~\ref{geometricconstruction} below for a more general result] \label{step one}
Let $X$ be a smooth projective variety, let $E = L_0 \oplus \cdots \oplus L_r \to X$ be a direct sum of line bundles, and let $Z \subset X$ be the zero locus of a regular section $s$ of $E$. Let $\pi \colon \Gr(r,E^\vee) \to X$ be the Grassmann bundle of subspaces and let $S \to \Gr(r,E^\vee)$ be the tautological subbundle. Then the composition $$S \hookrightarrow \pi^*E^\vee \xrightarrow{\pi^* s^\vee} \cO $$ defines a regular section of $S^\vee$, and the zero locus of this section is the blow-up $\tilde{X} = \Bl_Z X$.
\end{theorem}
\noindent If the line bundles $L_i$ are convex, then the bundle $S^\vee$ is also convex.
The fact that $\tilde{X}$ is regularly embedded into $\Gr(r, E^\vee)\cong \PP(E)$ (where $\PP(E)$ is the projective bundle of lines) is well-known and true in more generality, see for example \cite[Appendix B8.2]{FultonIntersection} and \cite[Lemma $2.1$]{AluffiChernClasses}. However, to apply the Abelian/non-Abelian correspondence, the crucial point is that $\tilde{X}$ is cut out by a regular section of an explicit representation-theoretic bundle on $\Gr(r, E^\vee)$. Although this should be well-known to experts, we have been unable to find a reference for this.
To apply Theorem~\ref{step one} to Gromov--Witten theory, and to state the Abelian/non-Abelian Correspondence, we will use Givental's formalism~\cite{Givental2004}. This is a language for working with Gromov--Witten invariants and operations on them, in terms of linear symplectic geometry. We give details in \S\ref{givental formalism} below, but the key ingredients are, for each smooth projective variety $Y$, an infinite-dimensional symplectic vector space $\cH_Y$ called the Givental space and a Lagrangian submanifold $\cL_Y \subset \cH_Y$. Genus-zero Gromov--Witten invariants of $Y$ determine and are determined by $\cL_Y$.
We will also consider \emph{twisted} Gromov--Witten invariants~\cite{CoatesGivental2007}. These are invariants of a projective variety $Y$ which depend also on a bundle $F \to Y$ and a characteristic class $\mathbf{c}$. For us, this characteristic class will always be the equivariant Euler class (or total Chern class)
\begin{align} \label{intro equivariant Euler}
\mathbf{c}(V) = \sum_{k=0}^d \lambda^{d-k} c_k(V) && \text{where $d$ is the rank of the vector bundle $V$.}
\end{align}
The parameter $\lambda$ here can be thought of as the generator for the $S^1$-equivariant cohomology of a point. There is a Lagrangian submanifold $\cL_{F_\lambda} \subset \cH_Y$ that encodes genus-zero Euler-twisted invariants of $Y$; the Quantum Riemann--Roch theorem~\cite{CoatesGivental2007} implies that
\[
\Delta_{F_\lambda} \cL_Y = \cL_{F_\lambda}
\]
where $\Delta_{F_\lambda} \colon \cH_Y \to \cH_Y$ is a certain linear symplectomorphism. This gives a family of Lagrangian submanifolds $\lambda \mapsto \cL_{F_\lambda}$ defined over $\QQ(\lambda)$, that is, a meromorphic family of Lagrangian submanifolds parameterised by $\lambda$. When $F$ satisfies a positivity condition called convexity, the family $\lambda \mapsto \cL_\lambda$ extends analytically across $\lambda=0$ and the limit $\cL_{F_0}$ exists. This limiting submanifold $\cL_{F_0} \subset \cH_Y$ determines genus-zero Gromov--Witten invariants of the subvariety of $Y$ cut out by a generic section of $F$~\cite{CoatesGivental2007,Coates2014}. Theorem~\ref{step one} therefore allows us to determine genus-zero Gromov--Witten invariants of the blow-up $\tilde{X}$, by analyzing the limiting submanifold~$\cL_{S^\vee_0}$.
Our second main result, Theorem~\ref{step two}, applies to the Grassmann bundle $\Gr(r,E^\vee) \to X$ considered in Theorem~\ref{step one}, and more generally to any partial flag bundle $\Fl(E) \to X$ induced by $E$. Such a partial flag bundle can be expressed as a GIT quotient $A /\!\!/ G$, where $G$ is a product of general linear groups, and so any representation $\rho$ of $G$ on a vector space $V$ induces a vector bundle $V^G \to \Fl(E)$ with fiber $V$. See \S\ref{flag} for details of the construction. We give an explicit family of elements of $\cH_{\Fl(E)}$,
\begin{align} \label{intro twisted I}
(t, \tau) \mapsto I_{\GM}(t, \tau, z) && \text{$t \in \CC^R$ for some $R$, $\tau \in H^\bullet(X)$}
\end{align}
defined in terms of genus-zero Gromov--Witten invariants of $X$ and explicit hypergeometric functions, and show that this family, after changing the sign of $z$, lies on the Lagrangian submanifold that determines Euler-twisted Gromov--Witten invariants of $\Fl(E)$ with respect to~$V^G$.
\begin{theorem}[see Definition~\ref{IGM definition special case} and Theorem~\ref{IGM on twisted cone}]\label{step two}
For all $t \in \CC^R$ and $\tau \in H^\bullet(X)$,
$$I_{\GM}(t, \tau, {-z}) \in \cL_{V^G_\lambda}$$
\end{theorem}
\noindent Under an ampleness condition -- which holds, for example, whenever the blow-up $\tilde{X}$ in Theorem~\ref{step one} is Fano -- the family \eqref{intro twisted I} takes a particularly simple form
$$
I_{\GM}(t, \tau, z) = z \left(1 + o(z^{-1})\right)
$$
and standard techniques in Givental formalism allow us to determine genus-zero twisted Gromov--Witten invariants of $\Fl(E)$ explicitly: see Corollaries~\ref{I=J} and~\ref{explicit Gr}. Applying this in the setting of Theorem~\ref{step one}, we recover genus-zero Gromov--Witten invariants of the blow-up $\tilde{X}$ by taking the non-equivariant limit $\lambda \to 0$.
The reader who is focussed on blow-ups can stop reading here, jumping to the end of the Introduction for connections to previous work, \S\ref{flag} for basic setup, Corollary~\ref{explicit Gr} for the key Gromov--Witten theoretic result, and then to \S\ref{examples} for worked examples. In the rest of the Introduction, we explain how Theorem~\ref{step two} should be regarded as an instance of the Abelian/non-Abelian Correspondence~\cite{CFKS2008}.
\pagebreak
The Abelian/non-Abelian Correspondence relates the genus-zero Gromov--Witten theory of quotients $A /\!\!/ G$ and $A /\!\!/ T$, where $A$ is a smooth quasiprojective variety equipped with the action of a reductive Lie group $G$, and $T$ is its maximal torus. We fix a linearisation of this action such that the stable and semistable loci coincide and we suppose that the quotients $A /\!\!/ G$ and $A /\!\!/ T$ are smooth. In our setting the non-Abelian quotient $A /\!\!/ G$ will be a partial flag bundle or Grassmann bundle over $X$, and the Abelian quotient $A /\!\!/ T$ will be a bundle of toric varieties over $X$, that is, a toric bundle in the sense of Brown~\cite{Brown2014}. To reformulate the Abelian/non-Abelian Correspondence of~\cite{CFKS2008} in terms of Givental's formalism, however, we pass to the following more general situation. Let $W$ denote the Weyl group of $T$ in $G$. A theorem of Martin (Theorem~\ref{thm:Martin} below) expresses the cohomology of the non-Abelian quotient $H^\bullet(A /\!\!/ G)$ as a quotient of the Weyl-invariant part of the cohomology of the Abelian quotient $H^\bullet(A /\!\!/ T)^W$ by an appropriate ideal, so there is a quotient map
\begin{equation} \label{quotient}
H^\bullet(A /\!\!/ T)^W \to H^\bullet(A /\!\!/ G).
\end{equation}
The Abelian/non-Abelian Correspondence, in the form that we state it below, asserts that this map also controls the relationship between the quantum cohomology of $A /\!\!/ G$ and $A /\!\!/ T$.
When comparing the quantum cohomology algebras of $A /\!\!/ G$ and $A /\!\!/ T$, or when comparing the Givental spaces of $A /\!\!/ G$ and $A /\!\!/ T$, we need to account for the fact that there are fewer curve classes on $A /\!\!/ G$ than there are on $A /\!\!/ T$. We do this as follows. The Givental space $\cH_Y$ discussed above is defined using cohomology groups $H^\bullet(Y;\Lambda)$ where $\Lambda$ is the Novikov ring for~$Y$: see \S\ref{givental formalism}. The Novikov ring contains formal linear combinations of terms $Q^d$ where $d$ is a curve class on~$Y$. The quotient map \eqref{quotient} induces an isomorphism $H^2(A /\!\!/ T)^W \cong H^2(A /\!\!/ G)$, and by duality this gives a map $\varrho \colon \NE(A /\!\!/ T) \rightarrow \NE (A /\!\!/ G)$ where $\NE$ denotes the Mori cone: see Proposition~\ref{maponmori}. Combining the quotient map \eqref{quotient} with the map on Novikov rings induced by $\varrho$ gives a map
\begin{equation} \label{quotientH}
p \colon \cH^W_{A /\!\!/ T} \to \cH_{A /\!\!/ G}
\end{equation}
between the Weyl-invariant part of the Givental space for the Abelian quotient and the Givental space for the non-Abelian quotient. Here, and also below when we discuss Weyl-invariant functions, we consider the Weyl group $W$ to act on $\cH_{A /\!\!/ T}$ through the combination of its action on cohomology classes and its action on the Novikov ring.
We consider now an appropriate twisted Gromov--Witten theory of $A /\!\!/ T$.
For each root $\rho$ of~$G$, write $L_\rho \to A /\!\!/ T$ for the line bundle determined by $\rho$, and let $\Phi = \oplus_\rho L_\rho$ where the sum runs over all roots. Consider the Lagrangian submanifold~$\cL_{\Phi_\lambda}$ that encodes genus-zero twisted Gromov--Witten invariants of $A /\!\!/ T$. The bundle $\Phi$ is very far from convex, so one cannot expect the non-equivariant limit of $\cL_{\Phi_\lambda}$ to exist. Nonetheless, the projection along \eqref{quotientH} of the Weyl-invariant part of this Lagrangian submanifold does have a non-equivariant limit.
\begin{theorem}[see Corollary~\ref{GMlimit no bundle}]
The limit as $\lambda \to 0$ of $p \Big( \cL_{\Phi_\lambda} \cap \cH^W_{A /\!\!/ T}\Big)$ exists.
\end{theorem}
\noindent We call this non-equivariant limit the \emph{Givental--Martin cone\footnote{We have not emphasised this point, but the Lagrangian submanifolds $\cL_Y$, $\cL_{F_{\lambda}}$, etc.~are in fact cones~\cite{Givental2004}.}} $\cL_{\GM} \subset \cH_{A/\!\!/ G}$.
\begin{conjecture}[The Abelian/non-Abelian Correspondence] \label{AnA}
$\cL_{\GM} = \cL_{A /\!\!/ G}$.
\end{conjecture}
\noindent This is a reformulation of \cite[Conjecture~3.7.1]{CFKS2008}. The analogous statement for twisted Gromov--Witten invariants is the Abelian/non-Abelian Correspondence with bundles; this is a reformulation of \cite[Conjecture~6.1.1]{CFKS2008}. Fix a representation $\rho$ of $G$, and consider the vector bundles $V^G \to A /\!\!/ G$ and $V^T \to A /\!\!/ T$ induced by~$\rho$. Consider the Lagrangian submanifold $\cL_{\Phi_{\lambda} \oplus V^T_{\mu}}$ that encodes genus-zero twisted
Gromov--Witten invariants of $A /\!\!/ T$, where for the twist by the root bundle $\Phi$ we use
the equivariant Euler class \eqref{intro equivariant Euler} with parameter $\lambda$ and for the twist by $V^T$ we use the equivariant Euler class with a different parameter $\mu$. As before, the projection along \eqref{quotientH} of the Weyl-invariant part of this Lagrangian submanifold has a non-equivariant limit with respect to $\lambda$.
\begin{theorem}[see Theorem~\ref{GMlimit}]
The limit as $\lambda \to 0$ of $p \Big(\cL_{\Phi_\lambda \oplus V^T_\mu} \cap \cH^W_{A /\!\!/ T}\Big)$ exists.
\end{theorem}
\noindent Let us call this limit the \emph{twisted Givental--Martin cone} $\cL_{\GM,V^T_\mu} \subset \cH_{A /\!\!/ G}$.
\begin{conjecture}[The Abelian/non-Abelian Correspondence with bundles]\label{AnA bundles}
$\cL_{\GM, V^T_\mu} = \cL_{V^G_\mu}$.
\end{conjecture}
As in~\cite{CFKS2008}, the Abelian/non-Abelian Correspondence implies the Abelian/non-Abelian Correspondence with bundles.
\begin{proposition}
Conjectures~\ref{AnA} and~\ref{AnA bundles} are equivalent.
\end{proposition}
\begin{proof}
Conjecture~\ref{AnA} is the special case of Conjecture~\ref{AnA bundles} where the vector bundles involved have rank zero. To see that Conjecture~\ref{AnA} implies Conjecture~\ref{AnA bundles}, observe that the projection of the Quantum Riemann--Roch operator $\Delta_{V^T_\mu}$ under the map \eqref{quotientH} is $\Delta_{V^G_\mu}$: see Definition~\ref{delta}. Now apply the Quantum Riemann--Roch theorem~\cite{CoatesGivental2007}.
\end{proof}
The following reformulations will also be useful. Given any Weyl-invariant family
\begin{align*}
t \mapsto I(t) \in \cH^W_{A /\!\!/ T}
&& \text{of the form} &&
I(t) = \sum_{d \in \NE(A /\!\!/ T)} Q^d I_d(t)
\end{align*}
we define its \emph{Weyl modification} $t \mapsto \widetilde{I}(t) \in \cH^W_{A /\!\!/ T}$ to be
\[ \widetilde{I}(t) = \sum_{d \in \NE(A /\!\!/ T)} Q^d W_d I_d(t) \]
where $W_d$ is an explicit hypergeometric factor that depends on $\lambda$ -- see~\eqref{modg}. We prove in Lemma~\ref{IGMexists} below that, for a Weyl-invariant family $t \mapsto I(t)$, the image under \eqref{quotientH} of the Weyl modification $t \mapsto p(\widetilde{I}(t))$ has a well-defined limit as $\lambda \to 0$. We call this limit the \emph{Givental--Martin modification} of $t \mapsto I(t)$ and denote it by $t \mapsto I_{\GM}(t)$; it is a family of elements of $\cH_{A /\!\!/ G}$. Furthermore, if $t \mapsto I(t)$ satisfies the Divisor Equation in the sense of equation~\eqref{divisor equation}, then:
\begin{itemize}[itemsep=0.5ex]
\item if $t \mapsto I(t)$ is a family of elements of $\cL_{A /\!\!/ T}$ then $t \mapsto I_{\GM}(t)$ is a family of elements on the Givental--Martin cone $\cL_{\GM}$; and
\item if $t \mapsto I(t)$ is a family of elements of the twisted cone $\cL_{V^T_\mu}$ then $t \mapsto I_{\GM}(t)$ is a family of elements on the twisted Givental--Martin cone $\cL_{\GM,V^T_\mu}$.
\end{itemize}
The first statement here is Corollary~\ref{IGMonLGM} with $F'=0$; the second statement is Corollary~\ref{IGMonLGM}. This lets us reformulate the Abelian/non-Abelian Correspondence in more concrete terms.
\begin{conjecture}[a reformulation of Conjecture~\ref{AnA}] \label{AnA family}
Let $t \mapsto I(t)$ be a Weyl-invariant family of elements of $\cL_{A /\!\!/ T}$ that satisfies the Divisor Equation. Then the Givental--Martin modification $t \mapsto I_{\GM}(t)$ is a family of elements of $\cL_{A /\!\!/ G}$.
\end{conjecture}
\begin{conjecture}[a reformulation of Conjecture~\ref{AnA bundles}] \label{AnA bundles family}
Let $t \mapsto I(t)$ be a Weyl-invariant family of elements of $\cL_{V^T_\mu}$ that satisfies the Divisor Equation. Then the Givental--Martin modification $t \mapsto I_{\GM}(t)$ is a family of elements of $\cL_{V^G_\mu}$.
\end{conjecture}
Let us now specialise to the case of partial flag bundles, as in \S\ref{notation} and the rest of the paper, so that $A /\!\!/ G$ is a partial flag bundle $\Fl(E) \to X$ and $A /\!\!/ T$ is a toric bundle $\Fl(E)_T \to X$. Theorem~\ref{brownoh AnA} below establishes the statement of Conjecture~\ref{AnA family} not for an arbitrary Weyl-invariant family $t \mapsto I(t)$ on $\cL_{A /\!\!/ T}$, but for a specific such family called the \emph{Brown $I$-function}. As we recall in Theorems~\ref{ohI} and~\ref{brown2014gromov}, Brown and Oh have defined families $t \mapsto I_{\Fl(E)_T}(t)$ and $t \mapsto I_{\Fl(E)}(t)$, given in terms of genus-zero Gromov--Witten invariants of $X$ and explicit hypergeometric functions, and have shown~\cite{Brown2014, Oh2016} that $I_{\Fl(E)_T}(t) \in \cL_{\Fl(E)_T}$ and $I_{\Fl(E)}(t) \in \cL_{\Fl(E)}$.
\begin{theorem}[see Proposition~\ref{brownoh} for details] \label{brintroh} \label{brownoh AnA}
The Givental--Martin modification of the Brown $I$-function $t \mapsto I_{\Fl(E)_T}(t)$ is $t \mapsto I_{\Fl(E)}(t)$.
\end{theorem}
\noindent The main result of this paper is the analogue of Theorem~\ref{brownoh AnA} for twisted Gromov--Witten invariants. We define a twisted version $t \mapsto I_{V^T_\mu}(t)$ of the Brown $I$-function and prove:
\begin{theorem}[see Definition~\ref{IGM definition special case} and Corollary~\ref{IGM on twisted cone} for details] \ \label{step three}
\begin{enumerate}
\item the twisted Brown $I$-function $t \mapsto I_{V^T_\mu}(t)$ is a Weyl-invariant family of elements of~$\cL_{V^T_\mu}$ that satisfies the Divisor Equation;
\item the Givental--Martin modification $t \mapsto I_{\GM}(t)$ of this family satisfies $I_{\GM}(t) \in \cL_{V^G_\mu}$.
\end{enumerate}
\end{theorem}
\noindent This establishes the statement of Conjecture~\ref{AnA bundles family}, not for an arbitrary Weyl-invariant family, but for the specific such family $t \mapsto I_{V^T_\mu}(t)$. Theorem~\ref{step three} follows from the Quantum Riemann--Roch theorem~\cite{CoatesGivental2007} together with the results of Brown~\cite{Brown2014} and Oh~\cite{Oh2016}, using a ``twisting the $I$-function'' argument as in~\cite{CCIT2019}.
As we will now explain, Theorem~\ref{brownoh AnA} is quite close to a proof of Conjecture~\ref{AnA family} in the flag bundle case, and similarly Theorem~\ref{step three} is close to a proof of Conjecture~\ref{AnA bundles family}. We will discuss only the former, as the latter is very similar. Theorem~\ref{brownoh AnA} implies that
\begin{equation}
\label{AnA intermediate}
\text{the Givental--Martin modification $t \mapsto I_{\GM}(t)$ lies in $\cL_{\Fl(E)}$}
\end{equation}
for the family $t \mapsto I(t)$ given by the Brown $I$-function, because the Givental--Martin modification of the Brown $I$-function is the Oh $I$-function $t \mapsto I_{\Fl(E)}(t)$. If Oh's $I$-function were a \emph{big $I$-function}, in the sense of~\cite{CFK2016}, then Conjecture~\ref{AnA family} would follow. The special geometric properties of the Lagrangian submanifold $\cL_Y$ described in~\cite{Givental2004} and~\cite[Appendix B]{CCIT2009Computing}, taking $Y = \Fl(E)$, would then imply that any family $t \mapsto I(t)$ such that $I(t) \in \cL_{\Fl(E)}$ can be written as
\begin{equation}
\label{special form}
I(t) = I_{\Fl(E)}(\tau(t)) + \sum_\alpha C_\alpha(t, z) z \frac{\partial I_{\Fl(E)}}{\partial \tau_\alpha}(\tau(t))
\end{equation}
for some coefficients $C_\alpha(t, z)$ that depend polynomially on $z$ and some change of variables $t \mapsto \tau(t)$. Furthermore the same geometric properties imply that any family of the form \eqref{special form} satisfies $I(t) \in \cL_{\Fl(E)}$. But $\cL_{\GM}$ has the same special geometric properties as $\cL_Y$ -- it inherits them from the Weyl-invariant part of $\cL_{\Phi_\lambda}$ by projection along \eqref{quotientH} followed by taking the non-equivariant limit -- and so if $t \mapsto I_{\Fl(E)}$ is a big $I$-function then any family of elements $t \mapsto I^\dagger(t)$ on $\cL_{\GM}$ can be written as
\begin{equation*}
I^\dagger(t) = I_{\Fl(E)}(\tau^\dagger(t)) + \sum_\alpha C^\dagger_\alpha(t, z) z \frac{\partial I_{\Fl(E)}}{\partial \tau_\alpha}(\tau^\dagger(t))
\end{equation*}
That is, $I^\dagger(t)$ can be written in the form \eqref{special form}. It follows that $I^\dagger(t) \in \cL_{\Fl(E)}$. Applying this with $I^\dagger = I_{\GM}$ from Conjecture~\ref{AnA family} proves that conjecture; note that we know that the family $t \mapsto I_{\GM}(t)$ here lies in $\cL_{\GM}$ by Corollary~\ref{IGMonLGM}.
If the Brown and Oh $I$-functions were big $I$-functions then Theorem~\ref{brownoh AnA} would continue to hold (with the same proof) and Conjecture~\ref{AnA family} would therefore follow. In reality the Brown and Oh $I$-functions are only small $I$-functions, not big $I$-functions, but Ciocan-Fontanine--Kim have explained in \cite[\S5]{CFK2016} how to pass from small $I$-functions to big $I$-functions, whenever the target space is the GIT quotient of a vector space. To apply their argument, and hence prove Conjecture~\ref{AnA family} for partial flag bundles, one would need to check that the Brown $I$-function arises from torus localization on an appropriate quasimap graph space~\cite[\S7.2]{CFKM2014}. The analogous result for the Oh $I$-function is~\cite[Proposition~5.1]{Oh2016}.
Webb has proved a `big $I$-function' version of the Abelian/non-Abelian Correspondence for target spaces that are GIT quotients of vector spaces~\cite{Webb2018}, and this immediately implies Conjectures~\ref{AnA family} and~\ref{AnA bundles family}.
\begin{proposition}
Conjecture~\ref{AnA family} holds when $A$ is a vector space and $G$ acts on $A$ via a representation $G \to \GL(A)$.
\end{proposition}
\begin{proof}
Combining \cite[Corollary~6.3.1]{Webb2018} with \cite[Theorem~3.3]{CFK2016} shows that there are big $I$-functions $t \mapsto I_{A /\!\!/ T}(t)$ and $t \mapsto I_{A /\!\!/ G}(t)$ such that $I_{A /\!\!/ T}(t) \in \cL_{A /\!\!/ T}$ and $I_{A /\!\!/ G}(t) \in \cL_{A /\!\!/ G}$. Furthermore it is clear from \cite[equation 62]{Webb2018} that the Givental--Martin modification of the Weyl-invariant part of $t \mapsto I_{A /\!\!/ T}(t)$ is $t \mapsto I_{A /\!\!/ G}(t)$. Now argue as above.
\end{proof}
\subsection*{Connection to Earlier Work} Our formulation of the Abelian/non-Abelian Correspondence very roughly says that, for genus-zero Gromov--Witten theory, passing from an Abelian quotient $A /\!\!/ T$ to the corresponding non-Abelian quotient $A /\!\!/ G$ is almost the same as twisting by the non-convex bundle $\Phi \to A /\!\!/ T$ defined by the roots of $G$. This idea goes back to the earliest work on the subject, by Bertram--Ciocan-Fontanine--Kim, and indeed our Conjecture is very much in the spirit of the discussion in~\cite[\S4]{BCFK2008}. These ideas were given a precise form in~\cite{CFKS2008}, in terms of Frobenius manifolds and Saito's period mapping; the main difference with the approach that we take here is that in~\cite{CFKS2008} the authors realise the cohomology $H^\bullet(A /\!\!/ G)$ as the Weyl-anti-invariant subalgebra of the cohomology of the Abelian quotient $A /\!\!/ T$, whereas we realise it as a quotient of the Weyl-invariant part of $H^\bullet(A/\!\!/ T)$. The latter approach seems to fit better with Givental's formalism.
Ruan was the first to realise that there is a close connection between quantum cohomology (or more generally Gromov--Witten theory) and birational geometry~\cite{Ruan1999}, and the change in Gromov--Witten invariants under blow-up forms an important testing ground for these ideas. Despite the importance of the topic, however, Gromov--Witten invariants of blow-ups have been understood in rather few situations. Early work here focussed on blow-ups in points, and on exploiting structural properties of quantum cohomology such as the WDVV equations and Reconstruction Theorems~\cite{Gathmann1996, GottschePandharipande1998, Gathmann2001}. Subsequent approaches used symplectic methods pioneered by Li--Ruan~\cite{LiRuan2001,HuLiRuan2008,Hu2000,Hu2001}, or the Degeneration Formula following Maulik--Pandharipande~\cite{MaulikPandharipande2006,HeHuKeQi2018,ChenDuWang2020}, or a direct analysis of the moduli spaces involved and virtual birationality arguments~\cite{Manolache2012,Lai2009,AbramovichWise2018}. In each case the aim was to prove `birational invariance': that certain specific Gromov--Witten invariants remain invariant under blow-up. We take a different approach. Rather than deform the target space, or study the geometry of moduli spaces of stable maps explicitly, we give an elementary construction of the blow-up $\tilde{X} \to X$ in terms that are compatible with modern tools for computing Gromov--Witten invariants, and extend these tools so that they cover the cases we need. This idea -- of reworking classical constructions in birational geometry to make them amenable to computations using Givental formalism -- was pioneered in \cite{CCGK16}, and indeed Lemma~E.1 there gives the codimension-two case of our Theorem~\ref{step one}.
Compared to explicit invariance statements
\[
\langle \pi^*\phi_{i_1}, \ldots, \pi^*\phi_{i_n} \rangle^{\tilde{X}}_{0,n,\pi^! \beta} =\langle \phi_{i_1}, \ldots, \phi_{i_n} \rangle^{X}_{0,n,\beta}
\]
as in \cite[Theorem 1.4]{Lai2009},
we pay a price for our increased abstraction: the range of invariants for which we can extract closed-form expressions is different (see Corollary~\ref{I=J}) and in general does not overlap with Lai's. But we also gain a lot by taking a more structural approach: our results determine, via a Birkhoff factorization procedure as in~\cite{CoatesGivental2007, CFK2014}, genus-zero Gromov--Witten invariants of the blow-up $\tilde{X}$ for curves of arbitrary degree (not just proper transforms of curves in the base) and with a wide range of insertions that can include gravitational descendant classes. See Remark~\ref{what can we compute blow up}. Furthermore, in general one should not expect Gromov--Witten invariants to remain invariant under blow-ups. The correct statement -- cf.~Ruan's Crepant Resolution Conjecture~\cite{CCIT2009WallCrossings, CoatesRuan, Iritani2008, Iritani2009} and its generalisation by Iritani~\cite{Iritani2020} -- is believed to involve analytic continuation of Givental cones, and we hope that our formulation here will be a step towards this.
After the first version of this paper appeared on the arXiv, Fenglong You pointed us to the work~\cite{LeeLinWang2017} in which Lee, Lin, and Wang sketch a construction of blow-ups that is very similar to Theorem~\ref{step one}, and use this to compute Gromov--Witten invariants of blow-ups in complete intersections. The methods they use are different: they rely on a very interesting extension of the Quantum Lefschetz theorem to certain non-split bundles, which they will prove in forthcoming work~\cite{LeeLinWangForthcoming}. At first sight, their result~\cite[Theorem 5.1]{LeeLinWang2017} is both more general and less explicit than our results.
In fact, we believe neither is true. Their theorem as stated applies to blow-ups in complete intersections defined by arbitrary line bundles whereas we require these line bundles to be convex; however, discussions with the authors suggest that both results apply under the same conditions, and the convexity hypothesis was omitted from~\cite[Theorem~5.1]{LeeLinWang2017} in error. Furthermore, Lee, Lin, and Wang extract genus-zero Gromov--Witten invariants by combining their generalised Quantum Lefschetz theorem with an inexplicit Birkhoff factorisation procedure whereas we use the formalism of Givental cones. We believe, though, that one can rephrase their argument entirely in terms of Givental's formalism, and after doing so their results become explicit in exactly the same range as ours. The explicit formulas are different, however, and it would be interesting to see if one can derive non-trivial identities from this.
Note that Proposition~\ref{geometricconstruction} below is more general than the construction in~\cite[Section~5]{LeeLinWang2017}: the fact that we consider Grassmann bundles rather than projective bundles allows us to treat blow-ups in certain degeneracy loci. Combining this with the methods in Section~\ref{examples} allows one to compute genus-zero Gromov--Witten invariants of blow-ups in such degeneracy loci.
One of the most striking features of Givental's formalism is that relationships between higher-genus Gromov--Witten invariants of different spaces can often be expressed as the quantisation, in a precise sense, of the corresponding relationship between the Lagrangian cones that encode genus-zero invariants~\cite{Givental2004}. Our version of the Abelian/non-Abelian Correspondence hints, therefore, at a higher-genus generalisation. It would be very interesting to develop and prove a higher-genus analog of Conjecture~\ref{AnA}.
\section{GIT Quotients and Flag Bundles}
\subsection{The topology of quotients by a non-Abelian group and its maximal torus}\label{topology}
Let $G$ be a complex reductive group acting on a smooth quasi-projective variety $A$ with polarisation given by a linearised ample line bundle $L$. Let $T \subset G$ be a maximal torus. One can then form the GIT-quotients $A /\!\!/ G$ and $A /\!\!/ T$. We will assume that the stable and semistable points with respect to these linearisations coincide, and that all the isotropy groups of the stable points are trivial; this ensures that the quotients $A /\!\!/ G$ and $A /\!\!/ T$ are smooth projective varieties. The Abelian/non-Abelian Correspondence \cite{CFKS2008} relates the genus zero Gromov--Witten invariants of these two quotients. Let $A^{s}(G)$, and respectively $A^s(T)$, denote the subsets of $A$ consisting of points that are stable for the action of $G$, and respectively $T$. The two geometric quotients $A /\!\!/ G$ and $A /\!\!/ T$ fit into a diagram
\begin{equation}
\label{vbongit}
\begin{tikzcd}
A /\!\!/ T & A^{s}(G)/T \arrow[d, "q"] \arrow[l, "j"', hook'] \\
& A /\!\!/ G
\end{tikzcd}
\end{equation}
where $j$ is the natural inclusion and $q$ the natural projection.
A representation $\rho \colon G \to \GL(V)$ induces a vector bundle $V(\rho)$ on $A /\!\!/ G$ with fiber $V$. Explicitly, $V(\rho)=(A\times V)/\!\!/ G$ where $G$ acts as
\[
g\colon (a,v) \mapsto (ag, \rho(g^{-1}) v).
\]
Similarly, the restriction $\rho|_T$ of the representation $\rho$ induces a vector bundle $V(\rho|_T)$ over $A /\!\!/ T$. Note that since $T$ is Abelian, $V(\rho|_T)$ splits as a direct sum of line bundles, $V(\rho|_T)=L_1 \oplus \dots \oplus L_k$.
These bundles satisfy
\begin{equation}\label{(21)}
j^*V({\rho |_T}) \cong q^*V(\rho).
\end{equation}
When the representation $\rho\colon G \rightarrow \GL(V)$ is clear from context, we will suppress it from the notation, writing $V^G$ for $V(\rho)$ and $V^T$ for $V(\rho|_T)$.
We will now describe the relationship between the cohomology rings of $A /\!\!/ G$ and $A /\!\!/ T$, following \cite{Martin2000}. Let $W$ be the Weyl group of $G$. $W$ acts on $A /\!\!/ T$ and hence on the cohomology ring $H^\bullet(A /\!\!/ T)$. Restricting the adjoint representation $\rho \colon G \to \GL(\mathfrak{g})$ to $T$, we obtain a splitting $\rho|_T=\oplus_{\alpha} \rho_\alpha$ into $1$-dimensional representations, i.e.~characters, of $T$. The set $\Delta$ of characters appearing in this decomposition is the set of roots of $G$, and forms a root system. Write $L_\alpha$ for the line bundle on $A /\!\!/ T$ corresponding to a root $\alpha$. Fix a set of positive roots $\Phi^+$ and define
\[
\omega=\prod_{\alpha \in \Phi^+} c_1(L_\alpha).
\]
\begin{theorem}[Martin]
\label{thm:Martin}
There is a natural ring isomorphism
\[
H^{\bullet}(A /\!\!/ G) \cong\frac{ H^\bullet(A /\!\!/ T)^W}{ \Ann(\omega)}
\]
under which $x \in H^\bullet(A /\!\!/ G)$ maps to $\tilde{x} \in H^\bullet(A /\!\!/ T)$ if and only if $q^*x=j^*\tilde{x}$.
\end{theorem}
\noindent Theorem~\ref{thm:Martin} shows that any cohomology class $\tilde{x}\in H^\bullet(A /\!\!/ T)^W$ is a lift of a class ${x} \in H^\bullet(A /\!\!/ G)$, with $\tilde{x}$ unique up to an element of $\mathrm{Ann}(\omega)$.
\begin{assumption}
Throughout this paper, we will assume that the $G$-unstable locus $A \setminus A^s(G)$ has codimension at least $2$.
\end{assumption}
This implies that elements of $H^2(A /\!\!/ G)$ can be lifted uniquely:
\begin{proposition}\label{maponmori}
Pullback via $q$ gives an isomorphism $H^2(A /\!\!/ G) \cong H^2(A /\!\!/ T)^W$, and induces a map $\varrho \colon \NE(A /\!\!/ T) \rightarrow \NE (A /\!\!/ G)$ where $\NE$ denotes the Mori cone.
\end{proposition}
\begin{proof}
The assumption that $A \setminus A^s(G)$ has codimension at least $2$ implies that $A^s(T)/T \setminus A^s(G)/T$ has codimension at least $2$, so $j$ induces an isomorphism $\Pic(A^s(G)/T) \cong \Pic(A^s(T)/T)$. This gives an isomorphism $H^2(A^s(G)/T) \cong H^2(A^s(T)/T)$ since the cycle class map is an isomorphism for both spaces. Since $q^*$ always induces an isomorphism between $H^2(A /\!\!/ G)$ and $H^2(A^s(G)/T)^W$ \cite{Borel1953}, the first claim follows. Consequently, the lifting of divisor classes is unique and can be identified with the pullback map $q^* \colon \Pic(A /\!\!/ G) \rightarrow \Pic(A^s(G)/T)$. Since the pullback of a nef divisor class along a proper map is nef, we obtain by duality a map $\varrho: \NE(A/\!\!/ T) \rightarrow \NE(A /\!\!/ G)$.
\end{proof}
\begin{definition}
We say that $\tilde{\beta} \in \NE(A/\!\!/ T)$ lifts $\beta \in \NE(A /\!\!/ G)$ if $\varrho(\tilde{\beta}) = \beta$. Note that any effective $\beta$ has finitely many lifts.
\end{definition}
\subsection{Partial flag varieties and partial flag bundles}\label{flag}
\subsubsection{Notation}\label{notation}
We will now specialise to the case of flag bundles and introduce notation used in the rest of the paper. Fix once and for all:
\label{setup}
\begin{itemize}
\item a positive integer $n$ and a sequence of positive integers $r_1 < \dots < r_{\ell} < r_{\ell+1}=n$;
\item a vector bundle $E \rightarrow X$ of rank $n$ on a smooth projective variety $X$ which splits as a direct sum of line bundles $E=L_1 \oplus \dots \oplus L_n$.
\end{itemize}
We write $\Fl$ for the partial flag manifold $\Fl(r_1, \dots, r_{\ell};n)$, and $\Fl(E)$ for the partial flag bundle $\Fl(r_1, \dots, r_{\ell};E)$.
Set $N=\sum_{i=1}^\ell r_i r_{i+1}$ and $R=r_1+\dots+r_\ell$.
It will be convenient to use the indexing $\{(1,1), \dots, (1, r_1), (2, 1), \dots, (\ell, r_\ell)\}$ for the set of positive integers less than or equal to $R$.
\subsubsection{Partial flag varieties and partial flag bundles as GIT quotients}
The partial flag manifold $\Fl$ arises as a GIT quotient, as follows. Consider $\CC^N$ as the space of homomorphisms
\begin{equation}
\label{eq:hom}
\bigoplus_{i=1}^\ell \Hom\left(\CC^{r_{i}}, \CC^{r_{i+1}}\right).
\end{equation}
The group $G = \prod_{i=1}^{\ell} \mathrm{GL}_{r_i}(\CC)$ acts on $\CC^N$ by
\[
(g_1, \dots, g_{\ell}) \cdot (A_1, \dots, A_{\ell}) = (g_2^{-1} A_1 g_1, \dots,g_{\ell}^{-1}A_{\ell -1}g_{\ell-1}, A_{\ell}g_\ell).
\]
Let $\rho_i \colon G \rightarrow \GL_{r_i}(\CC)$ be the representation which is the identity on the $i$th factor and trivial on all other factors. Choosing the linearisation $\chi=\bigotimes_{i=1}^{\ell} \det(\rho_i)$,
we have that $\CC^N /\!\!/_\chi G$ is the partial flag manifold $\Fl$.
More generally, the partial flag bundle also arises as a GIT quotient, of the total space of the bundle of homomorphisms
\begin{equation}
\label{eq:hom_bundle}
\bigoplus_{i=1}^{\ell-1} \Hom\left(\cO^{\oplus r_{i}}, \cO^{\oplus r_{i+1}} \right)
\oplus \Hom \left(\cO^{\oplus r_{\ell}},E \right)
\end{equation}
with respect to the same group $G$ and the same linearisation. $\Fl(E)$ carries $\ell$ tautological bundles of ranks $r_1, \dots, r_{\ell}$, which we will denote $S_1, \dots, S_{\ell}$. These bundles restrict to the usual tautological bundles on $\Fl$ on each fibre. The bundle $S_i$ is induced by the representation~$\rho_i$.
\begin{definition}
Let
\[
p_i(t)=t^{r_i}-c_1(S_i)t^{r_i-1}+\dots + (-1)^{r_i} c_{r_i}(S_i)
\]
be the Chern polynomial of $S_i^\vee$. We denote the roots of $p_i$ by $H_{i,j}$,~$1 \leq j \leq r_i$. The $H_{i,j}$ are in general only defined over an appropriate ring extension of $H^\bullet(\Fl(E), \CC)$, but symmetric polynomials in the $H_{i,j}$ give well-defined elements of $H^\bullet(\Fl(E), \CC)$.
\end{definition}
The maximal torus $T\subset G$ is isomorphic to $(\CC^\times)^{R}$. The corresponding Abelian quotient
\[
\Fl(E)_T \coloneqq \Hom\big(\cdots\big) /\!\!/_\chi (\CC^\times)^{R},
\]
where $\Hom\big(\cdots\big)$ is the bundle of homomorphisms \eqref{eq:hom_bundle}, is a fibre bundle over $X$ with general fibre isomorphic to the toric variety $\Fl_T:= \CC^N /\!\!/_\chi (\CC^\times)^R$. The space $\Fl(E)_T$ also carries natural cohomology classes:
\begin{definition}
Let $\rho_{i,j}\colon (\CC^\times)^{R} \rightarrow \GL_1(\CC)$ be the dual of the one-dimensional representation of $(\CC^\times)^{R}$ given by projection to the $(i,j)$th factor $\CC^{\times} = \GL_1(\CC)$; here we use the indexing of the set $\{1,2,\ldots,R\}$ specified in \S\ref{notation}. We define ${L}_{i,j}$ to be the line bundle on $\Fl(E)_T$ induced by $\rho_{i,j}$ and denote its first Chern class by $\tilde{H}_{i,j} \in H^2(\Fl(E)_T,\CC)$. Similarly, we define $h_{i,j}$ to be the first Chern class of the line bundle on $\Fl_T$ induced by the representation $\rho_{i,j}$. Equivalently, $h_{i,j}$ is the restriction of $\tilde{H}_{i,j}$ to a general fibre $\Fl_T$ of $\Fl(E)_T$.
\end{definition}
Recall that, for a representation $\rho$ of $G$, the corresponding vector bundle $V^T$ splits as a direct sum of line bundles $F_1 \oplus \cdots \oplus F_k$. It is a general fact that if $f$ is a symmetric polynomial in the $c_1(F_i)$, then $f$ can be written as a polynomial in the elementary symmetric polynomials $e_r(c_1(F_1), \dots, c_1(F_k))$, that is, in the Chern classes $c_r(V^T)$. By \eqref{(21)} we have that $j^*c_r(V^T)=q^*c_r(V^G)$, and so replacing any occurrence of $c_r(V^T)$ by $c_r(V^G)$ gives an expression $g \in H^\bullet(A /\!\!/ G)$ which satisfies $q^*g=j^*f$. That is, $f$ is a lift of $g$. Applying this to the dual of the standard representation $\rho_i$ of the $i$th factor of $G$ shows that any polynomial $p$ which is symmetric in each of the sets $\tilde{H}_{i,j}$ for fixed $i$ projects to the same expression in $H^\bullet(\Fl(E))$ with any occurrence of $\tilde{H}_{i,j}$ replaced by the corresponding Chern root $H_{i,j}$.
\begin{lemma}\label{torus_invariant_divisors}
Let $(\CC^\times)^R$ act on $\CC^N$, arrange the weights for this action in an $R \times N$-matrix~$(m_{i,k})$ and
consider $E=L_1 \oplus \dots \oplus L_N \xrightarrow{\pi} X$ a direct sum of line bundles. Form the associated toric fibration $E /\!\!/ (\CC^\times)^R$ with general fibre $\CC^N /\!\!/ (\CC^\times)^R$ and let $h_i$ (respectively $H_i$) be the first Chern class of the line bundle on $\CC^N /\!\!/ (\CC^\times)^R$ (respectively on $E /\!\!/ (\CC^\times)^R$) induced by the dual of the representation which is standard on the $i$th factor of $(\CC^\times)^R$ and trivial on the other factors.
Then
\begin{itemize}
\item the Poincar\'e duals $u_k$ of the torus invariant divisors of the toric variety $\CC^N /\!\!/ (\CC^\times)^R$ are:
\[ u_k=\sum_{i=1}^R m_{i,k}h_i \]
\item
the Poincar\'e duals $U_k$ of the torus invariant divisors of the total space of the toric fibration $E /\!\!/ (\CC^\times)^R\xrightarrow{\pi} X $ are:
\[ U_k=\sum_{i=1}^R m_{i,k}H_i+\pi^*c_1(L_k) \]
\end{lemma}
When applying Lemma~\ref{torus_invariant_divisors} to our situation \eqref{eq:hom_bundle} it will be convenient to define $H_{\ell+1, j}:=\pi^*c_1(L_j^\vee)$. Then the set of torus invariant divisors is
\begin{align*}
H_{i,j} - H_{i+1,j'} && 1 \leq i \leq \ell, \, 1 \leq j \leq r_{i}, \, 1 \leq j' \leq r_{i+1}
\end{align*}
We will also need to know about the ample cone of a toric variety $\CC^N /\!\!/ (\CC^\times)^R$. This is most easily described in terms of the secondary fan, that is, by the wall-and-chamber decomposition of $\Pic(\CC^N /\!\!/ (\CC^\times)^R) \otimes \RR \cong \RR^R$ given by the cones spanned by size $R-1$ subsets of columns of the weight matrix. The ample cone of $\CC^N /\!\!/ (\CC^\times)^R$ is then the chamber that contains the stability condition $\chi$. Moreover, for a subset $\alpha \subset \{1, \dots, N\}$ of size $R$ the cone in the secondary fan spanned by the classes $u_k$, $k\in \alpha$, contains the stability condition (and therefore also the ample cone) iff the intersection $u_\alpha=\bigcap_{k \notin \alpha} u_k$ is nonempty. In this case, $U_\alpha=\bigcap_{k \notin \alpha} U_k$ restricts to a torus fixed point on every fibre and, since $E$ splits as a direct sum of line bundles, $U_\alpha$ is the image of a section of the toric fibration $\pi$. We denote this section by $s_\alpha$. By construction, the torus invariant divisors $U_{k}$,~$k \in \alpha$, do not meet $U_\alpha$, so that $s_\alpha^*(U_k)=0$ for all $k \in \alpha$.
For the toric variety $\Fl_T$ one can easily write down the set of $R$-dimensional cones containing $\chi=(1, \dots, 1)$.
For each index $(i,j)$, choose some $j' \in \{1, \dots, r_{\ell+1}\}$. Then the cone spanned by
\begin{align}\label{ampleconegen}
h_{i,j}-h_{i+1,j'} && 1 \leq i \leq \ell-1, \, 1 \leq j \leq r_i && h_{\ell, j}, \, 1 \leq j \leq r_\ell
\end{align}
contains $\chi$ and every cone containing $\chi$ is of that form.
\section{Givental's Formalism} \label{givental formalism}
In this section we review Givental's geometric formalism for Gromov--Witten theory, concentrating on the genus-zero case. The main reference for this is \cite{Givental2004}. Let $Y$ be a smooth projective variety and consider
\[ \cH_Y=H^\bullet(Y, \Lambda)[z, z^{-1}]\!] =\Big\{ \sum_{k= -\infty}^{m} a_k z^k \colon \text{$a_k \in H^{\bullet}(Y,\Lambda)$, $m \in \ZZ$}\Big\} \] where $z$ is an indeterminate and $\Lambda$ is the Novikov ring for $Y$.
After picking a basis $\{\phi_1, \dots, \phi_N\}$ for $H^\bullet(Y;\CC)$ with $\phi_1 = 1$ and writing $\{\phi^1, \dots, \phi^N\}$ for the Poincar\'e dual basis, we can write elements of $\cH_Y$ as
\begin{align}
\sum_{i=0}^m \sum_{\alpha=1}^N q_i^{\alpha}\phi_{\alpha}z^i+ \sum_{i=0}^\infty \sum_{\alpha=1}^N p_{i,\alpha}\phi^{\alpha}(-z)^{-1-i} \label{eq11}
\end{align}
where $q_i^{\alpha}$,~$p_{i,\alpha} \in \Lambda$. The $q_i^{\alpha}$,~$p_{i,\alpha}$ then provide coordinates on $\cH_Y$.
The space $\cH_Y$ carries a symplectic form
\begin{align*}
\Omega\colon \cH_Y \otimes \cH_Y &\rightarrow \Lambda\\
f \otimes g &\mapsto \mathrm{Res}_{z=0}(f(-z), g(z)) \, dz
\end{align*}
where $(\cdot , \cdot )$ denotes the Poincar\'e pairing, extended $\CC[z, z^{-1}]\!]$-linearly to $\cH_Y$.
By construction, $\Omega$ is in Darboux form with respect to our coordinates:
\[
\Omega=\sum_i \sum_\alpha dp_{i,\alpha} \wedge dq_i^{\alpha}
\]
We fix a Lagrangian polarisation of $\cH_Y$ as $\cH_Y=\cH_+ \oplus \cH_-$, where $$\cH_+=H^\bullet(Y;\Lambda)[z], \quad \cH_-=z^{-1}H^\bullet(Y;\Lambda)[\![z^{-1}]\!]$$
This polarisation $\cH_Y = \cH_+ \oplus \cH_-$ identifies $\cH_Y$ with $T^* \cH_+$. We now relate this to Gromov--Witten theory.
\begin{definition} The \emph{genus-zero descendant potential} is a generating function for genus-zero Gromov--Witten invariants:
$$\mathcal{F}_{Y}^{0} = \sum_{n = 0}^\infty \sum_{d \in \NE(Y)} \frac{Q^d}{n!} t^{\alpha_1}_{i_1} \dots t^{\alpha_{n}}_{i_n}\langle \phi_{\alpha_1}\psi^{i_1}, \dots, \phi_{\alpha_n}\psi^{i_n} \rangle_{0,n,d}$$ Here $t_i^\alpha$ is a formal variable, $\NE(Y)$ denotes the Mori cone of $Y$, and Einstein summation is used for repeated lower and upper indices.
\end{definition}
\noindent After setting
\begin{equation}
\label{eq:dilaton}
t^{\alpha}_{i} = q^{\alpha}_{i} + \delta^{i}_{1}\delta^{1}_{\alpha},
\end{equation}
where $\delta_i^j$ denotes the Kronecker delta, we obtain a (formal germ of a) function $\mathcal{F}^{0}_Y \colon \cH_+ \rightarrow \Lambda$.
\begin{definition}
The Givental cone $\cL_Y$ of $Y$ is the graph of the differential of $\mathcal{F}_{Y}^{0}\colon \cH_+ \rightarrow \Lambda$:
$$\cL_Y = \left\{(\mathbf{q,p}) \in T^*\cH_+= \cH_+ \oplus \cH_- \colon p_{i, \alpha} = \frac{\partial \mathcal{F}^0_{Y}}{\partial q^{\alpha}_i}\right\}$$
Note that $\cL_Y$ is Lagrangian by virtue of being the graph of the differential of a function. Moreover, it has the following special geometric properties \cite{Givental2004, CCIT2009Computing, CoatesGivental2007}:
\begin{itemize}
\item $\cL_Y$ is preserved by scalar multiplication, i.e. it is (the formal germ of) a cone
\item the tangent space $T_f$ of $\cL_Y$ at $f \in \cL_Y$ is tangent to $\cL_Y$ exactly along $z T_f$. This means:
\begin{enumerate}
\item $zT_f \subset \cL_Y$
\item for $g \in zT_f$, we have $T_g = T_f$
\item $T_f \cap \cL_Y = z T_f$
\end{enumerate}
\end{itemize}
\end{definition}\label{J}
A general point of $\cL_Y$ can be written, in view of the dilaton shift \eqref{eq:dilaton}, as
\begin{align*}
&{-z} + \sum_{i = 0}^\infty t^{\alpha}_i \phi_{\alpha}z^i + \sum_{n = 0}^\infty \sum_{d \in \NE(Y)} \frac{Q^d}{n!} t^{\alpha_1}_{i_1} \dots t^{\alpha_{n}}_{i_n}\langle \phi_{\alpha_1}\psi^{i_1}, \dots, \phi_{\alpha_n}\psi^{i_n}, \phi_{\alpha}\psi^{i} \rangle_{0,n+1,d} \phi^{\alpha}(-z)^{-i-1} \\
= &{-z} + \sum_{i = 0}^\infty t^{\alpha}_i \phi_{\alpha}z^i + \sum_{n = 0}^\infty \sum_{d \in \NE(Y)} \frac{Q^d}{n!} t^{\alpha_1}_{i_1} \dots t^{\alpha_{n}}_{i_n}\langle \phi_{\alpha_1}\psi^{i_1}, \dots, \phi_{\alpha_n}\psi^{i_n}, \frac{\phi_{\alpha}}{-z - \psi} \rangle_{0,n+1,d} \phi^{\alpha}
\end{align*}
Thus knowing $\cL_Y$ is equivalent to knowing all genus-zero Gromov--Witten invariants of $Y$. Setting $t_k^\alpha=0$ for all $k>0$, we obtain the \emph{$J$-function} of $
Y$:
\begin{equation*}
J(\tau,-z) = -z + \tau + \sum_{n = 0}^\infty \sum_{d \in \NE(Y)} \frac{Q^d}{n!} \left\langle \tau, \dots, \tau, \frac{\phi_{\alpha}}{-z - \psi} \right\rangle_{0,n+1,d} \phi^{\alpha}
\end{equation*}
where $\tau = t^1_0 \phi_1 + \dots + t^N_0 \phi_N \in H^\bullet(Y)$. The $J$-function is the unique family of elements $\tau \mapsto J(\tau,-z)$ on the Lagrangian cone such that
\begin{equation*}
J(\tau, -z) = -z + \tau + O(z^{-1}).
\end{equation*}
We will need a generalisation of all of this to twisted Gromov--Witten invariants~\cite{CoatesGivental2007}. Let~$F$ be a vector bundle on $Y$ and consider the universal family over the moduli space of stable maps
$$\begin{tikzcd}
{C_{0,n,d}} \arrow[d, "\pi"'] \arrow[r, "f"] & Y \\
{Y_{0,n,d}} &
\end{tikzcd}$$
Let $\pi_!$ be the pushforward in $K$-theory. We define $$F_{0,n,d} = \pi_{!}f^*F=R^{0}\pi_{*}f^*F-R^{1}\pi_{*}f^*F$$
(the higher derived functors vanish).
In general $F_{0,n,d}$ is a class in $K$-theory and not an honest vector bundle. This means that in order to evaluate a characteristic class $\mathbf{c}(\cdot)$ on $F_{0,n,d}$ we need $\mathbf{c}(\cdot)$ to be \emph{multiplicative} and \emph{invertible}. We can then set
\[
\mathbf{c}(F_{0,n,d}) = \mathbf{c}(R^{0}\pi_{*}f^* F) \cup \mathbf{c}(R^{1}\pi_{*}f^* F)^{-1}
\]
where $\mathbf{c}(R^{i}\pi_{*}f^* F)$ is defined using an appropriate locally free resolution.
\begin{definition} \label{equivarianteuler}
Let $F$ be a vector bundle on $Y$ and let $\mathbf{c}(\cdot)$ be an invertible multiplicative characteristic class. We will refer to the pair $(F, {\bf c})$ as twisting data. Define $(F, {\bf c})$-twisted Gromov--Witten invariants as
\begin{equation*}
\langle \alpha_1 \psi_1^{i_1}, \dots, \alpha_n \psi_{n}^{i_n} \rangle_{0,n,d}^{F, {\bf c}} = \int_{[Y_{0,n,d}]^{\mathrm{vir}} \cap \mathbf{c}(F_{0,n,d})} \ev_1^{*}\alpha_1 \cup \dots \cup \ev_n^{*}\alpha_n \cup \psi_1^{i_1} \cup \dots \cup \psi_{n}^{i_n}
\end{equation*}
\end{definition}
Any multiplicative invertible characteristic class can be written as $\mathbf{c}(\cdot ) = \exp(\sum_{k \geq 0} s_k \ch_k(\cdot))$, where $\ch_k$ is the $k$th component of the Chern character and $s_0$,~$s_1$,~\ldots are appropriate coefficients. So we work with cohomology groups $H^{\bullet}(X, \Lambda_s)$, where $\Lambda_s$ is the completion of $\Lambda[s_0, s_1, \dots]$ with respect to the valuation
\begin{equation*}
v(Q^d) = \big\langle c_1(\cO(1)), d \big \rangle, \quad v(s_k) = k+1.
\end{equation*}
Most of the definitions from before now carry over.
We have the twisted Poincar\'e pairing $(\alpha,\beta)^{F, {\bf c}} = \int_Y \mathbf{c}(F) \cup \alpha \cup \beta $ which defines the basis $\phi^1, \dots, \phi^N$ dual to our chosen basis $1 =\phi_1, \dots, \phi_N$ for $H^\bullet(Y)$. The Givental space becomes $\cH_Y = H^{\bullet}(Y,\Lambda_s) \, \otimes \, \CC[z,z^{-1}]\!]$ with the twisted symplectic form $$\Omega^{F, {\bf c}}(f(z), g(z)) = \mathrm{Res}_{z=0}\big(f(-z),g(z)\big)^{F, {\bf c}}dz.$$ This form admits Darboux coordinates as before which give a Lagrangian polarisation of $\cH_Y$. Then the twisted Lagrangian cone $\cL_{F, {\bf c}}$ is defined, via the dilaton shift \eqref{eq:dilaton}, as the graph of the differential of the generating function $\mathcal{F}^{0,F, {\bf c}}_Y$ for genus zero \textit{twisted} Gromov--Witten invariants. Finally, just as before, we can define a twisted $J$-function:
\begin{definition} \label{twisted J}
Given twisting data $(F, {\bf c})$ for $Y$, the twisted $J$-function is:
\begin{equation*}
J_{F, {\bf c}}(\tau,{-z}) = {-z} + \tau + \sum_{n = 0}^\infty \sum_{d \in \NE(Y)} \frac{Q^d}{n!} \left\langle \tau, \dots \tau, \frac{\phi_{\alpha}}{-z - \psi} \right\rangle^{F, {\bf c}}_{0,n+1,d} \phi^{\alpha}
\end{equation*}
\end{definition}
\noindent This is once again characterised as the unique family $\tau \mapsto J_{F, {\bf c}}(\tau,-z)$ of elements of the twisted Lagrangian cone of the form
\begin{equation*}
J_{F, {\bf c}}(\tau, -z) = -z + \tau + O(z^{-1})
\end{equation*}
Note that we can recover the untwisted theory by setting $\mathbf{c}=1$.
In what follows we take $\mathbf{c}$ to be the $\CC^\times$-equivariant Euler class \eqref{intro equivariant Euler}, which is multiplicative and invertible. The $\CC^\times$-action here is the canonical $\CC^\times$-action on any vector bundle given by rescaling the fibres. We write $F_\lambda$ for the twisting data $(F, \mathbf{c})$, where $F$ is equipped with the $\CC^\times$-action given by rescaling the fibres with equivariant parameter $\lambda$. In this setting, Gromov--Witten invariants (and the coefficients $s_k$) take values in the fraction field $\CC(\lambda)$ of the $\CC^\times$-equivariant cohomology of a point. Here $\lambda$ is the hyperplane class on $\mathbb{CP}^{\infty}$, so that $H^{\bullet}_{\CC^\times}(\{\mathrm{pt}\}) = \CC[\lambda ]$, and we work over the field $\CC(\lambda)$.
\begin{remark}
As we have set things up, the twisted cone $\cL_{F_\lambda}$ is a Lagrangian submanifold of the symplectic vector space $\big(\cH_Y, \Omega^{F_\lambda}\big)$, so as $\lambda$ varies both the Lagrangian submanifold and the ambient symplectic space change. To obtain the picture described in the Introduction, where all the Lagrangian submanifolds $\cL_{F_\lambda}$ lie in a single symplectic vector space $\big(\cH_Y, \Omega \big)$, one can identify $\big(\cH_Y, \Omega \big)$ with $\big(\cH_Y, \Omega^{F_\lambda} \big)$ by multiplication by the square root of the equivariant Euler class of $F$. See~\cite[\S8]{CoatesGivental2007} for details.
\end{remark}
\subsection{Twisting the $I$-function}\label{I-functions}
We will now prove a general result following an argument from \cite{CCIT2009Computing}. We say that a family $\tau \mapsto I(\tau)$ of elements of $\cH_Y$ \emph{satisfies the Divisor Equation} if the parameter domain for $\tau$ is a product $U \times H^2(Y)$ and $I(\tau)$ takes the form
$$ I(\tau) = \sum_{\beta \in \NE(Y)}Q^{\beta} I_{\beta}(\tau,z) $$
where
\begin{align}\label{divisor equation}
z\nabla_{\rho}I_{\beta}= \big(\rho + \langle \rho,\beta \rangle z\big) I_{\beta}
&&
\text{for all $\rho \in H^2(Y)$.}
\end{align}
Here $\nabla_\rho$ is the directional derivative along $\rho$. Let $F'$ be a vector bundle on $Y$, and consider any family $\tau \mapsto I(\tau) \in \cL_{F'_\mu}$ that satisfies the Divisor Equation. Given another vector bundle $F$ which splits as a direct sum of line bundles $F=F_1 \oplus \dots \oplus F_k$, we explain how to modify the family $\tau \mapsto I(\tau)$ by introducing explicit hypergeometric factors that depend on $F$. We prove that (1) this modified family can be written in terms of the {\it Quantum Riemann-Roch operator} and the original family; and (2) the modified family lies on the twisted Lagrangian cone $\cL_{F_\lambda \oplus F'_\mu}$.
\begin{definition}
Define the element $G(x,z) \in \cH_Y$ by $$G(x,z) := \sum_{l=0}^\infty \sum_{m=0}^\infty s_{l + m - 1} \frac{B_m}{m!}\frac{x^l}{l!}z^{m-1}$$
where $B_m$ are the Bernoulli numbers and the $s_k$ are the coefficients obtained by writing the $\CC^\times$-equivariant Euler class \eqref{intro equivariant Euler} in the form $\exp\big(\sum_{k \geq 0} s_k \ch_k(\cdot)\big)$.
\end{definition}
\begin{remark}
The discussion in this section is valid for any invertible multiplicative characteristic class, not just the equivariant Euler class, but we will neither need nor emphasize this.
\end{remark}
\begin{definition}\label{delta}
Let $F$ be a vector bundle -- not necessarily split -- and let $f_1, \dots, f_k$ be the Chern roots of $F$. Define the \textit{Quantum Riemann-Roch operator}, $\Delta_{F_\lambda} \colon \cH_{Y} \rightarrow \cH_{Y}$ as multiplication by
\begin{equation*}
\Delta_{F_\lambda} = \prod_{i=1}^{k} \exp(G(f_i, z))
\end{equation*}
\end{definition}
\begin{theorem}[\cite{CoatesGivental2007}] \label{QRR} \
$\Delta_{F_\lambda}$ gives a linear symplectomorphism of $(\cH_{Y},\Omega_{Y})$ with $(\cH_{Y}, \Omega_{Y}^{F_\lambda})$ such that $$\Delta_{F_\lambda}(\cL_{Y}) = \cL_{F_\lambda}$$
\end{theorem}
Since $\Delta_{F_\lambda} \circ \Delta_{F'_\mu}=\Delta_{F_\lambda \oplus F'_\mu}$, it follows immediately that
$$\Delta_{F_\lambda}(\cL_{F'_\mu}) = \cL_{F_\lambda \oplus F'_\mu}.$$
\begin{lemma} \label{(3)}
Let $F$ be a vector bundle and let $f_1, \ldots, f_k$ be the Chern roots of $F$. Let
$$D_{F_\lambda}=\prod_{i=1}^{k} \exp\big({-G}(z\nabla_{f_i},z)\big)$$
and suppose that $\tau \mapsto I(\tau)$ is a family of elements of $\cL_{F'_\mu}$. Then $\tau \mapsto D_{F_\lambda}(I(\tau))$ is also a family of elements of $\cL_{F'_\mu}$.
\end{lemma}
\begin{proof}
This follows \cite[Theorem~4.6]{CCIT2009Computing}. Let $h = -z + \sum_{i=0}^m t_i z^i + \sum_{j=0}^{\infty} p_{j}(-z)^{-j-1}$ be a point on $\cH_{Y}$. The Lagrangian cone $\cL_{F'_\mu}$ is defined by the equations $E_j=0$,~$j=0,1,2,\dots$ where $$E_j(h) = p_j - \sum_{n \geq 0} \sum_{d \in \NE(Y)} \frac{Q^d}{n!} t^{\alpha_1}_{i_1} \dots t^{\alpha_{n}}_{i_n}\langle \phi_{\alpha_1}\psi^{i_1}, \dots, \phi_{\alpha_n}\psi^{i_n}, \phi_{\alpha}\psi^{j} \rangle_{0,n+1,d} \phi^{\alpha}$$
We need to show that $E_j(D_{F_\lambda}(I)) = 0$. Note that $D_{F_\lambda}(I) = \prod_{i=1}^{k} \exp(-G(z\nabla_{f_i},z))I$ depends on the parameters $s_i$. For notational simplicity assume that $k=1$, so that $$D_{F_\lambda}(I) = \exp\big({-G}(z\nabla_{f},z)\big)I$$ Set $\deg s_i = i+1$. We will prove the result by inducting on degree. Note that if $s_0 = s_1= \dots = 0$ then $D_{F_\lambda}(I) = I$ so that $E_j(D_{F_\lambda}(I)) = 0$. Assume by induction that $E_j(D_{F_\lambda}(I))$ vanishes up to degree $n$ in the variables $s_0, s_1, s_2, \dots$ Then $$\frac{\partial}{\partial s_i} E_j(D_{F_\lambda}(I)) = d_{D_{F_\lambda}(I)}E_j (z^{-1}P_{i}(z \nabla_{f},z)D_{F_\lambda}(I))$$ where $$P_{i}(z \nabla_{f},z) = \sum_{m=0}^{i+1} \frac{1}{m!(i+1-m)!}z^{m}B_m (z \nabla_{f})^{i+1-m}$$
By induction there exists $D_{F_\lambda}(I)' \in \cL_{F'_\mu}$ such that $$\frac{\partial}{\partial s_i} E_j(D_{F_\lambda}(I)) = d_{D_{F_\lambda}(I)'}E_j (z^{-1}P_{i}(z \nabla_{f},z)D_{F_\lambda}(I)')$$
up to degree $n$. But the right hand side of this expression is zero, since the term in brackets lies in the tangent space to the Lagrangian cone. Indeed, applying $\nabla_f$ to $D_{F_\lambda}(I)'$ -- or to any family lying on the cone -- takes it to the tangent space of the cone at the point. And then applying $z\nabla_f$ preserves that tangent space.
\end{proof}
\begin{corollary}\label{Ithm}
Let $\tau \mapsto I(\tau)$ be a family of elements of $\cL_{F'_\mu}$. Then $\tau \mapsto \Delta_{F_\lambda}(D_{F_\lambda}(I(\tau)))$ is a family of elements of $\cL_{F_\lambda \oplus F'_\mu}$.
\end{corollary}
\begin{proof}
This follows immediately by combining Theorem~\ref{QRR} and Lemma~\ref{(3)}.
\end{proof}
Corollary \ref{Ithm} produces a family of elements on the twisted Lagrangian cone $\cL_{F_\lambda \oplus F'_\mu}$, but in general it is not obvious whether the nonequivariant limit $\lambda \rightarrow 0$ of this family exists. However, in the case when $F$ is split and $\tau \mapsto I(\tau)$ satisfies the Divisor Equation we will show that the family $\Delta_{F_\lambda}(D_{F_\lambda}(I(\tau, -z)))$ is equal to the \emph{twisted $I$-function} $I_{F'_\mu \oplus {F_\lambda}}$ given in Definition~\ref{twistedI}. This has an explicit expression, which makes it easy to check whether the nonequivariant limit exists. We make the following definitions.
\begin{definition}\label{modification} \label{twistedI}
Let $\tau \mapsto I(\tau)$ be a family of elements of $\cL_{F'_\mu}$. Let $F= F_1 \oplus \dots \oplus F_k$ be a direct sum of line bundles, and let $f_i=c_1(F_i)$. For $\beta \in \NE(Y)$, we define the modification factor
$$M_{\beta}(z) = \prod_{i=1}^{k} \frac{\prod_{m=-\infty}^{\langle f_{i}, \beta \rangle} \big(\lambda + f_{i} + mz\big) }{\prod_{m=-\infty}^{0} \big(\lambda + f_{i} + mz\big) }$$
The associated \textit{twisted $I$-function} is
\begin{equation*}
I^{\text{\rm tw}}(\tau) = \sum_{\beta \in \NE(Y)} Q^{\beta} I_{\beta}(\tau,z) \cdot M_{\beta}(z)
\end{equation*}
\end{definition}
To relate $M_{\beta}(z)$ to the Quantum Riemann--Roch operator we will need the following Lemma:
\begin{lemma}\label{delta-M}
$$M_{\beta}(-z) = \Delta_{F_\lambda}\left(\prod_{i=1}^{k}\exp(- G(f_{i} - \langle f_{i}, \beta \rangle z, z))\right)$$
\end{lemma}
\begin{proof}
Define $$\mathbf{s}(x) = \sum_{k \geq 0} s_k \frac{x^k}{k!}$$
By \cite[equation 13]{CCIT2009Computing} we have that
\begin{equation}\label{gamma}
G(x+z,z) = G(x,z) + \mathbf{s}(x)
\end{equation}
We can rewrite
$$M_{\beta}(z) = \prod_{i=1}^{k} \frac{\prod_{m=-\infty}^{\langle f_{i}, \beta \rangle} \big(\lambda + f_{i} + mz\big) }{\prod_{m=-\infty}^{0} \big(\lambda + f_{i} + mz\big) } = \prod_{i=1}^{k} \frac{\prod_{m=-\infty}^{\langle f_{i}, \beta \rangle} \exp[\mathbf{s}(f_{i} + mz)] }{\prod_{m=-\infty}^{0} \exp[\mathbf{s}(f_{i} + mz)] }$$
and so
\begin{align*}
M_{\beta}(-z) =& \prod_{i=1}^{k}\exp\left(\sum_{m=-\infty}^{\langle f_i, \beta \rangle} \mathbf{s}(f_{i} - mz) - \sum_{m=-\infty}^{0} \mathbf{s}(f_{i} - mz)\right) \\
=& \prod_{i=1}^{k} \exp\big(G(f_i,z) - G(f_i - \langle f_i, \beta \rangle z, z)\big)
\end{align*}
where for the second equality we used \eqref{gamma}.
\end{proof}
\begin{proposition}\label{twisted=Deltad}
Let $\tau \mapsto I(\tau)$ be a family of elements of $\cL_{F'_\mu}$ that satisfies the Divisor Equation, and let $F=F_1 \oplus \dots \oplus F_k$ be a direct sum of line bundles. Then
\begin{equation}\label{twisted=Deltadeq}
I^{\text{\rm tw}}=\Delta_{F_\lambda}(D_{F_\lambda}(I)).
\end{equation}
As a consequence, $\tau \mapsto I^{\text{\rm tw}}(\tau)$ is a family of elements on the cone $\cL_{F_\lambda \oplus F'_\mu}$.
\end{proposition}
\begin{proof}
Lemma \ref{delta-M} shows that
\begin{equation}\label{imagebeta}
I^{\text{\rm tw}}(\tau) = \Delta_{F_\lambda}\left(\sum_{\beta \in \NE(Y)}\prod_{i=1}^{k} \exp(-G(f_i - \langle f_i, \beta \rangle z, z))I_{\beta}(\tau,z)\right)
\end{equation}
Applying the Divisor Equation, we can rewrite this as
\begin{equation}
I^{\text{\rm tw}}=\Delta_{F_\lambda}(D_{F_\lambda}(I))
\end{equation}
as required. The rest is immediate from Corollary~\ref{Ithm}.
\end{proof}
\begin{proposition}\label{nonequiexists}
If the line bundles $F_i$ are nef, then the nonequivariant limit $\lambda \rightarrow 0$ of $I^{\text{\rm tw}}(\tau)$ exists.
\end{proposition}
\begin{proof}
This is immediate from Definition~\ref{twistedI}.
\end{proof}
\section{The Givental--Martin cone}\label{giventalmartincones}
We now restrict to the situation described in the Introduction, where the action of a reductive Lie group $G$ on a smooth quasiprojective variety $A$ leads to smooth GIT quotients $A /\!\!/ G$ and~$A /\!\!/ T$. As discussed, the roots of $G$ define a vector bundle $\Phi = \oplus_\rho L_\rho \to Y$, where $Y = A /\!\!/ T$, and we consider twisting data $(\Phi, \mathbf{c})$ for $Y$ where $\mathbf{c}$ is the $\CC^\times$-equivariant Euler class.
We call the modification factor in this setting the \emph{Weyl modification factor}, and denote it as
\begin{equation}\label{modg}
W_{\beta}(z) = \prod_{\alpha} \frac{\prod_{m=-\infty}^{\langle c_{1}(L_{\alpha}), \beta \rangle} \big(c_{1}(L_{\alpha}) + \lambda + mz\big)}{\prod_{m=-\infty}^{0} \big(c_{1}(L_{\alpha}) + \lambda + mz\big)}
\end{equation}
where the product runs over all roots $\alpha$.
For any family $\tau \mapsto I(\tau)= \sum_{\beta \in \NE(Y)}Q^{\beta}I_{\beta}(\tau,z)$ of elements of $\cH_Y$, the corresponding twisted $I$-function is
\begin{equation} \label{general Weyl twist}
I^{\text{\rm tw}}(\tau) = \sum_{\beta \in \NE(Y)}Q^{\beta}I_{\beta}(\tau,z) \cdot W_{\beta}(z)
\end{equation}
Since the roots bundle $\Phi$ is not convex, in general the non-equivariant limit $\lambda \to 0$ of $I^{\text{\rm tw}}$ will not exist. Recall from \eqref{quotientH}, however, the map $p \colon \cH^W_{A /\!\!/ T} \to \cH_{A /\!\!/ G}$.
\begin{lemma}\label{IGMexists}
Suppose that $I$ is Weyl-invariant. Then $p \circ I^\text{\rm tw}$ has a well-defined limit as $\lambda \rightarrow 0$.
\end{lemma}
\begin{proof}
The map $p$ is given by the composition of the map on Novikov rings induced by $$\varrho \colon \NE(A /\!\!/ T) \to \NE(A /\!\!/ G)$$ (see Proposition~\ref{maponmori}) with the projection map $H^\bullet(A /\!\!/ T; \CC)^W \to H^\bullet(A /\!\!/ G; \CC)$ (see Theorem~\ref{thm:Martin}). Since $I(\tau)$ is Weyl-invariant, $I^\text{\rm tw}(\tau)$ is also Weyl-invariant and so, after applying $\varrho$, the coefficient of each Novikov term $Q^\beta$ in $\tau \mapsto I^\text{\rm tw}(\tau)$ lies in $H^\bullet(A /\!\!/ T; \CC)^W$. The composition $p \circ I^{\text{\rm tw}}$ is therefore well-defined.
The Weyl modification \eqref{modg} contains many factors
$$
\frac{c_1(L_\alpha) + \lambda + m z}{- c_1(L_\alpha) + \lambda - m z}
$$
which arise by combining the terms involving roots $\alpha$ and $-\alpha$. Such factors have a well-defined limit, $-1$, as $\lambda \to 0$. Therefore the limit of $p \circ I^\text{\rm tw}$ as $\lambda \to 0$ is well-defined if and only if the limit of
\begin{equation} \label{p of I intermediate}
p \left( \sum_{\beta \in \NE(Y)}Q^{\beta}I_{\beta}(\tau,z) \cdot (-1)^{\epsilon(\beta)}\prod_{\alpha \in \Phi^+} \frac{ c_1(L_\alpha) \pm \lambda + \langle c_1(L_\alpha), \beta \rangle z}{c_1(L_\alpha) \mp \lambda} \right)
\end{equation}
as $\lambda \to 0$ is well-defined, and the two limits coincide. Here $\Phi^+$ is the set of positive roots of $G$, and $\epsilon(\beta) = \sum_{\alpha \in \Phi^+} \langle c_1(L_\alpha), \beta \rangle$; cf.~\cite[equation 3.2.1]{CFKS2008}. The limit $\lambda \to 0$ of the denominator terms
$$
\prod_{\alpha \in \Phi^+} \big(c_1(L_\alpha) - \lambda\big)
$$
in \eqref{p of I intermediate} is the fundamental Weyl-anti-invariant class $\omega$ from the discussion before Theorem~\ref{thm:Martin}. Furthermore
$$ \sum_{\beta \in \NE(Y)}Q^{\beta}I_{\beta}(\tau,z) \cdot (-1)^{\epsilon(\beta)}\prod_{\alpha \in \Phi^+} \big(c_1(L_\alpha) + \lambda + \langle c_1(L_\alpha), \beta \rangle z\big)$$
has a well-defined limit as $\lambda \to 0$ which, as it is Weyl-anti-invariant, is divisible by $\omega$. The quotient here is unique up to an element of $\Ann(\omega)$, and therefore the projection of the quotient along Martin's map $H^\bullet(A /\!\!/ T; \CC)^W \to H^\bullet(A /\!\!/ G; \CC)$ is unique. It follows that the limit as $\lambda \to 0$ of $p \circ I^\text{\rm tw}$ is well-defined.
\end{proof}
\begin{definition} \label{IGM definition}
Let $\tau \mapsto I(\tau)$ be a Weyl-invariant family of elements of $\cH_Y$ and let $I^{\text{\rm tw}}$ denote the twisted $I$-function as above. We call the nonequivariant limit of $\tau \mapsto p \big(I^{\text{\rm tw}}(\tau)\big)$ the \emph{Givental--Martin modification} of the family $\tau \mapsto I(\tau)$, and denote it by $\tau \mapsto I_{\GM}(\tau)$.
\end{definition}
Recall that we have fixed a representation $\rho$ of $G$ on a vector space $V$, and that this induces vector bundles $V^T \to A /\!\!/ T$ and $V^G \to A /\!\!/ G$. Since the bundle $\Phi \to A /\!\!/ T$ is not convex, one cannot expect the non-equivariant limit of $\cL_{ \Phi_\lambda \oplus V^T_\mu}$ to exist. Nonetheless, the projection along \eqref{quotientH} of the Weyl-invariant part of $\cL_{ \Phi_\lambda \oplus V^T_\mu}$ does admit a non-equivariant limit.
\begin{theorem}\label{GMlimit}
The non-equivariant limit $\lambda \to 0$ of $p \left(\cL_{\Phi_\lambda \oplus V^T_\mu} \cap \cH^{W}_{A /\!\!/ T} \right)$ exists.
\end{theorem}
\noindent We call this non-equivariant limit the \emph{twisted Givental--Martin cone} $\cL_{\GM, V^T_\mu} \subset \cH^{W}_{A/\!\!/ T}$.
\begin{proof}[Proof of Theorem~\ref{GMlimit}]
Recall the twisted $J$-function $J_{V^T_\mu}(\tau,{-z})$ from Definition~\ref{twisted J}. By~\cite{CoatesGivental2007} a general point
$$ {-z} + t_0 + t_1 z + \cdots + O(z^{-1}) $$
on $\cL_{V^T_\mu}$ can be written as
$$ J_{V^T_\mu} \big(\tau({\bf t}),{-z}\big) + \sum_{\alpha=1}^N C_\alpha({\bf t}, z) z \frac{\partial J_{V^T_\mu}}{\partial \tau^\alpha}\big(\tau({\bf t}), {-z}\big)
$$
for some coefficients $C_\alpha({\bf t}, z)$ that depend polynomially on $z$ and some $H^\bullet(A /\!\!/ T)$-valued
function $\tau({\bf t})$ of ${\bf t} = (t_0,t_1,\ldots)$. The Weyl modification $\tau \mapsto I^\text{\rm tw}(\tau)$ of $\tau \mapsto J_{V^T_\mu}(\tau,-z)$ satisfies $I^\text{\rm tw}(\tau) \equiv J_{V^T_\mu}(\tau,{-z})$ modulo Novikov variables, and $I^\text{\rm tw}(\tau) \in \cL_{\Phi_\lambda \oplus V^T_\mu}$ by Proposition~\ref{twisted=Deltad}, so a general point
\begin{equation} \label{general point on equivariant Weyl}
{-z} + t_0 + t_1 z + \cdots + O(z^{-1})
\end{equation}
on $\cL_{\Phi_\lambda \oplus V^T_\mu}$ can be written as
$$ I^\text{\rm tw} \big(\tau({\bf t})^\dagger,{-z}\big) + \sum_{\alpha=1}^N C_\alpha({\bf t}, z)^\dagger z \frac{\partial I^\text{\rm tw}}{\partial \tau^\alpha}\big(\tau({\bf t})^\dagger, {-z}\big) $$
for some coefficients $C_\alpha({\bf t}, z)^\dagger$ that depend polynomially on $z$ and some $H^\bullet(A /\!\!/ T)$-valued
function $\tau({\bf t})^\dagger$. Since the twisted $J$-function is Weyl-invariant, so is $I^\text{\rm tw}(\tau)$, and thus if \eqref{general point on equivariant Weyl} is Weyl-invariant then we may take $C_\alpha({\bf t}, z)^\dagger$ to be such that $\sum_\alpha C_\alpha({\bf t}, z)^\dagger \phi_\alpha$ is Weyl-invariant. Projecting along \eqref{quotientH} we see that a general point
\begin{equation} \label{general point on equivariant GM}
{-z} + t_0 + t_1 z + \cdots + O(z^{-1})
\end{equation}
on $p \left(\cL_{\Phi_\lambda \oplus V^T_\mu} \cap \cH^{W}_{A /\!\!/ T} \right)$ can be written as
$$ p \circ I^\text{\rm tw} \big(\tau({\bf t})^\ddagger,{-z}\big) + \sum_{\alpha=1}^N C_\alpha({\bf t}, z)^\ddagger z \frac{\partial (p \circ I^\text{\rm tw})}{\partial \tau^\alpha}\big(\tau({\bf t})^\ddagger, {-z}\big) $$
for some coefficients $C_\alpha({\bf t}, z)^\ddagger$ that depend polynomially on $z$ and some $H^\bullet(A /\!\!/ T)$-valued
function $\tau({\bf t})^\ddagger$. Furthermore, since $p \circ I^\text{\rm tw}(\tau)$ has a well-defined non-equivariant limit $I_{\GM}(\tau)$, we see that $C_{\alpha}({\bf t}, z)^\ddagger$ also admits a non-equivariant limit. Hence a general point \eqref{general point on equivariant GM} on $p \left(\cL_{\Phi_\lambda \oplus V^T_\mu} \cap \cH^{W}_{A /\!\!/ T} \right)$ has a well-defined limit as $\lambda \to 0$.
\end{proof}
\begin{corollary} \label{GMlimit no bundle}
The non-equivariant limit $\lambda \to 0$ of $p \left(\cL_{\Phi_\lambda} \cap \cH^{W}_{A /\!\!/ T} \right)$ exists.
\end{corollary}
\noindent We call this non-equivariant limit the \emph{Givental--Martin cone} $\cL_{\GM} \subset \cH^{W}_{A/\!\!/ T}$.
\begin{proof}
Take the vector bundle $V^T$ in Theorem~\ref{GMlimit} to have rank zero.
\end{proof}
\begin{corollary}\label{IGMonLGM}
If $\tau \mapsto I(\tau)$ is a Weyl-invariant family of elements of $\cL_{V^T_\mu}$ that satisfies the Divisor Equation \eqref{divisor equation} then the Givental--Martin modification $\tau \mapsto I_{\GM}(\tau)$ is a family of elements of~$\cL_{\GM, V^T_\mu}$.
\end{corollary}
\begin{proof}
Proposition~\ref{twisted=Deltad} implies that
$\tau \mapsto I^\text{\rm tw}(\tau, -z)$ is a family of elements of $\cL_{ \Phi_{\lambda} \oplus V^T_\mu}$. Projecting along \eqref{quotientH} and taking the limit $\lambda \rightarrow 0$, which exists by Lemma~\ref{IGMexists}, proves the result.
\end{proof}
This completes the results required to state the Abelian/non-Abelian Correspondence (Conjectures~\ref{AnA} and~\ref{AnA family}) and the Abelian/non-Abelian Correspondence with bundles (Conjectures~\ref{AnA bundles} and~\ref{AnA bundles family}).
\section{The Abelian/non-Abelian Correspondence for Flag Bundles}
\subsection{The Work of Brown and Oh}\label{brownohwn}
In this section we will review results by Brown~\cite{Brown2014} and Oh~\cite{Oh2016}, and situate their work in terms of the Abelian/non-Abelian Correspondence (Conjecture~\ref{AnA family}). In particular, we show that the Givental--Martin modification of the Brown $I$-function is the Oh $I$-function.
We freely use the notation introduced in Section~\ref{notation}.
Let $X$ be a smooth projective variety. We will decompose the $J$-function of $X$, defined in~\S\ref{J}, into contributions from different degrees:
\begin{equation} \label{JX by degrees}
J_{X}(\tau,z)= \sum_{D \in \NE(X)} J_X^{D}(\tau,z) Q^{D}.
\end{equation}
Recall that we have a direct sum of line bundles $E = L_1 \oplus \dots \oplus L_n \xrightarrow{\pi} X$, and that $\Fl(E) = \Fl(r_1, \dots, r_\ell,E) = A /\!\!/ G$ is the partial flag bundle associated to $E$. As in \S\ref{flag}, we form the toric fibration $\Fl(E)_T = A /\!\!/ T$ with general fibre $\CC^N /\!\!/ (\CC^\times)^R$. We denote both projection maps $\Fl(E) \to X$ and $\Fl(E)_T \to X$ by $\pi$. For the sake of clarity, we will denote homology and cohomology classes on $\Fl(E)_T$ with a tilde and classes on $\Fl(E)$ without. Recall the cohomology classes $\tilde{H}_{\ell+1, j}=-\pi^*c_1(L_j)$ on $\Fl(E)_T$, and $H_{\ell + 1,j}=-\pi^*c_1(L_j)$ on $\Fl(E)$.
For a fixed homology class $\tilde{\beta}$ on $\Fl(E)_T$ define $d_{\ell +1,j} = \langle -\pi^*c_{1}(L_j), \tilde{\beta} \rangle$, and for a fixed homology class $\beta$ on $\Fl(E)$ define $d_{\ell +1,j} = \langle -\pi^*c_{1}(L_j), \beta \rangle$. We use the indexing of the set $\{1, \dots, R\}$ defined in Section~\ref{notation}, and denote the components of a vector $\underline{d} \in \ZZ^R$ by $d_{i,j}$. Similarly, we denote components of a vector $\underline{d} \in \ZZ^\ell$ by $d_i$.
In \cite{Oh2016}, the author proves that a certain generating function, the $I$-function of $\Fl(E)$, lies on the Lagrangian cone for $\Fl(E)$.
\begin{theorem}\label{ohI}
Let $\tau \in H^{\bullet}(X)$, $t=\sum_{i}t_{i}c_1(S_i^\vee)$, and define the $I$-function of $\Fl(E)$ to be
\begin{multline*}
I_{\Fl(E)}(t, \tau,z) = \\
e^{\frac{t}{z}} \sum_{\beta \in {\NE}(\Fl(E))}
Q^{\beta} e^{\langle \beta, t \rangle} \pi^{*}J_{X}^{\pi_*\beta}(\tau,z)
\sum_{\substack{\underline{d} \in \ZZ^R\colon \\
\forall i \sum_j d_{i,j}=\langle \beta, c_1(S_i^\vee) \rangle}}
\prod_{i=1}^{ \ell}
\prod_{j=1}^{r_{i}}\prod_{j'=1}^{r_{i+1}} \frac{\prod_{m=-\infty}^{0}H_{i,j} - H_{i+1,j'} + mz}{\prod_{m=-\infty}^{d_{i,j} - d_{i+1,j'}}H_{i,j} - H_{i+1,j'} + mz}\\
\times \prod_{i=1}^\ell \prod_{j \neq j'}
\frac{\prod_{m=-\infty}^{d_{i,j} - d_{i,j'}}H_{i,j} - H_{i,j'} + mz}{\prod_{m=-\infty}^{0} H_{i,j} -H_{i,j'} + mz}
\end{multline*}
Then $I_{\Fl(E)}(t,\tau,-z) \in \cL_{\Fl(E)}$ for all $t$ and $\tau$.
\end{theorem}
In \cite{Brown2014}, the author proves an analogous result for the corresponding Abelian quotient $\Fl(E)_T$.
\begin{theorem}\label{brown2014gromov}
Let $\tau \in H^{\bullet}(X)$, $t=\sum_{i,j}t_{i,j}\tilde{H}_{i,j}$, and define the Brown $I$-function of $\Fl(E)_T$ to be
\begin{multline*}
I_{\Fl(E)_T}(t, \tau,z) = \\
e^{\frac{t}{z}} \sum_{\tilde{\beta} \in H_2\Fl(E)_T } Q^{\tilde{\beta}} e^{\langle \tilde{\beta}, t \rangle} \pi^{*}J_{X}^{\pi_* \tilde{\beta}}(\tau, z)
\prod_{i=1}^{\ell} \prod_{j=1}^{r_i}\prod_{j'=1}^{r_{i+1}}\frac{ \prod_{m=-\infty}^{0}\tilde{H}_{i,j} - \tilde{H}_{i+1,j'} + mz}
{\prod_{m=-\infty}^{\langle \tilde{\beta}, \tilde{H}_{i,j} - \tilde{H}_{i+1,j'} \rangle }\tilde{H}_{i,j} - \tilde{H}_{i+1,j'} + mz}
\end{multline*}
Then $I_{\Fl(E)_T}(t,\tau,-z) \in \cL_{\Fl(E)_T}$ for all $t$ and $\tau$.
\end{theorem}
\begin{remark}\label{novi}
We have chosen to state Theorem~\ref{brown2014gromov} in a different form than in Brown's original paper. The equivalence of the two versions follows from Lemma~\ref{effsummationrange} below. The classes $H_{i,j}$ here were denoted in~\cite{Brown2014} by $P_i$, and the classes $H_{i,j}-H_{i+1,j'}$ here were denoted there by~$U_k$.
\end{remark}
\begin{lemma}\label{effsummationrange}
Writing $I_{\Fl(E)_T}=\sum_{\tilde{\beta}} I_{\Fl(E)_T}^{\tilde{\beta}} Q^{\tilde{\beta}}$, any nonzero $I_{\Fl(E)_T}^{\tilde{\beta}}$ must have $\tilde{\beta} \in \NE(\Fl(E)_T)$.
\end{lemma}
\begin{proof}
To see this we temporarily adopt the notation of Brown and denote the torus invariant divisors by $U_k$, as in Lemma~\ref{torus_invariant_divisors}.
Then $I_{\Fl(E)_T}$ takes the form
\[
I_{\Fl(E)_T}=\sum_{\substack{\tilde{\beta} \in H_2\Fl(E)_T \colon \\ \pi_*\tilde{\beta} \in \NE(X)}}(\dots)\prod_{k=1}^N \frac{\prod_{m=-\infty}^{0}U_k+mz}{\prod_{m=-\infty}^{\langle \tilde{\beta}, U_k \rangle}U_k+mz}
\]
Let $\alpha \subset \{1, \dots, N\}$ be a subset of size $R$ which defines a section of the toric fibration as in Section~\ref{flag}. We have that
\[
s_\alpha^*I_{\Fl(E)_T}=(\dots)
\prod_{k \in \alpha} \frac{\prod_{m=-\infty}^{0}(0) + mz}{\prod_{m=-\infty}^{\langle \tilde{\beta}, U_k \rangle}(0)+ mz}\prod_{k \notin \alpha} \frac{\prod_{m=-\infty}^{0}s_\alpha^*U_k+ mz}{\prod_{m=-\infty}^{\langle \tilde{\beta}, U_k \rangle}s_\alpha^*U_k+ mz}
\]
since $s_\alpha^*(U_k)=0$ if $k \in \alpha$. Therefore, if $\langle \tilde{\beta}, U_k \rangle < 0$ for some $k \in \alpha$, the numerator contains a term $(0)$ and vanishes.
We conclude that any $\tilde{\beta} \in H_2\Fl(E)_T$ which gives a nonzero contribution to $s_\alpha^*I_{\Fl(E)_T}$ must satisfy the conditions
\[
\pi_*\tilde{\beta} \in \NE(X), \langle \tilde{\beta}, U_k \rangle \geq 0 \, \forall k \in \alpha.
\]
The section $s_\alpha$ gives a splitting $H_2(\Fl(E)_T)=H_2(X)\oplus H_2(\Fl_T)$, via which we may write $\tilde{\beta}=s_{\alpha_*}D+\iota_*d$ where $\iota$ is the inclusion of a fibre. We have
\[
\langle \tilde{\beta}, U_k \rangle= \langle D, s_\alpha^*U_k \rangle+ \langle d, \iota^*U_k \rangle=\langle d, \iota^*U_k \rangle
\geq 0
\]
for all $k \in \alpha$. However, the cone in the secondary fan spanned by the line bundles $\iota^*U_k$ contains the ample cone of $\Fl_T$ (see Section~\ref{flag}), so this implies $d \in \NE(\Fl_T)$. It follows that any $\tilde{\beta}$ which gives a nonzero contribution to $s_\alpha^*I_{\Fl(E)_T}$ is effective.
We now use the Atiyah--Bott localization formula
\[
I_{\Fl(E)_T}=\sum_{\alpha} s_{\alpha_*}\left(\frac{s_\alpha^*I_{\Fl(E)_T}}{e^\alpha}\right), \quad \text{where} \; e^\alpha=\prod_{k \notin \alpha} s_\alpha^*U_k
\]
where $\alpha$ ranges over the torus fixed point sections of the fibration, to conclude that the same is true for $I_{\Fl(E)_T}$.
\end{proof}
\begin{lemma}\label{Brown I satisfies Divisor Equation}
Brown's $I$-function satisfies the Divisor Equation. That is,
$$z\nabla_{\rho}I_{\Fl(E)_T}^{\tilde{\beta}} = (\rho + \langle \rho,\tilde{\beta} \rangle z ) I_{\Fl(E)_T}^{\tilde{\beta}}$$ for any $\rho \in H^2(\Fl(E)_T)$.
\end{lemma}
\begin{proof}
Decompose $\rho=\rho_F + \pi^*\rho_B$ into fibre and base part.
Basic differentiation and the divisor equation for $J_X$ show that
$$ z\nabla_{\rho}I_{\Fl(E)_T}^{\tilde{\beta}} = \, \left(\rho_F + \langle \rho_F, \tilde{\beta} \rangle z+(\pi^*\rho_{B} + \langle \pi^*\rho_B, \tilde{\beta}\rangle z)\right) e^{t/z} e^{\langle \tilde{\beta}, t \rangle} \pi^{*}J_{X}^{\pi_{*}\tilde{\beta}}(\tau,z) \cdot \mathbf{H} $$
where $\mathbf{H}$ is a hypergeometric factor with no dependence on $t$ or $\tau$. The right-hand side simplifies to
$$(\rho+ \langle \rho, \tilde{\beta} \rangle z)I_{\Fl(E)_T}^{\tilde{\beta}}$$
as required.
\end{proof}
\begin{lemma}\label{weylinvariant}
If we restrict $t$ to lie in the Weyl-invariant locus $H^2(\Fl(E)_T)^W \subset H^2(\Fl(E)_T)$ then $(t,\tau) \mapsto I_{\Fl(E)_T}(t,\tau,z)$ takes values in $H^\bullet(\Fl(E)_T)^W$.
\end{lemma}
\begin{proof}
This is immediate from the definition of $I_{\Fl(E)_T}(t,\tau,z)$, in Theorem~\ref{brown2014gromov}.
\end{proof}
\begin{proposition}\label{brownoh}
Restrict $t$ to lie in the Weyl-invariant locus $H^2(\Fl(E)_T)^W \subset H^2(\Fl(E)_T)$ and consider the Brown $I$-function $(t, \tau) \mapsto I_{\Fl(E)_T}(t, \tau, z)$. The Givental--Martin modification $I_{\GM}(t, \tau)$ of this family is equal to Oh's $I$-function $I_{\Fl(E)}(t, \tau)$.
\end{proposition}
\begin{proof}
Lemma~\ref{weylinvariant} and Lemma~\ref{IGMexists} imply that the Givental--Martin modification $I_{\GM}(t, \tau)$ exists. We need to compute it. Note that the restrictions to the fibre of the classes $\tilde{H}_{i,j}$ form a basis for~$H^{2}(\Fl_T)$. Since the general fibre $\Fl_T$ of $\Fl(E)_T$ has vanishing first homology, the Leray--Hirsch theorem gives an identification $\QQ[H_2(\Fl(E)_T, \ZZ)]=\QQ[H_2(X,\ZZ)][q_{1,1}, \dots, q_{\ell, r_\ell}]$ via the map
\begin{equation}\label{noviab}
Q^{\tilde{\beta}} \mapsto Q^{\pi_*{\tilde{\beta}}}\prod_{i,j}q_{i,j}^{\langle \tilde{H}_{i,j}, \tilde{\beta} \rangle}
\end{equation}
By Lemma~\ref{effsummationrange}, the summation range in the sum defining $I_{\Fl(E)_T}$ is contained in $\NE(\Fl(E)_T)$. We can therefore write the corresponding twisted $I$-function \eqref{general Weyl twist} as
\begin{align*}
I^{\text{\rm tw}}(t, \tau,z)= e^{\frac{t}{z}} \sum_{\substack{D \in \NE(X) \\ \underline{d} \in \ZZ^R}} Q^{D} \prod_{i,j}q_{i,j}^{d_{i,j}}e^{t \cdot \underline{d}} \pi^{*}J_{X}^{D}(\tau, z)
\prod_{i=1}^{\ell} \prod_{j=1}^{r_i}\prod_{j'=1}^{r_{i+1}}\frac{ \prod_{m=-\infty}^{0}\tilde{H}_{i,j} - \tilde{H}_{i+1,j'} + mz}
{\prod_{m=-\infty}^{d_{i,j} - d_{i+1,j'}}\tilde{H}_{i,j} - \tilde{H}_{i+1,j'} + mz} \\
\times \prod_{i=1}^{\ell} \prod_{j \neq j'} \frac{\prod_{m=-\infty}^{d_{i,j} - d_{i,j'}}\tilde{H}_{i,j} - \tilde{H}_{i,j'} + \lambda + mz}{\prod_{m=-\infty}^{0} \tilde{H}_{i,j} -\tilde{H}_{i,j'} + \lambda + mz}
\end{align*}
where the $t_{i,j} \in \CC$, $t = \sum_{i=1}^{\ell}\sum_{j=1}^{r_i} t_{i,j} \tilde{H}_{i,j}$, and $t \cdot \underline{d}=\sum_{i,j}t_{i,j}d_{i,j}$. For the Weyl modification factor we used the fact that the roots of $G$ are given by $\rho_{i,j} \rho_{i,j'}^{-1}$, where the character $\rho_{i,j}$ was defined in Section~\ref{flag}. By Lemma~\ref{effsummationrange} the effective summation range for the vector $\underline{d}$ here is contained in the set $S \subset \ZZ^R$ consisting of $\underline{d}$ such that $\langle \tilde{\beta}, \tilde{H}_{i,j} \rangle=d_{i,j}$ for some $\tilde{\beta} \in \NE(\Fl(E)_T)$.
We can identify the group ring $\QQ[H_2(\Fl(E))]$ with $\QQ[H_2(X,\ZZ)][q_{1}, \dots, q_{\ell}]$ via the map
\begin{equation} \label{novinonab}
Q^\beta \mapsto Q^{\pi_*\beta}\prod_{i}q_{i}^{\langle c_1(S_i^\vee), \beta \rangle}
\end{equation}
Via \eqref{noviab} and \eqref{novinonab} the map on Mori cones $\varrho: \NE(\Fl(E)_T) \rightarrow \NE(\Fl(E))$ becomes
$$Q^D\prod_{i,j}q_{i,j}^{d_{i,j}} \mapsto Q^D\prod_{i}q_{i}^{\sum_j d_{i,j}}$$
Restricting $t$ to the Weyl-invariant locus $H^2(\Fl(E)_T)^W$ corresponds to setting $t_{i,j}=t_i$ for all~$i$ and~$j$, which gives $e^{t \cdot \underline{d}}=e^{\sum_i t_i d_i}$ where $d_i=\sum_j d_{i,j}$. The identification $H^2(\Fl(E)_T)^W \cong H^2(\Fl(E))$ sends $\sum_{i,j} t_i \tilde{H}_{i,j}$ to $\sum_i t_i c_1(S_i^\vee)$, so projecting along \eqref{quotientH} and taking the limit as $\lambda \to 0$ we obtain
\begin{align*}
e^{\frac{t}{z}} \sum_{\substack{D \in \NE(X)\\\underline{\delta} \in \ZZ^\ell}} Q^D\prod_{i}q_{i}^{\delta_i} e^{t \cdot \underline{\delta}} \pi^{*}J_{X}^D(\tau, z) \sum_{\substack{\underline{d} \in \ZZ^R\colon \\
\forall i \sum_j d_{i,j}=\delta_i}}\prod_{i=1}^{\ell} \prod_{j=1}^{r_i}\prod_{j'=1}^{r_{i+1}}\frac{ \prod_{m=-\infty}^{0}{H}_{i,j} - {H}_{i+1,j'} + mz}{\prod_{m=-\infty}^{d_{i,j} - d_{i+1,j'}}{H}_{i,j} - {H}_{i+1,j'} + mz} \\
\times \prod_{i=1}^{\ell} \prod_{j \neq j'} \frac{\prod_{m=-\infty}^{d_{i,j} - d_{i,j'}}H_{i,j} - H_{i,j'} + mz}{\prod_{m=-\infty}^{0} H_{i,j} -H_{i,j'} + mz}
\end{align*}
where now $t=\sum_i t_i c_1(S_i^\vee)$. The effective summation range here is contained in $\NE(\Fl(E))$ by construction.
Using \eqref{novinonab} again we may rewrite this as
\begin{align*}
e^{\frac{t}{z}} \sum_{\substack{\beta \in \NE(\Fl(E))}} Q^{\beta} e^{\langle \beta, t \rangle} \pi^{*}J_{X}^{\pi_*\beta}(\tau, z) \sum_{\substack{\underline{d} \in \ZZ^R\colon \\ \forall i \sum_j d_{i,j}=\langle \beta, c_1(S_i^\vee) \rangle}}\prod_{i=1}^{\ell} \prod_{j=1}^{r_i}\prod_{j'=1}^{r_{i+1}}\frac{ \prod_{m=-\infty}^{0}{H}_{i,j} - {H}_{i+1,j'} + mz}{\prod_{m=-\infty}^{d_{i,j} - d_{i+1,j'}}{H}_{i,j} - {H}_{i+1,j'} + mz} \\
\times \prod_{i=1}^{\ell} \prod_{j \neq j'} \frac{\prod_{m=-\infty}^{d_{i,j} - d_{i,j'}}H_{i,j} - H_{i,j'} + mz}{\prod_{m=-\infty}^{0} H_{i,j} -H_{i,j'} + mz}
\end{align*}
This is $I_{\Fl(E)}(t, \tau, z)$, as required.
\end{proof}
\begin{remark}\label{effectivesummationrange}
In view of \eqref{ampleconegen}, we see that the effective summation range in $I_{\Fl(E)}$ is contained in the subset of vectors satisfying
\[
d_{i,j} \geq \min_{j'}d_{\ell+1,j'} \; \forall \, i, j.
\]
This will prove useful in calculations in Section~\ref{examples}.
\end{remark}
\subsection{The Abelian/non-Abelian Correspondence with bundles}\label{AnAwithbundles}
We are now ready to prove Theorem~\ref{step two}. Recall from the Introduction that we have fixed a representation $\rho\colon G \rightarrow \GL(V)$ where $G=\prod_i \GL_{r_i}(\CC)$, and that this determines vector bundles $V^G \to \Fl(E)$ and $V^T \to \Fl(E)_T$. Since $T$ is Abelian, $V^T$ splits as a direct sum of line bundles $$V^T=F_1 \oplus \dots \oplus F_k$$
The Brown $I$-function gives a family
\begin{align*}
&(t,\tau) \mapsto I_{\Fl(E)_T}(t,\tau,{-z}) & \text{$t \in H^2(\Fl(E)_T)^W$, $\tau \in H^\bullet(X)$}
\intertext{of elements of $\cH_{\Fl(E)_T}$, and Theorem~\ref{brown2014gromov} shows that $I_{\Fl(E)_T}(t,\tau,{-z}) \in \cL_{\Fl(E)_T}$. Twisting by $(F, \mathbf{c})$ where $\mathbf{c}$ is the $\CC^\times$-equivariant Euler class with parameter $\mu$ gives a twisted $I$-function, as in Definition~\ref{twistedI}, which we denote by}
&(t, \tau) \mapsto I_{V^T_\mu}(t, \tau,{-z}) & \text{$t \in H^2(\Fl(E)_T)^W$, $\tau \in H^\bullet(X)$}
\intertext{Applying Proposition~\ref{twisted=Deltad} shows that $I_{V^T_\mu}(t, \tau, {-z}) \in \cL_{V^T_\mu}$. Twisting again, by $(\Phi, \mathbf{c'})$ where $\Phi \to \Fl(E)_T$ is the roots bundle from the Introduction and $\mathbf{c'}$ is the $\CC^\times$-equivariant Euler class with parameter $\lambda$ gives a twisted $I$-function, as in Definition~\ref{twistedI}, which we denote by}
& (t, \tau) \mapsto I_{\Phi_\lambda \oplus V^T_\mu}(t, \tau,{-z}) & \text{$t \in H^2(\Fl(E)_T)^W$, $\tau \in H^\bullet(X)$}
\intertext{Applying Proposition~\ref{twisted=Deltad} again shows that $I_{\Phi_\lambda \oplus V^T_\mu}(t, \tau, {-z}) \in \cL_{\Phi_\lambda \oplus V^T_\mu}$. We now project along \eqref{quotientH} and take the non-equivariant limit $\lambda \to 0$, obtaining the Givental--Martin modification of~$I_{V^T_\mu}$. This is a family}
& (t, \tau) \mapsto I_{\GM}(t, \tau, {-z}) & \text{$t \in H^2(\Fl(E)_T)^W$, $\tau \in H^\bullet(X)$}
\end{align*}
of elements of $\cH_{\Fl(E)}$. Explicitly:
\begin{definition}[which is a specialisation of Definition~\ref{IGM definition} to the situation at hand] \label{IGM definition special case}
\begin{align*}
& I_{\GM}(t, \tau, z) = \\
& e^{\frac{t}{z}} \sum_{\substack{\beta \in \NE(\Fl(E))}} Q^{\beta} e^{\langle \beta, t \rangle} \pi^{*}J_{X}^{\pi_*\beta}(\tau, z)
\sum_{\substack{\underline{d} \in \ZZ^R\colon \\ \forall i \sum_j d_{i,j}=\langle \beta, c_1(S_i^\vee) \rangle}}\prod_{i=1}^{\ell} \prod_{j=1}^{r_i}\prod_{j'=1}^{r_{i+1}}\frac{ \prod_{m=-\infty}^{0}{H}_{i,j} - {H}_{i+1,j'} + mz}{\prod_{m=-\infty}^{d_{i,j} - d_{i+1,j'}}{H}_{i,j} - {H}_{i+1,j'} + mz} \\
& \qquad \qquad \qquad \qquad \qquad \qquad \qquad
\times \prod_{i=1}^{\ell} \prod_{j \neq j'} \frac{\prod_{m=-\infty}^{d_{i,j} - d_{i,j'}}H_{i,j} - H_{i,j'} + mz}{\prod_{m=-\infty}^{0} H_{i,j} -H_{i,j'} + mz}
\prod_{s=1}^{k} \frac{\prod_{m=-\infty}^{f_s \cdot \underline{d}} f_s + \mu + mz}{\prod_{m=-\infty}^{0} f_s + \mu + mz}
\end{align*}
Here $J^D_X(\tau, z)$ is as in \eqref{JX by degrees}, $f_s \cdot \underline{d} = \sum_{i,j} f_{s,i,j} d_{i,j}$, and $f_s = \sum_{i,j} f_{s,i,j} H_{i,j}$, where $$c_1(F_s) = \sum_{i=1}^\ell \sum_{j=1}^{r_i}f_{s,i,j} \tilde{H}_{i,j}$$
\end{definition}
\noindent Lemma~\ref{IGMexists} shows that this expression is well-defined despite the presence of
$$\omega = \textstyle \prod_i \prod_{j < j'} (H_{i,j} - H_{i,j'})$$
in the denominator. Corollary~\ref{IGMonLGM} shows that $I_{\GM}(t, \tau, {-z}) \in \cL_{\GM, V^T_\mu}$. Note that $I_{\GM}(t, \tau)$ is \emph{not} the $V^G$-twist of Oh's $I$-function $I_{\Fl(E)}$. Indeed $V^G$ need not be a split bundle, so the twist may not even be defined.
\begin{theorem} \label{IGM on twisted cone}
Let $I_{\GM}$ be as in Definition~\ref{IGM definition special case}. Then:
\begin{align*}
I_{\GM}(t,\tau,-z) \in \cL_{V^G_\mu} &&
\text{for all $t \in H^2(\Fl(E)_T)^W$, $\tau \in H^\bullet(X)$.}
\end{align*}
\end{theorem}
\begin{proof}
Before projecting and taking the non-equivariant limit, we have
$$
I_{\Phi_\lambda \oplus V^T_\mu} = \Delta_{V^T_\mu} \big( D_{V^T_\mu} \big( I_{\Phi_\lambda}\big)\big)
$$
by Proposition~\ref{twisted=Deltadeq}. Projecting along \eqref{quotientH} gives
$$
p \circ I_{\Phi_\lambda \oplus V^T_\mu} = \Delta_{V^G_\mu} \big( D_{V^G_\mu} \big( p \circ I_{\Phi_\lambda}\big)\big)
$$
and taking the limit $\lambda \to 0$, which is well-defined by Lemma~\ref{IGMexists}, gives
$$
I_{\GM} = \Delta_{V^G_\mu} \big( D_{V^G_\mu} \big( I_{\Fl(E)}\big)\big)
$$
by Proposition~\ref{brownoh}. The result now follows from Proposition~\ref{twisted=Deltad}.
\end{proof}
Exactly the same argument proves:
\begin{corollary} \label{extra line bundle}
Let $L \to X$ be a line bundle with first Chern class $\rho$, and define the vector bundle $F \to \Fl(E)$ to be $F = V^G \otimes \pi^* L$. Let $I_{\GM}$ be as in Definition~\ref{IGM definition special case}, except that the factor
\begin{align*}
\prod_{s=1}^{k} \frac{\prod_{m=-\infty}^{f_s \cdot \underline{d}} f_s + \mu + mz}{\prod_{m=-\infty}^{0} f_s + \mu + mz}
&& \text{is replaced by} &&
\prod_{s=1}^{k} \frac{\prod_{m=-\infty}^{f_s \cdot \underline{d} + \langle \rho, \pi_* \beta \rangle} f_s + \pi^* \rho + \mu + mz}{\prod_{m=-\infty}^{0} f_s + \pi^* \rho + \mu + mz}
\end{align*}
Then:
\begin{align*}
I_{\GM}(t,\tau,-z) \in \cL_{F_\mu} &&
\text{for all $t \in H^2(\Fl(E)_T)^W$, $\tau \in H^\bullet(X)$.}
\end{align*}
\end{corollary}
The following Corollary gives a closed-form expression for genus-zero Gromov--Witten invariants of the zero locus of a generic section $Z$ of $F$ in terms of invariants of $X$.
\begin{corollary} \label{I=J}
With notation as in Corollary~\ref{extra line bundle}, let $Z$ be the zero locus of a generic section of $F \rightarrow \Fl(E)$. Suppose that ${-K_Z}$ is the restriction of an ample class on $\Fl(E)$ and that $\tau \in H^2(X)$. Then
$$J_{F_\mu}(t+\tau, z)=e^{-C(t)/z}I_{\GM}(t,\tau, z)$$
where
$$ C(t)=\sum_{\beta} n_\beta Q^\beta e^{\langle \beta, t \rangle} $$
for some constants $n_\beta \in \QQ$ and the sum runs over the finite set
$$S=\{ \beta \in \NE(\Fl(E)) : \langle {-K}_{\Fl(E)} - c_1(F), \beta \rangle = 1\}$$
If $Z$ is of Fano index two or more then this set is empty and $C(t) \equiv 0$. Regardless, if the vector bundle $F$ is convex then the non-equivariant limit $\mu \to 0$ of $J_{F_\mu}$ exists and
$$
J_Z\big(i^* t + i^* \tau, z \big) = i^* J_{F_0}(t+\tau, z)
$$
where $i \colon Z \to \Fl(E)$ is the inclusion map.
\end{corollary}
\begin{proof}[Proof of Corollary~\ref{I=J}]
The statement about Fano index two or more follows immediately from the Adjunction Formula
$$ K_Z=\big(K_{\Fl(E)}+c_1(F)\big)\big|_Z$$
We need to show that
\begin{equation} \label{IGM asymptotics}
I_{\GM}(t, \tau, z) = z + t + \tau + C(t) + O(z^{-1})
\end{equation}
Everything else then follows from the characterisation of the twisted $J$-function just below Definition~\ref{twisted J}, the String Equation
\begin{align*}
J_{F_\mu}(\tau + a, z) = e^{a/z} J_{F_\mu}(\tau,z) &&
a \in H^0(\Fl(E))
\end{align*}
and~\cite{Coates2014}. To establish \eqref{IGM asymptotics}, it will be convenient to set $\deg(z) = \deg(\mu) = 1$, $\deg(\phi)=k$ for $\phi \in H^{2k}(\Fl(E))$, and $\deg(Q^\beta)=\langle -K_X, \beta \rangle$ if $\beta \in H_2(X)$. The degree axiom for Gromov--Witten invariants then shows that $J_X^{\pi_*\beta}$ is homogeneous of degree $\langle K_X, \pi_*\beta \rangle +1$. Write
$$I_{\GM}(t, \tau,z) = e^{\frac{t}{z}} \sum_{\beta \in {\NE}(\Fl(E))}
Q^{\beta} e^{\langle \beta, t \rangle} \pi^{*}J_{X}^{\pi_*\beta}(\tau, z) \times {I}_{\beta}(z) \times M_\beta(z)
$$
where
$$ M_\beta(z) = \prod_{s=1}^{k} \frac{\prod_{m=-\infty}^{f_s \cdot \underline{d} + \langle \rho, \pi_* \beta \rangle} f_s + \pi^* \rho + \mu + mz}{\prod_{m=-\infty}^{0} f_s + \pi^* \rho + \mu + mz}
$$
A straightforward calculation shows that
\begin{align*}
{I}_{\beta}(z)&=z^{\langle K_{\Fl(E)}-\pi^*K_X, \beta \rangle}{i}_{\beta}(z)\\
M_\beta(z)&=z^{\langle c_1(F), \beta \rangle}m_\beta(z)
\end{align*}
where $i_\beta(z), m_\beta(z) \in \cH_{\Fl(E)}$ are homogeneous of degree $0$.
It follows that $\pi^{*}J_{X}^{\pi_*\beta}(\tau, z) \times {I}_{\beta}(z) \times M_\beta(z)$ is homogeneous of degree $\langle K_{\Fl(E)}+c_1(F), \beta \rangle+1$ which is nonpositive for $\beta \neq 0$ by the assumptions on $-K_Z$. Since $\tau \in H^2(X)$, any negative contribution to the homogeneous degree must come from a negative power of $z$, so that $\pi^{*}J_{X}^{\pi_*\beta}(\tau, z) \times {I}_{\beta}(z) \times M_\beta(z)$ is $O(z^{-1})$, unless $\beta=0$ or $\beta \in S$. In the latter case, the expression has homogeneous degree $0$ and is therefore of the form $c_0+\tfrac{c_1}{z}+O(z^{-2})$ with $c_i$ independent of $z$ and of degree $i$. Relabelling $n_\beta=c_0$ and
expanding $I_{\GM}$ in powers of $z$, we obtain
\begin{multline*}
I_{\GM}(t, \tau,z) =
\big(1+t z^{-1}+O(z^{-2})\big)
\big(\pi^*J_X^0 \times I_{0} \times M_0 +\Big(
\sum_{\beta \in S}
n_\beta Q^{\beta} e^{\langle \beta, t \rangle} +O(z^{-1})\Big) +\sum_{0 \neq \beta \notin S} O(z^{-1})\big)\\
=(z+\tau+t+C(t)+O(z^{-1}))
\end{multline*}
where $C(t)$ is as claimed. This proves \eqref{IGM asymptotics}, and the result follows.
\end{proof}
We restate Corollary~\ref{I=J} in the case where the flag bundle is a Grassmann bundle, i.e.\ $\ell=1$, relabelling $H_{1,j}=H_j$, $d_{1,j} = d_j$ and $r_1=r$. The rest of the notation here is as in \S\ref{notation}.
\begin{corollary}\label{explicit Gr}
Let $V^G\rightarrow \Gr(r, E)$ be a vector bundle induced by a representation of $G$, let $L \to X$ be a line bundle with first Chern class $\rho$, and let $F = V^G \otimes \pi^* L$. Let $Z$ be the zero locus of a generic section of $F$. Suppose that $F$ is convex, that $-K_{\Gr(r, E)} - c_1(F)$ is ample, and that $\tau \in H^2(\Gr(r,E))$. Then the non-equivariant limit $\mu \to 0$ of the twisted $J$-function $J_{F_\mu}$ exists and satisfies
$$ J_Z\big(i^* t + i^*\tau, z \big) = i^* J_{F_0}(t+\tau, z) $$
where $i \colon Z \to \Gr(r, E)$ is the inclusion map. Furthermore
\begin{multline} \label{explicit J for Gr}
J_{F_0}(t+\tau, z) = e^{\frac{t - C(t)}{z}} \sum_{\substack{\beta \in \NE(\Gr(r, E))}} Q^{\beta} e^{\langle \beta, t \rangle} \pi^{*}J_{X}^{\pi_*\beta}(\tau, z) \\
\sum_{\substack{\underline{d} \in \ZZ^r\colon \\ d_1 + \cdots + d_r = \langle \beta, c_1(S^\vee) \rangle}} (-1)^{\epsilon(\underline{d})}
\prod_{i=1}^{r}\prod_{j=1}^{n}\frac{ \prod_{m=-\infty}^{0}{H}_i + \pi^* c_1(L_j) + mz}{\prod_{m=-\infty}^{d_i + \langle \pi_* \beta, c_1(L_j) \rangle} H_i + \pi^* c_1(L_j) + mz} \\
\times \prod_{i < j} \frac{H_i - H_j + (d_i - d_j)z}{H_i -H_j}
\times \prod_{s=1}^{k} \prod_{m=1}^{f_s \cdot \underline{d} + \langle \rho, \pi_* \beta \rangle} \big( f_s + \pi^* \rho + mz \big)
\end{multline}
Here the Abelianised bundle $V^T$ splits as a direct sum of line bundles $F_1 \oplus \cdots \oplus F_k$ with first Chern classes that we write as $c_1(F_s) = \sum_{i=1}^{r}f_{s,i} \tilde{H}_i$, $J^D_X(\tau,z )$ is as in \eqref{JX by degrees}, $\epsilon(\underline{d}) = \sum_{i<j} d_i - d_j$, $f_s \cdot \underline{d} = \sum_{i} f_{s,i} d_i$, $f_s = \sum_i f_{s,i} H_i$, and $C(t) \in H^0(\Gr(r, E), \Lambda)$ is the unique expression such that the right-hand side of \eqref{explicit J for Gr} has the form $z + t + \tau + O(z^{-1})$.
\end{corollary}
\begin{remark} \label{effectivesummationrange Gr}
For a more explicit formula for $C(t)$, see Corollary~\ref{I=J}; in particular if $Z$ has Fano index two or greater then $C(t) \equiv 0$. By Remark~\ref{effectivesummationrange} the summand in \eqref{explicit J for Gr} is zero unless for each $i$ there exists a $j$ such that $d_i + \langle \pi_* \beta, c_1(L_j) \rangle \geq 0$.
\end{remark>
\begin{proof}[Proof of Corollary~\ref{explicit Gr}]
We cancelled terms in the Weyl modification factor, as in the proof of Lemma~\ref{IGMexists}, and took the non-equivariant limit $\mu \to 0$.
\end{proof}
\begin{remark}
The relationship between $I$-functions (or generating functions for genus-zero quasimap invariants) and $J$-functions (which are generating functions for genus-zero Gromov--Witten invariants) is particularly simple in the Fano case~\cite{Givental1996toric}~\cite[\S1.4]{CFK2014}, and for the same reason Corollary~\ref{I=J} holds without the restriction $\tau \in H^2(X)$ if $Z \to X$ is relatively Fano\footnote{That is, if the relative anticanonical bundle ${-K}_{Z/X}$ is ample.}. This never happens for blow-ups $\tilde{X} \to X$, however, and it is hard to construct examples where $Z \to X$ is relatively Fano and the rest of the conditions of Corollary~\ref{I=J} hold. We do not know of any such examples.
\end{remark}
\begin{remark} \label{what can we compute}
Corollary~\ref{I=J} gives a closed-form expression for the small $J$-function of $Z$ -- or, equivalently, for one-point gravitational descendant invariants of $Z$ -- in the case where $Z$ is Fano. But in general (that is, without the Fano condition on $Z$) one can use Birkhoff factorization, as in~\cite{CoatesGivental2007, CFK2014} and~\cite[\S3.8]{CCIT2019}, to compute any twisted genus-zero gravitational descendant invariant of $\Fl(E)$ in terms of genus-zero descendant invariants of $X$. The twisting here is with respect to the $\CC^\times$-equivariant Euler class and the vector bundle $F$. Thus Corollary~\ref{I=J} determines the Lagrangian submanifold $\cL_{F_\mu}$ that encodes twisted Gromov--Witten invariants. Applying~\cite[Theorem~1.1]{Coates2014}, we see that Corollary~\ref{I=J} together with Birkhoff factorization allows us to compute any genus-zero Gromov--Witten invariant of the zero locus $Z$ of the form
\begin{equation} \label{what can we compute GW}
\langle \theta_1 \psi^{i_1}, \dots, \theta_n \psi^{i_n} \rangle_{0,n,d}
\end{equation}
where all but one of the cohomology classes $\theta_i$ lie in $\image(i^*) \subset H^\bullet(Z)$ and the remaining $\theta_i$ is an arbitrary element of $H^\bullet(Z)$. Here $i \colon Z \to \Fl(E)$ is the inclusion map.
\end{remark}
\begin{remark} \label{what can we compute blow up}
Applying Remark~\ref{what can we compute} to the blow-up $\tilde{X} \to X$ considered in the introduction, we see that Corollary~\ref{I=J} together with Birkhoff factorization allows us to compute arbitrary invariants of $\tilde{X}$ of the form \eqref{what can we compute GW} in terms of genus-zero gravitational descendants of $X$. In this case $\image(i^*) \subset H^\bullet(\tilde{X})$ contains all classes from $H^\bullet(X)$ and also the class of the exceptional divisor.
\end{remark}
\section{The Main Geometric Construction} \label{geometric}
\subsection{Main Geometric Construction}
Let $F$ be a locally free sheaf on a variety $X$. We denote by $F(x)$ its fibre over $x$, a vector space over the residue field $\kappa(x)$. A morphism $\varphi$ of locally free sheaves induces a linear map on fibres, denoted by $\varphi(x)$. We make the following definition:
\begin{definition}
Let $\varphi \colon E^m \rightarrow F^n$ be a morphism of locally free sheaves of rank $m$ and $n$ respectively.
The $k$-th degeneracy locus is the subvariety of $X$ defined by $$D_k(\varphi)=\big \{ x \in X\colon \rk \,
\varphi(x) \leq k \big\}$$
Note that $D_k(\varphi)=X$ if $k \geq \min\{m,n\}$; if $k=\min\{m,n\}-1$ we simply call $D_k(\varphi)$ the degeneracy locus of $\varphi$.
\end{definition}
We have the following results:
{\boldsymbol{e}}gin{itemize}
\item Scheme-theoretically, $D_k(\varphi)$ may be defined as the zero locus of the section $\wedge^k\varphi$; this shows that locally the ideal of $D_k(\varphi)$ is defined by the $(k+1) \times (k+1)$-minors of $\varphi$.
\item If $E^\vee \otimes F$ is globally generated, then $D_k(\varphi)$ of a generic $\varphi$ is either empty or has expected codimension $(m-k)(n-k)$, and the singular locus of $D_k(\varphi)$ is contained in $D_{k-1}(\varphi)$. In particular, if $\varphi$ is generic and $\dim X < (m-k+1)(n-k+1)$, then $D_k(\varphi)$ is smooth \cite[Theorem 2.8]{Ottaviani1995}.
\item We may freely assume that $m \geq n$ in what follows, since we can always replace $\varphi$ with its dual map whose degeneracy locus is the same.
\end{itemize}
{\boldsymbol{e}}gin{proposition}\label{geometricconstruction}
Let $X$ be a smooth variety, and $\varphi\colon E^m \rightarrow F^n$ a generic morphism of locally free sheaves on $X$. Suppose that $m \geq n$ and write $r=m-n$. Let $Y=D_{n-1}(\varphi)$ be the degeneracy locus of $\varphi$, and assume that $\varphi$ has generically full rank, that $Y$ has the expected codimension $m-n+1$ and that $Y$ is smooth.
Let $\pi\colon \Gr(r,E) \rightarrow X$ be the Grassmann bundle of $E$ on $X$.
Then the blow-up $\Bl_Y(X)$ of $X$ along $Y$ is a subvariety of $\Gr(r,E)$, cut out as the zero locus of the regular section $s \in \Gamma(\Hom(S, \pi^*F))$ defined by the composition $$S \hookrightarrow \pi^*E \xrightarrow{\pi^*\varphi} \pi^*F $$
where the first map is the canonical inclusion.
\end{proposition}
{\boldsymbol{e}}gin{proof}
We write points in $\Gr(r,E)$ as $(p, V)$, where $p \in X$ and $V$ is an $r$-dimensional subspace of the fibre $E(p)$. At $(p, V)$, the section $s$ is given by the composition
$$V \hookrightarrow E(p) \xrightarrow{\varphi(p)} F(p)$$
so $s$ vanishes at $(p, V)$ if and only if $V \subset \ker\varphi(p)$.
The statement is local on $X$, so fix a point $P \in X$ and a Zariski open neighbourhood $U=\Spec(A)$ with trivialisations $E |_U=A^m, F |_U=A^n $. We will show that the equations of $Z(s) \cap U$ and $Bl_{U \cap Y}U$ agree. Under these identifications
$\varphi$ is given by an $n \times m$ matrix with entries in $A$. Since $\varphi$ has generically maximal rank and $Y$ is nonsingular, after performing row and column operations and shrinking $U$ if necessary, we may assume that $\varphi$ is given by the matrix
$${\boldsymbol{e}}gin{pmatrix}
x_0&\dots &x_{r}&0&0&\dots&0\\
0& \dots &0&1&0&\dots&0\\
0& \dots &0&0&1&\dots&0\\
\vdots&\vdots&\vdots&\vdots&\vdots&\vdots&\vdots\\
0&\dots&0&0&\dots&0&1\\
\end{pmatrix}$$
Note that the ideal of the minors of this matrix is just $I=(x_0, \dots, x_{r})$ and that $x_0, \dots, x_r$ form part of a regular system of parameters around $P$, so we may assume that $n=1$, $m=r+1$.
Writing $y_i$ for the basis of sections of $S^\vee$ on $\Gr(r,A^{r+1})$, we see that $Z(s)$ is given by the equation
{\boldsymbol{e}}gin{align*}
x_0y_0+\dots+x_ry_r&=0
\end{align*}
Under the Pl\"ucker isomorphism $$\Gr(r,A^{r+1}) \rightarrow \PP(\wedge^{r}A^{r+1})\cong U \times \PP^r_{y_0,\dots, y_r}$$
$Z(s)$ maps to the variety cut out by the minors of the matrix
$${\boldsymbol{e}}gin{pmatrix}
x_0&\dots& x_{r}\\
{y}_0& \dots &{y}_{r}
\end{pmatrix}$$
i.e.\ the blow-up of $Y \cap U$ in $U$.
\end{proof}
\section{Examples} \label{examples}
We close by presenting three example computations that use Theorems~\ref{step one} and~\ref{step two}, calculating genus-zero Gromov--Witten invariants of blow-ups of projective spaces in various high-codimension complete intersections. Recall, as we will need it below, that if $E \to X$ is a vector bundle of rank $n$ then the anticanonical divisor of $\Gr(r,E)$ is
{\boldsymbol{e}}gin{equation}
\label{-K}
{-K}_{\Gr(r,E)}=\pi^*\left(-K_X+r(\det E)\right) +n(\det S^\vee)
\end{equation}
where $S \to \Gr(r,E)$ is the tautological subbundle. Recall too that the \emph{regularised quantum period} of a Fano manifold $Z$ is the generating function
$$
\widehat{G}_Z(x) = 1 + \sum_{d = 2}^\infty d! c_d x^d
$$
for genus-zero Gromov--Witten invariants of $Z$, where
{\boldsymbol{e}}gin{align*}
c_d = \sum_{{\boldsymbol{e}}ta} \langle \theta \psi_1^{d-2} \rangle_{0,1,{\boldsymbol{e}}ta}
&& \text{for $\theta \in H^{\text{top}}(Z)$ the class of a volume form}
\end{align*}
and the sum runs over effective classes ${\boldsymbol{e}}ta$ such that $\langle {\boldsymbol{e}}ta, {-K}_Z\rangle = d$.
{\boldsymbol{e}}gin{example}
We will compute the regularised quantum period of $\tilde{X}=\Bl_Y\PP^4$ where $Y$ is a plane conic. Consider the situation as in \S\ref{notation} with:
{\boldsymbol{e}}gin{itemize}
\item $X=\PP^4$
\item $E= \cO \oplus \cO \oplus \cO(-1)$
\item $G = \GL_{2}(\CC)$, $T= (\CC^\times)^2 \subset G$
\end{itemize}
Then $A /\!\!/ G$ is $\Gr(2, E)$, and $A /\!\!/ T$ is the $\PP^2 \times \PP^2$-bundle $\PP(E) \times_{\PP^4} \PP(E) \to \PP^4$. By Proposition \ref{geometricconstruction} the zero locus $\tilde{X}$ of a section of $S^\vee \otimes \pi^*(\OO(1))$ on $\Gr(2,E)$ is the blowup of $\PP^4$ along the complete intersection of two hyperplanes and a quadric. We identify the group ring $\QQ[H_2(A /\!\!/ T,\ZZ)]$ with $\QQ[Q,Q_1,Q_2]$, where $Q$ corresponds to the pullback of the hyperplane class of $\PP^4$ and $Q_i$ corresponds to $\tilde{H}_i$. Similarly, we identify $\QQ[H_2(A /\!\!/ G,\ZZ)]$ with $\QQ[Q, q]$, where again $Q$ corresponds to the pullback of the hyperplane class of $\PP^4$ and $q$ corresponds to the first Chern class of $S^\vee$.
We will need Givental's formula~\cite{Givental1996equivariant} for the $J$-function of $\PP^4$:
{\boldsymbol{e}}gin{align*}
J_{\PP^4}(\tau, z)=z e^{\tau/z}\sum_{D = 0}^\infty \frac{Q^D e^{D\tau}}{\prod_{m=1}^D (H+mz)^5} && \tau \in H^2(\PP^4)
\end{align*}
In the notation of \S\ref{notation}, we have $\ell=1$, $r_\ell=r_1=2$, $r_{\ell+1}=3$. We relabel $\tilde{H}_{\ell,j}=\tilde{H}_j$ and $d_{\ell, j}=d_j$. We have that $\tilde{H}_{\ell+1, 1}=\tilde{H}_{\ell+1, 2}=0$, $\tilde{H}_{\ell+1, 3}=\pi^*H$ and $d_{\ell+1, 1}=d_{\ell+1, 2}=0$, $d_{\ell+1, 3}=D$. Write $F = S^\vee \otimes \pi^* \cO(1)$. Corollary~\ref{explicit Gr} and Remark~\ref{effectivesummationrange Gr} give
{\boldsymbol{e}}gin{multline*}
J_{F_0}(t, \tau,z) = z e^{\frac{t + \tau}{z}}\sum_{D=0}^\infty \sum_{d_1=0}^\infty \sum_{d_2=0}^\infty \frac{(-1)^{d_1-d_2} Q^{D} q^{d_1+d_2} e^{D\tau}e^{(d_1 +d_2)t} \prod_{i=1}^{2} \prod_{m=1}^{d_i + D} (H_i + H + mz)}{\prod_{m=1}^{D} (H+ mz)^5 \prod_{m=1}^{d_1}(H_1+mz)^2\prod_{m=1}^{d_2}(H_2+mz)^2} \\
\times \prod_{i=1}^2\frac{\prod_{m=-\infty}^{0} (H_i -H + mz)}{\prod_{m=-\infty}^{d_i-D}(H_i -H + mz)}
\frac{(H_1 - H_2 + z(d_1 - d_2))}{H_1 - H_2}
\end{multline*}
To obtain the quantum period we need to calculate the anticanonical bundle of $\tilde{X}$.
Equation \eqref{-K} and the adjunction formula give
$$-K_{\widetilde{X}}=3H+3\det S^\vee-(2H+\det S^\vee)=H+2 \det S^\vee.$$
To extract the quantum period from the non-equivariant limit $J_{F_0}$ of the twisted $J$-function, we take the component along the unit class $1 \in H^\bullet(A /\!\!/ G; \QQ)$, set $z=1$, and set $Q^{\boldsymbol{e}}ta=x^{\langle {\boldsymbol{e}}ta, -K_{\tilde{X}} \rangle}$. That is, we set $\lambda = 0$, $t=0$, $\tau=0$, $z=1$, $q=x^2$, $Q=x$, and take the component along the unit class, obtaining
{\boldsymbol{e}}gin{multline*}
G_{\tilde{X}}(x)= \sum_{n=0}^\infty \sum_{l=n+1}^\infty \sum_{m=l}^\infty \textstyle (-1)^{l+m-1}x^{l+2m+2n}
\frac{(l+n)!(l+m)!(l-n-1)!}{(l!)^5(m!)^2(n!)^2(n-l)!}(n-m)\\
+ \sum_{l=0}^\infty \sum_{m=l}^\infty \sum_{n=l}^\infty \textstyle (-1)^{m+n}x^{l+2m+2n}
\frac{(l+n)!(l+m)!}{(l!)^5(m!)^2(n!)^2(n-l)!(m-l)!}
\Big(1+(n-m)(-2H_{n}+H_{l+n}-H_{n-l}) \Big)
\end{multline*}
Thus the first few terms of the regularized quantum period are:
{\boldsymbol{e}}gin{multline*}
\widehat{G}_{\tilde{X}}(x)=1+12x^3+120x^5+540x^6+20160x^8+33600x^9+113400x^{10} \\ +2772000x^{11}+2425500x^{12}+\cdots
\end{multline*}
This strongly suggests that $\tilde{X}$ coincides with the quiver flag zero locus with ID 15 in~\cite{Kalashnikov2019}, although this is not obvious from the constructions.
\end{example}
{\boldsymbol{e}}gin{example}
We will compute the regularised quantum period of $\tilde{X}=\Bl_Y\PP^6$, where $Y$ is a 3-fold given by the intersection of a hyperplane and two quadric hypersurfaces. Consider the situation as in \S\ref{notation} with:
{\boldsymbol{e}}gin{itemize}
\item $X=\PP^6$
\item $E= \cO \oplus \cO \oplus \cO(1)$
\item $G = \GL_{2}(\CC)$, $T= (\CC^\times)^2 \subset G$
\end{itemize}
Then $A /\!\!/ G$ is $\Gr(2, E)$, and $A /\!\!/ T$ is the $\PP^2 \times \PP^2$-bundle $\PP(E) \times_{\PP^6} \PP(E) \to \PP^6$. By Proposition \ref{geometricconstruction} the zero locus $\tilde{X}$ of a section of $S^\vee \otimes \pi^*(\OO(2))$ on $\Gr(2,E)$ is the blowup of $\PP^6$ along the complete intersection of a hyperplane and two quadrics. We identify the group ring $\QQ[H_2(A /\!\!/ T,\ZZ)]$ here with $\QQ[Q,Q_1,Q_2]$, where $Q$ corresponds to the pullback of the hyperplane class of $\PP^6$ and $Q_i$ corresponds to $\tilde{H}_i$. Similarly, we identify $\QQ[H_2(A /\!\!/ G,\ZZ)]$ with $\QQ[Q, q]$, where again $Q$ corresponds to the pullback of the hyperplane class of $\PP^6$ and $q$ corresponds to the first Chern class of $S^\vee$.
The $J$-function of $\PP^6$ is~\cite{Givental1996equivariant}:
{\boldsymbol{e}}gin{align*}
J_{\PP^6}(\tau, z)=z e^{\tau/z}\sum_{D = 0}^\infty \frac{Q^D e^{D\tau}}{\prod_{m=1}^D (H+mz)^7} && \tau \in H^2(\PP^6)
\end{align*}
In the notation of \S\ref{notation}, we have $\ell=1$, $r_\ell=r_1=2$, $r_{\ell+1}=3$. We relabel $\tilde{H}_{\ell,j}=\tilde{H}_j$ and $d_{\ell, j}=d_j$. We have that $\tilde{H}_{\ell+1, 1}=\tilde{H}_{\ell+1, 2}=0$, $\tilde{H}_{\ell+1, 3}=- \pi^*H$ and $d_{\ell+1, 1}=d_{\ell+1, 2}=0$, $d_{\ell+1, 3}=-D$. Write $F = S^\vee \otimes \pi^* \cO(2)$. Corollary~\ref{explicit Gr} and Remark~\ref{effectivesummationrange Gr} give
{\boldsymbol{e}}gin{multline*}
J_{F_0}(t, \tau, z) = z e^{\frac{t+\tau}{z}} \sum_{D=0}^\infty \sum_{d_1 = -D}^\infty \sum_{d_2= -D}^\infty \frac{Q^D q^{d_1+d_2} e^{D\tau}e^{(d_1+d_2)t}}{\prod_{m=1}^D(H+mz)^7} \prod_{i=1}^2
\frac{\prod_{m=-\infty}^{0}(H_i+mz)^2}
{\prod_{m=-\infty}^{d_i}(H_i+mz)^2}\\
\times
\prod_{i=1}^2
\frac{\prod_{m=1}^{d_i+2D}(H_i+2H+mz)}{\prod_{m=1}^{d_i + D} (H_i + H + mz)}
(-1)^{d_1-d_2}\frac{(H_1-H_2+z(d_1-d_2))}{H_1-H_2}
\end{multline*}
Again we will need the anticanonical bundle of $\tilde{X}$, which by \eqref{-K} and the adjunction formula is
$$-K_{\widetilde{X}}=9H + 3 \det(S^*)-(4H+\det(S^*))=5H + 2\det(S^*).$$
To extract the quantum period from $J_{F_0}$, we take the component along the unit class $1 \in H^\bullet(A /\!\!/ G; \QQ)$, set $z=1$, and set $Q^{\boldsymbol{e}}ta=x^{\langle {\boldsymbol{e}}ta, -K_{\tilde{X}} \rangle}$.
That is, we set $\lambda = 0$, $t=0$, $\tau=0$, $z=1$, $q=x^2$, $Q=x^5$, and take the component along the unit class, obtaining
{\boldsymbol{e}}gin{multline*}
G_{\tilde{X}}(x)=\sum_{D=0}^\infty \sum_{d_1=0}^\infty \sum_{d_2=0}^\infty (-1)^{d_1 + d_2}x^{5D+2d_1+2d_2}
\frac{(d_1 + 2D)! (d_2 + 2D)!}{(D!)^7(d_1!)^2(d_2!)^2(d_1 +D)!(d_2 + D)!} \\
\times \Big(1+(d_1-d_2)(-2H_{d_1}+H_{d_1+2D}-H_{d_1+D}) \Big)
\end{multline*}
The first few terms of the regularized quantum period are:
$$\widehat{G}_{\tilde{X}}(x) = 1+ 480x^5 + 5040 x^7 + 4082400 x^{10} + 119750400 x^{12} + 681080400 x^{14} + \cdots$$
\end{example}
{\boldsymbol{e}}gin{example}
We will compute the regularised quantum period of $\tilde{X}=\Bl_Y\PP^6$, where $Y$ is a quadric surface given by the intersection of 3 generic hyperplanes and a quadric hypersurface. Consider the situation as in \S\ref{notation} with:
{\boldsymbol{e}}gin{itemize}
\item $X=\PP^6$
\item $E= \cO \oplus \cO \oplus \cO \oplus \cO(2)$
\item $G = \GL_{3}(\CC)$, $T= (\CC^\times)^3 \subset G$
\end{itemize}
Then $A /\!\!/ G$ is $\Gr(3, E)$, and $A /\!\!/ T$ is $\PP(E) \times_{\PP^6} \PP(E) \times_{\PP^6} \PP(E) \to \PP^6$. By Proposition \ref{geometricconstruction} the zero locus $\tilde{X}$ of a section of $S^\vee \otimes \pi^*(\OO(1))$ on $\Gr(3,E)$ is the blowup of $\PP^6$ along the complete intersection of three hyperplanes and a quadric. We identify the group ring $\QQ[H_2(A /\!\!/ T,\ZZ)]$ with $\QQ[Q,Q_1,Q_2,Q_3]$, where $Q$ corresponds to the pullback of the hyperplane class of $\PP^6$ and $Q_i$ corresponds to $\tilde{H}_i$. Similarly, we identify $\QQ[H_2(A /\!\!/ G,\ZZ)]$ with $\QQ[Q, q]$, where again $Q$ corresponds to the pullback of the hyperplane class of $\PP^6$ and $q$ corresponds the first Chern class of $S^\vee$.
In the notation of \S\ref{notation}, we have $\ell=1, r_\ell=r_1=3, r_{\ell+1}=4$. We relabel $\tilde{H}_{\ell,j}=\tilde{H}_j$ and $d_{\ell, j}=d_j$. We have that $\tilde{H}_{\ell+1, 1}=\tilde{H}_{\ell+1, 2} = \tilde{H}_{\ell + 1,3}=0$, $\tilde{H}_{\ell+1, 4}=- \pi^*2H$ and $d_{\ell+1, 1}=d_{\ell+1, 2}=d_{\ell + 1,3} = 0$, $d_{\ell+1, 4}=-2D$.
Write $F = S^\vee \otimes \pi^* \cO(1)$. Corollary~\ref{explicit Gr} and Remark~\ref{effectivesummationrange Gr} give
{\boldsymbol{e}}gin{multline*}
J^{F_0}(t, \tau, z) = z e^{\frac{t+\tau}{z}}
\sum_{D = 0}^\infty \sum_{d_1 = -2D}^\infty \sum_{d_2 = -2D}^\infty \sum_{d_3 = -2D}^\infty \frac{Q^{D}q^{d_1+d_2+d_3} e^{D\tau}e^{(d_1+d_2+d_3) t}}{\prod_{m=1}^D(H+mz)^7}\\
\times \prod_{i=1}^3
\frac{\prod_{m=-\infty}^{0}(H_i+mz)^3}
{\prod_{m=-\infty}^{d_i}(H_i+mz)^3}
\prod_{i=1}^3
\frac{1}
{\prod_{m=1}^{d_i+2D} (H_i + 2H+ mz)}
\prod_{i=1}^3
\frac{\prod_{m=-\infty}^{d_i+D}(H_i+H+mz)}{\prod_{m=-\infty}^{0}(H_i+H+mz)} \\
\times \frac{(H_1-H_2+z(d_1-d_2))}{H_1-H_2}
\frac{(H_1-H_3+z(d_1-d_3))}{H_1-H_3}\frac{(H_2-H_3+z(d_2-d_3))}{H_2-H_3}
\end{multline*}
Arguing as before,
$$-K_{\widetilde{X}}=11H + 4 \det(S^*)-(3H+\det(S^*))=8H + 3\det(S^*).$$
To extract the quantum period from $J_{F_0}$, we set $\lambda = 0$, $t=0$, $\tau=0$, $z=1$, $q=x^3$, $Q=x^8$, and take the component along the unit class. The first few terms of the regularised quantum period are:
{\boldsymbol{e}}gin{multline*}
\widehat{G}_{\tilde{X}}(x) = 1+ 108x^3 + 17820 x^6 + 5040 x^{8} + 5473440 x^{9} + 56364000 x^{11} + 1766526300 x^{12} \\ + 117076459500 x^{14} + 672012949608 x^{15} + \cdots
\end{multline*}
\end{example}
{\boldsymbol{e}}gin{remark}
Strictly speaking the use of Theorem~\ref{step two} in the examples just presented was not necessary. Whenever the base space $X$ is a projective space, or more generally a Fano complete intersection in a toric variety or flag bundle, then one can replace our use of Theorem~\ref{step two} (but not Theorem~\ref{step one}) by~\cite[Corollary~6.3.1]{CFKS2008}. However there are many examples that genuinely require both Theorem~\ref{step one} and Theorem~\ref{step two}: for instance when $X$ is a toric complete intersection but the line bundles that define the center of the blow-up do not arise by restriction from line bundles on the ambient space. (For a specific such example one could take $X$ to be the three-dimensional Fano manifold $\mathrm{MM}_{3\text{--}9}$: see~\cite[\S62]{CCGK16}.) For notational simplicity we chose to present examples with $X = \PP^N$, but the approach that we used applies without change to more general situations.
\end{remark}
\end{document} |
{\partial}egin{document}
\maketitle
{\partial}egin{abstract} We compute the analytic torsion of a cone over a sphere of dimension 1, 2, and 3, and we conjecture a general formula for the cone over an odd dimensional sphere.
\end{abstract}
\section{Introduction}
{\mathsf l}abel{s0}
An important open problem in geometric and global analysis is to extend the Cheeger M\"uller theorem to spaces with singularities of conical type. The aim of this work is to give some contribution to the quantitative aspect of the problem. To this end, we give explicit formulas for the analytic torsion of the class of low dimensional spaces consisting of cones over spheres. The results also cover the smooth case of the discs, and therefore provide a contribution to the discussion on the extension of the Cheeger M\"uller theorem to smooth manifolds with boundary, namely to the problem of establishing the correct boundary term.
Let $(W,g)$ be a closed connected Riemannian manifold of dimension $n$ with metric $g$. Let $C W$ denote the completed finite metric cone over $W$, namely the space $[0,l]\times W$, with the metric $dr\otimes dr+r^2 g$, on $(0,l]\times W$, as defined in \cite{Che0} (2.1). An interesting open problem concerning the metric cone is to compute its analytic torsion. The analytic torsion of a smooth connected Riemannian manifold $(M,g)$ of dimension $m$ is defined by \cite{RS}, Section 6,
{\partial}egin{equation}{\mathsf l}abel{analytic}
{\mathsf l}og T(M)=\frac{1}{2}\sum_{q=1}^m(-1)^q q\zeta'(0,{\partial}elta^{(q)}),
\end{equation}
where ${\partial}elta^{(q)}$ is the Laplace operator on $q$-forms on $M$, and the zeta function is defined by \cite{RS} (1.5)
\[
\zeta(s,{\partial}elta^{(q)})=\sum_{{\mathsf l}ambda\in {\rm Sp}_+{\partial}elta^{(q)}}{\mathsf l}ambda^{-s},
\]
for ${\mathds{R}}e(s)>\frac{m}{2}$, and by analytic continuation elsewhere. This definition extends to the case of a cone $CW$ using the Hodge theory and the functional calculus for the Laplace operator on forms developed in \cite{Che0}.
More precisely, one would like to obtain formulas for $T(CW)$ as a function of some geometric invariant of $W$. Starting from the results of Cheeger \cite{Che0} \cite{Che2}, and applying absolute or relative boundary conditions \cite{RS}, Section 3, one obtains quite easily the eigenvalues of the Laplace operator on forms, necessary to compute the torsion. These eigenvalues turn out to be sequences of real numbers ${\rm Sp}_+\Delta^{(q)}=\{\lambda^{(q)}_{\mu,k}\}$ that correspond to the zeros of some linear combinations of Bessel functions of the first kind and their derivatives. The index $k$ enumerates the zeros, and the index $\mu$ is given by some explicit function of the eigenvalues of the Laplace operator on forms on the section of the cone, namely on $W$. The zeta function of this type of double sequences can be tackled using some recent results of Spreafico \cite{Spr3} \cite{Spr5} \cite{Spr6} \cite{Spr9}. The general strategy is to prove that the sequence ${\rm Sp}_+\Delta^{(q)}$ is spectrally decomposable over some sequence ${\rm Sp}_+ \Delta_{W}^{(p)}$ of eigenvalues of the Laplacian on forms on the section. Then, one can apply the result of Spreafico to obtain the value $\zeta'(0,\Delta^{(q)})$. The final formula can be very complicated in general, and not particularly illuminating. The possibility of reducing and simplifying this formula is based on two facts: one fact is the explicit form of the coefficients of the uniform asymptotic expansion of the Bessel function $I_\nu(\nu z)$ (and of its derivative) with respect to the order $\nu$; the second fact is the explicit knowledge of the eigenvalues of the Laplacian on forms on the section. While the first fact is proved to be true in general, the second one is not clear. For this reason it is interesting to study particular cases where the second fact is also true.
In this note, we study the analytic torsion of the cone over an $n$-dimensional sphere. More precisely, we prove in Section \ref{s4} the following theorem, and we state a conjecture for the general case at the end of Section \ref{s5}.
{\partial}egin{theo} {\mathsf l}abel{t1} The analytic torsion of the cone $C_\alpha S^{n}_{l\sin\alpha}$ of angle $\alpha$, and length $l>0$, over the sphere $S^{n}$, with the standard metric induced by the immersion in ${\mathds{R}}^{n+2}$, and absolute boundary conditions is, for $n=1,2$, and $3$:
{\partial}egin{align*}
{\mathsf l}og T(C_\alpha S^{1}_{l\sin\alpha})=&\frac{1}{2}{\mathsf l}og {\rm Vol}(C_\alpha S^{1}_{l\sin\alpha})+\frac{1}{2}\sin\alpha=\frac{1}{2}{\mathsf l}og\pi l^2\sin\alpha+\frac{1}{2}\sin\alpha,\\
{\mathsf l}og T(C_\alpha S^{2}_{l\sin\alpha})=&\frac{1}{2}{\mathsf l}og {\rm Vol}(C_\alpha S^{2}_{l\sin\alpha})-\frac{1}{2}f({\rm csc}\alpha)+\frac{1}{4}\sin^2\alpha\\
=&\frac{1}{2}{\mathsf l}og \frac{4\pi l^3\sin^2\alpha}{3}-\frac{1}{2}f({\rm csc}\alpha)+\frac{1}{4}\sin^2\alpha,\\
{\mathsf l}og T(C_\alpha S^{3}_{l\sin\alpha})=&\frac{1}{2}{\mathsf l}og {\rm Vol}(C_\alpha S^{3}_{l\sin\alpha})
+\frac{3}{4}\sin\alpha-\frac{1}{12}\sin^3\alpha\\
=&\frac{1}{2}{\mathsf l}og \frac{\pi^2l^4\sin^3\alpha}{2}+\frac{3}{4}\sin\alpha-\frac{1}{12}\sin^3\alpha,
\end{align*}
where the function $f(\nu)$ is given at the end of Section \ref{s4}.
\end{theo}
\section{Geometric setup}
{\mathsf l}abel{s1}
We describe in this section the geometric setup in details. Let $S^n_b$ be the standard sphere of radius $b>0$ in ${\mathds{R}}^{n+1}$, $S^{n}_b=\{x\in{\mathds{R}}^{n+1}~|~|x|=b\}$ (we simply write $S^n$ for $S^n_1$). Imbed $S^n_{l\sin\alpha}$ in ${\mathds{R}}^{n+2}$, with center in the point $\{0,...,0,l\sin\alpha\}$, with $l>0$. Let $C_\alpha
S^n_{l\sin\alpha}$ be the cone of angle $\alpha$ over $S^n_{l\sin\alpha}$ in ${\mathds{R}}^{n+2}$. Note that the disc corresponds to $D^{n+1}_l=C_\frac{\pi}{2} S^{n}_l$. We parameterize $C_{\alpha}S^n_{l\sin\alpha}$ by
{\partial}egin{equation*}{\mathsf l}abel{}C_{\alpha}S_{l\sin\alpha}^{n}={\mathsf l}eft\{
{\partial}egin{array}{rcl}
x_1&=&r \sin{\alpha} \sin{\theta_n}\sin{\theta_{n-1}}\cdots\sin{\theta_3}\sin{\theta_2}\cos{\theta_1} \\[8pt]
x_2&=&r \sin{\alpha} \sin{\theta_n}\sin{\theta_{n-1}}\cdots\sin{\theta_3}\sin{\theta_2}\sin{\theta_1} \\[8pt]
x_3&=&r \sin{\alpha} \sin{\theta_n}\sin{\theta_{n-1}}\cdots\sin{\theta_3}\cos{\theta_2} \\[8pt]
&\vdots& \\
x_{n+1}&=&r \sin{\alpha} \cos{\theta_n} \\[8pt]
x_{n+2}&=&r \cos{\alpha}
\end{array}
\right.\end{equation*}
with $r \in [0,l]$, $\theta_1 \in [0,2\pi]$, $\theta_2,{\mathsf l}dots,\theta_n \in [0,\pi]$, $\alpha$ is a fixed positive real number, and $0<a=\frac{1}{\nu}= \sin{\alpha}{\mathsf l}eq 1$. This is a compact connected space. The metric induced by the immersion in ${\mathds{R}}^{n+2}$ is
{\partial}egin{align*}
g &=dr \otimes dr + r^2 a^2 g_{S^{n}_{1}},\\
\end{align*}
and is smooth for $r>0$. Comparing with \cite{Che0}, Section 1, we see that the space $C_\alpha S^n_{l\sin\alpha}$ is a completed metric cone, and $X_\alpha=C_\alpha S^n_{l\sin\alpha}-\{0\}$, is a metric cone over $S^n_{l\sin \alpha}$. Note that $X_\alpha$ is not smooth, since the radius of the sphere is not unitary. Note also that the space $C_\alpha S^n_{l\sin\alpha}$ is simply connected (in fact it has the homotopy type of a point).
In order to define the opportune self adjoint extension of the Laplace operator on forms, we split the space of forms near the boundary as direct sum $\Lambda C_\alpha S^n_{l\sin\alpha}=\Lambda S^n_{l\sin\alpha}\oplus N^* C_\alpha S^n_{l\sin\alpha}$, where $N^*$ is the dual to the normal bundle to the boundary. Locally, this reads as follows. Let ${\partial}_r$ denotes the outward pointing unit normal vector to the boundary, and $dr$ the correspondent one form. Near the boundary we have the collar decomposition
$C_\alpha S^n_{l\sin\alpha}=[0,-\epsilon)\times S^n_{l\sin\alpha}$, and if $y$ is a system of local coordinates on the boundary, then $x=(r,y)$ is a local system of coordinates in $C_\alpha S^n_{l\sin\alpha}$. The smooth forms on $C_\alpha S^n_{l\sin\alpha}$ near the boundary decompose as
\[
\omega=\omega_{\rm tan}+\omega_{\rm norm},
\]
where $\omega_{\rm norm}$ is the orthogonal projection on the subspace generated by $dr$, and $\omega_{\rm tan}$ is in $\Lambda S^n_{l\sin\alpha}$. We write
\[
\omega=\omega_1+ \omega_{2}\wedge dr,
\]
where $\omega_j\in C^\infty(C_\alpha S^n_{l\sin\alpha})\otimes \Lambda S^n_{l\sin\alpha}$, and
\[
*\omega_2=dr \wedge *\omega.
\]
Define absolute boundary conditions by
\[
B_{\rm abs}(\omega)=\omega_{\rm norm}|_{S^n_{l\sin\alpha}}=\omega_2|_{S^n_{l\sin\alpha}}=0,
\]
and relative boundary conditions by
\[
B_{\rm rel}(\omega)=\omega_{\rm tan}|_{S^n_{l\sin\alpha}}=\omega_1|_{S^n_{l\sin\alpha}}=0.
\]
Let ${\mathcal{B}}(\omega)=B(\omega)\oplus B((d+d^\dagger)(\omega))$. Then the operator ${\partial}elta=(d+d^\dagger)^2$ with boundary conditions ${\mathcal{B}}(\omega)=0$ is self adjoint. Note that ${\mathcal{B}}$ correspond to
{\partial}egin{equation}{\mathsf l}abel{abs}
{\mathcal{B}}_{\rm abs}(\omega)=0\hspace{20pt}{\rm if~ and~ only~ if}\hspace{20pt}{\mathsf l}eft\{{\partial}egin{array}{l}\omega_{\rm norm}|_{S^n_{l\sin\alpha}}=0,\\
(d\omega)_{\rm norm}|_{S^n_{l\sin\alpha}}=0,\\
\end{array}
\right.
\end{equation}
{\partial}egin{equation}{\mathsf l}abel{rel}
{\mathcal{B}}_{\rm rel}(\omega)=0\hspace{20pt}{\rm if~ and~ only~ if}\hspace{20pt}{\mathsf l}eft\{{\partial}egin{array}{l}\omega_{\rm tan}|_{S^n_{l\sin\alpha}}=0,\\
(d^\dagger\omega)_{\rm tan}|_{S^n_{l\sin\alpha}}=0,\\
\end{array}
\right.
\end{equation}
\section{The spectrum of the Laplacian on forms}
{\mathsf l}abel{s2}
In this section we give the spectrum of the Laplacian on forms. The result for $n=1$, and $n=2$ is in \cite{HMS}, Lemmas 3, and 4. Thus we just need to study the case of $n=3$. Decomposing with respect to the projections on the eigenspaces of the restriction of the Laplacian on the section of the cone (i.e.\ with respect to the angular momenta), the definition of an appropriate self adjoint extension of the Laplace operator (on functions) on a cone reduces to the analysis of the boundary values of a singular Sturm Liouville ordinary second order differential equation on the line segment $(0,l]$. The problem was addressed already by Rellich in \cite{Rel}, who parameterized the self adjoint extensions. In particular, it turns out that there are no boundary values (at zero) for the nonzero modes of the angular momentum, while a boundary condition is necessary for the zero modes, and the unique self adjoint extension defined by this boundary condition is the maximal extension, corresponding to the Friedrich extension (see \cite{BS2} or \cite{Che2} for the boundary condition). The same argument works for the Laplacian on forms. However, in the present situation we do not actually need boundary conditions (at zero) for forms of positive degree, since the middle homology of the section of the cone is trivial (compare with \cite{Che0}).
Since the eigenvalues for relative boundary conditions follow by Hodge duality, we just give the eigenvalues for absolute boundary conditions.
In the following, we denote by $\{k:{\mathsf l}ambda\}$ the set of eigenvalues ${\mathsf l}ambda$
with multiplicity $k$.
{\partial}egin{lem}{\mathsf l}abel{eig1} The spectrum of the (Friedrich extension of the) Laplacian operator ${\partial}elta_{C_\alpha S^1_{l\sin\alpha}}^{(q)}$ on $q$-forms with absolute boundary conditions is (where $\nu={\rm csc}\alpha$):
{\partial}egin{align*}
{\rm Sp} {\partial}elta_{C_\alpha S^1_{l\sin\alpha}}^{(0)}=& {\mathsf l}eft\{j_{1,k}^2/l^{2}\right\}_{k=1}^{\infty}\cup {\mathsf l}eft\{2:(j_{\nu n,k}')^2/l^{2}\right\}_{n,k=1}^\infty, \\
{\rm Sp} {\partial}elta_{C_\alpha S^1_{l\sin\alpha}}^{(1)}=& {\mathsf l}eft\{j_{0,k}^2/l^{2}\right\}_{k=1}^{\infty}\cup{\mathsf l}eft\{j_{1,k}^2/l^{2}\right\}_{k=1}^\infty\cup
{\mathsf l}eft\{2:j_{\nu n,k}^2/l^{2}\right\}_{n,k=1}^\infty \\ & \cup {\mathsf l}eft\{2:(j_{\nu n,k}')^2/l^{2}\right\}_{n,k=1}^\infty , \\
{\rm Sp} {\partial}elta_{C_\alpha S^1_{l\sin\alpha}}^{(2)}=& {\mathsf l}eft\{j_{0,k}^2/l^{2}\right\}_{k=1}^\infty\cup {\mathsf l}eft\{2:j_{\nu n,k}^2/l^{2}\right\}_{n,k=1}^\infty. \\
\end{align*}
\end{lem}
{\partial}egin{lem}{\mathsf l}abel{eig2}
The spectrum of the (Friedrich extension of the) Laplacian operator ${\partial}elta_{C_{\alpha} S^2_{l\sin\alpha}}^{(q)}$ on $q$-forms with absolute boundary conditions is:
{\partial}egin{align*}
{\rm Sp} {\partial}elta_{C_{\alpha} S^2_{l\sin\alpha}}^{(0)}=& {\mathsf l}eft\{(2n+1): \hat j_{\mu_n,k,-}^2/l^{2}\right\}_{n,k=1}^{\infty}
\cup {\mathsf l}eft\{j_{\frac{3}{2},k}^2/l^{2}\right\}_{k=1}^\infty, \\
{\rm Sp} {\partial}elta_{C_{\alpha} S^2_{l\sin\alpha}}^{(1)}=& {\mathsf l}eft\{j_{\frac{3}{2},k}^2/l^{2}\right\}_{k=1}^\infty\cup
{\mathsf l}eft\{(2n+1):j_{\mu_n, k}^2/l^{2}\right\}_{n,k=1}^\infty\\
&\cup{\mathsf l}eft\{(2n+1):\hat j_{\mu_n,k,+}^2/l^{2}\right\}_{n,k=1}^\infty\cup
{\mathsf l}eft\{(2n+1):\hat j_{\mu_n,k,-}^2/l^{2}\right\}_{n,k=1}^\infty,\\
{\rm Sp} {\partial}elta_{C_{\alpha} S^2_{l\sin\alpha}}^{(2)}=& {\mathsf l}eft\{j_{\frac{1}{2},k}^2/l^{2}\right\}_{k=1}^\infty\cup
{\mathsf l}eft\{(2n+1):j_{\mu_n, k}^2/l^{2}\right\}_{n,k=1}^\infty\\
&\cup{\mathsf l}eft\{(2n+1):\hat j_{\mu_n,k,+}^2/l^{2}\right\}_{n,k=1}^\infty\cup
{\mathsf l}eft\{(2n+1):j_{\mu_n,k}^2/l^{2}\right\}_{n,k=1}^\infty, \\
{\rm Sp} {\partial}elta_{C_{\alpha} S^2_{l\sin\alpha}}^{(3)}=& {\mathsf l}eft\{(2n+1):j_{\mu_n,k}^2/l^{2}\right\}_{n,k=1}^{\infty} \cup
{\mathsf l}eft\{j_{\frac{1}{2},k}^2/l^{2}\right\}_{k=1}^\infty ,\\
\end{align*}
where $\mu_n=\sqrt{\nu^2 n(n+1)+\frac{1}{4}}$, and where the $\hat j_{\nu,k,\pm}$ are the zeros of the function $G^{\pm}_{\nu}(z)=\pm\frac{1}{2}J_{\nu}(z)+zJ'_\nu(z)$.
\end{lem}
{\partial}egin{lem}{\mathsf l}abel{eig3}
The spectrum of the (Friedrich extension of the) Laplacian operator ${\partial}elta_{C_{\alpha} S^3_{l\sin\alpha}}^{(q)}$ on
$q$-forms with absolute boundary conditions is:
{\partial}egin{align*}
{\rm Sp} {\partial}elta_{C_{\alpha} S^3_{l\sin\alpha}}^{(0)}=& {\mathsf l}eft\{j_{2,k}^2/l^{2}\right\}_{k=1}^\infty \cup {\mathsf l}eft\{(n+1)^2:
\tilde j_{\mu_{0,n},k,-}^2/l^{2}\right\}_{n,k=1}^{\infty}, \\
{\rm Sp} {\partial}elta_{C_{\alpha} S^3_{l\sin\alpha}}^{(1)}=& {\mathsf l}eft\{j_{2}^2/l^{2}\right\}_{k=1}^\infty\cup
{\mathsf l}eft\{2n(n+2):(j'_{\mu_{1,n}, k})^2/l^{2}\right\}_{n,k=1}^\infty\\
&\cup{\mathsf l}eft\{(n+1)^2:\tilde j_{\mu_{0,n},k,-}^2/l^{2}\right\}_{n,k=1}^\infty\cup
{\mathsf l}eft\{(n+1)^2: j_{\mu_{0,n},k}^2/l^{2}\right\}_{n,k=1}^\infty,\\
{\rm Sp} {\partial}elta_{C_{\alpha} S^3_{l\sin\alpha}}^{(2)}=& {\mathsf l}eft\{(n+1)^2: \tilde
j_{\mu_{0,n},k,+}^2/l^{2}\right\}_{n,k=1}^\infty \cup
{\mathsf l}eft\{2n(n+2):(j'_{\mu_{1,n}, k})^2/l^{2}\right\}_{n,k=1}^\infty\\
&\cup{\mathsf l}eft\{2n(n+2): j_{\mu_{1,n},k}^2/l^{2}\right\}_{n,k=1}^\infty\cup
{\mathsf l}eft\{(n+1)^2: j_{\mu_{0,n},k}^2/l^{2}\right\}_{n,k=1}^\infty, \\
{\rm Sp} {\partial}elta_{C_{\alpha} S^3_{l\sin\alpha}}^{(3)}=& {\mathsf l}eft\{j_{1}^2/l^{2}\right\}_{k=1}^\infty\cup
{\mathsf l}eft\{(n+1)^2:\tilde j_{\mu_{0,n}, k,+}^2/l^{2}\right\}_{n,k=1}^\infty\\
&\cup{\mathsf l}eft\{(n+1)^2: j_{\mu_{0,n},k}^2/l^{2}\right\}_{n,k=1}^\infty\cup
{\mathsf l}eft\{2n(n+2): j_{\mu_{1,n},k}^2/l^{2}\right\}_{n,k=1}^\infty,\\
{\rm Sp} {\partial}elta_{C_{\alpha} S^3_{l\sin\alpha}}^{(4)}=& {\mathsf l}eft\{j_{1,k}^2/l^{2}\right\}_{k=1}^\infty \cup {\mathsf l}eft\{(n+1)^2:
\tilde j_{\mu_{0,n},k}^2/l^{2}\right\}_{n,k=1}^{\infty},
\end{align*}
where
\[
\mu_{0,n}=\sqrt{\nu^2 n(n+2)+1}, \qquad \mu_{1,n} = \nu(n+1),
\]
and where the $\tilde j_{\nu,k,\pm}$ are the zeros of the function $T^{\pm}_{\nu}(z)=\pm J_{\nu}(z)+zJ'_\nu(z)$.
\end{lem}
\begin{proof} Recall we parameterize $C_{\alpha}S^{3}_{l\sin\alpha}$ by
\[C_{\alpha}S^3_{l\sin\alpha}=\begin{cases}
x_1=x\sin{\alpha}\sin{\theta_3}\sin{\theta_2}\cos{\theta_1} \\[8pt]
x_2=x\sin{\alpha}\sin{\theta_3}\sin{\theta_2}\sin{\theta_1} \\[8pt]
x_3=x\sin{\alpha}\sin{\theta_3}\cos{\theta_2} \\[8pt]
x_4=x\sin{\alpha}\cos{\theta_3} \\[8pt]
x_5=x\cos{\alpha}
\end{cases}\] where $(x,\theta_1,\theta_2,\theta_{3})\in [0,l]\times[0,2\pi]\times[0,\pi]\times[0,\pi]$, $0<\alpha{\mathsf l}eq \pi/2$ is
a fixed real number and $0< a=\sin\alpha{\mathsf l}eq 1$. The induced metric is (for $x>0$)
\[
g = dx\otimes dx + (a^2x^2\sin^2\theta_2\sin^2\theta_3) d\theta_1\otimes d\theta_1 + (a^2x^2\sin^2\theta_3) d\theta_2 \otimes
d\theta_2 + (a^2x^2) d\theta_3 \otimes d\theta_3.
\]
Using the absolute boundary conditions on forms described in equation (\ref{abs})
of the previous section, we obtain the following equations. For the $0$-forms:
{\partial}egin{equation}{\mathsf l}abel{abs0S3}
{\partial}egin{aligned}
{\rm abs. }&: \partial_x \omega(l,\theta_1,\theta_2,\theta_3)=0.
\end{aligned}
\end{equation}
For the $1$-forms: {\partial}egin{equation}{\mathsf l}abel{abs1S3}
{\partial}egin{aligned}
{\rm abs.}&:
{\mathsf l}eft\{{\partial}egin{array}{lll} \omega_x (l,\theta_1,\theta_2,\theta_3) = 0 \\
\partial_x \omega_{\theta_1}(l,\theta_1,\theta_2,\theta_3)=0 \\
\partial_x\omega_{\theta_2}(l,\theta_1,\theta_2,\theta_3)=0 \\
\partial_x\omega_{\theta_3}(l,\theta_1,\theta_2,\theta_3)=0.\end{array}\right.
\end{aligned}
\end{equation}
For the $2$-forms, with $i=1,2,3$: {\partial}egin{equation}{\mathsf l}abel{abs2S3}
{\partial}egin{aligned}
{\rm abs.}&:
{\mathsf l}eft\{{\partial}egin{array}{ll} \omega_{x\theta_i} (l,\theta_1,\theta_2,\theta_3) = 0 \\
\partial_x\omega_{\theta_1\theta_2}(l,\theta_1,\theta_2,\theta_3)=0 \\
\partial_x\omega_{\theta_1\theta_3}(l,\theta_1,\theta_2,\theta_3)=0 \\
\partial_x\omega_{\theta_2\theta_3}(l,\theta_1,\theta_2,\theta_3)=0.\end{array}\right.
\end{aligned}
\end{equation}
For the $3$-forms: {\partial}egin{equation}{\mathsf l}abel{abs3S3}
{\partial}egin{aligned}
{\rm abs.}&:
{\mathsf l}eft\{{\partial}egin{array}{lll} \omega_{x\theta_1\theta_2} (l,\theta_1,\theta_2,\theta_3) = 0 \\
\omega_{x\theta_1\theta_3}(l,\theta_1,\theta_2,\theta_3)=0 \\
\omega_{x\theta_2\theta_3}(l,\theta_1,\theta_2,\theta_3)=0 \\
\partial_x\omega_{\theta_1\theta_2\theta_3}(l,\theta_1,\theta_2,\theta_3)=0.\end{array}\right.
\end{aligned}
\end{equation}
For the $4$-forms: {\partial}egin{equation}{\mathsf l}abel{abs4S3}
{\partial}egin{aligned}
{\rm abs. }&: \omega_{x\theta_1\theta_2\theta_3}(l,\theta_1,\theta_2,\theta_3)=0.
\end{aligned}
\end{equation}
Next we use the description of the eigenfunctions given in Section 3 of \cite{Che2} to determine the eigenvalues. By \cite{IT} the eigenvalues of the coexact forms of the Laplacian over $S^3$ are, with $n{\gamma}eq 1$:
{\partial}egin{center}
{\partial}egin{table}[htb]
\centering
{\partial}egin{tabular}{|c|c|c|}
\hline Dimension & Eigenvalue & Multiplicity \\
\hline \hline
$0$ & $n(n+2)$ & $(n+1)^2$ \\
\cline{1-3}
$1$ & $(n+1)^2$ & $2 n(n+2)$ \\
\cline{1-3} $2$ & $n(n+2)$ & $(n+1)^2$\\
\hline
\end{tabular}
\end{table}
\end{center}
And by \cite{Che2} we have $\mu_{0,n} = \mu_{2,n} = \sqrt{\nu^2 n(n+2) +1 }$ and $\mu_{1,n} = \nu(n+1)$, and the
eigenforms of the Laplacian of $C_{\alpha}S^{3}_{la}$ are as follows. For the $0$-forms:
{\partial}egin{equation*}
\alpha^{(0)}_{n} = x^{-1} J_{\mu_{0,n}}({\mathsf l}ambda x) \phi^{(0)}_{n}(\theta_1,\theta_2,\theta_3), \qquad E^{(0)} = x^{-1}
J_{1}({\mathsf l}ambda x) h^{0}(\theta_1,\theta_2,\theta_3).
\end{equation*}
For the $1$-forms:
{\partial}egin{align*}
\alpha^{(1)}_{n} &= x^{-1} J_{\mu_{1,n}}({\mathsf l}ambda x) \phi^{(1)}_{n}(\theta_1,\theta_2,\theta_3),\\
{\partial}eta^{ (1)}_{n} &= x^{-1} J_{\mu_{0,n}}({\mathsf l}ambda x)d\phi^{(0)}_{n}(\theta_1,\theta_2,\theta_3) + \partial_x(x^{-1}
J_{\mu_{0,n}}({\mathsf l}ambda x))dx\wedge
\phi^{(0)}_{n}(\theta_1,\theta_2,\theta_3),\\
{\gamma}amma^{ (1)}_{n} &= x^{-1} \partial_x(x J_{\mu_{0,n}}({\mathsf l}ambda x)) d\phi^{(0)}_{n}(\theta_1,\theta_2,\theta_3) +x^{-2}
J_{\mu_{0,n}}({\mathsf l}ambda x) dx\wedge \tilde \delta \tilde d \phi^{(0)}_{n}(\theta_1,\theta_2,\theta_3),\\
D^{(1)} &= \partial_x(x^{-1} J_1({\mathsf l}ambda x)) dx \wedge h^{(0)}(\theta_1,\theta_2,\theta_3)
\end{align*}
For the $2$-forms:
{\partial}egin{align*}
\alpha^{(2)}_{n} &= x J_{\mu_{0,n}}({\mathsf l}ambda x) \phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3),\\
{\partial}eta^{ (2)}_{n} &= J_{\mu_{1,n}}({\mathsf l}ambda x)d\phi^{(1)}_{n}(\theta_1,\theta_2,\theta_3) +
\partial_x(J_{\mu_{1,n}}({\mathsf l}ambda x))dx\wedge\phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3),\\
{\gamma}amma^{ (2)}_{n} &= x \partial_x(J_{\mu_{1,n}}({\mathsf l}ambda x)) d\phi^{(1)}_{n}(\theta_1,\theta_2,\theta_3) +x^{-1}
J_{\mu_{1,n}}({\mathsf l}ambda x) dx\wedge \tilde \delta \tilde d \phi^{(1)}_{n}(\theta_1,\theta_2,\theta_3),\\
\delta^{(2)}_n &= J_{\mu_{0,n}}({\mathsf l}ambda x) dx \wedge d\phi^{(0)}_{n}(\theta_1,\theta_2,\theta_3).
\end{align*}
For the $3$-forms:
{\partial}egin{align*}
{\partial}eta^{ (3)}_{n} &= x J_{\mu_{0,n}}({\mathsf l}ambda x)d\phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3) +
\partial_x(x J_{\mu_{0,n}}({\mathsf l}ambda x))dx\wedge\phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3),\\
{\gamma}amma^{ (3)}_{n} &= x^{3} \partial_x(x^{-1} J_{\mu_{0,n}}({\mathsf l}ambda x)) d\phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3) +
J_{\mu_{0,n}}({\mathsf l}ambda x) dx\wedge \tilde \delta \tilde d \phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3),\\
\delta^{(3)}_n &= x J_{\mu_{1,n}}({\mathsf l}ambda x) dx \wedge d\phi^{(1)}_{n}(\theta_1,\theta_2,\theta_3),\\
E^{(3)} & = x^2 J_{2}({\mathsf l}ambda x)h^{3}(\theta_1,\theta_2,\theta_3).
\end{align*}
For $4$-forms:
{\partial}egin{equation*}
\delta^{(4)}_{n} = x^{2} J_{\mu_{0,n}}({\mathsf l}ambda x) dx \wedge d\phi^{(2)}_{n}(\theta_1,\theta_2,\theta_3), \qquad D^{(4)}
=\partial_x( x^{2} J_{2}({\mathsf l}ambda x)) dx \wedge h^{3}(\theta_1,\theta_2,\theta_3).
\end{equation*}
Here the $\phi^{(i)}_n(\theta_1,\theta_2,\theta_3)$, for $i=0,1,2$, are coexact eigenforms of the Laplacian on $S^{3}$, and $h^{(0)}(\theta_1,\theta_2,\theta_3)$ and $h^{(3)}(\theta_1,\theta_2,\theta_3)$ are harmonic forms of the Laplacian on $S^{3}$. Using these functions in the boundary conditions given in equation (\ref{abs}), we obtain the result.
\end{proof}
\section{Zeta determinants for some class of double sequences}
{\mathsf l}abel{s3}
We give in this section all the tools necessary in order to evaluate the zeta determinants appearing in the calculation of the analytic torsion. This is based on \cite{Spr3} \cite{Spr4} \cite{Spr5} and \cite{Spr9}. We present here a simplified version of the main result of those works (see in particular the general formulation in Theorem 3.9 of \cite{Spr9} or the Spectral Decomposition Lemma of \cite{Spr5}), that is sufficient for our purpose here.
Let $S=\{a_n\}_{n=1}^\infty$ be a sequence of non-vanishing complex numbers, ordered by increasing modulus, with a unique point of accumulation at infinity. The positive real number (possibly infinite)
\[
s_0=\limsup_{n\to\infty} \frac{{\mathsf l}og n}{{\mathsf l}og |a_n|},
\]
is called the exponent of convergence of $S$, and denoted by ${\mathsf e}(S)$. We are only interested in sequences with ${\mathsf e}(S)=s_0<\infty$. If this is the case, then there exists a least integer $p$ such that the series $\sum_{n=1}^\infty a_n^{-p-1}$ converges absolutely. We assume $s_0-1< p{\mathsf l}eq s_0$, we call the integer $p$ the genus of the sequence $S$, and we write $p={\gamma}e(S)$. We define the zeta function associated to $S$ by the uniformly convergent series
\[
\zeta(s,S)=\sum_{n=1}^\infty a_n^{-s},
\]
when ${\mathds{R}}e(s)> {\mathsf e}(S)$, and by analytic continuation otherwise. We call the open subset $\rho(S)={\mathds{C}}-S$ of the complex plane the resolvent set of $S$. For all ${\mathsf l}ambda\in\rho(S)$, we define the Gamma function associated to $S$ by the canonical product
{\partial}egin{equation}{\mathsf l}abel{gamma}
\frac{1}{{\Gamma}amma(-{\mathsf l}ambda,S)}=\prod_{n=1}^\infty{\mathsf l}eft(1+\frac{-{\mathsf l}ambda}{a_n}\right)\e^{\sum_{j=1}^{{\gamma}e(S)}\frac{(-1)^j}{j}\frac{(-{\mathsf l}ambda)^j}{a_n^j}}.
\end{equation}
When necessary in order to define the meromorphic branch of an analytic function, the domain for ${\mathsf l}ambda$ will be the open subset ${\mathds{C}}-[0,\infty)$ of the complex plane.
We use the notation $\Sigmaigma_{\theta,c}={\mathsf l}eft\{z\in {\mathds{C}}~|~|\arg(z-c)|{\mathsf l}eq \frac{\theta}{2}\right\}$,
with $c{\gamma}eq \delta> 0$, $0< \theta<\pi$. We use
$D_{\theta,c}={\mathds{C}}-\Sigmaigma_{\theta,c}$, for the complementary (open) domain and $\Lambda_{\theta,c}=\partial \Sigmaigma_{\theta,c}={\mathsf l}eft\{z\in {\mathds{C}}~|~|\arg(z-c)|= \frac{\theta}{2}\right\}$, oriented counter clockwise, for the boundary.
With this notation, we define now a particular subclass of sequences. Let $S$ be as above, and assume that ${\mathsf e}(S)<\infty$, and that there exist $c>0$ and $0<\theta<\pi$, such that $S$ is contained in the interior of the sector $\Sigmaigma_{\theta,c}$. Furthermore, assume that the logarithm of the associated Gamma function has a uniform asymptotic expansion for large ${\mathsf l}ambda\in D_{\theta,c}(S)={\mathds{C}}-\Sigmaigma_{\theta,c}$ of the following form
\[
{\mathsf l}og{\Gamma}amma(-{\mathsf l}ambda,S)\sim\sum_{j=0}^\infty a_{\alpha_j,0}(-{\mathsf l}ambda)^{\alpha_j} +\sum_{k=0}^{{\gamma}e(S)} a_{k,1}(-{\mathsf l}ambda)^k{\mathsf l}og(-{\mathsf l}ambda),
\]
where $\{\alpha_j\}$ is a decreasing sequence of real numbers. Then, we say that $S$ is a {\it totally regular sequence of spectral type with infinite order}. We call the open set $D_{\theta,c}(S)$ the asymptotic domain of $S$.
Next, let $S=\{{\mathsf l}ambda_{n,k}\}_{n,k=1}^\infty$ be a double sequence of non
vanishing complex numbers with a unique accumulation point at
infinity, finite exponent $s_0={\mathsf e}(S)$ and genus $p={\gamma}e(S)$. Assume if necessary that the elements of $S$ are ordered as $0<|{\mathsf l}ambda_{1,1}|{\mathsf l}eq|{\mathsf l}ambda_{1,2}|{\mathsf l}eq |{\mathsf l}ambda_{2,1}|{\mathsf l}eq \dots$. We use the notation $S_n$ ($S_k$) to denote the simple sequence with fixed $n$ ($k$). We call the exponents of $S_n$ and $S_k$ the relative exponents of $S$, and we use the notation $(s_0={\mathsf e}(S),s_1={\mathsf e}(S_k),s_2={\mathsf e}(S_n))$. We define relative genus accordingly.
{\partial}egin{defi} Let $S=\{{\mathsf l}ambda_{n,k}\}_{n,k=1}^\infty$ be a double
sequence with finite exponents $(s_0,s_1,s_2)$, genus
$(p_0,p_1,p_2)$, and positive spectral sector
$\Sigmaigma_{\theta_0,c_0}$. Let $U=\{u_n\}_{n=1}^\infty$ be a totally
regular sequence of spectral type of infinite order with exponent
$r_0$, genus $q$, domain $D_{\phi,d}$. We say that $S$ is
spectrally decomposable over $U$ with power $\kappa$, length $\ell$ and
asymptotic domain $D_{\theta,c}$, with $c={\rm min}(c_0,d,c')$,
$\theta={\rm max}(\theta_0,\phi,\theta')$, if there exist positive
real numbers $\kappa$, $\ell$ (integer), $c'$, and $\theta'$, with
$0< \theta'<\pi$, such that:
{\partial}egin{enumerate}
\item the sequence
$u_n^{-\kappa}S_n={\mathsf l}eft\{\frac{{\mathsf l}ambda_{n,k}}{u^\kappa_n}\right\}_{k=1}^\infty$ has
spectral sector $\Sigmaigma_{\theta',c'}$, and is a totally regular
sequence of spectral type of infinite order for each $n$;
\item the logarithmic ${\Gamma}amma$-function associated to $S_n/u_n^\kappa$ has an asymptotic expansion for large
$n$ uniformly in ${\mathsf l}ambda$ for ${\mathsf l}ambda$ in
$D_{\theta,c}$, of the following form
{\partial}egin{equation}{\mathsf l}abel{exp}
\hspace{30pt}{\mathsf l}og{\Gamma}amma(-{\mathsf l}ambda,u_n^{-\kappa} S_n)=\sum_{h=0}^{\ell}
\phi_{\sigma_h}({\mathsf l}ambda) u_n^{-\sigma_h}+\sum_{l=0}^{L}
P_{\rho_l}({\mathsf l}ambda) u_n^{-\rho_l}{\mathsf l}og u_n+o(u_n^{-r_0}),
\end{equation}
where $\sigma_h$ and $\rho_l$ are real numbers with $\sigma_0<\dots <\sigma_\ell$, $\rho_0<\dots <\rho_L$, the
$P_{\rho_l}({\mathsf l}ambda)$ are polynomials in ${\mathsf l}ambda$ satisfying the condition $P_{\rho_l}(0)=0$, $\ell$ and $L$ are the larger integers
such that $\sigma_\ell{\mathsf l}eq r_0$ and $\rho_L{\mathsf l}eq r_0$.
\end{enumerate}
{\mathsf l}abel{spdec}
\end{defi}
When a double sequence $S$ is spectrally decomposable over a simple sequence $U$, Theorem 3.9 of \cite{Spr9} gives a formula for the derivative of the associated zeta function at zero. In order to understand such a formula, we need to introduce some other quantities. First, we define the functions
{\partial}egin{equation}{\mathsf l}abel{fi1}
{\mathcal{P}}hi_{\sigma_h}(s)=\int_0^\infty t^{s-1}\frac{1}{2\pi i}\int_{\Lambda_{\theta,c}}\frac{\e^{-{\mathsf l}ambda t}}{-{\mathsf l}ambda} \phi_{\sigma_h}({\mathsf l}ambda) d{\mathsf l}ambda dt.
\end{equation}
Next, by Lemma 3.3 of \cite{Spr9}, for all $n$, we have the expansions:
{\partial}egin{equation}{\mathsf l}abel{form}{\partial}egin{aligned}
{\mathsf l}og{\Gamma}amma(-{\mathsf l}ambda,S_n/{u_n^\kappa})&\sim\sum_{j=0}^\infty a_{\alpha_j,0,n}
(-{\mathsf l}ambda)^{\alpha_j}+\sum_{k=0}^{p_2} a_{k,1,n}(-{\mathsf l}ambda)^k{\mathsf l}og(-{\mathsf l}ambda),\\
\phi_{\sigma_h}({\mathsf l}ambda)&\sim\sum_{j=0}^\infty b_{\sigma_h,\alpha_j,0}
(-{\mathsf l}ambda)^{\alpha_j}+\sum_{k=0}^{p_2} b_{\sigma_h,k,1}(-{\mathsf l}ambda)^k{\mathsf l}og(-{\mathsf l}ambda),
\end{aligned}
\end{equation}
for large ${\mathsf l}ambda$ in $D_{\theta,c}$. We set (see Lemma 3.5 of \cite{Spr9})
{\partial}egin{equation}{\mathsf l}abel{fi2}
{\partial}egin{aligned}
A_{0,0}(s)&=\sum_{n=1}^\infty {\mathsf l}eft(a_{0, 0,n} -\sum_{h=0}^\ell
b_{\sigma_h,0,0}u_n^{-\sigma_h}\right)u_n^{-\kappa s},\\
A_{j,1}(s)&=\sum_{n=1}^\infty {\mathsf l}eft(a_{j, 1,n} -\sum_{h=0}^\ell
b_{\sigma_h,j,1}u_n^{-\sigma_h}\right)u_n^{-\kappa s},
~~~0{\mathsf l}eq j{\mathsf l}eq p_2.
\end{aligned}
\end{equation}
We can now state the formula for the derivative at zero of the double zeta function. We give here a modified version of Theorem 3.9 of \cite{Spr9}, more suitable for our purpose here. This is based on the following fact. The key point in the proof of Theorem 3.9 of \cite{Spr9} is the decomposition given in Lemma 3.5 of that paper of the sum
\[
\mathcal{T}(s,{\mathsf l}ambda, S,U)=\sum_{n=1}^\infty u_n^{-\kappa s} {\mathsf l}og{\Gamma}amma(-{\mathsf l}ambda, u_n^{-\kappa}S_n),
\]
into two terms: the regular part $\mathcal{P}(s,{\mathsf l}ambda,S,U)$ and the remaining singular part. The regular part is obtained by subtracting from ${\mathcal{T}}$ some terms constructed starting from the expansion of the logarithmic Gamma function given in equation (\ref{exp}), namely
\[
{\mathcal{P}}(s,{\mathsf l}ambda,S,u)={\mathcal{T}}(s,{\mathsf l}ambda, S,U)-\sum_{h=0}^{\ell}
\phi_{\sigma_h}({\mathsf l}ambda) u_n^{-\sigma_h}-\sum_{l=0}^{L}
P_{\rho_l}({\mathsf l}ambda)u_n^{-\rho_l}{\mathsf l}og u_n.
\]
Now, assume instead we subtract only the terms such that the zeta function $\zeta(s,U)$ has a pole at $s=\sigma_h$ or at $s=\rho_l$. Let $\hat {\mathcal{P}}(s,{\mathsf l}ambda, S,U)$ be the resulting function. Then the same argument as the one used in Section 3 of \cite{Spr9} in order to prove Theorem 3.9 applies, and we obtain similar formulas for the values of the residue, and of the finite part of the zeta function $\zeta(s,S)$ and of its derivative at zero, with just two differences: first, in all the sums, all the terms with index $\sigma_h$ such that $s=\sigma_h$ is not a pole of $\zeta(s,U)$ must be omitted; and second, we must substitute the terms $A_{0,0}(0)$ and $A_{0,1}'(0)$ with the finite parts ${\mathds{R}}z_{s=0}A_{0,0}(s)$, and ${\mathds{R}}z_{s=0}A_{0,1}'(s)$. The first modification is an obvious consequence of the substitution of the function ${\mathcal{P}}$ by the function $\hat {\mathcal{P}}$. The second modification follows for the same reason, noting that the functions $A_{\alpha_j,k}(s)$ defined in Lemma 3.5 of \cite{Spr9} are no longer regular at $s=0$ themselves. However, they both admit a meromorphic extension regular at $s=0$, using the extension of the zeta function $\zeta(s,U)$, and the expansion of the coefficients $a_{\alpha_j,k,n}$ for large $n$.
Thus we have the following result.
{\partial}egin{theo} {\mathsf l}abel{tt} The formulas of Theorem 3.9 of \cite{Spr9} hold if all the quantities with index $\sigma_h$ such that the zeta function $\zeta(s,U)$ does not have a pole at $s=\sigma_h$ are omitted. In such a case, the result must be read by means of the analytic extension of the zeta function $\zeta(s,U)$.
\end{theo}
Next, assuming some simplified pole structure for the zeta function $\zeta(s,U)$, sufficient for the present analysis, we state the main result of this section.
{\partial}egin{theo} {\mathsf l}abel{t4} Let $S$ be spectrally decomposable over $U$ as in Definition \ref{spdec}. Assume that the functions ${\mathcal{P}}hi_{\sigma_h}(s)$ have at most simple poles for $s=0$. Then,
$\zeta(s,S)$ is regular at $s=0$, and
{\partial}egin{align*}
\zeta(0,S)=&-A_{0,1}(0)+\frac{1}{\kappa}{\sum_{h=0}^\ell} {\mathds{R}}u_{s=0}{\mathcal{P}}hi_{\sigma_h}(s){\mathds{R}}u_{s=\sigma_h}\zeta(s,U),\\
\zeta'(0,S)=&-A_{0,0}(0)-A_{0,1}'(0)+\frac{{\gamma}amma}{\kappa}\sum_{h=0}^\ell{\mathds{R}}u_{s=0}{\mathcal{P}}hi_{\sigma_h}(s){\mathds{R}}u_{s=\sigma_h}\zeta(s,U)\\
&+\frac{1}{\kappa}\sum_{h=0}^\ell{\mathds{R}}z_{s=0}{\mathcal{P}}hi_{\sigma_h}(s){\mathds{R}}u_{s=\sigma_h}\zeta(s,U)+{\sum_{h=0}^\ell}{^{\displaystyle
'}}{\mathds{R}}u_{s=0}{\mathcal{P}}hi_{\sigma_h}(s){\mathds{R}}z_{s=\sigma_h}\zeta(s,U),
\end{align*}
where the notation $\sum'$ means that only the terms such that $\zeta(s,U)$ has a pole at $s=\sigma_h$ appear in the sum.
\end{theo}
This result should be compared with the Spectral Decomposition Lemma of \cite{Spr5} and Proposition 1 of \cite{Spr6}.
{\partial}egin{corol} {\mathsf l}abel{c} Let $S_{(j)}=\{{\mathsf l}ambda_{(j),n,k}\}_{n,k=1}^\infty$, $j=1,2$, be two double sequences that satisfy all the requirements of Definition \ref{spdec} of spectral decomposability over a common sequence $U$, with the same parameters $\kappa$, $\ell$, etc., except that the polynomials $P_{(j),\rho}({\mathsf l}ambda)$ appearing in condition (2) do not vanish for ${\mathsf l}ambda=0$. Assume that the difference of such polynomials does satisfy this condition, namely that $P_{(1),\rho}(0)-P_{(2),\rho}(0)=0$. Then, the difference of the zeta functions $\zeta(s,S_{(1)})-\zeta(s,S_{(2)})$ is regular at $s=0$ and satisfies the formulas given in Theorem \ref{t4}.
\end{corol}
We conclude this section by recalling some results on zeta determinants of some simple sequences that will be necessary in the following. These results can be found in different places, and are known to specialists. We will use the formulation of \cite{Spr1}. For positive real numbers $l$ and $q$, define the non-homogeneous quadratic Bessel zeta function by
\[
z(s,\nu,q,l)=\sum_{k=1}^\infty {\mathsf l}eft(\frac{j_{\nu,k}^2}{l^2}+q^2\right)^{-s},
\]
for ${\mathds{R}}e(s)>\frac{1}{2}$. Then, $z(s,\nu,q,l)$ extends analytically to a meromorphic function in the complex plane with simple poles at $s=\frac{1}{2}, -\frac{1}{2}, -\frac{3}{2}, \dots$. The point $s=0$ is a regular point and
{\partial}egin{equation}{\mathsf l}abel{p00}
{\partial}egin{aligned}
z(0,\nu,q,l)&=-\frac{1}{2}{\mathsf l}eft(\nu+\frac{1}{2}\right),\\
z'(0,\nu,q,l)&=-{\mathsf l}og\sqrt{2\pi l}\frac{I_\nu(lq)}{q^\nu}.
\end{aligned}
\end{equation}
In particular, taking the limit for $q\to 0$,
\[
z'(0,\nu,0,l)=-{\mathsf l}og\frac{\sqrt{\pi}l^{\nu+\frac{1}{2}}}{2^{\nu-\frac{1}{2}}{\Gamma}amma(\nu+1)}.
\]
\section{The analytic torsion }
{\mathsf l}abel{s4}
In this section we give the analytic torsions of $C_\alpha S^n_{l\sin\alpha}$, for $n=1,2$, and $3$. Actually, the case $n=1$ is essentially contained in \cite{Spr6}, and both the cases $n=1$ and $n=2$ are given in \cite{HMS}, Sections 5.4 and 5.5, so we will focus here on the new case of $C_\alpha S^3_{l\sin\alpha}$.
By the analysis in Section \ref{s3}, the relevant zeta functions are
{\partial}egin{align*}
\zeta(s,{\partial}elta^{(1)})&=\sum_{k=1}^\infty \frac{j_{2,k}^{-2s}}{l^{-2s}}+2\sum_{n,k=1}^\infty n(n+2)
\frac{(j'_{\mu_{1,n},k})^{-2s}}{l^{-2s}} + \sum_{n,k=1}^{\infty} (n+1)^2 \frac{\tilde
j_{\mu_{0,n},k,-}^{-2s}}{l^{-2s}}\\
&+\sum_{n,k=1}^{\infty} (n+1)^2 \frac{j_{\mu_{0,n},k}^{-2s}}{l^{-2s}},\\
\zeta(s,{\partial}elta^{(2)})&= \sum_{n,k=1}^{\infty} (n+1)^2 \frac{\tilde j_{\mu_{0,n},k,+}^{-2s}}{l^{-2s}} +
2\sum_{n,k=1}^{\infty} n(n+2) \frac{(j'_{\mu_{1,n},k})^{-2s}}{l^{-2s}} \\
&+2 \sum_{n,k=1}^{\infty} n(n+2) \frac{j_{\mu_{1,n},k}^{-2s}}{l^{-2s}} +
\sum_{n,k=1}^{\infty} (n+1)^2 \frac{j_{\mu_{0,n},k}^{-2s}}{l^{-2s}},\\
\zeta(s,{\partial}elta^{(3)})&=\sum_{k=1}^\infty \frac{j_{1,k}^{-2s}}{l^{-2s}}+2\sum_{n,k=1}^\infty n(n+2)
\frac{j_{\mu_{1,n},k}^{-2s}}{l^{-2s}} + \sum_{n,k=1}^{\infty} (n+1)^2 \frac{\tilde j_{\mu_{0,n},k,+}^{-2s}}{l^{-2s}}\\
&+ \sum_{n,k=1}^{\infty} (n+1)^2 \frac{j_{\mu_{0,n},k}^{-2s}}{l^{-2s}},\\
\zeta(s,{\partial}elta^{(4)})&=\sum_{k=1}^{\infty} \frac{j_{1,k}^{-2s}}{l^{-2s}} + \sum_{n,k=1}^{\infty} (n+1)^2
\frac{j_{\mu_{0,n},k}^{-2s}}{l^{-2s}},
\end{align*}
and by equation (\ref{analytic}), the torsion is ($a=\sin\alpha=\frac{1}{\nu}$)
{\partial}egin{align*}
{\mathsf l}og T(C_\alpha S^3_{la})&=-\frac{1}{2}\zeta'(0,{\partial}elta^{(1)}) +\zeta'(0,{\partial}elta^{(2)})-\frac{3}{2}\zeta'(0,{\partial}elta^{(3)})+2\zeta'(0,{\partial}elta^{(4)}).
\end{align*}
Define the function
{\partial}egin{align*}
t(s)=&-\frac{1}{2}\zeta(s,{\partial}elta^{(1)}) +\zeta(s,{\partial}elta^{(2)})-\frac{3}{2}\zeta(s,{\partial}elta^{(3)})+2\zeta(s,{\partial}elta^{(4)})\\
=&\frac{1}{2} \sum_{k=1}^{\infty} \frac{j_{1,k}^{-2s}}{l^{-2s}}-\frac{1}{2} \sum_{k=1}^\infty
\frac{j_{2,k}^{-2s}}{l^{-2s}}\\
& + \sum_{n,k=1}^\infty n(n+2) \frac{(j'_{\mu_{1,n},k})^{-2s}}{l^{-2s}} -
\sum_{n,k=1}^{\infty} n(n+2) \frac{j_{\mu_{1,n},k}^{-2s}}{l^{-2s}}\\
&+ \sum_{n,k=1}^{\infty} (n+1)^2 \frac{j_{\mu_{0,n},k}^{-2s}}{l^{-2s}} -\frac{1}{2}\sum_{n,k=1}^{\infty} (n+1)^2
\frac{\tilde j_{\mu_{0,n},k,-}^{-2s}}{l^{-2s}} -\frac{1}{2} \sum_{n,k=1}^{\infty} (n+1)^2 \frac{\tilde
j_{\mu_{0,n},k,+}^{-2s}}{l^{-2s}}\\
=& l^{2s}{\mathsf l}eft(\frac{1}{2}z_{1}(s) - \frac{1}{2}z_{2}(s) + \hat{Z}(s) -Z(s) +Z_0(s)
-\frac{1}{2}Z_+(s) - \frac{1}{2}Z_-(s) \right) ,
\end{align*}
then
{\partial}egin{align*}
{\mathsf l}og T(C_\alpha S^3_{la})=t'(0)=&\frac{1}{2}z'_{1}(0) - \frac{1}{2}z'_{2}(0) + \hat{Z}'(0) -Z'(0) +Z'_0(0)\\
&-\frac{1}{2}Z'_+(0)- \frac{1}{2}Z'_-(0) +
{\mathsf l}og l^2 {\mathsf l}eft(\frac{1}{2}z_{1}(0) - \frac{1}{2}z_{2}(0)\right.\\
& {\mathsf l}eft.+\hat{Z}(0) -Z(0) +Z_0(0) -\frac{1}{2}Z_+(0) - \frac{1}{2}Z_-(0)\right).
\end{align*}
Using equation (\ref{p00}) of Section \ref{s3}, we compute $z_{1/2}(0)$ and $z'_{1/2}(0)$. We obtain
{\partial}egin{equation}{\mathsf l}abel{ttt}
{\partial}egin{aligned}
{\mathsf l}og T(C_\alpha S^3_{la})&= {\mathsf l}eft(\frac{1}{4} +
\hat{Z}(0) -Z(0) +Z_0(0) -\frac{1}{2}Z_+(0) - \frac{1}{2}Z_-(0)\right){\mathsf l}og l^2\\
&+ {\mathsf l}eft(-{\mathsf l}og 2 + \hat{Z}'(0) -Z'(0) +Z'_0(0) -\frac{1}{2}Z'_+(0) - \frac{1}{2}Z'_-(0) \right).
\end{aligned}
\end{equation}
In order to evaluate the remaining part, we use Corollary \ref{c} of Theorem \ref{t4}. We consider separately the two functions $Z(s)-\hat Z(s)$, and $2Z_0(s)-Z_+(s)-Z_-(s)$. In the first case, the relevant sequences are the double sequences $S=\{n(n+2):j_{\mu_{1,n},k}^2\}$ and $\hat S=\{n(n+2):(j_{\mu_{1,n},k}')^2\}$, and the simple sequence $U_1=\{n(n+2):\mu_{1,n}\}_{n=1}^\infty$, and $Z(s)=\zeta(s,S)$, $\hat
Z(s)=\zeta(s,\hat S)$. In the second case, the relevant sequences are the double sequences $S_0=\{(n+1)^2:j_{\mu_{0,n},k}^2\}$ and $S_\pm=\{(n+1)^2:(\tilde j_{\mu_{0,n},k,\pm})^2\}$, and the simple sequence $U_0=\{(n+1)^2:\mu_{0,n}\}_{n=1}^\infty$, and $Z_0(s)=\zeta(s,S_0)$, $Z_\pm(s)=\zeta(s, S_\pm)$.
We start by analysing the two simple sequences $U_j$, $j=0,1$. Recall from Lemma \ref{eig3}, that
\[
\mu_{0,n}=\sqrt{\nu^2 n(n+2)+1}, \qquad \mu_{1,n} = \nu(n+1).
\]
Consider first the sequence $U_1=\{n(n+2):\mu_{1,n}\}_{n=1}^\infty$. By definition of $\mu_{1,n}$, it is easy to see that
\[
\zeta(s,U_1)=\nu^{-s}{\mathsf l}eft(\zeta_R(s-2)-\zeta_R(s)\right),
\]
and therefore $U_1$ is a totally regular sequence of spectral type with infinite order, ${\mathsf e}(U_1)={\gamma}e(U_1)=3$, and $\zeta(s,U_1)$ has simple poles at $s=1$ and $s=3$ with residues:
{\partial}egin{equation}{\mathsf l}abel{rrr1}{\partial}egin{aligned}
&{\mathds{R}}z_{s=1}\zeta(s,U_1)=\frac{1}{\nu}{\mathsf l}eft({\mathsf l}og\nu-{\gamma}amma-\frac{1}{12}\right),&{\mathds{R}}u_{s=1}\zeta(s,U_1)=-\frac{1}{\nu},\\
&{\mathds{R}}z_{s=3}\zeta(s,U_1)=\frac{1}{\nu^3}{\mathsf l}eft({\gamma}amma-{\mathsf l}og\nu-\zeta(3)\right),&{\mathds{R}}u_{s=3}\zeta(s,U_1)=\frac{1}{\nu^3}.\\
\end{aligned}
\end{equation}
The analysis for the sequence $U_0$ is a little bit longer. By definition $U_0=\{(n+1)^2:\mu_{0,n}\}_{n=1}^\infty$, where
\[
\mu_{0,n}=\sqrt{\nu^2 n(n+2)+1}.
\]
For a positive $q$, consider the sequence
\[
L_q=\{(n+1)^2:\sqrt{n(n+2)+q}\}_{n=1}^\infty.
\]
Then, it is clear that
\[
\zeta(s,U_0)=\nu^{-s}\zeta(s,L_{\frac{1}{\nu^2}}).
\]
The sequence $L_0$ is the sequence of the square roots of the positive eigenvalues of the Laplace operator on the three sphere $S^3$ of radius 1 (see \cite{Spr0}, and references therein). Thus,
\[
\zeta(2s,L_0)=\zeta(s,{\rm Sp}_+{\partial}elta_{S^3}).
\]
The zeta function $\zeta(s,{\rm Sp}_+{\partial}elta_{S^3})$ has been studied by various authors. We will refer to \cite{Spr0}. Using the results in \cite{Spr0}, it follows that ${\mathsf e}({\rm Sp}_+{\partial}elta^{(0)}_{S^3})=\frac{3}{2}$, ${\gamma}e({\rm Sp}_+ {\partial}elta^{(0)}_{S^3})=1$, and that ${\rm Sp}_+ {\partial}elta^{(0)}_{S^3}$ is a totally regular sequence of spectral type with infinite order. Since shifting the sequence does not alter its character (see \cite{Spr4}), it follows that ${\mathsf e}(U_0)={\gamma}e(U_0)=3$, and that $U_0$ is a totally regular sequence of spectral type with infinite order.
In \cite{Spr0}, it is also proved that $\zeta(s,{\rm Sp}_+{\partial}elta_{S^3})$ has simple poles at $s=\frac{3}{2},\frac{1}{2},-\frac{j}{2}$, for all $j>0$, and formulas for the residues are given. In particular:
{\partial}egin{align*}
&{\mathds{R}}u_{s=\frac{3}{2}}\zeta(s,{\rm Sp}_+ {\partial}elta^{(0)}_{S^3})=\frac{1}{2},&{\mathds{R}}u_{s=\frac{1}{2}}\zeta(s,{\rm Sp}_+ {\partial}elta^{(0)}_{S^3})=\frac{1}{4},\\
\end{align*}
and hence $\zeta(s,L_0)$ has simple poles at $s=1$ and $s=3$, with residues:
{\partial}egin{align*}
&{\mathds{R}}u_{s=3}\zeta(s,L_0)=1,&{\mathds{R}}u_{s=1}\zeta(s,L_0)=\frac{1}{2}.\\
\end{align*}
Expanding the power of the binomial, we have that
{\partial}egin{align*}
\zeta(s,L_q)&=\zeta(s,L_0)-\frac{s}{2}\zeta(s,L_0)q+\sum_{j=2}^\infty {\partial}inom{-\frac{s}{2}}{j}\zeta(s+2j,L_0)q^j,
\end{align*}
and therefore,
{\partial}egin{align*}
&{\mathds{R}}u_{s=1}\zeta(s,L_q)=\frac{1}{2}(1-q),&{\mathds{R}}u_{s=3}\zeta(s,L_q)=1,\\
\end{align*}
and we have the expansions
{\partial}egin{equation}{\mathsf l}abel{resz1}
{\partial}egin{aligned}
\zeta(s,U_0)&=\nu^{-s}\zeta(s,L_q)=\frac{1}{2\nu}{\mathsf l}eft(1-\frac{1}{\nu^2}\right)\frac{1}{s-1}+K_1(s),& {\rm near}~s=1,\\
\zeta(s,U_0)&=\nu^{-s}\zeta(s,L_q)=\frac{1}{\nu^3}\frac{1}{s-3}+K_3(s),& {\rm near}~s=3,\\
\end{aligned}
\end{equation}
where the $K_j(s)$ are some regular functions.
Next, we start the analysis of the double sequences. We split it into two parts.
\subsection{Part I} In this first part we deal with $Z(s)-\hat Z(s)$. Thus, we consider the sequences $S$ and $\hat S$. Using classical estimates for the zeros of Bessel functions \cite{Wat}, we find that ${\mathsf e}(S)={\mathsf e}(\hat S)=2$, and the relative genera are $(2,0,1)$ for both sequences. The fact that $S_n$ and $\hat S_n$ are totally regular sequences of spectral type with infinite order will be a consequence of the following analysis. Note that
we have the product representations (the first is classical, see for example \cite{Wat}, the second follows
using the Hadamard factorization theorem)
{\partial}egin{align*}
I_\nu(z)&=\frac{z^\nu}{2^\nu{\Gamma}amma(\nu+1)}\prod_{k=1}^\infty {\mathsf l}eft(1+\frac{z^2}{j_{\nu,k}^2}\right),\\
I_\nu'(z)&=\frac{z^{\nu-1}}{2^\nu{\Gamma}amma(\nu)}\prod_{k=1}^\infty {\mathsf l}eft(1+\frac{z^2}{(j_{\nu,k}')^2}\right).\\
\end{align*}
Using these representations, we obtain the following representations for the Gamma functions associated to the
sequences $S_n$ and $\hat S_n$. For further use, we give instead the representations for the Gamma functions associated
to the sequences $S_n/\mu_{1,n}^2$, and $\hat S_n/\mu_{1,n}^2$, that will do as well. By the definition in equation
(\ref{gamma}), with $z=\sqrt{-{\mathsf l}ambda}$, we have
{\partial}egin{align*}
{\mathsf l}og {\Gamma}amma(-{\mathsf l}ambda,S_n/(\mu_{1,n})^2)=&-{\mathsf l}og\prod_{k=1}^\infty {\mathsf l}eft(1+\frac{(-{\mathsf l}ambda)(\mu_{1,n})^2}{j_{\mu_{1,n},k}^2}\right)\\
=&-{\mathsf l}og I_{\mu_{1,n}}(\mu_{1,n}\sqrt{-{\mathsf l}ambda})+(\mu_{1,n}){\mathsf l}og\sqrt{-{\mathsf l}ambda} \\
&+\mu_{1,n}{\mathsf l}og (\mu_{1,n})-\mu_{1,n}{\mathsf l}og 2-{\mathsf l}og{\Gamma}amma(\mu_{1,n}+1),\\
{\mathsf l}og {\Gamma}amma(-{\mathsf l}ambda,\hat S_n/(\mu_{1,n})^2)=&-{\mathsf l}og\prod_{k=1}^\infty {\mathsf l}eft(1+\frac{(-{\mathsf l}ambda)(\mu_{1,n})^2}{(j_{\mu_{1,n},k}')^2}\right)\\
=&-{\mathsf l}og I'_{\mu_{1,n}}(\mu_{1,n}\sqrt{-{\mathsf l}ambda})+(\mu_{1,n}-1){\mathsf l}og\sqrt{-{\mathsf l}ambda} \\
&+\mu_{1,n}{\mathsf l}og (\mu_{1,n})-\mu_{1,n}{\mathsf l}og 2-{\mathsf l}og{\Gamma}amma(\mu_{1,n}).
\end{align*}
A first consequence of these representations is that we have a complete asymptotic expansion of the Gamma functions
$\log \Gamma(-\lambda,S_n)$, and $\log \Gamma(-\lambda,\hat S_n)$, and therefore $S_n$ and $\hat S_n$ are sequences of
spectral type. Considering the expansions, it follows that they are both totally regular sequences of infinite order.
Next, we prove that $S$ and $\hat S$ are spectrally decomposable over $U_1$ with power $\kappa=2$ and length
$\ell=4$, as in Definition \ref{spdec}. We have to show that the functions $\log \Gamma(-\lambda,S_n/\mu_{1,n}^2)$,
and $\log \Gamma(-\lambda,\hat S_n/\mu_{1,n}^2)$ have the appropriate uniform expansions for large $n$. This follows
using the uniform expansions for the Bessel functions given for example in \cite{Olv} (7.18), and Ex. 7.2,
\[
I_{\nu}(\nu z)=\frac{\e^{\nu\sqrt{1+z^2}}\e^{\nu\log\frac{z}{1+\sqrt{1+z^2}}}}{\sqrt{2\pi
\nu}(1+z^2)^\frac{1}{4}}\left(1+U_1(z)\frac{1}{\nu}+U_2(z)\frac{1}{\nu^2}+U_{3}(z)\frac{1}{\nu^{3}}+O\left(\frac{1}{\nu^4}\right)\right),
\]
where
\begin{align*}
U_1(z)=&\frac{1}{8\sqrt{1+z^2}}-\frac{5}{24(1+z^2)^\frac{3}{2}},\\
U_2(z)=&\frac{9}{128(1+z^2)}-\frac{77}{192(1+z^2)^2}+\frac{385}{1152(1+z^2)^3},\\
U_3(z)=&\frac{75}{1024(1+z^2)^{\frac{3}{2}}} - \frac{4563}{5120(1+z^2)^{\frac{5}{2}}}+
\frac{17017}{9216(1+z^2)^{\frac{7}{2}}}-\frac{85085}{82944(1+z^2)^{\frac{9}{2}}},
\end{align*}
and
\[
I_{\nu}'(\nu z)=\frac{(1+z^2)^\frac{1}{4}\e^{\nu\sqrt{1+z^2}}\e^{\nu\log\frac{z}{1+\sqrt{1+z^2}}}}{\sqrt{2\pi
\nu}z}\left(1+V_1(z)\frac{1}{\nu}+V_2(z)\frac{1}{\nu^2}+\dots+O\left(\frac{1}{\nu^4}\right)\right),
\]
where
\begin{align*}
V_1(z)=&-\frac{3}{8\sqrt{1+z^2}}+\frac{7}{24(1+z^2)^\frac{3}{2}},\\
V_2(z)=&-\frac{15}{128(1+z^2)}+ \frac{33}{64(1+z^2)^2} - \frac{455}{1152(1+z^2)^3},\\
V_3(z)=&-\frac{105}{1024(1+z^2)^{\frac{3}{2}}} +
\frac{5577}{5120(1+z^2)^{\frac{5}{2}}}-\frac{6545}{3072(1+z^2)^{\frac{7}{2}}}+\frac{95095}{82944(1+z^2)^{\frac{9}{2}}}.
\end{align*}
Using the classical expansion for the logarithm of the Euler Gamma function \cite{GZ} 8.344, we obtain, for large $n$, uniformly in $\lambda$, the expansion of $\log \Gamma(-\lambda,\hat S_n/\mu_{1,n}^2)$ and of $\log \Gamma(-\lambda, S_n/\mu_{1,n}^2)$, and consequently of the difference
\begin{align*}
\log \Gamma(-\lambda,\hat S_n/\mu_{1,n}^2) &- \log \Gamma(-\lambda, S_n/\mu_{1,n}^2) =
\sum_{h=0}^\infty \left( \hat\phi_{h-1}(\lambda) - \phi_{h-1} (\lambda) \right) \mu_{1,n}^{1-h}\\
&= -\frac{1}{2} \log(1-\lambda) -\frac{1}{2} \log \lambda + \left(\hat \phi_1(\lambda) -
\phi_1(\lambda)\right)\frac{1}{\mu_{1,n}}\\
&+ \left(\hat \phi_2(\lambda) - \phi_2(\lambda)\right)\frac{1}{\mu_{1,n}^2} + \left(\hat
\phi_3(\lambda) - \phi_3(\lambda)\right)\frac{1}{\mu_{1,n}^3} +O\left(\frac{1}{\mu_{1,n}^{4}}\right)
\end{align*}
with
\begin{align*}
\hat\phi_1(\lambda)-\phi_1(\lambda)&=\frac{1}{2}\frac{1}{(1-\lambda)^\frac{1}{2}}-\frac{1}{2}\frac{1}{(1-\lambda)^\frac{3}{2}},\\
\hat\phi_2(\lambda)-\phi_2(\lambda)&=\frac{1}{4}\frac{1}{(1-\lambda)}- \frac{1}{(1-\lambda)^{2}}-\frac{3}{4} \frac{1}{(1-\lambda)^3},\\
\hat\phi_3(\lambda)-\phi_3(\lambda)&=\frac{11}{48}\frac{1}{(1-\lambda)^\frac{3}{2}}-\frac{35}{16}\frac{1}{(1-\lambda)^\frac{5}{2}}
+\frac{67}{16}\frac{1}{(1-\lambda)^\frac{7}{2}}-\frac{107}{48}\frac{1}{(1-\lambda)^\frac{9}{2}}.
\end{align*}
Note that the length $\ell$ of the decomposition is precisely $4$. Indeed, $\mathsf{e}(U_1)=3$, and therefore the largest
integer $h$ such that $h-1=\sigma_h\leq 3$ is $h=4$. However, note that by
Theorem \ref{tt}, only the terms with $\sigma_h=1$, and $\sigma_h=3$, namely $h=2,4$, appear in the formula of Theorem \ref{t4}, since the
unique poles of $\zeta(s,U_1)$ are at $s=1$ and $s=3$. We now apply the formulas of Theorem \ref{t4}.
First, by the definition in equation (\ref{fi1}),
\begin{align*}
\hat \Phi_1(s) - \Phi_1(s)=&\int_0^\infty t^{s-1}\frac{1}{2\pi i}\int_{\Lambda_{\theta,c}}\frac{\e^{-\lambda
t}}{-\lambda}
\left(\frac{1}{2}\frac{1}{(1-\lambda)^\frac{1}{2}}-\frac{1}{2}\frac{1}{(1-\lambda)^\frac{3}{2}}\right) d\lambda dt,\\
\hat \Phi_2(s) - \Phi_2(s)=&\int_0^\infty t^{s-1}\frac{1}{2\pi i}\int_{\Lambda_{\theta,c}}\frac{\e^{-\lambda
t}}{-\lambda}
\left(\frac{1}{4}\frac{1}{(1-\lambda)}- \frac{1}{(1-\lambda)^{2}}-\frac{3}{4} \frac{1}{(1-\lambda)^3}\right)\hspace{-3pt} d\lambda dt,\\
\hat \Phi_3(s) - \Phi_3(s)=&\int_0^\infty t^{s-1}\frac{1}{2\pi i}\int_{\Lambda_{\theta,c}}\frac{\e^{-\lambda
t}}{-\lambda} \left(\frac{11}{48}\frac{1}{(1-\lambda)^\frac{3}{2}}-\frac{35}{16}\frac{1}{(1-\lambda)^\frac{5}{2}}\right) d\lambda dt\\
&+\int_0^\infty t^{s-1}\frac{1}{2\pi i}\int_{\Lambda_{\theta,c}}\frac{\e^{-\lambda
t}}{-\lambda} \left(\frac{67}{16}\frac{1}{(1-\lambda)^\frac{7}{2}}-\frac{107}{48}\frac{1}{(1-\lambda)^\frac{9}{2}}\right) d\lambda dt.
\end{align*}
These integrals can be computed using the formula in Appendix \ref{appendixA}. We obtain
\begin{align*}
\Rz_{s=0}\left(\hat \Phi_1(s) - \Phi_1(s)\right)&=-1 ,&\Ru_{s=0}\left(\hat\Phi_1(s) - \Phi_1(s)\right)&=0,\\
\Rz_{s=0}\left(\hat \Phi_2(s) - \Phi_2(s)\right)&=\frac{1}{8} ,&\Ru_{s=0}\left(\hat\Phi_2(s) - \Phi_2(s)\right)&=0,\\
\Rz_{s=0}\left(\hat \Phi_3(s) - \Phi_3(s)\right)&=-\frac{2}{315} ,&\Ru_{s=0}\left(\hat\Phi_3(s) - \Phi_3(s)\right)&=0.
\end{align*}
Second, using these results and the residues of $\zeta(s,U_1)$ given by equation (\ref{rrr1}),
it follows that
\begin{equation}\label{p1}
\begin{aligned}
\hat Z(0)- Z(0)=&-\hat A_{0,1}(0)+ A_{0,1}(0)+\frac{1}{2}\Ru_{s=1}\zeta(s,U_1)\Ru_{s=0}(\hat\Phi_1(s)-\Phi_1(s))\\
&+\frac{1}{2}\Ru_{s=3}\zeta(s,U_1)\Ru_{s=0}(\hat\Phi_3(s)-\Phi_3(s))\\
=&-\hat A_{0,1}(0)+ A_{0,1}(0),
\end{aligned}
\end{equation}
and
\begin{equation}\label{p2}
\begin{aligned}
\hat Z'(0)-Z'(0)=&-\hat A_{0,0}(0)-\hat A_{0,1}'(0)+ A_{0,0}(0)+ A_{0,1}'(0)\\
&+\frac{1}{2}\Rz_{s=1}\zeta(s,U_1)\Ru_{s=0}(\hat\Phi_1(s)-\Phi_1(s))\\
&+\frac{1}{2}\Rz_{s=3}\zeta(s,U_1)\Ru_{s=0}(\hat\Phi_3(s)-\Phi_3(s))\\
=&-\hat A_{0,1}(0)+ A_{0,1}(0)+\frac{1}{2\nu}-\frac{1}{315\nu^3}.
\end{aligned}
\end{equation}
Third, by equation (\ref{fi2}) and Theorem \ref{tt}, the terms $A_{0,0}(s)$ and $A_{0,1}(s)$ are
\begin{align*}
A_{0,0}(s)&=\sum_{n=1}^\infty \left(a_{0, 0,n} -b_{1,0,0}u_n^{-1}-b_{3,0,0}u_n^{-3}\right)u_n^{-2 s},\\
A_{0,1}(s)&=\sum_{n=1}^\infty \left(a_{0, 1,n} -b_{1,0,1}u_n^{-1}-b_{3,0,1}u_n^{-3}\right)u_n^{-2 s}.
\end{align*}
Hence, we need the expansion for large $\lambda$ of the functions $\log\Gamma(-\lambda,\hat S_n/\mu_{1,n}^2)$,
$\hat\phi_{1}(\lambda)$, $\hat\phi_{3}(\lambda)$, $\log\Gamma(-\lambda, S_n/\mu_{1,n}^2)$, $\phi_{1}(\lambda)$ and
$\phi_{3}(\lambda)$. Using classical expansions for the Bessel functions and their derivative and the formulas in
equation (\ref{form}), we obtain
\begin{align*}
a_{0,0,n}&=\frac{1}{2}\log 2\pi+\left(\mu_{1,n}+\frac{1}{2}\right)\log\mu_{1,n}-\mu_{1,n}\log 2-\log\Gamma(\mu_{1,n}+1),\\
a_{0,1,n}&=\frac{1}{2}\left(\mu_{1,n}+\frac{1}{2}\right),\\
b_{1,0,0}&=-\frac{1}{12},\hspace{50pt} b_{3,0,0} = \frac{1}{360}, \hspace{50pt}b_{1,0,1}=b_{3,0,1}=0,
\end{align*}
and
\begin{align*}
\hat a_{0,0,n}&=\frac{1}{2}\log 2\pi+\left(\mu_{1,n}+\frac{1}{2}\right)\log\mu_{1,n}-\mu_{1,n}\log 2-\log\Gamma(\mu_{1,n}+1),\\
\hat a_{0,1,n}&=\frac{1}{2}\left(\mu_{1,n}-\frac{1}{2}\right),\\
\hat b_{1,0,0}&=-\frac{1}{12}, \hspace{50pt}\hat b_{3,0,0} = \frac{1}{360}, \hspace{50pt}\hat b_{1,0,1}=\hat b_{3,0,1}=0.
\end{align*}
This shows that $A_{0,0}(0)=\hat A_{0,0}(0)$, and that
\[
\hat A_{0,1}(s)- A_{0,1}(s)=-\frac{1}{2}\sum_{n=1}^\infty n(n+2)\mu_{1,n}^{-2s}=-\frac{1}{2}\zeta(2s, U_1).
\]
Thus,
\begin{align*}
\hat A_{0,1}(0)- A_{0,1}(0)&=-\frac{1}{4},\\
\hat A'_{0,1}(0)- A'_{0,1}(0)&=\frac{1}{2}\log\nu-\zeta'(-2)-\frac{1}{2}\log 2\pi.
\end{align*}
Substitution in equations (\ref{p1}) and (\ref{p2}) gives
\begin{align*}
\hat Z(0)- Z(0)=&\frac{1}{4},\\
\hat Z'(0)-Z'(0)=&-\frac{1}{2}\log\nu+\zeta'(-2)+\frac{1}{2}\log 2\pi+\frac{1}{2\nu}-\frac{1}{315\nu^3}.
\end{align*}
\subsection{Part II} In this second part we deal with $2Z_0(s)-Z_+(s)-Z_-(s)$. Thus, we consider the sequences $S_0$ and $S_\pm$. The sequence $S_0$ is analogous to the sequence $S$ analyzed in the previous part. We have that
\begin{align*}
\log \Gamma(-\lambda,S_{0,n}/\mu_{0,n}^2)=&-\log
I_{\mu_{0,n}}(\mu_{0,n}\sqrt{-\lambda})+\mu_{0,n}\log\sqrt{-\lambda}+\mu_{0,n} \log\mu_{0,n}\\ &-\mu_{0,n}\log2
-\log\Gamma(\mu_{0,n}) - \log \mu_{0,n}.
\end{align*}
Using the uniform expansion of $\log I_{\mu_{0,n}}(\mu_{0,n}\sqrt{-\lambda})$, we obtain the uniform expansion for large $n$:
\begin{align*}
\log &\Gamma(-\lambda, S_{0,n}/\mu_{0,n}^2) \\
&= \sum_{h=0}^\infty \phi_{h-1,0} (\lambda) \mu_{0,n}^{1-h}\\
&= \left( - \sqrt{1-\lambda} + \log(1+\sqrt{1-\lambda}) - \log 2 + 1 - \log \sqrt{-\lambda} \right)\mu_{0,n}\\
&+ \frac{1}{4}\log(1-\lambda) + \left( - U_1(\sqrt{-\lambda})-\frac{1}{12}\right)\frac{1}{\mu_{0,n}}\\
&+ \left( - U_2(\sqrt{-\lambda})+\frac{1}{2}U_1(\sqrt{-\lambda})^2\right) \frac{1}{\mu_{0,n}^2}\\
&+ \left( -
U_3(\sqrt{-\lambda})+U_1(\sqrt{-\lambda})U_2(\sqrt{-\lambda})-\frac{1}{3}U_1(\sqrt{-\lambda})^3+\frac{1}{360}\right)\frac{1}{\mu_{0,n}^3} +
O\left(\frac{1}{\mu_{0,n}^{4}}\right)\hspace{-1.5pt},
\end{align*}
and hence
\begin{align*}
\phi_{1,0}(\lambda)&=-\frac{1}{8} \frac{1}{(1-\lambda)^{\frac{1}{2}}} + \frac{5}{24}\frac{1}{(1-\lambda)^{\frac{3}{2}}}-\frac{1}{12},\\
\phi_{2,0}(\lambda)&=-\frac{1}{16} \frac{1}{(1-\lambda)}+\frac{3}{8} \frac{1}{(1-\lambda)^{2}}-\frac{5}{16} \frac{1}{(1-\lambda)^3},\\
\phi_{3,0}(\lambda)&=-\frac{25}{384} \frac{1}{(1-\lambda)^{\frac{3}{2}}}+\frac{531}{640}
\frac{1}{(1-\lambda)^{\frac{5}{2}}}
-\frac{221}{128}\frac{1}{(1-\lambda)^{\frac{7}{2}}}+\frac{1105}{1152}\frac{1}{(1-\lambda)^{\frac{9}{2}}}+\frac{1}{360}.
\end{align*}
Using the expansion of $\log I_{\mu_{0,n}}(\mu_{0,n}\sqrt{-\lambda})$, and that of the $\phi_{j,0}(\lambda)$ for large $\lambda$, and the definitions in equations (\ref{form}), we compute
\begin{align*}
a_{0,0,n,0}&=\frac{1}{2}\log 2\pi+\left(\mu_{0,n}+\frac{1}{2}\right)\log\mu_{0,n}-\mu_{0,n}\log 2-\log\Gamma(\mu_{0,n}+1),\\
a_{0,1,n,0}&=\frac{1}{2}\left(\mu_{0,n}+\frac{1}{2}\right),\\
b_{1,0,0,0}&=-\frac{1}{12}, \hspace{50pt}b_{3,0,0,0} = \frac{1}{360}, \hspace{50pt}b_{1,0,1,0}=b_{3,0,1,0}=0.
\end{align*}
The analysis of the sequences $S_\pm$ needs more work. Let us define the functions
\[
T^{\pm}_{\nu}(z)=\pm J_{\nu}(z)+zJ'_\nu(z).
\]
Recalling the series definition of the Bessel function
\[
J_\nu(z)=\frac{z^\nu}{2^\nu}\sum_{k=0}^\infty \frac{(-1)^kz^{2k}}{2^{2k}k!\Gamma(\nu+k+1)},
\]
we obtain that near $z=0$
\[
T_\nu^\pm(z) =\left(1\pm\frac{1}{\nu}\right) \frac{z^\nu}{2^\nu\Gamma(\nu)}.
\]
This means that the function $\hat T^\pm_\nu(z)=z^{-\nu} T^\pm_\nu(z)$ is an even function of $z$. Let $z_{\nu,k,\pm}$
be the positive zeros of $T^\pm_\nu(z)$ arranged in increasing order. By the Hadamard factorization theorem, we have
the product expansion
\[
\hat T^\pm_\nu(z)=\hat T^\pm_\nu(0){\prod_{k=-\infty}^{+\infty}}\left(1-\frac{z}{z_{\nu,k,\pm}}\right),
\]
and therefore
\[
T^\pm_\nu(z)=\left(1\pm\frac{1}{\nu}\right)\frac{z^\nu}{2^\nu\Gamma(\nu)}
\prod_{k=1}^{\infty}\left(1-\frac{z^2}{z^2_{\nu,k,\pm}}\right).
\]
Next, recalling that (when $-\pi<\arg(z)<\frac{\pi}{2}$)
\begin{align*}
J_\nu(iz)&=\e^{\frac{\pi}{2}i\nu} I_\nu(z),\\
J'_\nu(iz)&=\e^{\frac{\pi}{2}i\nu}\e^{-\frac{\pi}{2}i} I'_\nu(z),
\end{align*}
we obtain
\[
T_\nu^\pm(iz)=\e^{\frac{\pi}{2}i\nu}\left(\pm I_\nu(z)+zI'_\nu(z)\right).
\]
Thus, we define (for $-\pi<\arg(z)<\frac{\pi}{2}$)
\begin{equation}\label{pop}
Q^\pm_\nu(z)=\e^{-\frac{\pi}{2}i\nu}T_\nu^\pm(i z),
\end{equation}
and hence
\begin{align*}
Q^\pm_\nu(z)&=\pm I_\nu(z)+zI'_\nu(z)=\left(1\pm\frac{1}{\nu}\right)\frac{z^\nu}{2^\nu\Gamma(\nu)}
\prod_{k=1}^{\infty}\left(1+\frac{z^2}{z^2_{\nu,k,\pm}}\right).
\end{align*}
\end{align*}
A first consequence of this representations is that we have a complete asymptotic expansion of the Gamma functions
${\mathsf l}og {\Gamma}amma(-{\mathsf l}ambda,S_{\pm,n})$, and therefore both $S_{+,n}$ and $S_{-,n}$ are sequences of spectral type. Considering the
expansions, it follows that they are both totally regular sequences of infinite order.
Next, we prove that $S_\pm$ are spectrally decomposable over $U$ with power $\kappa=2$ and length $\ell=4$, as in
Definition \ref{spdec}. We have to show that the functions ${\mathsf l}og {\Gamma}amma(-{\mathsf l}ambda,S_{\pm,n}/u_n^2)$, have the
appropriate uniform expansions for large $n$. We have
{\partial}egin{align*}
{\mathsf l}og {\Gamma}amma(-{\mathsf l}ambda,S_{\pm,n}/\mu_{0,n}^2)=&-{\mathsf l}og
Q^\pm_{\mu_{0,n}}(\mu_{0,n}\sqrt{-{\mathsf l}ambda})+\mu_{0,n}{\mathsf l}og\sqrt{-{\mathsf l}ambda}+\mu_{0,n} {\mathsf l}og\mu_{0,n}\\ &-\mu_{0,n}{\mathsf l}og2
-{\mathsf l}og{\Gamma}amma(\mu_{0,n})+{\mathsf l}og{\mathsf l}eft(1\pm\frac{1}{\mu_{0,n}}\right).
\end{align*}
Recalling the expansions given the previous part, we obtain
{\partial}egin{align*}
Q^\pm_\nu(\nu z)
&=\sqrt{\nu}(1+z^2)^\frac{1}{4}\frac{\e^{\nu\sqrt{1+z^2}}\e^{\nu{\mathsf l}og\frac{z}{1+\sqrt{1+z^2}}}}{\sqrt{2\pi }}\\
&\hspace{30pt}{\mathsf l}eft(1+W_{1,\pm}(z)\frac{1}{\nu}+W_{2,\pm}(z)\frac{1}{\nu^2}+W_{3,\pm}(z)\frac{1}{\nu^{3}} +
O(\nu^{-4})\right),
\end{align*}
where $p=\frac{1}{\sqrt{1+z^2}}$, and
{\partial}egin{align*}
W_{1,\pm}(p)=V_1(p)\pm p,\hspace{15pt}W_{2,\pm}(p)=V_2(p)\pm pU_1(p),\hspace{15pt}W_{3,\pm}(p)=V_3(p)\pm pU_2(p),
\end{align*}
{\partial}egin{align*}
&W_{1,+}(p)=\frac{5}{8}p+\frac{7}{24}p^3,\\
&W_{2,+}(p)=-\frac{1}{128}p^2+\frac{59}{192}p^4-\frac{455}{1152}p^6, \\
&W_{3,+}(p)=-\frac{33}{1024}p^3+\frac{10571}{15360}p^5-\frac{16555}{9216}p^7 + \frac{95095}{82944}p^9,\\
&W_{1,-}(p)=-\frac{11}{8}p+\frac{7}{24}p^3,\\
&W_{2,-}(p)=-\frac{31}{128}p^2+\frac{139}{192}p^4-\frac{455}{1152}p^6,\\
&W_{3,-}(p)=-\frac{177}{1024}p^3+\frac{22891}{15360}p^5-\frac{22715}{9216}p^7 + \frac{95095}{82944}p^9.\\
\end{align*}
This gives
\begin{align*}
\log\Gamma&(-\lambda, S_{n,\pm}/\mu_{0,n}^2)\\
=&\sum_{h=0}^\infty \phi_{h-1,\pm}(\lambda) \mu_{0,n}^{1-h}\\
=&\left(1-\sqrt{1-\lambda}+\log(1+\sqrt{1-\lambda})-\log 2\right)\mu_{0,n}\\
&-\frac{1}{4}\log(1-\lambda)+\left(-W_{1,\pm}(\sqrt{-\lambda})\pm 1 -\frac{1}{12}\right)\frac{1}{\mu_{0,n}}\\
&+\left(-W_{2,\pm}(\sqrt{-\lambda})+\frac{1}{2}W_{1,\pm}^2(\sqrt{-\lambda})-\frac{1}{2}\right)\frac{1}{\mu^2_{0,n}}\\
&+\left(W_{1,\pm}(\sqrt{-\lambda})W_{2,\pm}(\sqrt{-\lambda})-W_{3,\pm}(\sqrt{-\lambda})-\frac{1}{3}W^{3}_{1,\pm}(\sqrt{-\lambda})\pm\frac{1}{3}+\frac{1}{360}
\right)\frac{1}{\mu_{0,n}^{3}}\\
& + O\left(\frac{1}{\mu_{0,n}^4}\right),
\end{align*}
and hence
\[
\begin{aligned}
\phi_{1,+}(\lambda)&=-\frac{5}{8}\frac{1}{(1-\lambda)^{\frac{1}{2}}}-\frac{7}{24}\frac{1}{(1-\lambda)^{\frac{3}{2}}}+\frac{11}{12},\\
\phi_{1,-}(\lambda)&=\frac{11}{8}\frac{1}{(1-\lambda)^{\frac{1}{2}}}-\frac{7}{24}\frac{1}{(1-\lambda)^{\frac{3}{2}}}+\frac{13}{12},
\end{aligned}
\]
\[
\begin{aligned}
\phi_{2,+}(\lambda)&=\frac{3}{16}\frac{1}{1-\lambda}-\frac{1}{8}\frac{1}{(1-\lambda)^2}+\frac{7}{16}\frac{1}{(1-\lambda)^3}-\frac{1}{2},\\
\phi_{2,-}(\lambda)&=\frac{19}{16}\frac{1}{1-\lambda}-\frac{9}{8}\frac{1}{(1-\lambda)^2}+\frac{7}{16}\frac{1}{(1-\lambda)^3}-\frac{1}{2},
\end{aligned}
\]
\[
\begin{aligned}
\phi_{3,+}(\lambda)&=-\frac{17}{384}\frac{1}{(1-\lambda)^{\frac{3}{2}}}-\frac{389}{640}\frac{1}{(1-\lambda)^{\frac{5}{2}}}
+\frac{203}{128}\frac{1}{(1-\lambda)^{\frac{7}{2}}}-\frac{1463}{1152}\frac{1}{(1-\lambda)^{\frac{9}{2}}}+\frac{121}{360},\\
\phi_{3,-}(\lambda)&=\frac{527}{384}\frac{1}{(1-\lambda)^{\frac{3}{2}}}-\frac{1989}{640}\frac{1}{(1-\lambda)^{\frac{5}{2}}}
+\frac{427}{128}\frac{1}{(1-\lambda)^{\frac{7}{2}}}-\frac{1463}{1152}\frac{1}{(1-\lambda)^{\frac{9}{2}}}-\frac{119}{360}.
\end{aligned}
\]
By equation (\ref{fi2}) and Theorem \ref{tt}, the terms $A_{0,0,\pm}(s)$ and $A_{0,1,\pm}(s)$ are
\begin{align*}
A_{0,0,\pm}(s)&=\sum_{n=1}^\infty \left(a_{0, 0,n,\pm} -b_{1,0,0,\pm}u_n^{-1}-b_{3,0,0,\pm}u_n^{-3}\right)u_n^{-2 s},\\
A_{0,1,\pm}(s)&=\sum_{n=1}^\infty \left(a_{0, 1,n,\pm} -b_{1,0,1,\pm}u_n^{-1}-b_{3,0,1,\pm}u_n^{-3}\right)u_n^{-2 s}.
\end{align*}
Hence, we need the expansion for large $\lambda$ of the functions $\log\Gamma(-\lambda,S_{n,\pm}/\mu_{0,n}^2)$,
$\phi_{1,\pm}(\lambda)$ and $\phi_{3,\pm}(\lambda)$. Using equations (\ref{pop}) and the definition, we obtain
\[
Q^\pm_\nu(z)\sim \frac{\sqrt{z}\e^z}{\sqrt{2\pi}}\left(1+\sum_{k=1}^\infty b_kz^{-k}\right)+O(\e^{-z}),
\]
for large $z$. Therefore,
\begin{align*}
\log\Gamma(-\lambda,S_{n,\pm}/\mu_{0,n}^2)=&-\mu_{0,n}
\sqrt{-\lambda}+\frac{1}{2}\left(\mu_{0,n}-\frac{1}{2}\right)\log(-\lambda)
+\frac{1}{2}\log 2\pi\\
&+\left(\mu_{0,n}-\frac{1}{2}\right)\log\mu_{0,n}
-\log 2^{\mu_{0,n}}\Gamma(\mu_{0,n})\\
&+\log\left(1\pm\frac{1}{\mu_{0,n}}\right) +O\left(\frac{1}{\sqrt{-\lambda}}\right).
\end{align*}
Thus,
\begin{align*}
a_{0,0,n,\pm}&=\frac{1}{2}\log 2\pi+\left(\mu_{0,n}-\frac{1}{2}\right)\log\mu_{0,n}-\log 2^{\mu_{0,n}}\Gamma(\mu_{0,n})
+\log\left(1\pm\frac{1}{\mu_{0,n}}\right),\\
a_{0,1,n,\pm}&=\frac{1}{2}\left(\mu_{0,n}-\frac{1}{2}\right),\\
b_{1,0,0,+}&=-\frac{11}{12}, \hspace{30pt}b_{3,0,0,+} = \frac{121}{360}, \hspace{30pt}b_{1,0,1,\pm}=b_{3,0,1,\pm} =0,\\
b_{1,0,0,-}&=-\frac{13}{12}, \hspace{30pt}b_{3,0,0,-} = \frac{119}{360}.
\end{align*}
Using these coefficients and the ones obtained for the sequence $S_0$, we conclude that
\begin{align*}
2A_{0,0,0}(s)-A_{0,0,+}(s)-A_{0,0,-}(s) = - \sum_{n=1}^{\infty}
\log\left(1-\frac{1}{\mu^2_{0,n}}\right)\frac{(n+1)^2}{\mu_{0,n}^{2s}},
\end{align*}
and
\begin{align*}
2A_{0,1,0}(s)-A_{0,1,+}(s)-A_{0,1,-}(s) = \sum_{n=1}^{\infty} \frac{(n+1)^2}{\mu_{0,n}^{2s}}.
\end{align*}
Next, we collect the results obtained for giving the uniform expansion of the sum of the logarithmic Gamma functions:
\begin{align*}
2 \log \Gamma(-\lambda,\hat S_{0,n}/\mu_{0,n}^2) - \log\Gamma(-\lambda, S_{n,+}/\mu_{0,n}^2) - \log\Gamma(-\lambda,
&S_{n,-}/\mu_{0,n}^2) \\
&=\sum_{h=1}^\infty \phi_{h-1}(\sqrt{-\lambda})\mu_{0,n}^{1-h},
\end{align*}
where
\[
\phi_{h-1}(\sqrt{-\lambda})=2 \phi_{h-1,0}(\sqrt{-\lambda}) - \phi_{h-1,+}(\sqrt{-\lambda}) -
\phi_{h-1,-}(\sqrt{-\lambda}),
\]
and
\begin{align*}
\phi_{1}(\sqrt{-\lambda}) &= - \frac{1}{(1-\lambda)^{\frac{1}{2}}} + \frac{1}{(1-\lambda)^{\frac{3}{2}}}, \\
\phi_{2}(\sqrt{-\lambda}) &= - \frac{3}{2} \frac{1}{1-\lambda} + 2 \frac{1}{(1-\lambda)^{2}} - \frac{3}{2} \frac{1}{(1-\lambda)^3} + 1,\\
\phi_{3}(\sqrt{-\lambda}) &=-\frac{35}{24}
\frac{1}{(1-\lambda)^{\frac{3}{2}}} +\frac{43}{8} \frac{1}{(1-\lambda)^{\frac{5}{2}}}
-\frac{67}{8}\frac{1}{(1-\lambda)^{\frac{7}{2}}} +\frac{107}{24} \frac{1}{(1-\lambda)^{\frac{9}{2}}}.
\end{align*}
Let $\Phi_{h-1}(s)=2 \Phi_{h-1,0}(s) - \Phi_{h-1,+}(s) - \Phi_{h-1,-}(s)$. Then, using the definition in equation (\ref{fi1}), and the formula for the integral in Appendix \ref{appendixA}, we have
\begin{align*}
\Phi_{1}(s) &= \frac{2 \Gamma(s+\frac{1}{2})}{\sqrt{\pi}}, \\
\Phi_{2}(s) &= -\frac{\Gamma(s+1)}{2}\left(5+5s+\frac{3}{2}s^2\right),\\
\Phi_{3}(s) &= \frac{\Gamma(s+\frac{3}{2})}{\sqrt{\pi}} \left(\frac{428}{315}
+\frac{22}{35}s + \frac{214}{315}s^2\right),
\end{align*}
and hence
\begin{align*}
\Rz_{s=0}\Phi_{1}(s)&=2 ,
&\Ru_{s=0}\Phi_{1}(s)&=0,\\
\Rz_{s=0}\Phi_{2}(s)&=-\frac{5}{2} ,
&\Ru_{s=0}\Phi_{2}(s)&=0,\\
\Rz_{s=0}\Phi_{3}(s)&=\frac{214}{315} , &\Ru_{s=0}\Phi_{3}(s)&=0.
\end{align*}
Using all these results and the residues of the function $\zeta(s,U_0)$ in the formulas given in Theorem \ref{t4}, we obtain
\begin{align*}
2Z_0(0)-Z_+(0)-Z_-(0)=&-2A_{0,1,0}(0)+A_{0,1,+}(0)+A_{0,1,-}(0),\\
2Z'_0(0)-Z'_+(0)-Z'_-(0)=&-2A_{0,0,0}(0)+A_{0,0,+}(0)+A_{0,0,-}(0)
-2A'_{0,1,0}(0)\\
&+A'_{0,1,+}(0)+A'_{0,1,-}(0)
+ \frac{1}{2\nu}\left(1-\frac{1}{\nu^2}\right)+\frac{107}{315\nu^3}.
\end{align*}
Recall that
\begin{align*}
2A_{0,1,0}(s)-A_{0,1,+}(s)-A_{0,1,-}(s) &= \sum_{n=1}^{\infty} \frac{(n+1)^2}{\mu_{0,n}^{2s}}\\
&=\nu^{-2s}\zeta(2s,U_0)\\
&=\nu^{-2s}\zeta\left(s,\mathrm{Sp}_+\Delta_{S^3}+\frac{1}{\nu^2}\right),
\end{align*}
and this gives (see \cite{Spr0})
\[
2A_{0,1,0}(0)-A_{0,1,+}(0)-A_{0,1,-}(0) =\zeta\left(0,\mathrm{Sp}_+\Delta_{S^3}+\frac{1}{\nu^2}\right)=-1,
\]
and hence
\[
2Z_0(0)-Z_+(0)-Z_-(0)=-2A_{0,1,0}(0)+A_{0,1,+}(0)+A_{0,1,-}(0)=1.
\]
In order to deal with the other term, it is convenient to proceed as follows. Since
\begin{align*}
2A_{0,0,0}(s)-A_{0,0,+}(s)-A_{0,0,-}(s) = -\sum_{n=1}^{\infty}(n+1)^2
\log\frac{\mu_{0,n}^2-1}{\mu_{0,n}^2}\mu_{0,n}^{-2s},
\end{align*}
we have that
\begin{align*}
A(s)=&2A_{0,0,0}(s)-A_{0,0,+}(s)-A_{0,0,-}(s)+2A'_{0,1,0}(s)-A'_{0,1,+}(s)-A'_{0,1,-}(s)\\
=&-\sum_{n=1}^{\infty}(n+1)^2\log(\mu_{0,n}^2-1)\mu_{0,n}^{-2s}.
\end{align*}
Recalling the definition of $\mu_{0,n}$,
\begin{align*}
A(s)=&-\sum_{n=1}^{\infty}(n+1)^2\log(\nu^2n(n+2))\mu_{0,n}^{-2s}\\
=&-2\log \nu\sum_{n=1}^{\infty}(n+1)^2\mu_{0,n}^{-2s}
-\sum_{n=1}^{\infty}(n+1)^2\log(n(n+2))\mu_{0,n}^{-2s}\\
=&-2(\log\nu)\nu^{-2s}\zeta\left(s,\mathrm{Sp}_+\Delta_{S^3}+\frac{1}{\nu^2}\right)+\nu^{-2s}\zeta'\left(s,\mathrm{Sp}_+\Delta_{S^3}+\frac{1}{\nu^2}\right)\\
=&-2(\log\nu)\nu^{-2s}\sum_{j=0}^\infty\binom{-s}{j}\zeta(s+j,\mathrm{Sp}_+\Delta_{S^3})\nu^{-2j}\\
&+\nu^{-2s}\sum_{j=0}^\infty\binom{-s}{j}\zeta'(s+j,\mathrm{Sp}_+\Delta_{S^3})\nu^{-2j},
\end{align*}
and therefore
\begin{align*}
A(0)&=-2\zeta(0,\mathrm{Sp}_+\Delta_{S^3})\log\nu+\zeta'(0,\mathrm{Sp}_+\Delta_{S^3})\\
&=2\log\nu+2\zeta'(-2)+2\zeta'(0)+\log 2.
\end{align*}
This gives
\begin{align*}
2Z'_0(0)-Z'_+(0)-Z'_-(0)&=-A(0)+ \frac{1}{2\nu}\left(1-\frac{1}{\nu^2}\right)+\frac{107}{315\nu^3}\\
&=-2\log\nu-2\zeta'(-2)+\log \pi
+ \frac{1}{2\nu}\left(1-\frac{1}{\nu^2}\right)+\frac{107}{315\nu^3}.
\end{align*}
We can now compute the torsion using equation (\ref{ttt}):
\begin{align*}
\log T(C_\alpha S^3_{la})=&\left(\frac{1}{4}+\frac{1}{4}+\frac{1}{2}\right)\log l^2\\
&-\log 2
-\frac{1}{2}\log\nu+\zeta'(-2)+\frac{1}{2}\log 2\pi+\frac{1}{2\nu}-\frac{1}{315\nu^3}\\
&-\log\nu-\zeta'(-2)+\frac{1}{2}\log \pi
+ \frac{1}{4\nu}\left(1-\frac{1}{\nu^2}\right)+\frac{107}{630\nu^3}\\
=&\frac{1}{2}\log \frac{\pi^2l^4}{2\nu^3}+\frac{3}{4}\frac{1}{\nu}-\frac{1}{12\nu^3}.
\end{align*}
We conclude this section reviewing briefly the analysis of the cases $n=1$ and $n=2$. All details can be found in \cite{HMS}. In the case $n=1$, the torsion is given by
\[
\log T(C_\alpha S^1_{l\sin\alpha})=\left(\frac{1}{4}+Z(0)-\hat Z(0)\right)\log l^2+Z'(0)-\hat Z'(0)-\frac{1}{2}\log 2,
\]
where
\[
Z(s)=\sum_{n,k=1}^\infty j_{\nu n,k}^{-2s},\qquad \hat Z(s)=\sum_{n,k=1}^\infty (j'_{\nu n,k})^{-2s}.
\]
Therefore, the analysis is very similar to the one performed in the previous part I, with the main difference that now the zeta function $\zeta(s,U)$ is $\nu^{-s}\zeta(s)$. Therefore, we just have a simple pole at $s=1$, and we only need the expansion of the logarithmic Gamma function up to order $\nu^{-1}$.
The case of the sphere is a bit more complicated. Now,
\[
\log T(C_\alpha S^2_{l\sin\alpha})= \left(\frac{3}{4} + \frac{1}{2} X_+(0) - \frac{1}{2} X_-(0) \right) \log l^{2} +
\frac{1}{2}X_+'(0)-\frac{1}{2}X_-'(0)+\frac{1}{2}\log\frac{4}{3},
\]
where
\[
X_+(s)=\sum_{n,k=1}^\infty (2n+1) \hat j_{\mu_n,k,+}^{-2s},\qquad X_-(s)=\sum_{n,k=1}^\infty (2n+1)\hat j_{\mu_n,k,-}^{-2s},
\]
$\mu_n=\sqrt{\nu^2 n(n+1)+\frac{1}{4}}$, and the $\hat j_{\nu,k,\pm}$ are the zeros of the function $G^{\pm}_{\nu}(z)=\pm\frac{1}{2}J_{\nu}(z)+zJ'_\nu(z)$. The zeta function $\zeta(s,U)$ is now related to the zeta function of the Laplace operator on the 2-sphere:
\[
\zeta(2s,U)=\nu^{-2s}\zeta\left(s,\mathrm{Sp}_+\Delta_{S^2}^{(0)}+\frac{1}{4\nu^2}\right).
\]
It is known (see for example \cite{Spr4}) that $\zeta(s,\mathrm{Sp}_+\Delta_{S^2}^{(0)})$ has one simple pole at $s=1$. This gives
\[
\zeta(s,U)=\frac{2}{\nu^2}\frac{1}{s-2}+f(s),
\]
where $f(s)$ is some regular function. Thus,
\begin{align*}
X_+(0) - X_-(0)=&-A_{0,1,+}(0)+A_{0,1,-}(0) + \frac{1}{\nu^2} \Ru_{s=0} (\Phi_{2,+}(s) - \Phi_{2,-}(s)),\\
X'_+(0)-X'_-(0)=&-(A_{0,0,+}(0)+A_{0,1,+}'(0)-A_{0,0,-}(0)- A_{0,1,-}'(0))\\
&+\frac{1}{\nu^2}\Rz_{s=0}(\Phi_{2,+}(s)-\Phi_{2,-}(s))\\
&+\left(\frac{\gamma}{\nu^2}+K\right)\Ru_{s=0}(\Phi_{2,+}(s)-\Phi_{2,-}(s)).
\end{align*}
Next, proceeding as in part II above, and introducing the functions
\[
G^\pm_\nu(z)=\pm\frac{1}{2}J_\nu(z)+zJ'_\nu(z),
\]
we obtain the product representation
\begin{align*}
H^\pm_\nu(z)&=\pm\frac{1}{2}I_\nu(z)+zI'_\nu(z)=\left(1\pm\frac{1}{2\nu}\right)\frac{z^\nu}{2^\nu\Gamma(\nu)}
\prod_{k=1}^{\infty}\left(1+\frac{z^2}{z^2_{\nu,k,\pm}}\right),
\end{align*}
where $H^\pm_\nu(z)=\e^{-\frac{\pi}{2}i\nu}G_\nu^\pm(i z)$. This allows us to obtain the expansion
\begin{align*}
\log\Gamma(-\lambda, S_{n,\pm}/\mu^2_n)=&\sum_{h=0}^\infty \phi_{h-1,\pm}(\lambda) \mu_n^{1-h}\\
=&\left(1-\sqrt{1-\lambda}+\log(1+\sqrt{1-\lambda})-\log 2\right)\mu_n\\
&-\frac{1}{4}\log(1-\lambda)+\left(-W_{1,\pm}(\sqrt{-\lambda})\pm \frac{1}{2}-\frac{1}{12}\right)\frac{1}{\mu_n}\\
&+\left(-W_{2,\pm}(\sqrt{-\lambda})+\frac{1}{2}W_{1,\pm}^2(\sqrt{-\lambda})-\frac{1}{8}\right)\frac{1}{\mu^2_n}+O\left(\frac{1}{\mu_n^3}\right),
\end{align*}
where $p=\frac{1}{(1-\lambda)^\frac{1}{2}}$, and
\begin{align*}
&W_{1,\pm}(p)=V_1(p)\pm\frac{1}{2}p,&W_{2,\pm}(p)=V_2(p)\pm \frac{1}{2}pU_1(p),
\end{align*}
namely
\begin{align*}
&W_{1,+}(p)=\frac{1}{8}p+\frac{7}{24}p^3,&W_{2,+}(p)=-\frac{7}{128}p^2+\frac{79}{192}p^4-\frac{455}{1152}p^6,\\
&W_{1,-}(p)=-\frac{7}{8}p+\frac{7}{24}p^3,&W_{2,-}(p)=-\frac{28}{128}p^2+\frac{119}{192}p^4-\frac{455}{1152}p^6.
\end{align*}
This gives
\[
\phi_{2,+}(\lambda)-\phi_{2,-}(\lambda)=-\frac{1}{2}\left(\frac{1}{1-\lambda}-\frac{1}{(1-\lambda)^2}\right),
\]
and hence, using the definition in equation (\ref{fi1}),
\begin{align*}
\Phi_{2,+}(s)-\Phi_{2,-}(s)&=-\frac{1}{2}\int_0^\infty t^{s-1}\frac{1}{2\pi i}\int_{\Lambda_{\theta,c}}\frac{\e^{-\lambda t}}{-\lambda}\left(\frac{1}{1-\lambda}-\frac{1}{(1-\lambda)^2}\right) d\lambda dt.
\end{align*}
Using the formula in Appendix \ref{appendixA}, we obtain
\begin{align*}
\Phi_{2,+}(s)-\Phi_{2,-}(s)&=\frac{1}{2}\Gamma(s+1),
\end{align*}
and hence
\begin{align*}
&\Rz_{s=0}(\Phi_{2,+}(s)-\Phi_{2,-}(s))=\frac{1}{2},&\Ru_{s=0}(\Phi_{2,+}(s)-\Phi_{2,-}(s))=0.
\end{align*}
This gives
\begin{align*}
X_+(0)-X_-(0)&= -A_{0,1,+}(0) + A_{0,1,-}(0),\\
X_+'(0)-X_-'(0)&=-(A_{0,0,+}(0)+A_{0,1,+}'(0)-A_{0,0,-}(0)- A_{0,1,-}'(0))+\frac{1}{2\nu^2}.
\end{align*}
Eventually, using the expansion for large $z$ of the functions $H_\nu^\pm(z)$, we obtain
{\partial}egin{align*}
{\mathsf l}og{\Gamma}amma(-{\mathsf l}ambda,S_{n,\pm}/\mu_n^2)=&-\mu_n \sqrt{-{\mathsf l}ambda}+\frac{1}{2}{\mathsf l}eft(\mu_n-\frac{1}{2}\right){\mathsf l}og(-{\mathsf l}ambda)
+\frac{1}{2}{\mathsf l}og 2\pi\\
&+{\mathsf l}eft(\mu_n-\frac{1}{2}\right){\mathsf l}og\mu_n
-{\mathsf l}og 2^{\mu_n}{\Gamma}amma(\mu_n)\\
&+{\mathsf l}og{\mathsf l}eft(1\pm\frac{1}{2\mu_n}\right) +O{\mathsf l}eft(\frac{1}{\sqrt{-{\mathsf l}ambda}}\right),
\end{align*}
and hence
{\partial}egin{align*}
a_{0,0,n,\pm}&=\frac{1}{2}{\mathsf l}og 2\pi+{\mathsf l}eft(\mu_n-\frac{1}{2}\right){\mathsf l}og\mu_n-{\mathsf l}og 2^{\mu_n}{\Gamma}amma(\mu_n)
+{\mathsf l}og{\mathsf l}eft(1\pm\frac{1}{2\mu_n}\right),\\
a_{0,1,n,\pm}&=\frac{1}{2}{\mathsf l}eft(\mu_n-\frac{1}{2}\right),\\
b_{2,0,0,\pm}&=-\frac{1}{8},\hspace{30pt}b_{2,0,1,\pm}=0.\\
\end{align*}
This immediately shows that $A_{0,1,+}(s)=A_{0,1,-}(s)$, and therefore $X_+(0)-X_-(0) = 0$. Next,
{\partial}egin{align*}
A_{0,0,+}(s)-A_{0,0,-}(s)&=\sum_{n=1}^\infty (2n+1) \mu_n^{-2s}{\mathsf l}eft({\mathsf l}og{\mathsf l}eft(1+\frac{1}{2\mu_n}\right)-{\mathsf l}og{\mathsf l}eft(1-\frac{1}{2\mu_n}\right)\right)\\
&=F(s,\nu).
\end{align*}
Note that this series converges uniformly for ${\mathds{R}}e(s)>2$, but, using the analytic extension of the zeta function $\zeta(s,U)$, it has an analytic extension that is regular at $s=0$. Therefore,
{\partial}egin{align*}
Z_+'(0)-Z_-'(0)=&-{\mathds{R}}z_{s=0}F(s,\nu)+\frac{1}{2\nu^2}=-{\mathsf l}og \frac{\nu^2}{\pi}-f(\nu)+\frac{1}{2\nu^2},
\end{align*}
and this concludes the proof in this case. A power series representation for the function $f(\nu)$ is (see \cite{HMS} Appendix B)
{\partial}egin{align*}
f(\nu)=&{\mathsf l}og\frac{\nu^2}{\pi}+\zeta(\frac{1}{2},{\rm Sp}_+{\partial}elta^{(0)}_{S^2})\frac{1}{\nu}\\
&+\sum_{\substack{j,k=0,\\ j+k\not=0}}^\infty \frac{1}{(2k+1)2^{2k}} \frac{1}{2^{2j}}{\partial}inom{-k-\frac{1}{2}}{j}\frac{\zeta(k+j+\frac{1}{2},{\rm Sp}_+{\partial}elta^{(0)}_{S^2})}{\nu^{2k+2j+1}}.
\end{align*}
\section{The higher dimensional cases}
{\mathsf l}abel{s5}
In the case of a smooth compact connected Riemannian manifold $(M,g)$ with boundary ${\partial} M$, the analytic torsion is given by the Reidemeister torsion plus some further contributions. It was shown in \cite{Che1} that this further contribution depends only on the boundary, namely that
\[
{\mathsf l}og T(M)={\mathsf l}og\tau(M)+C({\partial} M).
\]
In the case of a product metric near the boundary, the following formula for this contribution was given by L\"uck \cite{Luc}
\[
{\mathsf l}og T(M)={\mathsf l}og\tau(M)+\frac{1}{4}\chi({\partial} M){\mathsf l}og 2.
\]
In the general case a further contribution appears, that measures how the metric is {\it far} from a product metric:
\[
{\mathsf l}og T(M)={\mathsf l}og\tau(M)+\frac{1}{4}\chi({\partial} M){\mathsf l}og 2+A({\partial} M).
\]
A formula for this new {\it anomaly} contribution has been recently given by Br\"uning and Ma \cite{BM}. More precisely, in \cite{BM} (equation (0.6)) a formula is given for the ratio of the analytic torsions of two metrics, $g_0$ and $g_1$,
{\partial}egin{equation}{\mathsf l}abel{bat}
{\partial}egin{aligned}
{\mathsf l}og \frac{T(M,g_1)}{T(M,g_0)}=
\frac{1}{2}\int_{{\partial} M} {\mathsf l}eft(B(\nabla_1^{T M})-B(\nabla_0^{T M})\right),
\end{aligned}
\end{equation}
where $\nabla_j^{TM}$ is the curvature form of the metric $g_j$, and the
forms $B(\nabla_j^{TX})$ are defined in equation
(1.17) of \cite{BM} (see equation (\ref{ebm1}) below, and observe that we take the opposite sign with respect to the definition in \cite{BM}, since we are considering left actions instead of right actions). Note that we use the formula of \cite{BM} in the particular case of a flat trivial bundle $F$. Taking $g_1=g$, and $g_0$ a suitable deformation of $g$ that is a product metric near the boundary,
\[
A({\partial} M)={\mathsf l}og \frac{T(M,g_1)}{T(M,g_0)},
\]
and therefore
{\partial}egin{equation}{\mathsf l}abel{pop1}
{\mathsf l}og T(M)={\mathsf l}og\tau(M)+\frac{1}{4}\chi({\partial} M){\mathsf l}og 2+
\frac{1}{2}\int_{{\partial} M} {\mathsf l}eft(B(\nabla_1^{T M})-B(\nabla_0^{T M})\right).
\end{equation}
Since the whole boundary contribution is a local invariant of the boundary, the formula in equation (\ref{bat}) holds in the case of a cone $M=CW$, and therefore in the case under study: $M=C_\alpha S_{l\sin\alpha}^{m-1}$. We compute the contribution given by the formula in equation (\ref{bat}) with respect to the metric induced by the immersion and a suitable product metric. Our result is stated in the following lemma.
{\partial}egin{lem} Consider the two metrics
{\partial}egin{align*}
g_1 &= dr \otimes dr + a^2 r^2 g_{S^{n}},\\
g_0& = dr\otimes dr + a^2 l^{2} g_{S^{n}},
\end{align*}
on $C_\alpha S^{n}_{la}$, where $a=\sin\alpha$. Then, ($p>0$)
{\partial}egin{align*}
{\mathsf l}og &\frac{T(C_\alpha S^{2p}_{la},g_1)}{T(C_\alpha S^{2p}_{la},g_0)}= \frac{a^{2p}}{8}
\sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} {\partial}inom{j}{h}
\frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} \chi(S^{2p}_{la})\\
{\mathsf l}og &\frac{T(C_\alpha S^{2p-1}_{la},g_1)}{T(C_\alpha S^{2p-1}_{la},g_0)}\\
&= \sum_{j=0}^{p -1} \frac{2^{p-j}}{j!(2(p-j)-1)!!} \sum_{h=0}^{j} {\partial}inom{j}{h}
\frac{(-1)^{h}}{(2(p-j+h)-1)a^{2(j-h)}} \frac{a^{2p-1} (2p-1)!}{4^{p} (p-1)!}
\end{align*}
{\mathsf l}abel{ele}
\end{lem}
{\partial}egin{proof} The proof is a generalization of the proofs of Lemmas 1 and 2 of \cite{HMS}. We first recall some notation from \cite{BZ} Chapter III and \cite{BM} Section 1.1. For two ${\mathds{Z}}/2$-graded algebras ${\mathcal{A}}$ and ${\mathcal{B}}$,
let ${\mathcal{A}}\hat\otimes{\mathcal{B}}={\mathcal{A}}\wedge\hat{\mathcal{B}}$ denote the ${\mathds{Z}}/2$-graded
tensor product. For two real finite dimensional vector spaces $V$
and $E$, of dimension $m$ and $n$, with $E$ Euclidean and oriented,
the Berezin integral is the linear map
{\partial}egin{align*}
\int^B&: \Lambda V^* \hat\otimes \Lambda E^* \to \Lambda V^*, \\
\int^B:&\alpha \hat\otimes {\partial}eta\mapsto \frac{(-1)^{\frac{n(n+1)}{2}}}{\pi^\frac{n}{2}}{\partial}eta(e_1,\dots, e_n)\alpha,
\end{align*}
where $\{e_j\}_{j=1}^n$ is an orthonormal base of $E$. Let $A$ be an antisymmetric endomorphism of $E$. Consider the map
\[
{\hat{}} :A\mapsto \hat A=\frac{1}{2} \sum_{j,l=1}^n (e_j,A e_l)
\hat e^j\wedge \hat e^l.
\]
Note that
{\partial}egin{equation}{\mathsf l}abel{pfpf}
\int^B \e^{-\frac{\hat A}{2}}=Pf{\mathsf l}eft(\frac{A}{2\pi}\right),
\end{equation}
and this vanishes if ${\rm dim}E=n$ is odd.
Let $\omega_j$ be the curvature one form over $C_{\alpha}S^{m-1}_{l\sin\alpha}$ associated to the metric $g_j$. Let
${\mathcal{T}}heta$ be the curvature two form of the boundary $S^{m-1}$ (with radius 1) and the standard Euclidean metric. Let
$\tensor{(\omega_j)}{^{a}_{b}}$ denote the entry in row $a$ and column $b$ of the matrix of one forms $\omega_j$.
Then, we introduce the following quantities (see \cite{BM} equations (1.8) and (1.15))
{\partial}egin{equation}{\mathsf l}abel{pippo}{\partial}egin{aligned}
\mathcal{S}_j&=\frac{1}{2}\sum_{k=1}^{m-1}\tensor{(\omega_j-\omega_0)}{^{r}_{\theta_k}}\hat e^{\theta_k},\\
\hat \Omega&=\mathcal{R}^{T C_{\alpha}S^{m-1}_{l\sin\alpha}}|S^{m-1}_{la}=
\frac{1}{2}\sum_{k,l=1}^{m-1}\tensor{\Omega}{^{\theta_k} _{\theta_l}}
\hat e^{\theta_k}\wedge \hat e^{\theta_l}\\
\mathcal{R}&=\hat {\mathcal{T}}heta=\frac{1}{2}\sum_{k,l=1}^{m-1}\tensor{{\mathcal{T}}heta}{^{\theta_k}_{\theta_l}} \hat e^{\theta_k}\wedge \hat e^{\theta_l}.\\
\end{aligned}
\end{equation}
Direct calculations starting from the metrics $g_j$ allow us to obtain explicit formulas for all these forms. The calculations in the present case are a slight generalization of the calculations presented in the proof of Lemma 2 of \cite{HMS}, and we refer to that work for further details. We find that the nonzero entries of the matrices appearing in equation (\ref{pippo}) are
{\partial}egin{align*}
\tensor{(\omega_1 - \omega_0)}{^{r}_{\theta_i}} &= - a \prod^{m-1}_{j=i+1} \sin{\theta_j} d\theta_i,\\
\tensor{\Omega}{^{\theta_i}_{\theta_k}}&= (1-a^{2})\prod_{j=i+1}^{k} \sin{\theta_j} \prod_{s=k+1}^{m-1}
\sin^{2}{\theta_s} d\theta_i \wedge d\theta_k, \hspace{20pt}i<k,\\
\tensor{{\mathcal{T}}heta}{^{\theta_i}_{\theta_k}}&= \prod_{j=i+1}^{k} \sin{\theta_j} \prod_{s=k+1}^{m-1} \sin^{2}{\theta_s}
d\theta_i \wedge d\theta_k, \hspace{20pt}i<k.
\end{align*}
Note that for $i<k$
\[
\tensor{((\omega_1 - \omega_0)^2)}{^{\theta_k}_{\theta_i}} = -a^{2} \prod_{j=i+1}^{k}
\sin{\theta_j} \prod^{m-1}_{s=k+1} \sin^{2}{\theta_s} d\theta_i \wedge d\theta_k.
\]
Then, recalling $\mathcal{R} = \hat\Omega - 2 \mathcal{S}_1^2$ by equation (1.16) of \cite{BM}, it is easy to see that
\[
\mathcal{R} =-\frac{2}{a^2} \mathcal{S}_1^2.
\]
Following \cite{BM}, equation (1.17), we define
{\partial}egin{equation}{\mathsf l}abel{ebm1}
B(\nabla_j^{T C_{\alpha}S^{m-1}_{l\sin\alpha}})=\frac{1}{2}\int_0^1\int^B
\e^{-\frac{1}{2}\mathcal{R}-u^2 \mathcal{S}_j^2}\sum_{k=1}^\infty \frac{1}{{\Gamma}amma{\mathsf l}eft(\frac{k}{2}+1\right)}u^{k-1}
\mathcal{S}_j^k du.
\end{equation}
From this definition it follows that $B(\nabla_0^{T C_{\alpha}S^{m-1}_{l\sin\alpha}})$ vanishes identically, since
$\mathcal{S}_0$ does. It remains to evaluate $B(\nabla_1^{T C_{\alpha}S^{m-1}_{l\sin\alpha}})$. Equation
(\ref{ebm1}) gives
{\partial}egin{align*}
B(\nabla_1^{T C_{\alpha}S^{m-1}_{l\sin\alpha}})&=\frac{1}{2}\int_0^1\int^B \e^{(\frac{1}{a^2}-u^2)
\mathcal{S}_1^2}\sum_{k=1}^\infty
\frac{1}{{\Gamma}amma{\mathsf l}eft(\frac{k}{2}+1\right)}u^{k-1} \mathcal{S}_1^k du\\
&=\frac{1}{2}\int^B \sum_{j=0,k=1}^\infty
\frac{1}{j!{\Gamma}amma{\mathsf l}eft(\frac{k}{2}+1\right)}\int_0^1 {\mathsf l}eft(\frac{1}{a^2}-u^2\right)^ju^{k-1} d u \mathcal{S}_1^{k+2j} \\
&=\frac{1}{2}\int^B \sum_{j=0,k=1}^\infty \frac{1}{j!{\Gamma}amma{\mathsf l}eft(\frac{k}{2}+1\right)} \sum_{h=0}^{j}
{\partial}inom{j}{h} \frac{(-1)^{h}}{(2h+k)a^{2(j-h)}} \mathcal{S}_1^{k+2j}.
\end{align*}
Since the Berezin integral vanishes identically whenever $k+2j\not=m-1$, we obtain
{\partial}egin{equation}{\mathsf l}abel{epe1}
{\partial}egin{aligned}
B&(\nabla_1^{T C_{\alpha}S^{m-1}_{l\sin\alpha}})\\
&=\frac{1}{2} \sum_{j=0}^{[\frac{m}{2} -1]}
\frac{1}{j!{\Gamma}amma{\mathsf l}eft(\frac{m-2j+1}{2}\right)} \sum_{h=0}^{j} {\partial}inom{j}{h}
\frac{(-1)^{h}}{(m-2(j-h)-1)a^{2(j-h)}}\int^B \mathcal{S}_1^{m-1}.
\end{aligned}
\end{equation}
Now consider the two cases of even and odd $m$ independently. First, assume $m=2p+1$ ($p{\gamma}eq 0$). Then, using equation
(\ref{pfpf}), equation (\ref{epe1}) gives
{\partial}egin{align*}
B(\nabla_1^{T C_{\alpha}S^{2p}_{l\sin\alpha}})
&=\frac{1}{4} \sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} {\partial}inom{j}{h}
\frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} \int^B \mathcal{S}_1^{2p}\\
&=\frac{1}{4} \sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} {\partial}inom{j}{h}
\frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} \int^B \frac{(-a^2)^p}{2^p}\mathcal{R}^{p}\\
&=\frac{a^{2p}}{4} \sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} {\partial}inom{j}{h}
\frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} \int^B \e^{-\frac{\mathcal{R}}{2}}\\
&=\frac{a^{2p}}{4} \sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} {\partial}inom{j}{h}
\frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} Pf{\mathsf l}eft(\frac{{\mathcal{T}}heta}{2\pi}\right)\\
&=\frac{a^{2p}}{4} \sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} {\partial}inom{j}{h}
\frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} e(S^{2p},g_E)\\
\end{align*}
where $e(S^{2p},g_E)$ is the Euler class of $(S^{2p},g_E)$, and we use the fact that
\[
e(S^{2p}_l,g_l)= Pf{\mathsf l}eft(\frac{{\mathcal{T}}heta}{2\pi}\right)=\int^{B} \exp(-\frac{\hat{\mathcal{T}}heta}{2}).
\]
Therefore,
{\partial}egin{align*}
\frac{1}{2}\int_{S^{2p}_{l\sin\alpha}}& B(\nabla_1^{T C_{\alpha}S^{2p}_{l\sin\alpha}})\\
&= \frac{a^{2p}}{8}
\sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} {\partial}inom{j}{h}
\frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} \int_{S^{2p}_{la}} e(S^{2p}_{la},g_E)\\
&= \frac{a^{2p}}{8} \sum_{j=0}^{[p-\frac{1}{2}]} \frac{1}{j!(p-j)!} \sum_{h=0}^{j} {\partial}inom{j}{h}
\frac{(-1)^{h}}{(p-j+h)a^{2(j-h)}} \chi(S^{2p}_{la}).
\end{align*}
Second, assume $m=2p$ ($p{\gamma}eq 1$). Then, equation (\ref{epe1}) gives
{\partial}egin{align*}
B(\nabla_1&^{T C_{\alpha}S^{2p-1}_{l\sin\alpha}})\\
&=\frac{1}{2} \sum_{j=0}^{p -1} \frac{1}{j!{\Gamma}amma{\mathsf l}eft(p-j +
\frac{1}{2}\right)} \sum_{h=0}^{j} {\partial}inom{j}{h} \frac{(-1)^{h}}{(2(p-j+h)-1)a^{2(j-h)}} \int^B
\mathcal{S}_1^{2p-1}.
\end{align*}
Now we evaluate $\int^B \mathcal{S}_1^{2p-1}$. Recalling that
\[
\mathcal{R} =-\frac{2}{a^2} \mathcal{S}_1^2,
\]
we obtain that
{\partial}egin{align*}
\int^B \mathcal{S}_1^{2p-1} &= \int^{B} \mathcal{S}_1 \mathcal{S}_1^{2p-2},\\
&= \frac{(-1)^{p-1} a^{2p-2}}{2^{p-1}}\int^{B} \mathcal{S}_1 \mathcal{R}^{p-1}
\end{align*}
and using the explicit definitions of these forms given in equation (\ref{pippo}), we have
{\partial}egin{align*}
\int^B \mathcal{S}_1^{2p-1} =& \frac{(-1)^{p-1} a^{2p-2}}{2^{2p-1}}\int^{B}
{\mathsf l}eft(\sum_{k=1}^{2p-1}\tensor{(\omega_1-\omega_0)}{^{r}_{\theta_k}} {\hat e^{\theta_k}}\right)
{\mathsf l}eft(\sum_{i,j=1}^{2p-1}\tensor{{\mathcal{T}}heta}{^{\theta_i}_{\theta_j}} {\hat e^{\theta_i}}\wedge {\hat
e^{\theta_j}}\right)^{p-1}\\
=&\frac{(-1)^{p} a^{2p-1}}{2^{2p-1}} c_B \\
&\times \sum_{\substack{\sigma \in S_{2p}\\ \sigma(1) = 1}} {\rm sgn}(\sigma)
\tensor{(\omega_1-\omega_0)}{^{1}_{\sigma(2)}} \tensor{(\Omega_0)}{^{\sigma(3)}_{\sigma(4)}} {\mathsf l}dots
\tensor{(\Omega_0)}{^{\sigma(2p-1)}_{\sigma(2p)}}.
\end{align*}
where $c_B=\frac{(-1)^{p(2p-1)}}{\pi^{\frac{2p-1}{2}}}$. Using the same argument as in the final part of the proof of Lemma 2 of \cite{HMS}, we show that
{\partial}egin{align*}
\int^B \mathcal{S}_1^{2p-1} &= c_B \frac{(-1)^{p} a^{2p-1} (2p-1)!}{2^{p-1} 2^p} \prod_{j=2}^{2p-1}
(\sin\theta_{j})^{j-1} d\theta_1 \wedge {\mathsf l}dots \wedge d\theta_{2p-1}.
\end{align*}
Then,
{\partial}egin{align*}
\int_{S^{2p-1}_{la}} \int^B \mathcal{S}_1^{2p-1} &= \frac{(-1)^{p(2p-1)}}{\pi^{\frac{2p-1}{2}}} \frac{(-1)^{p} a^{2p-1}
(2p-1)!}{2^{p-1} 2^p (la)^{2p-1}} {\rm Vol}(S^{2p-1}_{la}) \\
&= \frac{(-1)^{p(2p-1)}}{\pi^{\frac{2p-1}{2}}} \frac{(-1)^{p} a^{2p-1} (2p-1)!}{2^{p-1} 2^p (la)^{2p-1}} \frac{2
\pi^{p} (la)^{2p-1}}{(p-1)!}\\
&=\frac{1}{\pi^{-\frac{1}{2}}} \frac{a^{2p-1} (2p-1)!}{2^{p-1} 2^{p-1} } \frac{1}{(p-1)!}
\end{align*}
and
{\partial}egin{align*}
\frac{1}{2}\int_{S^{2p-1}_{l\sin\alpha}} &B(\nabla_1^{T C_{\alpha}S^{2p-1}_{l\sin\alpha}})\\
=& \sum_{j=0}^{p -1} \frac{1}{j!{\Gamma}amma{\mathsf l}eft(p-j + \frac{1}{2}\right)} \sum_{h=0}^{j}{\partial}inom{j}{h} \frac{(-1)^{h}}{(2(p-j+h)-1)a^{2(j-h)}} \int_{S^{2p-1}_{l\sin\alpha}} \int^B \frac{\mathcal{S}_1^{2p-1}}{4}\\
=& \sum_{j=0}^{p -1} \frac{1}{j!{\Gamma}amma{\mathsf l}eft(p-j + \frac{1}{2}\right)} \sum_{h=0}^{j}
{\partial}inom{j}{h} \frac{(-1)^{h}}{(2(p-j+h)-1)a^{2(j-h)}} \frac{a^{2p-1} (2p-1)!}{\pi^{-\frac{1}{2}} 4^{p}
(p-1)!}\\
=& \sum_{j=0}^{p -1} \frac{2^{p-j}}{j!(2(p-j)-1)!!} \sum_{h=0}^{j} {\partial}inom{j}{h}
\frac{(-1)^{h}}{(2(p-j+h)-1)a^{2(j-h)}}\frac{a^{2p-1} (2p-1)!}{4^{p} (p-1)!}.
\end{align*}
\end{proof}
We have now all the terms appearing in equation (\ref{pop1}). In fact, the Reidemeister torsion of the cone over a sphere was computed in \cite{HMS}, Proposition 2,
\[
{\mathsf l}og\tau(C_\alpha S^{m-1}_{l\sin\alpha})=\frac{1}{2}{\mathsf l}og{\rm Vol}(C_\alpha S^{m-1}_{l\sin\alpha}).
\]
Comparing with the results given in Theorem \ref{t1}, we detect the contribution of the singularity. It is easy to see that the formula in equation (\ref{pop1}) holds for the cone over the circle and over the $3$-sphere, while a contribution due to the singularity appears in the case of the $2$-sphere. This motivates the following conjecture, which is a theorem for $p<3$.
{\partial}egin{conj} {\mathsf l}abel{c1} The analytic torsion of the cone $C_\alpha S^{2p-1}_{l\sin\alpha}$, of angle $\alpha$, and length $l>0$, over the odd dimensional sphere $S^{2p-1}$, with the standard metric induced by the immersion in ${\mathds{R}}^{m+1}$, and absolute boundary conditions is (where $p>0$):
{\partial}egin{align*}
{\mathsf l}og T(C_\alpha &S^{2p-1}_{l\sin\alpha})=\frac{1}{2}{\mathsf l}og {\rm Vol} (C_\alpha S^{2p-1}_{l\sin\alpha})\\
+& \sum_{j=0}^{p -1} \frac{2^{p-j}}{j!(2(p-j)-1)!!} \sum_{h=0}^{j} {\partial}inom{j}{h}
\frac{(-1)^{h}{\rm csc}^{2(j-h)}\alpha}{(2(p-j+h)-1)} \frac{ (2p-1)!\sin^{2p-1}\alpha}{4^{p} (p-1)!}.
\end{align*}
\end{conj}
\section{Appendix A}
{\mathsf l}abel{appendixA}
We give here a formula for a contour integral appearing in the text. The proof is in \cite{Spr3} Section 4.2. Let
$\Lambda_{\theta,c}=\{{\mathsf l}ambda\in{\mathds{C}}~|~|\arg({\mathsf l}ambda-c)|=\theta\}$,
$0<\theta<\pi$, $0<c<1$, $a$ real, then
\[
\int_0^\infty t^{s-1} \frac{1}{2\pi
i}\int_{\Lambda_{\theta,c}}\frac{\e^{-{\mathsf l}ambda
t}}{-{\mathsf l}ambda}\frac{1}{(1-{\mathsf l}ambda)^a}d{\mathsf l}ambda
dt=\frac{{\Gamma}amma(s+a)}{{\Gamma}amma(a)s}.
\]
{\partial}egin{thebibliography}{99}
{\partial}ibitem{BZ} J.-M. Bismut and W. Zhang, {\em An extension of a theorem by Cheeger and M\"{u}ller}, Ast\'{e}risque 205 (1992).
{\partial}ibitem{BM} J. Br\"uning and Xiaonan Ma, {\em An anomaly formula for Ray-Singer metrics on manifolds with boundary}, GAFA 16 (2006) 767-873.
{\partial}ibitem{BS2} J. Br\"uning and R. Seeley, {\em The resolvent expansion for
second order regular singular operators}, J. of Funct. An. 73 (1988) 369-415.
{\partial}ibitem{Che1} J. Cheeger, {\em Analytic torsion and the heat equation}, Ann. Math. 109 (1979) 259-322.
{\partial}ibitem{Che0} J. Cheeger, {\em On the spectral geometry of spaces with conical singularities}, Proc. Nat. Acad. Sci. 76 (1979) 2103-2106.
{\partial}ibitem{Che2} J. Cheeger, {\em Spectral geometry of singular Riemannian spaces}, J. Diff. Geom. 18 (1983) 575-657.
{\partial}ibitem{Che3} J. Cheeger, {\em On the Hodge theory of Riemannian pseudomanifolds}, Proc. Sympos. Pure Math. 36 (1980) 91-146.
{\partial}ibitem{GZ} I. S. Gradshteyn and I. M. Ryzhik, {\em Table of integrals, Series and
Products}, Academic Press, 2007.
{\partial}ibitem{HMS} L. Hartmann, T. de Melo and M. Spreafico, {\em Reidemeister torsion and analytic torsion of discs}, (2008)
{\partial}ibitem{IT} A. Ikeda and Y. Taniguchi, {\it Spectra and eigenforms of the Laplacian on $S\sp{n}$ and $P\sp{n}({\mathds{C}})$},
Osaka J. Math. 15 (1978) 515-546.
{\partial}ibitem{Luc} W. L\"uck, \emph{Analytic and topological torsion for manifolds with boundary and symmetry}, J. Differential Geom. 37 (1993) 263-322.
{\partial}ibitem{Mul} W. M\"{u}ller, {\em Analytic torsion and R-torsion of Riemannian manifolds}, Adv. Math. 28 (1978) 233-305.
{\partial}ibitem{Olv} F. W. J. Olver, {\em Asymptotics and special functions}, AKP, 1997.
{\partial}ibitem{RS} D. B. Ray and I.M. Singer, {\em R-torsion and the Laplacian
on Riemannian manifolds}, Adv. Math. 7 (1971) 145-210.
{\partial}ibitem{Rel} F. Rellich, {\em Die zul\"{a}ssigen Randbedingungen bei den singul\"{a}ren Eigenwert-problem der mathematischen Physik}, Math. Z. 49 (1943/44) 702-723.
{\partial}ibitem{Spr0} M. Spreafico, {\em Zeta function and regularized determinant on projective spaces}, Rocky Mount. Jour. Math. 33 (2003) 1499-1512.
{\partial}ibitem{Spr1} M. Spreafico, {\em On the Bessel zeta function}, Mathematika 51 (2004) 123-130.
{\partial}ibitem{Spr3} M. Spreafico, {\em Zeta function and regularized determinant on a disc and on a cone}, J. Geo. Phys. 54 (2005) 355-371.
{\partial}ibitem{Spr4} M. Spreafico, {\em Zeta invariants for sequences of spectral type, special functions and the Lerch formula}, Proc. Roy. Soc. Edinburgh 136A (2006) 863-887.
{\partial}ibitem{Spr5} M. Spreafico, {\em Zeta invariants for Dirichlet series}, Pacific. J. Math. 224 (2006) 180-199.
{\partial}ibitem{Spr6} M. Spreafico, {\it Determinant for the Laplacian on forms and a torsion type invariant on a cone over the circle}, Far East J. Math. Sc. 29 (2008) 353-368.
{\partial}ibitem{Spr9} M. Spreafico, {\it Zeta invariants for double sequences of spectral type and a generalization of the Kronecker first limit formula}, preprint (2006), arXiv:math/0607816.
{\partial}ibitem{Wat} G. N. Watson, {\em A treatise on the theory of Bessel
functions}, Cambridge University Press, 1922.
\end{thebibliography}
\end{document} |
\begin{document}
\title[Constant slope maps and the Vere-Jones classification]
{Constant slope maps and the Vere-Jones classification}
\author{Jozef Bobok and Henk Bruin}
\date{\today}
\address{Czech Technical University in Prague, FCE, Th{\'a}kurova 7,
166 29 Praha 6,
Czech Republic }
\text{e}mail{[email protected]}
\urladdr{http://mat.fsv.cvut.cz/bobok/}
\address{
Faculty of Mathematics, University of Vienna,
Oskar Morgensternplatz 1, 1090 Wien, Austria}
\text{e}mail{[email protected]}
\urladdr{http://www.mat.univie.ac.at/}
\subjclass[2000]{37E05, 37B40, 46B25}
\keywords{interval map, topological entropy, conjugacy, constant slope}
\begin{abstract}
We study continuous countably piecewise monotone interval maps, and formulate conditions under which these are conjugate to maps of constant slope,
particularly when this slope is given by the topological entropy of the map. We confine our investigation to the Markov case and phrase our conditions in the
terminology of the Vere-Jones classification of infinite matrices.
\text{e}nd{abstract}
\text{\large\bf M}aketitle
\section{Introduction}
For $a,b\in\mbox{$\mathbb{R}$}$, $a<b$, a continuous map $T:[a,b] \to \mbox{$\mathbb{R}$}$
is said to be piecewise monotone
if there are $k\in \mbox{$\mathbb{N}$}$ and points
$a=c_0< c_1<\mbox{$\mathbb{D_{\infty}}$}ots<c_{k-1}<c_k=b$ such that $T$ is
monotone on each $[c_i,c_{i+1}]$, $i=0,\dots,k-1$.
A piecewise monotone map $T$ has {\text{e}m constant slope} $s$ if $|T'(x)| = s$ for all $x \neq c_i$.
The following results are well known for piecewise monotone interval maps:
\begin{Theorem}\cite{Par66,MiThu88}
If $T:[0,1] \to [0,1]$ is piecewise monotone and $h_{top}(T)>0$ then $T$ is semiconjugate via a
continuous non-decreasing onto map $\varphi\colon~[0,1]\to [0,1]$ to a map $S$ of constant
slope $e^{h_{top}(T)}$. The map $\varphi$ is a conjugacy ($\varphi$ is
strictly increasing) if $T$ is transitive.
\text{e}nd{Theorem}
\begin{Theorem}\cite{MiSl80}\label{t:8} If $T$ has a constant slope $s$ then
$h_{top}(T)=\text{\large\bf M}ax\{ 0,\log s\}$.
\text{e}nd{Theorem}
For continuous interval maps with a countably infinite number of pieces of
monotonicity
neither theorem is true --- for examples, see \cite{MiRa05} and
\cite{BoSou11}.
One of the few facts that remains true in the countably piecewise monotone setting is:
\begin{Proposition}\cite{KH95}\label{p:5}
If $T$ is $s$-Lipschitz then $h_{top}(T) \le \text{\large\bf M}ax\{ 0, \log s\}$.
\text{e}nd{Proposition}
A continuous interval map $T$ has {\text{e}m constant slope} $s$ if $|T'(x)| = s$
for all but countably many points.
The question we want to address is when a {\it continuous countably piecewise monotone} interval map $T$ is conjugate to a map of constant slope $\lambda$. Particular attention will be given to the case when a slope is given by the topological entropy of $T$, which we call {\it linearizability}:
\begin{Definition}\label{d:5}
A continuous map $T:[0,1] \to [0,1]$ is said to be {\text{e}m linearizable} if it is conjugate to an {\it interval map} of constant slope $\lambda=e^{h_{top}(T)}$.
\text{e}nd{Definition}
We will confine ourselves to the Markov case, and explore what can be said if only the transition matrix of a countably piecewise monotone map is known
in terms of the Vere-Jones classification \cite{Ver-Jo67}, refined by \cite{Rue03}.
The structure of our paper is as follows.
In Section~\ref{s:2} ({\it ${\mathbb C}PM$: the class of countably piecewise monotone Markov maps}) we
make precise the conditions on continuous interval maps under which we conduct our investigation - the set of all such maps will be denoted by ${\mathbb C}PM$
(for {\text{e}m countably piecewise monotone Markov}). In particular, we introduce a {\text{e}m slack} countable Markov partition of a map and distinguish between {\text{e}m operator}, resp.\ {\text{e}m non-operator type}.
In Section~\ref{s:3} ({\it Conjugacy of a map from ${\mathbb C}PM$ to a map of constant slope}) we rephrase the key equivalence from \cite[Theorem 2.5]{Bo12}: for the sake of completeness we formulate Theorem~\ref{t:5},
which relates the existence of a conjugacy to an ``eigenvalue equation'' \text{e}qref{e:2}, using both classical and slack countable Markov partitions,
see Definition~\ref{def:primary}.
Section~\ref{s:4} ({\it The Vere-Jones Classification}) is devoted to the Vere-Jones classification \cite{Ver-Jo67} that we use as a crucial tool in the most of our proofs in later sections.
In Section~\ref{s:5} ({\it Entropy and the Vere-Jones classification in ${\mathbb C}PM$}) we show in Proposition~\ref{p:7} that the topological entropy of a map in question and (the logarithm of) the Perron value of its transition matrix coincide. Using this fact, we are able to verify in Proposition~\ref{p:10} that all the transition matrices of a map corresponding to all the possible Markov partitions of that map belong to the same class in the Vere-Jones classification; so we can speak about the Vere-Jones classification of a map from ${\mathbb C}PM$.
In Section~\ref{s:6} ({\it Linearizability}) we present the main results of this text. We start with Proposition~\ref{p:3} showing two basic properties of a $\lambda$-solution of equation \text{e}qref{e:2} and Theorem~\ref{t:7} on leo maps, see Definition~\ref{def:leo}. Afterwards we describe conditions under which a local window perturbation - Theorems~\ref{t:6},~\ref{t:9}, resp.\ a global window perturbation - Theorems~\ref{t:3}, \ref{t:10} results to a linearizable map.
In Section~\ref{s:7} ({\it Examples}) various examples illustrating linearizability/conjugacy to a map of constant slope in the Vere-Jones classes are presented.
\text{\large\bf M}edskip
{\bf Acknowledgments:}
We are grateful for the support of the Austrian Science Fund (FWF): project
number P25975,
and also of the Erwin Schr\"odinger International Institute for Mathematical Physics at the University of Vienna, where the final part of this research was carried out.
Furthermore, we would like to thank the anonymous referees for the careful
reading and valuable suggestions.
\section{${\mathbb C}PM$\label{s:2}: the class of countably piecewise monotone Markov maps}
\begin{Definition}\label{def:primary}
A countable Markov partition ${\mathcal P}$ for a continuous map $T\colon~[0,1]\to [0,1]$ consists of closed intervals with the following properties:
\begin{itemize}
\item Two elements of ${\mathcal P}$ have pairwise disjoint interiors
and $[0,1] \setminus \bigcup {\mathcal P}$ is at most countable.
\item The partition ${\mathcal P}$ is finite or countably infinite;
\item $T|_{i}$ is monotone for each $i\in {\mathcal P}$ ({\text{e}m classical Markov partition}) or piecewise monotone for each $i\in {\mathcal P}$; in the latter case we will speak
of {\text{e}m a slack Markov partition}.
\item For every $i,j\in {\mathcal P}$ and every maximal interval $i'\subset i$ of monotonicity of $T$, if $T(i')\cap j^{\circ}\neq \text{e}mptyset$, then $T(i') \supset j$.
\text{e}nd{itemize}
\text{e}nd{Definition}
\begin{Remark} The notion of a slack Markov partition will be useful in later sections of this paper where we will work with {\it window perturbations}.
If $\#{\mathcal P} = \infty$, then the ordinal type of ${\mathcal P}$ need not be ${\text{\large\bf M}athbb N}$ or $\text{\large\bf M}athbb Z$.
\text{e}nd{Remark}
\begin{Definition}\label{def:CPMM}
The class ${\mathbb C}PM$ is the set of continuous interval maps $T:[0,1]\to [0,1]$ satisfying
\begin{itemize}
\item $T$ is {\it topologically mixing}, {\em i.e.,\ } for all open sets $U,V$ there is an $n$ such that $T^m(U)\cap V\neq\text{e}mptyset$ for all $m\ge n$.
\item $T$ admits a countably infinite Markov partition.
\item $h_{top}(T) < \infty$.
\text{e}nd{itemize}
\text{e}nd{Definition}
\begin{Remark} Since $T\in{\mathbb C}PM$ is topologically mixing by definition, it
cannot be constant on any subinterval of $[0,1]$.
\text{e}nd{Remark}
\begin{Definition}\label{def:leo}
A map $T \in {\mathbb C}PM$
is called {\text{e}m leo} ({\text{e}m locally eventually onto})
if for every nonempty open set $U$ there is an $n\in{\text{\large\bf M}athbb N}$
such that $T^n(U)=[0,1]$.
\text{e}nd{Definition}
\begin{Remark}\label{r:4}~
Let $T\colon~[0,1]\to [0,1]$ be a piecewise monotone Markov map, {\em i.e.,\ } such that orbits of turning points and endpoints $\{0,1\}$ are finite. Those orbits naturally determine a {\text{e}m finite} Markov partition for $T$.
This partition can easily be refined, in infinitely many ways,
to {\text{e}m countably infinite} Markov partitions.
If $T$ is topologically mixing and continuous then we will consider
$T$ as an element of ${\mathbb C}PM$.
\text{e}nd{Remark}
\begin{Proposition}\label{p:12} Let $T\in{\mathbb C}PM$ with a Markov partition ${\mathcal P}$. For every pair $i,j\in{\mathcal P}$ satisfying $T(i)\supset j$ there exist a maximal $\kappa=\kappa(i,j) \in {\text{\large\bf M}athbb N}$ and intervals $i_1,\dots,i_{\kappa}\subset i$ with pairwise disjoint interiors such that $T\vert_{i_{\text{e}ll}}$ is monotone and $T(i_{\text{e}ll})\supset j$ for each $\text{e}ll=1,\dots,\kappa$.
\text{e}nd{Proposition}
\begin{proof}
Since $T\in{\mathbb C}PM$ is topologically mixing, it is not constant on any subinterval of $[0,1]$. Fix a pair $i,j\in{\mathcal P}$ with $T(i)\supset j$. Since $T$ is continuous, there has to be at least one but at most a finite number of pairwise disjoint subintervals of $i$ satisfying the conclusion.
\text{e}nd{proof}
For a given $T\in{\mathbb C}PM$ with a Markov partition ${\mathcal P}$, applying Proposition~\ref{p:12} we associate to ${\mathcal P}$ the transition
matrix $M=M(T) = (m_{ij})_{i,j\in {\mathcal P}}$ defined by
\begin{equation}\label{e:3}
m_{ij} = \begin{cases}
\kappa(i,j) & \text{ if } T(i) \supset j,\\
0 & \text{ otherwise.}
\text{e}nd{cases}
\text{e}nd{equation}
If ${\mathcal P}$ is a classical Markov partition of some $T\in{\mathbb C}PM$ then $m_{ij}\in\{0,1\}$ for
each $i,j \in {\mathcal P}$.
\begin{Remark}\label{r:5} For the sake of clarity we will write $(T,{\mathcal P},M)\in{\mathbb C}PM^*$ when a map $T\in{\mathbb C}PM$, a concrete Markov partition ${\mathcal P}$ for $T$ and its transition matrix $M=M(T)$ with respect to ${\mathcal P}$ are assumed.
\text{e}nd{Remark}
For an infinite matrix $M$ indexed by a countable index set ${\mathcal P}$ we can consider the powers $M^n=(m_{ij}(n))_{i,j\in{\mathcal P}}$ of $M$:
\begin{equation}\label{e:11}M^0=E=(\delta_{ij})_{i,j\in {\mathcal P}},
\quad M^n=\left (\sum_{k\in {\mathcal P}}m_{ik}m_{kj}(n-1)\right )_{i,j\in {\mathcal P}}, ~n\in\mbox{$\mathbb{N}$}.\text{e}nd{equation}
\begin{Proposition}\label{p:11}
Let $(T,{\mathcal P},M)\in{\mathbb C}PM^*$.
\begin{itemize}
\item[(i)] For each $n\in\mbox{$\mathbb{N}$}$ and $i,j\in{\mathcal P}$, the entry $m_{ij}(n)$
of $M^n$ is finite.
\item[(ii)] The entry $m_{ij}(n) = m$ if and
only if there are exactly $m$ subintervals $i_1$, $\dots$, $i_{m}$ of $i$ with
pairwise disjoint interiors such that
$T^n(i_k)\supset j$, $k=1,\dots,m$.
\text{e}nd{itemize}
\text{e}nd{Proposition}
\begin{proof}(i) From the continuity of $T$ and the definition of $M$ it follows that the sum
$\sum_{i\in{\mathcal P}}m_{ij}$ is finite for each $j\in{\mathcal P}$, which directly
implies (i).\\
(ii) For $n=1$ this is given by the relation \text{e}qref{e:3} defining the matrix $M$. The induction step follows from \text{e}qref{e:11} of the product of the nonnegative matrices $M$ and $M^{n-1}$.
\text{e}nd{proof}
A matrix $M$ indexed by the elements of ${\mathcal P}$ represents a bounded linear operator ${\mathcal M}$ on the Banach space $\text{e}ll^1=\text{e}ll^1({\mathcal P})$ of summable sequences indexed by ${\mathcal P}$, provided that
the supremum of the columnar sums is finite. Then ${\mathcal M}$ is realized through left multiplication
\begin{align}&{\mathcal M}(v):=\left (\sum_{j\in{\mathcal P}}m_{ij}v_j\right )_{i\in{\mathcal P}},~v\in\text{e}ll^1({\mathcal P}),\nonumber\\&
\label{e:36}\| {\mathcal M} \| =
\sup_j \sum_i m_{ij}.\text{e}nd{align}
The matrix $M^n$ represents the $n$th power ${\mathcal M}^n$ of ${\mathcal M}$ and by Gelfand's formula, the spectral radius $r_{{\mathcal M}}=\lim_{n\to\infty}\| {\mathcal M}^n \|^{\frac1{n}}$.
\begin{Remark}\label{r:3}
If $(T,{\mathcal P},M)\in{\mathbb C}PM^*$ the supremum in \text{e}qref{e:36} is finite if and only if
\begin{equation}\label{e:52}
\text{e}xists~K > 0 \ \forall~y\in [0,1]\colon~\#T^{-1}(y)\le K.
\text{e}nd{equation}
Since this condition does not depend on a concrete choice of ${\mathcal P}$, we will say the map $T$ is of an {\it operator type} when the condition \text{e}qref{e:52} is fulfilled and of a {\it non-operator type} otherwise.
\text{e}nd{Remark}
\section{Conjugacy of a map from ${\mathbb C}PM$ to a map of constant slope}\label{s:3}
This section is devoted to the fundamental observation regarding a possible conjugacy of an element of ${\mathbb C}PM$ to a map of constant slope. It is presented in Theorem~\ref{t:5}.
Let $(T,{\mathcal P},M)\in{\mathbb C}PM^*$. We are interested in positive real numbers $\lambda$ and nonzero nonnegative sequences $(v_i)_{i\in{\mathcal P}}$ satisfying $Mv=\lambda v$, or equivalently
\begin{equation}\label{e:2}
\forall~i\in {\mathcal P}\colon~\sum_{j\in {\mathcal P}}m_{ij}v_j=\lambda~v_i.\text{e}nd{equation}
\begin{Definition}\label{d:3}
A nonzero nonnegative sequence $v=(v_i)_{i\in{\mathcal P}}$ satisfying \text{e}qref{e:2} will be called a {\text{e}m $\lambda$-solution} (for $M$). If in addition $v\in\text{e}ll^1({\mathcal P})$, it will be called a {\text{e}m summable $\lambda$-solution} (for $M$).
\text{e}nd{Definition}
\begin{Remark}Since every $T\in{\mathbb C}PM$ is topologically mixing, any nonzero nonnegative $\lambda$-solution is in fact positive: If $v=(v_i)_{i\in{\mathcal P}}$ solves \text{e}qref{e:2}, $k,j\in{\mathcal P}$ and $v_j>0$ then by Proposition~\ref{p:11}(ii) for some sufficiently large $n$, $\lambda^n v_k\ge m_{kj}(n)v_j>0$.\text{e}nd{Remark}
Let ${\mathbb C}PM_{\lambda}$ denote the class of all maps from ${\mathbb C}PM$ of constant slope $\lambda$, {\em i.e.,\ } $S\in{\mathbb C}PM_{\lambda}$ if $|S'(x)| = \lambda$ for all but countably many points $x\in[0,1]$.
The core of the following theorem has been proved in \cite[Theorem 2.5]{Bo12}. Since we will work with maps from ${\mathbb C}PM$ that are topologically mixing, we use topological conjugacies only - see \cite[Proposition 4.6.9]{ALM00}. The theorem will enable us to change freely between classical/slack Markov partitions of the map in question.
\begin{Theorem}\label{t:5}Let $T\in{\mathbb C}PM$. The following conditions are equivalent.
\begin{itemize}
\item[(i)] For some $\lambda >1$, the map $T$ is conjugate via a continuous increasing onto map $\psi\colon~[0,1]\to [0,1]$ to some map $S\in{\mathbb C}PM_{\lambda}$.
\item[(ii)] For some classical Markov partition ${\mathcal P}$ for $T$ there is a positive summable $\lambda$-solution $u=(u_i)_{i\in {\mathcal P}}$ of equation \text{e}qref{e:2}.
\item[(iii)] For every classical Markov partition ${\mathcal P}$ for $T$ there is a positive summable $\lambda$-solution $u=(u_i)_{i\in {\mathcal P}}$ of equation \text{e}qref{e:2}.
\item[(iv)] For every slack Markov partition ${\mathcal Q}$ for $T$ there is a positive summable $\lambda$-solution $v=(v_i)_{i\in {\mathcal Q}}$ of equation \text{e}qref{e:2}.
\item[(v)] For some slack Markov partition ${\mathcal Q}$ for $T$ there is a positive summable $\lambda$-solution $v=(v_i)_{i\in {\mathcal Q}}$ of equation \text{e}qref{e:2}.
\text{e}nd{itemize}
\text{e}nd{Theorem}
\begin{Remark} Recently, Misiurewicz and Roth \cite{MiRo16} have pointed out that if $v$ is a $\lambda$-solution of equation \text{e}qref{e:2} that is not summable, then the map $T$ is conjugate to a map of constant slope defined on the real line or half-line.\text{e}nd{Remark}
\begin{Remark}\label{r:2}Let $T\in{\mathbb C}PM$ be piecewise monotone with a finite Markov partition. It is well known \cite[Theorem 0.16]{Wal82}, \cite[Theorem 4.4.5]{ALM00} that the corresponding equation \text{e}qref{e:2} has a positive $e^{h_{top}(T)}$-solution which is trivially summable. By Theorem~\ref{t:5} it is also true for any countably infinite Markov partition for $T$.\text{e}nd{Remark}
\begin{proof}[Proof of Theorem~\ref{t:5}]
The equivalence of (i), (ii) and (iii) has been proved in \cite{Bo12}. Since (iv) implies (iii) and (v), it suffices to show that (iii) implies (iv) and (v) implies (ii).
(iii)$\Rightarrow$(iv).~Let us assume that ${\mathcal Q}$ is a slack Markov partition for $T$. Obviously there is a classical partition ${\mathcal P}$ for $T$ which is finer than ${\mathcal Q}$,
{\em i.e.,\ } every element of ${\mathcal P}$ is contained in some element of ${\mathcal Q}$. Using (iii) we can consider a positive summable $\lambda$-solution $u=(u_i)_{i\in {\mathcal P}}$ of equation \text{e}qref{e:2}. Let $v=(v_i)_{i\in {\mathcal Q}}$ be defined as
$$v_i=\sum_{i'\subset i}u_{i'};$$
clearly the positive sequence $v=(v_i)_{i\in {\mathcal Q}}$ is from $\text{e}ll^1({\mathcal Q})$. Denoting $(m^{{\mathcal P}}_{ij})_{i,j\in{\mathcal P}}$ and $(m^{{\mathcal Q}}_{ij})_{i,j\in{\mathcal Q}}$ the transition matrices corresponding to the partitions ${\mathcal P}$, ${\mathcal Q}$, we can write
\begin{align}\label{a:2}
\lambda v_i&=\lambda\sum_{i'\subset i}u_{i'}=\sum_{i'\subset i}\lambda u_{i'}=\sum_{i'\subset i}\sum_{k\in{\mathcal Q}}\sum_{k'\subset k}m^{{\mathcal P}}_{i'k'}u_{k'} \\
&=\sum_{k\in{\mathcal Q}}\left(\sum_{k'\subset k}u_{k'}\right )\left (\sum_{i'\subset i}m^{{\mathcal P}}_{i'k'}\right )=\sum_{k\in{\mathcal Q}}m^{{\mathcal Q}}_{ik}v_k,\nonumber
\text{e}nd{align}
where the equality $m^{{\mathcal Q}}_{ik}=\sum_{i'\subset i}m^{{\mathcal P}}_{i'k'}$ follows from the Markov property of $T$ on ${\mathcal P}$ and ${\mathcal Q}$:
\begin{equation}\label{e:24}
\text{if }T(i')\supset k'\text{ for some }k'\subset k\text{ then also } T(i')\supset k.
\text{e}nd{equation}
So by \text{e}qref{a:2}, for a given slack Markov partition ${\mathcal Q}$ (for $T$) we find a positive summable $\lambda$-solution $v=(v_i)_{i\in {\mathcal Q}}$ of equation \text{e}qref{e:2}.
(v)$\Rightarrow$(ii).~Assume that for some slack Markov partition ${\mathcal Q}$ for $T$ there is a positive summable $\lambda$-solution $v=(v_i)_{i\in {\mathcal Q}}$ of equation \eqref{e:2}. As in the previous part we can consider a classical Markov partition ${\mathcal P}$ finer than ${\mathcal Q}$. Using again property \eqref{e:24} let us put
\begin{equation}\label{e:25}
u_{i'}=\sum_{T(i')\supset j}v_j,~i'\in{\mathcal P}.
\text{e}nd{equation}
Then $u=(u_{i'})_{i'\in {\mathcal P}}$ is positive and we will show that it is a summable $\lambda$-solution of equation \eqref{e:2}. Fix an $i'\in{\mathcal P}$ and, using the property \eqref{e:24}, consider $j\in{\mathcal Q}$ for which $T(i')\supset j$. Then
\begin{equation}\label{e:26}\lambda v_j=\sum_{k\in{\mathcal Q}}m^{{\mathcal Q}}_{jk}v_k=\sum_{j'\subset j}\sum_{T(j')\supset \text{e}ll}v_{\text{e}ll}=\sum_{j'\subset j}m^{{\mathcal P}}_{i'j'} \sum_{T(j')\supset\text{e}ll}v_{\text{e}ll}=\sum_{j'\subset j}m^{{\mathcal P}}_{i'j'} u_{j'},
\text{e}nd{equation}
hence summing \text{e}qref{e:26} through all $j$'s from ${\mathcal Q}$ that are $T$-covered by $i'\in{\mathcal P}$, we obtain with the help of \text{e}qref{e:25},
\begin{equation*}\label{e:27}\lambda u_{i'}=\sum_{T(i')\supset j}\lambda v_j=\sum_{T(i')\supset j}\sum_{j'\subset j}m^{{\mathcal P}}_{i'j'} u_{j'}=\sum_{j'\in{\mathcal P}}m^{{\mathcal P}}_{i'j'} u_{j'}.
\text{e}nd{equation*}
Since by our assumption on $v=(v_i)_{i\in {\mathcal Q}}$ and \eqref{e:25}
\begin{equation*}\label{e:37}
\sum_{i'\in{\mathcal P}}u_{i'}=\sum_{i\in{\mathcal Q}}\sum_{j\in{\mathcal Q}}m^{{\mathcal Q}}_{ij}v_j=
\lambda\sum_{i\in{\mathcal Q}} v_{i}<\infty,
\end{equation*}
the sequence $u=(u_{i'})_{i'\in {\mathcal P}}$ is a summable $\lambda$-solution of equation \eqref{e:2}.
\text{e}nd{proof}
Maps $T\in{\mathbb C}PM$ are continuous, topologically mixing with positive topological entropy.
Thus all possible semiconjugacies described in \cite[Theorem 2.5]{Bo12} will be in fact conjugacies, see \cite[Proposition 4.6.9]{ALM00}.
Many properties hold under the assumption of positive entropy or for {\text{e}m countably piecewise continuous} maps.
One interesting example of a countably piecewise continuous and countably piecewise monotone (still topologically mixing) map will be presented in Section~\ref{s:7}.
However, since the technical details are much more involved and would obscure
the ideas, we confine the proofs to ${\mathbb C}PM$.
\section{The Vere-Jones Classification}\label{s:4}
Let us consider a matrix $M=(m_{ij})_{i,j\in{\mathcal P}}$, where the index set ${\mathcal P}$ is finite or countably infinite. The matrix $M$ will be called
\begin{itemize}\item {\text{e}m irreducible},
if for each pair of indices $i,j$ there exists a positive integer $n$ such
that $m_{ij}(n)>0$, and
\item {\em aperiodic}, if for each index $i\in{\mathcal P}$ the value $\gcd\{\ell\colon~m_{ii}(\ell)>0\}=1$.
\text{e}nd{itemize}
\begin{Remark}
Since $T\in{\mathbb C}PM$ is topologically mixing, its transition matrix $M$ is irreducible and aperiodic.
\text{e}nd{Remark}
In the sequel we follow the approach suggested by Vere-Jones \cite{Ver-Jo67}.
\begin{Proposition}\label{p:2}(i)~Let $M=(m_{ij})_{i,j\in{\mathcal P}}$ be
a nonnegative irreducible aperiodic matrix indexed by a countable index set ${\mathcal P}$. There exists a
common value $\lambda_M$ such that for each $i,j$
\begin{equation}\label{e:13}\lim_{n\to\infty} [m_{ij}(n)]^{\frac{1}{n}}=\sup_{n\in{\tiny \mbox{$\mathbb{N}$}}}[m_{ii}(n)]^{\frac{1}{n}}=\lambda_M.\text{e}nd{equation}
(ii)~For any value $r>0$ and all $i,j$
\begin{itemize}
\item the series $\sum_{n}m_{ij}(n)r^n$ are either all convergent or all divergent;
\item as $n\to\infty$, either all or none of the sequences
$\{m_{ij}(n)r^n\}_{n}$ tend to zero.
\text{e}nd{itemize}
\text{e}nd{Proposition}
\begin{Remark}\label{r:6}The number $\lambda_M$ defined by \text{e}qref{e:13} is often called the {\it Perron value of $M$}. In the whole text we will assume that for a given nonnegative irreducible aperiodic matrix $M=(m_{ij})_{i,j\in{\mathcal P}}$ its Perron value $\lambda_M$ is finite.\text{e}nd{Remark}
\subsection{Entropy, generating functions and the Vere-Jones classes}\label{ss:15}
To a given irreducible aperiodic matrix $M=(m_{ij})_{i,j\in
{\mathcal P}}$ with entries from $\mbox{$\mathbb{N}$}\cup\{0\}$ corresponds a strongly connected directed graph $G=G(M)=({\mathcal P},{\mathcal E}\subset {\mathcal P}\times{\mathcal P})$ containing $m_{ij}$ edges from $i$ to $j$.
The {\text{e}m Gurevich entropy} of $M$ (or of $G=G(M)$) is defined as
\begin{equation*}\label{e:12}
h(G)=h(M) = \sup\{\log r(M') : M' \text{ is a finite submatrix of } M \},
\text{e}nd{equation*}
where $r(M')$ is the largest eigenvalue of the finite transition matrix $M'$.
Gurevich proved that
\begin{Proposition}\cite{Gur69}\label{p:4}
$h(M) = \log \lambda_M$.
\text{e}nd{Proposition}
Since by Proposition~\ref{p:2} the value $R=\lambda_M^{-1}$ is a common radius of convergence of the power series $M_{ij}(z)=\sum_{n\ge 0}m_{ij}(n)z^n$, we immediately obtain for each pair $i,j\in{\mathcal P}$,
\begin{equation*}\label{e:10}M_{ij}(r)\begin{cases}
\in\mbox{$\mathbb{R}$}, & 0\le r<R,\\
=\infty, & r>R.
\text{e}nd{cases}
\text{e}nd{equation*}
It is well known that in $G(M)$
\begin{itemize}
\item $m_{ij}(n)$ equals to the number of paths of length $n$ connecting $i$ to $j$.
\text{e}nd{itemize}
Following \cite{Ver-Jo67}, for each $n\in\mbox{$\mathbb{N}$}$ we will consider the following coefficients:
\begin{itemize}
\item First entrance to $j$: $f_{ij}(n)$ equals the number of paths of length $n$ connecting $i$ to $j$, without appearance of $j$ in between.
\item Last exit of $i$: $\text{e}ll_{ij}(n)$ equals the number of paths of length $n$ connecting $i$ to $j$, without appearance of $i$ in between.
\text{e}nd{itemize}
Clearly $f_{ii}(n)=\text{e}ll_{ii}(n)$ for each $i\in{\mathcal P}$. Also it will be useful to introduce
\begin{itemize}
\item First entrance to ${\mathcal P}'\subset {\mathcal P}$: for a nonempty ${\mathcal P}'\subset{\mathcal P}$ and $j\in{\mathcal P}'$, $g^{{\mathcal P}'}_{ij}(n)$ equals the number of paths of length $n$ connecting $i$ to $j$, without appearance of any element of ${\mathcal P}'$ in between.
\text{e}nd{itemize}
The first entrance to ${\mathcal P}'\subset {\mathcal P}$ will provide us a new type of a generating function used in \text{e}qref{a:500} and its applications.
\begin{Remark}\label{r:1}Let us denote by ${\mathcal P}hi_{ij}$, $\Lambda_{ij}$ the radius of convergence of the power series $F_{ij}(z) = \sum_{n\ge 1} f_{ij}(n) z^n$, $L_{ij}(z) = \sum_{n\ge 1} \text{e}ll_{ij}(n) z^n$. Since $f_{ij}(n)\le m_{ij}(n)$, $\text{e}ll_{ij}(n)\le m_{ij}(n)$ for each $n\in\mbox{$\mathbb{N}$}$ and each $i,j\in{\mathcal P}$, we always have $R\le {\mathcal P}hi_{ij}$, $R\le \Lambda_{ij}$.
\text{e}nd{Remark}
Proposition~\ref{p:1} has been stated in \cite{Rue03}. Since the argument for part (i) presented in \cite{Rue03} is not correct, we offer our own version of its proof.
\begin{Proposition}\label{p:1}\cite[Proposition 2.6]{Rue03} Let $(T,{\mathcal P},M)\in{\mathbb C}PM^*$, consider the graph $G=G(M)$, $R=\lambda_M^{-1}$.
\begin{itemize}
\item[(i)] If there is a vertex $j$ such that $R={\mathcal P}hi_{jj}$ then there exists a strongly connected subgraph $G'\subsetneq G$ such that $h(G')=h(G)$.
\item[(ii)] If there is a vertex $j$ such that $R<{\mathcal P}hi_{jj}$ then for all proper strongly connected subgraphs $G'$ one has $h(G')<h(G)$.
\item[(iii)] If there is a vertex $j$ such that $R<{\mathcal P}hi_{jj}$ then $R<{\mathcal P}hi_{ii}$ for all $i$.
\text{e}nd{itemize}
\text{e}nd{Proposition}
\begin{proof}For the proof of part (ii) see \cite{Rue03}.
Let us prove (i).
Fix a vertex $j\in{\mathcal P}$ for which $R={\mathcal P}hi_{jj}$ and choose arbitrary $i\neq j$. We can write
\begin{equation}\label{e:51}f_{jj}(n)=~_if_{jj}(n)+ ~^if_{jj}(n),\text{e}nd{equation}
where $_if_{jj}(n)$, resp.\ $^if_{jj}(n)$ denotes the number of $f_{jj}$-paths of length $n$ that do not contain $i$, resp.\ contain $i$.\newline
\noindent {\bf I.} If $\limsup_{n\to\infty}[_if_{jj}(n)]^{1/n}=\lambda_M$,
then there is nothing to prove. \newline
\noindent {\bf II.} Assume that $\limsup_{n\to\infty}[_if_{jj}(n)]^{1/n}<\lambda_M$. Then by our assumption and \text{e}qref{e:51}
\begin{equation}\label{e:49}\limsup_{n\to\infty}[^if_{jj}(n)]^{1/n}=\lambda_M.
\text{e}nd{equation}
Let us denote $g_{ij}(n)$ the number of paths of length $n$ connecting $i$ to $j$, without appearance of $i,j$ after the initial $i$ and before the final $j$. If we denote $^{1,j}f_{ii}(n)$ the number of $f_{ii}$-paths of length $n$ connecting $i$ to $i$ with exactly one appearance of $j$ after the initial $i$ and before the final $i$, we can write for $n\ge 2$ (the coefficients $_jm_{ii}(n)$ are defined analogously as $_jf_{ii}(n)$ - compare the proof of Theorem \ref{t:12})
\begin{align}\label{e:50}^if_{jj}(n) &=\sum_{m=2}^n\sum_{k=1}^{m-1}g_{ji}(k)~_jm_{ii}(n-m)g_{ij}(m-k) \\
&=\sum_{m=2}^n {_j}m_{ii}(n-m)\sum_{k=1}^{m-1}g_{ji}(k)~g_{ij}(m-k)\nonumber\\
&=\sum_{m=2}^n {_j}m_{ii}(n-m)~ ^{1,i}f_{jj}(m)
=\sum_{m=2}^n {_j}m_{ii}(n-m)~ ^{1,j}f_{ii}(m)\nonumber.
\text{e}nd{align}
By the formula of \cite[Lemma 4.3.6]{ALM00} and our assumption \text{e}qref{e:49}, for arbitrary $i\in{\mathcal P}\setminus\{j\}$ we obtain from \text{e}qref{e:50} either
\begin{equation}\label{e:5}\limsup_{n}[{_j}m_{ii}(n)]^{1/n}=\lambda_M\text{e}nd{equation}
or
\begin{equation}\label{e:6}\limsup_{n\to\infty}~[^{1,j}f_{ii}(n)]^{1/n}=\lambda_M.
\text{e}nd{equation}
If \text{e}qref{e:5} is fulfilled for some $i$ the existence of a strongly connected subgraph $G'\subsetneq G$ such that $h(G')=h(G)$ immediately follows. Otherwise, since
$$\limsup_{n\to\infty}~[^{1,j}f_{ii}(n)]^{1/n}\le \limsup_{n\to\infty}~[f_{ii}(n)]^{1/n},$$
we get $R={\mathcal P}hi_{ii}$ for each $i\in{\mathcal P}$ and the conclusion follows from \cite[Theorem 2.2]{Sal88}.
The assertion (iii) immediately follows from (i) and (ii).
\text{e}nd{proof}
The behavior of the series $M_{ij}(z)$, $F_{ij}(z)$ for $z=R$ was used in the Vere-Jones classification of irreducible aperiodic matrices \cite{Ver-Jo67}. Vere-Jones originally distinguished $R$-{\it transient}, {\it null $R$-recurrent} and {\it positive $R$-recurrent} case. Later on, the classification was refined by Ruette in \cite{Rue03}, who added {\it strongly positive $R$-recurrent} case. All is summarized in Table 1 which applies independently of the sites $i, j \in {\mathcal P}$ for $M$ irreducible - compare the last row of Table 1 and Proposition~\ref{p:1}. We call corresponding classes of matrices {\it transient, null recurrent, weakly recurrent, strongly recurrent}. The last three, resp.\ two possibilities will occasionally be summarized by '$M$ is recurrent', resp.\ '$M$ is positive recurrent'.
\begin{quote}
\vline
\begin{tabular}{c|c|c|c|c|c|c|c|c|c|c|c|c|c}
\hline
& transient & null & weakly & strongly \\
& & recurrent & recurrent & recurrent\\
\hline
$F_{ii}(R)$ & $< 1$ & $= 1$ & $= 1$ & $= 1$ \\[2mm]
\hline
$F'_{ii}(R)$ & $\le \infty$ & $\infty$ & $< \infty$
& $< \infty$ \\[2mm]
\hline
$M_{ij}(R)$ & $< \infty$ & $= \infty$ & $= \infty$
& $= \infty$ \\[2mm]
\hline
$\lim_{n \to\infty} m_{ij}(n) R^n$ & $=0$ & $=0$ & $\lambda_{ij}\in (0,\infty)$
& $\lambda_{ij}\in (0,\infty)$ \\[2mm]
\hline
for all $i$ & $R = {\mathcal P}hi_{ii}$ & $R={\mathcal P}hi_{ii}$ & $R = {\mathcal P}hi_{ii}$ & $R < {\mathcal P}hi_{ii}$\\[2mm]
\hline
\text{e}nd{tabular}
\vline
\vskip2mm
\centerline{\bf Table 1.}
\text{e}nd{quote}
\subsubsection{Salama's criteria}
There are geometrical criteria - see \cite{Sal88} and also \cite{Rue03} - for cases of the Vere-Jones classification to apply depending on whether the underlying strongly connected directed graph can be enlarged/reduced (in the class of strongly connected directed graphs) without changing the entropy. We will use some of them in Section~\ref{s:7}.
\begin{Theorem}\label{t:1}\cite{Sal88,Rue03}~The following are true:
\begin{itemize}
\item[(i)] A graph $G$ is transient if and only if there is a graph $G'$ such that $G\subsetneq G'$ and $h(G) = h(G')$.
\item[(ii)] $G$ is strongly recurrent if and only if $h(G_0) < h(G)$ for any $G_0\subsetneq G$.
\item[(iii)] $G$ is recurrent but not strongly recurrent if and only if there exists $G_0\subsetneq G$ with $h(G_0) = h(G)$, but $h(G) < h(G_1)$ for every $G_1\supsetneq G$.
\text{e}nd{itemize}
\text{e}nd{Theorem}
\subsubsection{Further useful facts}
In the whole paper we are interested in nonzero nonnegative solutions of equation \text{e}qref{e:2}. Analogously, in the next proposition we consider nonzero nonnegative {\it subinvariant} $\lambda$-solutions $v=(v_i)_{i\in{\mathcal P}}$ for a matrix $M$,
{\em i.e.,\ } satisfying the inequality $Mv\le \lambda v$.
\begin{Theorem}\label{t:2}\cite[Theorem 4.1]{Ver-Jo67} Let $M=(m_{ij})_{i,j\in{\mathcal P}}$ be irreducible. There is no subinvariant $\lambda$-solution for $\lambda<\lambda_M$. If $M$ is transient there are infinitely many linearly independent subinvariant $\lambda_M$-solutions. If $M$ is recurrent there is a unique subinvariant $\lambda_M$-solution which is in fact $\lambda_M$-solution of equation \text{e}qref{e:2} proportional to the vector $(F_{ij}(R))_{i\in{\mathcal P}}$ ($j\in{\mathcal P}$ fixed), $R=\lambda_M^{-1}$. \text{e}nd{Theorem}
A general statement (a slight adaption of \cite[Theorem 2]{Pr64}) on solvability of equation \text{e}qref{e:2} is as follows:
\begin{Theorem}\label{t:12}~Let $M=(m_{ij})_{i,j\in{\mathcal P}}$ be irreducible. The system $Mv=\lambda v$ has a nonzero nonnegative solution $v$ if and only if
\begin{itemize}
\item[(a)]$\lambda=\lambda_M$ and $M$ is recurrent, or
\item[(b)] when either $\lambda>\lambda_M$ or\newline
$\lambda=\lambda_M$ and $M$ is transient,\newline
there is an infinite sequence of indices $K\subset {\mathcal P}$ such that ($z=\lambda^{-1}$)
\begin{equation}\label{e:15}\lim_{j\to\infty}\lim_{k\to\infty,~k\in K}\frac{\sum_{\alphapha=j}^{\infty}m_{i\alphapha}~ M_{\alphapha k}(z)}{\sum_{\alphapha=1}^{\infty}m_{i\alphapha}~ M_{\alphapha k}(z)}=0
\text{e}nd{equation}
\text{e}nd{itemize}
for each $i\in{\mathcal P}$.
\text{e}nd{Theorem}
\begin{proof}
Following Chung \cite{Chu60} we will use the analogues of the taboo probabilities: For $k\in{\mathcal P}$ define $_km_{ij}(1)=m_{ij}$ and for $n\ge 1$,
$$_km_{ij}(n+1)=\sum_{\alphapha\neq k}m_{i\alphapha}~_km_{\alphapha j}(n);$$
clearly, $_km_{ij}(n)$ equals to the number of paths of length $n$ connecting $i$ to $j$ with no appearance of $k$ between. Denote also
$^km_{ij}(n)=m_{ij}(n)-_km_{ij}(n)$ the number of paths of length $n$ connecting $i$ to $j$ with at least one appearance of $k$ between.
The usual convention that $_km_{ij}(0)=\delta_{ij}(1-\delta_{ik})$ will be used. The following identities directly follow from the definitions of the corresponding generating functions - see before Table 1 - or are easy to verify:~For all $i,j,k\in{\mathcal P}$ and $0\le z<R$,
\begin{itemize}
\item[(i)] $M_{ik}(z)=_jM_{ik}(z)+^jM_{ik}(z)$,
\item[(ii)] $_iM_{ik}(z)=L_{ik}(z)$,
\item[(iii)] $^jM_{ik}(z)=M_{ij}(z)L_{jk}(z)$,
\item[(iv)] $M_{ik}(z)=M_{ii}(z)L_{ik}(z)$,
\item[(v)] \cite{Ver-Jo67} for $i\neq k$,
\begin{equation*}\frac{\sum_{\alphapha\le j-1}m_{i\alphapha}M_{\alphapha k}(z)}{M_{ik}(z)}+\frac{\sum_{\alphapha\ge j}m_{i\alphapha}M_{\alphapha k}(z)}{M_{ik}(z)}=\frac{1}{z},\text{e}nd{equation*}
\item[(vi)] \cite{Ver-Jo67} $\sum_{\alphapha\ge 1}m_{i\alphapha}M_{\alphapha i}(z)=\frac{M_{ii}(z)}{z}-\frac{1}{z}$ is finite.
\text{e}nd{itemize}
By \cite[Theorem 2]{Pr64} the double limit \text{e}qref{e:15} can be replaced by
\begin{equation}\label{e:18}\lim_{j\to\infty}\lim_{k\to\infty,~k\in K}\frac{\sum_{\alphapha=j}^{\infty}m_{i\alphapha}~ _iM_{\alphapha k}(1/\lambda)}{_iM_{ik}(1/\lambda)}=0.
\text{e}nd{equation}
Using the identities (i)-(vi) we can write \text{e}qref{e:18} as
\begin{align*}\label{a:1}
A(j,k) &:=\frac{\sum_{\alphapha=j}^{\infty}m_{i\alphapha}~ _iM_{\alphapha k}(z)}{_iM_{ik}(z)}=\frac{\sum_{\alphapha=j}^{\infty}m_{i\alphapha}~ M_{\alphapha k}(z)}{L_{ik}(z)}-\frac{\sum_{\alphapha=j}^{\infty}m_{i\alphapha}~ ^iM_{\alphapha k}(z)}{L_{ik}(z)}\\
&=M_{ii}(z)\frac{\sum_{\alphapha=j}^{\infty}m_{i\alphapha}~ M_{\alphapha k}(z)}{M_{ii}(z)L_{ik}(z)}-\frac{\sum_{\alphapha=j}^{\infty}m_{i\alphapha}~ M_{\alphapha i}(z)L_{ik}(z)}{L_{ik}(z)}\\
&=M_{ii}(z)\frac{\sum_{\alphapha=j}^{\infty}m_{i\alphapha}~ M_{\alphapha k}(z)}{M_{ik}(z)}-\sum_{\alphapha=j}^{\infty}m_{i\alphapha}~ M_{\alphapha i}(z)=:B(j,k).
\text{e}nd{align*}
Since by (vi), $\lim_{j\to\infty}\sum_{\alphapha=j}^{\infty}m_{i\alphapha}~ M_{\alphapha i}(z)=0$,
using (v) we obtain that
$$\lim_{j\to\infty}\lim_{k\to\infty,~k\in K}A(j,k)=0$$
if and only if
$$
\lim_{j\to\infty}\lim_{k\to\infty,~k\in K}B(j,k)=\lim_{j\to\infty}\lim_{k\to\infty,~k\in K}\frac{\sum_{\alphapha=j}^{\infty}m_{i\alphapha}~ M_{\alphapha k}(z)}
{\sum_{\alphapha=1}^{\infty}m_{i\alphapha}~ M_{\alphapha k}(z)}=0.
$$
The conclusion follows from \cite[Theorem 2]{Pr64}.
\text{e}nd{proof}
\begin{Corollary}\label{c:1}If for each $i$, $m_{ij}=0$ except for a finite set of $j$ values, then $Mv=\lambda v$ has a nonzero nonnegative solution if and only if $\lambda\ge \lambda_M$.
\text{e}nd{Corollary}
\subsubsection{Useful matrix operations in the Vere-Jones classes}
In order to be able to modify nonnegative matrices in question we will need the following observation. In some cases it will enable us to produce transition matrices of maps from ${\mathbb C}PM$. Let $E$ be the identity matrix, see
\text{e}qref{e:11}.
\begin{Proposition}\label{p:19}Let $M=(m_{ij})_{i,j\in{\mathcal P}}$ be irreducible. For arbitrary pair of positive integer $k$ and nonnegative integer $\text{e}ll$ consider the matrix $N=kM+\text{e}ll E$. Then
\begin{itemize}
\item[(i)] $\lambda_N=k\lambda_M+\text{e}ll$,
\item[(ii)] if for each $i$, $m_{ij}=0$ except for a finite set of $j$ values, the matrix $N$ belongs to the same class of the Vere-Jones classification as the matrix $M$.
\text{e}nd{itemize}
\text{e}nd{Proposition}
\begin{proof}Both the conclusions clearly hold if $N$ is a multiple of $M$, {\em i.e.,\ } when $\text{e}ll=0$. So to show our statement it is sufficient to verify the case when $N=M+E$.
(i) Since $Mv=\lambda v$ if and only if $Nv=(\lambda+1)v$, property (i) follows from Corollary~\ref{c:1}.
(ii) By our assumption, for each $i$, $n_{ij}=0$ except for a finite set of $j$ values, so Theorem~\ref{t:12} and Corollary~\ref{c:1} can be applied. Notice that for any nonnegative $v$, \begin{equation}\label{e:47}
Mv\le\lambda v\text{ if and only if }Nv\le(\lambda+1)v,\text{e}nd{equation}
so by Theorem~\ref{t:2}, the matrix $M$ is transient, resp.\ recurrent if and only if $N=M+E$ is transient, resp.\ recurrent. In order to distinguish different recurrent cases we will use Table 1. Since by (i) $\lambda_N=\lambda_M+1$, we can write
\begin{equation}\label{a:1}
\frac{n_{11}(n)}{\lambda_N^n} =
\frac{\sum_{k=0}^{n}\binom{n}{k}m_{11}(k)}
{\sum_{k=0}^{n}\binom{n}{k}\lambda_M^k}
= \underbrace{\frac{\sum_{k=0}^{n_1-1}\binom{n}{k}\frac{m_{11}(k)}{\lambda_M^k}\lambda_M^k}
{\sum_{k=0}^{n}\binom{n}{k}\lambda_M^k}}_{U(n,n_1)}
+
\underbrace{\frac{\sum_{k=n_1}^{n}\binom{n}{k}\frac{m_{11}(k)}{\lambda_M^k}\lambda_M^k}
{\sum_{k=0}^{n}\binom{n}{k}\lambda_M^k}}_{V(n,n_1)}.
\text{e}nd{equation}
By \eqref{e:13} $\frac{m_{11}(k)}{\lambda_M^k}\le 1$ for each $k$ and we can put $\mu=\lim_{k\to\infty}\frac{m_{11}(k)}{\lambda_M^k}$. For each $\varepsilon>0$ there exists $n_1 \in \mathbb{N}$ such that $\frac{m_{11}(k)}{\lambda_M^k}\in (\mu-\varepsilon,\mu+\varepsilon)$ whenever $k>n_1$. Then using the fact that
$$
\lim_{n\to\infty}U(n,n_1)=0,
\qquad \lim_{n\to\infty}\frac{\sum_{k=n_1}^{n}\binom{n}{k}\lambda_M^k}{\sum_{k=0}^{n}\binom{n}{k}\lambda_M^k}=1,
$$ we can write for any $\delta>0$ and sufficiently large $n=n(\delta)$:
\begin{equation*}\mu-\varepsilon\le \frac{n_{11}(n)}{\lambda_N^n}\le \delta+\mu+\varepsilon,\end{equation*}
hence $\lim_n\frac{n_{11}(n)}{\lambda_N^n}=\lim_n\frac{m_{11}(n)}{\lambda_M^n}=\mu$. By Table 1, $M$ is null, resp.\ positive recurrent if and only if $N$ is null, resp.\ positive recurrent.
Finally, let $M=(m_{ij})_{i,j\in{\mathcal P}}$ be positive recurrent and assume its irreducible submatrix $K=(k_{ij})_{i,j\in{\mathcal P}'}$ for some ${\mathcal P}'\subset {\mathcal P}$, denote $L=K+E$. Then similarly as above we obtain that $\lambda_N=\lambda_M+1$, resp.\ $\lambda_L=\lambda_K+1$. If $M$ is weakly, resp.\ strongly recurrent, then for some $K$, resp.\ for each $K$ we obtain $\lambda_N=\lambda_L$, resp.\ $\lambda_N>\lambda_L$ and Theorem~\ref{t:1} can be applied.
This finishes the proof for $N=M+E$. Now, the case when $N=M+\text{e}ll E$, $\text{e}ll>1$, can be verified inductively.
\text{e}nd{proof}
\section{Entropy and the Vere-Jones classification in ${\mathbb C}PM$}\label{s:5}
The following statement identifies the topological entropy of a map and the Perron value $\lambda_M$ of its transition matrix.
\begin{Proposition}\label{p:7} Let $(T,{\mathcal P},M)\in{\mathbb C}PM^*$. Then $\lambda_M= e^{h_{top}(T)}$,
and if there is a summable $\lambda$-solution of equation \eqref{e:2} then $\lambda_M\le \lambda$.
\end{Proposition}
\begin{proof}
For the first equality, we start by proving $\lambda_M\le e^{h_{top}(T)}$. We use Proposition~\ref{p:2}(i) and Proposition~\ref{p:11}(ii). By those
statements, $\lambda_M=\lim_n [m_{jj}(n)]^{\frac{1}{n}}$ for any $j\in{\mathcal P}$ and for each sufficiently large $n$, the interval $j$
contains $m_{jj}(n)$ intervals $j_1,\dots,j_{m_{jj}(n)}$ with
pairwise disjoint interiors such that $T^n(j_i)\supset j_{\ell}$ for all $1\le i,\ell\le
m_{jj}(n)$. Clearly, the map $T^n$ has an $m_{jj}(n)$-horseshoe \cite{Mi79} hence $h_{top}(T^n)=nh_{top}(T)\ge \log m_{jj}(n)$ and $e^{h_{top}(T)}\ge
[m_{jj}(n)]^{\frac{1}{n}}$. Since $n$ can be arbitrarily large, the inequality
$\lambda_M\le e^{h_{top}(T)}$ follows.
Now we look at the reverse inequality
$\lambda_M\ge e^{h_{top}(T)}$. A pair $(S,T\vert_{S})$ is a subsystem of $T$ if $S\subset [0,1]$ is closed and $T(S)\subset S$. It has been shown in \cite[Theorem 3.1]{Bo03} that the entropy of $T$ can be expressed as the supremum of entropies of {\em minimal} subsystems. Let us fix a minimal subsystem $(S(\varepsilon),T\vert_{S(\varepsilon)})$ of $T$ for which $h_{top}(T\vert_{S(\varepsilon)})>h_{top}(T)-\varepsilon>0$.
\noindent {\bf Claim.}
{\it There are finitely many elements $i_1,\dots,i_m\in{\mathcal P}$ such that $S(\varepsilon)\subset\bigcup_{j=1}^mi_j^{\circ}$.}
\noindent {\it Proof of Claim.}~Let us denote $P=[0,1]\setminus\bigcup_{i\in{\mathcal P}}i^{\circ}$. Then $P$ is closed, at most countable and $T(P)\subset P$. Assume that $x\in P\cap S(\varepsilon)\neq\text{e}mptyset$. Then $\text{orb}_T(x)\subset P$ which is impossible for $(S(\varepsilon),T\vert_{S(\varepsilon)})$ minimal of positive topological entropy. If $S(\varepsilon)$ intersected infinitely many elements of ${\mathcal P}$ then, since $S(\varepsilon)$ is closed, it would intersect also $P$, a contradiction. Thus, there are finitely many $i_1,\dots,i_m \in {\mathcal P}$ of the required property.\hskip110mm $\blacksquare$
\\[3mm]
Our claim together with Proposition~\ref{p:12} say that connect-the-dots map of $(S(\varepsilon),T\vert_{S(\varepsilon)})$ is piecewise monotone and the finite submatrix $M'$ of $M$ corresponding to the elements $i_1,\dots,i_m$ satisfies $r(M')\ge e^{h_{top}(T\vert_{S(\varepsilon)})}$. Now the conclusion follows from Proposition~\ref{p:4}.
The second statement follows from Theorem~\ref{t:5}, Proposition~\ref{p:5} and the fact that topological entropy is a conjugacy invariant: $\lambda_M=e^{h_{top}(T)}=e^{h_{top}(S)}\le \lambda$.
\text{e}nd{proof}
We would like to transfer the Vere-Jones classification to ${\mathbb C}PM$. That is why it is necessary to be sure that a change of Markov partition for the map in question does not change the Vere-Jones type of its transition matrix. This is guaranteed by the following proposition.
Given $T \in {\mathbb C}PM$, consider the family $({\mathcal P}_{\alphapha})_{\alphapha}$ of {\it all Markov partitions} for $T$. Write $Q_{\alphapha}=[0,1]\setminus\bigcup_{i\in {\mathcal P}_{\alphapha}}i^{\circ}$. The minimal Markov partition ${\mathcal R}$ for $T$ consists of the closures of connected components of $[0,1]\setminus\bigcap_{\alphapha}Q_{\alphapha}$.
\begin{Proposition}\label{p:10}Let $T\in{\mathbb C}PM$ with two Markov partitions ${\mathcal P}$, resp.\ ${\mathcal Q}$ and corresponding matrices $M^{{\mathcal P}}=(m^{{\mathcal P}}_{ij})_{i,j\in{\mathcal P}}$, resp.\ $M^{{\mathcal Q}}=(m^{{\mathcal Q}}_{ij})_{i,j\in{\mathcal Q}}$. The matrices $M^{{\mathcal P}}$ and $M^{{\mathcal Q}}$ belong to the same class of the Vere-Jones classification.\text{e}nd{Proposition}
\begin{proof}Since the map $T$ is topologically mixing, each of the matrices $M^{{\mathcal P}}$, $M^{{\mathcal Q}}$ is irreducible and aperiodic. Moreover, by Proposition~\ref{p:7} the value guaranteed in Proposition~\ref{p:2}(i) equals $e^{h_{top}(T)}$ and so is the same for both the matrices $M^{{\mathcal P}}$, $M^{{\mathcal Q}}$ - denote it $\lambda$.
Let $P=[0,1]\setminus\bigcup_{j\in{\mathcal P}}j^{\circ}$ and $Q=[0,1]\setminus\bigcup_{j\in{\mathcal Q}}j^{\circ}$.
First, let us assume that $P\subset Q$. Fix two elements $j\in{\mathcal P}$, resp.\ $j'\in{\mathcal Q}$ such that $j'\subset j$. Let us consider a path of the length $n$
\begin{equation}\label{e:39}j=j_0\rightarrow^{{\mathcal P}}j_1
\rightarrow^{{\mathcal P}}j_2\mbox{$\mathbb{D_{\infty}}$}ots\rightarrow^{{\mathcal P}}j_n=j\text{e}nd{equation}
with respect to ${\mathcal P}$; by Proposition~\ref{p:12} each interval $j_i$ contains $k_i=k^{{\mathcal P}}(j_i,j_{i+1})$ intervals of monotonicity of $T$ - denote them $\iota_i(1),\dots,\iota_i(k_i)$ - such that $T(x)\notin j_{i+1}$ whenever $x\in j_i\setminus\bigcup_{\ell=1}^{k_i}\iota_i(\ell)$. This implies that
\begin{equation}\label{e:38}\prod_{i=0}^{n-1}k_i
\text{e}nd{equation}
is the number of paths with respect to ${\mathcal P}$ through the same vertices in order given by \text{e}qref{e:39} and, at the same time, it is an upper bound of a number of paths
$$
j'=j'_0\rightarrow^{{\mathcal Q}}j'_1\rightarrow^{{\mathcal Q}}j'_2\mbox{$\mathbb{D_{\infty}}$}ots\rightarrow^{{\mathcal Q}}j'_n=j'
$$
with respect to finer ${\mathcal Q}$ such that $j'_i\subset j_i$ for each $i$. Considering all possible paths in \text{e}qref{e:39} and summing their numbers given by \text{e}qref{e:38}, we obtain
\begin{equation}\label{e:19}
m^{{\mathcal Q}}_{j'j'}(n)\le m^{{\mathcal P}}_{jj}(n)
\end{equation}
for each $n$. On the other hand, since $T$ is topologically mixing and Markov, there is a positive integer $k=k(j,j')$ such that $T^{k}(j')\supset j$. It implies for each $n$,
\begin{equation}\label{e:20}m^{{\mathcal P}}_{jj}(n)\le m^{{\mathcal Q}}_{j'j'}(n+k).\end{equation}
Using \eqref{e:19} and \eqref{e:20}, we can write
$$\sum_{n\ge 0}m^{{\mathcal Q}}_{j'j'}(n)\lambda^{-n}\le \sum_{n\ge 0}m^{{\mathcal P}}_{jj}(n)\lambda^{-n}\le \lambda^k\sum_{n\ge 0}m^{{\mathcal Q}}_{j'j'}(n+k)\lambda^{-n-k}.$$
Hence by the third row of Table 1, $M^{{\mathcal P}}$ is recurrent if and only if $M^{{\mathcal Q}}$ is recurrent. Again from \eqref{e:19} and \eqref{e:20} we can see that
$\lim_nm^{{\mathcal P}}_{jj}(n)\lambda^{-n}$ is positive if and only if $\lim_nm^{{\mathcal Q}}_{j'j'}(n)\lambda^{-n}$ is positive and the fourth row of Table 1 for $R=\lambda^{-1}$ can be applied.
In order to distinguish weak, resp.\ strong recurrence, for a ${\mathcal P}'\subset{\mathcal P}$ let ${\mathcal Q}'\subset{\mathcal Q}$ be such that
\begin{equation}\label{e:14}{\mathcal Q}'=\{j'\in {\mathcal Q}\colon~j'\subset j\text{ for some }j\in{\mathcal P}'\}.\end{equation}
Using \eqref{e:19} and \eqref{e:20} again we can see that the Perron values of the irreducible aperiodic matrices $M^{{\mathcal P}'}$ and $M^{{\mathcal Q}'}$ coincide, hence the Gurevich entropies $h(M^{{\mathcal P}})$, $h(M^{{\mathcal P}'})$ are equal if and only if it is the case for $h(M^{{\mathcal Q}})$, $h(M^{{\mathcal Q}'})$; now Theorem \ref{t:1}(ii),(iii) applies.
Second, if $P\nsubseteq Q$ and $Q\nsubseteq P$, let us consider the partition ${\mathcal R}$, where any element of ${\mathcal R}$ equals the closure of a connected component of the set $I\setminus (P\cup Q)$. The reader can easily verify that ${\mathcal R}$ is a Markov partition for $T$. By the previous, the pairs of matrices $M^{{\mathcal P}}$, $M^{{\mathcal R}}$, resp.\ $M^{{\mathcal R}}$, $M^{{\mathcal Q}}$ belong to the same class of the Vere-Jones classification. So it is true also for the pair $M^{{\mathcal P}}$, $M^{{\mathcal Q}}$.
\end{proof}
\begin{Remark}Let $(T,{\mathcal P},M)\in{\mathbb C}PM^*$. Applying Proposition~\ref{p:10}, in what follows we will call $T$ transient, null recurrent, weakly recurrent or strongly recurrent respectively if it is the case for its transition matrix $M$. The last three, resp.\ two possibilities will occasionally be summarized by `$T$ is recurrent', resp.\ `$T$ is positive recurrent'.
It is well known that if $T$ is piecewise monotone then it is strongly recurrent \cite[Theorem 0.16]{Wal82}.
\end{Remark}
\section{Linearizability}\label{s:6}
In this section we investigate in more detail the set of maps from ${\mathbb C}PM$ that are conjugate to maps of constant slope (linearizable, in particular). Relying on Theorem~\ref{t:5}, Theorem~\ref{t:2} and Proposition~\ref{p:10}, our main tools will be local and global perturbations of maps from ${\mathbb C}PM$ resulting in maps from ${\mathbb C}PM$. Some examples illustrating the results achieved in this section will be presented in Section~\ref{s:7}.
We start with an easy but rather useful observation. Its second part will play a key role in our evaluation using centralized perturbations -- formula \eqref{a:500} and its applications.
\begin{Proposition}\label{p:3}~Let $(T,{\mathcal P},M)\in{\mathbb C}PM^*$. \begin{itemize}\item[(i)] If $T$ is leo then any $\lambda$-solution of \eqref{e:2} is summable.
\item[(ii)]Any $\lambda$-solution of \eqref{e:2} satisfies
\begin{equation*}\forall~\varepsilon\in (0,1/2)\colon~\sum_{j\in {\mathcal P},j\subset (\varepsilon,1-\varepsilon)}v_j<\infty.\end{equation*}
\end{itemize}
\end{Proposition}
\begin{proof}(i)~Since $T$ is leo, for a fixed element $i$ of ${\mathcal P}$,
there is an $n\in\mbox{$\mathbb{N}$}$ such that $T^n(i)=[0,1]$. Then by Proposition~\ref{p:11}(ii), $m_{ij}(n)\ge 1$ for each $j\in {\mathcal P}$.
This implies that any $\lambda$-solution $v=(v_j)_{j\in {\mathcal P}}$ of \eqref{e:2} satisfies
$$
\lambda^nv_i = \sum_{j\in {\mathcal P}}m_{ij}(n) v_j \ge \sum_{j\in {\mathcal P}} v_j,
$$
so $v\in\ell^1({\mathcal P})$.
\noindent (ii)~We assume that $T$ is topologically mixing -- see Definition~\ref{def:CPMM}. For any fixed element $i \in {\mathcal P}$ there is an $n\in\mbox{$\mathbb{N}$}$ such that $T^n(i)\supset (\varepsilon,1-\varepsilon)$: since $T$ is topologically mixing, there exist positive integers $n_1$ and $n_2$ such that $T^{m_1}(i)\cap [0,\varepsilon/2)\neq\emptyset$ for every $m_1\ge n_1$, resp.\ $T^{m_2}(i)\cap (1-\varepsilon/2,1]\neq\emptyset$ for every $m_2\ge n_2$. This implies that the interval $T^n(i)$ contains $(\varepsilon,1-\varepsilon)$ whenever $n\ge\max\{n_1,n_2\}$ -- fix one such $n$. Then $m_{ij}(n)\ge 1$ for any element $j$ of ${\mathcal P}$ such that $j\subset (\varepsilon,1-\varepsilon)$; hence
$$\lambda^nv_i = \sum_{j\in {\mathcal P}}m_{ij}(n) v_j \ge
\sum_{j\in {\mathcal P},j\subset (\varepsilon,1-\varepsilon)}v_j$$
for any $\lambda$-solution $v=(v_j)_{j\in {\mathcal P}}$ of \eqref{e:2}.
\end{proof}
The fundamental conclusion regarding linearizability of a map from ${\mathbb C}PM$ provided by the Vere-Jones theory follows.
\begin{Theorem}\label{t:7} If $T\in{\mathbb C}PM$ is leo and recurrent, then $T$ is linearizable.\end{Theorem}
\begin{proof} By assumption there exists a Markov partition ${\mathcal P}$ for $T$ such that the transition matrix $M=M(T)=(m_{ij})_{i,j\in{\mathcal P}}$ is recurrent. In such a case equation \eqref{e:2} has a $\lambda_M$-solution described in Theorem~\ref{t:2}. Since $T$ is leo, the $\lambda_M$-solution is summable by Proposition~\ref{p:3}(i) and the conclusion follows from Theorem~\ref{t:5}.\end{proof}
\begin{Remark}\label{r:7} In Section~\ref{s:7} we present various examples illustrating Theorem~\ref{t:7}. In particular, we show a strongly recurrent non-leo map of operator type that is not conjugate to any map of constant slope.\end{Remark}
\subsection{Window perturbation}In this subsection we introduce and study two types of perturbations of a map $T$ from ${\mathbb C}PM$: local and global window perturbation.
\subsubsection{Local window perturbation}
\begin{Definition}\label{d:2}
For $S\in{\mathbb C}PM$ with Markov partition ${\mathcal P}$, let $j\in{\mathcal P}$ be such that $S_{\vert j}$ is monotone. We say that $T\in{\mathbb C}PM$ is a {\it window perturbation of $S$ on $j$ (of order $k$, $k\in\mbox{$\mathbb{N}$}$)}, if
\begin{itemize}
\item $T$ equals $S$ on $[0,1]\setminus j^{\circ}$;
\item there is a nontrivial partition $(j_i)_{i=1}^{2k+1}$ of $j$ such that $T(j_i)=S(j)$ and $T\vert_{j_i}$ is monotone for each $i$.\end{itemize}
\end{Definition}
Notice that due to Definition~\ref{d:2} a window perturbation does not change partition ${\mathcal P}$ (but renders it slack). Using a sufficiently fine Markov partition for $S$, its window perturbation $T$ can be arbitrarily close to $S$ with respect to the supremum norm.
In the above definition an element of monotonicity of a partition is used. So, for example we can take ${\mathcal P}$ classical ({\em i.e.,\ } non-slack), or to a given partition ${\mathcal P}'$ and a given maximal interval of monotonicity $i$ of a map we can consider a partition ${\mathcal P}''$ finer than ${\mathcal P}'$ such that $i\in{\mathcal P}''$.
\begin{Proposition}\label{p:14}Let $T\in{\mathbb C}PM$ be a window perturbation of a map $S\in{\mathbb C}PM$. The following is true.
\begin{itemize}
\item[(i)] If $S$ is recurrent then $T$ is strongly recurrent and $R_T<R_S$.
\item[(ii)] If $S$ is transient then $T$ is strongly recurrent for each sufficiently large $k$.
\end{itemize}
\end{Proposition}
\begin{proof}Fix a partition ${\mathcal P}$ for $S$ and let $T$ be a window perturbation of $S$ on $j\in{\mathcal P}$. Applying Proposition~\ref{p:10}, it is sufficient to specify the Vere-Jones class of $T$ with respect to ${\mathcal P}$. Consider generating functions $F^S(z)=F^S_{jj}(z)=\sum_{n\ge 1}f^S(n)z^n,\text{ resp. }F^{T}(z)=F^{T}_{jj}(z)=\sum_{n\ge 1}f^{T}(n)z^n$, corresponding to $S$, resp.\ $T$, with radii of convergence $\Phi_S=\Phi^S_{jj}$, resp.\ $\Phi_T=\Phi^T_{jj}$. Notice that
\begin{equation}\label{e:30}\forall~n\in\mbox{$\mathbb{N}$}\colon~f^T(n)=(2k+1)f^S(n),
\end{equation}
hence $\Phi_S=\Phi_T$.\vskip1mm
(i)~If $S$ is recurrent then by Table 1 and \eqref{e:30},
$$\sum_{n\ge 1}f^S(n)R_S^n=1, \qquad \sum_{n\ge 1}f^T(n)R_S^n=2k+1.$$
Then, since $R_S\le \Phi_S=\Phi_T$,
$$\sum_{n\ge 1}f^T(n)R_{T}^n\le 1<2k+1\le\sum_{n\ge 1}f^T(n)\Phi_T^n,$$
hence $R_{T}<\Phi_T$ and $T$ is strongly recurrent.
(ii)~If $S$ is transient then by Table 1 and \eqref{e:30},
$$s=\sum_{n\ge 1}f^S(n)R_S^n<1,\qquad \sum_{n\ge 1}f^T(n)R_S^n=(2k+1)s.$$
If for a sufficiently large $k$, $(2k+1)s>1$, necessarily $R_{T}<R_S=\Phi_S=\Phi_T$ and $T$ is strongly recurrent by Table 1.
\end{proof}
Let $M$ be a matrix indexed by the elements of some ${\mathcal P}$ and representing a bounded linear operator ${\mathcal M}$ on the Banach space $\ell^1=\ell^1({\mathcal P})$ -- see Section~\ref{s:2}. It is well known \cite[p. 264]{aet.80}, \cite[Theorem 3.3]{aet.80} that for $\lambda>r_{{\mathcal M}}$ the formula
\begin{equation}\label{e:40}\left (\frac{1}{\lambda}M_{ij}\left (\frac{1}{\lambda}\right )=\sum_{n\ge 0}m_{ij}(n)/\lambda^{n+1}\right )_{i,j\in{\mathcal P}}
\end{equation}
defines the resolvent operator $R_{\lambda}({\mathcal M})\colon~\ell^1({\mathcal P})\to\ell^1({\mathcal P})$, the inverse of the operator $${\mathcal M}_{\lambda}=\lambda I-{\mathcal M}.$$
We will repeatedly use this fact when proving our main results. The following theorem implies that in the space of maps from ${\mathbb C}PM$ of operator type an arbitrarily small (with respect to the supremum norm) local change of a map will result in a linearizable map.
\begin{Theorem}\label{t:6}Let $T\in{\mathbb C}PM$ be a window perturbation of order $k$ of a map $S\in{\mathbb C}PM$ of operator type. Then $T$ is linearizable for every sufficiently large $k$.
\end{Theorem}
\begin{proof}We will use the same notation as in the proof of Proposition~\ref{p:14}.
Let us denote by $M^{T(k)}=(m^{T(k)}_{ij})_{i,j\in{\mathcal P}}$ the transition matrix of a considered window perturbation $T(k)$ of $S$, let $\lambda_{T(k)}$ be the value ensured for $M^{T(k)}$ by Proposition~\ref{p:2}, and put $R_{T(k)}=1/\lambda_{T(k)}$. Since $S$ is of operator type, it is also the case for each $T(k)$. Using Proposition~\ref{p:14} and Theorem~\ref{t:2} we obtain that for some $k_0$ the perturbation $T(k_0)$ is recurrent and equation \eqref{e:2} is $\lambda_{T(k_0)}$-solvable:
\begin{equation*}\label{e:34}\forall~i\in {\mathcal P}\colon~\sum_{\ell\in {\mathcal P}}m^{T(k_0)}_{i\ell}~F^{T(k_0)}_{\ell j}(R_{T(k_0)})=\lambda_{T(k_0)}F^{T(k_0)}_{ij}(R_{T(k_0)}),\end{equation*}
where $F^{T(k_0)}_{\ell j}(z)=\sum_{n\ge 1}f^{T(k_0)}_{\ell j}(n)z^n$, $\ell\in{\mathcal P}$.
Since by \eqref{e:30} for each $k$,
$$(2k+1)\sum_{n\ge 1}f^S_{jj}(n)R_{T(k)}^n=\sum_{n\ge 1}f^{T(k)}_{jj}(n)R_{T(k)}^n=1,$$
we can deduce that $(R_{T(k)})_{k\ge 1}$ is decreasing and
\begin{equation}\label{e:33}
\lim_kR_{T(k)}=0.
\end{equation}
By our definition of a window perturbation, for each $i\in{\mathcal P}\setminus\{j\}$,
\begin{equation}\label{e:32}
\forall~\text{order }k~\forall~n\in\mbox{$\mathbb{N}$}\colon~f^{T(k)}_{ij}(n)=f^S_{ij}(n).
\end{equation}
Denote by $r_{k_0}$ the spectral radius of the operator ${\mathcal M}\colon~\ell^1\to\ell^1$ represented by the matrix $M=M^{T(k_0)}$. Using \eqref{e:33} we can consider a $k>k_0$ for which $\lambda_{T(k)}>r_{k_0}$. Then, since the resolvent operator $(\lambda-{\mathcal M})^{-1}$ represented by the matrix \eqref{e:40} is well defined for each real $\lambda>r_{k_0}$ as a bounded operator on $\ell^1$ \cite[p. 264]{aet.80}, we obtain from \eqref{e:32}, Remark~\ref{r:1} and \eqref{e:36}
\begin{equation*}\label{e:35}
\sum_{i\in{\mathcal P}}F^{T(k)}_{ij}(R_{T(k)})=1+\sum_{i\in{\mathcal P},~i\neq j}F^{T(k_0)}_{ij}(R_{T(k)})\le \sum_{i\in{\mathcal P}}M_{ij}(R_{T(k)})<\infty;
\end{equation*}
now since $T(k)$ is recurrent, Theorem~\ref{t:2} and Theorem~\ref{t:5} can be applied.
\end{proof}
Let $(T,{\mathcal P},M)\in{\mathbb C}PM^*$. For any pair $i,j\in{\mathcal P}$ we define the number
$$
n(i,j)=\min\{n\in\mbox{$\mathbb{N}$}\colon~m_{ij}(n)\neq 0\}.
$$
In the corresponding strongly connected directed graph $G=G(M)$, $n(i,j)$ is the length of the shortest path from $i$ to $j$. In particular, such a path
contains neither $i$ nor $j$ inside, so at the same time
$$
\ell_{ij}(n(i,j))\neq 0,~ f_{ij}(n(i,j))\neq 0
$$
and $\ell_{ij}(n)=f_{ij}(n)=0$ for every $n<n(i,j)$. Since
$$
\frac{n(j',i)-n(j',j)}{n(i,j')+n(j',j)}\le \frac{n(j,i)}{n(i,j)}
$$
for every pair $j,j'\in{\mathcal P}$, the suprema
\begin{equation}\label{e:42}
S(j,{\mathcal P}):=\sup_{i\in{\mathcal P}}\frac{n(j,i)}{n(i,j)},~j\in{\mathcal P}
\end{equation}
are either all finite or all infinite. Moreover, we have the following.
\begin{Proposition}\label{p:13}Let $T\in{\mathbb C}PM$ with two Markov partitions ${\mathcal P}$, resp.\ ${\mathcal Q}$. Then $S(k,{\mathcal P})$ is finite for some $k\in{\mathcal P}$ if and only if $S(k',{\mathcal Q})$ is finite for some $k'\in{\mathcal Q}$.
\end{Proposition}
\begin{proof} Let $P=[0,1]\setminus\bigcup_{j\in{\mathcal P}}j^{\circ}$ and $Q=[0,1]\setminus\bigcup_{j\in{\mathcal Q}}j^{\circ}$. First, let us assume that $P\subset Q$. Fix two elements $j\in{\mathcal P}$, $j'\in{\mathcal Q}$ such that $j'\subset j$. Since the map $T$ is topologically mixing, there exists a positive integer $m$ for which $T^m(j')\supset j$. For an $i\in{\mathcal P}$ and an $i'\in{\mathcal Q}$ satisfying $i'\subset i$ we obtain $n(i',j')\ge n(i,j)$ and $n(j',i')\le n(j,i)+m$; hence
\begin{equation}\label{e:53}
(\forall~i\in{\mathcal P})(\forall~i'\in{\mathcal Q},~i'\subset i)\colon~\frac{n(j',i')}{n(i',j')}\le \frac{n(j,i)+m}{n(i,j)}.
\end{equation}
Inequality \eqref{e:53} together with property \eqref{e:42} shows that if $S(k,{\mathcal P})$ is finite for some $k\in{\mathcal P}$ then $S(k',{\mathcal Q})$ is finite for some $k'\in{\mathcal Q}$.
On the other hand, there has to exist an $i''\in{\mathcal Q}$, $i''\subset i$ such that $T^{n(i,j)}(i'')\supset j'$, {\em i.e.,\ } $n(i,j)\ge n(i'',j')$. Since also $n(j,i)\le n(j',i'')$, we can write for $i''\in{\mathcal Q}$
\begin{equation}\label{e:54}
(\forall~i\in{\mathcal P})(\exists~i''\in{\mathcal Q},~i''\subset i)\colon~ \frac{n(j,i)+m}{n(i,j)}\le \frac{n(j',i'')+m}{n(i'',j')}.
\end{equation}
Inequality \eqref{e:54} together with property \eqref{e:42} shows that if $S(k',{\mathcal Q})$ is finite for some $k'\in{\mathcal Q}$ then $S(k,{\mathcal P})$ is finite for some $k\in{\mathcal P}$.
If $P\nsubseteq Q$ and $Q\nsubseteq P$, we can consider the partition for $T$ $${\mathcal R}={\mathcal P}\vee{\mathcal Q}=\{i\cap i'\colon~i\in{\mathcal P},~i'\in{\mathcal Q}\}.$$ Clearly, $R=[0,1]\setminus\bigcup_{j\in{\mathcal R}}j^{\circ}=P\cup Q$ and we can use the above arguments for the pairs ${\mathcal R},{\mathcal P}$ and ${\mathcal R},{\mathcal Q}$; hence the conclusion for the pair ${\mathcal P},{\mathcal Q}$ follows.
\end{proof}
So, in \eqref{e:42}, for fixed $(T,{\mathcal P},M)\in{\mathbb C}PM^*$ and $j\in{\mathcal P}$, we compare the shortest path from $j$ to $i$ (numerator) to the shortest path from $i$ to $j$ (denominator) and take the supremum with respect to $i$. For example, for our map from Subsection~\ref{ss:2} the values \eqref{e:42} are equal to $1$; when $T$ is leo, \eqref{e:42} is finite for every $j\in{\mathcal P}$. Theorem~\ref{t:6} explains the role of a window perturbation in the case of maps of operator type. In Theorem~\ref{t:9} we obtain an analogous statement for maps of non-operator type under the assumption that the quantities in \eqref{e:42} are finite.
\begin{Theorem}\label{t:9}
Let $S\in{\mathbb C}PM$ with a Markov partition ${\mathcal Q}$ be such that the supremum
in \eqref{e:42} is finite for some $j'\in{\mathcal Q}$. Let $T\in{\mathbb C}PM$ be a window perturbation of order $k$ of $S$. Then $T$ is linearizable for every sufficiently large $k$.
\end{Theorem}
\begin{proof}Fix a partition ${\mathcal P}$ for $S$ and $j\in{\mathcal P}$. A perturbation of $S$ on $j$ of order $k\in\mbox{$\mathbb{N}$}$ will be denoted by $T(k)$. By our assumption, Proposition~\ref{p:13} and \eqref{e:42}, the supremum $S(j,{\mathcal P})$ is finite. The numbers $n(j,i),n(i,j)$, $i\in{\mathcal P}$, do not depend on any window perturbation on an element of ${\mathcal P}$, because such a perturbation does not change ${\mathcal P}$; we define $V(n)=\{i\in{\mathcal P}\colon~n(i,j)=n\}$, $c(n)=\max\{n(j,i)\colon~i\in V(n)\}$, $V(n,p)=\{i\in V(n)\colon~n(j,i)=p\}$, $1\le p\le c(n)$. Obviously for every $n$,
\begin{equation}\label{e:43}\frac{c(n)}{n}\le \sup_{i\in{\mathcal P}}\frac{n(j,i)}{n(i,j)}=S(j,{\mathcal P})<\infty.\end{equation}
To simplify our notation, using Proposition~\ref{p:14} we will assume that $S$ is strongly recurrent, so this is also true for $T(k)$. Similarly as in the proof of Proposition~\ref{p:14} we obtain for each $k$,
\begin{equation}\label{e:45}1/\lambda_{T(k)}=R_{T(k)}<R_S=1/\lambda_S< \Phi_S=\Phi_{T(k)}=1/\lambda.\end{equation}
Moreover, as in \eqref{e:33}, the sequence $(R_{T(k)})_{k\ge 1}$ is decreasing and $\lim_kR_{T(k)}=0$, {\em i.e.,\ } $\lim_k\lambda_{T(k)}=\infty$.
Let us show that for each sufficiently large $k$ there is a summable $\lambda_{T(k)}$-solution $v=(v_i)_{i\in {\mathcal P}}$ of equation \eqref{e:2}. Using \eqref{e:32}, we can write for any $\varepsilon>0$, sufficiently large $n_0=n_0(\varepsilon)\in\mbox{$\mathbb{N}$}$ and some positive constants $K,K'$,
\begin{align}\label{a:6}
B&:=\sum_{n\ge n_0}\sum_{i\in{\mathcal P}\setminus\{j\}}f^{T(k)}_{ij}(n)R_{T(k)}^n=\sum_{n\ge n_0}\sum_{i\in{\mathcal P}\setminus\{j\}}f^S_{ij}(n)R_{T(k)}^n \\
\nonumber& \le \sum_{n\ge n_0}\sum_{m\ge n}\sum_{p=1}^{c(n)}\sum_{i\in V(n,p)\setminus\{j\}}\!\!\! \!\!\! \ell^S_{ji}(p)f^S_{ij}(m)R_{T(k)}^m\le \sum_{n\ge n_0}\sum_{m\ge n}\sum_{p=1}^{c(n)}f^S_{jj}(p+m)R_{T(k)}^m \\
\nonumber&\le\sum_{n\ge n_0}\sum_{m\ge n}\sum_{p=1}^{c(n)}(\lambda+\varepsilon)^{p+m}R_{T(k)}^m\le
K\cdot\sum_{n\ge n_0}(\lambda+\varepsilon)^{c(n)}\sum_{m\ge n}\left(\frac{\lambda+\varepsilon}{\lambda_{T(k)}}\right)^m \\
\label{a:7}&\le K'\cdot\sum_{n\ge n_0}\left [\frac{(\lambda+\varepsilon)^{1+\frac{c(n)}{n}}}{\lambda_{T(k)}}\right ]^n.
\end{align}
Since by \eqref{e:45} the value $\lambda$ does not depend on $k$ and $\lim_k\lambda_{T(k)}=\infty$, from \eqref{e:43} it follows that
\begin{align}
\label{a:3}&\frac{(\lambda+\varepsilon)^{1+\frac{c(n)}{n}}}{\lambda_{T(k)}}\le \frac{(\lambda+\varepsilon)^{1+S(j,{\mathcal P})}}{\lambda_{T(k)}}<9/10
\end{align}
for any $k>k_1$.
Clearly the value
$$A=\sum_{n=1}^{n_0-1}\sum_{i\in{\mathcal P}}f^{T(k)}_{ij}(n)R_{T(k)}^n$$
given by a finite number of summands is finite, so taking \eqref{a:6}, \eqref{a:7} and \eqref{a:3} together, using $\sum_{n\ge n_0}f^{T(k)}_{jj}(n)R_{T(k)}^n<F^{T(k)}_{jj}(R_{T(k)})=1$ we obtain
$$\sum_{i\in{\mathcal P}}F^{T(k)}_{ij}(R_{T(k)})=A+(B+1)\le A+1+ K'\cdot\sum_{n\ge n_0}(9/10)^n<\infty$$
whenever $k>k_1$. This finishes the proof.
\end{proof}
\subsubsection{Global window perturbation}
Let $S$ be from ${\mathbb C}PM$. In this part we will consider a perturbation of $S$ with a Markov partition ${\mathcal P}$ consisting of infinitely many window perturbations on elements of ${\mathcal P}$ (and with independent orders) done due to Definition~\ref{d:2}.
\begin{Definition}\label{d:6}A perturbation $T$ of $S$ on ${\mathcal P}'\subset {\mathcal P}$ will be called {\it centralized} if there is an interval $[a,b]$, $a,b\in (0,1)\setminus\bigcup_{i\in{\mathcal P}}i^{\circ}$ such that $\bigcup{\mathcal P}'\subset [a,b]$.
\end{Definition}
For technical reasons we consider also an {\it empty perturbation} ($T=S$) as centralized.
Let $T$ be a global (centralized) perturbation of $S$ on ${\mathcal P}'\subsetneq {\mathcal P}$, and denote ${\mathcal Q}={\mathcal P}\setminus{\mathcal P}'$.
We can write for $j\in{\mathcal P}'$
\begin{align}\label{a:500}
\sum_{i\in{\mathcal Q}}F^T_{ij}(R_T) &=\sum_{i\in{\mathcal Q}}\sum_{n\ge 1}\sum_{k\in{\mathcal P}'\setminus\{j\}}g^{{\mathcal P}'}_{ik}(n)R_T^nF^T_{kj}(R_T)+
\sum_{i\in{\mathcal Q}}\sum_{n\ge 1}g^{{\mathcal P}'}_{ij}(n)R^n_T \\
\nonumber&=\sum_{k\in{\mathcal P}'\setminus\{j\}}F^T_{kj}(R_T)\sum_{i\in{\mathcal Q}}\sum_{n\ge 1}g^{{\mathcal P}'}_{ik}(n)R_T^n+
\sum_{i\in{\mathcal Q}}\sum_{n\ge 1}g^{{\mathcal P}'}_{ij}(n)R^n_T,
\end{align}
where the coefficients $g^{{\mathcal P}'}_{ij}(n)$ were defined before Remark~\ref{r:1}. We use formula \eqref{a:500} to argue in our proofs.
In the next theorem the perturbation $T$ need not be of operator type.
\begin{Theorem}\label{t:3}
Let $(S,{\mathcal P},M)\in{\mathbb C}PM^*$ be recurrent and linearizable. Assume that $T$ is a recurrent centralized perturbation of $S$ on ${\mathcal P}'$. If there are finitely many elements of ${\mathcal P}'$ that are $S$-covered by elements of ${\mathcal P}\setminus{\mathcal P}'$, then $T$ is linearizable.
\end{Theorem}
\begin{proof}Let $k_1,\dots,k_m$ be all elements of ${\mathcal P}'$ that are $S$-covered by elements of ${\mathcal Q}={\mathcal P}\setminus{\mathcal P}'$. Then
\begin{align}\label{a:30}&\forall~k\in{\mathcal P}'\colon~\sum_{i\in{\mathcal Q}}\sum_{n\ge 1}g^{{\mathcal P}'}_{ik}(n)R_T^n\le\sum_{i\in{\mathcal Q}}\sum_{n\ge 1}g^{{\mathcal P}'}_{ik}(n)R_S^n\\
\nonumber &\le \max_{1\le\ell\le m}\sum_{i\in{\mathcal Q}}\sum_{n\ge 1}g^{{\mathcal P}'}_{ik_{\ell}}(n)R_S^n\le K :=\max_{1\le\ell\le m}\sum_{i\in{\mathcal P}}F^S_{ik_{\ell}}(R_S)<\infty.
\end{align}
Here, the last inequality follows from our assumption that the map $S$ is recurrent and linearizable together with Theorem~\ref{t:2} and Theorem~\ref{t:5}.
Using \eqref{a:500}, \eqref{a:30} and Proposition~\ref{p:3}(ii) we obtain
\begin{align*} \sum_{i\in{\mathcal P}}F^T_{ij}(R_T) &=
\sum_{i\in{\mathcal P}'}F^T_{ij}(R_T)+\sum_{i\in{\mathcal Q}}F^T_{ij}(R_T) \\
&\le \sum_{i\in{\mathcal P}'}F^T_{ij}(R_T)+K\cdot\left(1+
\sum_{k\in{\mathcal P}'\setminus\{j\}}F^T_{kj}(R_T)\right )<\infty.
\end{align*}
So by Theorem~\ref{t:2} and Theorem~\ref{t:5} the map $T$ is linearizable.
\end{proof}
In the next theorem the perturbation $T$ need not be of operator type.
\begin{Theorem}\label{t:10}Let $S\in{\mathbb C}PM$ be of operator type. If the transition matrix $M=M(S)$ represents an operator ${\mathcal M}$ of spectral radius $\lambda_S$ then any centralized recurrent perturbation $T$ of $S$ such that $h_{top}(T)>h_{top}(S)$ is linearizable. The entropy assumption is always satisfied when $S$ is recurrent.
\end{Theorem}
\begin{proof}Let $T$ be a centralized perturbation of $S$ on ${\mathcal P}'\subset {\mathcal P}$, and denote ${\mathcal Q}={\mathcal P}\setminus{\mathcal P}'$. From Proposition~\ref{p:7} and our assumption on the topological entropy of $S$ and $T$ we obtain $1/\lambda_T=R_T<R_S=1/\lambda_S$.
We can write for $j\in{\mathcal P}'$
\begin{align}\label{a:5}
\sum_{i\in{\mathcal Q}}F^T_{ij}(R_T)&=\sum_{i\in{\mathcal Q}}\sum_{n\ge 1}\sum_{k\in{\mathcal P}'\setminus\{j\}}g^{{\mathcal P}'}_{ik}(n)R_T^nF^T_{kj}(R_T)+
\sum_{i\in{\mathcal Q}}\sum_{n\ge 1}g^{{\mathcal P}'}_{ij}(n)R^n_T \\
\nonumber&=\sum_{k\in{\mathcal P}'\setminus\{j\}}F^T_{kj}(R_T)\sum_{i\in{\mathcal Q}}\sum_{n\ge 1}g^{{\mathcal P}'}_{ik}(n)R_T^n+
\sum_{i\in{\mathcal Q}}\sum_{n\ge 1}g^{{\mathcal P}'}_{ij}(n)R^n_T \\
\label{a:4}&\le \sum_{k\in{\mathcal P}'\setminus\{j\}}\left (\sum_{i\in{\mathcal Q}}F^S_{ik}(R_T)\right )F^T_{kj}(R_T)
+ \sum_{i\in{\mathcal Q}}F^S_{ij}(R_T)=V,
\end{align}
where the last inequality follows from the fact that $g^{{\mathcal P}'}_{ik}(n)\le f_{ik}(n)$ for each $k\in{\mathcal P}'$ and $n\in\mbox{$\mathbb{N}$}$ -- for the definition of $g^{{\mathcal P}'}_{ik}(n)$, see before Remark~\ref{r:1}. By our assumption, formula \eqref{e:40} represents the resolvent operator $R_{\lambda}({\mathcal M})$ for every $\lambda>\lambda_S$. In particular, $R_{\lambda_T}({\mathcal M})$ is a bounded operator on $\ell^1({\mathcal P})$ \cite[p. 264]{aet.80}, hence with the help of Remark~\ref{r:1} we obtain
\begin{equation*}\label{e:46}
\forall~k\in{\mathcal P}\colon~\sum_{i\in{\mathcal Q}}F^S_{ik}(R_T)<\sum_{i\in{\mathcal P}}F^S_{ik}(R_T)<
\sum_{i\in{\mathcal P}}M^S_{ik}(R_T)\le\lambda_T\| R_{\lambda_T}({\mathcal M})\|
\end{equation*}
and \eqref{a:5}, \eqref{a:4} can be rewritten as
\begin{align}
\label{a:8}\sum_{i\in{\mathcal P}}F^T_{ij}(R_T)&\le\sum_{i\in{\mathcal P}'}F^T_{ij}(R_T)+V \\
\label{a:9}&\le \sum_{i\in{\mathcal P}'}F^T_{ij}(R_T)+\lambda_T\| R_{\lambda_T}({\mathcal M})\|\left (1+\sum_{k\in{\mathcal P}'\setminus\{j\}}F^T_{kj}(R_T)\right )<\infty,
\end{align}
because $\sum_{k\in{\mathcal P}'}F^T_{kj}(R_T)<\infty$ for topologically mixing $T$ by Proposition~\ref{p:3}(ii) and Theorem~\ref{t:2}. The conclusion follows from Theorem~\ref{t:2} and \eqref{a:8}, \eqref{a:9}. It was shown in Proposition~\ref{p:14}(i) that for a recurrent $S$ we always have $h_{top}(T)>h_{top}(S)$.
\end{proof}
In order to apply Theorem~\ref{t:10} let us consider any map $R\in{\mathbb C}PM$ of operator type and fix $\varepsilon>0$. By Theorem~\ref{t:6} there is a strongly recurrent linearizable map $S$ of operator type for which $\| R-S\|<\varepsilon$. Similarly as in \eqref{e:33} we can conclude that the transition matrix of $S$ satisfies the assumption of Theorem~\ref{t:10}. By that theorem, any centralized perturbation $T$ (operator/non-operator) of $S$ is linearizable (such a centralized perturbation $T$ can be taken to satisfy $\| R-T\|<\varepsilon $).
\section{Examples}\label{s:7}
\subsection{Non-leo maps in the Vere-Jones classes}
For some $a,b\in\mbox{$\mathbb{N}$}$ consider the matrix $M=M(a,b)=(m_{ij})_{i,j\in{\small\mathbb{Z}}}$ given as
\begin{equation}\label{e:31}
M(a,b) = \begin{pmatrix}
\ddots & \ddots &\ddots & & & & & & \\
\ddots & a & 0 & b & 0 & & & & \\
& 0 & a & 0 & b & 0 & & & \\
& & 0 & a & 0 & b & 0 & & \\
& & & 0 & a & 0 & b & 0 & \\
& & & & 0 & a & 0 & b & \ddots \\
& & & & & & \ddots & \ddots & \ddots
\text{e}nd{pmatrix}
\text{e}nd{equation}
Clearly $M$ is irreducible but not aperiodic. It has period $2$, so we consider only $m_{ii}(2n)$. Obviously,
$$
m_{ii}(2n)=\binom{2n}{n}a^nb^n.
$$
Using Stirling's formula, we can write
\begin{equation}\label{e:21}
m_{ii}(2n)\sim \frac{2^{2n}}{\pi^{1/2}n^{1/2}}a^nb^n.
\end{equation}
So, $\lambda_M=2\sqrt{ab}=R^{-1}$. At the same time we can see from \eqref{e:21} that
$$\lim_{n\to\infty}m_{ii}(n)R^n=0 \quad \text{ and } \quad \sum_{n\ge 0}m_{ii}(n)R^n=\infty,
$$
so by Table 1, $M(a,b)$ is null recurrent for each pair $a,b\in\mbox{$\mathbb{N}$}$.
In the following statement we describe a class of maps that are not conjugate to any map of constant slope. In particular they are not linearizable. A rich space of such maps (not only Markov) has been studied by different methods in \cite{MiRo14}.
\begin{Proposition}\label{p:20}Let $a,b,k,\ell\in\mbox{$\mathbb{N}$}$, $k$ even and $\ell$ odd, and consider the matrix $M(a,b)$ defined in \eqref{e:31}. Then $N=kM(a,b)+\ell E$ is a transition matrix of a non-leo map $T$ from ${\mathbb C}PM$. The map $T$ is null recurrent and it is not conjugate to any map of constant slope. The matrix $N$ represents an operator ${\mathcal N}$ on $\ell^1(\mbox{$\mathbb{Z}$})$ and
\begin{equation}\label{e:1}\lambda_N=2k\sqrt{ab}+\ell.\end{equation}
\end{Proposition}
\begin{proof}Notice that the entries of $N$ away from, resp.\ on the diagonal are even, resp.\ odd. Draw a (countably piecewise affine, for example) graph of a map $T$ from ${\mathbb C}PM$ for which $N$ is its transition matrix. Since $M(a,b)$ is null recurrent, the matrix $N$ is also null recurrent by Proposition~\ref{p:19}. Solving the difference equation
\begin{equation}\label{e:44}a~x_{n-1}+b~x_{n+1}=\lambda~x_n, \qquad n\in\mbox{$\mathbb{Z}$},
\end{equation}
one can verify that equation \eqref{e:2} with $M=M(a,b)$ has a $\lambda$-solution if and only if $\lambda\ge \lambda_M=2\sqrt{ab}$ (this follows also from Corollary~\ref{c:1}) and none of these solutions is summable. So by Proposition~\ref{p:19} and Theorem~\ref{t:5}, the map $T$ is not conjugate to any map of constant slope.
\end{proof}
For some $a,b,c\in\mbox{$\mathbb{N}$}$ let $M=M(a,b,c)=(m_{ij})_{i,j\in{\small\mathbb{N}\cup\{0\}}}$ be given by
\begin{equation}\label{e:4}
M(a,b,c) = \begin{pmatrix}
0 &\ c\ &\ 0\ &\ 0\ &\ 0\ & \dots \\
a & 0 & b & 0 & 0 & \dots \\
0 & a & 0 & b & 0 & \\
0 & 0 & a & 0 & b & \\
0 & 0 & 0 & a & 0 & \\
\vdots & & & \ddots & \ddots & \ddots \\
\end{pmatrix}.
\end{equation}
Again, the matrix $M$ is irreducible but not aperiodic. It has period $2$,
so we consider only the coefficients $f_{00}(2n)$, see Subsection~\ref{ss:15}.
In order to find a $\lambda$-solution for $M$ we can use the difference equation \eqref{e:44} for $n\ge 0$ with the additional conditions $x_0=1$ and $x_1=\lambda/c$. Using Corollary~\ref{c:1} and a direct computation one can show:
\begin{Proposition}\label{p:M}
\begin{itemize}
\item[(a)]
For any choice of $a,b,c\in\mbox{$\mathbb{N}$}$,
$$
f_{00}(2n)=c~b^{n-1}a^n\frac{1}{n}\binom{2n-2}{n-1}\sim \frac{c~b^{n-1}a^n4^{n-1}}{\pi^{1/2}n(n-1)^{1/2}},
$$
so that $\Phi_{ii} = (2\sqrt{ab})^{-1}$.
\item[(b)] If $2b > c$ then
$\lambda_M = 2\sqrt{ab}$ and $M$ is transient.
There is a summable $\lambda_M$-solution for $M$ if and only if $a<b$.
\item[(c)] If $2b = c$ then
$\lambda_M = 2\sqrt{ab}$ and $M$ is null recurrent.
There is a summable $\lambda_M$-solution for $M$ if and only if $a<b$.
\item[(d)] If $2b < c$ then
$\lambda_M=c\sqrt{a/(c-b)}>2\sqrt{ab}$, and $M$ is strongly recurrent.
There is a summable $\lambda_M$-solution for $M$ if and only if $a+b<c$.
\end{itemize}
\end{Proposition}
\iffalse
\begin{itemize}
\item[(a)] $\lambda_M=2\sqrt{ab}$ if and only if $2b\ge c$, if $2b<c$ then $\lambda_M=c\sqrt{a/(c-b)}>2\sqrt{ab}$.
\item[(b)] For any choice of $a,b,c\in\mbox{$\mathbb{N}$}$, $$f_{00}(2n)=c~b^{n-1}a^n\frac{1}{n}\binom{2n-2}{n-1}\sigmam \frac{c~b^{n-1}a^n4^{n-1}}{\pi^{1/2}n(n-1)^{1/2}},$$
hence $\lim_{n\to\infty}[f_{00}(2n)]^{1/2n}=2\sqrt{ab}$. The last equality together with (a) show that $M$ is strongly recurrent if and only if $2b<c$, see Table 1.
\item[(c)] If $2b\ge c$ then there is a summable $\lambda_M$-solution for $M$ if and only if $a<b$.
\item[(d)] If $2b< c$ then there is a summable $\lambda_M$-solution for $M$ if and only if $a+b<c$.
\item[(e)] If $2b>c$ then $M$ is transient; if $2b=c$ then $M$ is
null-recurrent; $2b<c$ then $M$ is strongly recurrent.
\text{e}nd{itemize}
\fi
Using Propositions~\ref{p:19} and~\ref{p:M} we can conclude.
\begin{Proposition}\label{p:16}Let $a,b,c\in\mbox{$\mathbb{N}$}$. The following hold:
\begin{itemize}
\item[(i)] The matrix $K=2M(a,b,c)+E$ is a transition matrix of a strongly recurrent non-leo map $T\in{\mathbb C}PM$ if and only if $2b<c$. The map $T$ is not linearizable for $a+b\ge c$.
\item[(ii)] The matrix $L=2M(a,b,b)+E$ is a transition matrix of a transient non-leo map $T\in{\mathbb C}PM$. The map $T$ is linearizable if $a<b$.
\text{e}nd{itemize}
\text{e}nd{Proposition}
\begin{proof} Clearly $K$ and $L$ are transition matrices of non-leo maps from ${\mathbb C}PM$. The property (i), resp. (ii) follows from the above properties (b),(c),(d), resp. (a),(b).
\text{e}nd{proof}
\subsection{Leo maps in the Vere-Jones classes}\label{ss:1}
We have shown in Section 5 that the subset of maps from ${\mathbb C}PM$ that {\it are linearizable} is sufficiently rich in the case of {\it non-leo maps} of operator/non-operator type. In order to refine the whole picture, in this paragraph we show how to detect interesting {\it leo} maps of operator/non-operator type. In the next two collections of examples we will use a simple countably infinite Markov partition for the full tent map and test various possibilities of its global window perturbations.
\subsubsection{Perturbations of the full tent map of operator type}\label{ss:11} For the full tent map $S(x)=1-\vert 1-2x\vert$, $x\in [0,1]$, consider the Markov partition
$${\mathcal P}=\{i_{n}=[1/2^{n+1},1/2^{n}]\colon~n=0,1,\dots\}.
$$ We will
study several global window perturbations of $S$ of the following general form: let $a=(a_n)_{n\ge 1}$ be a sequence of odd positive integers and consider a global window perturbation $T^a$ of $S$ such that
\begin{itemize}\item the window perturbation on $i_n$ is of order $(a_n-1)/2$ ({\em i.e.,\ } if $a_n=1$ we do not perturb $S$ on $i_n$).
\text{e}nd{itemize}
Then using the notation of Section~\ref{s:4} and Remark~\ref{r:1} we can consider generating functions $F(z)=F^a(z)=F^a_{00}(z)=\sum_{n\ge 1}f^a_{00}(n)z^n$ corresponding to the element $i_0$: $f^a_{00}(n)=f(n)$ for each $n$. One can easily verify that
\begin{equation}\label{e:16}f(1)=1,~f(n)=a_1\mbox{$\mathbb{D_{\infty}}$}ots a_{n-1}, n\ge 2.\text{e}nd{equation}
With the help of Proposition~\ref{p:7} we denote $\lambda=\lambda_a$, resp. ${\mathcal P}hi={\mathcal P}hi_a$ the topological entropy of $T=T^a$, resp. radius of convergence of $F^a(z)$; also we put $R=R_a=1/\lambda_a$.
\vskip1mm
\noindent {\it Strongly recurrent:} First of all, consider the set $A(\text{e}ll)=\{1,\dots,\text{e}ll\}$ and the choice
\begin{equation*}\label{c:6}a_n(0)=\begin{cases}
1,~n\in A(\text{e}ll),\\
3,~n\notin A(\text{e}ll).
\text{e}nd{cases}
\text{e}nd{equation*}
Then by \text{e}qref{e:16}, $f(n)=1$ for $n\in A(\text{e}ll)$ and $f(n)=3^{n-\text{e}ll-1}$ for each $n\ge \text{e}ll+1$, hence
\begin{equation}\label{e:29}
\lim_{n\to\infty}[f(n)]^{1/n}=3, \quad {\mathcal P}hi=1/3, \quad
\sum_{n\ge 1}f(n){\mathcal P}hi^n=\infty.
\text{e}nd{equation}
Therefore by Table 1, $R<{\mathcal P}hi$, {\em i.e.,\ } $h_{top}(T)=\log\lambda\in (\log3,\log4)$ - for the upper bound, see \cite{ALM00}. This implies that the map $T_{a(0)}$ is strongly recurrent hence by Theorems~\ref{t:2} and \ref{t:6} also linearizable for any $\text{e}ll$.
\vskip1mm
\noindent {\it Transient:} Denoting $B(1)=\{1,2,3,4\}\cup \bigcup_{k\ge 2}\{3^k+1,3^k+2\}$ let us define
\begin{equation}\label{c:2}a_n(1)=\begin{cases}
1,~n\in B(1),\\
3,~n\notin B(1).
\text{e}nd{cases}
\text{e}nd{equation}
From \text{e}qref{e:16} we obtain $\lim_{n\to\infty}[f(n)]^{1/n}=3$, {\em i.e.,\ } ${\mathcal P}hi=1/3$. Moreover, by direct computation we can verify that
\begin{equation}\label{e:28}
\sum_{n\ge 1}f(n){\mathcal P}hi^n<1\text{ hence also }\sum_{n\ge 1}f(n)R^n<1
\text{e}nd{equation}
since always $R\le {\mathcal P}hi$. It means that the map $T_{a(1)}$ defined by the choice
\text{e}qref{c:2} is transient and by Table 1 from Section 4 in fact $R={\mathcal P}hi$, {\em i.e.,\ } Proposition~\ref{p:7} implies $h_{top}(T)=\log 3$. If we consider in \text{e}qref{c:2} any set $B'(1)\supset B(1)$ such that the inequalities \text{e}qref{e:28} are still satisfied, the same is true for a resulting perturbation $T'$.
\begin{Remark} Misiurewicz and Roth \cite{MiRo16}
observed that the map $T_{a(1)}$ is not conjugate to any map of constant slope. It can be shown that for each choice of a sequence $a=(a_n)_{n\ge 1}$ such that the corresponding $T$ has finite topological entropy the following dichotomy is true: either $T$ is recurrent and then equation \text{e}qref{e:2} has no $\lambda$-solution for $\lambda>e^{h(T)}$, or $T$ is transient and then equation \text{e}qref{e:2} does not have any $\lambda$-solution.\text{e}nd{Remark}
\vskip1mm
\noindent {\it Null recurrent:} The choice \text{e}qref{c:2} was proposed to satisfy $f(n)\sim 3^n/n^2$. Using this fact and \text{e}qref{e:28} we obtain ($R=1/3$)
$$
\sum_{n\ge 1}f(n)R^n<1 \quad \text{ and } \quad \sum_{n\ge 1}nf(n)R^n=\infty.
$$
Let us define inductively a new set $B(2)\subset B(1)$ as follows: put $n_0=0$ and $B(2,0)=B(1)$; assuming that for some $k\in\mbox{$\mathbb{N}$}\cup\{0\}$ we have already defined $n_k$ and $B(2,k)\subset B(1)$, to obtain $B(2,k+1)$ we omit from $B(2,k)$ the least number - denoted $n_{k+1}$- such that the choice
\begin{equation*}\label{c:3}a_{n,k+1}=\begin{cases}
1, &n\in B(2,k+1),\\
3, &n\notin B(2,k+1)
\text{e}nd{cases}
\text{e}nd{equation*}
still gives $\sum_{n\ge 1}f(n)R^n<1$ for corresponding window perturbation of $T_{B(2,k)}$. Clearly $n_k<n_{k+1}$ for each $k$. Let $B(2)=\bigcap_{k\ge 0}B(2,k)$ and consider the global perturbation of $S$ corresponding to $a(2)=(a_n(2))_{n\ge 1}$ given by formula \text{e}qref{c:2} with $B(1)$ replaced by $B(2)$. The set $B(2)$ contains infinitely many units (by \text{e}qref{e:29} any choice $A(\text{e}ll)$ gives $\sum_{n\ge 1}f(A(\text{e}ll);n)R^n=\infty$). Moreover, our definition of $B(2)$ implies $R=L=1/3$,
$$\sum_{n\ge 1}f(n)R^n=1\quad \text{ and }\quad \sum_{n\ge 1}nf(n)R^n=\infty.$$
So, the corresponding perturbation $T_{B(2)}$ of $S$ is null recurrent hence by Theorem~\ref{t:6} it {\it is linearizable}. By Proposition~\ref{p:7}, $h_{top}(T_{B(2)})=\log3$.
\subsubsection{One more collection of perturbations of the full tent map}\label{ss:12}
Expanding on the example of Ruette \cite[Example 2.9]{Rue03}
(see also \cite[page 1800]{Pr64}), we have the following construction.
Let $(a_n)_{n \ge 0}$ be a non-negative integer sequence with $a_0=0$,
let $\lambda > 1$ be a slope determined below in \text{e}qref{eq:lambda},
and let $i_n = [\lambda^{-(n+1)}, \lambda^{-n}]$, $n \ge 0$, be
adjacent intervals converging to $0$.
Also let $j_n$, $n \ge 1$, be adjacent intervals of
length $\lambda^{-(n+1)}(1-\lambda^{-1})(1+2a_n)$ converging to $1$
and such that $\lambda^{-2}(2\lambda-1)$ is the left boundary point of $j_1$.
\begin{figure}[ht]
\unitlength=5mm
\begin{picture}(14,13)(-1,0) \let\ts\textstyle
\put(0,0){\line(1,0){12}}\put(0,0){\line(0,1){12}}
\put(0,12){\line(1,0){12}} \put(12,0){\line(0,1){12}}
\put(0,0){\line(1,1){12}}
\put(0,2){\line(1,0){12}} \put(-1.2,1.9){\tiny$\lambda^{-1}$}
\put(-0.5,7){\tiny$I_0$}
\put(0,1.2){\line(1,0){12}} \put(-1.2,1.1){\tiny$\lambda^{-2}$}
\put(-0.5,1.6){\tiny$I_1$}
\put(0,0.8){\line(1,0){12}} \put(-1.2,0.6){\tiny$\lambda^{-3}$}
\thicklines
\put(0,0){\line(1,6){2}}
\put(2,12){\line(1,-6){1.8}}
\put(3.8,1.2){\line(1,6){0.13}}
\put(3.98, 2){\line(1,-6){0.13}}
\put(4.16,1.2){\line(1,6){0.13}}
\put(4.34, 2){\line(1,-6){0.13}}
\put(4.52,1.2){\line(1,6){0.13}}
\put(4.70, 2){\line(1,-6){0.2}}
\put(4.90,0.8){\line(1,6){0.03}}
\put(5.2,0.85){$\mbox{$\mathbb{D_{\infty}}$}ots$} \put(7.2,0.4){$\mbox{$\mathbb{D_{\infty}}$}ots$}
\text{e}nd{picture}
\caption{The map $T\in{\mathbb C}PM$.}
\label{fig:map1}
\text{e}nd{figure}
We define the interval map $T:[0,1] \to [0,1]$ with slope $\pm \lambda$
by
\begin{eqnarray*}
T(x) = \begin{cases}
\lambda x & \text{ if } x \in [0, \lambda^{-1}], \\
2-\lambda x & \text{ if } x \in [\lambda^{-1}, \lambda^{-2}(2\lambda-1)], \\
\text{composed of } 1+2a_n & \text{ branches of slope } \text{\large\bf M}p \lambda \text{ alternatively } \\
\text{mapping into } i_n & \text{ if } x \in j_n,~ n \ge 1.
\text{e}nd{cases}
\text{e}nd{eqnarray*}
To make sure that $\lim_{x \to 1} T(x) = 0$, we need $\lambda \in (0, \infty)$
to satisfy
\begin{equation}\label{eq:lambda}
\lambda = 2 + \sum_{n \ge 1} 2a_n (1-\lambda^{-1}) \lambda^{-n}.
\text{e}nd{equation}
So, any sequence $(a_n)_{n\ge 0}$ such that \text{e}qref{eq:lambda} has a positive finite solution $\lambda$ leads to the {\it linearizable} map $T\in{\mathbb C}PM_{\lambda}$. One can easily see that ${\mathcal P}=\{i_n\}_{n\ge 0}$ is a Markov (slack) partition for $T$ as defined in Section~\ref{s:2}.
Applying Proposition~\ref{p:12} we associate to ${\mathcal P}$ the transition
matrix
$$
M = M(T) = (m_{ij})_{i,j\in {\mathcal P}} = \begin{pmatrix}
1 & 1+2a_1 & 1+2a_2 & 1+2a_3 & \mbox{$\mathbb{D_{\infty}}$}ots \\
1 & 0 & 0 & 0 & \dots \\
0 & 1 & 0 & & \\
\vdots & 0 & \ddots & \ddots &
\text{e}nd{pmatrix}
$$
and also the corresponding strongly connected directed graph $G=G(M)$:
\begin{figure}[ht]
\unitlength=7mm
\begin{picture}(12,4)(0,0) \let\ts\textstyle
\put(1,1){\circle*{0.3}} \put(3.5,1){\vector(-1,0){2}}
\put(0.8,0){\tiny$i_0$}
\put(4,1){\circle*{0.3}} \put(6.5,1){\vector(-1,0){2}}
\put(3.8,0){\tiny$i_1$}
\put(7,1){\circle*{0.3}} \put(9.5,1){\vector(-1,0){2}}
\put(6.8,0){\tiny$i_2$}
\put(10,1){\circle*{0.3}} \put(12.5,1){\vector(-1,0){2}}
\put(9.8,0){\tiny$i_3$}
\put(0.2,0.7){$\circlearrowright$} \put(0.3,1.5){\tiny $1$}
\put(1.5,1.7){\tiny $1+2a_1$}
\put(1.5,1.5){\vector(1,0){2}}
\put(2,2.7){\tiny $1+2a_2$}
\put(1,1.5){\line(0,1){1}}
\put(1,2.5){\line(1,0){3}}\put(4,2.5){\vector(3,-1){2.5}}
\put(5,3.7){\tiny $1+2a_3$}
\put(0.8,1.5){\line(0,1){2}}
\put(0.8,3.5){\line(1,0){6.2}}\put(7,3.5){\vector(3,-2){2.5}}
\text{e}nd{picture}
\caption{The Markov graph of $T\in{\mathbb C}PM_{\lambda}$;
$1+2a_n$ indicates the number of edges in $G$ from $i_0$ to $i_n$.}
\label{fig:graph1}
\text{e}nd{figure}
In particular, the number of loops of length $n$ from $i_0$
to itself is $f_{00}(n)=1+2a_{n-1}$.
We use the rome technique from \cite{BGMY} (see also \cite[Section 9.3]{BB})
to compute the entropy of this graph:
it is the leading root of the equation
\begin{equation}\label{eq:entropy1}
z = 1+\sum_{n \ge 1} (1+2a_n) z^{-n}.
\text{e}nd{equation}
If we divide this equation by $z$, then we get
$$
1= z^{-1} + \sum_{n \ge 1} (1+2a_n) z^{-(n+1)} = \sum_{n\ge 1} f_{00}(n)z^{-n};
$$
from Table 1 it follows that the graph $G$ (the matrix $M$, the map $T$) is recurrent for any choice of a sequence $(a_n)_{n \ge 0}$ and corresponding finite $\lambda>0$. Using Proposition~\ref{p:7} and comparing equations \text{e}qref{eq:lambda} and \text{e}qref{eq:entropy1} we find that $e^{h_{top}(T)}=\lambda_M=\lambda$.
By Remark~\ref{r:3} the map $T$ is of operator type if and only if $\sup_na_n<\infty$. In this case, by Table 1 and Proposition~\ref{p:7}, ${\mathcal P}hi_{00}=1>1/2\ge R=1/\lambda_M$, so the corresponding map is always strongly recurrent. For the choice $a_n = a^n$ for some fixed integer $a \ge 2$ the map $T$ is of non-operator type. In this case, $\sum_{n\ge 1}f_{00}(n)a^{-n}=\infty$, so $e^{-h_{top}(T)}=1/\lambda_M=R<a^{-1}={\mathcal P}hi_{00}$, hence by Table 1 the map $T$ is still strongly recurrent.
We can also take $a_n = a^{n-\psi_n}$ for some sublinearly growing integer
sequence $(\psi_n)_{n \ge 1}$ chosen such that \text{e}qref{eq:entropy1}
holds for $z = a$, {\em i.e.,\ } $a = 1+ \sum_{n \ge 1} a^{-n} + 2a^{-\psi_n}$.
In this case, ${\mathcal P}hi_{00} = R$ and $\sum_n f_{00}(n) R^n = 1$,
and the system is null-recurrent or weakly recurrent (not strongly
recurrent) depending on whether
$\sum_n n a^{-\psi_n}$ is infinite or finite.
\subsubsection{Transient non-operator example from \cite{BT12}}\label{ss:13}
Although up to now all our main results have been formulated and proved in the context of continuous maps, many statements remain true also for countably piecewise monotone Markov interval maps that are countably piecewise continuous.\footnote{This example can be made continuous by replacing each branch with a tent-map of the same height. The $\lambda$ will be twice as large, and the entropy increases accordingly in that case.}
We will present a countably piecewise continuous, countably piecewise monotone example adapted from \cite{BT12}, where
it is studied in detail for its thermodynamic properties.
Let $(w_k)_{k \ge 0}$ be a strictly decreasing sequence in $[0,1]$ with
$w_0 = 1$ and $\lim_k w_k = 0$. We will consider the partition ${\mathcal P}=\{p_k\}_{k\in\mbox{$\mathbb{N}$}}$, where the interval map $T$ is designed to be linear increasing on each interval
$p_k = [w_k, w_{k-1})$ for $k \ge 2$, $p_1 = [w_1,w_0]$, $T(p_k) = \bigcup_{i\ge k-1}p_i$ for $k \ge 2$ and $T(p_1) = [0,1]$. With a slight modification of our definition from Section~\ref{s:2}, ${\mathcal P}$ is a Markov partition for $T$ and $T$ is leo. Let $M=M(T)$ be the matrix corresponding to ${\mathcal P}$, see below. In order to have constant slope $\lambda$, we need to solve
the recursive relation
$$
\begin{cases}
w_{k+1} = w_k - w_{k-1}/\lambda & \text{ for } k \ge 1; \\
w_0 = 1, \ w_1 = 1-1/\lambda &
\text{e}nd{cases}
$$
The characteristic equation $\alphapha^2 - \alphapha + 1/\lambda = 0$
has real solutions $\alphapha_\pm = \frac12(1\pm \sqrt{1-4/\lambda}) \in (0,1)$
whenever $\lambda \ge 4$. We obtain the solution:
$$
w^4_k = 2^{-k} (1+k/2)\quad \text{ if } \lambda = 4,
$$
and
$$
w^{\lambda}_k = \frac{1-2/\lambda}{2 \sqrt{1-4/\lambda}} \alphapha_+^k +
\frac{2\sqrt{1-4/\lambda}-1+2/\lambda}{2 \sqrt{1-4/\lambda}} \alphapha_-^k \quad
\text{ if } \lambda > 4.
$$
\begin{figure}[ht]
\unitlength=4mm
\begin{picture}(9,11)(-10,-0.5) \let\ts\textstyle
\put(-21,6){$M = \begin{pmatrix}
1 &\ 1\ &\ 1\ & 1 & 1 & \dots \\
1 & 1 & 1 & 1 & 1 & \dots \\
0 & 1 & 1 & 1 & 1 & \\
0 & 0 & 1 & 1 & 1 & \\
0 & 0 & 0 & 1 & 1 & \\
\vdots & & & \ddots & \ddots & \ddots \\
\text{e}nd{pmatrix}$
}
\put(-19,0){
$S^{\lambda}(x):= \lambda(x-w^{\lambda}_k)$ if $x \in p_k$
}
\thinlines
\put(0,0){\line(1,0){10}}\put(0,10){\line(1,0){10}}
\put(0,0){\line(0,1){10}} \put(10,0){\line(0,1){10}}
\put(0,0){\line(1,1){10}}
\thicklines
\put(7.5,0){\line(1,4){2.5}} \put(8.3,-0.9){$\tiny p_1$}
\put(5.5,0){\line(1,5){2}} \put(6.2,-0.9){$\tiny p_2$}
\put(4,0){\line(1,5){1.5}} \put(4.5,-0.9){$\tiny p_3$}
\put(2.9,0){\line(1,5){1.1}} \put(3.0,-0.9){$\tiny p_4$}
\put(2.1,0){\line(1,5){0.8}} \put(1.2,-0.9){$\ldots$}
\put(1.52,0){\line(1,5){0.58}}
\put(1.095,0){\line(1,5){0.425}}
\put(0.791,0){\line(1,5){0.304}}
\put(0.572,0){\line(1,5){0.219}}
\put(0.4138,0){\line(1,5){0.1582}}
\put(0.2974,0){\line(1,5){0.1164}}
\put(0.21464,0){\line(1,5){0.08276}}
\text{e}nd{picture}
\caption{Right: The map $T\colon~[0,1] \to [0,1]$. }\label{fig:Flambda}
\text{e}nd{figure}
It is known that $h_{top}(T) =\log4$ hence by Proposition~\ref{p:7}, $\lambda_M=4$.
If we remove site $p_1$ ({\em i.e.,\ } remove the first row and column) from $M$,
the resulting matrix is $M$ again, so the strongly connected directed graph $G=G(M)$ contains its copy as a proper subgraph and due to Theorem~\ref{t:1}(i), $M$ and also $T$ are transient.
Writing $v^{\lambda}_k = |p_k| = w^{\lambda}_{k-1}-w^{\lambda}_k$, we have found, in accordance with Theorem~\ref{t:2}, a positive summable $\lambda$-solution of
equation \text{e}qref{e:2} for each $\lambda\ge 4$. Summarizing, the map $T$ is conjugate to a map of constant slope $\lambda$ whenever $\lambda\ge 4$. $T$ is also {\it linearizable}, since $\lambda=4=\lambda_M=e^{h_{top}(T)}$.
\subsubsection{Transient non-operator example from \cite{BoSou11}}\label{ss:14}
Let $V=\{v_{i}\}_{i\geq -1}$, $X=\{x_{i}\}_{i\geq 1}$
\noindent $V,X$ converge to $1/2$ and
\noindent $0=v_{-1}=x_0=v_0<x_1<v_1<x_2<v_2<x_3<v_3<\mbox{$\mathbb{D_{\infty}}$}ots$; the interval map
$T=T(V,X):[0,1]\rightarrow [0,1]$ satisfies
\begin{enumerate}
\item[(a)] $T(v_{2i-1})=1-v_{2i-1}$, $i\geq 1$, $T(v_{2i})=v_{2i}$, $i\geq 0$,
\item[(b)] $T(x_{2i-1})=1-v_{2i-3}$, $i\geq 1$, $T(x_{2i})=v_{2i-2},~i\geq 1$,
\item[(c)] $T_{u,v}=\left\vert\frac{T(u)-T(v)}{u-v}\right\vert>1$ for each interval $[u,v]\subset [x_i,x_{i+1}]$,
\item[(d)] $T(1/2)=1/2$ and $T(t)=T(1-t)$ for each $t\in [1/2,1]$.
\text{e}nd{enumerate}
Property (c) holds for our $V,X$ since by
(a),(b), we have $T_{x_i,x_{i+1}}>2$ for each $i\ge 0$.
\begin{figure}[ht]
\unitlength=10mm
\begin{center}
\text{e}psfig{file=ms3uprava.eps,width=5.5cm}
\text{e}nd{center}
\caption{The leo map $T\in\text{\large\bf M}athcal{F}\subset {\mathbb C}PM$.}
\text{e}nd{figure}
Let us denote $\text{\large\bf M}athcal{F}(V,X)$ the set of all continuous interval maps
fulfilling (a)-(d) for a fixed pair $V,X$ and put $\text{\large\bf M}athcal{F}:=\bigcup_{V,X}\text{\large\bf M}athcal{F}(V,X)$. It was shown in \cite{BoSou11} that
\begin{itemize}
\item $\text{\large\bf M}athcal{F}$ is a conjugacy class of maps in ${\mathbb C}PM$.
\item strongly connected directed graph $G=G(M)$ contains its copy as a proper subgraph \cite[Theorem 4.5, Fig. 3]{BoSou11}, so due to Theorem~\ref{t:1}(i), $T$ is transient.
\item the common topological entropy equals $\log9$.
\item equation \text{e}qref{e:2} has a positive summable $\lambda$-solution for each $\lambda\ge 9=e^{h_{top}(T)}$.
\text{e}nd{itemize}
\begin{figure}[ht]
\unitlength=10mm
\begin{center}
\text{e}psfig{file=ms7uprava.eps,width=10cm}
\text{e}nd{center}
\caption{$T\in\text{\large\bf M}athcal{F}$ is conjugate to a map of slope $9$ (a)
and of slope $20$ (b).}
\text{e}nd{figure}
We can factor out the left-right symmetry of this map by using the
semiconjugacy $h(x) = 2|x-\frac12|$, and the factor map $\tilde T$
has transition matrix
$$
M = \begin{pmatrix}
4 &\ 4\ &\ 4\ &\ 4\ &\ 4\ & \dots \\
1 & 4 & 4 & 4 & 4 & \dots \\
0 & 1 & 4 & 4 & 4 & \\
0 & 0 & 1 & 4 & 4 & \\
0 & 0 & 0 & 1 & 4 & \\
\vdots & & & \ddots & \ddots & \ddots \\
\text{e}nd{pmatrix}
$$
with similar properties as the previous example. Therefore $T$ is conjugate to a map of constant slope $\lambda$ whenever $\lambda\ge 9$, and also linearizable, since $\lambda=9=\lambda_M=e^{h_{top}(T)}$.
\subsection{One application of our results}\label{ss:2}Using Proposition~\ref{p:20} let $K=2M(1,1)+E$. We have discussed the fact that $K$ is a transition matrix of a non-leo map $T\in{\mathbb C}PM$ with corresponding Markov partition denoted by ${\mathcal P}$. Clearly by Remark~\ref{r:3} $K$ represents a bounded linear operator - denote it by ${\mathcal K}$ - on $\text{e}ll^1({\mathcal P})$, so $T$ is of operator type. We can conclude that:
\begin{itemize}
\item[(i)] $\lambda_K=5=e^{h_{top}(T)}$ - Propositions~\ref{p:7}, \ref{p:20}.
\item[(ii)] $\lambda_K=r_{{\mathcal K}}=\|{\mathcal K}\|$ - Proposition~\ref{p:20}, Section~\ref{s:2}\text{e}qref{e:36}.
\item[(iii)] $T$ is not conjugate to a map of constant slope (is not linearizable) - Proposition~\ref{p:20}.
\item[(iv)] $T$ is null recurrent - Proposition~\ref{p:20}.
\item[(v)] Let ${\mathcal P}'$ be a Markov partition for $T$, denote $K'$ the transition matrix of $T$ with respect to ${\mathcal P}'$ representing a bounded linear operator $\text{\large\bf M}athcal K'$ on $\text{e}ll^1({\mathcal P}')$.
Since
\begin{equation*}\label{e:55}\forall~y\in (0,1)\colon~\#T^{-1}(y)=5,\text{e}nd{equation*}
we have $\lambda_{K'}=r_{\text{\large\bf M}athcal K'}=5$ - see (i), Section~\ref{s:2} and Proposition~\ref{p:7}. Then by Theorem~\ref{t:10} any recurrent centralized (operator/non-operator) perturbation of $T$ is linearizable. In particular it is true for any local window perturbation of $T$ on some element of ${\mathcal P}'$ - Proposition~\ref{p:14}(i).
\item[(vi)] Let ${\mathcal P}'$ be a Markov partition for $T$ which equals ${\mathcal P}$ outside of some interval $[a,b]\subset (0,1)$. Let $T'$ be a local window perturbation of $T$ on some element of ${\mathcal P}'$; from the previous paragraph (v) follows that $T'$ is strongly recurrent and linearizable. Consider a centralized (operator/non-operator) perturbation $T''$ of $T'$ on some ${\mathcal P}''\subset{\mathcal P}'$. Then if $T''$ is recurrent it is linearizable by Theorem~\ref{t:3}. Otherwise we can use either Theorem~\ref{t:6} (an operator case) or Theorem~\ref{t:9} (non-operator case, $S(j,{\mathcal P}')$ is finite for $j\in{\mathcal P}'$) to show that a local window perturbation of $T''$ of a sufficiently large order is linearizable.
\text{e}nd{itemize}
\setcounter{section}{-1}
\renewcommand\bibname{References}
\begin{thebibliography}{999}
\bibitem{ALM00} Alsed\'a, Ll.; Llibre, J.; Misiurewicz, M.\
\text{e}mph{Combinatorial dynamics and the entropy in dimension one},
{\text{e}m Adv. Ser. in Nonlinear Dynamics} \textbf{5}, 2nd Edition, World Scientific,
Singapore, 2000.
\bibitem{BGMY}
Block, L.; Guckenheimer, J.; Misiurewicz, M.; Young, L.-S.\
Periodic points and topological entropy of one-dimensional maps,
{\text{e}m Lecture Notes in Math.\ } {\bf 1980,} {\text{e}m 819} Springer Verlag, Berlin 18-34.
\bibitem{Bo12} Bobok, J.\
Semiconjugacy to a map of a constant slope,
{\text{e}m Studia Math.\ } {\bf 2012,} {\text{e}m 208,} 213--228.
\bibitem{Bo03} Bobok, J.\
Strictly ergodic patterns and entropy for interval maps,
{\text{e}m Acta Math.\ Univ.\ Comenianae} {\bf 2003,}
{\text{e}m LXXII,} 111--118.
\bibitem{BoSou11} Bobok, J.; Soukenka, M.\
On piecewise affine interval maps with countably many laps,
{\text{e}m Discrete and Continuous Dynamical Systems} {\bf 2011,} {\text{e}m 31.3,} 753--762.
\bibitem{BB} Brucks, K.; Bruin, H.\
Topics from one--dimensional dynamics,
London Mathematical Society, Student Texts {\text{e}m 62} Cambridge University Press 2004.
\bibitem{BT12} Bruin, H.; Todd, M.\
Transience and thermodynamic formalism for infinitely branched
interval maps,
{\text{e}m Journal of the London Math.\ Soc.\ } {\bf 2012,} {\text{e}m 86,} 171--194.
\bibitem{Gur69} Gurevi\v c, B.\ M.\
Topological entropy for denumerable Markov chains,
{\text{e}m Dokl.\ Akad.\ Nauk SSSR} {\bf 1969,} {\text{e}m 10,} 911--915.
\bibitem{Chu60} Chung, K.\ L.\
Markov chains with stationary transition probabilities,
Springer, Berlin, 1960.
\bibitem{KH95} Katok, A.; Hasselblatt, B.\
Introduction to the modern theory of dynamical systems,
Cambridge University Press, Cambridge (1995).
\bibitem{MiThu88} Milnor, J.; W.\ Thurston, W.\
On iterated maps of the interval,
In: {\text{e}m Dynamical Systems, Lecture Notes in Math.\ }
{\text{e}m 1342} Springer, Berlin, 1988; pp.\ 465-563,
\bibitem{Mi79} Misiurewicz, M.\
Horseshoes for mappings of an interval,
{\text{e}m Bull.\ Acad.\ Pol.\ Sci., S\'er.\ Sci.\ Math.} {\bf 1979,} {\text{e}m 27}, 167--169.
\bibitem{MiRa05} Misiurewicz, M.; Raith, P.\
Strict inequalities for the entropy of transitive piecewise monotone maps,
{\text{e}m Discrete Contin.\ Dyn.\ Syst.} {\bf 2005,} {\text{e}m 13}, 451--468.
\bibitem{MiRo14} Misiurewicz, M.; Roth, S.\
No semiconjugacy to a map of constant slope,
{\text{e}m Ergod.\ Th.\ \& Dynam.\ Sys.} {\bf 2016,} {\text{e}m 36,} 875--889.
\bibitem{MiRo16} Misiurewicz, M.; Roth, S.\
Constant slope maps on the extended real line,
Preprint 2016, arXiv:1603.04198.
\bibitem{MiSl80} Misiurewicz, M.; Szlenk, W.\
Entropy of piecewise monotone mappings,
{\text{e}m Studia Math.} {\bf 1980,} {\text{e}m 67(1),} 45--63.
\bibitem{Par66} W.\ Parry,
Symbolic dynamics and transformations of the unit interval,
{\text{e}m Trans.\ Amer.\ Math.\ Soc.} {\bf 1966,} {\text{e}m 122,} 368--378.
\bibitem{Pr64} Pruitt, W.\
Eigenvalues of non-negative matrices
{\text{e}m The Annals of Mathematical Statistics} {\bf 1964,} {\text{e}m 35(4),} 1797--1800.
\bibitem{Rue03} Ruette, S.\
On the Vere-Jones classification and existence of maximal measures for countable topological Markov chains,
{\text{e}m Pacific J.\ Math.\ } {\bf 2003,} {\text{e}m 209,} 366--380.
\bibitem{Sal88} Salama, I.\
Topological entropy and recurrence of countable chains,
{\text{e}m Pacific J.\ Math.\ }{\bf 1988,} {\text{e}m 134,} 325--341.
Errata {\bf 1989,} {\text{e}m 140(2)}.
\bibitem{aet.80} Taylor, A.\ E.; Lay, D.\ C.\
Introduction to Functional Analysis,
Robert E. Krieger Publishing Company, 2nd Edition, Malabar, Florida, 1980.
\bibitem{Ver-Jo67} Vere-Jones, D.\
Ergodic properties of non-negative matrices-I,
{\text{e}m Pacific J.\ of Math.\ } {\bf 1967,} {\text{e}m 22(2),} 361--385.
\bibitem{Wal82} Walters, P.\
An introduction to ergodic theory,
Springer Verlag (Heidelberg-New York), 1982.
\text{e}nd{thebibliography}
\text{e}nd{document} |
\begin{document}
\baselineskip3.15ex
\vskip .3truecm
\maketitle
{
\small
\noindent
$^1$ Centre de Math\'ematiques Appliqu\'ees, \'Ecole Polytechnique,
91128 Palaiseau, France.\\
Email: [email protected]\\
\noindent
$^2$ Centre de Math\'ematiques Appliqu\'ees, \'Ecole Polytechnique,
91128 Palaiseau, France.\\
Email: [email protected]
}
\begin{abstract}
\small{
We study the homogenization of a Schr\"{o}dinger equation in a
locally periodic medium. For the time and space scaling of
semi-classical analysis we consider well-prepared initial data
that are concentrated near a stationary point (with respect
to both space and phase) of the energy, i.e. the Bloch cell
eigenvalue. We show that there exists a localized solution
which is asymptotically given as the product of a Bloch
wave and of the solution of an homogenized Schr\"{o}dinger
equation with quadratic potential.
\vskip.3truecm
\noindent {\bf Key words:}
Homogenization, localization, Bloch waves, Schr\"{o}dinger.
\vskip.2truecm
\noindent {\bf 2000 Mathematics Subject Classification:}
35B27, 35J10.
}
\varphiepsilonnd{abstract}
\section{Introduction}
\noindent
We study the homogenization of the following Schr\"{o}dinger equation
\begin{equation}\label{start}
\left\{
\begin{array}{ll}
\displaystyle
\frac{i}{\varphiepsilon}\frac{\partial u_{\varphiepsilon}}{\partial t} -
\hbox{{\rm div}}\left(A\left(x,\frac{x}{\varphiepsilon}\right)\nabla u_{\varphiepsilon}\right)
+ \frac{1}{\varphiepsilon^{2}} c\left(x,\frac{x}{\varphiepsilon}\right) u_{\varphiepsilon}=0 & \mbox{ in } {\mathbb R}^{N}\times {\mathbb R}^+\\
[3mm]
u_{\varphiepsilon}(0,x)=u^{0}_{\varphiepsilon}(x) \hspace{5.1cm} & \mbox{ in } {\mathbb R}^{N}\\
\varphiepsilonnd{array}
\right.
\varphiepsilonnd{equation}
\noindent
where the unknown $u_{\varphiepsilon}(t,x)$ is a complex-valued function.
The coefficients $A(x,y)$ and $c(x,y)$ are real and sufficiently smooth
bounded functions defined for $x\in {\mathbb R}^{N}$ (the macroscopic variable)
and $y\in {\mathbb T}^{N}$ (the microscopic variable in the unit torus).
The period $\varphiepsilon$ is a small positive parameter which is
intended to go to zero.
Furthermore the matrix $A$ is symmetric, uniformly positive definite.
Of course the usual Schr\"{o}dinger equation is recovered when
$A\varphiepsilonquiv Id$ but, since there is no additional difficulty, we keep
the general form of equation (\ref{start}) in the sequel (which can
be interpreted as introducing a non flat locally periodic metric).
\par
The scaling of (\ref{start}) is that of semi-classical analysis
(see e.g. \cite{blp}, \cite{buslaev}, \cite{guillot},
\cite{gerard}, \cite{gerard2}, \cite{sjostrand}, \cite{guillot2},
\cite{pst}, \cite{pr}): if the period is rescaled to 1, it amounts
to look at large, time and space, variables of order $\varphiepsilon^{-1}$.
At least in the case when $A\varphiepsilonquiv Id$ and $c(x,y)=c_0(x)+c_1(y)$,
there is a well-known theory for the asymptotic
limit of (\ref{start}) when $\varphiepsilon$ goes to zero. By using
WKB asymptotic expansion or the notion of semi-classical measures
(or Wigner transforms) the homogenized problem is in some sense
the Liouville transport equation for a classical particle which
is the limit of the wave function $u_\varphiepsilon$. In other words,
for an initial data living in the $n$-th Bloch band and
under some technical assumptions on the Bloch spectral cell
problem (\ref{celleq}),
the semi-classical limit of (\ref{start}) is given by the
dynamic of the following Hamiltonian system in the phase space
$(x,\theta)\in{\mathbb R}^N\times{\mathbb T}^N$
\begin{equation}
\label{hamilton}
\left\{ \begin{array}{l}
\dot x = \nabla_\theta \lambda_n(x,\theta) \\
\dot \theta = - \nabla_x \lambda_n(x,\theta)
\varphiepsilonnd{array} \right.
\varphiepsilonnd{equation}
where the Hamiltonian $\lambda_n(x,\theta)$ is precisely
the $n$-th Bloch eigenvalue of (\ref{celleq}) (see \cite{buslaev},
\cite{guillot}, \cite{gerard}, \cite{gerard2}, \cite{sjostrand},
\cite{guillot2}, \cite{pst}, \cite{pr} for more details).
\par
Our approach to (\ref{start}) is different since we consider
special initial data that are monochromatic, have zero group
velocity and zero applied force. Namely the initial data is
concentrating at a point $(x^n,\theta^n)$ of the phase space
where $\nabla_\theta \lambda_n(x^n,\theta^n)=\nabla_x \lambda_n(x^n,\theta^n)=0$.
In such a case, the previous Hamiltonian system (\ref{hamilton})
degenerates (its solution is constant) and is unable to describe the precise
dynamic of the wave function $u_\varphiepsilon$. We exhibit another limit
problem which is again a Schr\"{o}dinger equation with quadratic
potential. In other words we build a sequence of approximate
solutions of (\ref{start}) which are the product of a Bloch
wave and of the solution of an homogenized Schr\"{o}dinger
equation. Furthermore, if the full Hessian tensor of the
Bloch eigenvalue $\lambda_n(x,\theta)$ is positive definite
at $(x^n,\theta^n)$, we prove that all the eigenfunctions
of an homogenized Schr\"{o}dinger equation are exponentially
decreasing at infinity. In other words, we exhibit a localization
phenomenon for (\ref{start}) since we build a sequence of approximate
solutions that decay exponentially fast away from $x^n$. The
root of this localization phenomenon is the macroscopic modulation
(i.e. with respect to $x$) of the periodic coefficients which
is similar in spirit to the randomness that causes Anderson's
localization (see \cite{cl} and references therein).
\par
Let us describe more precisely the type of well-prepared
initial data that we consider. For a given point $(x^n,\theta^n)\in{\mathbb R}^N\times{\mathbb T}^N$
and a given function $v^{0}\in H^{1}({\mathbb R}^{N})$ we take
\begin{equation}\label{wp}
u_{\varphiepsilon}^{0}(x)=\psi_{n}\Big(x^{n},\frac{x}{\varphiepsilon},\theta^{n}\Big)
e^{2i\pi\frac{\theta^{n}\cdot x}{\varphiepsilon}}
v^{0}\Big(\frac{x-x^n}{\sqrt{\varphiepsilon}}\Big)
\varphiepsilonnd{equation}
where $\psi_{n}(x,y,\theta)$ is a so-called Bloch eigenfunction,
solution of the following Bloch spectral cell equation
\begin{equation}\label{celleq}
- (\hbox{{\rm div}}_{y} + 2i\pi\theta)(A(x,y)(\nabla_{y} + 2i\pi\theta)\psi_{n}) + c(x,y)\psi_{n} =
\lambda_{n}(x,\theta)\psi_{n}
\hspace{1cm} \mbox{ in }{\mathbb T}^{N}\,,
\varphiepsilonnd{equation}
\noindent
corresponding to the $n$-th eigenvalue or energy level $\lambda_{n}$.
The Bloch wave $\psi_{n}$ is periodic with respect to $y$ but
$v^0$ is not periodic, so $v^{0}\Big(\frac{x-x^n}{\sqrt{\varphiepsilon}}\Big)$
means that the initial data is concentrated around $x^n$ with
a support of asymptotic size $\sqrt\varphiepsilon$.
The Bloch frequency $\theta^n\in{\mathbb T}^{N}$, the localization point
$x^n\in{\mathbb R}^{N}$ and the energy level $n$ are chosen such that
$\lambda_{n}(x^n,\theta^n)$ is simple and $\nabla_x\lambda_{n}(x^n,\theta^n)
= \nabla_\theta\lambda_{n}(x^n,\theta^n) =0$.
Our main result (Theorem \ref{mainth}) shows that the solution
of \varphiepsilonqref{start} is approximately given by
\begin{equation}\label{asymptot}
u_{\varphiepsilon}(t,x)\approx \psi_{n}\Big(x^{n},\frac{x}{\varphiepsilon},\theta^{n}\Big)
e^{i\frac{\lambda_{n}(x^n,\theta^n) t}{\varphiepsilon}}e^{2i\pi\frac{\theta^{n}\cdot x}{\varphiepsilon}}
v\Big(t,\frac{x-x^n}{\sqrt{\varphiepsilon}}\Big)\,,
\varphiepsilonnd{equation}
where $v$ is the unique solution of the homogenized Schr\"odinger equation
\begin{equation}\label{hom0}
\left\{
\begin{array}{ll}
\displaystyle
i \frac{\partial v}{\partial t} - \hbox{{\rm div}}\left(A^{*}\nabla v \right)
+ \hbox{{\rm div}}(v B^{*} z) + c^{*} v
+ v D^{*} z \cdot z = 0 & \mbox{ in } {\mathbb R}^{N}\times {\mathbb R}^+\\
[3mm]
v (0,z)=v^{0}(z) \hspace{5.1cm} & \mbox{ in } {\mathbb R}^{N}\\
\varphiepsilonnd{array}
\right.
\varphiepsilonnd{equation}
\noindent
where $c^*$ is a constant coefficient and $A^*,B^*,D^*$ are constant
matrices defined by
$$
A^{*}=\frac{1}{8\pi^{2}}\nabla_{\theta}\nabla_{\theta}\lambda_{n}(x^n,\theta^n) \,, \
B^{*}=\frac{1}{2i\pi}\nabla_{\theta}\nabla_{x}\lambda_{n}(x^n,\theta^n) \,, \
D^{*}=\frac{1}{2}\nabla_{x}\nabla_{x}\lambda_{n}(x^n,\theta^n) \,.
$$
In Proposition \ref{propoself} we show that the homogenized
problem \eqref{hom0} is well-posed since the underlying
operator is self-adjoint. Furthermore, under the additional
assumption that the Hessian tensor $\nabla\nabla\lambda_{n}(x^n,\theta^n)$
(with respect to both variables $x$ and $\theta$) is positive
definite, we prove that \eqref{hom0} admits a countable
number of eigenvalues and eigenfunctions which all decay
exponentially at infinity (see Proposition \ref{localization}).
In such a case, formula \eqref{asymptot} defines a family
of approximate (exponentially) localized solutions of
\eqref{start}.
\par
Let us indicate that the case of the first eigenvalue (ground state)
$n=1$ with $\theta^1=0$ was already studied in \cite{alpiat1} (for
the spectral problem rather than the evolution equation).
The case of purely periodic coefficients (i.e. that depend only on
$y$ and not on $x$) is completely different and was studied in
\cite{alpiat2}. Indeed, in this latter case there is no localization
effect and one proves that, for a longer time scale (of order $\varepsilon^{-1}$
with respect to \eqref{start}), the homogenized
limit is again a Schr\"{o}dinger equation without the drift and quadratic
potential in \eqref{hom0}.
\section{Preliminaries}
\noindent
In the present section we give our main assumptions, set some notation
and a few preliminary results needed in the proof of the main results in
Section \ref{mainsec}.
We first assume that the coefficients $A_{ij}(x,y)$ and
$c(x,y)$ are real, bounded, and Carath\'eodory functions
(measurable with respect to $y$ and continuous in $x$),
which are periodic with respect to $y$. In other words,
they belong to $C_b\left({\mathbb R}^N;L^\infty({\mathbb T}^N)\right)$.
Furthermore, the tensor $A(x,y)$ is symmetric uniformly coercive.
Under these assumptions, it is well-known that, for any values
of the parameters ${\theta\in{\mathbb T}^{N}}$ and ${x\in{\mathbb R}^{N}}$,
the cell problem \eqref{celleq} defines
a compact self-adjoint operator on $L^{2}({\mathbb T}^{N})$ which admits a
countable sequence of real increasing eigenvalues
${\displaystyle \{\lambda_{n}(x,\theta)\}_{n\geq 1}}$ (repeated with their multiplicity)
with corresponding eigenfunctions
${\displaystyle \{\psi_{n}(x,\theta,y)\}_{n\geq 1}}$
normalized by
$$
||\psi_{n}(x,\theta,\cdot)||_{L^{2}({\mathbb T}^{N})} =1\,.
$$
Our main assumptions are:
\noindent
{\bf Hypothesis H1.}
There exist $x^{n}\in{\mathbb R}^{N}$ and $\theta^{n}\in{\mathbb T}^{N}$ such that
\begin{equation}\label{assumpt}
\begin{cases}
&\hspace{-3mm}(i)\: \lambda_{n}(x^{n},\theta^{n}) \text{ is a simple eigenvalue,} \\
&\hspace{-3mm}(ii)\: (x^{n},\theta^{n}) \text{ is a critical point of } \lambda_{n}(x,\theta),
\text{ i.e. } \: \nabla_{x}\lambda_{n}(x^{n},\theta^{n})=\nabla_{\theta}\lambda_{n}(x^{n},\theta^{n})=0.
\\
\end{cases}
\end{equation}
\noindent
{\bf Hypothesis H2.}
The coefficients $A(x,y)$ and $c(x,y)$ are of class $C^{2}$
with respect to the variable $x$ in a neighborhood of $x=x^{n}$.
\noindent
Then we set:
\begin{equation*}
A_{1,h}(y):=\frac{\partial A}{\partial x_{h}}(x^{n},y)\,,\quad
A_{2,lh}(y):=\frac{\partial^{2} A}{\partial x_{l}\partial x_{h}}(x^{n},y)\,,\quad
\text{ for } \: l,h=1,\dots,N\,.
\end{equation*}
Similar notation is used to denote the derivatives of the function $c$ with
respect to the $x$-variable.
With an abuse of notation we further set
$$
A(y):=A(x^{n},y)\,,\quad \lambda_{n}:=\lambda_{n}(x^{n},\theta^{n})\,,\quad
\psi_{n}(y):=\psi_{n}(x^{n},y,\theta^{n})\,,
$$
and analogous notation holds for all derivatives of $\psi_{n}$ and $\lambda_{n}$ with
respect to the $x$-variable and the $\theta$-variable evaluated at
$x=x^{n}$ and $\theta=\theta^{n}$.
Without loss of generality we will assume in the sequel that $x^{n}=0$.\\
{\bf Notation.} For any function $\rho(y)$ defined on ${\mathbb T}^{N}$ we set
$$
\rho^{\varepsilon}(z):=\rho(z/\sqrt{\varepsilon})
$$
where $z:=\sqrt{\varepsilon}y\equiv x/\sqrt{\varepsilon}$.
In the sequel the symbols $\hbox{{\rm div}}_y$ and $\nabla_y$ will stand for the divergence
and gradient operators which act
with respect to the $y$-variable while div and $\nabla$ will indicate the divergence and
gradient operators which act with respect to the $z$-variable.
Finally throughout this paper the Einstein summation convention is used.
Under assumption \eqref{assumpt}-(i) it is a classical matter to prove that
the $n$-th eigencouple of \eqref{celleq} is smooth with respect to the variable
$\theta$ in a
neighborhood of $\theta=\theta^n$ (see \cite{kato}) and has the same differentiability
property as the coefficients with respect to the variable $x$.
Introducing the unbounded operator ${\mathbb A}_n(x,\theta)$ defined on $L^2({\mathbb T}^N)$ by
\begin{equation*}
{\mathbb A}_n(x,\theta)\psi =
- (\hbox{{\rm div}}_y +2i\pi\theta)\Big( A(x,y) (\nabla_y +2i\pi\theta)\psi\Big) +
c(x,y) \psi - \lambda_n(x,\theta) \psi ,
\end{equation*}
it is easy to differentiate \eqref{celleq}.
Denoting by $(e_k)_{1\leq k\leq N}$ the canonical basis
of ${\mathbb R}^N$, the first derivatives satisfy
\begin{equation}
\label{deriv1t}
\begin{array}{ll}
\displaystyle {\mathbb A}_n(x,\theta)\frac{\partial\psi_{n}}{\partial\theta_k} =
&\displaystyle 2i\pi e_k A(x,y)(\nabla_y +2i\pi\theta)\psi_{n} \\[0.3cm]
&\displaystyle + (\hbox{{\rm div}}_y +2i\pi\theta) \left( A(x,y) 2i\pi e_k \psi_{n} \right) +
\frac{\partial\lambda_n}{\partial\theta_k}(x,\theta) \psi_{n} \,,
\end{array}
\end{equation}
\begin{equation}
\label{deriv1x}
\begin{array}{ll}
\displaystyle {\mathbb A}_n(x,\theta)\frac{\partial\psi_{n}}{\partial x_l} =
&\displaystyle (\hbox{{\rm div}}_y +2i\pi\theta)\Big( \frac{\partial A}{\partial x_l}(x,y)
(\nabla_y +2i\pi\theta)\psi_{n}\Big) \\[0.3cm]
&\displaystyle -\frac{\partial c}{\partial x_l}(x,y)\psi_{n}
+ \frac{\partial \lambda_n}{\partial x_l}(x,\theta)\psi_{n} \,.
\end{array}
\end{equation}
\noindent
Similar formulas hold for second order derivatives. By integrating
the cell equations for the second order derivatives against $\psi_{n}$
we obtain the following formulas that will be useful in the sequel
(their proofs are safely left to the reader).
\begin{lemma}
Assume that assumptions {\bf H1} and {\bf H2} hold true.
Then the following equalities hold:
\begin{align}\label{eq4}
& \int_{{\mathbb T}^{N}} \frac{1}{2\pi i}\Big[
A_{1,h}(\nabla_{y}+2i\pi\theta^{n})\frac{\partial \psi_{n}}{\partial \theta_{k}}
\cdot (\nabla_{y} -2i\pi\theta^{n})\bar{\psi}_{n} +
c_{1,h}\,\frac{\partial \psi_{n}}{\partial \theta_{k}} \bar{\psi}_{n}\Big] \: dy \\
\nonumber & +\int_{{\mathbb T}^{N}}\Big[
A_{1,h} e_{k}\psi_{n} \cdot (\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n}
+ A e_{k}\frac{\partial \psi_{n}}{\partial x_{h}}\cdot
(\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n}\Big]\: dy \\
\nonumber & -\int_{{\mathbb T}^{N}}\Big[
e_{k}\bar{\psi}_{n} A_{1,h}\cdot (\nabla_{y}+2i\pi\theta^{n})\psi_{n}
+ e_{k}\bar{\psi}_{n} A \cdot
(\nabla_{y}+2i\pi\theta^{n})\frac{\partial \psi_{n}}{\partial x_{h}}\Big]\: dy \\
\nonumber & - \frac{1}{2i\pi}\frac{\partial^{2} \lambda_{n}}{\partial x_{h}\partial \theta_{k}} = 0\,,
\end{align}
\begin{align}\label{eq5}
& \int_{{\mathbb T}^{N}}\Big[
A_{2,lh}(\nabla_{y}+2i\pi\theta^{n})\psi_{n}\cdot (\nabla_{y} -2i\pi\theta^{n})\bar{\psi}_{n}
+\Big(c_{2,lh} - \frac{\partial^{2} \lambda_{n}}{\partial x_{l}\partial x_{h}}\Big)
|\psi_{n}|^{2}\Big] \: dy \\
\nonumber & + \int_{{\mathbb T}^{N}}\Big[
A_{1,h}(\nabla_{y}+2i\pi\theta^{n})\frac{\partial \psi_{n}}{\partial x_{l}}
\cdot (\nabla_{y} -2i\pi\theta^{n})\bar{\psi}_{n} +
c_{1,h}\,\frac{\partial \psi_{n}}{\partial x_{l}}\bar{\psi}_{n}\Big]\: dy \\
\nonumber & + \int_{{\mathbb T}^{N}}\Big[
A_{1,l}(\nabla_{y}+2i\pi\theta^{n})\frac{\partial \psi_{n}}{\partial x_{h}}\cdot
(\nabla_{y} -2i\pi\theta^{n})\bar{\psi}_{n}
+ c_{1,l}\,\frac{\partial \psi_{n}}{\partial x_{h}}\bar{\psi}_{n}\Big]
\: dy = 0\,,
\end{align}
\begin{align}\label{eq6}
& \int_{{\mathbb T}^{N}}\Big[
2i\pi e_k A(y)
(\nabla_y +2i\pi\theta^{n})\frac{\partial\psi_n}{\partial\theta_l}\bar{\psi}_{n}
- \left( A(y) 2i\pi e_k
\frac{\partial\psi_n}{\partial\theta_l} \right)(\nabla_y -2i\pi\theta^{n})\bar{\psi}_{n}
\Big]\: dy \\
\nonumber & + \int_{{\mathbb T}^{N}}\Big[
2i\pi e_l A(y)
(\nabla_y +2i\pi\theta^{n})\frac{\partial\psi_n}{\partial\theta_k}\bar{\psi}_{n}
-\left( A(y) 2i\pi e_l \frac{\partial\psi_n}{\partial\theta_k} \right)
(\nabla_y -2i\pi\theta^{n})\bar{\psi}_{n}\Big]\: dy \\
\nonumber & -\int_{{\mathbb T}^{N}}\Big[
4\pi^2e_k A(y) e_l |\psi_n|^{2} +4\pi^2e_l A(y) e_k |\psi_n|^{2}\Big]\: dy \\
\nonumber & + \frac{\partial^2\lambda_n}{\partial\theta_l\partial\theta_k}(\theta^{n}) =0\,.
\end{align}
\end{lemma}
We now give the variational formulations of the above cell problems,
rescaled at size $\varphiepsilon$.
\begin{lemma}
Assume that assumptions {\bf H1} and {\bf H2} hold true and let
$\varphi(z)$ be a smooth compactly supported function defined from ${\mathbb R}^{N}$
into ${\mathbb C}$.
Then the following equalities hold:
\begin{align}\label{eq1}
& \int_{{\mathbb R}^{N}}\Big[ A^{\varepsilon}(\nabla_{y}+2i\pi\theta^{n})\psi_{n}^{\varepsilon}\cdot
(\sqrt{\varepsilon}\nabla-2i\pi\theta^{n})\bar{\varphi}(z) +(c^{\varepsilon} - \lambda_{n}^{\varepsilon})\psi_{n}^{\varepsilon} \bar{\varphi}\Big] \: dz = 0 \,,
\end{align}
\begin{align}\label{eq2}
& \int_{{\mathbb R}^{N}}\Big[ A^{\varepsilon}(\nabla_{y}+2i\pi\theta^{n})
\frac{\partial \psi_{n}^{\varepsilon}}{\partial \theta^{n}_{k}}
\cdot (\sqrt{\varepsilon}\nabla -2i\pi\theta^{n})\bar{\varphi} + (c^{\varepsilon}- \lambda_{n}^{\varepsilon})
\frac{\partial \psi_{n}^{\varepsilon}}{\partial \theta^{n}_{k}} \bar{\varphi}\Big] \: dz \\
\nonumber & +\int_{{\mathbb R}^{N}}\Big[ -2\pi i e_{k} \cdot A^{\varepsilon}(\nabla_{y}+2i\pi\theta^{n})
\psi_{n}^{\varepsilon}\bar{\varphi} + A^{\varepsilon} \, 2\pi i e_{k} \psi_{n}^{\varepsilon} \cdot
(\sqrt{\varepsilon}\nabla -2i\pi\theta^{n})\bar{\varphi}\Big] \: dz=0\,,
\end{align}
\begin{align}\label{eq3}
& \int_{{\mathbb R}^{N}}\Big[ A^{\varepsilon}(\nabla_{y}+2i\pi\theta^{n})
\frac{\partial \psi_{n}^{\varepsilon}}{\partial x_{h}}\cdot(\sqrt{\varepsilon}\nabla -2i\pi\theta^{n}) \bar{\varphi}
+(c^{\varepsilon} - \lambda_{n}^{\varepsilon}) \frac{\partial \psi_{n}^{\varepsilon}}{\partial x_{h}}\bar{\varphi}\Big] \: dz \\
\nonumber & +\int_{{\mathbb R}^{N}}\Big[
A^{\varepsilon}_{1,h}(\nabla_{y}+2i\pi\theta^{n})\psi_{n}^{\varepsilon} \cdot
(\sqrt{\varepsilon}\nabla -2i\pi\theta^{n})\bar{\varphi} +
c^{\varepsilon}_{1,h}\,\psi_{n}^{\varepsilon} \bar{\varphi}\Big] \: dz =0\,.
\end{align}
\end{lemma}
\begin{proof}
Formula \eqref{eq1} follows straightforwardly from equation \eqref{celleq} while
\eqref{eq2}-\eqref{eq3} are consequences of \eqref{deriv1t}-\eqref{deriv1x}.
\end{proof}
Finally we recall the notion of two-scale convergence
introduced in \cite{allaire}, \cite{nguetseng} (that will be used
with $\delta=\sqrt{\varepsilon}$).
\begin{proposition}
\label{prop2s}
Let $f_{\delta}$ be a sequence uniformly bounded in $L^2({\mathbb R}^N)$.
\begin{enumerate}
\item There exists a subsequence, still denoted by $f_\delta$, and a
limit $f_0(x,y) \in L^2({\mathbb R}^N\times{\mathbb T}^N)$ such that
$f_\delta$ \emph{two-scale converges} weakly to $f_0$ in the
sense that
$$
\lim_{\delta\to 0}
\int_{{\mathbb R}^N} f_\delta(x)\phi(x,x/\delta)\,dx =
\int_{{\mathbb R}^N}\int_{{\mathbb T}^N} f_0(x,y)\phi(x,y)\,dx\,dy
$$
for all functions $\phi(x,y)\in L^2\left( {\mathbb R}^N ; C({\mathbb T}^N) \right)$.
\item Assume further that $f_\delta$ two-scale converges
weakly to $f_0$ and that
$$
\lim_{\delta\to 0} \| f_{\delta} \|_{L^2({\mathbb R}^N)} =
\| f_0 \|_{L^2\left({\mathbb R}^N\times{\mathbb T}^N\right)} .
$$
Then $f_\delta$ is said to two-scale converge \emph{strongly} to its
limit $f_0$ in the sense that, if $f_0$ is smooth enough, e.g.
$f_0\in L^2\left( {\mathbb R}^N ; C({\mathbb T}^N) \right)$, we have
$$
\lim_{\delta\to 0} \int_{{\mathbb R}^N} \left| f_\delta(x)-
f_0(x,x/\delta) \right|^2 dx = 0.
$$
\item Assume that $\delta \nabla f_\delta$ is also
uniformly bounded in $L^2({\mathbb R}^N)^N$. Then
there exists a subsequence, still denoted by $f_\delta$, and a limit
$f_0(x,y) \in L^2({\mathbb R}^N ;H^1({\mathbb T}^N))$ such that
$f_\delta$ two-scale converges to $f_0(x,y)$ and $\delta\nabla
f_\delta$ two-scale converges to $\nabla_y f_0(x,y)$.
\end{enumerate}
\end{proposition}
\section{Main results}\label{mainsec}
\noindent
We begin by recalling the usual a priori estimates for the solution
of the Schr\"{o}dinger equation \eqref{start} which hold true since
the coefficients are real. They are obtained by multiplying the
equation successively by $\overline u_\varepsilon$ and
$\frac{\partial \overline u_\varepsilon}{\partial t}$, and integrating by parts.
\begin{lemma}\label{apriori}
There exists $C>0$ independent of $\varepsilon$ such that the solution of \eqref{start} satisfies
\begin{align*}
& ||u_{\varepsilon}||_{L^{\infty}({\mathbb R}^+;L^{2}({\mathbb R}^{N}))} = ||u_{\varepsilon}^{0}||_{L^{2}({\mathbb R}^{N})}\,,\\
& \varepsilon||\nabla u_{\varepsilon}||_{L^{\infty}({\mathbb R}^+;L^{2}({\mathbb R}^{N}))} \leq
C\Big( ||u_{\varepsilon}^{0}||_{L^{2}({\mathbb R}^{N})}+\varepsilon ||\nabla u_{\varepsilon}^{0}||_{L^{2}({\mathbb R}^{N})}\Big)\,.
\end{align*}
\end{lemma}
\begin{theorem}\label{mainth}
Assume that assumptions {\bf H1} and {\bf H2} hold true and that the initial data $u_{\varepsilon}^{0}$
is of the form \eqref{wp}. Then the solution of \eqref{start} can be written as
\begin{equation}\label{form}
u_{\varepsilon}(t,x)=e^{i\frac{\lambda_{n} t}{\varepsilon}}e^{2i\pi\frac{\theta^{n}\cdot x}{\varepsilon}}
v_{\varepsilon}\Big(t,\frac{x-x^n}{\sqrt{\varepsilon}}\Big)\,,
\end{equation}
where $v_{\varepsilon}(t,z)$ two-scale converges strongly to $\psi_{n}(y)v(t,z)$, i.e.
\begin{equation}
\label{eq2s.6c}
\lim_{\varepsilon\to0} \int_{{\mathbb R}^N} \left| v_\varepsilon(t,z) -
\psi_{n}\left(\frac{z}{\sqrt{\varepsilon}}\right) v(t,z) \right|^2 dz = 0 ,
\end{equation}
uniformly on compact time intervals in ${\mathbb R}^+$, and
$v$ is the unique solution of the homogenized Schr\"odinger equation
\begin{equation}\label{hom}
\left\{
\begin{array}{ll}
\displaystyle
i \frac{\partial v}{\partial t} - \hbox{{\rm div}}\left(A^{*}\nabla v \right)
+ \hbox{{\rm div}}(v B^{*} z) + c^{*} v
+ v D^{*} z \cdot z = 0 & \mbox{ in } {\mathbb R}^{N}\times {\mathbb R}^+\\
[3mm]
v (0,z)=v^{0}(z) \hspace{5.1cm} & \mbox{ in } {\mathbb R}^{N}\\
\end{array}
\right.
\end{equation}
\noindent
where
$$
A^{*}=\frac{1}{8\pi^{2}}\nabla_{\theta}\nabla_{\theta}\lambda_{n}(x^n,\theta^n) \,, \
B^{*}=\frac{1}{2i\pi}\nabla_{\theta}\nabla_{x}\lambda_{n}(x^n,\theta^n) \,, \
D^{*}=\frac{1}{2}\nabla_{x}\nabla_{x}\lambda_{n}(x^n,\theta^n) \,,
$$
and $c^{*}$ is given by
\begin{equation*}
c^{*}=\int_{{\mathbb T}^{N}}\hspace{-1mm}\Big[
A(\nabla_{y}+2i\pi\theta^{n})\psi_{n}\cdot \frac{\partial \bar{\psi}_{n}}{\partial x_{k}} e_{k}
- A (\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial x_{k}}\cdot \psi_{n}\,e_{k}
- A_{1,k}(\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n}\cdot \psi_{n} e_{k}\Big] dy \,.
\end{equation*}
\end{theorem}
\begin{remark}
{\rm
Notice that even if the tensor $A^*$ might be
non-coercive, the homogenized problem \eqref{hom} is well posed.
Indeed the operator
${\mathbb A}^{*}:L^2({\mathbb R}^N)\to L^2({\mathbb R}^N)$ defined by
\begin{equation}\label{herm}
{\mathbb A}^* \varphi = - \hbox{{\rm div}}\left(A^{*}\nabla \varphi \right)
+ \hbox{{\rm div}}(\varphi B^{*} z) + c^{*} \varphi
+ \varphi D^{*} z \cdot z
\end{equation}
is self-adjoint (see Proposition \ref{propoself}) and therefore
by using semi-group theory (see \emph{e.g.} \cite{brezis} or
Chapter X in \cite{reedsimon}), one can show that there exists a unique solution
in $C({\mathbb R}^+;L^2({\mathbb R}^N))$, although it may not belong to
$L^2({\mathbb R}^+;H^1({\mathbb R}^N))$.
}
\end{remark}
The next result establishes the conservation of the $L^2$-norm for the
solution $v$ of the homogenized equation \eqref{hom} and the self-adjointness
of the operator ${\mathbb A}^*$.
\begin{proposition}\label{propoself}
Let $v\in C({\mathbb R}^+;L^2({\mathbb R}^N))$ be a solution to \eqref{hom}. Then
\begin{equation}\label{cons}
||v(t,\cdot)||_{L^2({\mathbb R}^N)}=||v^0||_{L^2({\mathbb R}^N)} \quad \forall \,t\in{\mathbb R}^+\,.
\end{equation}
Moreover the operator ${\mathbb A}^{*}$ defined in \eqref{herm} is self-adjoint.
\end{proposition}
\begin{proof}
We multiply the equation \eqref{hom} by $\bar{v}$ and take the imaginary part to
obtain
\begin{equation}\label{impart}
\frac{1}{2}\frac{d}{dt}\int_{{\mathbb R}^N}|v|^2 \, dz =
{\rm Im}\left(\int_{{\mathbb R}^N} v B^*z\cdot \nabla\bar{v}-c^* |v|^2 \, dz \right)\,.
\end{equation}
After integrating by parts one finds that the right hand side of \eqref{impart} equals
$$
-\Big(\frac{1}{2i}{\rm tr}\, B^* + {\rm Im}\,c^*\Big)\int_{{\mathbb R}^N}|v|^2 \, dz
$$
and therefore \eqref{cons} is proved as soon as we show that
\begin{equation}\label{contaccio}
\frac{1}{2i}{\rm tr}\, B^* + {\rm Im}\,c^*=0\,.
\end{equation}
In order to do this we first rewrite the coefficients $c^*$ and $B^*$ in a
suitable form.
Denoting by $\langle\cdot\,,\cdot\rangle$ the Hermitian inner product in $L^2({\mathbb T}^N)$
and using equation \eqref{deriv1t} we write
\begin{equation}\label{formc}
c^*= \frac{1}{2i\pi}\langle
{\mathbb A}_n\frac{\partial \psi_{n}}{\partial \theta_k},\frac{\partial \psi_{n}}{\partial x_k}\rangle
-\int_{{\mathbb T}^N}A_{1,k}(\nabla_y-2i\pi\theta^n)\bar{\psi}_{n}\cdot\psi_{n} e_k\, dy\,,
\end{equation}
\noindent
while by equations \eqref{deriv1t}-\eqref{eq4} it follows that
\begin{align}\label{formB}
\frac{1}{2i\pi}\frac{\partial^2 \lambda_{n}}{\partial x_h\partial\theta_k}=
& -\frac{1}{2i\pi}\langle\overline{
{\mathbb A}_n\frac{\partial \psi_{n}}{\partial \theta_k},\frac{\partial \psi_{n}}{\partial x_h}}\rangle
-\frac{1}{2i\pi}\langle\overline{
{\mathbb A}_n\frac{\partial \psi_{n}}{\partial x_h},\frac{\partial \psi_{n}}{\partial\theta_k}}\rangle \\
\nonumber & +2i{\rm Im}\int_{{\mathbb T}^N}A_{1,h}(\nabla_y-2i\pi\theta^n)\bar{\psi}_{n}\cdot\psi_{n} e_k\, dy \,.
\end{align}
\noindent
By formulae \eqref{formc}-\eqref{formB} it is readily seen that equality
\eqref{contaccio} holds true.
In order to prove the self-adjointness of the operator ${\mathbb A}^*$, one first checks
that ${\mathbb A}^*$ is symmetric, which easily follows by \eqref{contaccio} and
the fact that ${\displaystyle \overline{B}^*=-B^*}$, and then observes that
up to addition of a multiple of the identity the operator ${\mathbb A}^*$ is monotone
(see \emph{e.g.} \cite{brezis2}, Chapter VII).
\end{proof}
In the next proposition we will denote by $\nabla\nabla \lambda_{n}$ the Hessian matrix of the function
$\lambda_{n}(x,\theta)$ evaluated at the point $(x^n,\theta^n)$, namely
$$
\nabla\nabla \lambda_{n} =
\left(
\begin{array}{ll}
\nabla_{x}\nabla_{x}\lambda_{n} & \nabla_{\theta}\nabla_{x}\lambda_{n} \\
\nabla_{\theta}\nabla_{x}\lambda_{n} & \nabla_{\theta}\nabla_{\theta}\lambda_{n}
\end{array}
\right)
(x^n,\theta^n)\,.
$$
\begin{proposition}\label{localization}
Assume that the matrix $\nabla\nabla \lambda_{n}$ is positive
definite. Then there exists an orthonormal basis ${\displaystyle \{\varphi_n\}_{n\geq 1}}$
of eigenfunctions
of ${\mathbb A}^*$; moreover for each $n$ there exists a real constant $\gamma_n>0$ such that
\begin{equation}\label{decay}
e^{\gamma_n |z|}\varphi_n\,,\,e^{\gamma_n |z|}\nabla\varphi_n \in L^2({\mathbb R}^N)\,.
\end{equation}
\end{proposition}
\begin{proof}
Up to shifting the spectrum of the operator ${\mathbb A}^{*}$, we may assume that ${\rm Re}(c^*)=0$.
In order to prove the existence of an orthonormal basis of eigenfunctions
we introduce the inverse operator of ${\mathbb A}^{*}$, denoted by $G^*$
\begin{align}
\nonumber G^{*}: L^2({\mathbb R}^N) & \to L^2({\mathbb R}^N) \\
\nonumber f & \to \varphi \text{ unique solution in } H^1({\mathbb R}^N) \text{ of} \\
\label{inverse} &\quad\quad {\mathbb A}^* \varphi = f \quad\text{ in }{\mathbb R}^N
\end{align}
and we show that $G^*$ is compact. Indeed multiplication of \eqref{inverse} by
$\bar{\varphi}$ yields
\begin{equation}\label{compact}
\int_{{\mathbb R}^N}[
A^*\nabla\varphi\cdot\nabla\bar{\varphi}-iB^*{\rm Im}(\varphi z\cdot\nabla\bar{\varphi})+
D^*z\cdot z|\varphi|^2 ] \, dz=
\int_{{\mathbb R}^N}f\bar{\varphi}\, dz\,.
\end{equation}
Upon defining the $2N$-dimensional vector-valued function $\Phi$
$$
\Phi:=\left(\hspace{-2mm}
\begin{array}{c}
2i\pi z\varphi \\
\nabla\varphi
\end{array}\hspace{-2mm}
\right)
$$
we rewrite \eqref{compact} in agreement with this block notation
\begin{equation*}
\int_{{\mathbb R}^N}\frac{1}{8\pi^2} \nabla\nabla\lambda_{n} \Phi\cdot\overline\Phi \, dz=
\int_{{\mathbb R}^N}f\bar{\varphi}\, dz \,.
\end{equation*}
By the positivity assumption on the matrix $\nabla\nabla\lambda_{n}$ it follows that there
exists a positive constant $c_0$ such that
\begin{equation*}
c_0\Big(||\nabla\varphi||_{L^2({\mathbb R}^N)}^2+||z\varphi||_{L^2({\mathbb R}^N)}^2\Big)
\leq
||f||_{L^2({\mathbb R}^N)}||\varphi||_{L^2({\mathbb R}^N)}\,,
\end{equation*}
which implies by a standard argument
$$
||\varphi||_{L^2({\mathbb R}^N)}^2 + ||\nabla\varphi||_{L^2({\mathbb R}^N)}^2+||z\varphi||_{L^2({\mathbb R}^N)}^2 \leq
C ||f||_{L^2({\mathbb R}^N)}^2,
$$
from which we deduce the compactness of $G^*$ in $L^2({\mathbb R}^N)$-strong.
Thus there exists an infinite countable number of eigenvalues for ${\mathbb A}^{*}$.
We are left to prove the exponential decay of the eigenfunctions (this
fact is quite standard, see \emph{e.g.} \cite{alam}). Let $\varphi_n$ be an
eigenfunction and let $\sigma_n$ be the associated eigenvalue
\begin{equation}\label{eigenfn}
{\mathbb A}^* \varphi_n = \sigma_n \varphi_n\,.
\end{equation}
Let $R_0>0$ and $\rho\in C^\infty({\mathbb R})$ be a real function such that
$0\leq\rho\leq 1$, $\rho(s)=0$ for $s\leq R_0$ and $\rho(s)=1$ for $s\geq R_0+1$ and
for every positive integer $k$ define $\rho_k\in C^\infty({\mathbb R}^N)$ in the following way
$$
\rho_k(z):=\rho(|z|-k).
$$
We now multiply \eqref{eigenfn} by $\bar{\varphi}_n\rho_k^2$ to get
\begin{equation*}
\int_{{\mathbb R}^N}\rho_k^2\left(
A^*\nabla\varphi_n\cdot\nabla\bar{\varphi}_n-iB^*{\rm Im}(\varphi_n z\cdot\nabla\bar{\varphi}_n)+
D^*z\cdot z|\varphi_n|^2 -\sigma_n|\varphi_n|^2 \right) dz=
\end{equation*}
\begin{equation}\label{trucco}
\int_{{\mathbb R}^N}\left(\rho_k|\varphi_n|^2B^*z\cdot\nabla\rho_k -
2\rho_k \,\bar{\varphi}_n A^*\nabla\varphi_n\cdot\nabla\rho_k \right) dz\,.
\end{equation}
Next remark that since the left hand side of \eqref{trucco} is real the right hand side
must also be real and therefore it is equal to
\begin{equation}\label{real}
\int_{{\mathbb R}^N}- 2\rho_k \,{\rm Re}(\bar{\varphi}_n A^*\nabla\varphi_n)\cdot\nabla\rho_k \, dz\,.
\end{equation}
Let $B_k$ denote the ball of radius $R_0+k$ and center $z=0$ and observe
that the support of $\nabla\rho_k$ is contained in $B_{k+1}\setminus B_k$.
Then putting together \eqref{trucco} and \eqref{real} and using again the positive
definiteness of the matrix $\nabla\nabla\lambda_{n}$ we obtain for $R_0$ sufficiently large
($\sqrt{R_0}>\sigma_n$ does the job)
\begin{equation*}
||\varphi_n||_{H^1({\mathbb R}^N\setminus B_{k+1})}^2
\leq
c_1\Big(||\varphi_n||^2_{H^1({\mathbb R}^N\setminus B_k)}-||\varphi_n||^2_{H^1({\mathbb R}^N\setminus B_{k+1})}\Big)
\end{equation*}
where $c_1$ is a positive constant independent of $k$.
Thus we deduce that
\begin{equation}\label{estim}
||\varphi_n||_{H^1({\mathbb R}^N\setminus B_{k+1})}^2
\leq
\Big(\frac{c_1}{1+c_1}\Big)^k||\varphi_n||^2_{H^1({\mathbb R}^N\setminus B_0)}\,.
\end{equation}
Upon defining a positive constant $\gamma_0>0$ by
$$
\Big(\frac{c_1}{1+c_1}\Big)^k=e^{-2\gamma_0(k+R_0)}
$$
it is finally seen that \eqref{estim} implies the estimate \eqref{decay} for any
exponent $0<\gamma_n<\gamma_0$.
\end{proof}
\noindent
{\bf Proof of Theorem \ref{mainth}.}
We rescale the space variable by introducing
$$
z=\frac{x}{\sqrt{\varepsilon}} \,,
$$
and define the sequence $v_{\varepsilon}$ by
\begin{equation}\label{vdef}
v_{\varepsilon}(t,z):=e^{-i\frac{\lambda_{n} t}{\varepsilon}}
e^{-2i\pi\frac{\theta^{n}\cdot x}{\varepsilon}}u_{\varepsilon}(t,x)\,.
\end{equation}
By the a priori estimates of Lemma \ref{apriori}
it follows that $v_{\varepsilon}(t,z)$ satisfies
$$
|| v_\varepsilon ||_{L^\infty\left({\mathbb R}^+;L^2({\mathbb R}^N)\right)} +
\sqrt{\varepsilon} || \nabla v_\varepsilon ||_{L^\infty\left({\mathbb R}^+;L^2({\mathbb R}^N)\right)}
\leq C ,
$$
and applying the compactness of two-scale convergence
(see Proposition \ref{prop2s}), up to a subsequence, there exists a limit
$v^*(t,z,y)\in L^2\left({\mathbb R}^+\times{\mathbb R}^N;H^1({\mathbb T}^N)\right)$
such that $v_\varepsilon$ and $\sqrt{\varepsilon}\nabla v_\varepsilon$ two-scale
converge to $v^*$ and $\nabla_y v^*$, respectively.
Similarly, by definition of the initial data,
$v_\varepsilon(0,z)$ two-scale converges to
$\psi_n (y) v^0(z)$.
\noindent
Although $v_\varepsilon$ is the unknown which will pass to the limit
in the sequel, it is simpler to write an equation for another
function, namely
\begin{equation}\label{wdef}
w_{\varepsilon}(t,z):=e^{2i\pi\frac{\theta^{n}\cdot z}{\sqrt{\varepsilon}}}v_{\varepsilon}(t,z)
= e^{-i\frac{\lambda_{n} t}{\varepsilon}} u_{\varepsilon}(t,x)\,.
\end{equation}
By \eqref{wdef} it follows that
\begin{equation}\label{formula}
\nabla w_{\varepsilon}=e^{2i\pi\frac{\theta^{n}\cdot z}{\sqrt{\varepsilon}}}
\Big(\nabla +2i\pi\frac{\theta^{n}}{\sqrt{\varepsilon}}\Big)v_{\varepsilon}\,,
\end{equation}
and it can be checked that the new unknown $w_{\varepsilon}$ solves the following equation
\begin{equation}\label{neweq}
\left\{
\begin{array}{ll}
\displaystyle
i \frac{\partial w_{\varepsilon}}{\partial t} -
\hbox{{\rm div}} [A\left(\sqrt{\varepsilon}z,z/\sqrt{\varepsilon}\right)\nabla w_{\varepsilon}]
+ \frac{1}{\varepsilon} [ c(\sqrt{\varepsilon}z, z/\sqrt{\varepsilon}) - \lambda_{n} ] w_{\varepsilon}=0 & \mbox{ in } {\mathbb R}^{N}\times {\mathbb R}^+\\
[3mm]
w_{\varepsilon}(0,z)=u^{0}_{\varepsilon}(\sqrt{\varepsilon}z) \hspace{5.1cm} & \mbox{ in } {\mathbb R}^{N}\\
\end{array}
\right.
\end{equation}
\noindent
where the differential operators div and $\nabla$ act with respect to
the new variable $z$.
\noindent
{\bf First step.}
We multiply the equation \eqref{neweq} by the complex conjugate of
$$
\varepsilon\phi\Big(t,z,\frac{z}{\sqrt{\varepsilon}}\Big) e^{2i\pi\frac{\theta^n\cdot z}{\sqrt{\varepsilon}}}
$$
where $\phi(s,z,y)$ is a smooth test function defined on
${\mathbb R}^+\times{\mathbb R}^N\times{\mathbb T}^N$, with compact support in ${\mathbb R}^+\times{\mathbb R}^N$.
Since this test function has compact support (fixed with respect to $\varepsilon$), the effect of
the non-periodic variable in the coefficients is negligible for sufficiently small $\varepsilon$.
Therefore we can replace the value
of each coefficient at $(\sqrt{\varepsilon}z, z/\sqrt{\varepsilon})$ by its Taylor expansion of order two
about the point $(0, z/\sqrt{\varepsilon})$.
Integrating by parts and using \eqref{wdef} and \eqref{formula} yields
\begin{align*}
& -i \varepsilon \int_{0}^{+\infty}\hspace{-3mm} \int_{{\mathbb R}^{N}}
v_{\varepsilon} \frac{\partial \bar{\phi}^{\varepsilon}}{\partial t}\: dt\, dz
- i \varepsilon \int_{{\mathbb R}^{N}}
v_{\varepsilon}(0,z)\bar{\phi}\Big(0,z,\frac{z}{\sqrt{\varepsilon}}\Big) \, dz \\
& + \int_{0}^{+\infty}\hspace{-3mm}
\int_{{\mathbb R}^{N}}
[ A^{\varepsilon} + A^{\varepsilon}_{1,h} \, \sqrt{\varepsilon} z_{h} + \textstyle{\frac{1}{2}} A^{\varepsilon}_{2,lh} \, \varepsilon z_{l} z_{h}
+ o(\varepsilon)]
(\sqrt{\varepsilon}\nabla +2i\pi\theta^{n})v_{\varepsilon}\hspace{-1mm} \cdot
(\sqrt{\varepsilon}\nabla-2i\pi\theta^{n})\bar{\phi}^{\varepsilon} \,dz \, dt \\
& + \int_{0}^{+\infty}\hspace{-3mm} \int_{{\mathbb R}^{N}}
[c^{\varepsilon} + c^{\varepsilon}_{1,h} \sqrt{\varepsilon} z_{h} + \textstyle{\frac{1}{2}} c^{\varepsilon}_{2,lh}\,\varepsilon z_{l} z_{h}
+o(\varepsilon)- \lambda_{n} ] \, v_{\varepsilon} \bar{\phi}^{\varepsilon}
\:dz \, dt = 0 .
\end{align*}
Passing to the two-scale limit we get the variational formulation
of
$$
- (\hbox{{\rm div}}_y +2i\pi\theta^n)\Big( A(y) (\nabla_y +2i\pi\theta^n)v^*\Big) +
c(y)v^* = \lambda_{n} v^* \quad \mbox{ in } {\mathbb T}^N .
$$
The simplicity of $\lambda_{n}$ implies that there exists a scalar function $v(t,z)\in
L^2\left({\mathbb R}^+\times{\mathbb R}^N\right)$ such that
\begin{equation}\label{vstar}
v^*(t,z,y) = v(t,z) \psi_n(y) .
\end{equation}
\noindent
{\bf Second step.}
We multiply \eqref{neweq} by the complex conjugate of
\begin{equation*}
\Psi_{\varepsilon}(t,z)=e^{2i\pi\theta^{n}\cdot\frac{z}{\sqrt{\varepsilon}}}\Big[
\psi_{n}^{\varepsilon} \phi(t,z) +
\sqrt{\varepsilon}
\sum_{k=1}^{N}
\Big(
\frac{1}{2i\pi}
\frac{\partial \psi_{n}^{\varepsilon}}{\partial \theta_{k}}
\frac{\partial \phi}{\partial z_{k}}(t,z) +
z_{k}
\frac{\partial \psi_{n}^{\varepsilon}}{\partial x_{k}}
\phi(t,z)
\Big)
\Big]\,,
\end{equation*}
where $\phi(t,z)$ is a smooth test function with compact support
in ${\mathbb R}^+\times{\mathbb R}^N$.
\noindent
We first look at those terms of the equation involving time derivatives:
\begin{align}\label{timeder}
& \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
i\frac{\partial w_\varepsilon}{\partial t}\bar{\Psi}_\varepsilon \, dt\,dz =\\
\nonumber &\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}-i v_{\varepsilon}
\left[
\bar{\psi}_{n}^{\varepsilon} \frac{\partial \bar{\phi}}{\partial t} +\sqrt{\varepsilon}\sum_{k=1}^{N}
\left(
-\frac{1}{2i\pi}\frac{\partial \bar{\psi}_{n}^{\varepsilon}}{\partial \theta_{k}}
\frac{\partial^{2} \bar{\phi}}{\partial t \partial z_{k}} + z_{k}
\frac{\partial \bar{\psi}_{n}^{\varepsilon}}{\partial x_{k}}\frac{\partial \bar{\phi}}{\partial t}
\right)
\right]\,
dt\, dz \\
\nonumber & -i \int_{{\mathbb R}^{N}}
v_{\varepsilon}(0,z)
\left[
\bar{\psi}_{n}^{\varepsilon} \bar{\phi} (0,z) +
\sqrt{\varepsilon} \sum_{k=1}^{N}
\left(
-\frac{1}{2i\pi}
\frac{\partial \bar{\psi}_{n}^{\varepsilon}}{\partial \theta_{k}}
\frac{\partial \bar{\phi}}{\partial z_{k}}(0,z) +
z_{k} \frac{\partial \bar{\psi}_{n}^{\varepsilon}}{\partial x_{k}}\bar{\phi} (0,z)
\right)
\right] \, dz\,.
\end{align}
\noindent
Recalling the normalization $\int_{{\mathbb T}^{N}}|\psi_{n}|^{2}\,dy=1$,
we find that the two-scale limit of the term on the left hand side of
\eqref{timeder} is given by the expression
\begin{equation}\label{term1}
-i \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} v \frac{\partial \bar{\phi}}{\partial t}\: dz\, dt
-i \int_{{\mathbb R}^{N}} v^{0}\bar{\phi} (0,z) \, dz \,.
\end{equation}
\noindent
We further decompose $\Psi_\varepsilon$ as follows
$$
\Psi_\varepsilon=\Psi_\varepsilon^1+\Psi_\varepsilon^2\cdot z \quad\text{ with }\quad
\Psi_\varepsilon^2 = \sqrt{\varepsilon}
e^{2i\pi\theta^{n}\cdot\frac{z}{\sqrt{\varepsilon}}}
\sum_{k=1}^{N}\frac{\partial \psin^{\e}}{\partial x_{k}}\phi(t,z)e_k.
$$
\noindent
Getting rid of all terms multiplied by $o(\varepsilon)$ and taking into account \eqref{wdef}
and \eqref{formula} we next pass to the limit in the remaining terms
of \eqref{neweq} multiplied by $\bar{\Psi}_{\varepsilon}$.
The computation is similar to \cite{alpiat2} but it involves new terms since
$\psi_{n}$ and its derivatives also depend on $x$.
We first look at those terms which are of zero order with respect to $z$, namely
\begin{align}\label{zero}
& \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \Big[
A^{\e} \nabla w_\varepsilon \cdot (\nabla\bar{\Psi}_\varepsilon^1+ \bar{\Psi}_\varepsilon^2) +
\frac{1}{\varepsilon}(c^{\e}-\lambda_{n})w_\varepsilon \bar{\Psi}_\varepsilon^1 \Big] \: dz\, dt \\
\nonumber & = \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \Big[
\frac{1}{\varepsilon} A^{\e} \Big(\sqrt{\varepsilon}\nabla + 2i\pi\theta^{n} \Big)v_{\varepsilon}
\cdot (\nabla_{y}-2i\pi\theta^{n})\bpsin^{\e} \bar{\phi} +
\frac{1}{\varepsilon}(c^{\e}-\lambda_{n})\bpsin^{\e} v_{\varepsilon} \bar{\phi}
\Big] \: dz\, dt\\
\nonumber & -\frac{1}{2i\pi}
\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \Big[
\frac{1}{\sqrt{\varepsilon}}A^{\e} \Big(\sqrt{\varepsilon}\nabla + 2i\pi\theta^{n} \Big)v_{\varepsilon} \cdot
(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}
\frac{\partial \bar{\phi}}{\partial z_{k}} \\
\nonumber & \hspace{2.7cm} + \frac{1}{\sqrt{\varepsilon}} (c^{\e}-\lambda_{n}) v_{\varepsilon}
\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}\frac{\partial \bar{\phi}}{\partial z_{k}}
\Big] \:dz\, dt \\
\nonumber & +\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
\frac{1}{\sqrt{\varepsilon}}A^{\e} \Big(\sqrt{\varepsilon}\nabla + 2i\pi\theta^{n} \Big)v_{\varepsilon} \cdot
\bpsin^{\e} \nabla \bar{\phi} \: dz\, dt \\
\nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} -\frac{1}{2\pi i}
A^{\e} \Big(\sqrt{\varepsilon}\nabla + 2i\pi\theta^{n} \Big)v_{\varepsilon} \cdot
\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}
\nabla\frac{\partial \bar{\phi}}{\partial z_{k}} \:dz\, dt\,\\
\nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
A^{\e} \Big(\sqrt{\varepsilon}\nabla + 2i\pi\theta^{n} \Big)v_{\varepsilon}
\cdot \frac{\partial \bpsin^{\e}}{\partial x_{k}}\bar{\phi} \, e_{k}
\:dz\, dt\,.
\end{align}
\noindent
Using equation \eqref{eq1} with $\varphi=v_{\varepsilon}\bar{\phi}$ and equation \eqref{eq2} with
${\displaystyle \varphi=v_{\varepsilon}\frac{\partial \bar{\phi}}{\partial z_{k}}}$ we rewrite the first
two integrals in the right hand side of \eqref{zero} as follows
\begin{align*}
& \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
-\frac{1}{\sqrt{\varepsilon}} A^{\e}(\nabla_{y}-2i\pi\theta^{n})\bpsin^{\e}\cdot v_{\varepsilon}\nabla \bar{\phi}
\,dz\, dt\, \\
& +\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \Big[
\frac{1}{2i\pi} A^{\e}(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}
\cdot v_{\varepsilon}\nabla \frac{\partial \bar{\phi}}{\partial z_{k}}
+ \frac{1}{\sqrt{\varepsilon}}A^{\e} e_{k}\cdot
v_{\varepsilon} \frac{\partial \bar{\phi}}{\partial z_{k}}(\nabla_{y}-2i\pi\theta^{n}) \bpsin^{\e} \\
& \hspace{2cm} - \frac{1}{\sqrt{\varepsilon}}A^{\e} \bpsin^{\e} e_{k}\cdot
\Big(\sqrt{\varepsilon}\nabla +2i\pi\theta^{n} \Big)
\Big(v_{\varepsilon}\frac{\partial \bar{\phi}}{\partial z_{k}} \Big) \Big] \,dz\, dt\,.
\end{align*}
\noindent
Combining the above terms with the other terms in \eqref{zero} and passing to the
two-scale limit in \eqref{zero} yields
\begin{align}\label{term2}
& \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}
\Big[
\frac{1}{2i\pi}A\psi_{n} (\nabla_{y}-2i\pi\theta^{n})\frac{\partial\bar{\psi}_{n}}{\partial\theta_{k}}
-\frac{1}{2i\pi}A \frac{\partial\bar{\psi}_{n}}{\partial\theta_{k}}(\nabla_{y}+2i\pi\theta^{n})\psi_{n}
- A |\psi_{n}|^{2} e_{k} \Big] \\
\nonumber & \hspace{2.4cm}\cdot v \nabla \frac{\partial \bar{\phi}}{\partial z_{k}}
\:dy \,dz\, dt \\
\nonumber & +\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}
A(\nabla_{y}+2i\pi\theta^{n})\psi_{n}\cdot \frac{\partial \bar{\psi}_{n}}{\partial x_{k}}v\bar{\phi} \, e_{k}
\:dy \,dz\, dt\,.
\end{align}
By equation \eqref{eq6} it can be seen that the first integral of \eqref{term2} equals
\begin{equation}\label{tensorA}
\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} A^{*}\nabla v \nabla \bar{\phi} \,dz\, dt\,.
\end{equation}
\noindent
We now focus on those terms which are linear in $z$:
\begin{align}\label{linear}
\nonumber & \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \Big[
A^{\e}\nabla w_\varepsilon\cdot(\nabla\bar{\Psi}_\varepsilon^2 z)+\frac{1}{\varepsilon}(c^{\e}-\lambda_{n})w_\varepsilon\bar{\Psi}_\varepsilon^2 z
+A^{\e}_{1,k}\sqrt{\varepsilon}z_k\nabla w_\varepsilon\cdot (\nabla\bar{\Psi}_\varepsilon ^1+\bar{\Psi}_\varepsilon^2)\\
\nonumber & \hspace{1.5cm} +\frac{1}{\sqrt{\varepsilon}}c^{\e}_{1,k}z_k w_{\varepsilon}\bar{\Psi}_\varepsilon^1
\Big] \: dz \, dt \\
\nonumber & = \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \Big[
\frac{1}{\sqrt{\varepsilon}} A^{\e} \Big(\sqrt{\varepsilon}\nabla +2i\pi\theta^{n} \Big)v_{\varepsilon} \cdot
(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bpsin^{\e}}{\partial x_{k}}\,\bar{\phi} z_{k}
+ \frac{1}{\sqrt{\varepsilon}}(c^{\e}-\lambda_{n})v_{\varepsilon}\frac{\partial \bpsin^{\e}}{\partial x_{k}}\,\bar{\phi} z_{k}
\Big] dz \, dt \\
\nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \Big[
\frac{1}{\sqrt{\varepsilon}}A^{\e}_{1,k} \Big(\sqrt{\varepsilon}\nabla +2i\pi\theta^{n} \Big)v_{\varepsilon}\hspace{-1mm}
\cdot \hspace{-1mm}
(\nabla_{y}-2i\pi\theta^{n}) \bpsin^{\e} \,\bar{\phi} z_{k}
+ \frac{1}{\sqrt{\varepsilon}} c^{\e}_{1,k} v_{\varepsilon}\bpsin^{\e} \, \bar{\phi} \, z_{k}
\Big]\: dz \, dt \\
\nonumber & +
\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \Big[
A^{\e}(\sqrt{\varepsilon}\nabla +2i\pi\theta^{n})v_{\varepsilon}\cdot
\frac{\partial \bpsin^{\e}}{\partial x_{k}}\nabla\bar{\phi}\, z_{k}
+ A^{\e}_{1,k} (\sqrt{\varepsilon}\nabla +2i\pi\theta^{n})v_{\varepsilon}
\cdot \bpsin^{\e} \nabla\bar{\phi} \, z_{k} \Big] \: dz \, dt \\
\nonumber & -\frac{1}{2i\pi}\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \Big[
A^{\e}_{1,h} (\sqrt{\varepsilon}\nabla +2i\pi\theta^{n})v_{\varepsilon}\cdot
(\nabla_{y}-2i\pi\theta^{n})
\frac{\partial \bar{\psi}_{n}}{\partial \theta_{k}}\frac{\partial \bar{\phi}}{\partial z_{k}}\, z_{h}
+ c^{\e}_{1,h} v_{\varepsilon}
\frac{\partial \bar{\psi}_{n}}{\partial \theta_{k}}\frac{\partial \bar{\phi}}{\partial z_{k}}\, z_{h}
\Big]\: dz \, dt \\
\nonumber & +\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \Big[
\sqrt{\varepsilon}A^{\e}_{1,h}(\sqrt{\varepsilon}\nabla +2i\pi\theta^{n})v_{\varepsilon}\cdot
\Big(-\frac{1}{2i\pi}
\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}\nabla\frac{\partial \bar{\phi}}{\partial z_{k}}
+\frac{\partial \bpsin^{\e}}{\partial x_{k}}\bar{\phi} \, e_{k}
\Big) z_{h}
\Big]\: dz \, dt \,.
\end{align}
By equation \eqref{eq3} with ${\displaystyle \varphi = v_{\varepsilon}\bar{\phi} z_{k}}$
it can be seen that the sum of the first two integrals in the right hand side of \eqref{linear} gives
\begin{equation}\label{byeq3}
- \int_{0}^{+\infty}\int_{{\mathbb R}^{N}} \Big[
A^{\e}(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bpsin^{\e}}{\partial x_{k}} \cdot
v_{\varepsilon} \nabla (\bar{\phi} z_{k})
+ A^{\e}_{1,k} (\nabla_{y}-2i\pi\theta^{n}) \bpsin^{\e} \cdot v_{\varepsilon} \nabla (\bar{\phi} z_{k})
\Big] \: dz \, dt \,.
\end{equation}
\noindent
Therefore passing to the two-scale limit in \eqref{linear} we find
\begin{align} \label{linear1}
& -\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}} \Big[
A (\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial x_{k}}
\cdot v \psi_{n} \bar{\phi}\, e_{k}
+ A_{1,k} (\nabla_{y}-2i\pi\theta^{n}) \bar{\psi}_{n}\cdot
v \psi_{n} \bar{\phi} \, e_{k} \Big] \: dy \, dz \, dt\\
\nonumber & - \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}} \Big[
A (\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial x_{k}}
\cdot v \psi_{n} z_{k} \nabla\bar{\phi}
+ A_{1,k}(\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n} \cdot
v \psi_{n} z_{k} \nabla\bar{\phi} \Big] \: dy \, dz \, dt\\
\nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}} \Big[
A(\nabla_{y}+2i\pi\theta^{n})\psi_{n} \cdot v
\frac{\partial \bar{\psi}_{n}}{\partial x_{k}} z_{k}\nabla\bar{\phi}
+ A_{1,k}(\nabla_{y}+2i\pi\theta^{n})\psi_{n} \cdot
v \bar{\psi}_{n} z_{k}\nabla\bar{\phi} \Big]\: dy \, dz \, dt\\
\nonumber & -\frac{1}{2i\pi}
\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}} \Big[
A_{1,h}(\nabla_{y}+2i\pi\theta^{n})\psi_{n} \cdot
(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial \theta_{k}}
v z_{h}\frac{\partial \bar{\phi}}{\partial z_{k}}\\
\nonumber & \hspace{3cm} + c_{1,h} \psi_{n}
\frac{\partial \bar{\psi}_{n}}{\partial \theta_{k}}v z_{h}\frac{\partial \bar{\phi}}{\partial z_{k}}
\Big]\: dy \, dz \, dt \,.
\end{align}
\noindent
By equation \eqref{eq4} it follows that the last integral in \eqref{linear1} is equal to
\begin{align}\label{linear2}
& \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}
\Big[
A_{1,h}\psi_{n} e_{k} \cdot(\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n} + A\psi_{n} e_{k}
\cdot(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial x_{h}}\psi_{n} \Big]
v z_{h} \frac{\partial \bar{\phi}}{\partial z_{k}}
\:\, dy \, dz \, dt \\
\nonumber & - \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}
\Big[
A_{1,h}\bar{\psi}_{n} e_{k}\cdot (\nabla_{y}+2i\pi\theta^{n})\psi_{n}
+ A\frac{\partial \bar{\psi}_{n}}{\partial x_{h}}e_{k}\cdot (\nabla_{y}+2i\pi\theta^{n})\psi_{n} \Big]
v z_{h} \frac{\partial \bar{\phi}}{\partial z_{k}}
\:\, dy \, dz \, dt \\
\nonumber & - \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}}
\frac{1}{2i\pi}\frac{\partial^{2} \lambda_{n}}{\partial x_{h}\partial\theta_{k}}
|\psi_{n}|^{2} v z_{h} \frac{\partial \bar{\phi}}{\partial z_{k}}
\:\, dy \, dz \, dt \,.
\end{align}
\noindent
Next notice that the first and the second line of \eqref{linear2} cancel out with the second
and the third line of \eqref{linear1} respectively and therefore \eqref{linear1} reduces to
\begin{align}\label{term3}
& -\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}\int_{{\mathbb T}^{N}} \Big[
A (\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial x_{k}}
\cdot v\psi_{n}\bar{\phi} \,e_{k}
+ A_{1,k}(\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n}\cdot v \psi_{n}
\bar{\phi} \, e_{k} \Big] \:\, dy \, dz \, dt \\
\nonumber & -\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
\frac{1}{2i\pi}\frac{\partial^{2} \lambda_{n}}{\partial x_{h}\partial\theta_{k}}
v \frac{\partial \bar{\phi}}{\partial z_{k}} \, z_{h}
\: dz \, dt \,.
\end{align}
\noindent
Finally we consider all quadratic in $z$ terms:
\begin{align}
\nonumber & \frac{1}{2}\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \Big[
A^{\e}_{2,lh} \,\varepsilon z_{l}z_{h}\nabla w_\varepsilon\cdot(\nabla\bar{\Psi}_\varepsilon^1+\bar{\Psi}_\varepsilon^2)
+ c^{\e}_{2,lh} z_{l} z_{h} w_{\varepsilon}\bar{\Psi}_\varepsilon^1 \Big] \: dz \, dt \\
\nonumber & +\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \Big[
A^{\e}_{1,k}\sqrt{\varepsilon}z_k\nabla w_\varepsilon\cdot (z\nabla\bar{\Psi}_\varepsilon ^2)
+\frac{1}{\sqrt{\varepsilon}}c^{\e}_{1,k}z_k w_{\varepsilon}z\cdot\bar{\Psi}_\varepsilon^2 \Big]\: dz \, dt \\
\nonumber & = \frac{1}{2} \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
A^{\e}_{2,lh} \, \sqrt{\varepsilon} z_{l}z_{h}
\Big(\sqrt{\varepsilon}\nabla+2i\pi\theta^{n} \Big)v_{\varepsilon}\cdot
\Big[
\frac{1}{\sqrt{\varepsilon}}(\nabla_{y}-2i\pi\theta^{n})\bpsin^{\e} \bar{\phi}
+\bpsin^{\e} \nabla\bar{\phi} \Big] \: dz \, dt \\
\nonumber & - \frac{1}{2}\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
A^{\e}_{2,lh} \, \sqrt{\varepsilon} z_{l}z_{h}
\Big(\sqrt{\varepsilon}\nabla+2i\pi\theta^{n} \Big)v_{\varepsilon} \\
\nonumber & \hspace{2.4cm}\cdot
\Big[
\frac{1}{2\pi i}
\nabla_{y}\frac{\partial \bpsin^{\e}}{\partial \theta_{k}}
\frac{\partial \bar{\phi}}{\partial z_{k}}
+\sqrt{\varepsilon}
\Big(
\frac{1}{2i\pi}
\frac{\partial \bpsin^{\e}}{\partial \theta_{k}} \nabla\frac{\partial \bar{\phi}}{\partial z_{k}}
+ e_{k} \frac{\partial \bpsin^{\e}}{\partial x_{k}} \bar{\phi}
\Big) \Big] \, dz \, dt \\
\nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
A^{\e}_{1,h}\,z_{h} \Big(\sqrt{\varepsilon}\nabla+2i\pi\theta^{n} \Big)v_{\varepsilon}
\cdot
\Big[
z_{k}(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bpsin^{\e}}{\partial x_{k}} \bar{\phi}
+\sqrt{\varepsilon} z_{k} \frac{\partial \bpsin^{\e}}{\partial x_{k}} \nabla \bar{\phi}
\Big] \: \, dz \, dt \\
\nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
\frac{1}{2} c^{\e}_{2,lh} z_{l} z_{h} v_{\varepsilon}
\Big(
\bpsin^{\e}\bar{\phi}
- \sqrt{\varepsilon} \frac{1}{2i\pi}
\frac{\partial \bpsin^{\e}}{\partial \theta_{k}} \frac{\partial \bar{\phi}}{\partial z_{k}}
\Big) \: \, dz \, dt \\
\nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
c^{\e}_{1,h} z_{h} v_{\varepsilon}
z_{k} \frac{\partial \bpsin^{\e}}{\partial x_{k}} \bar{\phi}
\: dz \, dt
\end{align}
\noindent
which give on passing to the two-scale limit
\begin{align} \label{quad}
& \frac{1}{2}\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \int_{{\mathbb T}^{N}}
\Big[
A_{2,lh}(\nabla_{y}+2i\pi\theta^{n})\psi_{n}\cdot (\nabla_{y}-2i\pi\theta^{n})\bar{\psi}_{n}
+ c_{2,lh} \psi_{n} \bar{\psi}_{n} \Big] v \bar{\phi} \, z_{l} z_{h} \: dy \, dz \, dt \\
\nonumber & + \int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}} \int_{{\mathbb T}^{N}}
\Big[
A_{1,h}(\nabla_{y}+2i\pi\theta^{n})\psi_{n} \cdot
(\nabla_{y}-2i\pi\theta^{n})\frac{\partial \bar{\psi}_{n}}{\partial x_{k}}
+ c_{1,h} \psi_{n} \frac{\partial \bar{\psi}_{n}}{\partial x_{k}}
\, \Big] v \bar{\phi} \, z_{h} z_{k} \: dy \, dz \, dt \,.
\end{align}
\noindent
Now using equation \eqref{eq5} we find that \eqref{quad} reduces itself to
\begin{equation}\label{term4}
\int_{0}^{+\infty}\hspace{-3mm}\int_{{\mathbb R}^{N}}
\frac{1}{2}\frac{\partial^{2} \lambda_{n}}{\partial x_{l}\partial x_{h}} \:
v \bar{\phi} \, z_{l} z_{h} \: dz \, dt \,.
\end{equation}
\noindent
Summing up together \eqref{term1}, \eqref{term2}, \eqref{tensorA},
\eqref{term3} and \eqref{term4}
yields the weak formulation of \eqref{hom}.
By uniqueness of the solution of the homogenized problem \eqref{hom},
we deduce that the entire sequence $v_\varepsilon$ two-scale converges
weakly to $\psi_{n}(y)v(t,z)$.
It remains to prove the strong two-scale convergence of $v_\varepsilon$.
By Lemma \ref{apriori} we have
$$
|| v_\varepsilon(t) ||_{L^2({\mathbb R}^N)} = || u_\varepsilon(t) ||_{L^2({\mathbb R}^N)} =
|| u^0_\varepsilon ||_{L^2({\mathbb R}^N)} \to || \psi_n v^0 ||_{L^2({\mathbb R}^N\times{\mathbb T}^N)}
= || v^0 ||_{L^2({\mathbb R}^N)}
$$
by the normalization condition of $\psi_n$. From the conservation of
energy of the homogenized equation \eqref{hom} we have
$$
|| v(t) ||_{L^2({\mathbb R}^N)} = || v^0 ||_{L^2({\mathbb R}^N)} ,
$$
and thus we deduce the strong convergence from Proposition \ref{prop2s}.
$\Box$
\begin{remark}\label{lastrem}
{\rm As usual in periodic homogenization \cite{allaire}, \cite{blp},
the choice of the test function $\Psi_\varepsilon$, in the proof
of Theorem \ref{mainth}, is dictated by the formal two-scale
asymptotic expansion that can be obtained for the
solution $w_\varepsilon$ of \eqref{neweq}, namely
$$
w_\varepsilon(t,z) \approx
e^{2i\pi\theta^{n}\cdot\frac{z}{\sqrt{\varepsilon}}} \Big[
\psi_{n} \Big(\frac{z}{\sqrt{\varepsilon}} \Big) v(t,z) +
\sqrt{\varepsilon}
\sum_{k=1}^{N}
\Big(
\frac{1}{2i\pi}
\frac{\partial \psi_{n}}{\partial \theta_{k}} \Big(\frac{z}{\sqrt{\varepsilon}} \Big)
\frac{\partial v}{\partial z_{k}}(t,z) +
z_{k}
\frac{\partial \psi_{n}}{\partial x_{k}} \Big(\frac{z}{\sqrt{\varepsilon}} \Big)v(t,z)
\Big)
\Big]
$$
where $v$ is the homogenized solution of \eqref{hom}.
Actually the homogenized equation that one gets by the asymptotic
expansion method is
\begin{equation}\label{asym}
i \frac{\partial v}{\partial t} - \hbox{{\rm div}}\left(A^{*}\nabla v \right)
+ B^{*} \nabla v \cdot z + \bar{c}^{*} v
+ v D^{*} z \cdot z = 0 \, ,
\end{equation}
which apparently differs from \eqref{hom} by the following
zero-order term
$$
\left( {\rm tr}\,(\nabla_{\theta}\nabla_{x}\lambda_{n}) - 4\pi \text{Im}( c^{*}) \right) v \,.
$$
By virtue of \eqref{contaccio} the above term vanishes,
so that formulae \eqref{asym} and \eqref{hom} are equivalent.
}
\end{remark}
\centerline{\sc Acknowledgments}
This work was done while M. Palombaro was post-doc at the Centre de
Math\'ematiques Appliqu\'ees of Ecole Polytechnique. The hospitality
of people there is gratefully acknowledged. This work was partly
supported by the MULTIMAT european network MRTN-CT-2004-505226 funded by the EEC.
\begin{thebibliography}{50}
\bibitem{allaire} G.~Allaire, {\em Homogenization and
two-scale convergence},
SIAM J. Math. Anal. 23(6), 1482--1518 (1992).
\bibitem{alam} G.~Allaire, M.~Amar,
{\em Boundary layer tails in periodic homogenization},
ESAIM Control Optim. Calc. Var. 4, 209--243 (1999).
\bibitem{alpiat1} G.~Allaire, A.~Piatnitski,
{\em Uniform spectral asymptotics for singularly perturbed locally periodic operators,}
Comm. in Partial Differential Equations 27, 705--725 (2002).
\bibitem{alpiat2} G.~Allaire, A.~Piatnitski,
{\em Homogenization of the Schr\"odinger equation and effective mass theorems,}
Comm. Math. Phys. 258, no. 1, 1--22 (2005).
\bibitem{blp} A.~Bensoussan, J.-L.~Lions, G.~Papanicolaou,
{\em Asymptotic analysis for periodic structures,}
North-Holland, Amsterdam (1978).
\bibitem{brezis} H. Br\'ezis,
{\em Op\'erateurs maximaux monotones et semi-groupes
de contractions dans les espaces de Hilbert,}
North-Holland, Amsterdam (1973).
\bibitem{brezis2} H. Br\'ezis,
{\em Analyse fonctionnelle,}
Masson, Paris (1983).
\bibitem{buslaev} V. Buslaev,
{\em Semiclassical approximation for equations with periodic coefficients,}
Russ. Math. Surv. 42, 97--125 (1987).
\bibitem{cl} R.~Carmona, J.~Lacroix,
{\em Spectral Theory of Random Schr\"odinger Operators},
Birkh\"auser, Boston (1990).
\bibitem{guillot}
M. Dimassi, J.-C. Guillot, J. Ralston,
{\em Semiclassical asymptotics in magnetic Bloch bands,}
J. Phys. A 35, no. 35, 7597--7605 (2002).
\bibitem{gerard} P. G\'erard,
{\em Mesures semi-classiques et ondes de Bloch,}
S\'eminaire sur les \'equations aux D\'eriv\'ees Partielles, 1990--1991,
Exp. No. XVI, 19 pp., \'Ecole Polytech., Palaiseau (1991).
\bibitem{gerard2} P. G\'erard, P. Markowich, N. Mauser, F. Poupaud,
{\em Homogenization limits and Wigner transforms,}
Comm. Pure Appl. Math. 50, no. 4, 323--379 (1997).
\bibitem{sjostrand}
C. G\'erard, A. Martinez, J. Sj\"{o}strand,
{\em A mathematical approach to the effective Hamiltonian in perturbed periodic problems,}
Comm. Math. Phys. 142, no. 2, 217--244 (1991).
\bibitem{guillot2}
J.-C. Guillot, J. Ralston, E. Trubowitz,
{\em Semi-classical methods in solid state physics,}
Comm. Math. Phys. 116, 401--415 (1988).
\bibitem{jikov94a}
V.~V. Jikov, S.~M. Kozlov, and O.~A. Oleinik.
{\em Homogenization of Differential Operators and Integral
Functionals}, Springer Verlag (1994).
\bibitem{kato} T. Kato,
{\em Perturbation theory for linear operators,}
Springer-Verlag, Berlin (1966).
\bibitem{nguetseng} G. Nguetseng,
{\em A general convergence result for a functional related to the
theory of homogenization}, SIAM J. Math. Anal. {\bf20}(3), 608--623 (1989).
\bibitem{pst} G. Panati, H. Spohn, S. Teufel,
{\em Effective dynamics for Bloch electrons: Peierls substitution
and beyond,}
Comm. Math. Phys. 242, 547--578 (2003).
\bibitem{pr} F. Poupaud, C. Ringhofer,
{\em Semi-classical limits in a crystal with exterior potentials
and effective mass theorems,}
Comm. Partial Differential Equations, 21, no. 11-12, 1897--1918 (1996).
\bibitem{reedsimon} M. Reed, B. Simon,
{\em Methods of modern mathematical physics,}
Academic Press, New York (1978).
\end{thebibliography}
\end{document}